From 44c7a5eb8c375d77f685abf585961421e5f8b228 Mon Sep 17 00:00:00 2001 From: Bill Laboon Date: Tue, 7 Nov 2023 16:20:36 +0100 Subject: [PATCH 001/199] Fix "slashaed" typo (#2205) # Description This merely fixes a typo in the documentation, replacing the typo "slashaed" with "slashed". Since external entities use the comments for explanations of events, this will then be shown externally. I noticed this when reviewing [this event](https://polkadot.subscan.io/extrinsic/0xb6bc1e3abde0c2ed9c500c74cfc64cdb8179e5d9af97f4bf53242ce4cdd15a1d?event=18064194-6) on Subscan. This is not related to any other issues or PRs. --- substrate/frame/referenda/src/lib.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/substrate/frame/referenda/src/lib.rs b/substrate/frame/referenda/src/lib.rs index be21375f526..8912f9ad217 100644 --- a/substrate/frame/referenda/src/lib.rs +++ b/substrate/frame/referenda/src/lib.rs @@ -305,7 +305,7 @@ pub mod pallet { /// The amount placed by the account. amount: BalanceOf, }, - /// A deposit has been slashaed. + /// A deposit has been slashed. DepositSlashed { /// The account who placed the deposit. who: T::AccountId, -- GitLab From 8ebb5c3319fa52d68f2d76f90f5787a96de254be Mon Sep 17 00:00:00 2001 From: Sebastian Kunert Date: Tue, 7 Nov 2023 17:21:24 +0100 Subject: [PATCH 002/199] Disable incoming light-client connections for minimal relay node (#2202) When running with `--relay-chain-rpc-url` we received multiple reports of high traffic that disappears when `--in-peers-light 0` is set. Indeed it does not make much sense for light clients to connect to the minimal node since it is not running the block announce protocol and the request/response protocol for light clients. This is intended to alleviate the traffic issues for now. closes #1896 probably related https://github.com/paritytech/cumulus/issues/2563 --- .../relay-chain-minimal-node/src/network.rs | 25 ++++++++++++++++--- 1 file changed, 21 insertions(+), 4 deletions(-) diff --git a/cumulus/client/relay-chain-minimal-node/src/network.rs b/cumulus/client/relay-chain-minimal-node/src/network.rs index f39d7a26dd8..813dca47a03 100644 --- a/cumulus/client/relay-chain-minimal-node/src/network.rs +++ b/cumulus/client/relay-chain-minimal-node/src/network.rs @@ -19,7 +19,8 @@ use sp_runtime::traits::{Block as BlockT, NumberFor}; use sc_network::{ config::{ - NonDefaultSetConfig, NonReservedPeerMode, NotificationHandshake, ProtocolId, SetConfig, + NetworkConfiguration, NonDefaultSetConfig, NonReservedPeerMode, NotificationHandshake, + ProtocolId, SetConfig, }, peer_store::PeerStore, NetworkService, @@ -35,7 +36,7 @@ use std::{iter, sync::Arc}; /// Build the network service, the network status sinks and an RPC sender. pub(crate) fn build_collator_network( config: &Configuration, - network_config: FullNetworkConfiguration, + mut full_network_config: FullNetworkConfiguration, spawn_handle: SpawnTaskHandle, genesis_hash: Hash, best_header: Header, @@ -53,8 +54,12 @@ pub(crate) fn build_collator_network( genesis_hash, ); + // Since this node has no syncing, we do not want light-clients to connect to it. + // Here we set any potential light-client slots to 0. 
+ adjust_network_config_light_in_peers(&mut full_network_config.network_config); + let peer_store = PeerStore::new( - network_config + full_network_config .network_config .boot_nodes .iter() @@ -75,7 +80,7 @@ pub(crate) fn build_collator_network( }) }, fork_id: None, - network_config, + network_config: full_network_config, peer_store: peer_store_handle, genesis_hash, protocol_id, @@ -114,6 +119,18 @@ pub(crate) fn build_collator_network( Ok((network_service, network_starter, Box::new(SyncOracle {}))) } +fn adjust_network_config_light_in_peers(config: &mut NetworkConfiguration) { + let light_client_in_peers = (config.default_peers_set.in_peers + + config.default_peers_set.out_peers) + .saturating_sub(config.default_peers_set_num_full); + if light_client_in_peers > 0 { + tracing::debug!(target: crate::LOG_TARGET, "Detected {light_client_in_peers} peer slots for light clients. Since this minimal node does support\ + neither syncing nor light-client request/response, we are setting them to 0."); + } + config.default_peers_set.in_peers = + config.default_peers_set.in_peers.saturating_sub(light_client_in_peers); +} + struct SyncOracle; impl sp_consensus::SyncOracle for SyncOracle { -- GitLab From 0524aa51d3b253dd94a60a026c7b7c5a850bf297 Mon Sep 17 00:00:00 2001 From: Francisco Aguirre Date: Wed, 8 Nov 2023 06:39:40 +0100 Subject: [PATCH 003/199] XCM builder pattern (#2107) Added a proc macro to be able to write XCMs using the builder pattern. This means we go from having to do this: ```rust let message: Xcm<()> = Xcm(vec![ WithdrawAsset(assets), BuyExecution { fees: asset, weight_limit: Unlimited }, DepositAsset { assets, beneficiary }, ]); ``` to this: ```rust let message: Xcm<()> = Xcm::builder() .withdraw_asset(assets) .buy_execution(asset, Unlimited), .deposit_asset(assets, beneficiary) .build(); ``` --------- Co-authored-by: Keith Yeung Co-authored-by: command-bot <> --- Cargo.lock | 1 + polkadot/xcm/procedural/Cargo.toml | 5 + .../xcm/procedural/src/builder_pattern.rs | 115 ++++++++++++++++++ polkadot/xcm/procedural/src/lib.rs | 13 ++ polkadot/xcm/procedural/tests/ui.rs | 32 +++++ .../procedural/tests/ui/builder_pattern.rs | 25 ++++ .../tests/ui/builder_pattern.stderr | 5 + polkadot/xcm/src/v3/mod.rs | 9 +- polkadot/xcm/xcm-simulator/example/src/lib.rs | 19 +++ prdoc/pr_2107.prdoc | 24 ++++ 10 files changed, 247 insertions(+), 1 deletion(-) create mode 100644 polkadot/xcm/procedural/src/builder_pattern.rs create mode 100644 polkadot/xcm/procedural/tests/ui.rs create mode 100644 polkadot/xcm/procedural/tests/ui/builder_pattern.rs create mode 100644 polkadot/xcm/procedural/tests/ui/builder_pattern.stderr create mode 100644 prdoc/pr_2107.prdoc diff --git a/Cargo.lock b/Cargo.lock index 764d4ed01aa..70b0d43120f 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -21176,6 +21176,7 @@ dependencies = [ "proc-macro2", "quote", "syn 2.0.38", + "trybuild", ] [[package]] diff --git a/polkadot/xcm/procedural/Cargo.toml b/polkadot/xcm/procedural/Cargo.toml index 56df0d94f58..33c2a94be0e 100644 --- a/polkadot/xcm/procedural/Cargo.toml +++ b/polkadot/xcm/procedural/Cargo.toml @@ -1,9 +1,11 @@ [package] name = "xcm-procedural" +description = "Procedural macros for XCM" authors.workspace = true edition.workspace = true license.workspace = true version = "1.0.0" +publish = true [lib] proc-macro = true @@ -13,3 +15,6 @@ proc-macro2 = "1.0.56" quote = "1.0.28" syn = "2.0.38" Inflector = "0.11.4" + +[dev-dependencies] +trybuild = { version = "1.0.74", features = ["diff"] } diff --git 
a/polkadot/xcm/procedural/src/builder_pattern.rs b/polkadot/xcm/procedural/src/builder_pattern.rs new file mode 100644 index 00000000000..ebad54e972b --- /dev/null +++ b/polkadot/xcm/procedural/src/builder_pattern.rs @@ -0,0 +1,115 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! Derive macro for creating XCMs with a builder pattern + +use inflector::Inflector; +use proc_macro::TokenStream; +use proc_macro2::TokenStream as TokenStream2; +use quote::{format_ident, quote}; +use syn::{ + parse_macro_input, Data, DeriveInput, Error, Expr, ExprLit, Fields, Lit, Meta, MetaNameValue, +}; + +pub fn derive(input: TokenStream) -> TokenStream { + let input = parse_macro_input!(input as DeriveInput); + let builder_impl = match &input.data { + Data::Enum(data_enum) => generate_methods_for_enum(input.ident, data_enum), + _ => + return Error::new_spanned(&input, "Expected the `Instruction` enum") + .to_compile_error() + .into(), + }; + let output = quote! { + pub struct XcmBuilder(Vec>); + impl Xcm { + pub fn builder() -> XcmBuilder { + XcmBuilder::(Vec::new()) + } + } + #builder_impl + }; + output.into() +} + +fn generate_methods_for_enum(name: syn::Ident, data_enum: &syn::DataEnum) -> TokenStream2 { + let methods = data_enum.variants.iter().map(|variant| { + let variant_name = &variant.ident; + let method_name_string = &variant_name.to_string().to_snake_case(); + let method_name = syn::Ident::new(&method_name_string, variant_name.span()); + let docs: Vec<_> = variant + .attrs + .iter() + .filter_map(|attr| match &attr.meta { + Meta::NameValue(MetaNameValue { + value: Expr::Lit(ExprLit { lit: Lit::Str(literal), .. }), + .. + }) if attr.path().is_ident("doc") => Some(literal.value()), + _ => None, + }) + .map(|doc| syn::parse_str::(&format!("/// {}", doc)).unwrap()) + .collect(); + let method = match &variant.fields { + Fields::Unit => { + quote! { + pub fn #method_name(mut self) -> Self { + self.0.push(#name::::#variant_name); + self + } + } + }, + Fields::Unnamed(fields) => { + let arg_names: Vec<_> = fields + .unnamed + .iter() + .enumerate() + .map(|(index, _)| format_ident!("arg{}", index)) + .collect(); + let arg_types: Vec<_> = fields.unnamed.iter().map(|field| &field.ty).collect(); + quote! { + pub fn #method_name(mut self, #(#arg_names: #arg_types),*) -> Self { + self.0.push(#name::::#variant_name(#(#arg_names),*)); + self + } + } + }, + Fields::Named(fields) => { + let arg_names: Vec<_> = fields.named.iter().map(|field| &field.ident).collect(); + let arg_types: Vec<_> = fields.named.iter().map(|field| &field.ty).collect(); + quote! { + pub fn #method_name(mut self, #(#arg_names: #arg_types),*) -> Self { + self.0.push(#name::::#variant_name { #(#arg_names),* }); + self + } + } + }, + }; + quote! { + #(#docs)* + #method + } + }); + let output = quote! 
{ + impl XcmBuilder { + #(#methods)* + + pub fn build(self) -> Xcm { + Xcm(self.0) + } + } + }; + output +} diff --git a/polkadot/xcm/procedural/src/lib.rs b/polkadot/xcm/procedural/src/lib.rs index 2ebccadf50b..83cc6cdf98f 100644 --- a/polkadot/xcm/procedural/src/lib.rs +++ b/polkadot/xcm/procedural/src/lib.rs @@ -18,6 +18,7 @@ use proc_macro::TokenStream; +mod builder_pattern; mod v2; mod v3; mod weight_info; @@ -47,3 +48,15 @@ pub fn impl_conversion_functions_for_junctions_v3(input: TokenStream) -> TokenSt .unwrap_or_else(syn::Error::into_compile_error) .into() } + +/// This is called on the `Instruction` enum, not on the `Xcm` struct, +/// and allows for the following syntax for building XCMs: +/// let message = Xcm::builder() +/// .withdraw_asset(assets) +/// .buy_execution(fees, weight_limit) +/// .deposit_asset(assets, beneficiary) +/// .build(); +#[proc_macro_derive(Builder)] +pub fn derive_builder(input: TokenStream) -> TokenStream { + builder_pattern::derive(input) +} diff --git a/polkadot/xcm/procedural/tests/ui.rs b/polkadot/xcm/procedural/tests/ui.rs new file mode 100644 index 00000000000..a6ec35d0862 --- /dev/null +++ b/polkadot/xcm/procedural/tests/ui.rs @@ -0,0 +1,32 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! UI tests for XCM procedural macros + +#[cfg(not(feature = "disable-ui-tests"))] +#[test] +fn ui() { + // Only run the ui tests when `RUN_UI_TESTS` is set. + if std::env::var("RUN_UI_TESTS").is_err() { + return; + } + + // As trybuild is using `cargo check`, we don't need the real WASM binaries. + std::env::set_var("SKIP_WASM_BUILD", "1"); + + let t = trybuild::TestCases::new(); + t.compile_fail("tests/ui/*.rs"); +} diff --git a/polkadot/xcm/procedural/tests/ui/builder_pattern.rs b/polkadot/xcm/procedural/tests/ui/builder_pattern.rs new file mode 100644 index 00000000000..e4bcda572ad --- /dev/null +++ b/polkadot/xcm/procedural/tests/ui/builder_pattern.rs @@ -0,0 +1,25 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! Test error when attaching the derive builder macro to something +//! other than the XCM `Instruction` enum. 
+ +use xcm_procedural::Builder; + +#[derive(Builder)] +struct SomeStruct; + +fn main() {} diff --git a/polkadot/xcm/procedural/tests/ui/builder_pattern.stderr b/polkadot/xcm/procedural/tests/ui/builder_pattern.stderr new file mode 100644 index 00000000000..439b40f31ca --- /dev/null +++ b/polkadot/xcm/procedural/tests/ui/builder_pattern.stderr @@ -0,0 +1,5 @@ +error: Expected the `Instruction` enum + --> tests/ui/builder_pattern.rs:23:1 + | +23 | struct SomeStruct; + | ^^^^^^^^^^^^^^^^^^ diff --git a/polkadot/xcm/src/v3/mod.rs b/polkadot/xcm/src/v3/mod.rs index aa5bfe169ac..7ec3f6f40af 100644 --- a/polkadot/xcm/src/v3/mod.rs +++ b/polkadot/xcm/src/v3/mod.rs @@ -404,7 +404,14 @@ impl XcmContext { /// /// This is the inner XCM format and is version-sensitive. Messages are typically passed using the /// outer XCM format, known as `VersionedXcm`. -#[derive(Derivative, Encode, Decode, TypeInfo, xcm_procedural::XcmWeightInfoTrait)] +#[derive( + Derivative, + Encode, + Decode, + TypeInfo, + xcm_procedural::XcmWeightInfoTrait, + xcm_procedural::Builder, +)] #[derivative(Clone(bound = ""), Eq(bound = ""), PartialEq(bound = ""), Debug(bound = ""))] #[codec(encode_bound())] #[codec(decode_bound())] diff --git a/polkadot/xcm/xcm-simulator/example/src/lib.rs b/polkadot/xcm/xcm-simulator/example/src/lib.rs index 85b8ad1c5cb..03e7c19a914 100644 --- a/polkadot/xcm/xcm-simulator/example/src/lib.rs +++ b/polkadot/xcm/xcm-simulator/example/src/lib.rs @@ -649,4 +649,23 @@ mod tests { ); }); } + + #[test] + fn builder_pattern_works() { + let asset: MultiAsset = (Here, 100u128).into(); + let beneficiary: MultiLocation = AccountId32 { id: [0u8; 32], network: None }.into(); + let message: Xcm<()> = Xcm::builder() + .withdraw_asset(asset.clone().into()) + .buy_execution(asset.clone(), Unlimited) + .deposit_asset(asset.clone().into(), beneficiary) + .build(); + assert_eq!( + message, + Xcm(vec![ + WithdrawAsset(asset.clone().into()), + BuyExecution { fees: asset.clone(), weight_limit: Unlimited }, + DepositAsset { assets: asset.into(), beneficiary }, + ]) + ); + } } diff --git a/prdoc/pr_2107.prdoc b/prdoc/pr_2107.prdoc new file mode 100644 index 00000000000..0e33680555a --- /dev/null +++ b/prdoc/pr_2107.prdoc @@ -0,0 +1,24 @@ +# Schema: Parity PR Documentation Schema (prdoc) +# See doc at https://github.com/paritytech/prdoc + +title: Add a builder pattern to create XCM programs + +doc: + - audience: Core Dev + description: | + XCMs can now be built using a builder pattern like so: + Xcm::builder() + .withdraw_asset(assets) + .buy_execution(fees, weight_limit) + .deposit_asset(assets, beneficiary) + .build(); + +migrations: + db: [] + + runtime: [] + +crates: + - name: xcm + +host_functions: [] -- GitLab From 2e2a75ff81907975e990202378a0adc93ce02fd3 Mon Sep 17 00:00:00 2001 From: Adrian Catangiu Date: Wed, 8 Nov 2023 11:40:57 +0200 Subject: [PATCH 004/199] [testnets][xcm-emulator] add bridge-hub-westend and hook it up to emulator (#2204) `bridge-hub-westend-runtime` was added to cumulus/parachains, but wasn't hooked up to xcm-emulator to run tests against it. This commit addresses that ^. 
Signed-off-by: Adrian Catangiu --- Cargo.lock | 30 ++++++++ Cargo.toml | 1 + .../bridges/bridge-hub-westend/Cargo.toml | 41 ++++++++++ .../bridges/bridge-hub-westend/src/lib.rs | 67 +++++++++++++++++ .../bridge-hub-westend/src/tests/example.rs | 74 +++++++++++++++++++ .../bridge-hub-westend/src/tests/mod.rs | 17 +++++ .../bridge-hub-westend/src/tests/teleport.rs | 30 ++++++++ .../emulated/common/Cargo.toml | 2 + .../emulated/common/src/constants.rs | 57 ++++++++++++++ .../emulated/common/src/lib.rs | 27 ++++++- .../bridge-hubs/bridge-hub-rococo/Cargo.toml | 2 +- 11 files changed, 345 insertions(+), 3 deletions(-) create mode 100644 cumulus/parachains/integration-tests/emulated/bridges/bridge-hub-westend/Cargo.toml create mode 100644 cumulus/parachains/integration-tests/emulated/bridges/bridge-hub-westend/src/lib.rs create mode 100644 cumulus/parachains/integration-tests/emulated/bridges/bridge-hub-westend/src/tests/example.rs create mode 100644 cumulus/parachains/integration-tests/emulated/bridges/bridge-hub-westend/src/tests/mod.rs create mode 100644 cumulus/parachains/integration-tests/emulated/bridges/bridge-hub-westend/src/tests/teleport.rs diff --git a/Cargo.lock b/Cargo.lock index 70b0d43120f..06ab9ed7e2f 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2298,6 +2298,35 @@ dependencies = [ "staging-xcm-executor", ] +[[package]] +name = "bridge-hub-westend-integration-tests" +version = "1.0.0" +dependencies = [ + "asset-test-utils", + "bp-messages", + "bridge-hub-westend-runtime", + "cumulus-pallet-dmp-queue", + "cumulus-pallet-xcmp-queue", + "frame-support", + "frame-system", + "integration-tests-common", + "pallet-assets", + "pallet-balances", + "pallet-bridge-messages", + "pallet-message-queue", + "pallet-xcm", + "parachains-common", + "parity-scale-codec", + "polkadot-core-primitives", + "polkadot-parachain-primitives", + "polkadot-runtime-parachains", + "sp-core", + "sp-weights", + "staging-xcm", + "staging-xcm-executor", + "xcm-emulator", +] + [[package]] name = "bridge-hub-westend-runtime" version = "0.1.0" @@ -6717,6 +6746,7 @@ dependencies = [ "bridge-hub-kusama-runtime", "bridge-hub-polkadot-runtime", "bridge-hub-rococo-runtime", + "bridge-hub-westend-runtime", "bridge-runtime-common", "collectives-polkadot-runtime", "cumulus-pallet-parachain-system", diff --git a/Cargo.toml b/Cargo.toml index ccb952cc963..2344a24a5d9 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -68,6 +68,7 @@ members = [ "cumulus/parachains/integration-tests/emulated/assets/asset-hub-rococo", "cumulus/parachains/integration-tests/emulated/assets/asset-hub-westend", "cumulus/parachains/integration-tests/emulated/bridges/bridge-hub-rococo", + "cumulus/parachains/integration-tests/emulated/bridges/bridge-hub-westend", "cumulus/parachains/integration-tests/emulated/common", "cumulus/parachains/pallets/collective-content", "cumulus/parachains/pallets/parachain-info", diff --git a/cumulus/parachains/integration-tests/emulated/bridges/bridge-hub-westend/Cargo.toml b/cumulus/parachains/integration-tests/emulated/bridges/bridge-hub-westend/Cargo.toml new file mode 100644 index 00000000000..af5408b888b --- /dev/null +++ b/cumulus/parachains/integration-tests/emulated/bridges/bridge-hub-westend/Cargo.toml @@ -0,0 +1,41 @@ +[package] +name = "bridge-hub-westend-integration-tests" +version = "1.0.0" +authors.workspace = true +edition.workspace = true +license = "Apache-2.0" +description = "Bridge Hub Westend runtime integration tests with xcm-emulator" +publish = false + +[dependencies] +codec = { package = 
"parity-scale-codec", version = "3.4.0", default-features = false } + +# Substrate +frame-support = { path = "../../../../../../substrate/frame/support", default-features = false} +frame-system = { path = "../../../../../../substrate/frame/system", default-features = false} +sp-core = { path = "../../../../../../substrate/primitives/core", default-features = false} +sp-weights = { path = "../../../../../../substrate/primitives/weights", default-features = false} +pallet-balances = { path = "../../../../../../substrate/frame/balances", default-features = false} +pallet-assets = { path = "../../../../../../substrate/frame/assets", default-features = false} +pallet-message-queue = { path = "../../../../../../substrate/frame/message-queue", default-features = false } + +# Polkadot +polkadot-core-primitives = { path = "../../../../../../polkadot/core-primitives", default-features = false} +polkadot-parachain-primitives = { path = "../../../../../../polkadot/parachain", default-features = false} +polkadot-runtime-parachains = { path = "../../../../../../polkadot/runtime/parachains" } +xcm = { package = "staging-xcm", path = "../../../../../../polkadot/xcm", default-features = false} +pallet-xcm = { path = "../../../../../../polkadot/xcm/pallet-xcm", default-features = false} +xcm-executor = { package = "staging-xcm-executor", path = "../../../../../../polkadot/xcm/xcm-executor", default-features = false} + +# Cumulus +asset-test-utils = { path = "../../../../../parachains/runtimes/assets/test-utils", default-features = false } +parachains-common = { path = "../../../../common" } +cumulus-pallet-xcmp-queue = { path = "../../../../../pallets/xcmp-queue", default-features = false} +cumulus-pallet-dmp-queue = { path = "../../../../../pallets/dmp-queue", default-features = false} +pallet-bridge-messages = { path = "../../../../../../bridges/modules/messages", default-features = false} +bp-messages = { path = "../../../../../../bridges/primitives/messages", default-features = false} +bridge-hub-westend-runtime = { path = "../../../../../parachains/runtimes/bridge-hubs/bridge-hub-westend", default-features = false } + +# Local +xcm-emulator = { path = "../../../../../xcm/xcm-emulator", default-features = false} +integration-tests-common = { path = "../../common", default-features = false} diff --git a/cumulus/parachains/integration-tests/emulated/bridges/bridge-hub-westend/src/lib.rs b/cumulus/parachains/integration-tests/emulated/bridges/bridge-hub-westend/src/lib.rs new file mode 100644 index 00000000000..8b17ce53eb0 --- /dev/null +++ b/cumulus/parachains/integration-tests/emulated/bridges/bridge-hub-westend/src/lib.rs @@ -0,0 +1,67 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +pub use bp_messages::LaneId; +pub use frame_support::assert_ok; +pub use integration_tests_common::{ + constants::{ + bridge_hub_westend::ED as BRIDGE_HUB_WESTEND_ED, westend::ED as WESTEND_ED, + PROOF_SIZE_THRESHOLD, REF_TIME_THRESHOLD, XCM_V3, + }, + test_parachain_is_trusted_teleporter, + xcm_helpers::{xcm_transact_paid_execution, xcm_transact_unpaid_execution}, + AssetHubRococo, AssetHubWestend, AssetHubWestendReceiver, BridgeHubRococo, BridgeHubWestend, + BridgeHubWestendPallet, BridgeHubWestendSender, PenpalWestendA, Westend, WestendPallet, +}; +pub use parachains_common::{AccountId, Balance}; +pub use xcm::{ + prelude::{AccountId32 as AccountId32Junction, *}, + v3::{ + Error, + NetworkId::{Rococo as RococoId, Westend as WestendId}, + }, +}; +pub use xcm_emulator::{ + assert_expected_events, bx, helpers::weight_within_threshold, Chain, Parachain as Para, + RelayChain as Relay, Test, TestArgs, TestContext, TestExt, +}; + +pub const ASSET_ID: u32 = 1; +pub const ASSET_MIN_BALANCE: u128 = 1000; +pub const ASSETS_PALLET_ID: u8 = 50; + +pub type RelayToSystemParaTest = Test; +pub type SystemParaToRelayTest = Test; +pub type SystemParaToParaTest = Test; + +/// Returns a `TestArgs` instance to de used for the Relay Chain accross integraton tests +pub fn relay_test_args(amount: Balance) -> TestArgs { + TestArgs { + dest: Westend::child_location_of(AssetHubWestend::para_id()), + beneficiary: AccountId32Junction { + network: None, + id: AssetHubWestendReceiver::get().into(), + } + .into(), + amount, + assets: (Here, amount).into(), + asset_id: None, + fee_asset_item: 0, + weight_limit: WeightLimit::Unlimited, + } +} + +#[cfg(test)] +mod tests; diff --git a/cumulus/parachains/integration-tests/emulated/bridges/bridge-hub-westend/src/tests/example.rs b/cumulus/parachains/integration-tests/emulated/bridges/bridge-hub-westend/src/tests/example.rs new file mode 100644 index 00000000000..e15a0dedab9 --- /dev/null +++ b/cumulus/parachains/integration-tests/emulated/bridges/bridge-hub-westend/src/tests/example.rs @@ -0,0 +1,74 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +use crate::*; + +#[test] +fn example() { + // Init tests variables + // XcmPallet send arguments + let sudo_origin = ::RuntimeOrigin::root(); + let destination = Westend::child_location_of(BridgeHubWestend::para_id()).into(); + let weight_limit = WeightLimit::Unlimited; + let check_origin = None; + + let remote_xcm = Xcm(vec![ClearOrigin]); + + let xcm = VersionedXcm::from(Xcm(vec![ + UnpaidExecution { weight_limit, check_origin }, + ExportMessage { + network: RococoId, + destination: X1(Parachain(AssetHubRococo::para_id().into())), + xcm: remote_xcm, + }, + ])); + + // Westend Global Consensus + // Send XCM message from Relay Chain to Bridge Hub source Parachain + Westend::execute_with(|| { + assert_ok!(::XcmPallet::send( + sudo_origin, + bx!(destination), + bx!(xcm), + )); + + type RuntimeEvent = ::RuntimeEvent; + + assert_expected_events!( + Westend, + vec![ + RuntimeEvent::XcmPallet(pallet_xcm::Event::Sent { .. }) => {}, + ] + ); + }); + // Receive XCM message in Bridge Hub source Parachain + BridgeHubWestend::execute_with(|| { + type RuntimeEvent = ::RuntimeEvent; + + assert_expected_events!( + BridgeHubWestend, + vec![ + RuntimeEvent::MessageQueue(pallet_message_queue::Event::Processed { + success: true, + .. + }) => {}, + RuntimeEvent::BridgeRococoMessages(pallet_bridge_messages::Event::MessageAccepted { + lane_id: LaneId([0, 0, 0, 2]), + nonce: 1, + }) => {}, + ] + ); + }); +} diff --git a/cumulus/parachains/integration-tests/emulated/bridges/bridge-hub-westend/src/tests/mod.rs b/cumulus/parachains/integration-tests/emulated/bridges/bridge-hub-westend/src/tests/mod.rs new file mode 100644 index 00000000000..1eef05c6b92 --- /dev/null +++ b/cumulus/parachains/integration-tests/emulated/bridges/bridge-hub-westend/src/tests/mod.rs @@ -0,0 +1,17 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +mod example; +mod teleport; diff --git a/cumulus/parachains/integration-tests/emulated/bridges/bridge-hub-westend/src/tests/teleport.rs b/cumulus/parachains/integration-tests/emulated/bridges/bridge-hub-westend/src/tests/teleport.rs new file mode 100644 index 00000000000..8dff6c29295 --- /dev/null +++ b/cumulus/parachains/integration-tests/emulated/bridges/bridge-hub-westend/src/tests/teleport.rs @@ -0,0 +1,30 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +use crate::*; +use bridge_hub_westend_runtime::xcm_config::XcmConfig; + +#[test] +fn teleport_to_other_system_parachains_works() { + let amount = BRIDGE_HUB_WESTEND_ED * 100; + let native_asset: MultiAssets = (Parent, amount).into(); + + test_parachain_is_trusted_teleporter!( + BridgeHubWestend, // Origin + XcmConfig, // XCM configuration + vec![AssetHubWestend], // Destinations + (native_asset, amount) + ); +} diff --git a/cumulus/parachains/integration-tests/emulated/common/Cargo.toml b/cumulus/parachains/integration-tests/emulated/common/Cargo.toml index 6688c80eb9a..16a37e94be5 100644 --- a/cumulus/parachains/integration-tests/emulated/common/Cargo.toml +++ b/cumulus/parachains/integration-tests/emulated/common/Cargo.toml @@ -52,6 +52,7 @@ collectives-polkadot-runtime = { path = "../../../runtimes/collectives/collectiv bridge-hub-kusama-runtime = { path = "../../../runtimes/bridge-hubs/bridge-hub-kusama" } bridge-hub-polkadot-runtime = { path = "../../../runtimes/bridge-hubs/bridge-hub-polkadot" } bridge-hub-rococo-runtime = { path = "../../../runtimes/bridge-hubs/bridge-hub-rococo" } +bridge-hub-westend-runtime = { path = "../../../runtimes/bridge-hubs/bridge-hub-westend" } xcm-emulator = { default-features = false, path = "../../../../xcm/xcm-emulator" } cumulus-pallet-xcmp-queue = { default-features = false, path = "../../../../pallets/xcmp-queue" } cumulus-pallet-parachain-system = { path = "../../../../pallets/parachain-system" } @@ -68,6 +69,7 @@ runtime-benchmarks = [ "bridge-hub-kusama-runtime/runtime-benchmarks", "bridge-hub-polkadot-runtime/runtime-benchmarks", "bridge-hub-rococo-runtime/runtime-benchmarks", + "bridge-hub-westend-runtime/runtime-benchmarks", "bridge-runtime-common/runtime-benchmarks", "collectives-polkadot-runtime/runtime-benchmarks", "cumulus-pallet-parachain-system/runtime-benchmarks", diff --git a/cumulus/parachains/integration-tests/emulated/common/src/constants.rs b/cumulus/parachains/integration-tests/emulated/common/src/constants.rs index 86027229c59..1dffbccbba3 100644 --- a/cumulus/parachains/integration-tests/emulated/common/src/constants.rs +++ b/cumulus/parachains/integration-tests/emulated/common/src/constants.rs @@ -1160,3 +1160,60 @@ pub mod bridge_hub_rococo { } } } + +// Bridge Hub Westend +pub mod bridge_hub_westend { + use super::*; + pub const PARA_ID: u32 = 1013; + pub const ED: Balance = parachains_common::westend::currency::EXISTENTIAL_DEPOSIT; + + pub fn genesis() -> Storage { + let genesis_config = serde_json::json!({ + "balances": { + "balances": accounts::init_balances() + .iter() + .cloned() + .map(|k| (k, ED * 4096)) + .collect::>(), + }, + "parachainInfo": { + "parachainId": cumulus_primitives_core::ParaId::from(PARA_ID), + }, + "collatorSelection": { + "invulnerables": collators::invulnerables() + .iter() + .cloned() + .map(|(acc, _)| acc) + .collect::>(), + "candidacyBond": ED * 16, + }, + "session": { + "keys": collators::invulnerables() + .into_iter() + .map(|(acc, aura)| { + ( + acc.clone(), // account id + acc, // validator id + bridge_hub_westend_runtime::SessionKeys { aura }, // session keys + ) + }) + .collect::>(), + }, + "polkadotXcm": { + "safeXcmVersion": Some(SAFE_XCM_VERSION), + }, + "bridgeRococoGrandpa": { + "owner": Some(get_account_id_from_seed::(accounts::BOB)), + }, + "bridgeRococoMessages": { + "owner": Some(get_account_id_from_seed::(accounts::BOB)), + } + }); + + build_genesis_storage( + genesis_config, + bridge_hub_westend_runtime::WASM_BINARY + .expect("WASM binary was not built, please build it!"), + 
) + } +} diff --git a/cumulus/parachains/integration-tests/emulated/common/src/lib.rs b/cumulus/parachains/integration-tests/emulated/common/src/lib.rs index 2cb90650b94..1debd50d609 100644 --- a/cumulus/parachains/integration-tests/emulated/common/src/lib.rs +++ b/cumulus/parachains/integration-tests/emulated/common/src/lib.rs @@ -20,8 +20,8 @@ pub mod xcm_helpers; use constants::{ accounts::{ALICE, BOB}, - asset_hub_rococo, asset_hub_westend, asset_hub_wococo, bridge_hub_rococo, penpal, rococo, - westend, + asset_hub_rococo, asset_hub_westend, asset_hub_wococo, bridge_hub_rococo, bridge_hub_westend, + penpal, rococo, westend, }; use impls::{RococoWococoMessageHandler, WococoRococoMessageHandler}; pub use paste; @@ -118,6 +118,23 @@ decl_test_parachains! { AssetConversion: asset_hub_westend_runtime::AssetConversion, } }, + pub struct BridgeHubWestend { + genesis = bridge_hub_westend::genesis(), + on_init = { + bridge_hub_westend_runtime::AuraExt::on_initialize(1); + }, + runtime = bridge_hub_westend_runtime, + core = { + XcmpMessageHandler: bridge_hub_westend_runtime::XcmpQueue, + LocationToAccountId: bridge_hub_westend_runtime::xcm_config::LocationToAccountId, + ParachainInfo: bridge_hub_westend_runtime::ParachainInfo, + MessageProcessor: DefaultParaMessageProcessor, + }, + pallets = { + PolkadotXcm: bridge_hub_westend_runtime::PolkadotXcm, + Balances: bridge_hub_westend_runtime::Balances, + } + }, pub struct PenpalWestendA { genesis = penpal::genesis(penpal::PARA_ID_A), on_init = { @@ -257,6 +274,7 @@ decl_test_networks! { relay_chain = Westend, parachains = vec![ AssetHubWestend, + BridgeHubWestend, PenpalWestendA, ], bridge = () @@ -323,6 +341,10 @@ impl_assert_events_helpers_for_parachain!(AssetHubRococo); // PenpalWestendA implementation impl_assert_events_helpers_for_parachain!(PenpalWestendA); +// BridgeHubWestend implementation +impl_accounts_helpers_for_parachain!(BridgeHubWestend); +impl_assert_events_helpers_for_parachain!(BridgeHubWestend); + // BridgeHubRococo implementation impl_accounts_helpers_for_parachain!(BridgeHubRococo); impl_assert_events_helpers_for_parachain!(BridgeHubRococo); @@ -343,6 +365,7 @@ decl_test_sender_receiver_accounts_parameter_types! 
{ // Bridged Hubs BridgeHubRococo { sender: ALICE, receiver: BOB }, BridgeHubWococo { sender: ALICE, receiver: BOB }, + BridgeHubWestend { sender: ALICE, receiver: BOB }, // Penpals PenpalWestendA { sender: ALICE, receiver: BOB }, PenpalRococoA { sender: ALICE, receiver: BOB }, diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/Cargo.toml b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/Cargo.toml index 76ff63db4da..671d38e808f 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/Cargo.toml +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/Cargo.toml @@ -29,6 +29,7 @@ pallet-aura = { path = "../../../../../substrate/frame/aura", default-features = pallet-authorship = { path = "../../../../../substrate/frame/authorship", default-features = false} pallet-balances = { path = "../../../../../substrate/frame/balances", default-features = false} pallet-session = { path = "../../../../../substrate/frame/session", default-features = false} +pallet-message-queue = { path = "../../../../../substrate/frame/message-queue", default-features = false } pallet-multisig = { path = "../../../../../substrate/frame/multisig", default-features = false} pallet-timestamp = { path = "../../../../../substrate/frame/timestamp", default-features = false} pallet-transaction-payment = { path = "../../../../../substrate/frame/transaction-payment", default-features = false} @@ -62,7 +63,6 @@ xcm-executor = { package = "staging-xcm-executor", path = "../../../../../polkad # Cumulus cumulus-pallet-aura-ext = { path = "../../../../pallets/aura-ext", default-features = false } -pallet-message-queue = { path = "../../../../../substrate/frame/message-queue", default-features = false } cumulus-pallet-dmp-queue = { path = "../../../../pallets/dmp-queue", default-features = false } cumulus-pallet-parachain-system = { path = "../../../../pallets/parachain-system", default-features = false, features = ["parameterized-consensus-hook",] } cumulus-pallet-session-benchmarking = { path = "../../../../pallets/session-benchmarking", default-features = false} -- GitLab From 640e385aec02ee8b80284febe679ce785ade66b9 Mon Sep 17 00:00:00 2001 From: benluelo <57334811+benluelo@users.noreply.github.com> Date: Wed, 8 Nov 2023 10:51:40 +0000 Subject: [PATCH 005/199] feat(frame-support-procedural): add `automaticaly_derived` attr to `NoBound` derives (#2197) fixes #2196 --- substrate/frame/support/procedural/src/no_bound/clone.rs | 1 + substrate/frame/support/procedural/src/no_bound/debug.rs | 1 + substrate/frame/support/procedural/src/no_bound/default.rs | 1 + substrate/frame/support/procedural/src/no_bound/partial_eq.rs | 1 + substrate/frame/support/test/tests/derive_no_bound.rs | 3 +++ 5 files changed, 7 insertions(+) diff --git a/substrate/frame/support/procedural/src/no_bound/clone.rs b/substrate/frame/support/procedural/src/no_bound/clone.rs index 2c9037984f5..8e57a10d34c 100644 --- a/substrate/frame/support/procedural/src/no_bound/clone.rs +++ b/substrate/frame/support/procedural/src/no_bound/clone.rs @@ -98,6 +98,7 @@ pub fn derive_clone_no_bound(input: proc_macro::TokenStream) -> proc_macro::Toke quote::quote!( const _: () = { + #[automatically_derived] impl #impl_generics ::core::clone::Clone for #name #ty_generics #where_clause { fn clone(&self) -> Self { #impl_ diff --git a/substrate/frame/support/procedural/src/no_bound/debug.rs b/substrate/frame/support/procedural/src/no_bound/debug.rs index 88f5dfe7bec..dd14b9cd564 100644 --- 
a/substrate/frame/support/procedural/src/no_bound/debug.rs +++ b/substrate/frame/support/procedural/src/no_bound/debug.rs @@ -112,6 +112,7 @@ pub fn derive_debug_no_bound(input: proc_macro::TokenStream) -> proc_macro::Toke quote::quote!( const _: () = { + #[automatically_derived] impl #impl_generics ::core::fmt::Debug for #input_ident #ty_generics #where_clause { fn fmt(&self, fmt: &mut ::core::fmt::Formatter) -> ::core::fmt::Result { #impl_ diff --git a/substrate/frame/support/procedural/src/no_bound/default.rs b/substrate/frame/support/procedural/src/no_bound/default.rs index ddaab26c441..0524247d24e 100644 --- a/substrate/frame/support/procedural/src/no_bound/default.rs +++ b/substrate/frame/support/procedural/src/no_bound/default.rs @@ -149,6 +149,7 @@ pub fn derive_default_no_bound(input: proc_macro::TokenStream) -> proc_macro::To quote!( const _: () = { + #[automatically_derived] impl #impl_generics ::core::default::Default for #name #ty_generics #where_clause { fn default() -> Self { #impl_ diff --git a/substrate/frame/support/procedural/src/no_bound/partial_eq.rs b/substrate/frame/support/procedural/src/no_bound/partial_eq.rs index 1a4a4e50b39..7cf5701b193 100644 --- a/substrate/frame/support/procedural/src/no_bound/partial_eq.rs +++ b/substrate/frame/support/procedural/src/no_bound/partial_eq.rs @@ -128,6 +128,7 @@ pub fn derive_partial_eq_no_bound(input: proc_macro::TokenStream) -> proc_macro: quote::quote!( const _: () = { + #[automatically_derived] impl #impl_generics ::core::cmp::PartialEq for #name #ty_generics #where_clause { fn eq(&self, other: &Self) -> bool { #impl_ diff --git a/substrate/frame/support/test/tests/derive_no_bound.rs b/substrate/frame/support/test/tests/derive_no_bound.rs index dc78027f22e..af2633dc53c 100644 --- a/substrate/frame/support/test/tests/derive_no_bound.rs +++ b/substrate/frame/support/test/tests/derive_no_bound.rs @@ -136,6 +136,7 @@ struct StructNoGenerics { field2: u64, } +#[allow(dead_code)] #[derive(DebugNoBound, CloneNoBound, EqNoBound, PartialEqNoBound, DefaultNoBound)] enum EnumNoGenerics { #[default] @@ -162,6 +163,7 @@ enum Enum { } // enum that will have a named default. +#[allow(dead_code)] #[derive(DebugNoBound, CloneNoBound, EqNoBound, PartialEqNoBound, DefaultNoBound)] enum Enum2 { #[default] @@ -175,6 +177,7 @@ enum Enum2 { VariantUnit2, } +#[allow(dead_code)] // enum that will have a unit default. #[derive(DebugNoBound, CloneNoBound, EqNoBound, PartialEqNoBound, DefaultNoBound)] enum Enum3 { -- GitLab From 9adb46c868ab408a97077dc967143e178b2b48e8 Mon Sep 17 00:00:00 2001 From: Oliver Tale-Yazdi Date: Wed, 8 Nov 2023 12:59:55 +0100 Subject: [PATCH 006/199] Add `sudo::remove_key` (#2165) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Changes: - Adds a new call `remove_key` to the sudo pallet to permanently remove the sudo key. 
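A minimal sketch of the new behaviour, mirroring the `remove_key_works` test added in this PR (account `1` is the sudo key configured in the pallet's mock runtime):

```rust
// Once the sudo key removes itself, the key storage is cleared for good.
assert_ok!(Sudo::remove_key(RuntimeOrigin::signed(1)));
assert!(Sudo::key().is_none());
// The removal cannot be undone: the old key can no longer reinstate itself.
assert_noop!(Sudo::set_key(RuntimeOrigin::signed(1), 1), Error::<Test>::RequireSudo);
```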
- Remove some clones and general maintenance --------- Signed-off-by: Oliver Tale-Yazdi Co-authored-by: Bastian Köcher Co-authored-by: command-bot <> --- .../runtime/rococo/src/weights/pallet_sudo.rs | 55 ++++++---- .../westend/src/weights/pallet_sudo.rs | 58 +++++----- prdoc/pr_2165.prdoc | 17 +++ substrate/frame/sudo/src/benchmarking.rs | 44 +++++--- substrate/frame/sudo/src/lib.rs | 92 ++++++++-------- substrate/frame/sudo/src/mock.rs | 4 +- substrate/frame/sudo/src/tests.rs | 28 ++--- substrate/frame/sudo/src/weights.rs | 102 +++++++++++------- 8 files changed, 240 insertions(+), 160 deletions(-) create mode 100644 prdoc/pr_2165.prdoc diff --git a/polkadot/runtime/rococo/src/weights/pallet_sudo.rs b/polkadot/runtime/rococo/src/weights/pallet_sudo.rs index 83215af5b8a..694174954fc 100644 --- a/polkadot/runtime/rococo/src/weights/pallet_sudo.rs +++ b/polkadot/runtime/rococo/src/weights/pallet_sudo.rs @@ -17,24 +17,25 @@ //! Autogenerated weights for `pallet_sudo` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-05-26, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-11-07, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `bm5`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` -//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("rococo-dev"), DB CACHE: 1024 +//! HOSTNAME: `runner-yprdrvc7-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("rococo-dev")`, DB CACHE: 1024 // Executed Command: -// ./target/production/polkadot +// target/production/polkadot // benchmark // pallet -// --chain=rococo-dev // --steps=50 // --repeat=20 -// --pallet=pallet_sudo // --extrinsic=* -// --execution=wasm // --wasm-execution=compiled -// --header=./file_header.txt -// --output=./runtime/rococo/src/weights/ +// --heap-pages=4096 +// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json +// --pallet=pallet_sudo +// --chain=rococo-dev +// --header=./polkadot/file_header.txt +// --output=./polkadot/runtime/rococo/src/weights/ #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -47,38 +48,50 @@ use core::marker::PhantomData; /// Weight functions for `pallet_sudo`. pub struct WeightInfo(PhantomData); impl pallet_sudo::WeightInfo for WeightInfo { - /// Storage: Sudo Key (r:1 w:1) - /// Proof: Sudo Key (max_values: Some(1), max_size: Some(32), added: 527, mode: MaxEncodedLen) + /// Storage: `Sudo::Key` (r:1 w:1) + /// Proof: `Sudo::Key` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) fn set_key() -> Weight { // Proof Size summary in bytes: // Measured: `132` // Estimated: `1517` - // Minimum execution time: 13_047_000 picoseconds. - Weight::from_parts(13_325_000, 0) + // Minimum execution time: 8_432_000 picoseconds. + Weight::from_parts(8_757_000, 0) .saturating_add(Weight::from_parts(0, 1517)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: Sudo Key (r:1 w:0) - /// Proof: Sudo Key (max_values: Some(1), max_size: Some(32), added: 527, mode: MaxEncodedLen) + /// Storage: `Sudo::Key` (r:1 w:0) + /// Proof: `Sudo::Key` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) fn sudo() -> Weight { // Proof Size summary in bytes: // Measured: `132` // Estimated: `1517` - // Minimum execution time: 13_250_000 picoseconds. 
- Weight::from_parts(13_544_000, 0) + // Minimum execution time: 9_167_000 picoseconds. + Weight::from_parts(9_397_000, 0) .saturating_add(Weight::from_parts(0, 1517)) .saturating_add(T::DbWeight::get().reads(1)) } - /// Storage: Sudo Key (r:1 w:0) - /// Proof: Sudo Key (max_values: Some(1), max_size: Some(32), added: 527, mode: MaxEncodedLen) + /// Storage: `Sudo::Key` (r:1 w:0) + /// Proof: `Sudo::Key` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) fn sudo_as() -> Weight { // Proof Size summary in bytes: // Measured: `132` // Estimated: `1517` - // Minimum execution time: 13_424_000 picoseconds. - Weight::from_parts(13_801_000, 0) + // Minimum execution time: 9_133_000 picoseconds. + Weight::from_parts(9_573_000, 0) .saturating_add(Weight::from_parts(0, 1517)) .saturating_add(T::DbWeight::get().reads(1)) } + /// Storage: `Sudo::Key` (r:1 w:1) + /// Proof: `Sudo::Key` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) + fn remove_key() -> Weight { + // Proof Size summary in bytes: + // Measured: `132` + // Estimated: `1517` + // Minimum execution time: 7_374_000 picoseconds. + Weight::from_parts(7_702_000, 0) + .saturating_add(Weight::from_parts(0, 1517)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } } diff --git a/polkadot/runtime/westend/src/weights/pallet_sudo.rs b/polkadot/runtime/westend/src/weights/pallet_sudo.rs index 8a220173ee5..e9ab3ad37a4 100644 --- a/polkadot/runtime/westend/src/weights/pallet_sudo.rs +++ b/polkadot/runtime/westend/src/weights/pallet_sudo.rs @@ -17,27 +17,25 @@ //! Autogenerated weights for `pallet_sudo` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-06-14, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-11-07, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner--ss9ysm1-project-163-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("westend-dev"), DB CACHE: 1024 +//! HOSTNAME: `runner-yprdrvc7-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("westend-dev")`, DB CACHE: 1024 // Executed Command: -// ./target/production/polkadot +// target/production/polkadot // benchmark // pallet -// --chain=westend-dev // --steps=50 // --repeat=20 -// --no-storage-info -// --no-median-slopes -// --no-min-squares -// --pallet=pallet_sudo // --extrinsic=* -// --execution=wasm // --wasm-execution=compiled -// --header=./file_header.txt -// --output=./runtime/westend/src/weights/ +// --heap-pages=4096 +// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json +// --pallet=pallet_sudo +// --chain=westend-dev +// --header=./polkadot/file_header.txt +// --output=./polkadot/runtime/westend/src/weights/ #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -50,38 +48,50 @@ use core::marker::PhantomData; /// Weight functions for `pallet_sudo`. 
pub struct WeightInfo(PhantomData); impl pallet_sudo::WeightInfo for WeightInfo { - /// Storage: Sudo Key (r:1 w:1) - /// Proof: Sudo Key (max_values: Some(1), max_size: Some(32), added: 527, mode: MaxEncodedLen) + /// Storage: `Sudo::Key` (r:1 w:1) + /// Proof: `Sudo::Key` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) fn set_key() -> Weight { // Proof Size summary in bytes: // Measured: `132` // Estimated: `1517` - // Minimum execution time: 12_360_000 picoseconds. - Weight::from_parts(12_803_000, 0) + // Minimum execution time: 8_750_000 picoseconds. + Weight::from_parts(9_102_000, 0) .saturating_add(Weight::from_parts(0, 1517)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: Sudo Key (r:1 w:0) - /// Proof: Sudo Key (max_values: Some(1), max_size: Some(32), added: 527, mode: MaxEncodedLen) + /// Storage: `Sudo::Key` (r:1 w:0) + /// Proof: `Sudo::Key` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) fn sudo() -> Weight { // Proof Size summary in bytes: // Measured: `132` // Estimated: `1517` - // Minimum execution time: 12_158_000 picoseconds. - Weight::from_parts(12_506_000, 0) + // Minimum execution time: 9_607_000 picoseconds. + Weight::from_parts(10_139_000, 0) .saturating_add(Weight::from_parts(0, 1517)) .saturating_add(T::DbWeight::get().reads(1)) } - /// Storage: Sudo Key (r:1 w:0) - /// Proof: Sudo Key (max_values: Some(1), max_size: Some(32), added: 527, mode: MaxEncodedLen) + /// Storage: `Sudo::Key` (r:1 w:0) + /// Proof: `Sudo::Key` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) fn sudo_as() -> Weight { // Proof Size summary in bytes: // Measured: `132` // Estimated: `1517` - // Minimum execution time: 12_286_000 picoseconds. - Weight::from_parts(12_664_000, 0) + // Minimum execution time: 9_886_000 picoseconds. + Weight::from_parts(10_175_000, 0) .saturating_add(Weight::from_parts(0, 1517)) .saturating_add(T::DbWeight::get().reads(1)) } + /// Storage: `Sudo::Key` (r:1 w:1) + /// Proof: `Sudo::Key` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) + fn remove_key() -> Weight { + // Proof Size summary in bytes: + // Measured: `132` + // Estimated: `1517` + // Minimum execution time: 7_843_000 picoseconds. + Weight::from_parts(8_152_000, 0) + .saturating_add(Weight::from_parts(0, 1517)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } } diff --git a/prdoc/pr_2165.prdoc b/prdoc/pr_2165.prdoc new file mode 100644 index 00000000000..31cb691c43a --- /dev/null +++ b/prdoc/pr_2165.prdoc @@ -0,0 +1,17 @@ +title: Add sudo::remove_key + +doc: + - audience: Core Dev + description: | + Pallet `Sudo` now has the ability to remove the sudo key via `remove_key`. This is a less-invasive way of rendering the sudo pallet useless without needing a code upgrade. 
+ +migrations: + db: [] + + runtime: [] + +crates: + - name: pallet-sudo + semver: minor + +host_functions: [] diff --git a/substrate/frame/sudo/src/benchmarking.rs b/substrate/frame/sudo/src/benchmarking.rs index 6a365c1873c..e64233fe748 100644 --- a/substrate/frame/sudo/src/benchmarking.rs +++ b/substrate/frame/sudo/src/benchmarking.rs @@ -22,41 +22,40 @@ use crate::Pallet; use frame_benchmarking::v2::*; use frame_system::RawOrigin; -const SEED: u32 = 0; - -fn assert_last_event(generic_event: ::RuntimeEvent) { - frame_system::Pallet::::assert_last_event(generic_event.into()); +fn assert_last_event(generic_event: crate::Event) { + let re: ::RuntimeEvent = generic_event.into(); + frame_system::Pallet::::assert_last_event(re.into()); } -#[benchmarks( where ::RuntimeCall: From>)] +#[benchmarks(where ::RuntimeCall: From>)] mod benchmarks { use super::*; #[benchmark] fn set_key() { let caller: T::AccountId = whitelisted_caller(); - Key::::put(caller.clone()); + Key::::put(&caller); - let new_sudoer: T::AccountId = account("sudoer", 0, SEED); + let new_sudoer: T::AccountId = account("sudoer", 0, 0); let new_sudoer_lookup = T::Lookup::unlookup(new_sudoer.clone()); #[extrinsic_call] _(RawOrigin::Signed(caller.clone()), new_sudoer_lookup); - assert_last_event::(Event::KeyChanged { old_sudoer: Some(caller) }.into()); + assert_last_event::(Event::KeyChanged { old: Some(caller), new: new_sudoer }); } #[benchmark] fn sudo() { let caller: T::AccountId = whitelisted_caller(); - Key::::put(caller.clone()); + Key::::put(&caller); - let call: ::RuntimeCall = frame_system::Call::remark { remark: vec![] }.into(); + let call = frame_system::Call::remark { remark: vec![] }.into(); #[extrinsic_call] - _(RawOrigin::Signed(caller.clone()), Box::new(call.clone())); + _(RawOrigin::Signed(caller), Box::new(call)); - assert_last_event::(Event::Sudid { sudo_result: Ok(()) }.into()) + assert_last_event::(Event::Sudid { sudo_result: Ok(()) }) } #[benchmark] @@ -64,15 +63,26 @@ mod benchmarks { let caller: T::AccountId = whitelisted_caller(); Key::::put(caller.clone()); - let call: ::RuntimeCall = frame_system::Call::remark { remark: vec![] }.into(); + let call = frame_system::Call::remark { remark: vec![] }.into(); + + let who: T::AccountId = account("as", 0, 0); + let who_lookup = T::Lookup::unlookup(who); + + #[extrinsic_call] + _(RawOrigin::Signed(caller), who_lookup, Box::new(call)); + + assert_last_event::(Event::SudoAsDone { sudo_result: Ok(()) }) + } - let who: T::AccountId = account("as", 0, SEED); - let who_lookup = T::Lookup::unlookup(who.clone()); + #[benchmark] + fn remove_key() { + let caller: T::AccountId = whitelisted_caller(); + Key::::put(&caller); #[extrinsic_call] - _(RawOrigin::Signed(caller), who_lookup, Box::new(call.clone())); + _(RawOrigin::Signed(caller.clone())); - assert_last_event::(Event::SudoAsDone { sudo_result: Ok(()) }.into()) + assert_last_event::(Event::KeyRemoved {}); } impl_benchmark_test_suite!(Pallet, crate::mock::new_bench_ext(), crate::mock::Test); diff --git a/substrate/frame/sudo/src/lib.rs b/substrate/frame/sudo/src/lib.rs index 36de44d9d72..d556c5eb6ae 100644 --- a/substrate/frame/sudo/src/lib.rs +++ b/substrate/frame/sudo/src/lib.rs @@ -146,7 +146,7 @@ type AccountIdLookupOf = <::Lookup as StaticLookup pub mod pallet { use super::{DispatchResult, *}; use frame_support::pallet_prelude::*; - use frame_system::pallet_prelude::*; + use frame_system::{pallet_prelude::*, RawOrigin}; /// Default preludes for [`Config`]. 
pub mod config_preludes { @@ -190,11 +190,6 @@ pub mod pallet { #[pallet::call] impl Pallet { /// Authenticates the sudo key and dispatches a function call with `Root` origin. - /// - /// The dispatch origin for this call must be _Signed_. - /// - /// ## Complexity - /// - O(1). #[pallet::call_index(0)] #[pallet::weight({ let dispatch_info = call.get_dispatch_info(); @@ -207,12 +202,11 @@ pub mod pallet { origin: OriginFor, call: Box<::RuntimeCall>, ) -> DispatchResultWithPostInfo { - // This is a public call, so we ensure that the origin is some signed account. - let sender = ensure_signed(origin)?; - ensure!(Self::key().map_or(false, |k| sender == k), Error::::RequireSudo); + Self::ensure_sudo(origin)?; - let res = call.dispatch_bypass_filter(frame_system::RawOrigin::Root.into()); + let res = call.dispatch_bypass_filter(RawOrigin::Root.into()); Self::deposit_event(Event::Sudid { sudo_result: res.map(|_| ()).map_err(|e| e.error) }); + // Sudo user does not pay a fee. Ok(Pays::No.into()) } @@ -222,9 +216,6 @@ pub mod pallet { /// Sudo user to specify the weight of the call. /// /// The dispatch origin for this call must be _Signed_. - /// - /// ## Complexity - /// - O(1). #[pallet::call_index(1)] #[pallet::weight((*weight, call.get_dispatch_info().class))] pub fn sudo_unchecked_weight( @@ -232,37 +223,30 @@ pub mod pallet { call: Box<::RuntimeCall>, weight: Weight, ) -> DispatchResultWithPostInfo { - // This is a public call, so we ensure that the origin is some signed account. - let sender = ensure_signed(origin)?; + Self::ensure_sudo(origin)?; let _ = weight; // We don't check the weight witness since it is a root call. - ensure!(Self::key().map_or(false, |k| sender == k), Error::::RequireSudo); - let res = call.dispatch_bypass_filter(frame_system::RawOrigin::Root.into()); + let res = call.dispatch_bypass_filter(RawOrigin::Root.into()); Self::deposit_event(Event::Sudid { sudo_result: res.map(|_| ()).map_err(|e| e.error) }); + // Sudo user does not pay a fee. Ok(Pays::No.into()) } /// Authenticates the current sudo key and sets the given AccountId (`new`) as the new sudo /// key. - /// - /// The dispatch origin for this call must be _Signed_. - /// - /// ## Complexity - /// - O(1). #[pallet::call_index(2)] #[pallet::weight(T::WeightInfo::set_key())] pub fn set_key( origin: OriginFor, new: AccountIdLookupOf, ) -> DispatchResultWithPostInfo { - // This is a public call, so we ensure that the origin is some signed account. - let sender = ensure_signed(origin)?; - ensure!(Self::key().map_or(false, |k| sender == k), Error::::RequireSudo); + Self::ensure_sudo(origin)?; + let new = T::Lookup::lookup(new)?; + Self::deposit_event(Event::KeyChanged { old: Key::::get(), new: new.clone() }); + Key::::put(new); - Self::deposit_event(Event::KeyChanged { old_sudoer: Key::::get() }); - Key::::put(&new); // Sudo user does not pay a fee. Ok(Pays::No.into()) } @@ -271,9 +255,6 @@ pub mod pallet { /// a given account. /// /// The dispatch origin for this call must be _Signed_. - /// - /// ## Complexity - /// - O(1). #[pallet::call_index(3)] #[pallet::weight({ let dispatch_info = call.get_dispatch_info(); @@ -287,17 +268,29 @@ pub mod pallet { who: AccountIdLookupOf, call: Box<::RuntimeCall>, ) -> DispatchResultWithPostInfo { - // This is a public call, so we ensure that the origin is some signed account. 
- let sender = ensure_signed(origin)?; - ensure!(Self::key().map_or(false, |k| sender == k), Error::::RequireSudo); + Self::ensure_sudo(origin)?; let who = T::Lookup::lookup(who)?; - - let res = call.dispatch_bypass_filter(frame_system::RawOrigin::Signed(who).into()); - + let res = call.dispatch_bypass_filter(RawOrigin::Signed(who).into()); Self::deposit_event(Event::SudoAsDone { sudo_result: res.map(|_| ()).map_err(|e| e.error), }); + + // Sudo user does not pay a fee. + Ok(Pays::No.into()) + } + + /// Permanently removes the sudo key. + /// + /// **This cannot be un-done.** + #[pallet::call_index(4)] + #[pallet::weight(T::WeightInfo::remove_key())] + pub fn remove_key(origin: OriginFor) -> DispatchResultWithPostInfo { + Self::ensure_sudo(origin)?; + + Self::deposit_event(Event::KeyRemoved {}); + Key::::kill(); + // Sudo user does not pay a fee. Ok(Pays::No.into()) } @@ -313,9 +306,13 @@ pub mod pallet { }, /// The sudo key has been updated. KeyChanged { - /// The old sudo key if one was previously set. - old_sudoer: Option, + /// The old sudo key (if one was previously set). + old: Option, + /// The new sudo key (if one was set). + new: T::AccountId, }, + /// The key was permanently removed. + KeyRemoved, /// A [sudo_as](Pallet::sudo_as) call just took place. SudoAsDone { /// The result of the call made by the sudo user. @@ -324,9 +321,9 @@ pub mod pallet { } #[pallet::error] - /// Error for the Sudo pallet + /// Error for the Sudo pallet. pub enum Error { - /// Sender must be the Sudo account + /// Sender must be the Sudo account. RequireSudo, } @@ -345,8 +342,19 @@ pub mod pallet { #[pallet::genesis_build] impl BuildGenesisConfig for GenesisConfig { fn build(&self) { - if let Some(ref key) = self.key { - Key::::put(key); + Key::::set(self.key.clone()); + } + } + + impl Pallet { + /// Ensure that the caller is the sudo key. + pub(crate) fn ensure_sudo(origin: OriginFor) -> DispatchResult { + let sender = ensure_signed(origin)?; + + if Self::key().map_or(false, |k| k == sender) { + Ok(()) + } else { + Err(Error::::RequireSudo.into()) } } } diff --git a/substrate/frame/sudo/src/mock.rs b/substrate/frame/sudo/src/mock.rs index 427bda6d99e..6f123b7c82b 100644 --- a/substrate/frame/sudo/src/mock.rs +++ b/substrate/frame/sudo/src/mock.rs @@ -156,7 +156,9 @@ pub fn new_test_ext(root_key: u64) -> sp_io::TestExternalities { sudo::GenesisConfig:: { key: Some(root_key) } .assimilate_storage(&mut t) .unwrap(); - t.into() + let mut ext: sp_io::TestExternalities = t.into(); + ext.execute_with(|| System::set_block_number(1)); + ext } #[cfg(feature = "runtime-benchmarks")] diff --git a/substrate/frame/sudo/src/tests.rs b/substrate/frame/sudo/src/tests.rs index 6963ba2e6a0..13dc069ddef 100644 --- a/substrate/frame/sudo/src/tests.rs +++ b/substrate/frame/sudo/src/tests.rs @@ -59,9 +59,6 @@ fn sudo_basics() { #[test] fn sudo_emits_events_correctly() { new_test_ext(1).execute_with(|| { - // Set block number to 1 because events are not emitted on block 0. - System::set_block_number(1); - // Should emit event to indicate success when called with the root `key` and `call` is `Ok`. let call = Box::new(RuntimeCall::Logger(LoggerCall::privileged_i32_log { i: 42, @@ -118,9 +115,6 @@ fn sudo_unchecked_weight_basics() { #[test] fn sudo_unchecked_weight_emits_events_correctly() { new_test_ext(1).execute_with(|| { - // Set block number to 1 because events are not emitted on block 0. - System::set_block_number(1); - // Should emit event to indicate success when called with the root `key` and `call` is `Ok`. 
let call = Box::new(RuntimeCall::Logger(LoggerCall::privileged_i32_log { i: 42, @@ -154,15 +148,24 @@ fn set_key_basics() { #[test] fn set_key_emits_events_correctly() { new_test_ext(1).execute_with(|| { - // Set block number to 1 because events are not emitted on block 0. - System::set_block_number(1); - // A root `key` can change the root `key`. assert_ok!(Sudo::set_key(RuntimeOrigin::signed(1), 2)); - System::assert_has_event(TestEvent::Sudo(Event::KeyChanged { old_sudoer: Some(1) })); + System::assert_has_event(TestEvent::Sudo(Event::KeyChanged { old: Some(1), new: 2 })); // Double check. assert_ok!(Sudo::set_key(RuntimeOrigin::signed(2), 4)); - System::assert_has_event(TestEvent::Sudo(Event::KeyChanged { old_sudoer: Some(2) })); + System::assert_has_event(TestEvent::Sudo(Event::KeyChanged { old: Some(2), new: 4 })); + }); +} + +#[test] +fn remove_key_works() { + new_test_ext(1).execute_with(|| { + assert_ok!(Sudo::remove_key(RuntimeOrigin::signed(1))); + assert!(Sudo::key().is_none()); + System::assert_has_event(TestEvent::Sudo(Event::KeyRemoved {})); + + assert_noop!(Sudo::remove_key(RuntimeOrigin::signed(1)), Error::::RequireSudo); + assert_noop!(Sudo::set_key(RuntimeOrigin::signed(1), 1), Error::::RequireSudo); }); } @@ -201,9 +204,6 @@ fn sudo_as_basics() { #[test] fn sudo_as_emits_events_correctly() { new_test_ext(1).execute_with(|| { - // Set block number to 1 because events are not emitted on block 0. - System::set_block_number(1); - // A non-privileged function will work when passed to `sudo_as` with the root `key`. let call = Box::new(RuntimeCall::Logger(LoggerCall::non_privileged_log { i: 42, diff --git a/substrate/frame/sudo/src/weights.rs b/substrate/frame/sudo/src/weights.rs index 0cdd0c8a81f..10d1a9f7a51 100644 --- a/substrate/frame/sudo/src/weights.rs +++ b/substrate/frame/sudo/src/weights.rs @@ -15,32 +15,29 @@ // See the License for the specific language governing permissions and // limitations under the License. -//! Autogenerated weights for pallet_sudo. +//! Autogenerated weights for `pallet_sudo` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-06-16, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-11-07, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-e8ezs4ez-project-145-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 +//! HOSTNAME: `runner-yprdrvc7-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! 
WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` // Executed Command: -// ./target/production/substrate +// target/production/substrate-node // benchmark // pallet -// --chain=dev // --steps=50 // --repeat=20 -// --pallet=pallet_sudo -// --no-storage-info -// --no-median-slopes -// --no-min-squares // --extrinsic=* -// --execution=wasm // --wasm-execution=compiled // --heap-pages=4096 -// --output=./frame/sudo/src/weights.rs -// --header=./HEADER-APACHE2 -// --template=./.maintain/frame-weight-template.hbs +// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json +// --pallet=pallet_sudo +// --chain=dev +// --header=./substrate/HEADER-APACHE2 +// --output=./substrate/frame/sudo/src/weights.rs +// --template=./substrate/.maintain/frame-weight-template.hbs #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -50,80 +47,103 @@ use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; use core::marker::PhantomData; -/// Weight functions needed for pallet_sudo. +/// Weight functions needed for `pallet_sudo`. pub trait WeightInfo { fn set_key() -> Weight; fn sudo() -> Weight; fn sudo_as() -> Weight; + fn remove_key() -> Weight; } -/// Weights for pallet_sudo using the Substrate node and recommended hardware. +/// Weights for `pallet_sudo` using the Substrate node and recommended hardware. pub struct SubstrateWeight(PhantomData); impl WeightInfo for SubstrateWeight { - /// Storage: Sudo Key (r:1 w:1) - /// Proof: Sudo Key (max_values: Some(1), max_size: Some(32), added: 527, mode: MaxEncodedLen) + /// Storage: `Sudo::Key` (r:1 w:1) + /// Proof: `Sudo::Key` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) fn set_key() -> Weight { // Proof Size summary in bytes: // Measured: `165` // Estimated: `1517` - // Minimum execution time: 12_918_000 picoseconds. - Weight::from_parts(13_403_000, 1517) + // Minimum execution time: 9_600_000 picoseconds. + Weight::from_parts(10_076_000, 1517) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: Sudo Key (r:1 w:0) - /// Proof: Sudo Key (max_values: Some(1), max_size: Some(32), added: 527, mode: MaxEncodedLen) + /// Storage: `Sudo::Key` (r:1 w:0) + /// Proof: `Sudo::Key` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) fn sudo() -> Weight { // Proof Size summary in bytes: // Measured: `165` // Estimated: `1517` - // Minimum execution time: 12_693_000 picoseconds. - Weight::from_parts(13_001_000, 1517) + // Minimum execution time: 10_453_000 picoseconds. + Weight::from_parts(10_931_000, 1517) .saturating_add(T::DbWeight::get().reads(1_u64)) } - /// Storage: Sudo Key (r:1 w:0) - /// Proof: Sudo Key (max_values: Some(1), max_size: Some(32), added: 527, mode: MaxEncodedLen) + /// Storage: `Sudo::Key` (r:1 w:0) + /// Proof: `Sudo::Key` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) fn sudo_as() -> Weight { // Proof Size summary in bytes: // Measured: `165` // Estimated: `1517` - // Minimum execution time: 12_590_000 picoseconds. - Weight::from_parts(12_994_000, 1517) + // Minimum execution time: 10_202_000 picoseconds. 
+ Weight::from_parts(10_800_000, 1517) + .saturating_add(T::DbWeight::get().reads(1_u64)) + } + /// Storage: `Sudo::Key` (r:1 w:1) + /// Proof: `Sudo::Key` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) + fn remove_key() -> Weight { + // Proof Size summary in bytes: + // Measured: `165` + // Estimated: `1517` + // Minimum execution time: 8_555_000 picoseconds. + Weight::from_parts(8_846_000, 1517) .saturating_add(T::DbWeight::get().reads(1_u64)) + .saturating_add(T::DbWeight::get().writes(1_u64)) } } -// For backwards compatibility and tests +// For backwards compatibility and tests. impl WeightInfo for () { - /// Storage: Sudo Key (r:1 w:1) - /// Proof: Sudo Key (max_values: Some(1), max_size: Some(32), added: 527, mode: MaxEncodedLen) + /// Storage: `Sudo::Key` (r:1 w:1) + /// Proof: `Sudo::Key` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) fn set_key() -> Weight { // Proof Size summary in bytes: // Measured: `165` // Estimated: `1517` - // Minimum execution time: 12_918_000 picoseconds. - Weight::from_parts(13_403_000, 1517) + // Minimum execution time: 9_600_000 picoseconds. + Weight::from_parts(10_076_000, 1517) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: Sudo Key (r:1 w:0) - /// Proof: Sudo Key (max_values: Some(1), max_size: Some(32), added: 527, mode: MaxEncodedLen) + /// Storage: `Sudo::Key` (r:1 w:0) + /// Proof: `Sudo::Key` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) fn sudo() -> Weight { // Proof Size summary in bytes: // Measured: `165` // Estimated: `1517` - // Minimum execution time: 12_693_000 picoseconds. - Weight::from_parts(13_001_000, 1517) + // Minimum execution time: 10_453_000 picoseconds. + Weight::from_parts(10_931_000, 1517) .saturating_add(RocksDbWeight::get().reads(1_u64)) } - /// Storage: Sudo Key (r:1 w:0) - /// Proof: Sudo Key (max_values: Some(1), max_size: Some(32), added: 527, mode: MaxEncodedLen) + /// Storage: `Sudo::Key` (r:1 w:0) + /// Proof: `Sudo::Key` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) fn sudo_as() -> Weight { // Proof Size summary in bytes: // Measured: `165` // Estimated: `1517` - // Minimum execution time: 12_590_000 picoseconds. - Weight::from_parts(12_994_000, 1517) + // Minimum execution time: 10_202_000 picoseconds. + Weight::from_parts(10_800_000, 1517) + .saturating_add(RocksDbWeight::get().reads(1_u64)) + } + /// Storage: `Sudo::Key` (r:1 w:1) + /// Proof: `Sudo::Key` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) + fn remove_key() -> Weight { + // Proof Size summary in bytes: + // Measured: `165` + // Estimated: `1517` + // Minimum execution time: 8_555_000 picoseconds. + Weight::from_parts(8_846_000, 1517) .saturating_add(RocksDbWeight::get().reads(1_u64)) + .saturating_add(RocksDbWeight::get().writes(1_u64)) } } -- GitLab From 9673fbfa32b7f89c687c175470a766317664e847 Mon Sep 17 00:00:00 2001 From: Yuri Volkov <0@mcornholio.ru> Date: Wed, 8 Nov 2023 14:19:13 +0100 Subject: [PATCH 007/199] Adding gitspiegel-trigger workflow (#2135) GitHub has a setting that requires manual click for executing GHA on the branch, for the first-time contributors: https://docs.github.com/en/actions/managing-workflow-runs/approving-workflow-runs-from-public-forks. After this PR, gitspiegel will respect that setting. 
So, for PRs from first-time contributors, gitspiegel won't do mirroring until the button in the PR is clicked.

More info: https://github.com/paritytech/gitspiegel/issues/169
---
 .github/workflows/gitspiegel-trigger.yml | 22 ++++++++++++++++++++++
 1 file changed, 22 insertions(+)
 create mode 100644 .github/workflows/gitspiegel-trigger.yml

diff --git a/.github/workflows/gitspiegel-trigger.yml b/.github/workflows/gitspiegel-trigger.yml
new file mode 100644
index 00000000000..dce3aaf2fec
--- /dev/null
+++ b/.github/workflows/gitspiegel-trigger.yml
@@ -0,0 +1,22 @@
+name: gitspiegel sync
+
+# This workflow doesn't do anything; its only use is to trigger the "workflow_run"
+# webhook, which will be consumed by gitspiegel.
+# This way, gitspiegel won't do mirroring unless this workflow runs,
+# and running the workflow is protected by GitHub.
+
+on:
+  pull_request:
+    types:
+      - opened
+      - synchronize
+      - unlocked
+      - ready_for_review
+      - reopened
+
+jobs:
+  sync:
+    runs-on: ubuntu-latest
+    steps:
+      - name: Do nothing
+        run: echo "let's go"
-- 
GitLab

From 1bc08858292b88b6c834fbc9f72d61e692363e4a Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Bastian=20K=C3=B6cher?=
Date: Wed, 8 Nov 2023 14:33:19 +0100
Subject: [PATCH 008/199] validate-block: Fix `TrieCache` implementation (#2214)

The trie cache implementation was ignoring the `storage_root` when setting up
the value cache. The problem with this is that the value cache works using
`storage_keys`, and these keys are not unique across different tries. A block
can actually use several tries (the main trie and multiple child tries). This
pull request fixes the issue by no longer ignoring the `storage_root` and by
returning a unique `value_cache` per `storage_root`. It also adds a test for
the observed bug and improves the documentation so that this doesn't happen
again.
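For illustration only (not part of the patch): the sketch below shows why the value cache has to be keyed by storage root. It uses simplified stand-in types, a fixed-size array in place of `H::Out` and plain `Vec<u8>` values in place of `trie_db::CachedValue`, and the key/value names follow the `read_and_write_child_tries` test added by this patch.

```rust
use std::collections::HashMap;

// Stand-ins for `H::Out` and `trie_db::CachedValue` (simplified for this example).
type StorageRoot = [u8; 32];
type StorageKey = Vec<u8>;
type CachedValue = Vec<u8>;

/// Value cache scoped per trie: `storage_root` -> `storage_key` -> cached value.
#[derive(Default)]
struct ValueCache {
    per_trie: HashMap<StorageRoot, HashMap<StorageKey, CachedValue>>,
}

impl ValueCache {
    fn insert(&mut self, root: StorageRoot, key: StorageKey, value: CachedValue) {
        // Each trie gets its own inner map, so identical keys in different tries cannot collide.
        self.per_trie.entry(root).or_default().insert(key, value);
    }

    fn get(&self, root: &StorageRoot, key: &[u8]) -> Option<&CachedValue> {
        self.per_trie.get(root).and_then(|cache| cache.get(key))
    }
}

fn main() {
    let mut cache = ValueCache::default();
    let main_trie_root = [1u8; 32];
    let child_trie_root = [2u8; 32];

    // The same storage key exists in two tries with different values.
    cache.insert(main_trie_root, b"hello".to_vec(), b"world1".to_vec());
    cache.insert(child_trie_root, b"hello".to_vec(), b"world2".to_vec());

    // Because lookups are scoped by storage root, each trie sees its own value.
    assert_eq!(cache.get(&main_trie_root, b"hello"), Some(&b"world1".to_vec()));
    assert_eq!(cache.get(&child_trie_root, b"hello"), Some(&b"world2".to_vec()));
}
```

With a single flat map keyed only by `storage_key`, the second insert would overwrite the first and the main-trie lookup would return the child-trie value, which is exactly the collision the per-`storage_root` map in this patch avoids.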
--- Cargo.lock | 1 + cumulus/pallets/parachain-system/Cargo.toml | 1 + .../src/validate_block/tests.rs | 48 +++++++++++++++++-- .../src/validate_block/trie_cache.rs | 24 +++++++--- cumulus/test/runtime/src/test_pallet.rs | 24 ++++++++++ .../state-machine/src/trie_backend.rs | 5 +- substrate/test-utils/client/src/client_ext.rs | 4 +- substrate/test-utils/client/src/lib.rs | 2 +- 8 files changed, 95 insertions(+), 14 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 06ab9ed7e2f..8c7121e235c 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3868,6 +3868,7 @@ dependencies = [ "frame-benchmarking", "frame-support", "frame-system", + "futures", "hex-literal", "impl-trait-for-tuples", "lazy_static", diff --git a/cumulus/pallets/parachain-system/Cargo.toml b/cumulus/pallets/parachain-system/Cargo.toml index 1c26b3a7486..5600c95a2a6 100644 --- a/cumulus/pallets/parachain-system/Cargo.toml +++ b/cumulus/pallets/parachain-system/Cargo.toml @@ -45,6 +45,7 @@ assert_matches = "1.5" hex-literal = "0.4.1" lazy_static = "1.4" rand = "0.8.5" +futures = "0.3.28" # Substrate sc-client-api = { path = "../../../substrate/client/api" } diff --git a/cumulus/pallets/parachain-system/src/validate_block/tests.rs b/cumulus/pallets/parachain-system/src/validate_block/tests.rs index 0cf68f25cc3..f17ac6007a0 100644 --- a/cumulus/pallets/parachain-system/src/validate_block/tests.rs +++ b/cumulus/pallets/parachain-system/src/validate_block/tests.rs @@ -21,12 +21,13 @@ use cumulus_test_client::{ runtime::{ self as test_runtime, Block, Hash, Header, TestPalletCall, UncheckedExtrinsic, WASM_BINARY, }, - transfer, BlockData, BuildParachainBlockData, Client, DefaultTestClientBuilderExt, HeadData, - InitBlockBuilder, TestClientBuilder, TestClientBuilderExt, ValidationParams, + transfer, BlockData, BlockOrigin, BuildParachainBlockData, Client, ClientBlockImportExt, + DefaultTestClientBuilderExt, HeadData, InitBlockBuilder, TestClientBuilder, + TestClientBuilderExt, ValidationParams, }; use cumulus_test_relay_sproof_builder::RelayStateSproofBuilder; use sp_keyring::AccountKeyring::*; -use sp_runtime::traits::Header as HeaderT; +use sp_runtime::traits::{Block as BlockT, Header as HeaderT}; use std::{env, process::Command}; use crate::validate_block::MemoryOptimizedValidationParams; @@ -100,7 +101,7 @@ fn build_block_with_witness( } #[test] -fn validate_block_no_extra_extrinsics() { +fn validate_block_works() { sp_tracing::try_init_simple(); let (client, parent_head) = create_test_client(); @@ -290,3 +291,42 @@ fn validation_params_and_memory_optimized_validation_params_encode_and_decode() let decoded = ValidationParams::decode_all(&mut &encoded[..]).unwrap(); assert_eq!(decoded, validation_params); } + +/// Test for ensuring that we are differentiating in the `validation::trie_cache` between different +/// child tries. +/// +/// This is achieved by first building a block using `read_and_write_child_tries` that should set +/// the values in the child tries. In the second step we are building a second block with the same +/// extrinsic that reads the values from the child tries and it asserts that we read the correct +/// data from the state. +#[test] +fn validate_block_works_with_child_tries() { + sp_tracing::try_init_simple(); + + let (mut client, parent_head) = create_test_client(); + let TestBlockData { block, .. 
} = build_block_with_witness( + &client, + vec![generate_extrinsic(&client, Charlie, TestPalletCall::read_and_write_child_tries {})], + parent_head.clone(), + Default::default(), + ); + let block = block.into_block(); + + futures::executor::block_on(client.import(BlockOrigin::Own, block.clone())).unwrap(); + + let parent_head = block.header().clone(); + + let TestBlockData { block, validation_data } = build_block_with_witness( + &client, + vec![generate_extrinsic(&client, Alice, TestPalletCall::read_and_write_child_tries {})], + parent_head.clone(), + Default::default(), + ); + + let header = block.header().clone(); + + let res_header = + call_validate_block(parent_head, block, validation_data.relay_parent_storage_root) + .expect("Calls `validate_block`"); + assert_eq!(header, res_header); +} diff --git a/cumulus/pallets/parachain-system/src/validate_block/trie_cache.rs b/cumulus/pallets/parachain-system/src/validate_block/trie_cache.rs index 57876b87876..5d785910fbe 100644 --- a/cumulus/pallets/parachain-system/src/validate_block/trie_cache.rs +++ b/cumulus/pallets/parachain-system/src/validate_block/trie_cache.rs @@ -67,7 +67,13 @@ impl<'a, H: Hasher> trie_db::TrieCache> for TrieCache<'a, H> { /// Provider of [`TrieCache`] instances. pub(crate) struct CacheProvider { node_cache: RefCell>>, - value_cache: RefCell, trie_db::CachedValue>>, + /// Cache: `storage_root` => `storage_key` => `value`. + /// + /// One `block` can for example use multiple tries (child tries) and we need to distinguish the + /// cached (`storage_key`, `value`) between them. For this we are using the `storage_root` to + /// distinguish them (even if the storage root is the same for two child tries, it just means + /// that both are exactly the same trie and there would happen no collision). + value_cache: RefCell, trie_db::CachedValue>>>, } impl CacheProvider { @@ -81,22 +87,26 @@ impl CacheProvider { impl TrieCacheProvider for CacheProvider { type Cache<'a> = TrieCache<'a, H> where H: 'a; - fn as_trie_db_cache(&self, _storage_root: ::Out) -> Self::Cache<'_> { + fn as_trie_db_cache(&self, storage_root: ::Out) -> Self::Cache<'_> { TrieCache { - value_cache: Some(self.value_cache.borrow_mut()), + value_cache: Some(RefMut::map(self.value_cache.borrow_mut(), |c| { + c.entry(storage_root).or_default() + })), node_cache: self.node_cache.borrow_mut(), } } fn as_trie_db_mut_cache(&self) -> Self::Cache<'_> { // This method is called when we calculate the storage root. - // Since we are using a simplified cache architecture, - // we do not have separate key spaces for different storage roots. - // The value cache is therefore disabled here. + // We are not interested in caching new values (as we would throw them away directly after a + // block is validated) and thus, we don't pass any `value_cache`. TrieCache { value_cache: None, node_cache: self.node_cache.borrow_mut() } } - fn merge<'a>(&'a self, _other: Self::Cache<'a>, _new_root: ::Out) {} + fn merge<'a>(&'a self, _other: Self::Cache<'a>, _new_root: ::Out) { + // This is called to merge the `value_cache` from `as_trie_db_mut_cache`, which is not + // activated, so we don't need to do anything here. 
+ } } // This is safe here since we are single-threaded in WASM diff --git a/cumulus/test/runtime/src/test_pallet.rs b/cumulus/test/runtime/src/test_pallet.rs index a0763c9069c..5d11b7f490c 100644 --- a/cumulus/test/runtime/src/test_pallet.rs +++ b/cumulus/test/runtime/src/test_pallet.rs @@ -49,6 +49,30 @@ pub mod pallet { ); Ok(()) } + + /// A dispatchable that first reads two values from two different child tries, asserts they + /// are the expected values (if the values exist in the state) and then writes two different + /// values to these child tries. + #[pallet::weight(0)] + pub fn read_and_write_child_tries(_: OriginFor) -> DispatchResult { + let key = &b"hello"[..]; + let first_trie = &b"first"[..]; + let second_trie = &b"second"[..]; + let first_value = "world1".encode(); + let second_value = "world2".encode(); + + if let Some(res) = sp_io::default_child_storage::get(first_trie, key) { + assert_eq!(first_value, res); + } + if let Some(res) = sp_io::default_child_storage::get(second_trie, key) { + assert_eq!(second_value, res); + } + + sp_io::default_child_storage::set(first_trie, key, &first_value); + sp_io::default_child_storage::set(second_trie, key, &second_value); + + Ok(()) + } } #[derive(frame_support::DefaultNoBound)] diff --git a/substrate/primitives/state-machine/src/trie_backend.rs b/substrate/primitives/state-machine/src/trie_backend.rs index afa80addd2b..7b337b5fd54 100644 --- a/substrate/primitives/state-machine/src/trie_backend.rs +++ b/substrate/primitives/state-machine/src/trie_backend.rs @@ -52,7 +52,10 @@ pub trait TrieCacheProvider { /// Return a [`trie_db::TrieDB`] compatible cache. /// - /// The `storage_root` parameter should be the storage root of the used trie. + /// The `storage_root` parameter *must* be the storage root of the trie this cache is used for. + /// + /// NOTE: Implementors should use the `storage_root` to differentiate between storage keys that + /// may belong to different tries. fn as_trie_db_cache(&self, storage_root: H::Out) -> Self::Cache<'_>; /// Returns a cache that can be used with a [`trie_db::TrieDBMut`]. diff --git a/substrate/test-utils/client/src/client_ext.rs b/substrate/test-utils/client/src/client_ext.rs index 8efa7b5f07f..73581a4f0ef 100644 --- a/substrate/test-utils/client/src/client_ext.rs +++ b/substrate/test-utils/client/src/client_ext.rs @@ -20,9 +20,11 @@ use sc_client_api::{backend::Finalizer, client::BlockBackend}; use sc_consensus::{BlockImport, BlockImportParams, ForkChoiceStrategy}; use sc_service::client::Client; -use sp_consensus::{BlockOrigin, Error as ConsensusError}; +use sp_consensus::Error as ConsensusError; use sp_runtime::{traits::Block as BlockT, Justification, Justifications}; +pub use sp_consensus::BlockOrigin; + /// Extension trait for a test client. pub trait ClientExt: Sized { /// Finalize a block. 
diff --git a/substrate/test-utils/client/src/lib.rs b/substrate/test-utils/client/src/lib.rs index 90e15e0f8d5..084dd2a1861 100644 --- a/substrate/test-utils/client/src/lib.rs +++ b/substrate/test-utils/client/src/lib.rs @@ -21,7 +21,7 @@ pub mod client_ext; -pub use self::client_ext::{ClientBlockImportExt, ClientExt}; +pub use self::client_ext::{BlockOrigin, ClientBlockImportExt, ClientExt}; pub use sc_client_api::{execution_extensions::ExecutionExtensions, BadBlocks, ForkBlocks}; pub use sc_client_db::{self, Backend, BlocksPruning}; pub use sc_executor::{self, NativeElseWasmExecutor, WasmExecutionMethod, WasmExecutor}; -- GitLab From 50390950d87fb745ec5ff46be698cac93113efb7 Mon Sep 17 00:00:00 2001 From: s0me0ne-unkn0wn <48632512+s0me0ne-unkn0wn@users.noreply.github.com> Date: Wed, 8 Nov 2023 15:21:58 +0100 Subject: [PATCH 009/199] Refactor candidate validation messages (#2219) --- polkadot/node/core/approval-voting/src/lib.rs | 14 +- .../node/core/approval-voting/src/tests.rs | 8 +- polkadot/node/core/backing/src/lib.rs | 16 +- polkadot/node/core/backing/src/tests/mod.rs | 369 +++++++++--------- .../src/tests/prospective_parachains.rs | 69 ++-- .../node/core/candidate-validation/src/lib.rs | 27 +- .../src/participation/mod.rs | 16 +- .../src/participation/tests.rs | 26 +- polkadot/node/core/pvf-checker/src/lib.rs | 6 +- polkadot/node/core/pvf-checker/src/tests.rs | 7 +- polkadot/node/malus/src/variants/common.rs | 84 ++-- .../node/overseer/examples/minimal-example.rs | 12 +- polkadot/node/overseer/src/tests.rs | 24 +- polkadot/node/subsystem-types/src/messages.rs | 60 +-- 14 files changed, 384 insertions(+), 354 deletions(-) diff --git a/polkadot/node/core/approval-voting/src/lib.rs b/polkadot/node/core/approval-voting/src/lib.rs index e7cbde56739..94f7fcaf941 100644 --- a/polkadot/node/core/approval-voting/src/lib.rs +++ b/polkadot/node/core/approval-voting/src/lib.rs @@ -2861,15 +2861,15 @@ async fn launch_approval( let (val_tx, val_rx) = oneshot::channel(); sender - .send_message(CandidateValidationMessage::ValidateFromExhaustive( - available_data.validation_data, + .send_message(CandidateValidationMessage::ValidateFromExhaustive { + validation_data: available_data.validation_data, validation_code, - candidate.clone(), - available_data.pov, + candidate_receipt: candidate.clone(), + pov: available_data.pov, executor_params, - PvfExecTimeoutKind::Approval, - val_tx, - )) + exec_timeout_kind: PvfExecTimeoutKind::Approval, + response_sender: val_tx, + }) .await; match val_rx.await { diff --git a/polkadot/node/core/approval-voting/src/tests.rs b/polkadot/node/core/approval-voting/src/tests.rs index 13213c158cd..0c0dcfde9b6 100644 --- a/polkadot/node/core/approval-voting/src/tests.rs +++ b/polkadot/node/core/approval-voting/src/tests.rs @@ -2704,8 +2704,12 @@ async fn handle_double_assignment_import( assert_matches!( overseer_recv(virtual_overseer).await, - AllMessages::CandidateValidation(CandidateValidationMessage::ValidateFromExhaustive(_, _, _, _, _, timeout, tx)) if timeout == PvfExecTimeoutKind::Approval => { - tx.send(Ok(ValidationResult::Valid(Default::default(), Default::default()))) + AllMessages::CandidateValidation(CandidateValidationMessage::ValidateFromExhaustive { + exec_timeout_kind, + response_sender, + .. 
+ }) if exec_timeout_kind == PvfExecTimeoutKind::Approval => { + response_sender.send(Ok(ValidationResult::Valid(Default::default(), Default::default()))) .unwrap(); } ); diff --git a/polkadot/node/core/backing/src/lib.rs b/polkadot/node/core/backing/src/lib.rs index 27b5972d982..a91eefe5e04 100644 --- a/polkadot/node/core/backing/src/lib.rs +++ b/polkadot/node/core/backing/src/lib.rs @@ -551,8 +551,8 @@ async fn request_pov( async fn request_candidate_validation( sender: &mut impl overseer::CandidateBackingSenderTrait, - pvd: PersistedValidationData, - code: ValidationCode, + validation_data: PersistedValidationData, + validation_code: ValidationCode, candidate_receipt: CandidateReceipt, pov: Arc, executor_params: ExecutorParams, @@ -560,15 +560,15 @@ async fn request_candidate_validation( let (tx, rx) = oneshot::channel(); sender - .send_message(CandidateValidationMessage::ValidateFromExhaustive( - pvd, - code, + .send_message(CandidateValidationMessage::ValidateFromExhaustive { + validation_data, + validation_code, candidate_receipt, pov, executor_params, - PvfExecTimeoutKind::Backing, - tx, - )) + exec_timeout_kind: PvfExecTimeoutKind::Backing, + response_sender: tx, + }) .await; match rx.await { diff --git a/polkadot/node/core/backing/src/tests/mod.rs b/polkadot/node/core/backing/src/tests/mod.rs index 35d17f3d905..caa85c12989 100644 --- a/polkadot/node/core/backing/src/tests/mod.rs +++ b/polkadot/node/core/backing/src/tests/mod.rs @@ -329,32 +329,32 @@ async fn assert_validation_requests( async fn assert_validate_from_exhaustive( virtual_overseer: &mut VirtualOverseer, - pvd: &PersistedValidationData, - pov: &PoV, - validation_code: &ValidationCode, - candidate: &CommittedCandidateReceipt, + assert_pvd: &PersistedValidationData, + assert_pov: &PoV, + assert_validation_code: &ValidationCode, + assert_candidate: &CommittedCandidateReceipt, expected_head_data: &HeadData, result_validation_data: PersistedValidationData, ) { assert_matches!( virtual_overseer.recv().await, AllMessages::CandidateValidation( - CandidateValidationMessage::ValidateFromExhaustive( - _pvd, - _validation_code, + CandidateValidationMessage::ValidateFromExhaustive { + pov, + validation_data, + validation_code, candidate_receipt, - _pov, - _, - timeout, - tx, - ), - ) if _pvd == *pvd && - _validation_code == *validation_code && - *_pov == *pov && &candidate_receipt.descriptor == candidate.descriptor() && - timeout == PvfExecTimeoutKind::Backing && - candidate.commitments.hash() == candidate_receipt.commitments_hash => + exec_timeout_kind, + response_sender, + .. 
+ }, + ) if validation_data == *assert_pvd && + validation_code == *assert_validation_code && + *pov == *assert_pov && &candidate_receipt.descriptor == assert_candidate.descriptor() && + exec_timeout_kind == PvfExecTimeoutKind::Backing && + candidate_receipt.commitments_hash == assert_candidate.commitments.hash() => { - tx.send(Ok(ValidationResult::Valid( + response_sender.send(Ok(ValidationResult::Valid( CandidateCommitments { head_data: expected_head_data.clone(), horizontal_messages: Default::default(), @@ -461,11 +461,11 @@ fn backing_works() { test_harness(test_state.keystore.clone(), |mut virtual_overseer| async move { test_startup(&mut virtual_overseer, &test_state).await; - let pov = PoV { block_data: BlockData(vec![1, 2, 3]) }; - let pvd = dummy_pvd(); - let validation_code = ValidationCode(vec![1, 2, 3]); + let pov_ab = PoV { block_data: BlockData(vec![1, 2, 3]) }; + let pvd_ab = dummy_pvd(); + let validation_code_ab = ValidationCode(vec![1, 2, 3]); - let pov_hash = pov.hash(); + let pov_hash = pov_ab.hash(); let expected_head_data = test_state.head_data.get(&test_state.chain_ids[0]).unwrap(); @@ -474,8 +474,8 @@ fn backing_works() { relay_parent: test_state.relay_parent, pov_hash, head_data: expected_head_data.clone(), - erasure_root: make_erasure_root(&test_state, pov.clone(), pvd.clone()), - validation_code: validation_code.0.clone(), + erasure_root: make_erasure_root(&test_state, pov_ab.clone(), pvd_ab.clone()), + validation_code: validation_code_ab.0.clone(), ..Default::default() } .build(); @@ -498,7 +498,7 @@ fn backing_works() { let signed_a = SignedFullStatementWithPVD::sign( &test_state.keystore, - StatementWithPVD::Seconded(candidate_a.clone(), pvd.clone()), + StatementWithPVD::Seconded(candidate_a.clone(), pvd_ab.clone()), &test_state.signing_context, ValidatorIndex(2), &public2.into(), @@ -523,7 +523,7 @@ fn backing_works() { virtual_overseer.send(FromOrchestra::Communication { msg: statement }).await; - assert_validation_requests(&mut virtual_overseer, validation_code.clone()).await; + assert_validation_requests(&mut virtual_overseer, validation_code_ab.clone()).await; // Sending a `Statement::Seconded` for our assignment will start // validation process. The first thing requested is the PoV. @@ -536,7 +536,7 @@ fn backing_works() { .. } ) if relay_parent == test_state.relay_parent => { - tx.send(pov.clone()).unwrap(); + tx.send(pov_ab.clone()).unwrap(); } ); @@ -545,22 +545,22 @@ fn backing_works() { assert_matches!( virtual_overseer.recv().await, AllMessages::CandidateValidation( - CandidateValidationMessage::ValidateFromExhaustive( - _pvd, - _validation_code, + CandidateValidationMessage::ValidateFromExhaustive { + validation_data, + validation_code, candidate_receipt, - _pov, - _, - timeout, - tx, - ), - ) if _pvd == pvd && - _validation_code == validation_code && - *_pov == pov && &candidate_receipt.descriptor == candidate_a.descriptor() && - timeout == PvfExecTimeoutKind::Backing && - candidate_a_commitments_hash == candidate_receipt.commitments_hash => + pov, + exec_timeout_kind, + response_sender, + .. 
+ }, + ) if validation_data == pvd_ab && + validation_code == validation_code_ab && + *pov == pov_ab && &candidate_receipt.descriptor == candidate_a.descriptor() && + exec_timeout_kind == PvfExecTimeoutKind::Backing && + candidate_receipt.commitments_hash == candidate_a_commitments_hash => { - tx.send(Ok( + response_sender.send(Ok( ValidationResult::Valid(CandidateCommitments { head_data: expected_head_data.clone(), upward_messages: Default::default(), @@ -623,11 +623,11 @@ fn backing_works_while_validation_ongoing() { test_harness(test_state.keystore.clone(), |mut virtual_overseer| async move { test_startup(&mut virtual_overseer, &test_state).await; - let pov = PoV { block_data: BlockData(vec![1, 2, 3]) }; - let pvd = dummy_pvd(); - let validation_code = ValidationCode(vec![1, 2, 3]); + let pov_abc = PoV { block_data: BlockData(vec![1, 2, 3]) }; + let pvd_abc = dummy_pvd(); + let validation_code_abc = ValidationCode(vec![1, 2, 3]); - let pov_hash = pov.hash(); + let pov_hash = pov_abc.hash(); let expected_head_data = test_state.head_data.get(&test_state.chain_ids[0]).unwrap(); @@ -636,8 +636,8 @@ fn backing_works_while_validation_ongoing() { relay_parent: test_state.relay_parent, pov_hash, head_data: expected_head_data.clone(), - erasure_root: make_erasure_root(&test_state, pov.clone(), pvd.clone()), - validation_code: validation_code.0.clone(), + erasure_root: make_erasure_root(&test_state, pov_abc.clone(), pvd_abc.clone()), + validation_code: validation_code_abc.0.clone(), ..Default::default() } .build(); @@ -666,7 +666,7 @@ fn backing_works_while_validation_ongoing() { let signed_a = SignedFullStatementWithPVD::sign( &test_state.keystore, - StatementWithPVD::Seconded(candidate_a.clone(), pvd.clone()), + StatementWithPVD::Seconded(candidate_a.clone(), pvd_abc.clone()), &test_state.signing_context, ValidatorIndex(2), &public2.into(), @@ -701,7 +701,7 @@ fn backing_works_while_validation_ongoing() { CandidateBackingMessage::Statement(test_state.relay_parent, signed_a.clone()); virtual_overseer.send(FromOrchestra::Communication { msg: statement }).await; - assert_validation_requests(&mut virtual_overseer, validation_code.clone()).await; + assert_validation_requests(&mut virtual_overseer, validation_code_abc.clone()).await; // Sending a `Statement::Seconded` for our assignment will start // validation process. The first thing requested is PoV from the @@ -715,7 +715,7 @@ fn backing_works_while_validation_ongoing() { .. } ) if relay_parent == test_state.relay_parent => { - tx.send(pov.clone()).unwrap(); + tx.send(pov_abc.clone()).unwrap(); } ); @@ -724,24 +724,24 @@ fn backing_works_while_validation_ongoing() { assert_matches!( virtual_overseer.recv().await, AllMessages::CandidateValidation( - CandidateValidationMessage::ValidateFromExhaustive( - _pvd, - _validation_code, + CandidateValidationMessage::ValidateFromExhaustive { + validation_data, + validation_code, candidate_receipt, - _pov, - _, - timeout, - tx, - ), - ) if _pvd == pvd && - _validation_code == validation_code && - *_pov == pov && &candidate_receipt.descriptor == candidate_a.descriptor() && - timeout == PvfExecTimeoutKind::Backing && + pov, + exec_timeout_kind, + response_sender, + .. + }, + ) if validation_data == pvd_abc && + validation_code == validation_code_abc && + *pov == pov_abc && &candidate_receipt.descriptor == candidate_a.descriptor() && + exec_timeout_kind == PvfExecTimeoutKind::Backing && candidate_a_commitments_hash == candidate_receipt.commitments_hash => { // we never validate the candidate. 
our local node // shouldn't issue any statements. - std::mem::forget(tx); + std::mem::forget(response_sender); } ); @@ -812,11 +812,11 @@ fn backing_misbehavior_works() { test_harness(test_state.keystore.clone(), |mut virtual_overseer| async move { test_startup(&mut virtual_overseer, &test_state).await; - let pov = PoV { block_data: BlockData(vec![1, 2, 3]) }; + let pov_a = PoV { block_data: BlockData(vec![1, 2, 3]) }; - let pov_hash = pov.hash(); - let pvd = dummy_pvd(); - let validation_code = ValidationCode(vec![1, 2, 3]); + let pov_hash = pov_a.hash(); + let pvd_a = dummy_pvd(); + let validation_code_a = ValidationCode(vec![1, 2, 3]); let expected_head_data = test_state.head_data.get(&test_state.chain_ids[0]).unwrap(); @@ -824,9 +824,9 @@ fn backing_misbehavior_works() { para_id: test_state.chain_ids[0], relay_parent: test_state.relay_parent, pov_hash, - erasure_root: make_erasure_root(&test_state, pov.clone(), pvd.clone()), + erasure_root: make_erasure_root(&test_state, pov_a.clone(), pvd_a.clone()), head_data: expected_head_data.clone(), - validation_code: validation_code.0.clone(), + validation_code: validation_code_a.0.clone(), ..Default::default() } .build(); @@ -842,7 +842,7 @@ fn backing_misbehavior_works() { .expect("Insert key into keystore"); let seconded_2 = SignedFullStatementWithPVD::sign( &test_state.keystore, - StatementWithPVD::Seconded(candidate_a.clone(), pvd.clone()), + StatementWithPVD::Seconded(candidate_a.clone(), pvd_a.clone()), &test_state.signing_context, ValidatorIndex(2), &public2.into(), @@ -867,7 +867,7 @@ fn backing_misbehavior_works() { virtual_overseer.send(FromOrchestra::Communication { msg: statement }).await; - assert_validation_requests(&mut virtual_overseer, validation_code.clone()).await; + assert_validation_requests(&mut virtual_overseer, validation_code_a.clone()).await; assert_matches!( virtual_overseer.recv().await, @@ -878,29 +878,29 @@ fn backing_misbehavior_works() { .. } ) if relay_parent == test_state.relay_parent => { - tx.send(pov.clone()).unwrap(); + tx.send(pov_a.clone()).unwrap(); } ); assert_matches!( virtual_overseer.recv().await, AllMessages::CandidateValidation( - CandidateValidationMessage::ValidateFromExhaustive( - _pvd, - _validation_code, + CandidateValidationMessage::ValidateFromExhaustive { + validation_data, + validation_code, candidate_receipt, - _pov, - _, - timeout, - tx, - ), - ) if _pvd == pvd && - _validation_code == validation_code && - *_pov == pov && &candidate_receipt.descriptor == candidate_a.descriptor() && - timeout == PvfExecTimeoutKind::Backing && + pov, + exec_timeout_kind, + response_sender, + .. 
+ }, + ) if validation_data == pvd_a && + validation_code == validation_code_a && + *pov == pov_a && &candidate_receipt.descriptor == candidate_a.descriptor() && + exec_timeout_kind == PvfExecTimeoutKind::Backing && candidate_a_commitments_hash == candidate_receipt.commitments_hash => { - tx.send(Ok( + response_sender.send(Ok( ValidationResult::Valid(CandidateCommitments { head_data: expected_head_data.clone(), upward_messages: Default::default(), @@ -1052,22 +1052,22 @@ fn backing_dont_second_invalid() { assert_matches!( virtual_overseer.recv().await, AllMessages::CandidateValidation( - CandidateValidationMessage::ValidateFromExhaustive( - _pvd, - _validation_code, + CandidateValidationMessage::ValidateFromExhaustive { + validation_data, + validation_code, candidate_receipt, - _pov, - _, - timeout, - tx, - ), - ) if _pvd == pvd_a && - _validation_code == validation_code_a && - *_pov == pov_block_a && &candidate_receipt.descriptor == candidate_a.descriptor() && - timeout == PvfExecTimeoutKind::Backing && + pov, + exec_timeout_kind, + response_sender, + .. + }, + ) if validation_data == pvd_a && + validation_code == validation_code_a && + *pov == pov_block_a && &candidate_receipt.descriptor == candidate_a.descriptor() && + exec_timeout_kind == PvfExecTimeoutKind::Backing && candidate_a.commitments.hash() == candidate_receipt.commitments_hash => { - tx.send(Ok(ValidationResult::Invalid(InvalidCandidate::BadReturn))).unwrap(); + response_sender.send(Ok(ValidationResult::Invalid(InvalidCandidate::BadReturn))).unwrap(); } ); @@ -1092,22 +1092,22 @@ fn backing_dont_second_invalid() { assert_matches!( virtual_overseer.recv().await, AllMessages::CandidateValidation( - CandidateValidationMessage::ValidateFromExhaustive( - pvd, - _validation_code, + CandidateValidationMessage::ValidateFromExhaustive { + validation_data, + validation_code, candidate_receipt, - _pov, - _, - timeout, - tx, - ), - ) if pvd == pvd_b && - _validation_code == validation_code_b && - *_pov == pov_block_b && &candidate_receipt.descriptor == candidate_b.descriptor() && - timeout == PvfExecTimeoutKind::Backing && + pov, + exec_timeout_kind, + response_sender, + .. 
+ }, + ) if validation_data == pvd_b && + validation_code == validation_code_b && + *pov == pov_block_b && &candidate_receipt.descriptor == candidate_b.descriptor() && + exec_timeout_kind == PvfExecTimeoutKind::Backing && candidate_b.commitments.hash() == candidate_receipt.commitments_hash => { - tx.send(Ok( + response_sender.send(Ok( ValidationResult::Valid(CandidateCommitments { head_data: expected_head_data.clone(), upward_messages: Default::default(), @@ -1158,19 +1158,19 @@ fn backing_second_after_first_fails_works() { test_harness(test_state.keystore.clone(), |mut virtual_overseer| async move { test_startup(&mut virtual_overseer, &test_state).await; - let pov = PoV { block_data: BlockData(vec![42, 43, 44]) }; - let pvd = dummy_pvd(); - let validation_code = ValidationCode(vec![1, 2, 3]); + let pov_a = PoV { block_data: BlockData(vec![42, 43, 44]) }; + let pvd_a = dummy_pvd(); + let validation_code_a = ValidationCode(vec![1, 2, 3]); - let pov_hash = pov.hash(); + let pov_hash = pov_a.hash(); let candidate = TestCandidateBuilder { para_id: test_state.chain_ids[0], relay_parent: test_state.relay_parent, pov_hash, - erasure_root: make_erasure_root(&test_state, pov.clone(), pvd.clone()), - persisted_validation_data_hash: pvd.hash(), - validation_code: validation_code.0.clone(), + erasure_root: make_erasure_root(&test_state, pov_a.clone(), pvd_a.clone()), + persisted_validation_data_hash: pvd_a.hash(), + validation_code: validation_code_a.0.clone(), ..Default::default() } .build(); @@ -1184,7 +1184,7 @@ fn backing_second_after_first_fails_works() { let signed_a = SignedFullStatementWithPVD::sign( &test_state.keystore, - StatementWithPVD::Seconded(candidate.clone(), pvd.clone()), + StatementWithPVD::Seconded(candidate.clone(), pvd_a.clone()), &test_state.signing_context, ValidatorIndex(2), &validator2.into(), @@ -1199,7 +1199,7 @@ fn backing_second_after_first_fails_works() { virtual_overseer.send(FromOrchestra::Communication { msg: statement }).await; - assert_validation_requests(&mut virtual_overseer, validation_code.clone()).await; + assert_validation_requests(&mut virtual_overseer, validation_code_a.clone()).await; // Subsystem requests PoV and requests validation. assert_matches!( @@ -1211,7 +1211,7 @@ fn backing_second_after_first_fails_works() { .. } ) if relay_parent == test_state.relay_parent => { - tx.send(pov.clone()).unwrap(); + tx.send(pov_a.clone()).unwrap(); } ); @@ -1219,22 +1219,22 @@ fn backing_second_after_first_fails_works() { assert_matches!( virtual_overseer.recv().await, AllMessages::CandidateValidation( - CandidateValidationMessage::ValidateFromExhaustive( - _pvd, - _validation_code, + CandidateValidationMessage::ValidateFromExhaustive { + validation_data, + validation_code, candidate_receipt, - _pov, - _, - timeout, - tx, - ), - ) if _pvd == pvd && - _validation_code == validation_code && - *_pov == pov && &candidate_receipt.descriptor == candidate.descriptor() && - timeout == PvfExecTimeoutKind::Backing && + pov, + exec_timeout_kind, + response_sender, + .. 
+ }, + ) if validation_data == pvd_a && + validation_code == validation_code_a && + *pov == pov_a && &candidate_receipt.descriptor == candidate.descriptor() && + exec_timeout_kind == PvfExecTimeoutKind::Backing && candidate.commitments.hash() == candidate_receipt.commitments_hash => { - tx.send(Ok(ValidationResult::Invalid(InvalidCandidate::BadReturn))).unwrap(); + response_sender.send(Ok(ValidationResult::Invalid(InvalidCandidate::BadReturn))).unwrap(); } ); @@ -1243,8 +1243,8 @@ fn backing_second_after_first_fails_works() { let second = CandidateBackingMessage::Second( test_state.relay_parent, candidate.to_plain(), - pvd.clone(), - pov.clone(), + pvd_a.clone(), + pov_a.clone(), ); virtual_overseer.send(FromOrchestra::Communication { msg: second }).await; @@ -1287,7 +1287,7 @@ fn backing_second_after_first_fails_works() { assert_matches!( virtual_overseer.recv().await, AllMessages::CandidateValidation( - CandidateValidationMessage::ValidateFromExhaustive(_, _, _, pov, ..), + CandidateValidationMessage::ValidateFromExhaustive { pov, .. }, ) => { assert_eq!(&*pov, &pov_to_second); } @@ -1304,18 +1304,18 @@ fn backing_works_after_failed_validation() { test_harness(test_state.keystore.clone(), |mut virtual_overseer| async move { test_startup(&mut virtual_overseer, &test_state).await; - let pov = PoV { block_data: BlockData(vec![42, 43, 44]) }; - let pvd = dummy_pvd(); - let validation_code = ValidationCode(vec![1, 2, 3]); + let pov_a = PoV { block_data: BlockData(vec![42, 43, 44]) }; + let pvd_a = dummy_pvd(); + let validation_code_a = ValidationCode(vec![1, 2, 3]); - let pov_hash = pov.hash(); + let pov_hash = pov_a.hash(); let candidate = TestCandidateBuilder { para_id: test_state.chain_ids[0], relay_parent: test_state.relay_parent, pov_hash, - erasure_root: make_erasure_root(&test_state, pov.clone(), pvd.clone()), - validation_code: validation_code.0.clone(), + erasure_root: make_erasure_root(&test_state, pov_a.clone(), pvd_a.clone()), + validation_code: validation_code_a.0.clone(), ..Default::default() } .build(); @@ -1328,7 +1328,7 @@ fn backing_works_after_failed_validation() { .expect("Insert key into keystore"); let signed_a = SignedFullStatementWithPVD::sign( &test_state.keystore, - StatementWithPVD::Seconded(candidate.clone(), pvd.clone()), + StatementWithPVD::Seconded(candidate.clone(), pvd_a.clone()), &test_state.signing_context, ValidatorIndex(2), &public2.into(), @@ -1343,7 +1343,7 @@ fn backing_works_after_failed_validation() { virtual_overseer.send(FromOrchestra::Communication { msg: statement }).await; - assert_validation_requests(&mut virtual_overseer, validation_code.clone()).await; + assert_validation_requests(&mut virtual_overseer, validation_code_a.clone()).await; // Subsystem requests PoV and requests validation. assert_matches!( @@ -1355,7 +1355,7 @@ fn backing_works_after_failed_validation() { .. 
} ) if relay_parent == test_state.relay_parent => { - tx.send(pov.clone()).unwrap(); + tx.send(pov_a.clone()).unwrap(); } ); @@ -1363,22 +1363,22 @@ fn backing_works_after_failed_validation() { assert_matches!( virtual_overseer.recv().await, AllMessages::CandidateValidation( - CandidateValidationMessage::ValidateFromExhaustive( - _pvd, - _validation_code, + CandidateValidationMessage::ValidateFromExhaustive { + validation_data, + validation_code, candidate_receipt, - _pov, - _, - timeout, - tx, - ), - ) if _pvd == pvd && - _validation_code == validation_code && - *_pov == pov && &candidate_receipt.descriptor == candidate.descriptor() && - timeout == PvfExecTimeoutKind::Backing && + pov, + exec_timeout_kind, + response_sender, + .. + }, + ) if validation_data == pvd_a && + validation_code == validation_code_a && + *pov == pov_a && &candidate_receipt.descriptor == candidate.descriptor() && + exec_timeout_kind == PvfExecTimeoutKind::Backing && candidate.commitments.hash() == candidate_receipt.commitments_hash => { - tx.send(Err(ValidationFailed("Internal test error".into()))).unwrap(); + response_sender.send(Err(ValidationFailed("Internal test error".into()))).unwrap(); } ); @@ -1475,19 +1475,19 @@ fn retry_works() { test_harness(test_state.keystore.clone(), |mut virtual_overseer| async move { test_startup(&mut virtual_overseer, &test_state).await; - let pov = PoV { block_data: BlockData(vec![42, 43, 44]) }; - let pvd = dummy_pvd(); - let validation_code = ValidationCode(vec![1, 2, 3]); + let pov_a = PoV { block_data: BlockData(vec![42, 43, 44]) }; + let pvd_a = dummy_pvd(); + let validation_code_a = ValidationCode(vec![1, 2, 3]); - let pov_hash = pov.hash(); + let pov_hash = pov_a.hash(); let candidate = TestCandidateBuilder { para_id: test_state.chain_ids[0], relay_parent: test_state.relay_parent, pov_hash, - erasure_root: make_erasure_root(&test_state, pov.clone(), pvd.clone()), - persisted_validation_data_hash: pvd.hash(), - validation_code: validation_code.0.clone(), + erasure_root: make_erasure_root(&test_state, pov_a.clone(), pvd_a.clone()), + persisted_validation_data_hash: pvd_a.hash(), + validation_code: validation_code_a.0.clone(), ..Default::default() } .build(); @@ -1512,7 +1512,7 @@ fn retry_works() { .expect("Insert key into keystore"); let signed_a = SignedFullStatementWithPVD::sign( &test_state.keystore, - StatementWithPVD::Seconded(candidate.clone(), pvd.clone()), + StatementWithPVD::Seconded(candidate.clone(), pvd_a.clone()), &test_state.signing_context, ValidatorIndex(2), &public2.into(), @@ -1546,7 +1546,7 @@ fn retry_works() { CandidateBackingMessage::Statement(test_state.relay_parent, signed_a.clone()); virtual_overseer.send(FromOrchestra::Communication { msg: statement }).await; - assert_validation_requests(&mut virtual_overseer, validation_code.clone()).await; + assert_validation_requests(&mut virtual_overseer, validation_code_a.clone()).await; // Subsystem requests PoV and requests validation. // We cancel - should mean retry on next backing statement. 
@@ -1584,8 +1584,8 @@ fn retry_works() { AllMessages::RuntimeApi(RuntimeApiMessage::Request( _, RuntimeApiRequest::ValidationCodeByHash(hash, tx), - )) if hash == validation_code.hash() => { - tx.send(Ok(Some(validation_code.clone()))).unwrap(); + )) if hash == validation_code_a.hash() => { + tx.send(Ok(Some(validation_code_a.clone()))).unwrap(); }, AllMessages::RuntimeApi(RuntimeApiMessage::Request( _, @@ -1609,7 +1609,7 @@ fn retry_works() { CandidateBackingMessage::Statement(test_state.relay_parent, signed_c.clone()); virtual_overseer.send(FromOrchestra::Communication { msg: statement }).await; - assert_validation_requests(&mut virtual_overseer, validation_code.clone()).await; + assert_validation_requests(&mut virtual_overseer, validation_code_a.clone()).await; assert_matches!( virtual_overseer.recv().await, @@ -1622,26 +1622,25 @@ fn retry_works() { // Subsystem requests PoV and requests validation. // Now we pass. ) if relay_parent == test_state.relay_parent => { - tx.send(pov.clone()).unwrap(); + tx.send(pov_a.clone()).unwrap(); } ); assert_matches!( virtual_overseer.recv().await, AllMessages::CandidateValidation( - CandidateValidationMessage::ValidateFromExhaustive( - _pvd, - _validation_code, + CandidateValidationMessage::ValidateFromExhaustive { + validation_data, + validation_code, candidate_receipt, - _pov, - _, - timeout, + pov, + exec_timeout_kind, .. - ), - ) if _pvd == pvd && - _validation_code == validation_code && - *_pov == pov && &candidate_receipt.descriptor == candidate.descriptor() && - timeout == PvfExecTimeoutKind::Backing && + }, + ) if validation_data == pvd_a && + validation_code == validation_code_a && + *pov == pov_a && &candidate_receipt.descriptor == candidate.descriptor() && + exec_timeout_kind == PvfExecTimeoutKind::Backing && candidate.commitments.hash() == candidate_receipt.commitments_hash ); virtual_overseer @@ -1863,9 +1862,9 @@ fn cannot_second_multiple_candidates_per_parent() { assert_matches!( virtual_overseer.recv().await, AllMessages::CandidateValidation( - CandidateValidationMessage::ValidateFromExhaustive(.., tx), + CandidateValidationMessage::ValidateFromExhaustive { response_sender, .. }, ) => { - tx.send(Ok(ValidationResult::Valid( + response_sender.send(Ok(ValidationResult::Valid( CandidateCommitments { head_data: expected_head_data.clone(), horizontal_messages: Default::default(), diff --git a/polkadot/node/core/backing/src/tests/prospective_parachains.rs b/polkadot/node/core/backing/src/tests/prospective_parachains.rs index b79515ed37a..fc4bd7d98e7 100644 --- a/polkadot/node/core/backing/src/tests/prospective_parachains.rs +++ b/polkadot/node/core/backing/src/tests/prospective_parachains.rs @@ -202,13 +202,13 @@ async fn assert_validate_seconded_candidate( virtual_overseer: &mut VirtualOverseer, relay_parent: Hash, candidate: &CommittedCandidateReceipt, - pov: &PoV, - pvd: &PersistedValidationData, - validation_code: &ValidationCode, + assert_pov: &PoV, + assert_pvd: &PersistedValidationData, + assert_validation_code: &ValidationCode, expected_head_data: &HeadData, fetch_pov: bool, ) { - assert_validation_requests(virtual_overseer, validation_code.clone()).await; + assert_validation_requests(virtual_overseer, assert_validation_code.clone()).await; if fetch_pov { assert_matches!( @@ -220,29 +220,29 @@ async fn assert_validate_seconded_candidate( .. 
} ) if hash == relay_parent => { - tx.send(pov.clone()).unwrap(); + tx.send(assert_pov.clone()).unwrap(); } ); } assert_matches!( virtual_overseer.recv().await, - AllMessages::CandidateValidation(CandidateValidationMessage::ValidateFromExhaustive( - _pvd, - _validation_code, + AllMessages::CandidateValidation(CandidateValidationMessage::ValidateFromExhaustive { + validation_data, + validation_code, candidate_receipt, - _pov, - _, - timeout, - tx, - )) if &_pvd == pvd && - &_validation_code == validation_code && - &*_pov == pov && + pov, + exec_timeout_kind, + response_sender, + .. + }) if &validation_data == assert_pvd && + &validation_code == assert_validation_code && + &*pov == assert_pov && &candidate_receipt.descriptor == candidate.descriptor() && - timeout == PvfExecTimeoutKind::Backing && + exec_timeout_kind == PvfExecTimeoutKind::Backing && candidate.commitments.hash() == candidate_receipt.commitments_hash => { - tx.send(Ok(ValidationResult::Valid( + response_sender.send(Ok(ValidationResult::Valid( CandidateCommitments { head_data: expected_head_data.clone(), horizontal_messages: Default::default(), @@ -251,7 +251,7 @@ async fn assert_validate_seconded_candidate( processed_downward_messages: 0, hrmp_watermark: 0, }, - pvd.clone(), + assert_pvd.clone(), ))) .unwrap(); } @@ -1293,9 +1293,13 @@ fn concurrent_dependent_candidates() { tx.send(pov.clone()).unwrap(); }, AllMessages::CandidateValidation( - CandidateValidationMessage::ValidateFromExhaustive(.., candidate, _, _, _, tx), + CandidateValidationMessage::ValidateFromExhaustive { + candidate_receipt, + response_sender, + .. + }, ) => { - let candidate_hash = candidate.hash(); + let candidate_hash = candidate_receipt.hash(); let (head_data, pvd) = if candidate_hash == candidate_a_hash { (&head_data[1], &pvd_a) } else if candidate_hash == candidate_b_hash { @@ -1303,18 +1307,19 @@ fn concurrent_dependent_candidates() { } else { panic!("unknown candidate hash") }; - tx.send(Ok(ValidationResult::Valid( - CandidateCommitments { - head_data: head_data.clone(), - horizontal_messages: Default::default(), - upward_messages: Default::default(), - new_validation_code: None, - processed_downward_messages: 0, - hrmp_watermark: 0, - }, - pvd.clone(), - ))) - .unwrap(); + response_sender + .send(Ok(ValidationResult::Valid( + CandidateCommitments { + head_data: head_data.clone(), + horizontal_messages: Default::default(), + upward_messages: Default::default(), + new_validation_code: None, + processed_downward_messages: 0, + hrmp_watermark: 0, + }, + pvd.clone(), + ))) + .unwrap(); }, AllMessages::AvailabilityStore(AvailabilityStoreMessage::StoreAvailableData { tx, diff --git a/polkadot/node/core/candidate-validation/src/lib.rs b/polkadot/node/core/candidate-validation/src/lib.rs index 93db7d11cee..89ea0272884 100644 --- a/polkadot/node/core/candidate-validation/src/lib.rs +++ b/polkadot/node/core/candidate-validation/src/lib.rs @@ -159,13 +159,14 @@ async fn run( FromOrchestra::Signal(OverseerSignal::BlockFinalized(..)) => {}, FromOrchestra::Signal(OverseerSignal::Conclude) => return Ok(()), FromOrchestra::Communication { msg } => match msg { - CandidateValidationMessage::ValidateFromChainState( + CandidateValidationMessage::ValidateFromChainState { candidate_receipt, pov, executor_params, - timeout, + exec_timeout_kind, response_sender, - ) => { + .. 
+ } => { let bg = { let mut sender = ctx.sender().clone(); let metrics = metrics.clone(); @@ -179,7 +180,7 @@ async fn run( candidate_receipt, pov, executor_params, - timeout, + exec_timeout_kind, &metrics, ) .await; @@ -191,15 +192,16 @@ async fn run( ctx.spawn("validate-from-chain-state", bg.boxed())?; }, - CandidateValidationMessage::ValidateFromExhaustive( - persisted_validation_data, + CandidateValidationMessage::ValidateFromExhaustive { + validation_data, validation_code, candidate_receipt, pov, executor_params, - timeout, + exec_timeout_kind, response_sender, - ) => { + .. + } => { let bg = { let metrics = metrics.clone(); let validation_host = validation_host.clone(); @@ -208,12 +210,12 @@ async fn run( let _timer = metrics.time_validate_from_exhaustive(); let res = validate_candidate_exhaustive( validation_host, - persisted_validation_data, + validation_data, validation_code, candidate_receipt, pov, executor_params, - timeout, + exec_timeout_kind, &metrics, ) .await; @@ -225,11 +227,12 @@ async fn run( ctx.spawn("validate-from-exhaustive", bg.boxed())?; }, - CandidateValidationMessage::PreCheck( + CandidateValidationMessage::PreCheck { relay_parent, validation_code_hash, response_sender, - ) => { + .. + } => { let bg = { let mut sender = ctx.sender().clone(); let validation_host = validation_host.clone(); diff --git a/polkadot/node/core/dispute-coordinator/src/participation/mod.rs b/polkadot/node/core/dispute-coordinator/src/participation/mod.rs index 35d07b411e8..90268516e9d 100644 --- a/polkadot/node/core/dispute-coordinator/src/participation/mod.rs +++ b/polkadot/node/core/dispute-coordinator/src/participation/mod.rs @@ -380,15 +380,15 @@ async fn participate( // same level of leeway. let (validation_tx, validation_rx) = oneshot::channel(); sender - .send_message(CandidateValidationMessage::ValidateFromExhaustive( - available_data.validation_data, + .send_message(CandidateValidationMessage::ValidateFromExhaustive { + validation_data: available_data.validation_data, validation_code, - req.candidate_receipt().clone(), - available_data.pov, - req.executor_params(), - PvfExecTimeoutKind::Approval, - validation_tx, - )) + candidate_receipt: req.candidate_receipt().clone(), + pov: available_data.pov, + executor_params: req.executor_params(), + exec_timeout_kind: PvfExecTimeoutKind::Approval, + response_sender: validation_tx, + }) .await; // we cast votes (either positive or negative) depending on the outcome of diff --git a/polkadot/node/core/dispute-coordinator/src/participation/tests.rs b/polkadot/node/core/dispute-coordinator/src/participation/tests.rs index ee6b950720e..0aa0d772005 100644 --- a/polkadot/node/core/dispute-coordinator/src/participation/tests.rs +++ b/polkadot/node/core/dispute-coordinator/src/participation/tests.rs @@ -115,12 +115,12 @@ pub async fn participation_full_happy_path( assert_matches!( ctx_handle.recv().await, AllMessages::CandidateValidation( - CandidateValidationMessage::ValidateFromExhaustive(_, _, candidate_receipt, _, _, timeout, tx) - ) if timeout == PvfExecTimeoutKind::Approval => { + CandidateValidationMessage::ValidateFromExhaustive { candidate_receipt, exec_timeout_kind, response_sender, .. 
} + ) if exec_timeout_kind == PvfExecTimeoutKind::Approval => { if expected_commitments_hash != candidate_receipt.commitments_hash { - tx.send(Ok(ValidationResult::Invalid(InvalidCandidate::CommitmentsHashMismatch))).unwrap(); + response_sender.send(Ok(ValidationResult::Invalid(InvalidCandidate::CommitmentsHashMismatch))).unwrap(); } else { - tx.send(Ok(ValidationResult::Valid(dummy_candidate_commitments(None), PersistedValidationData::default()))).unwrap(); + response_sender.send(Ok(ValidationResult::Valid(dummy_candidate_commitments(None), PersistedValidationData::default()))).unwrap(); } }, "overseer did not receive candidate validation message", @@ -450,9 +450,9 @@ fn cast_invalid_vote_if_validation_fails_or_is_invalid() { assert_matches!( ctx_handle.recv().await, AllMessages::CandidateValidation( - CandidateValidationMessage::ValidateFromExhaustive(_, _, _, _, _, timeout, tx) - ) if timeout == PvfExecTimeoutKind::Approval => { - tx.send(Ok(ValidationResult::Invalid(InvalidCandidate::Timeout))).unwrap(); + CandidateValidationMessage::ValidateFromExhaustive { exec_timeout_kind, response_sender, .. } + ) if exec_timeout_kind == PvfExecTimeoutKind::Approval => { + response_sender.send(Ok(ValidationResult::Invalid(InvalidCandidate::Timeout))).unwrap(); }, "overseer did not receive candidate validation message", ); @@ -487,9 +487,9 @@ fn cast_invalid_vote_if_commitments_dont_match() { assert_matches!( ctx_handle.recv().await, AllMessages::CandidateValidation( - CandidateValidationMessage::ValidateFromExhaustive(_, _, _, _, _, timeout, tx) - ) if timeout == PvfExecTimeoutKind::Approval => { - tx.send(Ok(ValidationResult::Invalid(InvalidCandidate::CommitmentsHashMismatch))).unwrap(); + CandidateValidationMessage::ValidateFromExhaustive { exec_timeout_kind, response_sender, .. } + ) if exec_timeout_kind == PvfExecTimeoutKind::Approval => { + response_sender.send(Ok(ValidationResult::Invalid(InvalidCandidate::CommitmentsHashMismatch))).unwrap(); }, "overseer did not receive candidate validation message", ); @@ -524,9 +524,9 @@ fn cast_valid_vote_if_validation_passes() { assert_matches!( ctx_handle.recv().await, AllMessages::CandidateValidation( - CandidateValidationMessage::ValidateFromExhaustive(_, _, _, _, _, timeout, tx) - ) if timeout == PvfExecTimeoutKind::Approval => { - tx.send(Ok(ValidationResult::Valid(dummy_candidate_commitments(None), PersistedValidationData::default()))).unwrap(); + CandidateValidationMessage::ValidateFromExhaustive { exec_timeout_kind, response_sender, .. 
} + ) if exec_timeout_kind == PvfExecTimeoutKind::Approval => { + response_sender.send(Ok(ValidationResult::Valid(dummy_candidate_commitments(None), PersistedValidationData::default()))).unwrap(); }, "overseer did not receive candidate validation message", ); diff --git a/polkadot/node/core/pvf-checker/src/lib.rs b/polkadot/node/core/pvf-checker/src/lib.rs index 2946f3f7886..043da00eba1 100644 --- a/polkadot/node/core/pvf-checker/src/lib.rs +++ b/polkadot/node/core/pvf-checker/src/lib.rs @@ -535,7 +535,11 @@ async fn initiate_precheck( let (tx, rx) = oneshot::channel(); sender - .send_message(CandidateValidationMessage::PreCheck(relay_parent, validation_code_hash, tx)) + .send_message(CandidateValidationMessage::PreCheck { + relay_parent, + validation_code_hash, + response_sender: tx, + }) .await; let timer = metrics.time_pre_check_judgement(); diff --git a/polkadot/node/core/pvf-checker/src/tests.rs b/polkadot/node/core/pvf-checker/src/tests.rs index 4ac6e5eba31..b0401ecdc3b 100644 --- a/polkadot/node/core/pvf-checker/src/tests.rs +++ b/polkadot/node/core/pvf-checker/src/tests.rs @@ -267,11 +267,12 @@ impl TestState { handle: &mut VirtualOverseer, ) -> ExpectCandidatePrecheck { match self.recv_timeout(handle).await.expect("timeout waiting for a message") { - AllMessages::CandidateValidation(CandidateValidationMessage::PreCheck( + AllMessages::CandidateValidation(CandidateValidationMessage::PreCheck { relay_parent, validation_code_hash, - tx, - )) => ExpectCandidatePrecheck { relay_parent, validation_code_hash, tx }, + response_sender, + .. + }) => ExpectCandidatePrecheck { relay_parent, validation_code_hash, tx: response_sender }, msg => panic!("Unexpected message was received: {:#?}", msg), } } diff --git a/polkadot/node/malus/src/variants/common.rs b/polkadot/node/malus/src/variants/common.rs index 365b2f16ac2..474887ee8df 100644 --- a/polkadot/node/malus/src/variants/common.rs +++ b/polkadot/node/malus/src/variants/common.rs @@ -273,30 +273,31 @@ where // Message sent by the approval voting subsystem FromOrchestra::Communication { msg: - CandidateValidationMessage::ValidateFromExhaustive( + CandidateValidationMessage::ValidateFromExhaustive { validation_data, validation_code, candidate_receipt, pov, executor_params, - timeout, - sender, - ), + exec_timeout_kind, + response_sender, + .. + }, } => { match self.fake_validation { - x if x.misbehaves_valid() && x.should_misbehave(timeout) => { + x if x.misbehaves_valid() && x.should_misbehave(exec_timeout_kind) => { // Behave normally if the `PoV` is not known to be malicious. 
if pov.block_data.0.as_slice() != MALICIOUS_POV { return Some(FromOrchestra::Communication { - msg: CandidateValidationMessage::ValidateFromExhaustive( + msg: CandidateValidationMessage::ValidateFromExhaustive { validation_data, validation_code, candidate_receipt, pov, executor_params, - timeout, - sender, - ), + exec_timeout_kind, + response_sender, + }, }) } // Create the fake response with probability `p` if the `PoV` is malicious, @@ -313,7 +314,7 @@ where create_validation_response( validation_data, candidate_receipt.descriptor, - sender, + response_sender, ); None }, @@ -326,20 +327,20 @@ where ); Some(FromOrchestra::Communication { - msg: CandidateValidationMessage::ValidateFromExhaustive( + msg: CandidateValidationMessage::ValidateFromExhaustive { validation_data, validation_code, candidate_receipt, pov, executor_params, - timeout, - sender, - ), + exec_timeout_kind, + response_sender, + }, }) }, } }, - x if x.misbehaves_invalid() && x.should_misbehave(timeout) => { + x if x.misbehaves_invalid() && x.should_misbehave(exec_timeout_kind) => { // Set the validation result to invalid with probability `p` and trigger a // dispute let behave_maliciously = self.distribution.sample(&mut rand::thread_rng()); @@ -358,7 +359,7 @@ where // We're not even checking the candidate, this makes us appear // faster than honest validators. - sender.send(Ok(validation_result)).unwrap(); + response_sender.send(Ok(validation_result)).unwrap(); None }, false => { @@ -366,56 +367,57 @@ where gum::info!(target: MALUS, "😈 'Decided' to not act maliciously.",); Some(FromOrchestra::Communication { - msg: CandidateValidationMessage::ValidateFromExhaustive( + msg: CandidateValidationMessage::ValidateFromExhaustive { validation_data, validation_code, candidate_receipt, pov, executor_params, - timeout, - sender, - ), + exec_timeout_kind, + response_sender, + }, }) }, } }, // Handle FakeCandidateValidation::Disabled _ => Some(FromOrchestra::Communication { - msg: CandidateValidationMessage::ValidateFromExhaustive( + msg: CandidateValidationMessage::ValidateFromExhaustive { validation_data, validation_code, candidate_receipt, pov, executor_params, - timeout, - sender, - ), + exec_timeout_kind, + response_sender, + }, }), } }, // Behaviour related to the backing subsystem FromOrchestra::Communication { msg: - CandidateValidationMessage::ValidateFromChainState( + CandidateValidationMessage::ValidateFromChainState { candidate_receipt, pov, executor_params, - timeout, + exec_timeout_kind, response_sender, - ), + .. + }, } => { match self.fake_validation { - x if x.misbehaves_valid() && x.should_misbehave(timeout) => { + x if x.misbehaves_valid() && x.should_misbehave(exec_timeout_kind) => { // Behave normally if the `PoV` is not known to be malicious. 
if pov.block_data.0.as_slice() != MALICIOUS_POV { return Some(FromOrchestra::Communication { - msg: CandidateValidationMessage::ValidateFromChainState( + msg: CandidateValidationMessage::ValidateFromChainState { candidate_receipt, pov, executor_params, - timeout, + exec_timeout_kind, response_sender, - ), + }, }) } // If the `PoV` is malicious, back the candidate with some probability `p`, @@ -439,17 +441,17 @@ where // If the `PoV` is malicious, we behave normally with some probability // `(1-p)` false => Some(FromOrchestra::Communication { - msg: CandidateValidationMessage::ValidateFromChainState( + msg: CandidateValidationMessage::ValidateFromChainState { candidate_receipt, pov, executor_params, - timeout, + exec_timeout_kind, response_sender, - ), + }, }), } }, - x if x.misbehaves_invalid() && x.should_misbehave(timeout) => { + x if x.misbehaves_invalid() && x.should_misbehave(exec_timeout_kind) => { // Maliciously set the validation result to invalid for a valid candidate // with probability `p` let behave_maliciously = self.distribution.sample(&mut rand::thread_rng()); @@ -473,25 +475,25 @@ where gum::info!(target: MALUS, "😈 'Decided' to not act maliciously.",); Some(FromOrchestra::Communication { - msg: CandidateValidationMessage::ValidateFromChainState( + msg: CandidateValidationMessage::ValidateFromChainState { candidate_receipt, pov, executor_params, - timeout, + exec_timeout_kind, response_sender, - ), + }, }) }, } }, _ => Some(FromOrchestra::Communication { - msg: CandidateValidationMessage::ValidateFromChainState( + msg: CandidateValidationMessage::ValidateFromChainState { candidate_receipt, pov, executor_params, - timeout, + exec_timeout_kind, response_sender, - ), + }, }), } }, diff --git a/polkadot/node/overseer/examples/minimal-example.rs b/polkadot/node/overseer/examples/minimal-example.rs index cffdfd9f8aa..b2c0ea2f75a 100644 --- a/polkadot/node/overseer/examples/minimal-example.rs +++ b/polkadot/node/overseer/examples/minimal-example.rs @@ -73,13 +73,13 @@ impl Subsystem1 { commitments_hash: Hash::zero(), }; - let msg = CandidateValidationMessage::ValidateFromChainState( + let msg = CandidateValidationMessage::ValidateFromChainState { candidate_receipt, - PoV { block_data: BlockData(Vec::new()) }.into(), - Default::default(), - PvfExecTimeoutKind::Backing, - tx, - ); + pov: PoV { block_data: BlockData(Vec::new()) }.into(), + executor_params: Default::default(), + exec_timeout_kind: PvfExecTimeoutKind::Backing, + response_sender: tx, + }; ctx.send_message(msg).await; } () diff --git a/polkadot/node/overseer/src/tests.rs b/polkadot/node/overseer/src/tests.rs index c17613fb7ea..254f5fe4512 100644 --- a/polkadot/node/overseer/src/tests.rs +++ b/polkadot/node/overseer/src/tests.rs @@ -102,13 +102,13 @@ where }; let (tx, _) = oneshot::channel(); - ctx.send_message(CandidateValidationMessage::ValidateFromChainState( + ctx.send_message(CandidateValidationMessage::ValidateFromChainState { candidate_receipt, - PoV { block_data: BlockData(Vec::new()) }.into(), - Default::default(), - PvfExecTimeoutKind::Backing, - tx, - )) + pov: PoV { block_data: BlockData(Vec::new()) }.into(), + executor_params: Default::default(), + exec_timeout_kind: PvfExecTimeoutKind::Backing, + response_sender: tx, + }) .await; c += 1; continue @@ -793,20 +793,20 @@ where } fn test_candidate_validation_msg() -> CandidateValidationMessage { - let (sender, _) = oneshot::channel(); + let (response_sender, _) = oneshot::channel(); let pov = Arc::new(PoV { block_data: BlockData(Vec::new()) }); let 
candidate_receipt = CandidateReceipt { descriptor: dummy_candidate_descriptor(dummy_hash()), commitments_hash: Hash::zero(), }; - CandidateValidationMessage::ValidateFromChainState( + CandidateValidationMessage::ValidateFromChainState { candidate_receipt, pov, - Default::default(), - PvfExecTimeoutKind::Backing, - sender, - ) + executor_params: Default::default(), + exec_timeout_kind: PvfExecTimeoutKind::Backing, + response_sender, + } } fn test_candidate_backing_msg() -> CandidateBackingMessage { diff --git a/polkadot/node/subsystem-types/src/messages.rs b/polkadot/node/subsystem-types/src/messages.rs index ce4fa6633dc..4ddffc6dc5e 100644 --- a/polkadot/node/subsystem-types/src/messages.rs +++ b/polkadot/node/subsystem-types/src/messages.rs @@ -143,14 +143,18 @@ pub enum CandidateValidationMessage { /// /// If there is no state available which can provide this data or the core for /// the para is not free at the relay-parent, an error is returned. - ValidateFromChainState( - CandidateReceipt, - Arc, - ExecutorParams, - /// Execution timeout - PvfExecTimeoutKind, - oneshot::Sender>, - ), + ValidateFromChainState { + /// The candidate receipt + candidate_receipt: CandidateReceipt, + /// The proof-of-validity + pov: Arc, + /// Session's executor parameters + executor_params: ExecutorParams, + /// Execution timeout kind (backing/approvals) + exec_timeout_kind: PvfExecTimeoutKind, + /// The sending side of the response channel + response_sender: oneshot::Sender>, + }, /// Validate a candidate with provided, exhaustive parameters for validation. /// /// Explicitly provide the `PersistedValidationData` and `ValidationCode` so this can do full @@ -160,27 +164,35 @@ pub enum CandidateValidationMessage { /// cases where the validity of the candidate is established. This is the case for the typical /// use-case: secondary checkers would use this request relying on the full prior checks /// performed by the relay-chain. - ValidateFromExhaustive( - PersistedValidationData, - ValidationCode, - CandidateReceipt, - Arc, - ExecutorParams, - /// Execution timeout - PvfExecTimeoutKind, - oneshot::Sender>, - ), + ValidateFromExhaustive { + /// Persisted validation data + validation_data: PersistedValidationData, + /// Validation code + validation_code: ValidationCode, + /// The candidate receipt + candidate_receipt: CandidateReceipt, + /// The proof-of-validity + pov: Arc, + /// Session's executor parameters + executor_params: ExecutorParams, + /// Execution timeout kind (backing/approvals) + exec_timeout_kind: PvfExecTimeoutKind, + /// The sending side of the response channel + response_sender: oneshot::Sender>, + }, /// Try to compile the given validation code and send back /// the outcome. /// /// The validation code is specified by the hash and will be queried from the runtime API at /// the given relay-parent. - PreCheck( - // Relay-parent - Hash, - ValidationCodeHash, - oneshot::Sender, - ), + PreCheck { + /// Relay-parent + relay_parent: Hash, + /// Validation code hash + validation_code_hash: ValidationCodeHash, + /// The sending side of the response channel + response_sender: oneshot::Sender, + }, } /// Messages received by the Collator Protocol subsystem. -- GitLab From ffa0e30e58234d85e24a3a55b088fb676ef71bfd Mon Sep 17 00:00:00 2001 From: Ignacio Palacios Date: Wed, 8 Nov 2023 16:02:03 +0100 Subject: [PATCH 010/199] [xcm-emulator] Chains generic over Network & Integration tests restructure (#2092) Closes: - #1383 - Declared chains can be now be imported and reused in a different crate. 
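For illustration, a minimal sketch of the shape this enables (the type and crate names below are assumptions for the example, not identifiers taken verbatim from this patch): a chain is declared once, generic over the emulated network `N` it is plugged into, and a network crate instantiates the concrete chains, so a test crate only depends on the network it actually exercises.

```rust
use std::marker::PhantomData;

// Declared once, reusable from any crate: the chain is generic over the
// emulated network `N` it will later be wired into.
pub struct Rococo<N>(PhantomData<N>);
pub struct AssetHubRococo<N>(PhantomData<N>);

// A network crate (e.g. something along the lines of
// `rococo-system-emulated-network`) ties the concrete chains together.
pub struct RococoSystemNet;
pub type RococoRelay = Rococo<RococoSystemNet>;
pub type AssetHubRococoPara = AssetHubRococo<RococoSystemNet>;

fn main() {
    // An integration-test crate now imports only the network crate it needs,
    // instead of compiling every declared chain.
    let _relay: RococoRelay = Rococo(PhantomData);
    let _asset_hub: AssetHubRococoPara = AssetHubRococo(PhantomData);
}
```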
- Chain declaration are now generic over a generic type `N` (the Network) - #1389 - Solved #1383, chains and networks declarations can be restructure to avoid having to compile all chains when running integrations tests where are not needed. - Chains are now declared on its own crate (removed from `integration-tests-common`) - Networks are now declared on its own crate (removed from `integration-tests-common`) - Integration tests will import only the relevant Network crate - `integration-tests-common` is renamed to `emulated-integration-tests-common` All this is necessary to be able to implement what is described here: https://github.com/paritytech/roadmap/issues/56#issuecomment-1777010553 --------- Co-authored-by: command-bot <> --- .gitignore | 1 - Cargo.lock | 332 +++-- Cargo.toml | 20 +- .../src/tests/reserve_transfer.rs | 412 ------ .../assets/asset-hub-rococo/Cargo.toml | 56 - .../assets/asset-hub-westend/Cargo.toml | 71 - .../bridges/bridge-hub-rococo/Cargo.toml | 41 - .../bridges/bridge-hub-rococo/src/lib.rs | 67 - .../bridges/bridge-hub-westend/Cargo.toml | 41 - .../bridges/bridge-hub-westend/src/lib.rs | 67 - .../assets/asset-hub-rococo/Cargo.toml | 25 + .../assets/asset-hub-rococo/src/genesis.rs | 70 + .../assets/asset-hub-rococo/src/lib.rs | 55 + .../assets/asset-hub-westend/Cargo.toml | 25 + .../assets/asset-hub-westend/src/genesis.rs | 67 + .../assets/asset-hub-westend/src/lib.rs | 55 + .../assets/asset-hub-wococo/Cargo.toml | 26 + .../assets/asset-hub-wococo/src/lib.rs | 53 + .../bridges/bridge-hub-rococo/Cargo.toml | 24 + .../bridges/bridge-hub-rococo/src/genesis.rs | 83 ++ .../bridges/bridge-hub-rococo/src/lib.rs | 49 + .../bridges/bridge-hub-westend/Cargo.toml | 24 + .../bridges/bridge-hub-westend/src/genesis.rs | 75 + .../bridges/bridge-hub-westend/src/lib.rs | 49 + .../bridges/bridge-hub-wococo/Cargo.toml | 25 + .../bridges/bridge-hub-wococo/src/lib.rs | 47 + .../parachains/testing/penpal/Cargo.toml | 24 + .../parachains/testing/penpal/src/genesis.rs | 71 + .../parachains/testing/penpal/src/lib.rs | 65 + .../emulated/chains/relays/rococo/Cargo.toml | 29 + .../chains/relays/rococo/src/genesis.rs | 101 ++ .../emulated/chains/relays/rococo/src/lib.rs | 48 + .../emulated/chains/relays/westend/Cargo.toml | 30 + .../chains/relays/westend/src/genesis.rs | 109 ++ .../emulated/chains/relays/westend/src/lib.rs | 50 + .../emulated/chains/relays/wococo/Cargo.toml | 30 + .../emulated/chains/relays/wococo/src/lib.rs | 46 + .../emulated/common/Cargo.toml | 62 +- .../emulated/common/src/constants.rs | 1219 ----------------- .../emulated/common/src/impls.rs | 121 +- .../emulated/common/src/lib.rs | 470 ++----- .../emulated/common/src/macros.rs | 49 +- .../emulated/common/src/xcm_helpers.rs | 3 + .../networks/rococo-system/Cargo.toml | 16 + .../networks/rococo-system/src/lib.rs | 51 + .../networks/rococo-wococo-system/Cargo.toml | 18 + .../networks/rococo-wococo-system/src/lib.rs | 94 ++ .../networks/westend-system/Cargo.toml | 16 + .../networks/westend-system/src/lib.rs | 51 + .../networks/wococo-system/Cargo.toml | 16 + .../networks/wococo-system/src/lib.rs | 50 + .../tests/assets/asset-hub-rococo/Cargo.toml | 32 + .../assets/asset-hub-rococo/src/lib.rs | 48 +- .../assets/asset-hub-rococo/src/tests/mod.rs | 0 .../src/tests/reserve_transfer.rs | 24 +- .../assets/asset-hub-rococo/src/tests/send.rs | 13 +- .../src/tests/set_xcm_versions.rs | 0 .../assets/asset-hub-rococo/src/tests/swap.rs | 24 +- .../asset-hub-rococo/src/tests/teleport.rs | 0 .../tests/assets/asset-hub-westend/Cargo.toml 
| 41 + .../assets/asset-hub-westend/src/lib.rs | 46 +- .../assets/asset-hub-westend/src/tests/mod.rs | 0 .../src/tests/reserve_transfer.rs | 24 +- .../asset-hub-westend/src/tests/send.rs | 13 +- .../src/tests/set_xcm_versions.rs | 0 .../asset-hub-westend/src/tests/swap.rs | 26 +- .../asset-hub-westend/src/tests/teleport.rs | 2 - .../asset-hub-westend/src/tests/treasury.rs | 2 +- .../bridges/bridge-hub-rococo/Cargo.toml | 33 + .../bridges/bridge-hub-rococo/src/lib.rs | 60 + .../bridge-hub-rococo/src/tests/example.rs | 0 .../bridge-hub-rococo/src/tests/mod.rs | 0 .../bridge-hub-rococo/src/tests/teleport.rs | 0 .../bridges/bridge-hub-westend/Cargo.toml | 33 + .../bridges/bridge-hub-westend/src/lib.rs | 56 + .../bridge-hub-westend/src/tests/example.rs | 2 +- .../bridge-hub-westend/src/tests/mod.rs | 0 .../bridge-hub-westend/src/tests/teleport.rs | 2 +- cumulus/xcm/xcm-emulator/src/lib.rs | 234 ++-- 79 files changed, 2669 insertions(+), 2745 deletions(-) delete mode 100644 cumulus/parachains/integration-tests/emulated/assets/asset-hub-polkadot/src/tests/reserve_transfer.rs delete mode 100644 cumulus/parachains/integration-tests/emulated/assets/asset-hub-rococo/Cargo.toml delete mode 100644 cumulus/parachains/integration-tests/emulated/assets/asset-hub-westend/Cargo.toml delete mode 100644 cumulus/parachains/integration-tests/emulated/bridges/bridge-hub-rococo/Cargo.toml delete mode 100644 cumulus/parachains/integration-tests/emulated/bridges/bridge-hub-rococo/src/lib.rs delete mode 100644 cumulus/parachains/integration-tests/emulated/bridges/bridge-hub-westend/Cargo.toml delete mode 100644 cumulus/parachains/integration-tests/emulated/bridges/bridge-hub-westend/src/lib.rs create mode 100644 cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-rococo/Cargo.toml create mode 100644 cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-rococo/src/genesis.rs create mode 100644 cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-rococo/src/lib.rs create mode 100644 cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-westend/Cargo.toml create mode 100644 cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-westend/src/genesis.rs create mode 100644 cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-westend/src/lib.rs create mode 100644 cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-wococo/Cargo.toml create mode 100644 cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-wococo/src/lib.rs create mode 100644 cumulus/parachains/integration-tests/emulated/chains/parachains/bridges/bridge-hub-rococo/Cargo.toml create mode 100644 cumulus/parachains/integration-tests/emulated/chains/parachains/bridges/bridge-hub-rococo/src/genesis.rs create mode 100644 cumulus/parachains/integration-tests/emulated/chains/parachains/bridges/bridge-hub-rococo/src/lib.rs create mode 100644 cumulus/parachains/integration-tests/emulated/chains/parachains/bridges/bridge-hub-westend/Cargo.toml create mode 100644 cumulus/parachains/integration-tests/emulated/chains/parachains/bridges/bridge-hub-westend/src/genesis.rs create mode 100644 cumulus/parachains/integration-tests/emulated/chains/parachains/bridges/bridge-hub-westend/src/lib.rs create mode 100644 cumulus/parachains/integration-tests/emulated/chains/parachains/bridges/bridge-hub-wococo/Cargo.toml create mode 100644 
cumulus/parachains/integration-tests/emulated/chains/parachains/bridges/bridge-hub-wococo/src/lib.rs create mode 100644 cumulus/parachains/integration-tests/emulated/chains/parachains/testing/penpal/Cargo.toml create mode 100644 cumulus/parachains/integration-tests/emulated/chains/parachains/testing/penpal/src/genesis.rs create mode 100644 cumulus/parachains/integration-tests/emulated/chains/parachains/testing/penpal/src/lib.rs create mode 100644 cumulus/parachains/integration-tests/emulated/chains/relays/rococo/Cargo.toml create mode 100644 cumulus/parachains/integration-tests/emulated/chains/relays/rococo/src/genesis.rs create mode 100644 cumulus/parachains/integration-tests/emulated/chains/relays/rococo/src/lib.rs create mode 100644 cumulus/parachains/integration-tests/emulated/chains/relays/westend/Cargo.toml create mode 100644 cumulus/parachains/integration-tests/emulated/chains/relays/westend/src/genesis.rs create mode 100644 cumulus/parachains/integration-tests/emulated/chains/relays/westend/src/lib.rs create mode 100644 cumulus/parachains/integration-tests/emulated/chains/relays/wococo/Cargo.toml create mode 100644 cumulus/parachains/integration-tests/emulated/chains/relays/wococo/src/lib.rs delete mode 100644 cumulus/parachains/integration-tests/emulated/common/src/constants.rs create mode 100644 cumulus/parachains/integration-tests/emulated/networks/rococo-system/Cargo.toml create mode 100644 cumulus/parachains/integration-tests/emulated/networks/rococo-system/src/lib.rs create mode 100644 cumulus/parachains/integration-tests/emulated/networks/rococo-wococo-system/Cargo.toml create mode 100644 cumulus/parachains/integration-tests/emulated/networks/rococo-wococo-system/src/lib.rs create mode 100644 cumulus/parachains/integration-tests/emulated/networks/westend-system/Cargo.toml create mode 100644 cumulus/parachains/integration-tests/emulated/networks/westend-system/src/lib.rs create mode 100644 cumulus/parachains/integration-tests/emulated/networks/wococo-system/Cargo.toml create mode 100644 cumulus/parachains/integration-tests/emulated/networks/wococo-system/src/lib.rs create mode 100644 cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/Cargo.toml rename cumulus/parachains/integration-tests/emulated/{ => tests}/assets/asset-hub-rococo/src/lib.rs (67%) rename cumulus/parachains/integration-tests/emulated/{ => tests}/assets/asset-hub-rococo/src/tests/mod.rs (100%) rename cumulus/parachains/integration-tests/emulated/{ => tests}/assets/asset-hub-rococo/src/tests/reserve_transfer.rs (95%) rename cumulus/parachains/integration-tests/emulated/{ => tests}/assets/asset-hub-rococo/src/tests/send.rs (88%) rename cumulus/parachains/integration-tests/emulated/{ => tests}/assets/asset-hub-rococo/src/tests/set_xcm_versions.rs (100%) rename cumulus/parachains/integration-tests/emulated/{ => tests}/assets/asset-hub-rococo/src/tests/swap.rs (94%) rename cumulus/parachains/integration-tests/emulated/{ => tests}/assets/asset-hub-rococo/src/tests/teleport.rs (100%) create mode 100644 cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/Cargo.toml rename cumulus/parachains/integration-tests/emulated/{ => tests}/assets/asset-hub-westend/src/lib.rs (69%) rename cumulus/parachains/integration-tests/emulated/{ => tests}/assets/asset-hub-westend/src/tests/mod.rs (100%) rename cumulus/parachains/integration-tests/emulated/{ => tests}/assets/asset-hub-westend/src/tests/reserve_transfer.rs (95%) rename cumulus/parachains/integration-tests/emulated/{ => 
tests}/assets/asset-hub-westend/src/tests/send.rs (87%) rename cumulus/parachains/integration-tests/emulated/{ => tests}/assets/asset-hub-westend/src/tests/set_xcm_versions.rs (100%) rename cumulus/parachains/integration-tests/emulated/{ => tests}/assets/asset-hub-westend/src/tests/swap.rs (92%) rename cumulus/parachains/integration-tests/emulated/{ => tests}/assets/asset-hub-westend/src/tests/teleport.rs (99%) rename cumulus/parachains/integration-tests/emulated/{ => tests}/assets/asset-hub-westend/src/tests/treasury.rs (98%) create mode 100644 cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/Cargo.toml create mode 100644 cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/lib.rs rename cumulus/parachains/integration-tests/emulated/{ => tests}/bridges/bridge-hub-rococo/src/tests/example.rs (100%) rename cumulus/parachains/integration-tests/emulated/{ => tests}/bridges/bridge-hub-rococo/src/tests/mod.rs (100%) rename cumulus/parachains/integration-tests/emulated/{ => tests}/bridges/bridge-hub-rococo/src/tests/teleport.rs (100%) create mode 100644 cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/Cargo.toml create mode 100644 cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/lib.rs rename cumulus/parachains/integration-tests/emulated/{ => tests}/bridges/bridge-hub-westend/src/tests/example.rs (96%) rename cumulus/parachains/integration-tests/emulated/{ => tests}/bridges/bridge-hub-westend/src/tests/mod.rs (100%) rename cumulus/parachains/integration-tests/emulated/{ => tests}/bridges/bridge-hub-westend/src/tests/teleport.rs (95%) diff --git a/.gitignore b/.gitignore index 7feea8ada5c..581c417cb85 100644 --- a/.gitignore +++ b/.gitignore @@ -17,7 +17,6 @@ **/._* **/.criterion/ **/*.rs.bk -**/chains/ **/hfuzz_target/ **/hfuzz_workspace/ **/node_modules diff --git a/Cargo.lock b/Cargo.lock index 8c7121e235c..18eb00a13c1 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -874,6 +874,21 @@ dependencies = [ "substrate-wasm-builder", ] +[[package]] +name = "asset-hub-rococo-emulated-chain" +version = "0.0.0" +dependencies = [ + "asset-hub-rococo-runtime", + "cumulus-primitives-core", + "emulated-integration-tests-common", + "frame-support", + "parachains-common", + "rococo-emulated-chain", + "serde_json", + "sp-core", + "sp-runtime", +] + [[package]] name = "asset-hub-rococo-integration-tests" version = "1.0.0" @@ -881,23 +896,19 @@ dependencies = [ "assert_matches", "asset-hub-rococo-runtime", "asset-test-utils", + "emulated-integration-tests-common", "frame-support", - "frame-system", - "integration-tests-common", "pallet-asset-conversion", "pallet-assets", "pallet-balances", "pallet-xcm", "parachains-common", "parity-scale-codec", - "polkadot-core-primitives", - "polkadot-parachain-primitives", - "polkadot-runtime-parachains", "rococo-runtime", + "rococo-system-emulated-network", "sp-runtime", "staging-xcm", "staging-xcm-executor", - "xcm-emulator", ] [[package]] @@ -982,6 +993,21 @@ dependencies = [ "substrate-wasm-builder", ] +[[package]] +name = "asset-hub-westend-emulated-chain" +version = "0.0.0" +dependencies = [ + "asset-hub-westend-runtime", + "cumulus-primitives-core", + "emulated-integration-tests-common", + "frame-support", + "parachains-common", + "serde_json", + "sp-core", + "sp-runtime", + "westend-emulated-chain", +] + [[package]] name = "asset-hub-westend-integration-tests" version = "1.0.0" @@ -991,9 +1017,9 @@ dependencies = [ "asset-test-utils", 
"cumulus-pallet-dmp-queue", "cumulus-pallet-parachain-system", + "emulated-integration-tests-common", "frame-support", "frame-system", - "integration-tests-common", "pallet-asset-conversion", "pallet-asset-rate", "pallet-assets", @@ -1003,17 +1029,14 @@ dependencies = [ "pallet-xcm", "parachains-common", "parity-scale-codec", - "polkadot-core-primitives", - "polkadot-parachain-primitives", "polkadot-runtime-common", - "polkadot-runtime-parachains", "sp-runtime", "staging-xcm", "staging-xcm-builder", "staging-xcm-executor", "westend-runtime", "westend-runtime-constants", - "xcm-emulator", + "westend-system-emulated-network", ] [[package]] @@ -1094,6 +1117,22 @@ dependencies = [ "westend-runtime-constants", ] +[[package]] +name = "asset-hub-wococo-emulated-chain" +version = "0.0.0" +dependencies = [ + "asset-hub-rococo-emulated-chain", + "asset-hub-rococo-runtime", + "cumulus-primitives-core", + "emulated-integration-tests-common", + "frame-support", + "parachains-common", + "serde_json", + "sp-core", + "sp-runtime", + "wococo-emulated-chain", +] + [[package]] name = "asset-test-utils" version = "1.0.0" @@ -2136,6 +2175,20 @@ dependencies = [ "substrate-wasm-builder", ] +[[package]] +name = "bridge-hub-rococo-emulated-chain" +version = "0.0.0" +dependencies = [ + "bridge-hub-rococo-runtime", + "cumulus-primitives-core", + "emulated-integration-tests-common", + "frame-support", + "parachains-common", + "serde_json", + "sp-core", + "sp-runtime", +] + [[package]] name = "bridge-hub-rococo-integration-tests" version = "1.0.0" @@ -2145,24 +2198,16 @@ dependencies = [ "bridge-hub-rococo-runtime", "cumulus-pallet-dmp-queue", "cumulus-pallet-xcmp-queue", + "emulated-integration-tests-common", "frame-support", - "frame-system", - "integration-tests-common", - "pallet-assets", - "pallet-balances", "pallet-bridge-messages", "pallet-message-queue", "pallet-xcm", "parachains-common", "parity-scale-codec", - "polkadot-core-primitives", - "polkadot-parachain-primitives", - "polkadot-runtime-parachains", - "sp-core", - "sp-weights", + "rococo-wococo-system-emulated-network", "staging-xcm", "staging-xcm-executor", - "xcm-emulator", ] [[package]] @@ -2298,6 +2343,20 @@ dependencies = [ "staging-xcm-executor", ] +[[package]] +name = "bridge-hub-westend-emulated-chain" +version = "0.0.0" +dependencies = [ + "bridge-hub-westend-runtime", + "cumulus-primitives-core", + "emulated-integration-tests-common", + "frame-support", + "parachains-common", + "serde_json", + "sp-core", + "sp-runtime", +] + [[package]] name = "bridge-hub-westend-integration-tests" version = "1.0.0" @@ -2307,24 +2366,16 @@ dependencies = [ "bridge-hub-westend-runtime", "cumulus-pallet-dmp-queue", "cumulus-pallet-xcmp-queue", + "emulated-integration-tests-common", "frame-support", - "frame-system", - "integration-tests-common", - "pallet-assets", - "pallet-balances", "pallet-bridge-messages", "pallet-message-queue", "pallet-xcm", "parachains-common", "parity-scale-codec", - "polkadot-core-primitives", - "polkadot-parachain-primitives", - "polkadot-runtime-parachains", - "sp-core", - "sp-weights", "staging-xcm", "staging-xcm-executor", - "xcm-emulator", + "westend-system-emulated-network", ] [[package]] @@ -2410,6 +2461,21 @@ dependencies = [ "westend-runtime-constants", ] +[[package]] +name = "bridge-hub-wococo-emulated-chain" +version = "0.0.0" +dependencies = [ + "bridge-hub-rococo-emulated-chain", + "bridge-hub-rococo-runtime", + "cumulus-primitives-core", + "emulated-integration-tests-common", + "frame-support", + "parachains-common", + 
"serde_json", + "sp-core", + "sp-runtime", +] + [[package]] name = "bridge-runtime-common" version = "0.1.0" @@ -4988,6 +5054,40 @@ dependencies = [ "zeroize", ] +[[package]] +name = "emulated-integration-tests-common" +version = "1.0.0" +dependencies = [ + "asset-test-utils", + "bp-messages", + "bridge-runtime-common", + "cumulus-pallet-parachain-system", + "cumulus-pallet-xcmp-queue", + "cumulus-primitives-core", + "frame-support", + "pallet-assets", + "pallet-balances", + "pallet-bridge-messages", + "pallet-im-online", + "pallet-message-queue", + "pallet-xcm", + "parachains-common", + "parity-scale-codec", + "paste", + "polkadot-primitives", + "polkadot-runtime-parachains", + "polkadot-service", + "sc-consensus-grandpa", + "serde_json", + "sp-authority-discovery", + "sp-consensus-babe", + "sp-consensus-beefy", + "sp-core", + "sp-runtime", + "staging-xcm", + "xcm-emulator", +] + [[package]] name = "encode_unicode" version = "0.3.6" @@ -6735,57 +6835,6 @@ dependencies = [ "num-traits", ] -[[package]] -name = "integration-tests-common" -version = "1.0.0" -dependencies = [ - "asset-hub-kusama-runtime", - "asset-hub-polkadot-runtime", - "asset-hub-rococo-runtime", - "asset-hub-westend-runtime", - "bp-messages", - "bridge-hub-kusama-runtime", - "bridge-hub-polkadot-runtime", - "bridge-hub-rococo-runtime", - "bridge-hub-westend-runtime", - "bridge-runtime-common", - "collectives-polkadot-runtime", - "cumulus-pallet-parachain-system", - "cumulus-pallet-xcmp-queue", - "cumulus-primitives-core", - "frame-support", - "pallet-assets", - "pallet-balances", - "pallet-bridge-messages", - "pallet-im-online", - "pallet-message-queue", - "pallet-staking", - "pallet-xcm", - "parachains-common", - "parity-scale-codec", - "paste", - "penpal-runtime", - "polkadot-core-primitives", - "polkadot-parachain-primitives", - "polkadot-primitives", - "polkadot-runtime-parachains", - "polkadot-service", - "rococo-runtime", - "rococo-runtime-constants", - "sc-chain-spec", - "sc-consensus-grandpa", - "serde_json", - "sp-authority-discovery", - "sp-consensus-babe", - "sp-consensus-beefy", - "sp-core", - "sp-runtime", - "staging-xcm", - "westend-runtime", - "westend-runtime-constants", - "xcm-emulator", -] - [[package]] name = "interceptor" version = "0.8.2" @@ -11547,6 +11596,20 @@ dependencies = [ "base64ct", ] +[[package]] +name = "penpal-emulated-chain" +version = "0.0.0" +dependencies = [ + "cumulus-primitives-core", + "emulated-integration-tests-common", + "frame-support", + "parachains-common", + "penpal-runtime", + "serde_json", + "sp-core", + "sp-runtime", +] + [[package]] name = "penpal-runtime" version = "0.9.27" @@ -14288,6 +14351,25 @@ dependencies = [ "librocksdb-sys", ] +[[package]] +name = "rococo-emulated-chain" +version = "0.0.0" +dependencies = [ + "emulated-integration-tests-common", + "pallet-im-online", + "parachains-common", + "polkadot-primitives", + "rococo-runtime", + "rococo-runtime-constants", + "sc-consensus-grandpa", + "serde_json", + "sp-authority-discovery", + "sp-consensus-babe", + "sp-consensus-beefy", + "sp-core", + "sp-runtime", +] + [[package]] name = "rococo-parachain-runtime" version = "0.1.0" @@ -14455,6 +14537,30 @@ dependencies = [ "staging-xcm", ] +[[package]] +name = "rococo-system-emulated-network" +version = "0.0.0" +dependencies = [ + "asset-hub-rococo-emulated-chain", + "bridge-hub-rococo-emulated-chain", + "emulated-integration-tests-common", + "penpal-emulated-chain", + "rococo-emulated-chain", +] + +[[package]] +name = "rococo-wococo-system-emulated-network" +version = 
"0.0.0" +dependencies = [ + "asset-hub-rococo-emulated-chain", + "asset-hub-wococo-emulated-chain", + "bridge-hub-rococo-emulated-chain", + "bridge-hub-wococo-emulated-chain", + "emulated-integration-tests-common", + "rococo-emulated-chain", + "wococo-emulated-chain", +] + [[package]] name = "rpassword" version = "7.2.0" @@ -20682,6 +20788,26 @@ dependencies = [ "winapi", ] +[[package]] +name = "westend-emulated-chain" +version = "0.0.0" +dependencies = [ + "emulated-integration-tests-common", + "pallet-im-online", + "pallet-staking", + "parachains-common", + "polkadot-primitives", + "sc-consensus-grandpa", + "serde_json", + "sp-authority-discovery", + "sp-consensus-babe", + "sp-consensus-beefy", + "sp-core", + "sp-runtime", + "westend-runtime", + "westend-runtime-constants", +] + [[package]] name = "westend-runtime" version = "1.0.0" @@ -20807,6 +20933,17 @@ dependencies = [ "staging-xcm", ] +[[package]] +name = "westend-system-emulated-network" +version = "0.0.0" +dependencies = [ + "asset-hub-westend-emulated-chain", + "bridge-hub-westend-emulated-chain", + "emulated-integration-tests-common", + "penpal-emulated-chain", + "westend-emulated-chain", +] + [[package]] name = "which" version = "4.4.0" @@ -21068,6 +21205,37 @@ dependencies = [ "windows-sys 0.48.0", ] +[[package]] +name = "wococo-emulated-chain" +version = "0.0.0" +dependencies = [ + "emulated-integration-tests-common", + "pallet-im-online", + "parachains-common", + "polkadot-primitives", + "rococo-emulated-chain", + "rococo-runtime", + "rococo-runtime-constants", + "sc-consensus-grandpa", + "serde_json", + "sp-authority-discovery", + "sp-consensus-babe", + "sp-consensus-beefy", + "sp-core", + "sp-runtime", +] + +[[package]] +name = "wococo-system-emulated-network" +version = "0.0.0" +dependencies = [ + "asset-hub-wococo-emulated-chain", + "bridge-hub-wococo-emulated-chain", + "emulated-integration-tests-common", + "penpal-emulated-chain", + "wococo-emulated-chain", +] + [[package]] name = "wyz" version = "0.5.1" diff --git a/Cargo.toml b/Cargo.toml index 2344a24a5d9..3b3469e5483 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -65,11 +65,23 @@ members = [ "cumulus/parachain-template/pallets/template", "cumulus/parachain-template/runtime", "cumulus/parachains/common", - "cumulus/parachains/integration-tests/emulated/assets/asset-hub-rococo", - "cumulus/parachains/integration-tests/emulated/assets/asset-hub-westend", - "cumulus/parachains/integration-tests/emulated/bridges/bridge-hub-rococo", - "cumulus/parachains/integration-tests/emulated/bridges/bridge-hub-westend", + "cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo", + "cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend", + "cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo", + "cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend", "cumulus/parachains/integration-tests/emulated/common", + "cumulus/parachains/integration-tests/emulated/chains/relays/rococo", + "cumulus/parachains/integration-tests/emulated/chains/relays/wococo", + "cumulus/parachains/integration-tests/emulated/chains/relays/westend", + "cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-rococo", + "cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-wococo", + "cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-westend", + "cumulus/parachains/integration-tests/emulated/chains/parachains/bridges/bridge-hub-rococo", + 
"cumulus/parachains/integration-tests/emulated/chains/parachains/bridges/bridge-hub-westend", + "cumulus/parachains/integration-tests/emulated/networks/rococo-system", + "cumulus/parachains/integration-tests/emulated/networks/wococo-system", + "cumulus/parachains/integration-tests/emulated/networks/rococo-wococo-system", + "cumulus/parachains/integration-tests/emulated/networks/westend-system", "cumulus/parachains/pallets/collective-content", "cumulus/parachains/pallets/parachain-info", "cumulus/parachains/pallets/ping", diff --git a/cumulus/parachains/integration-tests/emulated/assets/asset-hub-polkadot/src/tests/reserve_transfer.rs b/cumulus/parachains/integration-tests/emulated/assets/asset-hub-polkadot/src/tests/reserve_transfer.rs deleted file mode 100644 index 312df8f7a33..00000000000 --- a/cumulus/parachains/integration-tests/emulated/assets/asset-hub-polkadot/src/tests/reserve_transfer.rs +++ /dev/null @@ -1,412 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -use crate::*; - -fn relay_origin_assertions(t: RelayToSystemParaTest) { - type RuntimeEvent = ::RuntimeEvent; - - Polkadot::assert_xcm_pallet_attempted_complete(Some(Weight::from_parts(629_384_000, 6_196))); - - assert_expected_events!( - Polkadot, - vec![ - // Amount to reserve transfer is transferred to System Parachain's Sovereign account - RuntimeEvent::Balances(pallet_balances::Event::Transfer { from, to, amount }) => { - from: *from == t.sender.account_id, - to: *to == Polkadot::sovereign_account_id_of( - t.args.dest - ), - amount: *amount == t.args.amount, - }, - ] - ); -} - -fn system_para_dest_assertions_incomplete(_t: RelayToSystemParaTest) { - // Errors with `UntrustedReserveLocation`, but the MQ pallet does not report back errors. 
- AssetHubPolkadot::assert_dmp_queue_incomplete(Some(Weight::from_parts(1_000_000_000, 0))); -} - -fn system_para_to_relay_assertions(_t: SystemParaToRelayTest) { - AssetHubPolkadot::assert_xcm_pallet_attempted_error(Some(XcmError::Barrier)) -} - -fn system_para_to_para_assertions(t: SystemParaToParaTest) { - type RuntimeEvent = ::RuntimeEvent; - - AssetHubPolkadot::assert_xcm_pallet_attempted_complete(Some(Weight::from_parts( - 676_119_000, - 6196, - ))); - - assert_expected_events!( - AssetHubPolkadot, - vec![ - // Amount to reserve transfer is transferred to Parachain's Sovereing account - RuntimeEvent::Balances( - pallet_balances::Event::Transfer { from, to, amount } - ) => { - from: *from == t.sender.account_id, - to: *to == AssetHubPolkadot::sovereign_account_id_of( - t.args.dest - ), - amount: *amount == t.args.amount, - }, - ] - ); -} - -fn system_para_to_para_assets_assertions(t: SystemParaToParaTest) { - type RuntimeEvent = ::RuntimeEvent; - - AssetHubPolkadot::assert_xcm_pallet_attempted_complete(Some(Weight::from_parts( - 676_119_000, - 6196, - ))); - - assert_expected_events!( - AssetHubPolkadot, - vec![ - // Amount to reserve transfer is transferred to Parachain's Sovereing account - RuntimeEvent::Assets( - pallet_assets::Event::Transferred { asset_id, from, to, amount } - ) => { - asset_id: *asset_id == ASSET_ID, - from: *from == t.sender.account_id, - to: *to == AssetHubPolkadot::sovereign_account_id_of( - t.args.dest - ), - amount: *amount == t.args.amount, - }, - ] - ); -} - -fn relay_limited_reserve_transfer_assets(t: RelayToSystemParaTest) -> DispatchResult { - ::XcmPallet::limited_reserve_transfer_assets( - t.signed_origin, - bx!(t.args.dest.into()), - bx!(t.args.beneficiary.into()), - bx!(t.args.assets.into()), - t.args.fee_asset_item, - t.args.weight_limit, - ) -} - -fn relay_reserve_transfer_assets(t: RelayToSystemParaTest) -> DispatchResult { - ::XcmPallet::reserve_transfer_assets( - t.signed_origin, - bx!(t.args.dest.into()), - bx!(t.args.beneficiary.into()), - bx!(t.args.assets.into()), - t.args.fee_asset_item, - ) -} - -fn system_para_limited_reserve_transfer_assets(t: SystemParaToRelayTest) -> DispatchResult { - ::PolkadotXcm::limited_reserve_transfer_assets( - t.signed_origin, - bx!(t.args.dest.into()), - bx!(t.args.beneficiary.into()), - bx!(t.args.assets.into()), - t.args.fee_asset_item, - t.args.weight_limit, - ) -} - -fn system_para_reserve_transfer_assets(t: SystemParaToRelayTest) -> DispatchResult { - ::PolkadotXcm::reserve_transfer_assets( - t.signed_origin, - bx!(t.args.dest.into()), - bx!(t.args.beneficiary.into()), - bx!(t.args.assets.into()), - t.args.fee_asset_item, - ) -} - -fn system_para_to_para_limited_reserve_transfer_assets(t: SystemParaToParaTest) -> DispatchResult { - ::PolkadotXcm::limited_reserve_transfer_assets( - t.signed_origin, - bx!(t.args.dest.into()), - bx!(t.args.beneficiary.into()), - bx!(t.args.assets.into()), - t.args.fee_asset_item, - t.args.weight_limit, - ) -} - -fn system_para_to_para_reserve_transfer_assets(t: SystemParaToParaTest) -> DispatchResult { - ::PolkadotXcm::reserve_transfer_assets( - t.signed_origin, - bx!(t.args.dest.into()), - bx!(t.args.beneficiary.into()), - bx!(t.args.assets.into()), - t.args.fee_asset_item, - ) -} - -/// Limited Reserve Transfers of native asset from Relay Chain to the System Parachain shouldn't -/// work -#[test] -fn limited_reserve_transfer_native_asset_from_relay_to_system_para_fails() { - // Init values for Relay Chain - let amount_to_send: Balance = POLKADOT_ED * 1000; - let test_args 
= TestContext { - sender: PolkadotSender::get(), - receiver: AssetHubPolkadotReceiver::get(), - args: relay_test_args(amount_to_send), - }; - - let mut test = RelayToSystemParaTest::new(test_args); - - let sender_balance_before = test.sender.balance; - let receiver_balance_before = test.receiver.balance; - - test.set_assertion::(relay_origin_assertions); - test.set_assertion::(system_para_dest_assertions_incomplete); - test.set_dispatchable::(relay_limited_reserve_transfer_assets); - test.assert(); - - let sender_balance_after = test.sender.balance; - let receiver_balance_after = test.receiver.balance; - - assert_eq!(sender_balance_before - amount_to_send, sender_balance_after); - assert_eq!(receiver_balance_before, receiver_balance_after); -} - -/// Limited Reserve Transfers of native asset from System Parachain to Relay Chain shoudln't work -#[test] -fn limited_reserve_transfer_native_asset_from_system_para_to_relay_fails() { - // Init values for System Parachain - let destination = AssetHubPolkadot::parent_location(); - let beneficiary_id = PolkadotReceiver::get(); - let amount_to_send: Balance = ASSET_HUB_POLKADOT_ED * 1000; - let assets = (Parent, amount_to_send).into(); - - let test_args = TestContext { - sender: AssetHubPolkadotSender::get(), - receiver: PolkadotReceiver::get(), - args: system_para_test_args(destination, beneficiary_id, amount_to_send, assets, None), - }; - - let mut test = SystemParaToRelayTest::new(test_args); - - let sender_balance_before = test.sender.balance; - let receiver_balance_before = test.receiver.balance; - - test.set_assertion::(system_para_to_relay_assertions); - test.set_dispatchable::(system_para_limited_reserve_transfer_assets); - test.assert(); - - let sender_balance_after = test.sender.balance; - let receiver_balance_after = test.receiver.balance; - - assert_eq!(sender_balance_before, sender_balance_after); - assert_eq!(receiver_balance_before, receiver_balance_after); -} - -/// Reserve Transfers of native asset from Relay Chain to the System Parachain shouldn't work -#[test] -fn reserve_transfer_native_asset_from_relay_to_system_para_fails() { - // Init values for Relay Chain - let amount_to_send: Balance = POLKADOT_ED * 1000; - let test_args = TestContext { - sender: PolkadotSender::get(), - receiver: AssetHubPolkadotReceiver::get(), - args: relay_test_args(amount_to_send), - }; - - let mut test = RelayToSystemParaTest::new(test_args); - - let sender_balance_before = test.sender.balance; - let receiver_balance_before = test.receiver.balance; - - test.set_assertion::(relay_origin_assertions); - test.set_assertion::(system_para_dest_assertions_incomplete); - test.set_dispatchable::(relay_reserve_transfer_assets); - test.assert(); - - let sender_balance_after = test.sender.balance; - let receiver_balance_after = test.receiver.balance; - - assert_eq!(sender_balance_before - amount_to_send, sender_balance_after); - assert_eq!(receiver_balance_before, receiver_balance_after); -} - -/// Reserve Transfers of native asset from System Parachain to Relay Chain shouldn't work -#[test] -fn reserve_transfer_native_asset_from_system_para_to_relay_fails() { - // Init values for System Parachain - let destination = AssetHubPolkadot::parent_location(); - let beneficiary_id = PolkadotReceiver::get(); - let amount_to_send: Balance = ASSET_HUB_POLKADOT_ED * 1000; - let assets = (Parent, amount_to_send).into(); - - let test_args = TestContext { - sender: AssetHubPolkadotSender::get(), - receiver: PolkadotReceiver::get(), - args: system_para_test_args(destination, 
beneficiary_id, amount_to_send, assets, None), - }; - - let mut test = SystemParaToRelayTest::new(test_args); - - let sender_balance_before = test.sender.balance; - let receiver_balance_before = test.receiver.balance; - - test.set_assertion::(system_para_to_relay_assertions); - test.set_dispatchable::(system_para_reserve_transfer_assets); - test.assert(); - - let sender_balance_after = test.sender.balance; - let receiver_balance_after = test.receiver.balance; - - assert_eq!(sender_balance_before, sender_balance_after); - assert_eq!(receiver_balance_before, receiver_balance_after); -} - -/// Limited Reserve Transfers of native asset from System Parachain to Parachain should work -#[test] -fn limited_reserve_transfer_native_asset_from_system_para_to_para() { - // Init values for System Parachain - let destination = AssetHubPolkadot::sibling_location_of(PenpalPolkadotA::para_id()); - let beneficiary_id = PenpalPolkadotAReceiver::get(); - let amount_to_send: Balance = ASSET_HUB_POLKADOT_ED * 1000; - let assets = (Parent, amount_to_send).into(); - - let test_args = TestContext { - sender: AssetHubPolkadotSender::get(), - receiver: PenpalPolkadotAReceiver::get(), - args: system_para_test_args(destination, beneficiary_id, amount_to_send, assets, None), - }; - - let mut test = SystemParaToParaTest::new(test_args); - - let sender_balance_before = test.sender.balance; - - test.set_assertion::(system_para_to_para_assertions); - // TODO: Add assertion for Penpal runtime. Right now message is failing with - // `UntrustedReserveLocation` - test.set_dispatchable::(system_para_to_para_limited_reserve_transfer_assets); - test.assert(); - - let sender_balance_after = test.sender.balance; - - assert_eq!(sender_balance_before - amount_to_send, sender_balance_after); - // TODO: Check receiver balance when Penpal runtime is improved to propery handle reserve - // transfers -} - -/// Reserve Transfers of native asset from System Parachain to Parachain should work -#[test] -fn reserve_transfer_native_asset_from_system_para_to_para() { - // Init values for System Parachain - let destination = AssetHubPolkadot::sibling_location_of(PenpalPolkadotA::para_id()); - let beneficiary_id = PenpalPolkadotAReceiver::get(); - let amount_to_send: Balance = ASSET_HUB_POLKADOT_ED * 1000; - let assets = (Parent, amount_to_send).into(); - - let test_args = TestContext { - sender: AssetHubPolkadotSender::get(), - receiver: PenpalPolkadotAReceiver::get(), - args: system_para_test_args(destination, beneficiary_id, amount_to_send, assets, None), - }; - - let mut test = SystemParaToParaTest::new(test_args); - - let sender_balance_before = test.sender.balance; - - test.set_assertion::(system_para_to_para_assertions); - // TODO: Add assertion for Penpal runtime. 
Right now message is failing with - // `UntrustedReserveLocation` - test.set_dispatchable::(system_para_to_para_reserve_transfer_assets); - test.assert(); - - let sender_balance_after = test.sender.balance; - - assert_eq!(sender_balance_before - amount_to_send, sender_balance_after); - // TODO: Check receiver balance when Penpal runtime is improved to propery handle reserve - // transfers -} - -/// Limited Reserve Transfers of a local asset from System Parachain to Parachain should work -#[test] -fn limited_reserve_transfer_asset_from_system_para_to_para() { - // Force create asset from Relay Chain and mint assets for System Parachain's sender account - AssetHubPolkadot::force_create_and_mint_asset( - ASSET_ID, - ASSET_MIN_BALANCE, - true, - AssetHubPolkadotSender::get(), - ASSET_MIN_BALANCE * 1000000, - ); - - // Init values for System Parachain - let destination = AssetHubPolkadot::sibling_location_of(PenpalPolkadotA::para_id()); - let beneficiary_id = PenpalPolkadotAReceiver::get(); - let amount_to_send = ASSET_MIN_BALANCE * 1000; - let assets = - (X2(PalletInstance(ASSETS_PALLET_ID), GeneralIndex(ASSET_ID.into())), amount_to_send) - .into(); - - let system_para_test_args = TestContext { - sender: AssetHubPolkadotSender::get(), - receiver: PenpalPolkadotAReceiver::get(), - args: system_para_test_args(destination, beneficiary_id, amount_to_send, assets, None), - }; - - let mut system_para_test = SystemParaToParaTest::new(system_para_test_args); - - system_para_test.set_assertion::(system_para_to_para_assets_assertions); - // TODO: Add assertions when Penpal is able to manage assets - system_para_test - .set_dispatchable::(system_para_to_para_limited_reserve_transfer_assets); - system_para_test.assert(); -} - -/// Reserve Transfers of a local asset from System Parachain to Parachain should work -#[test] -fn reserve_transfer_asset_from_system_para_to_para() { - // Force create asset from Relay Chain and mint assets for System Parachain's sender account - AssetHubPolkadot::force_create_and_mint_asset( - ASSET_ID, - ASSET_MIN_BALANCE, - true, - AssetHubPolkadotSender::get(), - ASSET_MIN_BALANCE * 1000000, - ); - - // Init values for System Parachain - let destination = AssetHubPolkadot::sibling_location_of(PenpalPolkadotA::para_id()); - let beneficiary_id = PenpalPolkadotAReceiver::get(); - let amount_to_send = ASSET_MIN_BALANCE * 1000; - let assets = - (X2(PalletInstance(ASSETS_PALLET_ID), GeneralIndex(ASSET_ID.into())), amount_to_send) - .into(); - - let system_para_test_args = TestContext { - sender: AssetHubPolkadotSender::get(), - receiver: PenpalPolkadotAReceiver::get(), - args: system_para_test_args(destination, beneficiary_id, amount_to_send, assets, None), - }; - - let mut system_para_test = SystemParaToParaTest::new(system_para_test_args); - - system_para_test.set_assertion::(system_para_to_para_assets_assertions); - // TODO: Add assertions when Penpal is able to manage assets - system_para_test - .set_dispatchable::(system_para_to_para_reserve_transfer_assets); - system_para_test.assert(); -} diff --git a/cumulus/parachains/integration-tests/emulated/assets/asset-hub-rococo/Cargo.toml b/cumulus/parachains/integration-tests/emulated/assets/asset-hub-rococo/Cargo.toml deleted file mode 100644 index db58d8d3303..00000000000 --- a/cumulus/parachains/integration-tests/emulated/assets/asset-hub-rococo/Cargo.toml +++ /dev/null @@ -1,56 +0,0 @@ -[package] -name = "asset-hub-rococo-integration-tests" -version = "1.0.0" -authors.workspace = true -edition.workspace = true -license = 
"Apache-2.0" -description = "Asset Hub Rococo runtime integration tests with xcm-emulator" -publish = false - -[dependencies] -codec = { package = "parity-scale-codec", version = "3.4.0", default-features = false } -assert_matches = "1.5.0" - -# Substrate -sp-runtime = { path = "../../../../../../substrate/primitives/runtime", default-features = false} -frame-support = { path = "../../../../../../substrate/frame/support", default-features = false} -frame-system = { path = "../../../../../../substrate/frame/system", default-features = false} -pallet-balances = { path = "../../../../../../substrate/frame/balances", default-features = false} -pallet-assets = { path = "../../../../../../substrate/frame/assets", default-features = false} -pallet-asset-conversion = { path = "../../../../../../substrate/frame/asset-conversion", default-features = false} - -# Polkadot -polkadot-core-primitives = { path = "../../../../../../polkadot/core-primitives", default-features = false} -polkadot-parachain-primitives = { path = "../../../../../../polkadot/parachain", default-features = false} -polkadot-runtime-parachains = { path = "../../../../../../polkadot/runtime/parachains" } -xcm = { package = "staging-xcm", path = "../../../../../../polkadot/xcm", default-features = false} -pallet-xcm = { path = "../../../../../../polkadot/xcm/pallet-xcm", default-features = false} -xcm-executor = { package = "staging-xcm-executor", path = "../../../../../../polkadot/xcm/xcm-executor", default-features = false} -rococo-runtime = { path = "../../../../../../polkadot/runtime/rococo", default-features = false } - -# Cumulus -asset-test-utils = { path = "../../../../runtimes/assets/test-utils", default-features = false } -parachains-common = { path = "../../../../common" } -asset-hub-rococo-runtime = { path = "../../../../runtimes/assets/asset-hub-rococo" } - -# Local -xcm-emulator = { path = "../../../../../xcm/xcm-emulator", default-features = false} -integration-tests-common = { path = "../../common", default-features = false} - -[features] -runtime-benchmarks = [ - "asset-hub-rococo-runtime/runtime-benchmarks", - "frame-support/runtime-benchmarks", - "frame-system/runtime-benchmarks", - "integration-tests-common/runtime-benchmarks", - "pallet-asset-conversion/runtime-benchmarks", - "pallet-assets/runtime-benchmarks", - "pallet-balances/runtime-benchmarks", - "pallet-xcm/runtime-benchmarks", - "parachains-common/runtime-benchmarks", - "polkadot-parachain-primitives/runtime-benchmarks", - "polkadot-runtime-parachains/runtime-benchmarks", - "rococo-runtime/runtime-benchmarks", - "sp-runtime/runtime-benchmarks", - "xcm-executor/runtime-benchmarks", -] diff --git a/cumulus/parachains/integration-tests/emulated/assets/asset-hub-westend/Cargo.toml b/cumulus/parachains/integration-tests/emulated/assets/asset-hub-westend/Cargo.toml deleted file mode 100644 index 2043431dcc2..00000000000 --- a/cumulus/parachains/integration-tests/emulated/assets/asset-hub-westend/Cargo.toml +++ /dev/null @@ -1,71 +0,0 @@ -[package] -name = "asset-hub-westend-integration-tests" -version = "1.0.0" -authors.workspace = true -edition.workspace = true -license = "Apache-2.0" -description = "Asset Hub Westend runtime integration tests with xcm-emulator" -publish = false - -[dependencies] -codec = { package = "parity-scale-codec", version = "3.4.0", default-features = false } -assert_matches = "1.5.0" - -# Substrate -sp-runtime = { path = "../../../../../../substrate/primitives/runtime", default-features = false} -frame-support = { path = 
"../../../../../../substrate/frame/support", default-features = false} -frame-system = { path = "../../../../../../substrate/frame/system", default-features = false} -pallet-balances = { path = "../../../../../../substrate/frame/balances", default-features = false} -pallet-assets = { path = "../../../../../../substrate/frame/assets", default-features = false} -pallet-asset-conversion = { path = "../../../../../../substrate/frame/asset-conversion", default-features = false} -pallet-message-queue = { path = "../../../../../../substrate/frame/message-queue", default-features = false } -pallet-treasury = { path = "../../../../../../substrate/frame/treasury", default-features = false} -pallet-asset-rate = { path = "../../../../../../substrate/frame/asset-rate", default-features = false} - -# Polkadot -polkadot-core-primitives = { path = "../../../../../../polkadot/core-primitives", default-features = false} -polkadot-parachain-primitives = { path = "../../../../../../polkadot/parachain", default-features = false} -polkadot-runtime-common = { path = "../../../../../../polkadot/runtime/common" } -polkadot-runtime-parachains = { path = "../../../../../../polkadot/runtime/parachains" } -xcm = { package = "staging-xcm", path = "../../../../../../polkadot/xcm", default-features = false} -xcm-builder = { package = "staging-xcm-builder", path = "../../../../../../polkadot/xcm/xcm-builder", default-features = false} -xcm-executor = { package = "staging-xcm-executor", path = "../../../../../../polkadot/xcm/xcm-executor", default-features = false} -pallet-xcm = { path = "../../../../../../polkadot/xcm/pallet-xcm", default-features = false} -westend-runtime = { path = "../../../../../../polkadot/runtime/westend", default-features = false } -westend-runtime-constants = { path = "../../../../../../polkadot/runtime/westend/constants", default-features = false } - -# Cumulus -parachains-common = { path = "../../../../common" } -asset-hub-westend-runtime = { path = "../../../../runtimes/assets/asset-hub-westend" } -asset-test-utils = { path = "../../../../runtimes/assets/test-utils", default-features = false } -cumulus-pallet-dmp-queue = { default-features = false, path = "../../../../../pallets/dmp-queue" } -cumulus-pallet-parachain-system = { default-features = false, path = "../../../../../pallets/parachain-system" } - -# Local -xcm-emulator = { path = "../../../../../xcm/xcm-emulator", default-features = false} -integration-tests-common = { path = "../../common", default-features = false} - -[features] -runtime-benchmarks = [ - "asset-hub-westend-runtime/runtime-benchmarks", - "cumulus-pallet-dmp-queue/runtime-benchmarks", - "cumulus-pallet-parachain-system/runtime-benchmarks", - "frame-support/runtime-benchmarks", - "frame-system/runtime-benchmarks", - "integration-tests-common/runtime-benchmarks", - "pallet-asset-conversion/runtime-benchmarks", - "pallet-asset-rate/runtime-benchmarks", - "pallet-assets/runtime-benchmarks", - "pallet-balances/runtime-benchmarks", - "pallet-message-queue/runtime-benchmarks", - "pallet-treasury/runtime-benchmarks", - "pallet-xcm/runtime-benchmarks", - "parachains-common/runtime-benchmarks", - "polkadot-parachain-primitives/runtime-benchmarks", - "polkadot-runtime-common/runtime-benchmarks", - "polkadot-runtime-parachains/runtime-benchmarks", - "sp-runtime/runtime-benchmarks", - "westend-runtime/runtime-benchmarks", - "xcm-builder/runtime-benchmarks", - "xcm-executor/runtime-benchmarks", -] diff --git 
a/cumulus/parachains/integration-tests/emulated/bridges/bridge-hub-rococo/Cargo.toml b/cumulus/parachains/integration-tests/emulated/bridges/bridge-hub-rococo/Cargo.toml deleted file mode 100644 index b969c27aab9..00000000000 --- a/cumulus/parachains/integration-tests/emulated/bridges/bridge-hub-rococo/Cargo.toml +++ /dev/null @@ -1,41 +0,0 @@ -[package] -name = "bridge-hub-rococo-integration-tests" -version = "1.0.0" -authors.workspace = true -edition.workspace = true -license = "Apache-2.0" -description = "Bridge Hub Rococo runtime integration tests with xcm-emulator" -publish = false - -[dependencies] -codec = { package = "parity-scale-codec", version = "3.4.0", default-features = false } - -# Substrate -frame-support = { path = "../../../../../../substrate/frame/support", default-features = false} -frame-system = { path = "../../../../../../substrate/frame/system", default-features = false} -sp-core = { path = "../../../../../../substrate/primitives/core", default-features = false} -sp-weights = { path = "../../../../../../substrate/primitives/weights", default-features = false} -pallet-balances = { path = "../../../../../../substrate/frame/balances", default-features = false} -pallet-assets = { path = "../../../../../../substrate/frame/assets", default-features = false} -pallet-message-queue = { path = "../../../../../../substrate/frame/message-queue", default-features = false } - -# Polkadot -polkadot-core-primitives = { path = "../../../../../../polkadot/core-primitives", default-features = false} -polkadot-parachain-primitives = { path = "../../../../../../polkadot/parachain", default-features = false} -polkadot-runtime-parachains = { path = "../../../../../../polkadot/runtime/parachains" } -xcm = { package = "staging-xcm", path = "../../../../../../polkadot/xcm", default-features = false} -pallet-xcm = { path = "../../../../../../polkadot/xcm/pallet-xcm", default-features = false} -xcm-executor = { package = "staging-xcm-executor", path = "../../../../../../polkadot/xcm/xcm-executor", default-features = false} - -# Cumulus -asset-test-utils = { path = "../../../../../parachains/runtimes/assets/test-utils", default-features = false } -parachains-common = { path = "../../../../common" } -cumulus-pallet-xcmp-queue = { path = "../../../../../pallets/xcmp-queue", default-features = false} -cumulus-pallet-dmp-queue = { path = "../../../../../pallets/dmp-queue", default-features = false} -pallet-bridge-messages = { path = "../../../../../../bridges/modules/messages", default-features = false} -bp-messages = { path = "../../../../../../bridges/primitives/messages", default-features = false} -bridge-hub-rococo-runtime = { path = "../../../../../parachains/runtimes/bridge-hubs/bridge-hub-rococo", default-features = false } - -# Local -xcm-emulator = { path = "../../../../../xcm/xcm-emulator", default-features = false} -integration-tests-common = { path = "../../common", default-features = false} diff --git a/cumulus/parachains/integration-tests/emulated/bridges/bridge-hub-rococo/src/lib.rs b/cumulus/parachains/integration-tests/emulated/bridges/bridge-hub-rococo/src/lib.rs deleted file mode 100644 index 3c4dbe18a9a..00000000000 --- a/cumulus/parachains/integration-tests/emulated/bridges/bridge-hub-rococo/src/lib.rs +++ /dev/null @@ -1,67 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -pub use bp_messages::LaneId; -pub use frame_support::assert_ok; -pub use integration_tests_common::{ - constants::{ - bridge_hub_rococo::ED as BRIDGE_HUB_ROCOCO_ED, rococo::ED as ROCOCO_ED, - PROOF_SIZE_THRESHOLD, REF_TIME_THRESHOLD, XCM_V3, - }, - test_parachain_is_trusted_teleporter, - xcm_helpers::{xcm_transact_paid_execution, xcm_transact_unpaid_execution}, - AssetHubRococo, AssetHubRococoReceiver, AssetHubWococo, BridgeHubRococo, BridgeHubRococoPallet, - BridgeHubRococoSender, BridgeHubWococo, PenpalRococoA, Rococo, RococoPallet, -}; -pub use parachains_common::{AccountId, Balance}; -pub use xcm::{ - prelude::{AccountId32 as AccountId32Junction, *}, - v3::{ - Error, - NetworkId::{Rococo as RococoId, Wococo as WococoId}, - }, -}; -pub use xcm_emulator::{ - assert_expected_events, bx, helpers::weight_within_threshold, Chain, Parachain as Para, - RelayChain as Relay, Test, TestArgs, TestContext, TestExt, -}; - -pub const ASSET_ID: u32 = 1; -pub const ASSET_MIN_BALANCE: u128 = 1000; -pub const ASSETS_PALLET_ID: u8 = 50; - -pub type RelayToSystemParaTest = Test; -pub type SystemParaToRelayTest = Test; -pub type SystemParaToParaTest = Test; - -/// Returns a `TestArgs` instance to be used for the Relay Chain across integration tests -pub fn relay_test_args(amount: Balance) -> TestArgs { - TestArgs { - dest: Rococo::child_location_of(AssetHubRococo::para_id()), - beneficiary: AccountId32Junction { - network: None, - id: AssetHubRococoReceiver::get().into(), - } - .into(), - amount, - assets: (Here, amount).into(), - asset_id: None, - fee_asset_item: 0, - weight_limit: WeightLimit::Unlimited, - } -} - -#[cfg(test)] -mod tests; diff --git a/cumulus/parachains/integration-tests/emulated/bridges/bridge-hub-westend/Cargo.toml b/cumulus/parachains/integration-tests/emulated/bridges/bridge-hub-westend/Cargo.toml deleted file mode 100644 index af5408b888b..00000000000 --- a/cumulus/parachains/integration-tests/emulated/bridges/bridge-hub-westend/Cargo.toml +++ /dev/null @@ -1,41 +0,0 @@ -[package] -name = "bridge-hub-westend-integration-tests" -version = "1.0.0" -authors.workspace = true -edition.workspace = true -license = "Apache-2.0" -description = "Bridge Hub Westend runtime integration tests with xcm-emulator" -publish = false - -[dependencies] -codec = { package = "parity-scale-codec", version = "3.4.0", default-features = false } - -# Substrate -frame-support = { path = "../../../../../../substrate/frame/support", default-features = false} -frame-system = { path = "../../../../../../substrate/frame/system", default-features = false} -sp-core = { path = "../../../../../../substrate/primitives/core", default-features = false} -sp-weights = { path = "../../../../../../substrate/primitives/weights", default-features = false} -pallet-balances = { path = "../../../../../../substrate/frame/balances", default-features = false} -pallet-assets = { path = "../../../../../../substrate/frame/assets", default-features = false} -pallet-message-queue = { path = "../../../../../../substrate/frame/message-queue", default-features = false } - -# Polkadot -polkadot-core-primitives = { path = 
"../../../../../../polkadot/core-primitives", default-features = false} -polkadot-parachain-primitives = { path = "../../../../../../polkadot/parachain", default-features = false} -polkadot-runtime-parachains = { path = "../../../../../../polkadot/runtime/parachains" } -xcm = { package = "staging-xcm", path = "../../../../../../polkadot/xcm", default-features = false} -pallet-xcm = { path = "../../../../../../polkadot/xcm/pallet-xcm", default-features = false} -xcm-executor = { package = "staging-xcm-executor", path = "../../../../../../polkadot/xcm/xcm-executor", default-features = false} - -# Cumulus -asset-test-utils = { path = "../../../../../parachains/runtimes/assets/test-utils", default-features = false } -parachains-common = { path = "../../../../common" } -cumulus-pallet-xcmp-queue = { path = "../../../../../pallets/xcmp-queue", default-features = false} -cumulus-pallet-dmp-queue = { path = "../../../../../pallets/dmp-queue", default-features = false} -pallet-bridge-messages = { path = "../../../../../../bridges/modules/messages", default-features = false} -bp-messages = { path = "../../../../../../bridges/primitives/messages", default-features = false} -bridge-hub-westend-runtime = { path = "../../../../../parachains/runtimes/bridge-hubs/bridge-hub-westend", default-features = false } - -# Local -xcm-emulator = { path = "../../../../../xcm/xcm-emulator", default-features = false} -integration-tests-common = { path = "../../common", default-features = false} diff --git a/cumulus/parachains/integration-tests/emulated/bridges/bridge-hub-westend/src/lib.rs b/cumulus/parachains/integration-tests/emulated/bridges/bridge-hub-westend/src/lib.rs deleted file mode 100644 index 8b17ce53eb0..00000000000 --- a/cumulus/parachains/integration-tests/emulated/bridges/bridge-hub-westend/src/lib.rs +++ /dev/null @@ -1,67 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -pub use bp_messages::LaneId; -pub use frame_support::assert_ok; -pub use integration_tests_common::{ - constants::{ - bridge_hub_westend::ED as BRIDGE_HUB_WESTEND_ED, westend::ED as WESTEND_ED, - PROOF_SIZE_THRESHOLD, REF_TIME_THRESHOLD, XCM_V3, - }, - test_parachain_is_trusted_teleporter, - xcm_helpers::{xcm_transact_paid_execution, xcm_transact_unpaid_execution}, - AssetHubRococo, AssetHubWestend, AssetHubWestendReceiver, BridgeHubRococo, BridgeHubWestend, - BridgeHubWestendPallet, BridgeHubWestendSender, PenpalWestendA, Westend, WestendPallet, -}; -pub use parachains_common::{AccountId, Balance}; -pub use xcm::{ - prelude::{AccountId32 as AccountId32Junction, *}, - v3::{ - Error, - NetworkId::{Rococo as RococoId, Westend as WestendId}, - }, -}; -pub use xcm_emulator::{ - assert_expected_events, bx, helpers::weight_within_threshold, Chain, Parachain as Para, - RelayChain as Relay, Test, TestArgs, TestContext, TestExt, -}; - -pub const ASSET_ID: u32 = 1; -pub const ASSET_MIN_BALANCE: u128 = 1000; -pub const ASSETS_PALLET_ID: u8 = 50; - -pub type RelayToSystemParaTest = Test; -pub type SystemParaToRelayTest = Test; -pub type SystemParaToParaTest = Test; - -/// Returns a `TestArgs` instance to be used for the Relay Chain across integration tests -pub fn relay_test_args(amount: Balance) -> TestArgs { - TestArgs { - dest: Westend::child_location_of(AssetHubWestend::para_id()), - beneficiary: AccountId32Junction { - network: None, - id: AssetHubWestendReceiver::get().into(), - } - .into(), - amount, - assets: (Here, amount).into(), - asset_id: None, - fee_asset_item: 0, - weight_limit: WeightLimit::Unlimited, - } -} - -#[cfg(test)] -mod tests; diff --git a/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-rococo/Cargo.toml b/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-rococo/Cargo.toml new file mode 100644 index 00000000000..dbf7e9c9a70 --- /dev/null +++ b/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-rococo/Cargo.toml @@ -0,0 +1,25 @@ +[package] +name = "asset-hub-rococo-emulated-chain" +version = "0.0.0" +authors.workspace = true +edition.workspace = true +license = "Apache-2.0" +description = "Asset Hub Rococo emulated chain" +publish = false + +[dependencies] +serde_json = "1.0.104" + +# Substrate +sp-core = { path = "../../../../../../../../substrate/primitives/core", default-features = false } +sp-runtime = { path = "../../../../../../../../substrate/primitives/runtime", default-features = false } +frame-support = { path = "../../../../../../../../substrate/frame/support", default-features = false } + +# Polkadot +parachains-common = { path = "../../../../../../../parachains/common" } + +# Cumulus +cumulus-primitives-core = { path = "../../../../../../../primitives/core", default-features = false } +emulated-integration-tests-common = { path = "../../../../common", default-features = false } +asset-hub-rococo-runtime = { path = "../../../../../../runtimes/assets/asset-hub-rococo" } +rococo-emulated-chain = { path = "../../../relays/rococo" } diff --git a/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-rococo/src/genesis.rs b/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-rococo/src/genesis.rs new file mode 100644 index 00000000000..efc94722d90 --- /dev/null +++ b/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-rococo/src/genesis.rs @@ -0,0 +1,70 @@ +// Copyright (C) Parity
Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Substrate +use sp_core::storage::Storage; + +// Cumulus +use emulated_integration_tests_common::{ + accounts, build_genesis_storage_legacy, collators, SAFE_XCM_VERSION, +}; +use parachains_common::Balance; + +pub const PARA_ID: u32 = 1000; +pub const ED: Balance = parachains_common::rococo::currency::EXISTENTIAL_DEPOSIT; + +pub fn genesis() -> Storage { + let genesis_config = asset_hub_rococo_runtime::RuntimeGenesisConfig { + system: asset_hub_rococo_runtime::SystemConfig::default(), + balances: asset_hub_rococo_runtime::BalancesConfig { + balances: accounts::init_balances() + .iter() + .cloned() + .map(|k| (k, ED * 4096 * 4096)) + .collect(), + }, + parachain_info: asset_hub_rococo_runtime::ParachainInfoConfig { + parachain_id: PARA_ID.into(), + ..Default::default() + }, + collator_selection: asset_hub_rococo_runtime::CollatorSelectionConfig { + invulnerables: collators::invulnerables().iter().cloned().map(|(acc, _)| acc).collect(), + candidacy_bond: ED * 16, + ..Default::default() + }, + session: asset_hub_rococo_runtime::SessionConfig { + keys: collators::invulnerables() + .into_iter() + .map(|(acc, aura)| { + ( + acc.clone(), // account id + acc, // validator id + asset_hub_rococo_runtime::SessionKeys { aura }, // session keys + ) + }) + .collect(), + }, + polkadot_xcm: asset_hub_rococo_runtime::PolkadotXcmConfig { + safe_xcm_version: Some(SAFE_XCM_VERSION), + ..Default::default() + }, + ..Default::default() + }; + + build_genesis_storage_legacy( + &genesis_config, + asset_hub_rococo_runtime::WASM_BINARY.expect("WASM binary was not built, please build it!"), + ) +} diff --git a/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-rococo/src/lib.rs b/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-rococo/src/lib.rs new file mode 100644 index 00000000000..0580d61eae9 --- /dev/null +++ b/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-rococo/src/lib.rs @@ -0,0 +1,55 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +pub mod genesis; + +// Substrate +use frame_support::traits::OnInitialize; + +// Cumulus +use emulated_integration_tests_common::{ + impl_accounts_helpers_for_parachain, impl_assert_events_helpers_for_parachain, + impl_assets_helpers_for_system_parachain, xcm_emulator::decl_test_parachains, +}; +use rococo_emulated_chain::Rococo; + +// AssetHubRococo Parachain declaration +decl_test_parachains! { + pub struct AssetHubRococo { + genesis = genesis::genesis(), + on_init = { + asset_hub_rococo_runtime::AuraExt::on_initialize(1); + }, + runtime = asset_hub_rococo_runtime, + core = { + XcmpMessageHandler: asset_hub_rococo_runtime::XcmpQueue, + LocationToAccountId: asset_hub_rococo_runtime::xcm_config::LocationToAccountId, + ParachainInfo: asset_hub_rococo_runtime::ParachainInfo, + }, + pallets = { + PolkadotXcm: asset_hub_rococo_runtime::PolkadotXcm, + Assets: asset_hub_rococo_runtime::Assets, + ForeignAssets: asset_hub_rococo_runtime::ForeignAssets, + PoolAssets: asset_hub_rococo_runtime::PoolAssets, + AssetConversion: asset_hub_rococo_runtime::AssetConversion, + Balances: asset_hub_rococo_runtime::Balances, + } + }, +} + +// AssetHubRococo implementation +impl_accounts_helpers_for_parachain!(AssetHubRococo); +impl_assert_events_helpers_for_parachain!(AssetHubRococo); +impl_assets_helpers_for_system_parachain!(AssetHubRococo, Rococo); diff --git a/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-westend/Cargo.toml b/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-westend/Cargo.toml new file mode 100644 index 00000000000..0ff817b6b96 --- /dev/null +++ b/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-westend/Cargo.toml @@ -0,0 +1,25 @@ +[package] +name = "asset-hub-westend-emulated-chain" +version = "0.0.0" +authors.workspace = true +edition.workspace = true +license = "Apache-2.0" +description = "Asset Hub Westend emulated chain" +publish = false + +[dependencies] +serde_json = "1.0.104" + +# Substrate +sp-core = { path = "../../../../../../../../substrate/primitives/core", default-features = false } +sp-runtime = { path = "../../../../../../../../substrate/primitives/runtime", default-features = false } +frame-support = { path = "../../../../../../../../substrate/frame/support", default-features = false } + +# Polakadot +parachains-common = { path = "../../../../../../../parachains/common" } + +# Cumulus +cumulus-primitives-core = { path = "../../../../../../../primitives/core", default-features = false } +emulated-integration-tests-common = { path = "../../../../common", default-features = false } +asset-hub-westend-runtime = { path = "../../../../../../runtimes/assets/asset-hub-westend" } +westend-emulated-chain = { path = "../../../relays/westend" } diff --git a/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-westend/src/genesis.rs b/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-westend/src/genesis.rs new file mode 100644 index 00000000000..19e7a60b030 --- /dev/null +++ b/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-westend/src/genesis.rs @@ -0,0 +1,67 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Substrate +use sp_core::storage::Storage; + +// Cumulus +use emulated_integration_tests_common::{ + accounts, build_genesis_storage_legacy, collators, SAFE_XCM_VERSION, +}; +use parachains_common::Balance; + +pub const PARA_ID: u32 = 1000; +pub const ED: Balance = parachains_common::westend::currency::EXISTENTIAL_DEPOSIT; + +pub fn genesis() -> Storage { + let genesis_config = asset_hub_westend_runtime::RuntimeGenesisConfig { + system: asset_hub_westend_runtime::SystemConfig::default(), + balances: asset_hub_westend_runtime::BalancesConfig { + balances: accounts::init_balances().iter().cloned().map(|k| (k, ED * 4096)).collect(), + }, + parachain_info: asset_hub_westend_runtime::ParachainInfoConfig { + parachain_id: PARA_ID.into(), + ..Default::default() + }, + collator_selection: asset_hub_westend_runtime::CollatorSelectionConfig { + invulnerables: collators::invulnerables().iter().cloned().map(|(acc, _)| acc).collect(), + candidacy_bond: ED * 16, + ..Default::default() + }, + session: asset_hub_westend_runtime::SessionConfig { + keys: collators::invulnerables() + .into_iter() + .map(|(acc, aura)| { + ( + acc.clone(), // account id + acc, // validator id + asset_hub_westend_runtime::SessionKeys { aura }, // session keys + ) + }) + .collect(), + }, + polkadot_xcm: asset_hub_westend_runtime::PolkadotXcmConfig { + safe_xcm_version: Some(SAFE_XCM_VERSION), + ..Default::default() + }, + ..Default::default() + }; + + build_genesis_storage_legacy( + &genesis_config, + asset_hub_westend_runtime::WASM_BINARY + .expect("WASM binary was not built, please build it!"), + ) +} diff --git a/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-westend/src/lib.rs b/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-westend/src/lib.rs new file mode 100644 index 00000000000..804b727c33f --- /dev/null +++ b/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-westend/src/lib.rs @@ -0,0 +1,55 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +pub mod genesis; + +// Substrate +use frame_support::traits::OnInitialize; + +// Cumulus +use emulated_integration_tests_common::{ + impl_accounts_helpers_for_parachain, impl_assert_events_helpers_for_parachain, + impl_assets_helpers_for_system_parachain, xcm_emulator::decl_test_parachains, +}; +use westend_emulated_chain::Westend; + +// AssetHubWestend Parachain declaration +decl_test_parachains! 
{ + pub struct AssetHubWestend { + genesis = genesis::genesis(), + on_init = { + asset_hub_westend_runtime::AuraExt::on_initialize(1); + }, + runtime = asset_hub_westend_runtime, + core = { + XcmpMessageHandler: asset_hub_westend_runtime::XcmpQueue, + LocationToAccountId: asset_hub_westend_runtime::xcm_config::LocationToAccountId, + ParachainInfo: asset_hub_westend_runtime::ParachainInfo, + }, + pallets = { + PolkadotXcm: asset_hub_westend_runtime::PolkadotXcm, + Balances: asset_hub_westend_runtime::Balances, + Assets: asset_hub_westend_runtime::Assets, + ForeignAssets: asset_hub_westend_runtime::ForeignAssets, + PoolAssets: asset_hub_westend_runtime::PoolAssets, + AssetConversion: asset_hub_westend_runtime::AssetConversion, + } + }, +} + +// AssetHubWestend implementation +impl_accounts_helpers_for_parachain!(AssetHubWestend); +impl_assert_events_helpers_for_parachain!(AssetHubWestend); +impl_assets_helpers_for_system_parachain!(AssetHubWestend, Westend); diff --git a/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-wococo/Cargo.toml b/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-wococo/Cargo.toml new file mode 100644 index 00000000000..0f212c15999 --- /dev/null +++ b/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-wococo/Cargo.toml @@ -0,0 +1,26 @@ +[package] +name = "asset-hub-wococo-emulated-chain" +version = "0.0.0" +authors.workspace = true +edition.workspace = true +license = "Apache-2.0" +description = "Asset Hub Wococo emulated chain" +publish = false + +[dependencies] +serde_json = "1.0.104" + +# Substrate +sp-core = { path = "../../../../../../../../substrate/primitives/core", default-features = false } +sp-runtime = { path = "../../../../../../../../substrate/primitives/runtime", default-features = false } +frame-support = { path = "../../../../../../../../substrate/frame/support", default-features = false } + +# Polakadot +parachains-common = { path = "../../../../../../../parachains/common" } + +# Cumulus +cumulus-primitives-core = { path = "../../../../../../../primitives/core", default-features = false } +emulated-integration-tests-common = { path = "../../../../common", default-features = false } +asset-hub-rococo-runtime = { path = "../../../../../../runtimes/assets/asset-hub-rococo" } +wococo-emulated-chain = { path = "../../../relays/wococo" } +asset-hub-rococo-emulated-chain = { path = "../asset-hub-rococo" } diff --git a/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-wococo/src/lib.rs b/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-wococo/src/lib.rs new file mode 100644 index 00000000000..677ca1763cf --- /dev/null +++ b/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-wococo/src/lib.rs @@ -0,0 +1,53 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +// Substrate +use frame_support::traits::OnInitialize; + +// Cumulus +use emulated_integration_tests_common::{ + impl_accounts_helpers_for_parachain, impl_assert_events_helpers_for_parachain, + impl_assets_helpers_for_system_parachain, xcm_emulator::decl_test_parachains, +}; +use wococo_emulated_chain::Wococo; + +// AssetHubWococo Parachain declaration +decl_test_parachains! { + pub struct AssetHubWococo { + genesis = asset_hub_rococo_emulated_chain::genesis::genesis(), + on_init = { + asset_hub_rococo_runtime::AuraExt::on_initialize(1); + }, + runtime = asset_hub_rococo_runtime, + core = { + XcmpMessageHandler: asset_hub_rococo_runtime::XcmpQueue, + LocationToAccountId: asset_hub_rococo_runtime::xcm_config::LocationToAccountId, + ParachainInfo: asset_hub_rococo_runtime::ParachainInfo, + }, + pallets = { + PolkadotXcm: asset_hub_rococo_runtime::PolkadotXcm, + Assets: asset_hub_rococo_runtime::Assets, + ForeignAssets: asset_hub_rococo_runtime::ForeignAssets, + PoolAssets: asset_hub_rococo_runtime::PoolAssets, + AssetConversion: asset_hub_rococo_runtime::AssetConversion, + Balances: asset_hub_rococo_runtime::Balances, + } + }, +} + +// AssetHubWococo implementation +impl_accounts_helpers_for_parachain!(AssetHubWococo); +impl_assert_events_helpers_for_parachain!(AssetHubWococo); +impl_assets_helpers_for_system_parachain!(AssetHubWococo, Wococo); diff --git a/cumulus/parachains/integration-tests/emulated/chains/parachains/bridges/bridge-hub-rococo/Cargo.toml b/cumulus/parachains/integration-tests/emulated/chains/parachains/bridges/bridge-hub-rococo/Cargo.toml new file mode 100644 index 00000000000..43c0f5fd14c --- /dev/null +++ b/cumulus/parachains/integration-tests/emulated/chains/parachains/bridges/bridge-hub-rococo/Cargo.toml @@ -0,0 +1,24 @@ +[package] +name = "bridge-hub-rococo-emulated-chain" +version = "0.0.0" +authors.workspace = true +edition.workspace = true +license = "Apache-2.0" +description = "Bridge Hub Rococo emulated chain" +publish = false + +[dependencies] +serde_json = "1.0.104" + +# Substrate +sp-core = { path = "../../../../../../../../substrate/primitives/core", default-features = false } +sp-runtime = { path = "../../../../../../../../substrate/primitives/runtime", default-features = false } +frame-support = { path = "../../../../../../../../substrate/frame/support", default-features = false } + +# Polakadot +parachains-common = { path = "../../../../../../../parachains/common" } + +# Cumulus +cumulus-primitives-core = { path = "../../../../../../../primitives/core", default-features = false } +emulated-integration-tests-common = { path = "../../../../common", default-features = false } +bridge-hub-rococo-runtime = { path = "../../../../../../runtimes/bridge-hubs/bridge-hub-rococo" } diff --git a/cumulus/parachains/integration-tests/emulated/chains/parachains/bridges/bridge-hub-rococo/src/genesis.rs b/cumulus/parachains/integration-tests/emulated/chains/parachains/bridges/bridge-hub-rococo/src/genesis.rs new file mode 100644 index 00000000000..299c5909670 --- /dev/null +++ b/cumulus/parachains/integration-tests/emulated/chains/parachains/bridges/bridge-hub-rococo/src/genesis.rs @@ -0,0 +1,83 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Substrate +use sp_core::{sr25519, storage::Storage}; + +// Cumulus +use emulated_integration_tests_common::{ + accounts, build_genesis_storage_legacy, collators, get_account_id_from_seed, SAFE_XCM_VERSION, +}; +use parachains_common::Balance; + +pub const PARA_ID: u32 = 1013; +pub const ED: Balance = parachains_common::rococo::currency::EXISTENTIAL_DEPOSIT; + +pub fn genesis() -> Storage { + let genesis_config = bridge_hub_rococo_runtime::RuntimeGenesisConfig { + system: bridge_hub_rococo_runtime::SystemConfig::default(), + balances: bridge_hub_rococo_runtime::BalancesConfig { + balances: accounts::init_balances().iter().cloned().map(|k| (k, ED * 4096)).collect(), + }, + parachain_info: bridge_hub_rococo_runtime::ParachainInfoConfig { + parachain_id: PARA_ID.into(), + ..Default::default() + }, + collator_selection: bridge_hub_rococo_runtime::CollatorSelectionConfig { + invulnerables: collators::invulnerables().iter().cloned().map(|(acc, _)| acc).collect(), + candidacy_bond: ED * 16, + ..Default::default() + }, + session: bridge_hub_rococo_runtime::SessionConfig { + keys: collators::invulnerables() + .into_iter() + .map(|(acc, aura)| { + ( + acc.clone(), // account id + acc, // validator id + bridge_hub_rococo_runtime::SessionKeys { aura }, // session keys + ) + }) + .collect(), + }, + polkadot_xcm: bridge_hub_rococo_runtime::PolkadotXcmConfig { + safe_xcm_version: Some(SAFE_XCM_VERSION), + ..Default::default() + }, + bridge_wococo_grandpa: bridge_hub_rococo_runtime::BridgeWococoGrandpaConfig { + owner: Some(get_account_id_from_seed::(accounts::BOB)), + ..Default::default() + }, + bridge_rococo_grandpa: bridge_hub_rococo_runtime::BridgeRococoGrandpaConfig { + owner: Some(get_account_id_from_seed::(accounts::BOB)), + ..Default::default() + }, + bridge_rococo_messages: bridge_hub_rococo_runtime::BridgeRococoMessagesConfig { + owner: Some(get_account_id_from_seed::(accounts::BOB)), + ..Default::default() + }, + bridge_wococo_messages: bridge_hub_rococo_runtime::BridgeWococoMessagesConfig { + owner: Some(get_account_id_from_seed::(accounts::BOB)), + ..Default::default() + }, + ..Default::default() + }; + + build_genesis_storage_legacy( + &genesis_config, + bridge_hub_rococo_runtime::WASM_BINARY + .expect("WASM binary was not built, please build it!"), + ) +} diff --git a/cumulus/parachains/integration-tests/emulated/chains/parachains/bridges/bridge-hub-rococo/src/lib.rs b/cumulus/parachains/integration-tests/emulated/chains/parachains/bridges/bridge-hub-rococo/src/lib.rs new file mode 100644 index 00000000000..d7630954c86 --- /dev/null +++ b/cumulus/parachains/integration-tests/emulated/chains/parachains/bridges/bridge-hub-rococo/src/lib.rs @@ -0,0 +1,49 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +pub mod genesis; + +// Substrate +use frame_support::traits::OnInitialize; + +// Cumulus +use emulated_integration_tests_common::{ + impl_accounts_helpers_for_parachain, impl_assert_events_helpers_for_parachain, + xcm_emulator::decl_test_parachains, +}; + +// BridgeHubRococo Parachain declaration +decl_test_parachains! { + pub struct BridgeHubRococo { + genesis = genesis::genesis(), + on_init = { + bridge_hub_rococo_runtime::AuraExt::on_initialize(1); + }, + runtime = bridge_hub_rococo_runtime, + core = { + XcmpMessageHandler: bridge_hub_rococo_runtime::XcmpQueue, + LocationToAccountId: bridge_hub_rococo_runtime::xcm_config::LocationToAccountId, + ParachainInfo: bridge_hub_rococo_runtime::ParachainInfo, + }, + pallets = { + PolkadotXcm: bridge_hub_rococo_runtime::PolkadotXcm, + Balances: bridge_hub_rococo_runtime::Balances, + } + }, +} + +// BridgeHubRococo implementation +impl_accounts_helpers_for_parachain!(BridgeHubRococo); +impl_assert_events_helpers_for_parachain!(BridgeHubRococo); diff --git a/cumulus/parachains/integration-tests/emulated/chains/parachains/bridges/bridge-hub-westend/Cargo.toml b/cumulus/parachains/integration-tests/emulated/chains/parachains/bridges/bridge-hub-westend/Cargo.toml new file mode 100644 index 00000000000..e5e6fd70739 --- /dev/null +++ b/cumulus/parachains/integration-tests/emulated/chains/parachains/bridges/bridge-hub-westend/Cargo.toml @@ -0,0 +1,24 @@ +[package] +name = "bridge-hub-westend-emulated-chain" +version = "0.0.0" +authors.workspace = true +edition.workspace = true +license = "Apache-2.0" +description = "Bridge Hub Westend emulated chain" +publish = false + +[dependencies] +serde_json = "1.0.104" + +# Substrate +sp-core = { path = "../../../../../../../../substrate/primitives/core", default-features = false } +sp-runtime = { path = "../../../../../../../../substrate/primitives/runtime", default-features = false } +frame-support = { path = "../../../../../../../../substrate/frame/support", default-features = false } + +# Polakadot +parachains-common = { path = "../../../../../../../parachains/common" } + +# Cumulus +cumulus-primitives-core = { path = "../../../../../../../primitives/core", default-features = false } +emulated-integration-tests-common = { path = "../../../../common", default-features = false } +bridge-hub-westend-runtime = { path = "../../../../../../runtimes/bridge-hubs/bridge-hub-westend" } diff --git a/cumulus/parachains/integration-tests/emulated/chains/parachains/bridges/bridge-hub-westend/src/genesis.rs b/cumulus/parachains/integration-tests/emulated/chains/parachains/bridges/bridge-hub-westend/src/genesis.rs new file mode 100644 index 00000000000..5acd3eb2c60 --- /dev/null +++ b/cumulus/parachains/integration-tests/emulated/chains/parachains/bridges/bridge-hub-westend/src/genesis.rs @@ -0,0 +1,75 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Substrate +use sp_core::{sr25519, storage::Storage}; + +// Cumulus +use emulated_integration_tests_common::{ + accounts, build_genesis_storage_legacy, collators, get_account_id_from_seed, SAFE_XCM_VERSION, +}; +use parachains_common::Balance; + +pub const PARA_ID: u32 = 1013; +pub const ED: Balance = parachains_common::westend::currency::EXISTENTIAL_DEPOSIT; + +pub fn genesis() -> Storage { + let genesis_config = bridge_hub_westend_runtime::RuntimeGenesisConfig { + system: bridge_hub_westend_runtime::SystemConfig::default(), + balances: bridge_hub_westend_runtime::BalancesConfig { + balances: accounts::init_balances().iter().cloned().map(|k| (k, ED * 4096)).collect(), + }, + parachain_info: bridge_hub_westend_runtime::ParachainInfoConfig { + parachain_id: PARA_ID.into(), + ..Default::default() + }, + collator_selection: bridge_hub_westend_runtime::CollatorSelectionConfig { + invulnerables: collators::invulnerables().iter().cloned().map(|(acc, _)| acc).collect(), + candidacy_bond: ED * 16, + ..Default::default() + }, + session: bridge_hub_westend_runtime::SessionConfig { + keys: collators::invulnerables() + .into_iter() + .map(|(acc, aura)| { + ( + acc.clone(), // account id + acc, // validator id + bridge_hub_westend_runtime::SessionKeys { aura }, // session keys + ) + }) + .collect(), + }, + polkadot_xcm: bridge_hub_westend_runtime::PolkadotXcmConfig { + safe_xcm_version: Some(SAFE_XCM_VERSION), + ..Default::default() + }, + bridge_rococo_grandpa: bridge_hub_westend_runtime::BridgeRococoGrandpaConfig { + owner: Some(get_account_id_from_seed::(accounts::BOB)), + ..Default::default() + }, + bridge_rococo_messages: bridge_hub_westend_runtime::BridgeRococoMessagesConfig { + owner: Some(get_account_id_from_seed::(accounts::BOB)), + ..Default::default() + }, + ..Default::default() + }; + + build_genesis_storage_legacy( + &genesis_config, + bridge_hub_westend_runtime::WASM_BINARY + .expect("WASM binary was not built, please build it!"), + ) +} diff --git a/cumulus/parachains/integration-tests/emulated/chains/parachains/bridges/bridge-hub-westend/src/lib.rs b/cumulus/parachains/integration-tests/emulated/chains/parachains/bridges/bridge-hub-westend/src/lib.rs new file mode 100644 index 00000000000..436b65cb916 --- /dev/null +++ b/cumulus/parachains/integration-tests/emulated/chains/parachains/bridges/bridge-hub-westend/src/lib.rs @@ -0,0 +1,49 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +pub mod genesis; + +// Substrate +use frame_support::traits::OnInitialize; + +// Cumulus +use emulated_integration_tests_common::{ + impl_accounts_helpers_for_parachain, impl_assert_events_helpers_for_parachain, + xcm_emulator::decl_test_parachains, +}; + +// BridgeHubWestend Parachain declaration +decl_test_parachains! { + pub struct BridgeHubWestend { + genesis = genesis::genesis(), + on_init = { + bridge_hub_westend_runtime::AuraExt::on_initialize(1); + }, + runtime = bridge_hub_westend_runtime, + core = { + XcmpMessageHandler: bridge_hub_westend_runtime::XcmpQueue, + LocationToAccountId: bridge_hub_westend_runtime::xcm_config::LocationToAccountId, + ParachainInfo: bridge_hub_westend_runtime::ParachainInfo, + }, + pallets = { + PolkadotXcm: bridge_hub_westend_runtime::PolkadotXcm, + Balances: bridge_hub_westend_runtime::Balances, + } + }, +} + +// BridgeHubWestend implementation +impl_accounts_helpers_for_parachain!(BridgeHubWestend); +impl_assert_events_helpers_for_parachain!(BridgeHubWestend); diff --git a/cumulus/parachains/integration-tests/emulated/chains/parachains/bridges/bridge-hub-wococo/Cargo.toml b/cumulus/parachains/integration-tests/emulated/chains/parachains/bridges/bridge-hub-wococo/Cargo.toml new file mode 100644 index 00000000000..0b02730a50c --- /dev/null +++ b/cumulus/parachains/integration-tests/emulated/chains/parachains/bridges/bridge-hub-wococo/Cargo.toml @@ -0,0 +1,25 @@ +[package] +name = "bridge-hub-wococo-emulated-chain" +version = "0.0.0" +authors.workspace = true +edition.workspace = true +license = "Apache-2.0" +description = "Bridge Hub Wococo emulated chain" +publish = false + +[dependencies] +serde_json = "1.0.104" + +# Substrate +sp-core = { path = "../../../../../../../../substrate/primitives/core", default-features = false } +sp-runtime = { path = "../../../../../../../../substrate/primitives/runtime", default-features = false } +frame-support = { path = "../../../../../../../../substrate/frame/support", default-features = false } + +# Polakadot +parachains-common = { path = "../../../../../../../parachains/common" } + +# Cumulus +cumulus-primitives-core = { path = "../../../../../../../primitives/core", default-features = false } +emulated-integration-tests-common = { path = "../../../../common", default-features = false } +bridge-hub-rococo-runtime = { path = "../../../../../../runtimes/bridge-hubs/bridge-hub-rococo" } +bridge-hub-rococo-emulated-chain = { path = "../bridge-hub-rococo" } diff --git a/cumulus/parachains/integration-tests/emulated/chains/parachains/bridges/bridge-hub-wococo/src/lib.rs b/cumulus/parachains/integration-tests/emulated/chains/parachains/bridges/bridge-hub-wococo/src/lib.rs new file mode 100644 index 00000000000..6807a2ab8c8 --- /dev/null +++ b/cumulus/parachains/integration-tests/emulated/chains/parachains/bridges/bridge-hub-wococo/src/lib.rs @@ -0,0 +1,47 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +// Substrate +use frame_support::traits::OnInitialize; + +// Cumulus +use emulated_integration_tests_common::{ + impl_accounts_helpers_for_parachain, impl_assert_events_helpers_for_parachain, + xcm_emulator::decl_test_parachains, +}; + +// BridgeHubWococo Parachain declaration +decl_test_parachains! { + pub struct BridgeHubWococo { + genesis = bridge_hub_rococo_emulated_chain::genesis::genesis(), + on_init = { + bridge_hub_rococo_runtime::AuraExt::on_initialize(1); + }, + runtime = bridge_hub_rococo_runtime, + core = { + XcmpMessageHandler: bridge_hub_rococo_runtime::XcmpQueue, + LocationToAccountId: bridge_hub_rococo_runtime::xcm_config::LocationToAccountId, + ParachainInfo: bridge_hub_rococo_runtime::ParachainInfo, + }, + pallets = { + PolkadotXcm: bridge_hub_rococo_runtime::PolkadotXcm, + Balances: bridge_hub_rococo_runtime::Balances, + } + }, +} + +// BridgeHubWococo implementation +impl_accounts_helpers_for_parachain!(BridgeHubWococo); +impl_assert_events_helpers_for_parachain!(BridgeHubWococo); diff --git a/cumulus/parachains/integration-tests/emulated/chains/parachains/testing/penpal/Cargo.toml b/cumulus/parachains/integration-tests/emulated/chains/parachains/testing/penpal/Cargo.toml new file mode 100644 index 00000000000..42aaee3f102 --- /dev/null +++ b/cumulus/parachains/integration-tests/emulated/chains/parachains/testing/penpal/Cargo.toml @@ -0,0 +1,24 @@ +[package] +name = "penpal-emulated-chain" +version = "0.0.0" +authors.workspace = true +edition.workspace = true +license = "Apache-2.0" +description = "Penpal emulated chain" +publish = false + +[dependencies] +serde_json = "1.0.104" + +# Substrate +sp-core = { path = "../../../../../../../../substrate/primitives/core", default-features = false } +sp-runtime = { path = "../../../../../../../../substrate/primitives/runtime", default-features = false } +frame-support = { path = "../../../../../../../../substrate/frame/support", default-features = false } + +# Polakadot +parachains-common = { path = "../../../../../../../parachains/common" } + +# Cumulus +cumulus-primitives-core = { path = "../../../../../../../primitives/core", default-features = false } +emulated-integration-tests-common = { path = "../../../../common", default-features = false } +penpal-runtime = { path = "../../../../../../runtimes/testing/penpal" } diff --git a/cumulus/parachains/integration-tests/emulated/chains/parachains/testing/penpal/src/genesis.rs b/cumulus/parachains/integration-tests/emulated/chains/parachains/testing/penpal/src/genesis.rs new file mode 100644 index 00000000000..8b03b599d5f --- /dev/null +++ b/cumulus/parachains/integration-tests/emulated/chains/parachains/testing/penpal/src/genesis.rs @@ -0,0 +1,71 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +// Substrate +use sp_core::{sr25519, storage::Storage}; + +// Cumulus +use emulated_integration_tests_common::{ + accounts, build_genesis_storage_legacy, collators, get_account_id_from_seed, SAFE_XCM_VERSION, +}; +use parachains_common::Balance; + +// Penpal +pub const PARA_ID_A: u32 = 2000; +pub const PARA_ID_B: u32 = 2001; +pub const ED: Balance = penpal_runtime::EXISTENTIAL_DEPOSIT; + +pub fn genesis(para_id: u32) -> Storage { + let genesis_config = penpal_runtime::RuntimeGenesisConfig { + system: penpal_runtime::SystemConfig::default(), + balances: penpal_runtime::BalancesConfig { + balances: accounts::init_balances().iter().cloned().map(|k| (k, ED * 4096)).collect(), + }, + parachain_info: penpal_runtime::ParachainInfoConfig { + parachain_id: para_id.into(), + ..Default::default() + }, + collator_selection: penpal_runtime::CollatorSelectionConfig { + invulnerables: collators::invulnerables().iter().cloned().map(|(acc, _)| acc).collect(), + candidacy_bond: ED * 16, + ..Default::default() + }, + session: penpal_runtime::SessionConfig { + keys: collators::invulnerables() + .into_iter() + .map(|(acc, aura)| { + ( + acc.clone(), // account id + acc, // validator id + penpal_runtime::SessionKeys { aura }, // session keys + ) + }) + .collect(), + }, + polkadot_xcm: penpal_runtime::PolkadotXcmConfig { + safe_xcm_version: Some(SAFE_XCM_VERSION), + ..Default::default() + }, + sudo: penpal_runtime::SudoConfig { + key: Some(get_account_id_from_seed::("Alice")), + }, + ..Default::default() + }; + + build_genesis_storage_legacy( + &genesis_config, + penpal_runtime::WASM_BINARY.expect("WASM binary was not built, please build it!"), + ) +} diff --git a/cumulus/parachains/integration-tests/emulated/chains/parachains/testing/penpal/src/lib.rs b/cumulus/parachains/integration-tests/emulated/chains/parachains/testing/penpal/src/lib.rs new file mode 100644 index 00000000000..8709d4e9196 --- /dev/null +++ b/cumulus/parachains/integration-tests/emulated/chains/parachains/testing/penpal/src/lib.rs @@ -0,0 +1,65 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +mod genesis; +pub use genesis::{genesis, ED, PARA_ID_A, PARA_ID_B}; + +// Substrate +use frame_support::traits::OnInitialize; + +// Cumulus +use emulated_integration_tests_common::{ + impl_assert_events_helpers_for_parachain, xcm_emulator::decl_test_parachains, +}; + +// Penpal Parachain declaration +decl_test_parachains! 
{ + pub struct PenpalA { + genesis = genesis(PARA_ID_A), + on_init = { + penpal_runtime::AuraExt::on_initialize(1); + }, + runtime = penpal_runtime, + core = { + XcmpMessageHandler: penpal_runtime::XcmpQueue, + LocationToAccountId: penpal_runtime::xcm_config::LocationToAccountId, + ParachainInfo: penpal_runtime::ParachainInfo, + }, + pallets = { + PolkadotXcm: penpal_runtime::PolkadotXcm, + Assets: penpal_runtime::Assets, + } + }, + pub struct PenpalB { + genesis = genesis(PARA_ID_B), + on_init = { + penpal_runtime::AuraExt::on_initialize(1); + }, + runtime = penpal_runtime, + core = { + XcmpMessageHandler: penpal_runtime::XcmpQueue, + LocationToAccountId: penpal_runtime::xcm_config::LocationToAccountId, + ParachainInfo: penpal_runtime::ParachainInfo, + }, + pallets = { + PolkadotXcm: penpal_runtime::PolkadotXcm, + Assets: penpal_runtime::Assets, + } + }, +} + +// Penpal implementation +impl_assert_events_helpers_for_parachain!(PenpalA); +impl_assert_events_helpers_for_parachain!(PenpalB); diff --git a/cumulus/parachains/integration-tests/emulated/chains/relays/rococo/Cargo.toml b/cumulus/parachains/integration-tests/emulated/chains/relays/rococo/Cargo.toml new file mode 100644 index 00000000000..325c7229517 --- /dev/null +++ b/cumulus/parachains/integration-tests/emulated/chains/relays/rococo/Cargo.toml @@ -0,0 +1,29 @@ +[package] +name = "rococo-emulated-chain" +version = "0.0.0" +authors.workspace = true +edition.workspace = true +license = "Apache-2.0" +description = "Rococo emulated chain" +publish = false + +[dependencies] +serde_json = "1.0.104" + +# Substrate +sp-core = { path = "../../../../../../../substrate/primitives/core", default-features = false } +sp-runtime = { path = "../../../../../../../substrate/primitives/runtime", default-features = false } +sp-authority-discovery = { path = "../../../../../../../substrate/primitives/authority-discovery", default-features = false } +sp-consensus-babe = { path = "../../../../../../../substrate/primitives/consensus/babe", default-features = false } +beefy-primitives = { package = "sp-consensus-beefy", path = "../../../../../../../substrate/primitives/consensus/beefy" } +grandpa = { package = "sc-consensus-grandpa", path = "../../../../../../../substrate/client/consensus/grandpa", default-features = false } +pallet-im-online = { path = "../../../../../../../substrate/frame/im-online", default-features = false } + +# Polkadot +polkadot-primitives = { path = "../../../../../../../polkadot/primitives", default-features = false } +rococo-runtime-constants = { path = "../../../../../../../polkadot/runtime/rococo/constants", default-features = false } +rococo-runtime = { path = "../../../../../../../polkadot/runtime/rococo" } + +# Cumulus +parachains-common = { path = "../../../../../../parachains/common" } +emulated-integration-tests-common = { path = "../../../common", default-features = false } diff --git a/cumulus/parachains/integration-tests/emulated/chains/relays/rococo/src/genesis.rs b/cumulus/parachains/integration-tests/emulated/chains/relays/rococo/src/genesis.rs new file mode 100644 index 00000000000..876df212e4c --- /dev/null +++ b/cumulus/parachains/integration-tests/emulated/chains/relays/rococo/src/genesis.rs @@ -0,0 +1,101 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Substrate +use beefy_primitives::ecdsa_crypto::AuthorityId as BeefyId; +use grandpa::AuthorityId as GrandpaId; +use pallet_im_online::sr25519::AuthorityId as ImOnlineId; +use sp_authority_discovery::AuthorityId as AuthorityDiscoveryId; +use sp_consensus_babe::AuthorityId as BabeId; +use sp_core::{sr25519, storage::Storage}; + +// Polkadot +use polkadot_primitives::{AssignmentId, ValidatorId}; + +// Cumulus +use emulated_integration_tests_common::{ + accounts, build_genesis_storage_legacy, get_account_id_from_seed, get_from_seed, + get_host_config, validators, +}; +use parachains_common::Balance; +use rococo_runtime_constants::currency::UNITS as ROC; + +pub const ED: Balance = rococo_runtime_constants::currency::EXISTENTIAL_DEPOSIT; +const ENDOWMENT: u128 = 1_000_000 * ROC; + +fn session_keys( + babe: BabeId, + grandpa: GrandpaId, + im_online: ImOnlineId, + para_validator: ValidatorId, + para_assignment: AssignmentId, + authority_discovery: AuthorityDiscoveryId, + beefy: BeefyId, +) -> rococo_runtime::SessionKeys { + rococo_runtime::SessionKeys { + babe, + grandpa, + im_online, + para_validator, + para_assignment, + authority_discovery, + beefy, + } +} + +pub fn genesis() -> Storage { + let genesis_config = rococo_runtime::RuntimeGenesisConfig { + system: rococo_runtime::SystemConfig::default(), + balances: rococo_runtime::BalancesConfig { + balances: accounts::init_balances().iter().map(|k| (k.clone(), ENDOWMENT)).collect(), + }, + session: rococo_runtime::SessionConfig { + keys: validators::initial_authorities() + .iter() + .map(|x| { + ( + x.0.clone(), + x.0.clone(), + session_keys( + x.2.clone(), + x.3.clone(), + x.4.clone(), + x.5.clone(), + x.6.clone(), + x.7.clone(), + get_from_seed::("Alice"), + ), + ) + }) + .collect::>(), + }, + babe: rococo_runtime::BabeConfig { + authorities: Default::default(), + epoch_config: Some(rococo_runtime::BABE_GENESIS_EPOCH_CONFIG), + ..Default::default() + }, + sudo: rococo_runtime::SudoConfig { + key: Some(get_account_id_from_seed::("Alice")), + }, + configuration: rococo_runtime::ConfigurationConfig { config: get_host_config() }, + registrar: rococo_runtime::RegistrarConfig { + next_free_para_id: polkadot_primitives::LOWEST_PUBLIC_ID, + ..Default::default() + }, + ..Default::default() + }; + + build_genesis_storage_legacy(&genesis_config, rococo_runtime::WASM_BINARY.unwrap()) +} diff --git a/cumulus/parachains/integration-tests/emulated/chains/relays/rococo/src/lib.rs b/cumulus/parachains/integration-tests/emulated/chains/relays/rococo/src/lib.rs new file mode 100644 index 00000000000..f806f4a5d9e --- /dev/null +++ b/cumulus/parachains/integration-tests/emulated/chains/relays/rococo/src/lib.rs @@ -0,0 +1,48 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +pub mod genesis; + +// Cumulus +use emulated_integration_tests_common::{ + impl_accounts_helpers_for_relay_chain, impl_assert_events_helpers_for_relay_chain, + impl_hrmp_channels_helpers_for_relay_chain, impl_send_transact_helpers_for_relay_chain, + xcm_emulator::decl_test_relay_chains, +}; + +// Rococo declaration +decl_test_relay_chains! { + #[api_version(8)] + pub struct Rococo { + genesis = genesis::genesis(), + on_init = (), + runtime = rococo_runtime, + core = { + SovereignAccountOf: rococo_runtime::xcm_config::LocationConverter, + }, + pallets = { + XcmPallet: rococo_runtime::XcmPallet, + Sudo: rococo_runtime::Sudo, + Balances: rococo_runtime::Balances, + Hrmp: rococo_runtime::Hrmp, + } + }, +} + +// Rococo implementation +impl_accounts_helpers_for_relay_chain!(Rococo); +impl_assert_events_helpers_for_relay_chain!(Rococo); +impl_hrmp_channels_helpers_for_relay_chain!(Rococo); +impl_send_transact_helpers_for_relay_chain!(Rococo); diff --git a/cumulus/parachains/integration-tests/emulated/chains/relays/westend/Cargo.toml b/cumulus/parachains/integration-tests/emulated/chains/relays/westend/Cargo.toml new file mode 100644 index 00000000000..20b9737735f --- /dev/null +++ b/cumulus/parachains/integration-tests/emulated/chains/relays/westend/Cargo.toml @@ -0,0 +1,30 @@ +[package] +name = "westend-emulated-chain" +version = "0.0.0" +authors.workspace = true +edition.workspace = true +license = "Apache-2.0" +description = "Westend emulated chain" +publish = false + +[dependencies] +serde_json = "1.0.104" + +# Substrate +sp-core = { path = "../../../../../../../substrate/primitives/core", default-features = false } +sp-runtime = { path = "../../../../../../../substrate/primitives/runtime", default-features = false } +sp-authority-discovery = { path = "../../../../../../../substrate/primitives/authority-discovery", default-features = false } +sp-consensus-babe = { path = "../../../../../../../substrate/primitives/consensus/babe", default-features = false } +beefy-primitives = { package = "sp-consensus-beefy", path = "../../../../../../../substrate/primitives/consensus/beefy" } +grandpa = { package = "sc-consensus-grandpa", path = "../../../../../../../substrate/client/consensus/grandpa", default-features = false } +pallet-im-online = { path = "../../../../../../../substrate/frame/im-online", default-features = false } +pallet-staking = { path = "../../../../../../../substrate/frame/staking", default-features = false } + +# Polkadot +polkadot-primitives = { path = "../../../../../../../polkadot/primitives", default-features = false } +westend-runtime-constants = { path = "../../../../../../../polkadot/runtime/westend/constants", default-features = false } +westend-runtime = { path = "../../../../../../../polkadot/runtime/westend" } + +# Cumulus +parachains-common = { path = "../../../../../../parachains/common" } +emulated-integration-tests-common = { path = "../../../common", default-features = false } diff --git a/cumulus/parachains/integration-tests/emulated/chains/relays/westend/src/genesis.rs 
b/cumulus/parachains/integration-tests/emulated/chains/relays/westend/src/genesis.rs new file mode 100644 index 00000000000..8bdc34f0eed --- /dev/null +++ b/cumulus/parachains/integration-tests/emulated/chains/relays/westend/src/genesis.rs @@ -0,0 +1,109 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Substrate +use beefy_primitives::ecdsa_crypto::AuthorityId as BeefyId; +use grandpa::AuthorityId as GrandpaId; +use pallet_im_online::sr25519::AuthorityId as ImOnlineId; +use sp_authority_discovery::AuthorityId as AuthorityDiscoveryId; +use sp_consensus_babe::AuthorityId as BabeId; +use sp_core::storage::Storage; +use sp_runtime::Perbill; + +// Polkadot +use polkadot_primitives::{AssignmentId, ValidatorId}; + +// Cumulus +use emulated_integration_tests_common::{ + accounts, build_genesis_storage_legacy, get_from_seed, get_host_config, validators, +}; +use parachains_common::Balance; +use westend_runtime_constants::currency::UNITS as WND; + +pub const ED: Balance = westend_runtime_constants::currency::EXISTENTIAL_DEPOSIT; +const ENDOWMENT: u128 = 1_000_000 * WND; +const STASH: u128 = 100 * WND; + +fn session_keys( + babe: BabeId, + grandpa: GrandpaId, + im_online: ImOnlineId, + para_validator: ValidatorId, + para_assignment: AssignmentId, + authority_discovery: AuthorityDiscoveryId, + beefy: BeefyId, +) -> westend_runtime::SessionKeys { + westend_runtime::SessionKeys { + babe, + grandpa, + im_online, + para_validator, + para_assignment, + authority_discovery, + beefy, + } +} + +pub fn genesis() -> Storage { + let genesis_config = westend_runtime::RuntimeGenesisConfig { + system: westend_runtime::SystemConfig::default(), + balances: westend_runtime::BalancesConfig { + balances: accounts::init_balances().iter().cloned().map(|k| (k, ENDOWMENT)).collect(), + }, + session: westend_runtime::SessionConfig { + keys: validators::initial_authorities() + .iter() + .map(|x| { + ( + x.0.clone(), + x.0.clone(), + session_keys( + x.2.clone(), + x.3.clone(), + x.4.clone(), + x.5.clone(), + x.6.clone(), + x.7.clone(), + get_from_seed::("Alice"), + ), + ) + }) + .collect::>(), + }, + staking: westend_runtime::StakingConfig { + validator_count: validators::initial_authorities().len() as u32, + minimum_validator_count: 1, + stakers: validators::initial_authorities() + .iter() + .map(|x| { + (x.0.clone(), x.1.clone(), STASH, westend_runtime::StakerStatus::Validator) + }) + .collect(), + invulnerables: validators::initial_authorities().iter().map(|x| x.0.clone()).collect(), + force_era: pallet_staking::Forcing::ForceNone, + slash_reward_fraction: Perbill::from_percent(10), + ..Default::default() + }, + babe: westend_runtime::BabeConfig { + authorities: Default::default(), + epoch_config: Some(westend_runtime::BABE_GENESIS_EPOCH_CONFIG), + ..Default::default() + }, + configuration: westend_runtime::ConfigurationConfig { config: get_host_config() }, + ..Default::default() + }; + + 
build_genesis_storage_legacy(&genesis_config, westend_runtime::WASM_BINARY.unwrap()) +} diff --git a/cumulus/parachains/integration-tests/emulated/chains/relays/westend/src/lib.rs b/cumulus/parachains/integration-tests/emulated/chains/relays/westend/src/lib.rs new file mode 100644 index 00000000000..af45d8db4e6 --- /dev/null +++ b/cumulus/parachains/integration-tests/emulated/chains/relays/westend/src/lib.rs @@ -0,0 +1,50 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +pub mod genesis; + +// Cumulus +use emulated_integration_tests_common::{ + impl_accounts_helpers_for_relay_chain, impl_assert_events_helpers_for_relay_chain, + impl_hrmp_channels_helpers_for_relay_chain, impl_send_transact_helpers_for_relay_chain, + xcm_emulator::decl_test_relay_chains, +}; + +// Westend declaration +decl_test_relay_chains! { + #[api_version(8)] + pub struct Westend { + genesis = genesis::genesis(), + on_init = (), + runtime = westend_runtime, + core = { + SovereignAccountOf: westend_runtime::xcm_config::LocationConverter, //TODO: rename to SovereignAccountOf, + }, + pallets = { + XcmPallet: westend_runtime::XcmPallet, + Sudo: westend_runtime::Sudo, + Balances: westend_runtime::Balances, + Treasury: westend_runtime::Treasury, + AssetRate: westend_runtime::AssetRate, + Hrmp: westend_runtime::Hrmp, + } + }, +} + +// Westend implementation +impl_accounts_helpers_for_relay_chain!(Westend); +impl_assert_events_helpers_for_relay_chain!(Westend); +impl_hrmp_channels_helpers_for_relay_chain!(Westend); +impl_send_transact_helpers_for_relay_chain!(Westend); diff --git a/cumulus/parachains/integration-tests/emulated/chains/relays/wococo/Cargo.toml b/cumulus/parachains/integration-tests/emulated/chains/relays/wococo/Cargo.toml new file mode 100644 index 00000000000..51a87954b8c --- /dev/null +++ b/cumulus/parachains/integration-tests/emulated/chains/relays/wococo/Cargo.toml @@ -0,0 +1,30 @@ +[package] +name = "wococo-emulated-chain" +version = "0.0.0" +authors.workspace = true +edition.workspace = true +license = "Apache-2.0" +description = "Wococo emulated chain" +publish = false + +[dependencies] +serde_json = "1.0.104" + +# Substrate +sp-core = { path = "../../../../../../../substrate/primitives/core", default-features = false } +sp-runtime = { path = "../../../../../../../substrate/primitives/runtime", default-features = false } +sp-authority-discovery = { path = "../../../../../../../substrate/primitives/authority-discovery", default-features = false } +sp-consensus-babe = { path = "../../../../../../../substrate/primitives/consensus/babe", default-features = false } +beefy-primitives = { package = "sp-consensus-beefy", path = "../../../../../../../substrate/primitives/consensus/beefy" } +grandpa = { package = "sc-consensus-grandpa", path = "../../../../../../../substrate/client/consensus/grandpa", default-features = false } +pallet-im-online = { path = "../../../../../../../substrate/frame/im-online", 
default-features = false } + +# Polkadot +polkadot-primitives = { path = "../../../../../../../polkadot/primitives", default-features = false } +rococo-runtime-constants = { path = "../../../../../../../polkadot/runtime/rococo/constants", default-features = false } +rococo-runtime = { path = "../../../../../../../polkadot/runtime/rococo" } + +# Cumulus +parachains-common = { path = "../../../../../../parachains/common" } +emulated-integration-tests-common = { path = "../../../common", default-features = false } +rococo-emulated-chain = { path = "../rococo" } diff --git a/cumulus/parachains/integration-tests/emulated/chains/relays/wococo/src/lib.rs b/cumulus/parachains/integration-tests/emulated/chains/relays/wococo/src/lib.rs new file mode 100644 index 00000000000..a04deee330f --- /dev/null +++ b/cumulus/parachains/integration-tests/emulated/chains/relays/wococo/src/lib.rs @@ -0,0 +1,46 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Cumulus +use emulated_integration_tests_common::{ + impl_accounts_helpers_for_relay_chain, impl_assert_events_helpers_for_relay_chain, + impl_hrmp_channels_helpers_for_relay_chain, impl_send_transact_helpers_for_relay_chain, + xcm_emulator::decl_test_relay_chains, +}; + +// Wococo declaration +decl_test_relay_chains! 
{ + #[api_version(8)] + pub struct Wococo { + genesis = rococo_emulated_chain::genesis::genesis(), + on_init = (), + runtime = rococo_runtime, + core = { + SovereignAccountOf: rococo_runtime::xcm_config::LocationConverter, + }, + pallets = { + XcmPallet: rococo_runtime::XcmPallet, + Sudo: rococo_runtime::Sudo, + Balances: rococo_runtime::Balances, + Hrmp: rococo_runtime::Hrmp, + } + }, +} + +// Wococo implementation +impl_accounts_helpers_for_relay_chain!(Wococo); +impl_assert_events_helpers_for_relay_chain!(Wococo); +impl_hrmp_channels_helpers_for_relay_chain!(Wococo); +impl_send_transact_helpers_for_relay_chain!(Wococo); diff --git a/cumulus/parachains/integration-tests/emulated/common/Cargo.toml b/cumulus/parachains/integration-tests/emulated/common/Cargo.toml index 16a37e94be5..08bb284cded 100644 --- a/cumulus/parachains/integration-tests/emulated/common/Cargo.toml +++ b/cumulus/parachains/integration-tests/emulated/common/Cargo.toml @@ -1,11 +1,10 @@ [package] -name = "integration-tests-common" +name = "emulated-integration-tests-common" version = "1.0.0" authors.workspace = true edition.workspace = true license = "Apache-2.0" description = "Common resources for integration testing with xcm-emulator" -publish = false [dependencies] codec = { package = "parity-scale-codec", version = "3.4.0", default-features = false } @@ -21,75 +20,26 @@ sp-core = { path = "../../../../../substrate/primitives/core", default-features sp-consensus-babe = { path = "../../../../../substrate/primitives/consensus/babe", default-features = false} pallet-assets = { path = "../../../../../substrate/frame/assets", default-features = false} pallet-balances = { path = "../../../../../substrate/frame/balances", default-features = false} -pallet-staking = { path = "../../../../../substrate/frame/staking", default-features = false} pallet-message-queue = { path = "../../../../../substrate/frame/message-queue", default-features = false} pallet-im-online = { path = "../../../../../substrate/frame/im-online", default-features = false} beefy-primitives = { package = "sp-consensus-beefy", path = "../../../../../substrate/primitives/consensus/beefy" } -sc-chain-spec = { path = "../../../../../substrate/client/chain-spec", default-features = false } # Polkadot -polkadot-core-primitives = { path = "../../../../../polkadot/core-primitives", default-features = false} -polkadot-parachain-primitives = { path = "../../../../../polkadot/parachain", default-features = false} polkadot-service = { path = "../../../../../polkadot/node/service", default-features = false, features = ["full-node"] } polkadot-primitives = { path = "../../../../../polkadot/primitives", default-features = false} polkadot-runtime-parachains = { path = "../../../../../polkadot/runtime/parachains" } -rococo-runtime = { path = "../../../../../polkadot/runtime/rococo" } -rococo-runtime-constants = { path = "../../../../../polkadot/runtime/rococo/constants" } -westend-runtime = { path = "../../../../../polkadot/runtime/westend" } -westend-runtime-constants = { path = "../../../../../polkadot/runtime/westend/constants" } xcm = { package = "staging-xcm", path = "../../../../../polkadot/xcm", default-features = false} pallet-xcm = { path = "../../../../../polkadot/xcm/pallet-xcm", default-features = false} # Cumulus parachains-common = { path = "../../../common" } cumulus-primitives-core = { path = "../../../../primitives/core" } -penpal-runtime = { path = "../../../runtimes/testing/penpal" } -asset-hub-polkadot-runtime = { path = 
"../../../runtimes/assets/asset-hub-polkadot" } -asset-hub-kusama-runtime = { path = "../../../runtimes/assets/asset-hub-kusama" } -asset-hub-rococo-runtime = { path = "../../../runtimes/assets/asset-hub-rococo" } -asset-hub-westend-runtime = { path = "../../../runtimes/assets/asset-hub-westend" } -collectives-polkadot-runtime = { path = "../../../runtimes/collectives/collectives-polkadot" } -bridge-hub-kusama-runtime = { path = "../../../runtimes/bridge-hubs/bridge-hub-kusama" } -bridge-hub-polkadot-runtime = { path = "../../../runtimes/bridge-hubs/bridge-hub-polkadot" } -bridge-hub-rococo-runtime = { path = "../../../runtimes/bridge-hubs/bridge-hub-rococo" } -bridge-hub-westend-runtime = { path = "../../../runtimes/bridge-hubs/bridge-hub-westend" } -xcm-emulator = { default-features = false, path = "../../../../xcm/xcm-emulator" } -cumulus-pallet-xcmp-queue = { default-features = false, path = "../../../../pallets/xcmp-queue" } +xcm-emulator = { path = "../../../../xcm/xcm-emulator", default-features = false} +cumulus-pallet-xcmp-queue = { path = "../../../../pallets/xcmp-queue", default-features = false} cumulus-pallet-parachain-system = { path = "../../../../pallets/parachain-system" } +asset-test-utils = { path = "../../../runtimes/assets/test-utils" } + +# Bridges bp-messages = { path = "../../../../../bridges/primitives/messages" } pallet-bridge-messages = { path = "../../../../../bridges/modules/messages" } bridge-runtime-common = { path = "../../../../../bridges/bin/runtime-common" } - -[features] -runtime-benchmarks = [ - "asset-hub-kusama-runtime/runtime-benchmarks", - "asset-hub-polkadot-runtime/runtime-benchmarks", - "asset-hub-rococo-runtime/runtime-benchmarks", - "asset-hub-westend-runtime/runtime-benchmarks", - "bridge-hub-kusama-runtime/runtime-benchmarks", - "bridge-hub-polkadot-runtime/runtime-benchmarks", - "bridge-hub-rococo-runtime/runtime-benchmarks", - "bridge-hub-westend-runtime/runtime-benchmarks", - "bridge-runtime-common/runtime-benchmarks", - "collectives-polkadot-runtime/runtime-benchmarks", - "cumulus-pallet-parachain-system/runtime-benchmarks", - "cumulus-pallet-xcmp-queue/runtime-benchmarks", - "cumulus-primitives-core/runtime-benchmarks", - "frame-support/runtime-benchmarks", - "pallet-assets/runtime-benchmarks", - "pallet-balances/runtime-benchmarks", - "pallet-bridge-messages/runtime-benchmarks", - "pallet-im-online/runtime-benchmarks", - "pallet-message-queue/runtime-benchmarks", - "pallet-staking/runtime-benchmarks", - "pallet-xcm/runtime-benchmarks", - "parachains-common/runtime-benchmarks", - "penpal-runtime/runtime-benchmarks", - "polkadot-parachain-primitives/runtime-benchmarks", - "polkadot-primitives/runtime-benchmarks", - "polkadot-runtime-parachains/runtime-benchmarks", - "polkadot-service/runtime-benchmarks", - "rococo-runtime/runtime-benchmarks", - "sp-runtime/runtime-benchmarks", - "westend-runtime/runtime-benchmarks", -] diff --git a/cumulus/parachains/integration-tests/emulated/common/src/constants.rs b/cumulus/parachains/integration-tests/emulated/common/src/constants.rs deleted file mode 100644 index 1dffbccbba3..00000000000 --- a/cumulus/parachains/integration-tests/emulated/common/src/constants.rs +++ /dev/null @@ -1,1219 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Substrate -use beefy_primitives::ecdsa_crypto::AuthorityId as BeefyId; -use grandpa::AuthorityId as GrandpaId; -use pallet_im_online::sr25519::AuthorityId as ImOnlineId; -use sc_chain_spec::GenesisConfigBuilderRuntimeCaller; -use sp_authority_discovery::AuthorityId as AuthorityDiscoveryId; -use sp_consensus_babe::AuthorityId as BabeId; -use sp_core::{sr25519, storage::Storage, Pair, Public}; -#[cfg(test)] -use sp_runtime::BuildStorage; -use sp_runtime::{ - traits::{IdentifyAccount, Verify}, - MultiSignature, Perbill, -}; - -// Cumulus -use parachains_common::{AccountId, AssetHubPolkadotAuraId, AuraId, Balance, BlockNumber}; -use polkadot_parachain_primitives::primitives::{HeadData, ValidationCode}; -use polkadot_primitives::{AssignmentId, ValidatorId}; -use polkadot_runtime_parachains::{ - configuration::HostConfiguration, - paras::{ParaGenesisArgs, ParaKind}, -}; -use polkadot_service::chain_spec::get_authority_keys_from_seed_no_beefy; -use xcm; - -pub const XCM_V2: u32 = 3; -pub const XCM_V3: u32 = 2; -pub const REF_TIME_THRESHOLD: u64 = 33; -pub const PROOF_SIZE_THRESHOLD: u64 = 33; - -type AccountPublic = ::Signer; - -/// Helper function to generate a crypto pair from seed -fn get_from_seed(seed: &str) -> ::Public { - TPublic::Pair::from_string(&format!("//{}", seed), None) - .expect("static values are valid; qed") - .public() -} - -/// Helper function to generate an account ID from seed. -fn get_account_id_from_seed(seed: &str) -> AccountId -where - AccountPublic: From<::Public>, -{ - AccountPublic::from(get_from_seed::(seed)).into_account() -} - -/// Helper function to build the genesis storage using given json patch and code -fn build_genesis_storage(patch: serde_json::Value, code: &[u8]) -> Storage { - let mut storage = GenesisConfigBuilderRuntimeCaller::new(code) - .get_storage_for_patch(patch) - .unwrap(); - storage - .top - .insert(sp_core::storage::well_known_keys::CODE.to_vec(), code.into()); - storage -} - -#[cfg(test)] -/// Helper function used in tests to build the genesis storage using given RuntimeGenesisConfig and -/// code Used in `legacy_vs_json_check` submods to verify storage building with JSON patch against -/// building with RuntimeGenesisConfig struct. 
-fn build_genesis_storage_legacy(builder: &dyn BuildStorage, code: &[u8]) -> Storage { - let mut storage = builder.build_storage().unwrap(); - storage - .top - .insert(sp_core::storage::well_known_keys::CODE.to_vec(), code.into()); - storage -} - -pub mod accounts { - use super::*; - pub const ALICE: &str = "Alice"; - pub const BOB: &str = "Bob"; - pub const CHARLIE: &str = "Charlie"; - pub const DAVE: &str = "Dave"; - pub const EVE: &str = "Eve"; - pub const FERDIE: &str = "Ferdei"; - pub const ALICE_STASH: &str = "Alice//stash"; - pub const BOB_STASH: &str = "Bob//stash"; - pub const CHARLIE_STASH: &str = "Charlie//stash"; - pub const DAVE_STASH: &str = "Dave//stash"; - pub const EVE_STASH: &str = "Eve//stash"; - pub const FERDIE_STASH: &str = "Ferdie//stash"; - pub const FERDIE_BEEFY: &str = "Ferdie//stash"; - - pub fn init_balances() -> Vec { - vec![ - get_account_id_from_seed::(ALICE), - get_account_id_from_seed::(BOB), - get_account_id_from_seed::(CHARLIE), - get_account_id_from_seed::(DAVE), - get_account_id_from_seed::(EVE), - get_account_id_from_seed::(FERDIE), - get_account_id_from_seed::(ALICE_STASH), - get_account_id_from_seed::(BOB_STASH), - get_account_id_from_seed::(CHARLIE_STASH), - get_account_id_from_seed::(DAVE_STASH), - get_account_id_from_seed::(EVE_STASH), - get_account_id_from_seed::(FERDIE_STASH), - ] - } -} - -pub mod collators { - use super::*; - - pub fn invulnerables_asset_hub_polkadot() -> Vec<(AccountId, AssetHubPolkadotAuraId)> { - vec![ - ( - get_account_id_from_seed::("Alice"), - get_from_seed::("Alice"), - ), - ( - get_account_id_from_seed::("Bob"), - get_from_seed::("Bob"), - ), - ] - } - - pub fn invulnerables() -> Vec<(AccountId, AuraId)> { - vec![ - ( - get_account_id_from_seed::("Alice"), - get_from_seed::("Alice"), - ), - (get_account_id_from_seed::("Bob"), get_from_seed::("Bob")), - ] - } -} - -pub mod validators { - use super::*; - - pub fn initial_authorities() -> Vec<( - AccountId, - AccountId, - BabeId, - GrandpaId, - ImOnlineId, - ValidatorId, - AssignmentId, - AuthorityDiscoveryId, - )> { - vec![get_authority_keys_from_seed_no_beefy("Alice")] - } -} - -/// The default XCM version to set in genesis config. 
-const SAFE_XCM_VERSION: u32 = xcm::prelude::XCM_VERSION; - -// Westend -pub mod westend { - use super::*; - use westend_runtime_constants::currency::UNITS as WND; - pub const ED: Balance = westend_runtime_constants::currency::EXISTENTIAL_DEPOSIT; - const ENDOWMENT: u128 = 1_000_000 * WND; - const STASH: u128 = 100 * WND; - - pub fn get_host_config() -> HostConfiguration { - HostConfiguration { - max_upward_queue_count: 10, - max_upward_queue_size: 51200, - max_upward_message_size: 51200, - max_upward_message_num_per_candidate: 10, - max_downward_message_size: 51200, - hrmp_sender_deposit: 100_000_000_000, - hrmp_recipient_deposit: 100_000_000_000, - hrmp_channel_max_capacity: 1000, - hrmp_channel_max_message_size: 102400, - hrmp_channel_max_total_size: 102400, - hrmp_max_parachain_outbound_channels: 30, - hrmp_max_parachain_inbound_channels: 30, - ..Default::default() - } - } - - fn session_keys( - babe: BabeId, - grandpa: GrandpaId, - im_online: ImOnlineId, - para_validator: ValidatorId, - para_assignment: AssignmentId, - authority_discovery: AuthorityDiscoveryId, - beefy: BeefyId, - ) -> westend_runtime::SessionKeys { - westend_runtime::SessionKeys { - babe, - grandpa, - im_online, - para_validator, - para_assignment, - authority_discovery, - beefy, - } - } - - pub fn genesis() -> Storage { - let genesis_config = serde_json::json!({ - "balances": { - "balances": accounts::init_balances() - .iter() - .cloned() - .map(|k| (k, ENDOWMENT)) - .collect::>(), - }, - "session": { - "keys": validators::initial_authorities() - .iter() - .map(|x| { - ( - x.0.clone(), - x.0.clone(), - westend::session_keys( - x.2.clone(), - x.3.clone(), - x.4.clone(), - x.5.clone(), - x.6.clone(), - x.7.clone(), - get_from_seed::("Alice"), - ), - ) - }) - .collect::>(), - }, - "staking": { - "validatorCount": validators::initial_authorities().len() as u32, - "minimumValidatorCount": 1, - "stakers": validators::initial_authorities() - .iter() - .map(|x| { - (x.0.clone(), x.1.clone(), STASH, westend_runtime::StakerStatus::::Validator) - }) - .collect::>(), - "invulnerables": validators::initial_authorities() - .iter() - .map(|x| x.0.clone()) - .collect::>(), - "forceEra": pallet_staking::Forcing::ForceNone, - "slashRewardFraction": Perbill::from_percent(10), - }, - "babe": { - "epochConfig": Some(westend_runtime::BABE_GENESIS_EPOCH_CONFIG), - }, - "configuration": { "config": get_host_config() }, - }); - - build_genesis_storage(genesis_config, westend_runtime::WASM_BINARY.unwrap()) - } - - #[cfg(test)] - mod legacy_vs_json_check { - use super::*; - fn genesis() -> Storage { - let genesis_config = westend_runtime::RuntimeGenesisConfig { - system: westend_runtime::SystemConfig::default(), - balances: westend_runtime::BalancesConfig { - balances: accounts::init_balances() - .iter() - .cloned() - .map(|k| (k, ENDOWMENT)) - .collect(), - }, - session: westend_runtime::SessionConfig { - keys: validators::initial_authorities() - .iter() - .map(|x| { - ( - x.0.clone(), - x.0.clone(), - westend::session_keys( - x.2.clone(), - x.3.clone(), - x.4.clone(), - x.5.clone(), - x.6.clone(), - x.7.clone(), - get_from_seed::("Alice"), - ), - ) - }) - .collect::>(), - }, - staking: westend_runtime::StakingConfig { - validator_count: validators::initial_authorities().len() as u32, - minimum_validator_count: 1, - stakers: validators::initial_authorities() - .iter() - .map(|x| { - ( - x.0.clone(), - x.1.clone(), - STASH, - westend_runtime::StakerStatus::Validator, - ) - }) - .collect(), - invulnerables: validators::initial_authorities() - 
.iter() - .map(|x| x.0.clone()) - .collect(), - force_era: pallet_staking::Forcing::ForceNone, - slash_reward_fraction: Perbill::from_percent(10), - ..Default::default() - }, - babe: westend_runtime::BabeConfig { - authorities: Default::default(), - epoch_config: Some(westend_runtime::BABE_GENESIS_EPOCH_CONFIG), - ..Default::default() - }, - configuration: westend_runtime::ConfigurationConfig { config: get_host_config() }, - ..Default::default() - }; - - build_genesis_storage_legacy(&genesis_config, westend_runtime::WASM_BINARY.unwrap()) - } - - #[test] - fn test_genesis() { - let j1 = super::genesis(); - let j2 = genesis(); - - assert_eq!(j1.top, j2.top); - assert_eq!(j1.children_default, j2.children_default); - } - } -} - -// Rococo -pub mod rococo { - use super::*; - pub const ED: Balance = rococo_runtime_constants::currency::EXISTENTIAL_DEPOSIT; - use rococo_runtime_constants::currency::UNITS as ROC; - const ENDOWMENT: u128 = 1_000_000 * ROC; - - pub fn get_host_config() -> HostConfiguration { - HostConfiguration { - max_upward_queue_count: 10, - max_upward_queue_size: 51200, - max_upward_message_size: 51200, - max_upward_message_num_per_candidate: 10, - max_downward_message_size: 51200, - hrmp_sender_deposit: 0, - hrmp_recipient_deposit: 0, - hrmp_channel_max_capacity: 1000, - hrmp_channel_max_message_size: 102400, - hrmp_channel_max_total_size: 102400, - hrmp_max_parachain_outbound_channels: 30, - hrmp_max_parachain_inbound_channels: 30, - ..Default::default() - } - } - - fn session_keys( - babe: BabeId, - grandpa: GrandpaId, - im_online: ImOnlineId, - para_validator: ValidatorId, - para_assignment: AssignmentId, - authority_discovery: AuthorityDiscoveryId, - beefy: BeefyId, - ) -> rococo_runtime::SessionKeys { - rococo_runtime::SessionKeys { - babe, - grandpa, - im_online, - para_validator, - para_assignment, - authority_discovery, - beefy, - } - } - - pub fn genesis() -> Storage { - let genesis_config = serde_json::json!({ - "balances": { - "balances": accounts::init_balances() - .iter() - .map(|k| (k.clone(), ENDOWMENT)) - .collect::>(), - }, - // indices: rococo_runtime::IndicesConfig { indices: vec![] }, - "session": { - "keys": validators::initial_authorities() - .iter() - .map(|x| { - ( - x.0.clone(), - x.0.clone(), - session_keys( - x.2.clone(), - x.3.clone(), - x.4.clone(), - x.5.clone(), - x.6.clone(), - x.7.clone(), - get_from_seed::("Alice"), - ), - ) - }) - .collect::>(), - }, - "babe": { - "epochConfig": Some(rococo_runtime::BABE_GENESIS_EPOCH_CONFIG), - }, - "sudo": { - "key": Some(get_account_id_from_seed::("Alice")), - }, - "configuration": { "config": get_host_config() }, - "paras": rococo_runtime::ParasConfig { - paras: vec![ - ( - asset_hub_rococo::PARA_ID.into(), - ParaGenesisArgs { - genesis_head: HeadData::default(), - validation_code: ValidationCode( - asset_hub_rococo_runtime::WASM_BINARY.unwrap().to_vec(), - ), - para_kind: ParaKind::Parachain, - }, - ), - ( - penpal::PARA_ID_A.into(), - ParaGenesisArgs { - genesis_head: HeadData::default(), - validation_code: ValidationCode( - penpal_runtime::WASM_BINARY.unwrap().to_vec(), - ), - para_kind: ParaKind::Parachain, - }, - ), - ( - penpal::PARA_ID_B.into(), - ParaGenesisArgs { - genesis_head: HeadData::default(), - validation_code: ValidationCode( - penpal_runtime::WASM_BINARY.unwrap().to_vec(), - ), - para_kind: ParaKind::Parachain, - }, - ), - ], - ..Default::default() - }, - "registrar": { - "nextFreeParaId": polkadot_primitives::LOWEST_PUBLIC_ID, - }, - }); - - build_genesis_storage(genesis_config, 
rococo_runtime::WASM_BINARY.unwrap()) - } - - #[cfg(test)] - mod legacy_vs_json_check { - use super::*; - fn genesis() -> Storage { - let genesis_config = rococo_runtime::RuntimeGenesisConfig { - system: rococo_runtime::SystemConfig::default(), - balances: rococo_runtime::BalancesConfig { - balances: accounts::init_balances() - .iter() - .map(|k| (k.clone(), ENDOWMENT)) - .collect(), - }, - // indices: rococo_runtime::IndicesConfig { indices: vec![] }, - session: rococo_runtime::SessionConfig { - keys: validators::initial_authorities() - .iter() - .map(|x| { - ( - x.0.clone(), - x.0.clone(), - session_keys( - x.2.clone(), - x.3.clone(), - x.4.clone(), - x.5.clone(), - x.6.clone(), - x.7.clone(), - get_from_seed::("Alice"), - ), - ) - }) - .collect::>(), - }, - babe: rococo_runtime::BabeConfig { - authorities: Default::default(), - epoch_config: Some(rococo_runtime::BABE_GENESIS_EPOCH_CONFIG), - ..Default::default() - }, - sudo: rococo_runtime::SudoConfig { - key: Some(get_account_id_from_seed::("Alice")), - }, - configuration: rococo_runtime::ConfigurationConfig { config: get_host_config() }, - paras: rococo_runtime::ParasConfig { - paras: vec![ - ( - asset_hub_rococo::PARA_ID.into(), - ParaGenesisArgs { - genesis_head: HeadData::default(), - validation_code: ValidationCode( - asset_hub_rococo_runtime::WASM_BINARY.unwrap().to_vec(), - ), - para_kind: ParaKind::Parachain, - }, - ), - ( - penpal::PARA_ID_A.into(), - ParaGenesisArgs { - genesis_head: HeadData::default(), - validation_code: ValidationCode( - penpal_runtime::WASM_BINARY.unwrap().to_vec(), - ), - para_kind: ParaKind::Parachain, - }, - ), - ( - penpal::PARA_ID_B.into(), - ParaGenesisArgs { - genesis_head: HeadData::default(), - validation_code: ValidationCode( - penpal_runtime::WASM_BINARY.unwrap().to_vec(), - ), - para_kind: ParaKind::Parachain, - }, - ), - ], - ..Default::default() - }, - registrar: rococo_runtime::RegistrarConfig { - next_free_para_id: polkadot_primitives::LOWEST_PUBLIC_ID, - ..Default::default() - }, - ..Default::default() - }; - - build_genesis_storage_legacy(&genesis_config, rococo_runtime::WASM_BINARY.unwrap()) - } - - #[test] - fn test_genesis() { - let j1 = super::genesis(); - let j2 = genesis(); - - assert_eq!(j1.top, j2.top); - assert_eq!(j1.children_default, j2.children_default); - } - } -} - -// Asset Hub Westend -pub mod asset_hub_westend { - use super::*; - pub const PARA_ID: u32 = 1000; - pub const ED: Balance = parachains_common::westend::currency::EXISTENTIAL_DEPOSIT; - - pub fn genesis() -> Storage { - let genesis_config = serde_json::json!({ - "balances": { - "balances": accounts::init_balances() - .iter() - .cloned() - .map(|k| (k, ED * 4096)) - .collect::>(), - }, - "parachainInfo": { - "parachainId": cumulus_primitives_core::ParaId::from(PARA_ID), - }, - "collatorSelection": { - "invulnerables": collators::invulnerables() - .iter() - .cloned() - .map(|(acc, _)| acc) - .collect::>(), - "candidacyBond": ED * 16, - }, - "session": { - "keys": collators::invulnerables() - .into_iter() - .map(|(acc, aura)| { - ( - acc.clone(), // account id - acc, // validator id - asset_hub_westend_runtime::SessionKeys { aura }, // session keys - ) - }) - .collect::>(), - }, - "polkadotXcm": { - "safeXcmVersion": Some(SAFE_XCM_VERSION), - }, - }); - - build_genesis_storage( - genesis_config, - asset_hub_westend_runtime::WASM_BINARY - .expect("WASM binary was not built, please build it!"), - ) - } - - #[cfg(test)] - mod legacy_vs_json_check { - use super::*; - fn genesis() -> Storage { - let genesis_config = 
asset_hub_westend_runtime::RuntimeGenesisConfig { - system: asset_hub_westend_runtime::SystemConfig::default(), - balances: asset_hub_westend_runtime::BalancesConfig { - balances: accounts::init_balances() - .iter() - .cloned() - .map(|k| (k, ED * 4096)) - .collect(), - }, - parachain_info: asset_hub_westend_runtime::ParachainInfoConfig { - parachain_id: PARA_ID.into(), - ..Default::default() - }, - collator_selection: asset_hub_westend_runtime::CollatorSelectionConfig { - invulnerables: collators::invulnerables() - .iter() - .cloned() - .map(|(acc, _)| acc) - .collect(), - candidacy_bond: ED * 16, - ..Default::default() - }, - session: asset_hub_westend_runtime::SessionConfig { - keys: collators::invulnerables() - .into_iter() - .map(|(acc, aura)| { - ( - acc.clone(), // account id - acc, // validator id - asset_hub_westend_runtime::SessionKeys { aura }, // session keys - ) - }) - .collect(), - }, - polkadot_xcm: asset_hub_westend_runtime::PolkadotXcmConfig { - safe_xcm_version: Some(SAFE_XCM_VERSION), - ..Default::default() - }, - ..Default::default() - }; - - build_genesis_storage_legacy( - &genesis_config, - asset_hub_westend_runtime::WASM_BINARY - .expect("WASM binary was not built, please build it!"), - ) - } - - #[test] - fn test_genesis() { - let j1 = super::genesis(); - let j2 = genesis(); - - assert_eq!(j1.top, j2.top); - assert_eq!(j1.children_default, j2.children_default); - } - } -} - -pub mod asset_hub_rococo { - use super::*; - pub const PARA_ID: u32 = 1000; - pub const ED: Balance = parachains_common::rococo::currency::EXISTENTIAL_DEPOSIT; - - pub fn genesis() -> Storage { - let genesis_config = serde_json::json!({ - "balances": { - "balances": accounts::init_balances() - .iter() - .cloned() - .map(|k| (k, ED * 4096 * 4096)) - .collect::>(), - }, - "parachainInfo": { - "parachainId": cumulus_primitives_core::ParaId::from(PARA_ID), - }, - "collatorSelection": { - "invulnerables": collators::invulnerables() - .iter() - .cloned() - .map(|(acc, _)| acc) - .collect::>(), - "candidacyBond": ED * 16, - }, - "session": { - "keys": collators::invulnerables() - .into_iter() - .map(|(acc, aura)| { - ( - acc.clone(), // account id - acc, // validator id - asset_hub_rococo_runtime::SessionKeys { aura }, // session keys - ) - }) - .collect::>(), - }, - "polkadotXcm": { - "safeXcmVersion": Some(SAFE_XCM_VERSION), - }, - }); - - build_genesis_storage( - genesis_config, - asset_hub_rococo_runtime::WASM_BINARY - .expect("WASM binary was not built, please build it!"), - ) - } - - #[cfg(test)] - mod legacy_vs_json_check { - use super::*; - pub fn genesis() -> Storage { - let genesis_config = asset_hub_rococo_runtime::RuntimeGenesisConfig { - system: asset_hub_rococo_runtime::SystemConfig::default(), - balances: asset_hub_rococo_runtime::BalancesConfig { - balances: accounts::init_balances() - .iter() - .cloned() - .map(|k| (k, ED * 4096 * 4096)) - .collect(), - }, - parachain_info: asset_hub_rococo_runtime::ParachainInfoConfig { - parachain_id: PARA_ID.into(), - ..Default::default() - }, - collator_selection: asset_hub_rococo_runtime::CollatorSelectionConfig { - invulnerables: collators::invulnerables() - .iter() - .cloned() - .map(|(acc, _)| acc) - .collect(), - candidacy_bond: ED * 16, - ..Default::default() - }, - session: asset_hub_rococo_runtime::SessionConfig { - keys: collators::invulnerables() - .into_iter() - .map(|(acc, aura)| { - ( - acc.clone(), // account id - acc, // validator id - asset_hub_rococo_runtime::SessionKeys { aura }, // session keys - ) - }) - .collect(), - }, - 
polkadot_xcm: asset_hub_rococo_runtime::PolkadotXcmConfig { - safe_xcm_version: Some(SAFE_XCM_VERSION), - ..Default::default() - }, - ..Default::default() - }; - - build_genesis_storage_legacy( - &genesis_config, - asset_hub_rococo_runtime::WASM_BINARY - .expect("WASM binary was not built, please build it!"), - ) - } - - #[test] - fn test_genesis() { - let j1 = super::genesis(); - let j2 = genesis(); - - assert_eq!(j1.top, j2.top); - assert_eq!(j1.children_default, j2.children_default); - } - } -} - -pub mod asset_hub_wococo { - use super::*; - pub const PARA_ID: u32 = 1000; - pub const ED: Balance = parachains_common::rococo::currency::EXISTENTIAL_DEPOSIT; - - pub fn genesis() -> Storage { - let genesis_config = serde_json::json!({ - "balances": { - "balances": accounts::init_balances() - .iter() - .cloned() - .map(|k| (k, ED * 4096)) - .collect::>(), - }, - "parachainInfo": { - "parachainId": cumulus_primitives_core::ParaId::from(PARA_ID), - }, - "collatorSelection": { - "invulnerables": collators::invulnerables() - .iter() - .cloned() - .map(|(acc, _)| acc) - .collect::>(), - "candidacyBond": ED * 16, - }, - "session": { - "keys": collators::invulnerables() - .into_iter() - .map(|(acc, aura)| { - ( - acc.clone(), // account id - acc, // validator id - asset_hub_westend_runtime::SessionKeys { aura }, // session keys - ) - }) - .collect::>(), - }, - "polkadotXcm": { - "safeXcmVersion": Some(SAFE_XCM_VERSION), - }, - }); - - build_genesis_storage( - genesis_config, - asset_hub_rococo_runtime::WASM_BINARY - .expect("WASM binary was not built, please build it!"), - ) - } - - #[cfg(test)] - mod legacy_vs_json_check { - use super::*; - pub fn genesis() -> Storage { - let genesis_config = asset_hub_rococo_runtime::RuntimeGenesisConfig { - system: asset_hub_rococo_runtime::SystemConfig::default(), - balances: asset_hub_rococo_runtime::BalancesConfig { - balances: accounts::init_balances() - .iter() - .cloned() - .map(|k| (k, ED * 4096)) - .collect(), - }, - parachain_info: asset_hub_rococo_runtime::ParachainInfoConfig { - parachain_id: PARA_ID.into(), - ..Default::default() - }, - collator_selection: asset_hub_rococo_runtime::CollatorSelectionConfig { - invulnerables: collators::invulnerables() - .iter() - .cloned() - .map(|(acc, _)| acc) - .collect(), - candidacy_bond: ED * 16, - ..Default::default() - }, - session: asset_hub_rococo_runtime::SessionConfig { - keys: collators::invulnerables() - .into_iter() - .map(|(acc, aura)| { - ( - acc.clone(), // account id - acc, // validator id - asset_hub_rococo_runtime::SessionKeys { aura }, // session keys - ) - }) - .collect(), - }, - polkadot_xcm: asset_hub_rococo_runtime::PolkadotXcmConfig { - safe_xcm_version: Some(SAFE_XCM_VERSION), - ..Default::default() - }, - ..Default::default() - }; - - build_genesis_storage_legacy( - &genesis_config, - asset_hub_rococo_runtime::WASM_BINARY - .expect("WASM binary was not built, please build it!"), - ) - } - - #[test] - fn test_genesis() { - let j1 = super::genesis(); - let j2 = genesis(); - - assert_eq!(j1.top, j2.top); - assert_eq!(j1.children_default, j2.children_default); - } - } -} - -// Penpal -pub mod penpal { - use super::*; - pub const PARA_ID_A: u32 = 2000; - pub const PARA_ID_B: u32 = 2001; - pub const ED: Balance = penpal_runtime::EXISTENTIAL_DEPOSIT; - - pub fn genesis(para_id: u32) -> Storage { - let genesis_config = serde_json::json!({ - "balances": { - "balances": accounts::init_balances() - .iter() - .cloned() - .map(|k| (k, ED * 4096)) - .collect::>(), - }, - "parachainInfo": { - 
"parachainId": cumulus_primitives_core::ParaId::from(para_id), - }, - "collatorSelection": { - "invulnerables": collators::invulnerables() - .iter() - .cloned() - .map(|(acc, _)| acc) - .collect::>(), - "candidacyBond": ED * 16, - }, - "session": { - "keys": collators::invulnerables() - .into_iter() - .map(|(acc, aura)| { - ( - acc.clone(), // account id - acc, // validator id - penpal_runtime::SessionKeys { aura }, // session keys - ) - }) - .collect::>(), - }, - "polkadotXcm": { - "safeXcmVersion": Some(SAFE_XCM_VERSION), - }, - "sudo": { - "key": Some(get_account_id_from_seed::("Alice")), - }, - }); - - build_genesis_storage( - genesis_config, - penpal_runtime::WASM_BINARY.expect("WASM binary was not built, please build it!"), - ) - } - - #[cfg(test)] - mod legacy_vs_json_check { - use super::*; - fn genesis(para_id: u32) -> Storage { - let genesis_config = penpal_runtime::RuntimeGenesisConfig { - system: penpal_runtime::SystemConfig::default(), - balances: penpal_runtime::BalancesConfig { - balances: accounts::init_balances() - .iter() - .cloned() - .map(|k| (k, ED * 4096)) - .collect(), - }, - parachain_info: penpal_runtime::ParachainInfoConfig { - parachain_id: para_id.into(), - ..Default::default() - }, - collator_selection: penpal_runtime::CollatorSelectionConfig { - invulnerables: collators::invulnerables() - .iter() - .cloned() - .map(|(acc, _)| acc) - .collect(), - candidacy_bond: ED * 16, - ..Default::default() - }, - session: penpal_runtime::SessionConfig { - keys: collators::invulnerables() - .into_iter() - .map(|(acc, aura)| { - ( - acc.clone(), // account id - acc, // validator id - penpal_runtime::SessionKeys { aura }, // session keys - ) - }) - .collect(), - }, - polkadot_xcm: penpal_runtime::PolkadotXcmConfig { - safe_xcm_version: Some(SAFE_XCM_VERSION), - ..Default::default() - }, - sudo: penpal_runtime::SudoConfig { - key: Some(get_account_id_from_seed::("Alice")), - }, - ..Default::default() - }; - - build_genesis_storage_legacy( - &genesis_config, - penpal_runtime::WASM_BINARY.expect("WASM binary was not built, please build it!"), - ) - } - - #[test] - fn test_genesis() { - let j1 = super::genesis(101); - let j2 = genesis(101); - - assert_eq!(j1.top, j2.top); - assert_eq!(j1.children_default, j2.children_default); - } - } -} - -// Bridge Hub Rococo & Bridge Hub Wococo -pub mod bridge_hub_rococo { - use super::*; - pub const PARA_ID: u32 = 1013; - pub const ED: Balance = parachains_common::rococo::currency::EXISTENTIAL_DEPOSIT; - - pub fn genesis() -> Storage { - let genesis_config = serde_json::json!({ - "balances": { - "balances": accounts::init_balances() - .iter() - .cloned() - .map(|k| (k, ED * 4096)) - .collect::>(), - }, - "parachainInfo": { - "parachainId": cumulus_primitives_core::ParaId::from(PARA_ID), - }, - "collatorSelection": { - "invulnerables": collators::invulnerables() - .iter() - .cloned() - .map(|(acc, _)| acc) - .collect::>(), - "candidacyBond": ED * 16, - }, - "session": { - "keys": collators::invulnerables() - .into_iter() - .map(|(acc, aura)| { - ( - acc.clone(), // account id - acc, // validator id - bridge_hub_rococo_runtime::SessionKeys { aura }, // session keys - ) - }) - .collect::>(), - }, - "polkadotXcm": { - "safeXcmVersion": Some(SAFE_XCM_VERSION), - }, - "bridgeWococoGrandpa": { - "owner": Some(get_account_id_from_seed::(accounts::BOB)), - }, - "bridgeRococoGrandpa": { - "owner": Some(get_account_id_from_seed::(accounts::BOB)), - }, - "bridgeRococoMessages": { - "owner": Some(get_account_id_from_seed::(accounts::BOB)), - }, - 
"bridgeWococoMessages": { - "owner": Some(get_account_id_from_seed::(accounts::BOB)), - } - }); - - build_genesis_storage( - genesis_config, - bridge_hub_rococo_runtime::WASM_BINARY - .expect("WASM binary was not built, please build it!"), - ) - } - - #[cfg(test)] - mod legacy_vs_json_check { - use super::*; - fn genesis() -> Storage { - let genesis_config = bridge_hub_rococo_runtime::RuntimeGenesisConfig { - system: bridge_hub_rococo_runtime::SystemConfig::default(), - balances: bridge_hub_rococo_runtime::BalancesConfig { - balances: accounts::init_balances() - .iter() - .cloned() - .map(|k| (k, ED * 4096)) - .collect(), - }, - parachain_info: bridge_hub_rococo_runtime::ParachainInfoConfig { - parachain_id: PARA_ID.into(), - ..Default::default() - }, - collator_selection: bridge_hub_rococo_runtime::CollatorSelectionConfig { - invulnerables: collators::invulnerables() - .iter() - .cloned() - .map(|(acc, _)| acc) - .collect(), - candidacy_bond: ED * 16, - ..Default::default() - }, - session: bridge_hub_rococo_runtime::SessionConfig { - keys: collators::invulnerables() - .into_iter() - .map(|(acc, aura)| { - ( - acc.clone(), // account id - acc, // validator id - bridge_hub_rococo_runtime::SessionKeys { aura }, // session keys - ) - }) - .collect(), - }, - polkadot_xcm: bridge_hub_rococo_runtime::PolkadotXcmConfig { - safe_xcm_version: Some(SAFE_XCM_VERSION), - ..Default::default() - }, - bridge_wococo_grandpa: bridge_hub_rococo_runtime::BridgeWococoGrandpaConfig { - owner: Some(get_account_id_from_seed::(accounts::BOB)), - ..Default::default() - }, - bridge_rococo_grandpa: bridge_hub_rococo_runtime::BridgeRococoGrandpaConfig { - owner: Some(get_account_id_from_seed::(accounts::BOB)), - ..Default::default() - }, - bridge_rococo_messages: bridge_hub_rococo_runtime::BridgeRococoMessagesConfig { - owner: Some(get_account_id_from_seed::(accounts::BOB)), - ..Default::default() - }, - bridge_wococo_messages: bridge_hub_rococo_runtime::BridgeWococoMessagesConfig { - owner: Some(get_account_id_from_seed::(accounts::BOB)), - ..Default::default() - }, - ..Default::default() - }; - - build_genesis_storage_legacy( - &genesis_config, - bridge_hub_rococo_runtime::WASM_BINARY - .expect("WASM binary was not built, please build it!"), - ) - } - - #[test] - fn test_genesis() { - let j1 = super::genesis(); - let j2 = genesis(); - - assert_eq!(j1.top, j2.top); - assert_eq!(j1.children_default, j2.children_default); - } - } -} - -// Bridge Hub Westend -pub mod bridge_hub_westend { - use super::*; - pub const PARA_ID: u32 = 1013; - pub const ED: Balance = parachains_common::westend::currency::EXISTENTIAL_DEPOSIT; - - pub fn genesis() -> Storage { - let genesis_config = serde_json::json!({ - "balances": { - "balances": accounts::init_balances() - .iter() - .cloned() - .map(|k| (k, ED * 4096)) - .collect::>(), - }, - "parachainInfo": { - "parachainId": cumulus_primitives_core::ParaId::from(PARA_ID), - }, - "collatorSelection": { - "invulnerables": collators::invulnerables() - .iter() - .cloned() - .map(|(acc, _)| acc) - .collect::>(), - "candidacyBond": ED * 16, - }, - "session": { - "keys": collators::invulnerables() - .into_iter() - .map(|(acc, aura)| { - ( - acc.clone(), // account id - acc, // validator id - bridge_hub_westend_runtime::SessionKeys { aura }, // session keys - ) - }) - .collect::>(), - }, - "polkadotXcm": { - "safeXcmVersion": Some(SAFE_XCM_VERSION), - }, - "bridgeRococoGrandpa": { - "owner": Some(get_account_id_from_seed::(accounts::BOB)), - }, - "bridgeRococoMessages": { - "owner": 
Some(get_account_id_from_seed::(accounts::BOB)), - } - }); - - build_genesis_storage( - genesis_config, - bridge_hub_westend_runtime::WASM_BINARY - .expect("WASM binary was not built, please build it!"), - ) - } -} diff --git a/cumulus/parachains/integration-tests/emulated/common/src/impls.rs b/cumulus/parachains/integration-tests/emulated/common/src/impls.rs index 613bb302e56..6c99c1614db 100644 --- a/cumulus/parachains/integration-tests/emulated/common/src/impls.rs +++ b/cumulus/parachains/integration-tests/emulated/common/src/impls.rs @@ -17,50 +17,53 @@ pub use codec::{Decode, Encode}; pub use paste; pub use crate::{ - constants::{PROOF_SIZE_THRESHOLD, REF_TIME_THRESHOLD}, - xcm_helpers::xcm_transact_unpaid_execution, - BridgeHubRococo, BridgeHubWococo, + xcm_helpers::xcm_transact_unpaid_execution, PROOF_SIZE_THRESHOLD, REF_TIME_THRESHOLD, }; // Substrate pub use frame_support::{ assert_ok, + sp_runtime::AccountId32, traits::fungibles::Inspect, weights::{Weight, WeightMeter}, }; pub use pallet_assets; pub use pallet_message_queue; +pub use pallet_xcm; use sp_core::Get; -// Cumulus -use bp_messages::{ - target_chain::{DispatchMessage, DispatchMessageData, MessageDispatch}, - LaneId, MessageKey, OutboundLaneData, +// Polkadot +pub use polkadot_runtime_parachains::{ + dmp, hrmp, + inclusion::{AggregateMessageOrigin, UmpQueueId}, }; -use bridge_runtime_common::messages_xcm_extension::XcmBlobMessageDispatchResult; +pub use xcm::{ + prelude::{MultiLocation, OriginKind, Outcome, VersionedXcm}, + v3::Error, + DoubleEncoded, +}; + +// Cumulus pub use cumulus_pallet_parachain_system; pub use cumulus_pallet_xcmp_queue; pub use cumulus_primitives_core::{ relay_chain::HrmpChannelId, DmpMessageHandler, ParaId, XcmpMessageHandler, }; -use pallet_bridge_messages::{Config, Instance1, Instance2, OutboundLanes, Pallet}; pub use parachains_common::{AccountId, Balance}; pub use xcm_emulator::{ assert_expected_events, bx, helpers::weight_within_threshold, BridgeMessage, - BridgeMessageDispatchError, BridgeMessageHandler, Chain, Parachain, RelayChain, TestExt, + BridgeMessageDispatchError, BridgeMessageHandler, Chain, Network, Parachain, RelayChain, + TestExt, }; -// Polkadot -pub use pallet_xcm; -pub use polkadot_runtime_parachains::{ - dmp, hrmp, - inclusion::{AggregateMessageOrigin, UmpQueueId}, -}; -pub use xcm::{ - prelude::{MultiLocation, OriginKind, Outcome, VersionedXcm}, - v3::Error, - DoubleEncoded, +// Bridges +use bp_messages::{ + target_chain::{DispatchMessage, DispatchMessageData, MessageDispatch}, + LaneId, MessageKey, OutboundLaneData, }; +use bridge_runtime_common::messages_xcm_extension::XcmBlobMessageDispatchResult; +pub use pallet_bridge_messages::Instance2 as BridgeMessagesInstance2; +use pallet_bridge_messages::{Config, Instance1, OutboundLanes, Pallet}; pub struct BridgeHubMessageHandler { _marker: std::marker::PhantomData<(S, T, I)>, @@ -80,14 +83,6 @@ impl From for LaneIdWrapper { } } -type BridgeHubRococoRuntime = ::Runtime; -type BridgeHubWococoRuntime = ::Runtime; - -pub type RococoWococoMessageHandler = - BridgeHubMessageHandler; -pub type WococoRococoMessageHandler = - BridgeHubMessageHandler; - impl BridgeMessageHandler for BridgeHubMessageHandler where S: Config, @@ -171,12 +166,12 @@ where macro_rules! impl_accounts_helpers_for_relay_chain { ( $chain:ident ) => { $crate::impls::paste::paste! 
{ - impl $chain { + impl $chain { /// Fund a set of accounts with a balance pub fn fund_accounts(accounts: Vec<($crate::impls::AccountId, $crate::impls::Balance)>) { ::execute_with(|| { for account in accounts { - $crate::impls::assert_ok!(]>::Balances::force_set_balance( + $crate::impls::assert_ok!(]>::Balances::force_set_balance( ::RuntimeOrigin::root(), account.0.into(), account.1, @@ -185,7 +180,7 @@ macro_rules! impl_accounts_helpers_for_relay_chain { }); } /// Fund a sovereign account based on its Parachain Id - pub fn fund_para_sovereign(amount: $crate::impls::Balance, para_id: $crate::impls::ParaId) -> sp_runtime::AccountId32 { + pub fn fund_para_sovereign(amount: $crate::impls::Balance, para_id: $crate::impls::ParaId) -> $crate::impls::AccountId32 { let sovereign_account = ::sovereign_account_id_of_child_para(para_id); Self::fund_accounts(vec![(sovereign_account.clone(), amount)]); sovereign_account @@ -199,15 +194,15 @@ macro_rules! impl_accounts_helpers_for_relay_chain { macro_rules! impl_assert_events_helpers_for_relay_chain { ( $chain:ident ) => { $crate::impls::paste::paste! { - type [<$chain RuntimeEvent>] = <$chain as $crate::impls::Chain>::RuntimeEvent; + type [<$chain RuntimeEvent>] = <$chain as $crate::impls::Chain>::RuntimeEvent; - impl $chain { + impl $chain { /// Asserts a dispatchable is completely executed and XCM sent pub fn assert_xcm_pallet_attempted_complete(expected_weight: Option<$crate::impls::Weight>) { $crate::impls::assert_expected_events!( Self, vec![ - [<$chain RuntimeEvent>]::XcmPallet( + [<$chain RuntimeEvent>]::::XcmPallet( $crate::impls::pallet_xcm::Event::Attempted { outcome: $crate::impls::Outcome::Complete(weight) } ) => { weight: $crate::impls::weight_within_threshold( @@ -229,7 +224,7 @@ macro_rules! impl_assert_events_helpers_for_relay_chain { Self, vec![ // Dispatchable is properly executed and XCM message sent - [<$chain RuntimeEvent>]::XcmPallet( + [<$chain RuntimeEvent>]::::XcmPallet( $crate::impls::pallet_xcm::Event::Attempted { outcome: $crate::impls::Outcome::Incomplete(weight, error) } ) => { weight: $crate::impls::weight_within_threshold( @@ -248,7 +243,7 @@ macro_rules! impl_assert_events_helpers_for_relay_chain { $crate::impls::assert_expected_events!( Self, vec![ - [<$chain RuntimeEvent>]::XcmPallet($crate::impls::pallet_xcm::Event::Sent { .. }) => {}, + [<$chain RuntimeEvent>]::::XcmPallet($crate::impls::pallet_xcm::Event::Sent { .. }) => {}, ] ); } @@ -263,7 +258,7 @@ macro_rules! impl_assert_events_helpers_for_relay_chain { Self, vec![ // XCM is succesfully received and proccessed - [<$chain RuntimeEvent>]::MessageQueue($crate::impls::pallet_message_queue::Event::Processed { + [<$chain RuntimeEvent>]::::MessageQueue($crate::impls::pallet_message_queue::Event::Processed { origin: $crate::impls::AggregateMessageOrigin::Ump($crate::impls::UmpQueueId::Para(id)), weight_used, success, @@ -289,7 +284,7 @@ macro_rules! impl_assert_events_helpers_for_relay_chain { macro_rules! impl_hrmp_channels_helpers_for_relay_chain { ( $chain:ident ) => { $crate::impls::paste::paste! { - impl $chain { + impl $chain { /// Init open channel request with another Parachain pub fn init_open_channel_call( recipient_para_id: $crate::impls::ParaId, @@ -329,7 +324,7 @@ macro_rules! 
impl_hrmp_channels_helpers_for_relay_chain { let relay_root_origin = ::RuntimeOrigin::root(); // Force process HRMP open channel requests without waiting for the next session - $crate::impls::assert_ok!(]>::Hrmp::force_process_hrmp_open( + $crate::impls::assert_ok!(]>::Hrmp::force_process_hrmp_open( relay_root_origin, 0 )); @@ -353,7 +348,7 @@ macro_rules! impl_hrmp_channels_helpers_for_relay_chain { macro_rules! impl_send_transact_helpers_for_relay_chain { ( $chain:ident ) => { $crate::impls::paste::paste! { - impl $chain { + impl $chain { /// A root origin (as governance) sends `xcm::Transact` with `UnpaidExecution` and encoded `call` to child parachain. pub fn send_unpaid_transact_to_parachain_as_root( recipient: $crate::impls::ParaId, @@ -367,7 +362,7 @@ macro_rules! impl_send_transact_helpers_for_relay_chain { let xcm = $crate::impls::xcm_transact_unpaid_execution(call, $crate::impls::OriginKind::Superuser); // Send XCM `Transact` - $crate::impls::assert_ok!(]>::XcmPallet::send( + $crate::impls::assert_ok!(]>::XcmPallet::send( root_origin, bx!(destination.into()), bx!(xcm), @@ -384,12 +379,12 @@ macro_rules! impl_send_transact_helpers_for_relay_chain { macro_rules! impl_accounts_helpers_for_parachain { ( $chain:ident ) => { $crate::impls::paste::paste! { - impl $chain { + impl $chain { /// Fund a set of accounts with a balance pub fn fund_accounts(accounts: Vec<($crate::impls::AccountId, $crate::impls::Balance)>) { ::execute_with(|| { for account in accounts { - $crate::impls::assert_ok!(]>::Balances::force_set_balance( + $crate::impls::assert_ok!(]>::Balances::force_set_balance( ::RuntimeOrigin::root(), account.0.into(), account.1, @@ -406,15 +401,15 @@ macro_rules! impl_accounts_helpers_for_parachain { macro_rules! impl_assert_events_helpers_for_parachain { ( $chain:ident ) => { $crate::impls::paste::paste! { - type [<$chain RuntimeEvent>] = <$chain as $crate::impls::Chain>::RuntimeEvent; + type [<$chain RuntimeEvent>] = <$chain as $crate::impls::Chain>::RuntimeEvent; - impl $chain { + impl $chain { /// Asserts a dispatchable is completely executed and XCM sent pub fn assert_xcm_pallet_attempted_complete(expected_weight: Option<$crate::impls::Weight>) { $crate::impls::assert_expected_events!( Self, vec![ - [<$chain RuntimeEvent>]::PolkadotXcm( + [<$chain RuntimeEvent>]::::PolkadotXcm( $crate::impls::pallet_xcm::Event::Attempted { outcome: $crate::impls::Outcome::Complete(weight) } ) => { weight: $crate::impls::weight_within_threshold( @@ -436,7 +431,7 @@ macro_rules! impl_assert_events_helpers_for_parachain { Self, vec![ // Dispatchable is properly executed and XCM message sent - [<$chain RuntimeEvent>]::PolkadotXcm( + [<$chain RuntimeEvent>]::::PolkadotXcm( $crate::impls::pallet_xcm::Event::Attempted { outcome: $crate::impls::Outcome::Incomplete(weight, error) } ) => { weight: $crate::impls::weight_within_threshold( @@ -456,7 +451,7 @@ macro_rules! impl_assert_events_helpers_for_parachain { Self, vec![ // Execution fails in the origin with `Barrier` - [<$chain RuntimeEvent>]::PolkadotXcm( + [<$chain RuntimeEvent>]::::PolkadotXcm( $crate::impls::pallet_xcm::Event::Attempted { outcome: $crate::impls::Outcome::Error(error) } ) => { error: *error == expected_error.unwrap_or(*error), @@ -470,7 +465,7 @@ macro_rules! impl_assert_events_helpers_for_parachain { $crate::impls::assert_expected_events!( Self, vec![ - [<$chain RuntimeEvent>]::PolkadotXcm($crate::impls::pallet_xcm::Event::Sent { .. }) => {}, + [<$chain RuntimeEvent>]::::PolkadotXcm($crate::impls::pallet_xcm::Event::Sent { .. 
}) => {}, ] ); } @@ -480,7 +475,7 @@ macro_rules! impl_assert_events_helpers_for_parachain { $crate::impls::assert_expected_events!( Self, vec![ - [<$chain RuntimeEvent>]::ParachainSystem( + [<$chain RuntimeEvent>]::::ParachainSystem( $crate::impls::cumulus_pallet_parachain_system::Event::UpwardMessageSent { .. } ) => {}, ] @@ -492,7 +487,7 @@ macro_rules! impl_assert_events_helpers_for_parachain { $crate::impls::assert_expected_events!( Self, vec![ - [<$chain RuntimeEvent>]::MessageQueue(pallet_message_queue::Event::Processed { + [<$chain RuntimeEvent>]::::MessageQueue($crate::impls::pallet_message_queue::Event::Processed { success: true, weight_used: weight, .. }) => { weight: $crate::impls::weight_within_threshold( @@ -512,7 +507,7 @@ macro_rules! impl_assert_events_helpers_for_parachain { $crate::impls::assert_expected_events!( Self, vec![ - [<$chain RuntimeEvent>]::MessageQueue(pallet_message_queue::Event::Processed { + [<$chain RuntimeEvent>]::::MessageQueue($crate::impls::pallet_message_queue::Event::Processed { success: false, weight_used: weight, .. }) => { weight: $crate::impls::weight_within_threshold( @@ -530,7 +525,7 @@ macro_rules! impl_assert_events_helpers_for_parachain { $crate::impls::assert_expected_events!( Self, vec![ - [<$chain RuntimeEvent>]::MessageQueue($crate::impls::pallet_message_queue::Event::ProcessingFailed { + [<$chain RuntimeEvent>]::::MessageQueue($crate::impls::pallet_message_queue::Event::ProcessingFailed { .. }) => { @@ -544,7 +539,7 @@ macro_rules! impl_assert_events_helpers_for_parachain { $crate::impls::assert_expected_events!( Self, vec![ - [<$chain RuntimeEvent>]::MessageQueue(pallet_message_queue::Event::Processed { success: true, weight_used: weight, .. } + [<$chain RuntimeEvent>]::::MessageQueue($crate::impls::pallet_message_queue::Event::Processed { success: true, weight_used: weight, .. } ) => { weight: $crate::impls::weight_within_threshold( ($crate::impls::REF_TIME_THRESHOLD, $crate::impls::PROOF_SIZE_THRESHOLD), @@ -561,10 +556,10 @@ macro_rules! impl_assert_events_helpers_for_parachain { } #[macro_export] -macro_rules! impl_assets_helpers_for_parachain { +macro_rules! impl_assets_helpers_for_system_parachain { ( $chain:ident, $relay_chain:ident ) => { $crate::impls::paste::paste! { - impl $chain { + impl $chain { /// Returns the encoded call for `force_create` from the assets pallet pub fn force_create_asset_call( asset_id: u32, @@ -607,19 +602,19 @@ macro_rules! impl_assets_helpers_for_parachain { amount_to_mint: u128, ) { ::execute_with(|| { - $crate::impls::assert_ok!(]>::Assets::mint( + $crate::impls::assert_ok!(]>::Assets::mint( signed_origin, id.into(), beneficiary.clone().into(), amount_to_mint )); - type RuntimeEvent = <$chain as $crate::impls::Chain>::RuntimeEvent; + type RuntimeEvent = <$chain as $crate::impls::Chain>::RuntimeEvent; $crate::impls::assert_expected_events!( Self, vec![ - RuntimeEvent::Assets($crate::impls::pallet_assets::Event::Issued { asset_id, owner, amount }) => { + RuntimeEvent::::Assets($crate::impls::pallet_assets::Event::Issued { asset_id, owner, amount }) => { asset_id: *asset_id == id, owner: *owner == beneficiary.clone().into(), amount: *amount == amount_to_mint, @@ -664,28 +659,28 @@ macro_rules! 
impl_assets_helpers_for_parachain { ) { use $crate::impls::{Parachain, Inspect, TestExt}; - <$relay_chain>::send_unpaid_transact_to_parachain_as_root( + <$relay_chain>::send_unpaid_transact_to_parachain_as_root( Self::para_id(), Self::force_create_asset_call(id, asset_owner.clone(), is_sufficient, min_balance), ); // Receive XCM message in Assets Parachain Self::execute_with(|| { - type RuntimeEvent = <$chain as $crate::impls::Chain>::RuntimeEvent; + type RuntimeEvent = <$chain as $crate::impls::Chain>::RuntimeEvent; Self::assert_dmp_queue_complete(dmp_weight_threshold); $crate::impls::assert_expected_events!( Self, vec![ - RuntimeEvent::Assets($crate::impls::pallet_assets::Event::ForceCreated { asset_id, owner }) => { + RuntimeEvent::::Assets($crate::impls::pallet_assets::Event::ForceCreated { asset_id, owner }) => { asset_id: *asset_id == id, owner: *owner == asset_owner, }, ] ); - assert!(]>::Assets::asset_exists(id.into())); + assert!(]>::Assets::asset_exists(id.into())); }); } } diff --git a/cumulus/parachains/integration-tests/emulated/common/src/lib.rs b/cumulus/parachains/integration-tests/emulated/common/src/lib.rs index 1debd50d609..74db02db003 100644 --- a/cumulus/parachains/integration-tests/emulated/common/src/lib.rs +++ b/cumulus/parachains/integration-tests/emulated/common/src/lib.rs @@ -13,361 +13,161 @@ // See the License for the specific language governing permissions and // limitations under the License. -pub mod constants; pub mod impls; pub mod macros; pub mod xcm_helpers; -use constants::{ - accounts::{ALICE, BOB}, - asset_hub_rococo, asset_hub_westend, asset_hub_wococo, bridge_hub_rococo, bridge_hub_westend, - penpal, rococo, westend, -}; -use impls::{RococoWococoMessageHandler, WococoRococoMessageHandler}; -pub use paste; +pub use xcm_emulator; // Substrate -use frame_support::traits::OnInitialize; -pub use pallet_balances; -pub use pallet_message_queue; +use grandpa::AuthorityId as GrandpaId; +use pallet_im_online::sr25519::AuthorityId as ImOnlineId; +use sp_authority_discovery::AuthorityId as AuthorityDiscoveryId; +use sp_consensus_babe::AuthorityId as BabeId; +use sp_core::{sr25519, storage::Storage, Pair, Public}; +use sp_runtime::{ + traits::{IdentifyAccount, Verify}, + BuildStorage, MultiSignature, +}; + +// Polakdot +use parachains_common::BlockNumber; +use polkadot_runtime_parachains::configuration::HostConfiguration; +use xcm; // Cumulus -pub use cumulus_pallet_xcmp_queue; -pub use xcm_emulator::Chain; -use xcm_emulator::{ - decl_test_bridges, decl_test_networks, decl_test_parachains, decl_test_relay_chains, - decl_test_sender_receiver_accounts_parameter_types, DefaultParaMessageProcessor, - DefaultRelayMessageProcessor, Parachain, TestExt, -}; +use parachains_common::{AccountId, AssetHubPolkadotAuraId, AuraId}; +use polkadot_primitives::{AssignmentId, ValidatorId}; +use polkadot_service::chain_spec::get_authority_keys_from_seed_no_beefy; -// Polkadot -pub use pallet_xcm; -pub use xcm::prelude::{AccountId32, WeightLimit}; +pub const XCM_V2: u32 = 2; +pub const XCM_V3: u32 = 3; +pub const REF_TIME_THRESHOLD: u64 = 33; +pub const PROOF_SIZE_THRESHOLD: u64 = 33; -decl_test_relay_chains! 
{ - #[api_version(8)] - pub struct Westend { - genesis = westend::genesis(), - on_init = (), - runtime = westend_runtime, - core = { - MessageProcessor: DefaultRelayMessageProcessor, - SovereignAccountOf: westend_runtime::xcm_config::LocationConverter, //TODO: rename to SovereignAccountOf, - }, - pallets = { - XcmPallet: westend_runtime::XcmPallet, - Sudo: westend_runtime::Sudo, - Balances: westend_runtime::Balances, - Treasury: westend_runtime::Treasury, - AssetRate: westend_runtime::AssetRate, - } - }, - #[api_version(8)] - pub struct Rococo { - genesis = rococo::genesis(), - on_init = (), - runtime = rococo_runtime, - core = { - MessageProcessor: DefaultRelayMessageProcessor, - SovereignAccountOf: rococo_runtime::xcm_config::LocationConverter, //TODO: rename to SovereignAccountOf, - }, - pallets = { - XcmPallet: rococo_runtime::XcmPallet, - Sudo: rococo_runtime::Sudo, - Balances: rococo_runtime::Balances, - Hrmp: rococo_runtime::Hrmp, - } - }, - #[api_version(8)] - pub struct Wococo { - genesis = rococo::genesis(), - on_init = (), - runtime = rococo_runtime, - core = { - MessageProcessor: DefaultRelayMessageProcessor, - SovereignAccountOf: rococo_runtime::xcm_config::LocationConverter, //TODO: rename to SovereignAccountOf, - }, - pallets = { - XcmPallet: rococo_runtime::XcmPallet, - Sudo: rococo_runtime::Sudo, - Balances: rococo_runtime::Balances, - } - } -} +/// The default XCM version to set in genesis config. +pub const SAFE_XCM_VERSION: u32 = xcm::prelude::XCM_VERSION; -decl_test_parachains! { - // Westend Parachains - pub struct AssetHubWestend { - genesis = asset_hub_westend::genesis(), - on_init = { - asset_hub_westend_runtime::AuraExt::on_initialize(1); - }, - runtime = asset_hub_westend_runtime, - core = { - XcmpMessageHandler: asset_hub_westend_runtime::XcmpQueue, - LocationToAccountId: asset_hub_westend_runtime::xcm_config::LocationToAccountId, - ParachainInfo: asset_hub_westend_runtime::ParachainInfo, - MessageProcessor: DefaultParaMessageProcessor, - }, - pallets = { - PolkadotXcm: asset_hub_westend_runtime::PolkadotXcm, - Balances: asset_hub_westend_runtime::Balances, - Assets: asset_hub_westend_runtime::Assets, - ForeignAssets: asset_hub_westend_runtime::ForeignAssets, - PoolAssets: asset_hub_westend_runtime::PoolAssets, - AssetConversion: asset_hub_westend_runtime::AssetConversion, - } - }, - pub struct BridgeHubWestend { - genesis = bridge_hub_westend::genesis(), - on_init = { - bridge_hub_westend_runtime::AuraExt::on_initialize(1); - }, - runtime = bridge_hub_westend_runtime, - core = { - XcmpMessageHandler: bridge_hub_westend_runtime::XcmpQueue, - LocationToAccountId: bridge_hub_westend_runtime::xcm_config::LocationToAccountId, - ParachainInfo: bridge_hub_westend_runtime::ParachainInfo, - MessageProcessor: DefaultParaMessageProcessor, - }, - pallets = { - PolkadotXcm: bridge_hub_westend_runtime::PolkadotXcm, - Balances: bridge_hub_westend_runtime::Balances, - } - }, - pub struct PenpalWestendA { - genesis = penpal::genesis(penpal::PARA_ID_A), - on_init = { - penpal_runtime::AuraExt::on_initialize(1); - }, - runtime = penpal_runtime, - core = { - XcmpMessageHandler: penpal_runtime::XcmpQueue, - LocationToAccountId: penpal_runtime::xcm_config::LocationToAccountId, - ParachainInfo: penpal_runtime::ParachainInfo, - MessageProcessor: DefaultParaMessageProcessor, - }, - pallets = { - PolkadotXcm: penpal_runtime::PolkadotXcm, - Assets: penpal_runtime::Assets, - Balances: penpal_runtime::Balances, - } - }, - // Rococo Parachains - pub struct BridgeHubRococo { - genesis = 
bridge_hub_rococo::genesis(), - on_init = { - bridge_hub_rococo_runtime::AuraExt::on_initialize(1); - }, - runtime = bridge_hub_rococo_runtime, - core = { - XcmpMessageHandler: bridge_hub_rococo_runtime::XcmpQueue, - LocationToAccountId: bridge_hub_rococo_runtime::xcm_config::LocationToAccountId, - ParachainInfo: bridge_hub_rococo_runtime::ParachainInfo, - MessageProcessor: DefaultParaMessageProcessor, - }, - pallets = { - PolkadotXcm: bridge_hub_rococo_runtime::PolkadotXcm, - Balances: bridge_hub_rococo_runtime::Balances, - } - }, - // AssetHubRococo - pub struct AssetHubRococo { - genesis = asset_hub_rococo::genesis(), - on_init = { - asset_hub_rococo_runtime::AuraExt::on_initialize(1); - }, - runtime = asset_hub_rococo_runtime, - core = { - XcmpMessageHandler: asset_hub_rococo_runtime::XcmpQueue, - LocationToAccountId: asset_hub_rococo_runtime::xcm_config::LocationToAccountId, - ParachainInfo: asset_hub_rococo_runtime::ParachainInfo, - MessageProcessor: DefaultParaMessageProcessor, - }, - pallets = { - PolkadotXcm: asset_hub_rococo_runtime::PolkadotXcm, - Assets: asset_hub_rococo_runtime::Assets, - ForeignAssets: asset_hub_rococo_runtime::ForeignAssets, - PoolAssets: asset_hub_rococo_runtime::PoolAssets, - AssetConversion: asset_hub_rococo_runtime::AssetConversion, - Balances: asset_hub_rococo_runtime::Balances, - } - }, - pub struct PenpalRococoA { - genesis = penpal::genesis(penpal::PARA_ID_A), - on_init = { - penpal_runtime::AuraExt::on_initialize(1); - }, - runtime = penpal_runtime, - core = { - XcmpMessageHandler: penpal_runtime::XcmpQueue, - LocationToAccountId: penpal_runtime::xcm_config::LocationToAccountId, - ParachainInfo: penpal_runtime::ParachainInfo, - MessageProcessor: DefaultParaMessageProcessor, - }, - pallets = { - PolkadotXcm: penpal_runtime::PolkadotXcm, - Assets: penpal_runtime::Assets, - } - }, - pub struct PenpalRococoB { - genesis = penpal::genesis(penpal::PARA_ID_B), - on_init = { - penpal_runtime::AuraExt::on_initialize(1); - }, - runtime = penpal_runtime, - core = { - XcmpMessageHandler: penpal_runtime::XcmpQueue, - LocationToAccountId: penpal_runtime::xcm_config::LocationToAccountId, - ParachainInfo: penpal_runtime::ParachainInfo, - MessageProcessor: DefaultParaMessageProcessor, - }, - pallets = { - PolkadotXcm: penpal_runtime::PolkadotXcm, - Assets: penpal_runtime::Assets, - } - }, - // Wococo Parachains - pub struct BridgeHubWococo { - genesis = bridge_hub_rococo::genesis(), - on_init = { - bridge_hub_rococo_runtime::AuraExt::on_initialize(1); - // TODO: manage to set_wococo_flavor with `set_storage` - }, - runtime = bridge_hub_rococo_runtime, - core = { - XcmpMessageHandler: bridge_hub_rococo_runtime::XcmpQueue, - LocationToAccountId: bridge_hub_rococo_runtime::xcm_config::LocationToAccountId, - ParachainInfo: bridge_hub_rococo_runtime::ParachainInfo, - MessageProcessor: DefaultParaMessageProcessor, - }, - pallets = { - PolkadotXcm: bridge_hub_rococo_runtime::PolkadotXcm, - } - }, - pub struct AssetHubWococo { - genesis = asset_hub_wococo::genesis(), - on_init = { - asset_hub_rococo_runtime::AuraExt::on_initialize(1); - // TODO: manage to set_wococo_flavor with `set_storage` - }, - runtime = asset_hub_rococo_runtime, - core = { - XcmpMessageHandler: asset_hub_rococo_runtime::XcmpQueue, - LocationToAccountId: asset_hub_rococo_runtime::xcm_config::LocationToAccountId, - ParachainInfo: asset_hub_rococo_runtime::ParachainInfo, - MessageProcessor: DefaultParaMessageProcessor, - }, - pallets = { - PolkadotXcm: asset_hub_rococo_runtime::PolkadotXcm, - Assets: 
asset_hub_rococo_runtime::Assets,
-			ForeignAssets: asset_hub_rococo_runtime::ForeignAssets,
-			PoolAssets: asset_hub_rococo_runtime::PoolAssets,
-			AssetConversion: asset_hub_rococo_runtime::AssetConversion,
-			Balances: asset_hub_rococo_runtime::Balances,
-		}
-	}
+type AccountPublic = <MultiSignature as Verify>::Signer;
+
+/// Helper function to generate a crypto pair from seed
+pub fn get_from_seed<TPublic: Public>(seed: &str) -> <TPublic::Pair as Pair>::Public {
+	TPublic::Pair::from_string(&format!("//{}", seed), None)
+		.expect("static values are valid; qed")
+		.public()
 }
-decl_test_networks! {
-	pub struct WestendMockNet {
-		relay_chain = Westend,
-		parachains = vec![
-			AssetHubWestend,
-			BridgeHubWestend,
-			PenpalWestendA,
-		],
-		bridge = ()
-	},
-	pub struct RococoMockNet {
-		relay_chain = Rococo,
-		parachains = vec![
-			AssetHubRococo,
-			BridgeHubRococo,
-			PenpalRococoA,
-			PenpalRococoB,
-		],
-		bridge = RococoWococoMockBridge
-	},
-	pub struct WococoMockNet {
-		relay_chain = Wococo,
-		parachains = vec![
-			AssetHubWococo,
-			BridgeHubWococo,
-		],
-		bridge = WococoRococoMockBridge
-	}
+/// Helper function to generate an account ID from seed.
+pub fn get_account_id_from_seed<TPublic: Public>(seed: &str) -> AccountId
+where
+	AccountPublic: From<<TPublic::Pair as Pair>::Public>,
+{
+	AccountPublic::from(get_from_seed::<TPublic>(seed)).into_account()
 }
-decl_test_bridges! {
-	pub struct RococoWococoMockBridge {
-		source = BridgeHubRococo,
-		target = BridgeHubWococo,
-		handler = RococoWococoMessageHandler
-	},
-	pub struct WococoRococoMockBridge {
-		source = BridgeHubWococo,
-		target = BridgeHubRococo,
-		handler = WococoRococoMessageHandler
+pub fn get_host_config() -> HostConfiguration<BlockNumber> {
+	HostConfiguration {
+		max_upward_queue_count: 10,
+		max_upward_queue_size: 51200,
+		max_upward_message_size: 51200,
+		max_upward_message_num_per_candidate: 10,
+		max_downward_message_size: 51200,
+		hrmp_sender_deposit: 0,
+		hrmp_recipient_deposit: 0,
+		hrmp_channel_max_capacity: 1000,
+		hrmp_channel_max_message_size: 102400,
+		hrmp_channel_max_total_size: 102400,
+		hrmp_max_parachain_outbound_channels: 30,
+		hrmp_max_parachain_inbound_channels: 30,
+		..Default::default()
+	}
 }
 }
-// Westend implementation
-impl_accounts_helpers_for_relay_chain!(Westend);
-impl_assert_events_helpers_for_relay_chain!(Westend);
-impl_send_transact_helpers_for_relay_chain!(Westend);
-
-// Rococo implementation
-impl_accounts_helpers_for_relay_chain!(Rococo);
-impl_assert_events_helpers_for_relay_chain!(Rococo);
-impl_hrmp_channels_helpers_for_relay_chain!(Rococo);
-impl_send_transact_helpers_for_relay_chain!(Rococo);
-
-// Wococo implementation
-impl_accounts_helpers_for_relay_chain!(Wococo);
-impl_assert_events_helpers_for_relay_chain!(Wococo);
-impl_send_transact_helpers_for_relay_chain!(Wococo);
-
-// AssetHubWestend implementation
-impl_accounts_helpers_for_parachain!(AssetHubWestend);
-impl_assets_helpers_for_parachain!(AssetHubWestend, Westend);
-impl_assert_events_helpers_for_parachain!(AssetHubWestend);
-
-// AssetHubRococo implementation
-impl_accounts_helpers_for_parachain!(AssetHubRococo);
-impl_assets_helpers_for_parachain!(AssetHubRococo, Rococo);
-impl_assert_events_helpers_for_parachain!(AssetHubRococo);
-
-// PenpalWestendA implementation
-impl_assert_events_helpers_for_parachain!(PenpalWestendA);
+/// Helper function used in tests to build the genesis storage using given RuntimeGenesisConfig and
+/// code. Used in `legacy_vs_json_check` submods to verify storage building with JSON patch against
+/// building with RuntimeGenesisConfig struct.
+pub fn build_genesis_storage_legacy(builder: &dyn BuildStorage, code: &[u8]) -> Storage {
+	let mut storage = builder.build_storage().unwrap();
+	storage
+		.top
+		.insert(sp_core::storage::well_known_keys::CODE.to_vec(), code.into());
+	storage
 }
-// BridgeHubWestend implementation
-impl_accounts_helpers_for_parachain!(BridgeHubWestend);
-impl_assert_events_helpers_for_parachain!(BridgeHubWestend);
+pub mod accounts {
+	use super::*;
+	pub const ALICE: &str = "Alice";
+	pub const BOB: &str = "Bob";
+	pub const CHARLIE: &str = "Charlie";
+	pub const DAVE: &str = "Dave";
+	pub const EVE: &str = "Eve";
+	pub const FERDIE: &str = "Ferdie";
+	pub const ALICE_STASH: &str = "Alice//stash";
+	pub const BOB_STASH: &str = "Bob//stash";
+	pub const CHARLIE_STASH: &str = "Charlie//stash";
+	pub const DAVE_STASH: &str = "Dave//stash";
+	pub const EVE_STASH: &str = "Eve//stash";
+	pub const FERDIE_STASH: &str = "Ferdie//stash";
+	pub const FERDIE_BEEFY: &str = "Ferdie//stash";
+
+	pub fn init_balances() -> Vec<AccountId> {
+		vec![
+			get_account_id_from_seed::<sr25519::Public>(ALICE),
+			get_account_id_from_seed::<sr25519::Public>(BOB),
+			get_account_id_from_seed::<sr25519::Public>(CHARLIE),
+			get_account_id_from_seed::<sr25519::Public>(DAVE),
+			get_account_id_from_seed::<sr25519::Public>(EVE),
+			get_account_id_from_seed::<sr25519::Public>(FERDIE),
+			get_account_id_from_seed::<sr25519::Public>(ALICE_STASH),
+			get_account_id_from_seed::<sr25519::Public>(BOB_STASH),
+			get_account_id_from_seed::<sr25519::Public>(CHARLIE_STASH),
+			get_account_id_from_seed::<sr25519::Public>(DAVE_STASH),
+			get_account_id_from_seed::<sr25519::Public>(EVE_STASH),
+			get_account_id_from_seed::<sr25519::Public>(FERDIE_STASH),
+		]
+	}
 }
-// BridgeHubRococo implementation
-impl_accounts_helpers_for_parachain!(BridgeHubRococo);
-impl_assert_events_helpers_for_parachain!(BridgeHubRococo);
+pub mod collators {
+	use super::*;
+
+	pub fn invulnerables_asset_hub_polkadot() -> Vec<(AccountId, AssetHubPolkadotAuraId)> {
+		vec![
+			(
+				get_account_id_from_seed::<sr25519::Public>("Alice"),
+				get_from_seed::<AssetHubPolkadotAuraId>("Alice"),
+			),
+			(
+				get_account_id_from_seed::<sr25519::Public>("Bob"),
+				get_from_seed::<AssetHubPolkadotAuraId>("Bob"),
+			),
+		]
+	}
-// PenpalRococo implementations
-impl_assert_events_helpers_for_parachain!(PenpalRococoA);
-impl_assert_events_helpers_for_parachain!(PenpalRococoB);
+	pub fn invulnerables() -> Vec<(AccountId, AuraId)> {
+		vec![
+			(
+				get_account_id_from_seed::<sr25519::Public>("Alice"),
+				get_from_seed::<AuraId>("Alice"),
+			),
+			(get_account_id_from_seed::<sr25519::Public>("Bob"), get_from_seed::<AuraId>("Bob")),
+		]
+	}
 }
-decl_test_sender_receiver_accounts_parameter_types!
{ - // Relays - Westend { sender: ALICE, receiver: BOB }, - Rococo { sender: ALICE, receiver: BOB }, - Wococo { sender: ALICE, receiver: BOB }, - // Asset Hubs - AssetHubWestend { sender: ALICE, receiver: BOB }, - AssetHubRococo { sender: ALICE, receiver: BOB }, - AssetHubWococo { sender: ALICE, receiver: BOB }, - // Bridged Hubs - BridgeHubRococo { sender: ALICE, receiver: BOB }, - BridgeHubWococo { sender: ALICE, receiver: BOB }, - BridgeHubWestend { sender: ALICE, receiver: BOB }, - // Penpals - PenpalWestendA { sender: ALICE, receiver: BOB }, - PenpalRococoA { sender: ALICE, receiver: BOB }, - PenpalRococoB { sender: ALICE, receiver: BOB } +pub mod validators { + use super::*; + + pub fn initial_authorities() -> Vec<( + AccountId, + AccountId, + BabeId, + GrandpaId, + ImOnlineId, + ValidatorId, + AssignmentId, + AuthorityDiscoveryId, + )> { + vec![get_authority_keys_from_seed_no_beefy("Alice")] + } } diff --git a/cumulus/parachains/integration-tests/emulated/common/src/macros.rs b/cumulus/parachains/integration-tests/emulated/common/src/macros.rs index 6efe7f1731e..6ea3524ed4a 100644 --- a/cumulus/parachains/integration-tests/emulated/common/src/macros.rs +++ b/cumulus/parachains/integration-tests/emulated/common/src/macros.rs @@ -13,28 +13,43 @@ // See the License for the specific language governing permissions and // limitations under the License. +pub use paste; + +// Substrate +pub use pallet_balances; +pub use pallet_message_queue; +pub use pallet_xcm; + +// Polkadot +pub use xcm::prelude::{AccountId32, WeightLimit}; + +// Cumulus +pub use asset_test_utils; +pub use cumulus_pallet_xcmp_queue; +pub use xcm_emulator::Chain; + #[macro_export] macro_rules! test_parachain_is_trusted_teleporter { ( $sender_para:ty, $sender_xcm_config:ty, vec![$( $receiver_para:ty ),+], ($assets:expr, $amount:expr) ) => { - $crate::paste::paste! { + $crate::macros::paste::paste! { // init Origin variables let sender = [<$sender_para Sender>]::get(); let mut para_sender_balance_before = - <$sender_para as $crate::Chain>::account_data_of(sender.clone()).free; - let origin = <$sender_para as $crate::Chain>::RuntimeOrigin::signed(sender.clone()); + <$sender_para as $crate::macros::Chain>::account_data_of(sender.clone()).free; + let origin = <$sender_para as $crate::macros::Chain>::RuntimeOrigin::signed(sender.clone()); let fee_asset_item = 0; - let weight_limit = $crate::WeightLimit::Unlimited; + let weight_limit = $crate::macros::WeightLimit::Unlimited; $( { // init Destination variables let receiver = [<$receiver_para Receiver>]::get(); let para_receiver_balance_before = - <$receiver_para as $crate::Chain>::account_data_of(receiver.clone()).free; + <$receiver_para as $crate::macros::Chain>::account_data_of(receiver.clone()).free; let para_destination = <$sender_para>::sibling_location_of(<$receiver_para>::para_id()); let beneficiary: MultiLocation = - $crate::AccountId32 { network: None, id: receiver.clone().into() }.into(); + $crate::macros::AccountId32 { network: None, id: receiver.clone().into() }.into(); // Send XCM message from Origin Parachain // We are only testing the limited teleport version, which should be ok since success will @@ -49,19 +64,19 @@ macro_rules! 
test_parachain_is_trusted_teleporter { weight_limit.clone(), )); - type RuntimeEvent = <$sender_para as $crate::Chain>::RuntimeEvent; + type RuntimeEvent = <$sender_para as $crate::macros::Chain>::RuntimeEvent; assert_expected_events!( $sender_para, vec![ RuntimeEvent::PolkadotXcm( - $crate::pallet_xcm::Event::Attempted { outcome: Outcome::Complete { .. } } + $crate::macros::pallet_xcm::Event::Attempted { outcome: Outcome::Complete { .. } } ) => {}, RuntimeEvent::XcmpQueue( - $crate::cumulus_pallet_xcmp_queue::Event::XcmpMessageSent { .. } + $crate::macros::cumulus_pallet_xcmp_queue::Event::XcmpMessageSent { .. } ) => {}, RuntimeEvent::Balances( - $crate::pallet_balances::Event::Withdraw { who: sender, amount } + $crate::macros::pallet_balances::Event::Withdraw { who: sender, amount } ) => {}, ] ); @@ -69,16 +84,16 @@ macro_rules! test_parachain_is_trusted_teleporter { // Receive XCM message in Destination Parachain <$receiver_para>::execute_with(|| { - type RuntimeEvent = <$receiver_para as $crate::Chain>::RuntimeEvent; + type RuntimeEvent = <$receiver_para as $crate::macros::Chain>::RuntimeEvent; assert_expected_events!( $receiver_para, vec![ RuntimeEvent::Balances( - $crate::pallet_balances::Event::Deposit { who: receiver, .. } + $crate::macros::pallet_balances::Event::Deposit { who: receiver, .. } ) => {}, RuntimeEvent::MessageQueue( - $crate::pallet_message_queue::Event::Processed { success: true, .. } + $crate::macros::pallet_message_queue::Event::Processed { success: true, .. } ) => {}, ] ); @@ -86,11 +101,11 @@ macro_rules! test_parachain_is_trusted_teleporter { // Check if balances are updated accordingly in Origin and Destination Parachains let para_sender_balance_after = - <$sender_para as $crate::Chain>::account_data_of(sender.clone()).free; + <$sender_para as $crate::macros::Chain>::account_data_of(sender.clone()).free; let para_receiver_balance_after = - <$receiver_para as $crate::Chain>::account_data_of(receiver.clone()).free; + <$receiver_para as $crate::macros::Chain>::account_data_of(receiver.clone()).free; let delivery_fees = <$sender_para>::execute_with(|| { - asset_test_utils::xcm_helpers::transfer_assets_delivery_fees::< + $crate::macros::asset_test_utils::xcm_helpers::transfer_assets_delivery_fees::< <$sender_xcm_config as xcm_executor::Config>::XcmSender, >($assets.clone(), fee_asset_item, weight_limit.clone(), beneficiary, para_destination) }); @@ -99,7 +114,7 @@ macro_rules! test_parachain_is_trusted_teleporter { assert!(para_receiver_balance_after > para_receiver_balance_before); // Update sender balance - para_sender_balance_before = <$sender_para as $crate::Chain>::account_data_of(sender.clone()).free; + para_sender_balance_before = <$sender_para as $crate::macros::Chain>::account_data_of(sender.clone()).free; } )+ } diff --git a/cumulus/parachains/integration-tests/emulated/common/src/xcm_helpers.rs b/cumulus/parachains/integration-tests/emulated/common/src/xcm_helpers.rs index dc6d19d06f4..47e92ed075f 100644 --- a/cumulus/parachains/integration-tests/emulated/common/src/xcm_helpers.rs +++ b/cumulus/parachains/integration-tests/emulated/common/src/xcm_helpers.rs @@ -13,7 +13,10 @@ // See the License for the specific language governing permissions and // limitations under the License. 
+// Cumulus use parachains_common::AccountId; + +// Polkadot use xcm::{prelude::*, DoubleEncoded}; /// Helper method to build a XCM with a `Transact` instruction and paying for its execution diff --git a/cumulus/parachains/integration-tests/emulated/networks/rococo-system/Cargo.toml b/cumulus/parachains/integration-tests/emulated/networks/rococo-system/Cargo.toml new file mode 100644 index 00000000000..713cc2ecdbb --- /dev/null +++ b/cumulus/parachains/integration-tests/emulated/networks/rococo-system/Cargo.toml @@ -0,0 +1,16 @@ +[package] +name = "rococo-system-emulated-network" +version = "0.0.0" +authors.workspace = true +edition.workspace = true +license = "Apache-2.0" +description = "Rococo System emulated network" +publish = false + +[dependencies] +# Cumulus +emulated-integration-tests-common = { path = "../../common", default-features = false } +rococo-emulated-chain = { path = "../../chains/relays/rococo" } +asset-hub-rococo-emulated-chain = { path = "../../chains/parachains/assets/asset-hub-rococo" } +bridge-hub-rococo-emulated-chain = { path = "../../chains/parachains/bridges/bridge-hub-rococo" } +penpal-emulated-chain = { path = "../../chains/parachains/testing/penpal" } diff --git a/cumulus/parachains/integration-tests/emulated/networks/rococo-system/src/lib.rs b/cumulus/parachains/integration-tests/emulated/networks/rococo-system/src/lib.rs new file mode 100644 index 00000000000..ad22185fa70 --- /dev/null +++ b/cumulus/parachains/integration-tests/emulated/networks/rococo-system/src/lib.rs @@ -0,0 +1,51 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +pub use asset_hub_rococo_emulated_chain; +pub use bridge_hub_rococo_emulated_chain; +pub use penpal_emulated_chain; +pub use rococo_emulated_chain; + +use asset_hub_rococo_emulated_chain::AssetHubRococo; +use bridge_hub_rococo_emulated_chain::BridgeHubRococo; +use penpal_emulated_chain::{PenpalA, PenpalB}; +use rococo_emulated_chain::Rococo; + +// Cumulus +use emulated_integration_tests_common::{ + accounts::{ALICE, BOB}, + xcm_emulator::{decl_test_networks, decl_test_sender_receiver_accounts_parameter_types}, +}; + +decl_test_networks! { + pub struct RococoMockNet { + relay_chain = Rococo, + parachains = vec![ + AssetHubRococo, + BridgeHubRococo, + PenpalA, + PenpalB, + ], + bridge = () + }, +} + +decl_test_sender_receiver_accounts_parameter_types! 
{ + RococoRelay { sender: ALICE, receiver: BOB }, + AssetHubRococoPara { sender: ALICE, receiver: BOB }, + BridgeHubRococoPara { sender: ALICE, receiver: BOB }, + PenpalAPara { sender: ALICE, receiver: BOB }, + PenpalBPara { sender: ALICE, receiver: BOB } +} diff --git a/cumulus/parachains/integration-tests/emulated/networks/rococo-wococo-system/Cargo.toml b/cumulus/parachains/integration-tests/emulated/networks/rococo-wococo-system/Cargo.toml new file mode 100644 index 00000000000..53a6f0840a5 --- /dev/null +++ b/cumulus/parachains/integration-tests/emulated/networks/rococo-wococo-system/Cargo.toml @@ -0,0 +1,18 @@ +[package] +name = "rococo-wococo-system-emulated-network" +version = "0.0.0" +authors.workspace = true +edition.workspace = true +license = "Apache-2.0" +description = "Rococo<>Wococo emulated bridged network" +publish = false + +[dependencies] +# Cumulus +emulated-integration-tests-common = { path = "../../common", default-features = false } +rococo-emulated-chain = { path = "../../chains/relays/rococo" } +wococo-emulated-chain = { path = "../../chains/relays/wococo" } +asset-hub-rococo-emulated-chain = { path = "../../chains/parachains/assets/asset-hub-rococo" } +asset-hub-wococo-emulated-chain = { path = "../../chains/parachains/assets/asset-hub-wococo" } +bridge-hub-rococo-emulated-chain = { path = "../../chains/parachains/bridges/bridge-hub-rococo" } +bridge-hub-wococo-emulated-chain = { path = "../../chains/parachains/bridges/bridge-hub-wococo" } diff --git a/cumulus/parachains/integration-tests/emulated/networks/rococo-wococo-system/src/lib.rs b/cumulus/parachains/integration-tests/emulated/networks/rococo-wococo-system/src/lib.rs new file mode 100644 index 00000000000..e20dcfa6b32 --- /dev/null +++ b/cumulus/parachains/integration-tests/emulated/networks/rococo-wococo-system/src/lib.rs @@ -0,0 +1,94 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +pub use asset_hub_rococo_emulated_chain; +pub use asset_hub_wococo_emulated_chain; +pub use bridge_hub_rococo_emulated_chain; +pub use bridge_hub_wococo_emulated_chain; +pub use rococo_emulated_chain; +pub use wococo_emulated_chain; + +use asset_hub_rococo_emulated_chain::AssetHubRococo; +use asset_hub_wococo_emulated_chain::AssetHubWococo; +use bridge_hub_rococo_emulated_chain::BridgeHubRococo; +use bridge_hub_wococo_emulated_chain::BridgeHubWococo; +use rococo_emulated_chain::Rococo; +use wococo_emulated_chain::Wococo; + +// Cumulus +use emulated_integration_tests_common::{ + accounts::{ALICE, BOB}, + impls::{BridgeHubMessageHandler, BridgeMessagesInstance2}, + xcm_emulator::{ + decl_test_bridges, decl_test_networks, decl_test_sender_receiver_accounts_parameter_types, + Chain, + }, +}; + +decl_test_networks! 
{ + pub struct RococoMockNet { + relay_chain = Rococo, + parachains = vec![ + AssetHubRococo, + BridgeHubRococo, + ], + bridge = RococoWococoMockBridge + + }, + pub struct WococoMockNet { + relay_chain = Wococo, + parachains = vec![ + AssetHubWococo, + BridgeHubWococo, + ], + bridge = WococoRococoMockBridge + }, +} + +decl_test_bridges! { + pub struct RococoWococoMockBridge { + source = BridgeHubRococoPara, + target = BridgeHubWococoPara, + handler = RococoWococoMessageHandler + }, + pub struct WococoRococoMockBridge { + source = BridgeHubWococoPara, + target = BridgeHubRococoPara, + handler = WococoRococoMessageHandler + } +} + +type BridgeHubRococoRuntime = ::Runtime; +type BridgeHubWococoRuntime = ::Runtime; + +pub type RococoWococoMessageHandler = BridgeHubMessageHandler< + BridgeHubRococoRuntime, + BridgeHubWococoRuntime, + BridgeMessagesInstance2, +>; +pub type WococoRococoMessageHandler = BridgeHubMessageHandler< + BridgeHubWococoRuntime, + BridgeHubRococoRuntime, + BridgeMessagesInstance2, +>; + +decl_test_sender_receiver_accounts_parameter_types! { + RococoRelay { sender: ALICE, receiver: BOB }, + AssetHubRococoPara { sender: ALICE, receiver: BOB }, + BridgeHubRococoPara { sender: ALICE, receiver: BOB }, + WococoRelay { sender: ALICE, receiver: BOB }, + AssetHubWococoPara { sender: ALICE, receiver: BOB }, + BridgeHubWococoPara { sender: ALICE, receiver: BOB } +} diff --git a/cumulus/parachains/integration-tests/emulated/networks/westend-system/Cargo.toml b/cumulus/parachains/integration-tests/emulated/networks/westend-system/Cargo.toml new file mode 100644 index 00000000000..a4360076d6b --- /dev/null +++ b/cumulus/parachains/integration-tests/emulated/networks/westend-system/Cargo.toml @@ -0,0 +1,16 @@ +[package] +name = "westend-system-emulated-network" +version = "0.0.0" +authors.workspace = true +edition.workspace = true +license = "Apache-2.0" +description = "Westend System emulated network" +publish = false + +[dependencies] +# Cumulus +emulated-integration-tests-common = { path = "../../common", default-features = false } +westend-emulated-chain = { path = "../../chains/relays/westend", default-features = false } +asset-hub-westend-emulated-chain = { path = "../../chains/parachains/assets/asset-hub-westend" } +bridge-hub-westend-emulated-chain = { path = "../../chains/parachains/bridges/bridge-hub-westend" } +penpal-emulated-chain = { path = "../../chains/parachains/testing/penpal" } diff --git a/cumulus/parachains/integration-tests/emulated/networks/westend-system/src/lib.rs b/cumulus/parachains/integration-tests/emulated/networks/westend-system/src/lib.rs new file mode 100644 index 00000000000..667b44a6986 --- /dev/null +++ b/cumulus/parachains/integration-tests/emulated/networks/westend-system/src/lib.rs @@ -0,0 +1,51 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +pub use asset_hub_westend_emulated_chain; +pub use bridge_hub_westend_emulated_chain; +pub use penpal_emulated_chain; +pub use westend_emulated_chain; + +use asset_hub_westend_emulated_chain::AssetHubWestend; +use bridge_hub_westend_emulated_chain::BridgeHubWestend; +use penpal_emulated_chain::{PenpalA, PenpalB}; +use westend_emulated_chain::Westend; + +// Cumulus +use emulated_integration_tests_common::{ + accounts::{ALICE, BOB}, + xcm_emulator::{decl_test_networks, decl_test_sender_receiver_accounts_parameter_types}, +}; + +decl_test_networks! { + pub struct WestendMockNet { + relay_chain = Westend, + parachains = vec![ + AssetHubWestend, + BridgeHubWestend, + PenpalA, + PenpalB, + ], + bridge = () + }, +} + +decl_test_sender_receiver_accounts_parameter_types! { + WestendRelay { sender: ALICE, receiver: BOB }, + AssetHubWestendPara { sender: ALICE, receiver: BOB }, + BridgeHubWestendPara { sender: ALICE, receiver: BOB }, + PenpalAPara { sender: ALICE, receiver: BOB }, + PenpalBPara { sender: ALICE, receiver: BOB } +} diff --git a/cumulus/parachains/integration-tests/emulated/networks/wococo-system/Cargo.toml b/cumulus/parachains/integration-tests/emulated/networks/wococo-system/Cargo.toml new file mode 100644 index 00000000000..a596617e82b --- /dev/null +++ b/cumulus/parachains/integration-tests/emulated/networks/wococo-system/Cargo.toml @@ -0,0 +1,16 @@ +[package] +name = "wococo-system-emulated-network" +version = "0.0.0" +authors.workspace = true +edition.workspace = true +license = "Apache-2.0" +description = "Wococo System emulated network" +publish = false + +[dependencies] +# Cumulus +emulated-integration-tests-common = { path = "../../common", default-features = false } +wococo-emulated-chain = { path = "../../chains/relays/wococo" } +asset-hub-wococo-emulated-chain = { path = "../../chains/parachains/assets/asset-hub-wococo" } +bridge-hub-wococo-emulated-chain = { path = "../../chains/parachains/bridges/bridge-hub-wococo" } +penpal-emulated-chain = { path = "../../chains/parachains/testing/penpal" } diff --git a/cumulus/parachains/integration-tests/emulated/networks/wococo-system/src/lib.rs b/cumulus/parachains/integration-tests/emulated/networks/wococo-system/src/lib.rs new file mode 100644 index 00000000000..5369afe7dff --- /dev/null +++ b/cumulus/parachains/integration-tests/emulated/networks/wococo-system/src/lib.rs @@ -0,0 +1,50 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +pub use asset_hub_wococo_emulated_chain; +pub use bridge_hub_wococo_emulated_chain; +pub use wococo_emulated_chain; + +use asset_hub_wococo_emulated_chain::AssetHubWococo; +use bridge_hub_wococo_emulated_chain::BridgeHubWococo; +use penpal_emulated_chain::{PenpalA, PenpalB}; +use wococo_emulated_chain::Wococo; + +// Cumulus +use emulated_integration_tests_common::{ + accounts::{ALICE, BOB}, + xcm_emulator::{decl_test_networks, decl_test_sender_receiver_accounts_parameter_types}, +}; + +decl_test_networks! { + pub struct WococoMockNet { + relay_chain = Wococo, + parachains = vec![ + AssetHubWococo, + BridgeHubWococo, + PenpalA, + PenpalB, + ], + bridge = () + }, +} + +decl_test_sender_receiver_accounts_parameter_types! { + WococoRelay { sender: ALICE, receiver: BOB }, + AssetHubWococoPara { sender: ALICE, receiver: BOB }, + BridgeHubWococoPara { sender: ALICE, receiver: BOB }, + PenpalAPara { sender: ALICE, receiver: BOB }, + PenpalBPara { sender: ALICE, receiver: BOB } +} diff --git a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/Cargo.toml b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/Cargo.toml new file mode 100644 index 00000000000..6e592f04ba1 --- /dev/null +++ b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/Cargo.toml @@ -0,0 +1,32 @@ +[package] +name = "asset-hub-rococo-integration-tests" +version = "1.0.0" +authors.workspace = true +edition.workspace = true +license = "Apache-2.0" +description = "Asset Hub Rococo runtime integration tests with xcm-emulator" +publish = false + +[dependencies] +codec = { package = "parity-scale-codec", version = "3.4.0", default-features = false } +assert_matches = "1.5.0" + +# Substrate +sp-runtime = { path = "../../../../../../../substrate/primitives/runtime", default-features = false} +frame-support = { path = "../../../../../../../substrate/frame/support", default-features = false} +pallet-balances = { path = "../../../../../../../substrate/frame/balances", default-features = false} +pallet-assets = { path = "../../../../../../../substrate/frame/assets", default-features = false} +pallet-asset-conversion = { path = "../../../../../../../substrate/frame/asset-conversion", default-features = false} + +# Polkadot +xcm = { package = "staging-xcm", path = "../../../../../../../polkadot/xcm", default-features = false} +pallet-xcm = { path = "../../../../../../../polkadot/xcm/pallet-xcm", default-features = false} +xcm-executor = { package = "staging-xcm-executor", path = "../../../../../../../polkadot/xcm/xcm-executor", default-features = false} +rococo-runtime = { path = "../../../../../../../polkadot/runtime/rococo" } + +# Cumulus +asset-test-utils = { path = "../../../../../runtimes/assets/test-utils" } +parachains-common = { path = "../../../../../../parachains/common" } +asset-hub-rococo-runtime = { path = "../../../../../runtimes/assets/asset-hub-rococo" } +emulated-integration-tests-common = { path = "../../../common", default-features = false} +rococo-system-emulated-network ={ path = "../../../networks/rococo-system" } diff --git a/cumulus/parachains/integration-tests/emulated/assets/asset-hub-rococo/src/lib.rs b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/lib.rs similarity index 67% rename from cumulus/parachains/integration-tests/emulated/assets/asset-hub-rococo/src/lib.rs rename to cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/lib.rs index 42f54bdf49d..11380cd1e2d 100644 --- 
a/cumulus/parachains/integration-tests/emulated/assets/asset-hub-rococo/src/lib.rs
+++ b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/lib.rs
@@ -13,34 +13,46 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
-pub use asset_test_utils::xcm_helpers;
 pub use codec::Encode;
+
+// Substrate
 pub use frame_support::{
 	assert_err, assert_ok,
 	pallet_prelude::Weight,
 	sp_runtime::{AccountId32, DispatchError, DispatchResult},
 	traits::fungibles::Inspect,
 };
-pub use integration_tests_common::{
-	constants::{
-		asset_hub_rococo::ED as ASSET_HUB_ROCOCO_ED, rococo::ED as ROCOCO_ED, PROOF_SIZE_THRESHOLD,
-		REF_TIME_THRESHOLD, XCM_V3,
-	},
-	test_parachain_is_trusted_teleporter,
-	xcm_helpers::{xcm_transact_paid_execution, xcm_transact_unpaid_execution},
-	AssetHubRococo, AssetHubRococoPallet, AssetHubRococoReceiver, AssetHubRococoSender,
-	BridgeHubRococo, BridgeHubRococoReceiver, PenpalRococoA, PenpalRococoAPallet,
-	PenpalRococoAReceiver, PenpalRococoASender, PenpalRococoB, PenpalRococoBPallet, Rococo,
-	RococoPallet, RococoReceiver, RococoSender,
-};
-pub use parachains_common::{AccountId, Balance};
+
+// Polkadot
 pub use xcm::{
 	prelude::{AccountId32 as AccountId32Junction, *},
 	v3::{Error, NetworkId::Rococo as RococoId},
 };
-pub use xcm_emulator::{
-	assert_expected_events, bx, helpers::weight_within_threshold, Chain, Parachain as Para,
-	RelayChain as Relay, Test, TestArgs, TestContext, TestExt,
+
+// Cumulus
+pub use asset_test_utils::xcm_helpers;
+pub use emulated_integration_tests_common::{
+	test_parachain_is_trusted_teleporter,
+	xcm_emulator::{
+		assert_expected_events, bx, helpers::weight_within_threshold, Chain, Parachain as Para,
+		RelayChain as Relay, Test, TestArgs, TestContext, TestExt,
+	},
+	xcm_helpers::{xcm_transact_paid_execution, xcm_transact_unpaid_execution},
+	PROOF_SIZE_THRESHOLD, REF_TIME_THRESHOLD, XCM_V3,
+};
+pub use parachains_common::{AccountId, Balance};
+pub use rococo_system_emulated_network::{
+	asset_hub_rococo_emulated_chain::{
+		genesis::ED as ASSET_HUB_ROCOCO_ED, AssetHubRococoParaPallet as AssetHubRococoPallet,
+	},
+	penpal_emulated_chain::PenpalAParaPallet as PenpalAPallet,
+	rococo_emulated_chain::{genesis::ED as ROCOCO_ED, RococoRelayPallet as RococoPallet},
+	AssetHubRococoPara as AssetHubRococo, AssetHubRococoParaReceiver as AssetHubRococoReceiver,
+	AssetHubRococoParaSender as AssetHubRococoSender, BridgeHubRococoPara as BridgeHubRococo,
+	BridgeHubRococoParaReceiver as BridgeHubRococoReceiver, PenpalAPara as PenpalA,
+	PenpalAParaReceiver as PenpalAReceiver, PenpalAParaSender as PenpalASender,
+	RococoRelay as Rococo, RococoRelayReceiver as RococoReceiver,
+	RococoRelaySender as RococoSender,
 };
 pub const ASSET_ID: u32 = 1;
@@ -50,7 +62,7 @@ pub const ASSETS_PALLET_ID: u8 = 50;
 pub type RelayToSystemParaTest = Test<Rococo, AssetHubRococo>;
 pub type SystemParaToRelayTest = Test<AssetHubRococo, Rococo>;
-pub type SystemParaToParaTest = Test<AssetHubRococo, PenpalRococoA>;
+pub type SystemParaToParaTest = Test<AssetHubRococo, PenpalA>;
 /// Returns a `TestArgs` instance to be used for the Relay Chain across integration tests
 pub fn relay_test_args(amount: Balance) -> TestArgs {
diff --git a/cumulus/parachains/integration-tests/emulated/assets/asset-hub-rococo/src/tests/mod.rs b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/mod.rs
similarity index 100%
rename from cumulus/parachains/integration-tests/emulated/assets/asset-hub-rococo/src/tests/mod.rs
rename to
cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/mod.rs diff --git a/cumulus/parachains/integration-tests/emulated/assets/asset-hub-rococo/src/tests/reserve_transfer.rs b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/reserve_transfer.rs similarity index 95% rename from cumulus/parachains/integration-tests/emulated/assets/asset-hub-rococo/src/tests/reserve_transfer.rs rename to cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/reserve_transfer.rs index 9c3378de226..76d93b2dbdb 100644 --- a/cumulus/parachains/integration-tests/emulated/assets/asset-hub-rococo/src/tests/reserve_transfer.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/reserve_transfer.rs @@ -294,14 +294,14 @@ fn reserve_transfer_native_asset_from_system_para_to_relay_fails() { #[test] fn limited_reserve_transfer_native_asset_from_system_para_to_para() { // Init values for System Parachain - let destination = AssetHubRococo::sibling_location_of(PenpalRococoA::para_id()); - let beneficiary_id = PenpalRococoAReceiver::get(); + let destination = AssetHubRococo::sibling_location_of(PenpalA::para_id()); + let beneficiary_id = PenpalAReceiver::get(); let amount_to_send: Balance = ASSET_HUB_ROCOCO_ED * 1000; let assets = (Parent, amount_to_send).into(); let test_args = TestContext { sender: AssetHubRococoSender::get(), - receiver: PenpalRococoAReceiver::get(), + receiver: PenpalAReceiver::get(), args: system_para_test_args(destination, beneficiary_id, amount_to_send, assets, None), }; @@ -332,14 +332,14 @@ fn limited_reserve_transfer_native_asset_from_system_para_to_para() { #[test] fn reserve_transfer_native_asset_from_system_para_to_para() { // Init values for System Parachain - let destination = AssetHubRococo::sibling_location_of(PenpalRococoA::para_id()); - let beneficiary_id = PenpalRococoAReceiver::get(); + let destination = AssetHubRococo::sibling_location_of(PenpalA::para_id()); + let beneficiary_id = PenpalAReceiver::get(); let amount_to_send: Balance = ASSET_HUB_ROCOCO_ED * 1000; let assets = (Parent, amount_to_send).into(); let test_args = TestContext { sender: AssetHubRococoSender::get(), - receiver: PenpalRococoAReceiver::get(), + receiver: PenpalAReceiver::get(), args: system_para_test_args(destination, beneficiary_id, amount_to_send, assets, None), }; @@ -380,8 +380,8 @@ fn limited_reserve_transfer_asset_from_system_para_to_para() { ); // Init values for System Parachain - let destination = AssetHubRococo::sibling_location_of(PenpalRococoA::para_id()); - let beneficiary_id = PenpalRococoAReceiver::get(); + let destination = AssetHubRococo::sibling_location_of(PenpalA::para_id()); + let beneficiary_id = PenpalAReceiver::get(); let amount_to_send = ASSET_MIN_BALANCE * 1000; let assets = (X2(PalletInstance(ASSETS_PALLET_ID), GeneralIndex(ASSET_ID.into())), amount_to_send) @@ -389,7 +389,7 @@ fn limited_reserve_transfer_asset_from_system_para_to_para() { let system_para_test_args = TestContext { sender: AssetHubRococoSender::get(), - receiver: PenpalRococoAReceiver::get(), + receiver: PenpalAReceiver::get(), args: system_para_test_args(destination, beneficiary_id, amount_to_send, assets, None), }; @@ -416,8 +416,8 @@ fn reserve_transfer_asset_from_system_para_to_para() { ); // Init values for System Parachain - let destination = AssetHubRococo::sibling_location_of(PenpalRococoA::para_id()); - let beneficiary_id = PenpalRococoAReceiver::get(); + let destination = 
AssetHubRococo::sibling_location_of(PenpalA::para_id()); + let beneficiary_id = PenpalAReceiver::get(); let amount_to_send = ASSET_MIN_BALANCE * 1000; let assets = (X2(PalletInstance(ASSETS_PALLET_ID), GeneralIndex(ASSET_ID.into())), amount_to_send) @@ -425,7 +425,7 @@ fn reserve_transfer_asset_from_system_para_to_para() { let system_para_test_args = TestContext { sender: AssetHubRococoSender::get(), - receiver: PenpalRococoAReceiver::get(), + receiver: PenpalAReceiver::get(), args: system_para_test_args(destination, beneficiary_id, amount_to_send, assets, None), }; diff --git a/cumulus/parachains/integration-tests/emulated/assets/asset-hub-rococo/src/tests/send.rs b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/send.rs similarity index 88% rename from cumulus/parachains/integration-tests/emulated/assets/asset-hub-rococo/src/tests/send.rs rename to cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/send.rs index 195afcd34c7..7be0463d2ec 100644 --- a/cumulus/parachains/integration-tests/emulated/assets/asset-hub-rococo/src/tests/send.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/send.rs @@ -33,7 +33,7 @@ fn send_transact_as_superuser_from_relay_to_system_para_works() { #[test] fn send_xcm_from_para_to_system_para_paying_fee_with_assets_works() { let para_sovereign_account = AssetHubRococo::sovereign_account_id_of( - AssetHubRococo::sibling_location_of(PenpalRococoA::para_id()), + AssetHubRococo::sibling_location_of(PenpalA::para_id()), ); // Force create and mint assets for Parachain's sovereign account @@ -60,9 +60,8 @@ fn send_xcm_from_para_to_system_para_paying_fee_with_assets_works() { let native_asset = (X2(PalletInstance(ASSETS_PALLET_ID), GeneralIndex(ASSET_ID.into())), fee_amount).into(); - let root_origin = ::RuntimeOrigin::root(); - let system_para_destination = - PenpalRococoA::sibling_location_of(AssetHubRococo::para_id()).into(); + let root_origin = ::RuntimeOrigin::root(); + let system_para_destination = PenpalA::sibling_location_of(AssetHubRococo::para_id()).into(); let xcm = xcm_transact_paid_execution( call, origin_kind, @@ -70,14 +69,14 @@ fn send_xcm_from_para_to_system_para_paying_fee_with_assets_works() { para_sovereign_account.clone(), ); - PenpalRococoA::execute_with(|| { - assert_ok!(::PolkadotXcm::send( + PenpalA::execute_with(|| { + assert_ok!(::PolkadotXcm::send( root_origin, bx!(system_para_destination), bx!(xcm), )); - PenpalRococoA::assert_xcm_pallet_sent(); + PenpalA::assert_xcm_pallet_sent(); }); AssetHubRococo::execute_with(|| { diff --git a/cumulus/parachains/integration-tests/emulated/assets/asset-hub-rococo/src/tests/set_xcm_versions.rs b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/set_xcm_versions.rs similarity index 100% rename from cumulus/parachains/integration-tests/emulated/assets/asset-hub-rococo/src/tests/set_xcm_versions.rs rename to cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/set_xcm_versions.rs diff --git a/cumulus/parachains/integration-tests/emulated/assets/asset-hub-rococo/src/tests/swap.rs b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/swap.rs similarity index 94% rename from cumulus/parachains/integration-tests/emulated/assets/asset-hub-rococo/src/tests/swap.rs rename to cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/swap.rs index f9da0bf946e..e08af50c14e 
100644 --- a/cumulus/parachains/integration-tests/emulated/assets/asset-hub-rococo/src/tests/swap.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/swap.rs @@ -125,7 +125,7 @@ fn swap_locally_on_chain_using_foreign_assets() { let foreign_asset1_at_asset_hub_rococo = Box::new(MultiLocation { parents: 1, interior: X3( - Parachain(PenpalRococoA::para_id().into()), + Parachain(PenpalA::para_id().into()), PalletInstance(ASSETS_PALLET_ID), GeneralIndex(ASSET_ID.into()), ), @@ -136,18 +136,18 @@ fn swap_locally_on_chain_using_foreign_assets() { .into(); let penpal_location = - MultiLocation { parents: 1, interior: X1(Parachain(PenpalRococoA::para_id().into())) }; + MultiLocation { parents: 1, interior: X1(Parachain(PenpalA::para_id().into())) }; // 1. Create asset on penpal: - PenpalRococoA::execute_with(|| { - assert_ok!(::Assets::create( - ::RuntimeOrigin::signed(PenpalRococoASender::get()), + PenpalA::execute_with(|| { + assert_ok!(::Assets::create( + ::RuntimeOrigin::signed(PenpalASender::get()), ASSET_ID.into(), - PenpalRococoASender::get().into(), + PenpalASender::get().into(), 1000, )); - assert!(::Assets::asset_exists(ASSET_ID)); + assert!(::Assets::asset_exists(ASSET_ID)); }); // 2. Create foreign asset on asset_hub_rococo: @@ -202,18 +202,18 @@ fn swap_locally_on_chain_using_foreign_assets() { ])); // Send XCM message from penpal => asset_hub_rococo - let sudo_penpal_origin = ::RuntimeOrigin::root(); - PenpalRococoA::execute_with(|| { - assert_ok!(::PolkadotXcm::send( + let sudo_penpal_origin = ::RuntimeOrigin::root(); + PenpalA::execute_with(|| { + assert_ok!(::PolkadotXcm::send( sudo_penpal_origin.clone(), bx!(assets_para_destination.clone()), bx!(xcm), )); - type RuntimeEvent = ::RuntimeEvent; + type RuntimeEvent = ::RuntimeEvent; assert_expected_events!( - PenpalRococoA, + PenpalA, vec![ RuntimeEvent::PolkadotXcm(pallet_xcm::Event::Sent { .. 
}) => {}, ] diff --git a/cumulus/parachains/integration-tests/emulated/assets/asset-hub-rococo/src/tests/teleport.rs b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/teleport.rs similarity index 100% rename from cumulus/parachains/integration-tests/emulated/assets/asset-hub-rococo/src/tests/teleport.rs rename to cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/teleport.rs diff --git a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/Cargo.toml b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/Cargo.toml new file mode 100644 index 00000000000..7080abc0a44 --- /dev/null +++ b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/Cargo.toml @@ -0,0 +1,41 @@ +[package] +name = "asset-hub-westend-integration-tests" +version = "1.0.0" +authors.workspace = true +edition.workspace = true +license = "Apache-2.0" +description = "Asset Hub Westend runtime integration tests with xcm-emulator" +publish = false + +[dependencies] +codec = { package = "parity-scale-codec", version = "3.4.0", default-features = false } +assert_matches = "1.5.0" + +# Substrate +sp-runtime = { path = "../../../../../../../substrate/primitives/runtime", default-features = false} +frame-support = { path = "../../../../../../../substrate/frame/support", default-features = false} +frame-system = { path = "../../../../../../../substrate/frame/system", default-features = false} +pallet-balances = { path = "../../../../../../../substrate/frame/balances", default-features = false} +pallet-assets = { path = "../../../../../../../substrate/frame/assets", default-features = false} +pallet-asset-conversion = { path = "../../../../../../../substrate/frame/asset-conversion", default-features = false} +pallet-treasury = { path = "../../../../../../../substrate/frame/treasury", default-features = false} +pallet-asset-rate = { path = "../../../../../../../substrate/frame/asset-rate", default-features = false} +pallet-message-queue = { path = "../../../../../../../substrate/frame/message-queue", default-features = false } + +# Polkadot +polkadot-runtime-common = { path = "../../../../../../../polkadot/runtime/common" } +xcm = { package = "staging-xcm", path = "../../../../../../../polkadot/xcm", default-features = false} +xcm-builder = { package = "staging-xcm-builder", path = "../../../../../../../polkadot/xcm/xcm-builder", default-features = false} +xcm-executor = { package = "staging-xcm-executor", path = "../../../../../../../polkadot/xcm/xcm-executor", default-features = false} +pallet-xcm = { path = "../../../../../../../polkadot/xcm/pallet-xcm", default-features = false} +westend-runtime = { path = "../../../../../../../polkadot/runtime/westend" } +westend-runtime-constants = { path = "../../../../../../../polkadot/runtime/westend/constants", default-features = false } + +# Cumulus +parachains-common = { path = "../../../../../../parachains/common" } +asset-hub-westend-runtime = { path = "../../../../../runtimes/assets/asset-hub-westend" } +asset-test-utils = { path = "../../../../../runtimes/assets/test-utils" } +cumulus-pallet-dmp-queue = { default-features = false, path = "../../../../../../pallets/dmp-queue" } +cumulus-pallet-parachain-system = { default-features = false, path = "../../../../../../pallets/parachain-system" } +emulated-integration-tests-common = { path = "../../../common", default-features = false} +westend-system-emulated-network ={ path = 
"../../../networks/westend-system" } diff --git a/cumulus/parachains/integration-tests/emulated/assets/asset-hub-westend/src/lib.rs b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/lib.rs similarity index 69% rename from cumulus/parachains/integration-tests/emulated/assets/asset-hub-westend/src/lib.rs rename to cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/lib.rs index 0133cdbed5c..e52ad448c0b 100644 --- a/cumulus/parachains/integration-tests/emulated/assets/asset-hub-westend/src/lib.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/lib.rs @@ -13,8 +13,9 @@ // See the License for the specific language governing permissions and // limitations under the License. -pub use asset_test_utils::xcm_helpers; pub use codec::Encode; + +// Substrate pub use frame_support::{ assert_err, assert_ok, instances::Instance2, @@ -23,25 +24,36 @@ pub use frame_support::{ traits::fungibles::Inspect, BoundedVec, }; -pub use integration_tests_common::{ - constants::{ - asset_hub_westend::ED as ASSET_HUB_WESTEND_ED, westend::ED as WESTEND_ED, - PROOF_SIZE_THRESHOLD, REF_TIME_THRESHOLD, XCM_V3, - }, - test_parachain_is_trusted_teleporter, - xcm_helpers::{xcm_transact_paid_execution, xcm_transact_unpaid_execution}, - AssetHubWestend, AssetHubWestendPallet, AssetHubWestendReceiver, AssetHubWestendSender, - PenpalWestendA, PenpalWestendAPallet, PenpalWestendAReceiver, PenpalWestendASender, Westend, - WestendPallet, WestendReceiver, WestendSender, -}; -pub use parachains_common::{AccountId, Balance}; + +// Polkadot pub use xcm::{ prelude::{AccountId32 as AccountId32Junction, *}, v3::{Error, NetworkId::Westend as WestendId}, }; -pub use xcm_emulator::{ - assert_expected_events, bx, helpers::weight_within_threshold, Chain, Parachain as Para, - RelayChain as Relay, Test, TestArgs, TestContext, TestExt, + +// Cumulus +pub use asset_test_utils::xcm_helpers; +pub use emulated_integration_tests_common::{ + test_parachain_is_trusted_teleporter, + xcm_emulator::{ + assert_expected_events, bx, helpers::weight_within_threshold, Chain, Parachain as Para, + RelayChain as Relay, Test, TestArgs, TestContext, TestExt, + }, + xcm_helpers::{xcm_transact_paid_execution, xcm_transact_unpaid_execution}, + PROOF_SIZE_THRESHOLD, REF_TIME_THRESHOLD, XCM_V3, +}; +pub use parachains_common::{AccountId, Balance}; +pub use westend_system_emulated_network::{ + asset_hub_westend_emulated_chain::{ + genesis::ED as ASSET_HUB_WESTEND_ED, AssetHubWestendParaPallet as AssetHubWestendPallet, + }, + penpal_emulated_chain::PenpalAParaPallet as PenpalAPallet, + westend_emulated_chain::{genesis::ED as WESTEND_ED, WestendRelayPallet as WestendPallet}, + AssetHubWestendPara as AssetHubWestend, AssetHubWestendParaReceiver as AssetHubWestendReceiver, + AssetHubWestendParaSender as AssetHubWestendSender, PenpalAPara as PenpalA, + PenpalAParaReceiver as PenpalAReceiver, PenpalAParaSender as PenpalASender, + WestendRelay as Westend, WestendRelayReceiver as WestendReceiver, + WestendRelaySender as WestendSender, }; pub const ASSET_ID: u32 = 1; @@ -51,7 +63,7 @@ pub const ASSETS_PALLET_ID: u8 = 50; pub type RelayToSystemParaTest = Test; pub type SystemParaToRelayTest = Test; -pub type SystemParaToParaTest = Test; +pub type SystemParaToParaTest = Test; /// Returns a `TestArgs` instance to de used for the Relay Chain accross integraton tests pub fn relay_test_args(amount: Balance) -> TestArgs { diff --git 
a/cumulus/parachains/integration-tests/emulated/assets/asset-hub-westend/src/tests/mod.rs b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/mod.rs similarity index 100% rename from cumulus/parachains/integration-tests/emulated/assets/asset-hub-westend/src/tests/mod.rs rename to cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/mod.rs diff --git a/cumulus/parachains/integration-tests/emulated/assets/asset-hub-westend/src/tests/reserve_transfer.rs b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/reserve_transfer.rs similarity index 95% rename from cumulus/parachains/integration-tests/emulated/assets/asset-hub-westend/src/tests/reserve_transfer.rs rename to cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/reserve_transfer.rs index 9b646035c29..19a203897ad 100644 --- a/cumulus/parachains/integration-tests/emulated/assets/asset-hub-westend/src/tests/reserve_transfer.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/reserve_transfer.rs @@ -294,14 +294,14 @@ fn reserve_transfer_native_asset_from_system_para_to_relay_fails() { #[test] fn limited_reserve_transfer_native_asset_from_system_para_to_para() { // Init values for System Parachain - let destination = AssetHubWestend::sibling_location_of(PenpalWestendA::para_id()); - let beneficiary_id = PenpalWestendAReceiver::get(); + let destination = AssetHubWestend::sibling_location_of(PenpalA::para_id()); + let beneficiary_id = PenpalAReceiver::get(); let amount_to_send: Balance = ASSET_HUB_WESTEND_ED * 1000; let assets = (Parent, amount_to_send).into(); let test_args = TestContext { sender: AssetHubWestendSender::get(), - receiver: PenpalWestendAReceiver::get(), + receiver: PenpalAReceiver::get(), args: system_para_test_args(destination, beneficiary_id, amount_to_send, assets, None), }; @@ -336,14 +336,14 @@ fn limited_reserve_transfer_native_asset_from_system_para_to_para() { #[test] fn reserve_transfer_native_asset_from_system_para_to_para() { // Init values for System Parachain - let destination = AssetHubWestend::sibling_location_of(PenpalWestendA::para_id()); - let beneficiary_id = PenpalWestendAReceiver::get(); + let destination = AssetHubWestend::sibling_location_of(PenpalA::para_id()); + let beneficiary_id = PenpalAReceiver::get(); let amount_to_send: Balance = ASSET_HUB_WESTEND_ED * 1000; let assets = (Parent, amount_to_send).into(); let test_args = TestContext { sender: AssetHubWestendSender::get(), - receiver: PenpalWestendAReceiver::get(), + receiver: PenpalAReceiver::get(), args: system_para_test_args(destination, beneficiary_id, amount_to_send, assets, None), }; @@ -388,8 +388,8 @@ fn limited_reserve_transfer_asset_from_system_para_to_para() { ); // Init values for System Parachain - let destination = AssetHubWestend::sibling_location_of(PenpalWestendA::para_id()); - let beneficiary_id = PenpalWestendAReceiver::get(); + let destination = AssetHubWestend::sibling_location_of(PenpalA::para_id()); + let beneficiary_id = PenpalAReceiver::get(); let amount_to_send = ASSET_MIN_BALANCE * 1000; let assets = (X2(PalletInstance(ASSETS_PALLET_ID), GeneralIndex(ASSET_ID.into())), amount_to_send) @@ -397,7 +397,7 @@ fn limited_reserve_transfer_asset_from_system_para_to_para() { let system_para_test_args = TestContext { sender: AssetHubWestendSender::get(), - receiver: PenpalWestendAReceiver::get(), + receiver: PenpalAReceiver::get(), args: 
system_para_test_args(destination, beneficiary_id, amount_to_send, assets, None), }; @@ -424,8 +424,8 @@ fn reserve_transfer_asset_from_system_para_to_para() { ); // Init values for System Parachain - let destination = AssetHubWestend::sibling_location_of(PenpalWestendA::para_id()); - let beneficiary_id = PenpalWestendAReceiver::get(); + let destination = AssetHubWestend::sibling_location_of(PenpalA::para_id()); + let beneficiary_id = PenpalAReceiver::get(); let amount_to_send = ASSET_MIN_BALANCE * 1000; let assets = (X2(PalletInstance(ASSETS_PALLET_ID), GeneralIndex(ASSET_ID.into())), amount_to_send) @@ -433,7 +433,7 @@ fn reserve_transfer_asset_from_system_para_to_para() { let system_para_test_args = TestContext { sender: AssetHubWestendSender::get(), - receiver: PenpalWestendAReceiver::get(), + receiver: PenpalAReceiver::get(), args: system_para_test_args(destination, beneficiary_id, amount_to_send, assets, None), }; diff --git a/cumulus/parachains/integration-tests/emulated/assets/asset-hub-westend/src/tests/send.rs b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/send.rs similarity index 87% rename from cumulus/parachains/integration-tests/emulated/assets/asset-hub-westend/src/tests/send.rs rename to cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/send.rs index e603af685bb..bda9a3e69c4 100644 --- a/cumulus/parachains/integration-tests/emulated/assets/asset-hub-westend/src/tests/send.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/send.rs @@ -33,7 +33,7 @@ fn send_transact_as_superuser_from_relay_to_system_para_works() { #[test] fn send_xcm_from_para_to_system_para_paying_fee_with_assets_works() { let para_sovereign_account = AssetHubWestend::sovereign_account_id_of( - AssetHubWestend::sibling_location_of(PenpalWestendA::para_id()), + AssetHubWestend::sibling_location_of(PenpalA::para_id()), ); // Force create and mint assets for Parachain's sovereign account @@ -60,9 +60,8 @@ fn send_xcm_from_para_to_system_para_paying_fee_with_assets_works() { let native_asset = (X2(PalletInstance(ASSETS_PALLET_ID), GeneralIndex(ASSET_ID.into())), fee_amount).into(); - let root_origin = ::RuntimeOrigin::root(); - let system_para_destination = - PenpalWestendA::sibling_location_of(AssetHubWestend::para_id()).into(); + let root_origin = ::RuntimeOrigin::root(); + let system_para_destination = PenpalA::sibling_location_of(AssetHubWestend::para_id()).into(); let xcm = xcm_transact_paid_execution( call, origin_kind, @@ -70,14 +69,14 @@ fn send_xcm_from_para_to_system_para_paying_fee_with_assets_works() { para_sovereign_account.clone(), ); - PenpalWestendA::execute_with(|| { - assert_ok!(::PolkadotXcm::send( + PenpalA::execute_with(|| { + assert_ok!(::PolkadotXcm::send( root_origin, bx!(system_para_destination), bx!(xcm), )); - PenpalWestendA::assert_xcm_pallet_sent(); + PenpalA::assert_xcm_pallet_sent(); }); AssetHubWestend::execute_with(|| { diff --git a/cumulus/parachains/integration-tests/emulated/assets/asset-hub-westend/src/tests/set_xcm_versions.rs b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/set_xcm_versions.rs similarity index 100% rename from cumulus/parachains/integration-tests/emulated/assets/asset-hub-westend/src/tests/set_xcm_versions.rs rename to cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/set_xcm_versions.rs diff --git 
a/cumulus/parachains/integration-tests/emulated/assets/asset-hub-westend/src/tests/swap.rs b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/swap.rs similarity index 92% rename from cumulus/parachains/integration-tests/emulated/assets/asset-hub-westend/src/tests/swap.rs rename to cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/swap.rs index 88d44d9a4fb..a8e19f9ef4b 100644 --- a/cumulus/parachains/integration-tests/emulated/assets/asset-hub-westend/src/tests/swap.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/swap.rs @@ -114,7 +114,7 @@ fn swap_locally_on_chain_using_foreign_assets() { let foreign_asset1_at_asset_hub_westend = Box::new(MultiLocation { parents: 1, interior: X3( - Parachain(PenpalWestendA::para_id().into()), + Parachain(PenpalA::para_id().into()), PalletInstance(ASSETS_PALLET_ID), GeneralIndex(ASSET_ID.into()), ), @@ -125,18 +125,18 @@ fn swap_locally_on_chain_using_foreign_assets() { .into(); let penpal_location = - MultiLocation { parents: 1, interior: X1(Parachain(PenpalWestendA::para_id().into())) }; + MultiLocation { parents: 1, interior: X1(Parachain(PenpalA::para_id().into())) }; // 1. Create asset on penpal: - PenpalWestendA::execute_with(|| { - assert_ok!(::Assets::create( - ::RuntimeOrigin::signed(PenpalWestendASender::get()), + PenpalA::execute_with(|| { + assert_ok!(::Assets::create( + ::RuntimeOrigin::signed(PenpalASender::get()), ASSET_ID.into(), - PenpalWestendASender::get().into(), + PenpalASender::get().into(), 1000, )); - assert!(::Assets::asset_exists(ASSET_ID)); + assert!(::Assets::asset_exists(ASSET_ID)); }); // 2. Create foreign asset on asset_hub_westend: @@ -190,26 +190,24 @@ fn swap_locally_on_chain_using_foreign_assets() { ])); // Send XCM message from penpal => asset_hub_westend - let sudo_penpal_origin = ::RuntimeOrigin::root(); - PenpalWestendA::execute_with(|| { - assert_ok!(::PolkadotXcm::send( + let sudo_penpal_origin = ::RuntimeOrigin::root(); + PenpalA::execute_with(|| { + assert_ok!(::PolkadotXcm::send( sudo_penpal_origin.clone(), bx!(assets_para_destination.clone()), bx!(xcm), )); - type RuntimeEvent = ::RuntimeEvent; + type RuntimeEvent = ::RuntimeEvent; assert_expected_events!( - PenpalWestendA, + PenpalA, vec![ RuntimeEvent::PolkadotXcm(pallet_xcm::Event::Sent { .. }) => {}, ] ); }); - // One block for the MessageQueue to process the message. - AssetHubWestend::execute_with(|| {}); // Receive XCM message in Assets Parachain in the next block. AssetHubWestend::execute_with(|| { assert!(::ForeignAssets::asset_exists( diff --git a/cumulus/parachains/integration-tests/emulated/assets/asset-hub-westend/src/tests/teleport.rs b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/teleport.rs similarity index 99% rename from cumulus/parachains/integration-tests/emulated/assets/asset-hub-westend/src/tests/teleport.rs rename to cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/teleport.rs index 4fe0062dafc..57e1b93f349 100644 --- a/cumulus/parachains/integration-tests/emulated/assets/asset-hub-westend/src/tests/teleport.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/teleport.rs @@ -13,8 +13,6 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-#![allow(dead_code)] // - use crate::*; use asset_hub_westend_runtime::xcm_config::XcmConfig as AssetHubWestendXcmConfig; use westend_runtime::xcm_config::XcmConfig as WestendXcmConfig; diff --git a/cumulus/parachains/integration-tests/emulated/assets/asset-hub-westend/src/tests/treasury.rs b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/treasury.rs similarity index 98% rename from cumulus/parachains/integration-tests/emulated/assets/asset-hub-westend/src/tests/treasury.rs rename to cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/treasury.rs index 7040d061f5b..32089f7ecec 100644 --- a/cumulus/parachains/integration-tests/emulated/assets/asset-hub-westend/src/tests/treasury.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/treasury.rs @@ -14,8 +14,8 @@ // limitations under the License. use crate::*; +use emulated_integration_tests_common::accounts::{ALICE, BOB}; use frame_support::traits::fungibles::{Create, Inspect, Mutate}; -use integration_tests_common::constants::accounts::{ALICE, BOB}; use polkadot_runtime_common::impls::VersionedLocatableAsset; use xcm_executor::traits::ConvertLocation; diff --git a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/Cargo.toml b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/Cargo.toml new file mode 100644 index 00000000000..035d9c10793 --- /dev/null +++ b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/Cargo.toml @@ -0,0 +1,33 @@ +[package] +name = "bridge-hub-rococo-integration-tests" +version = "1.0.0" +authors.workspace = true +edition.workspace = true +license = "Apache-2.0" +description = "Bridge Hub Rococo runtime integration tests with xcm-emulator" +publish = false + +[dependencies] +codec = { package = "parity-scale-codec", version = "3.4.0", default-features = false } + +# Substrate +frame-support = { path = "../../../../../../../substrate/frame/support", default-features = false} +pallet-message-queue = { path = "../../../../../../../substrate/frame/message-queue" } + +# Polkadot +xcm = { package = "staging-xcm", path = "../../../../../../../polkadot/xcm", default-features = false} +pallet-xcm = { path = "../../../../../../../polkadot/xcm/pallet-xcm", default-features = false} +xcm-executor = { package = "staging-xcm-executor", path = "../../../../../../../polkadot/xcm/xcm-executor", default-features = false} + +# Bridges +pallet-bridge-messages = { path = "../../../../../../../bridges/modules/messages", default-features = false} +bp-messages = { path = "../../../../../../../bridges/primitives/messages", default-features = false} + +# Cumulus +asset-test-utils = { path = "../../../../../../parachains/runtimes/assets/test-utils" } +parachains-common = { path = "../../../../../../parachains/common" } +cumulus-pallet-xcmp-queue = { path = "../../../../../../pallets/xcmp-queue", default-features = false} +cumulus-pallet-dmp-queue = { path = "../../../../../../pallets/dmp-queue", default-features = false} +bridge-hub-rococo-runtime = { path = "../../../../../../parachains/runtimes/bridge-hubs/bridge-hub-rococo", default-features = false } +emulated-integration-tests-common = { path = "../../../common", default-features = false} +rococo-wococo-system-emulated-network ={ path = "../../../networks/rococo-wococo-system" } diff --git a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/lib.rs 
b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/lib.rs new file mode 100644 index 00000000000..19e10d23bbb --- /dev/null +++ b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/lib.rs @@ -0,0 +1,60 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Substrate +pub use frame_support::assert_ok; + +// Polkadot +pub use xcm::{ + prelude::{AccountId32 as AccountId32Junction, *}, + v3::{ + Error, + NetworkId::{Rococo as RococoId, Wococo as WococoId}, + }, +}; + +// Bridges +pub use bp_messages::LaneId; + +// Cumulus +pub use emulated_integration_tests_common::{ + test_parachain_is_trusted_teleporter, + xcm_emulator::{ + assert_expected_events, bx, helpers::weight_within_threshold, Chain, Parachain as Para, + RelayChain as Relay, Test, TestArgs, TestContext, TestExt, + }, + xcm_helpers::{xcm_transact_paid_execution, xcm_transact_unpaid_execution}, + PROOF_SIZE_THRESHOLD, REF_TIME_THRESHOLD, XCM_V3, +}; +pub use parachains_common::{AccountId, Balance}; +pub use rococo_wococo_system_emulated_network::{ + bridge_hub_rococo_emulated_chain::{ + genesis::ED as BRIDGE_HUB_ROCOCO_ED, BridgeHubRococoParaPallet as BridgeHubRococoPallet, + }, + rococo_emulated_chain::{genesis::ED as ROCOCO_ED, RococoRelayPallet as RococoPallet}, + AssetHubRococoPara as AssetHubRococo, AssetHubRococoParaReceiver as AssetHubRococoReceiver, + AssetHubRococoParaSender as AssetHubRococoSender, AssetHubWococoPara as AssetHubWococo, + BridgeHubRococoPara as BridgeHubRococo, BridgeHubRococoParaReceiver as BridgeHubRococoReceiver, + BridgeHubRococoParaSender as BridgeHubRococoSender, BridgeHubWococoPara as BridgeHubWococo, + RococoRelay as Rococo, RococoRelayReceiver as RococoReceiver, + RococoRelaySender as RococoSender, +}; + +pub const ASSET_ID: u32 = 1; +pub const ASSET_MIN_BALANCE: u128 = 1000; +pub const ASSETS_PALLET_ID: u8 = 50; + +#[cfg(test)] +mod tests; diff --git a/cumulus/parachains/integration-tests/emulated/bridges/bridge-hub-rococo/src/tests/example.rs b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/example.rs similarity index 100% rename from cumulus/parachains/integration-tests/emulated/bridges/bridge-hub-rococo/src/tests/example.rs rename to cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/example.rs diff --git a/cumulus/parachains/integration-tests/emulated/bridges/bridge-hub-rococo/src/tests/mod.rs b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/mod.rs similarity index 100% rename from cumulus/parachains/integration-tests/emulated/bridges/bridge-hub-rococo/src/tests/mod.rs rename to cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/mod.rs diff --git a/cumulus/parachains/integration-tests/emulated/bridges/bridge-hub-rococo/src/tests/teleport.rs 
b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/teleport.rs similarity index 100% rename from cumulus/parachains/integration-tests/emulated/bridges/bridge-hub-rococo/src/tests/teleport.rs rename to cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/teleport.rs diff --git a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/Cargo.toml b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/Cargo.toml new file mode 100644 index 00000000000..62b969b682f --- /dev/null +++ b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/Cargo.toml @@ -0,0 +1,33 @@ +[package] +name = "bridge-hub-westend-integration-tests" +version = "1.0.0" +authors.workspace = true +edition.workspace = true +license = "Apache-2.0" +description = "Bridge Hub Westend runtime integration tests with xcm-emulator" +publish = false + +[dependencies] +codec = { package = "parity-scale-codec", version = "3.4.0", default-features = false } + +# Substrate +frame-support = { path = "../../../../../../../substrate/frame/support", default-features = false} +pallet-message-queue = { path = "../../../../../../../substrate/frame/message-queue" } + +# Polkadot +xcm = { package = "staging-xcm", path = "../../../../../../../polkadot/xcm", default-features = false} +pallet-xcm = { path = "../../../../../../../polkadot/xcm/pallet-xcm", default-features = false} +xcm-executor = { package = "staging-xcm-executor", path = "../../../../../../../polkadot/xcm/xcm-executor", default-features = false} + +# Bridges +pallet-bridge-messages = { path = "../../../../../../../bridges/modules/messages", default-features = false} +bp-messages = { path = "../../../../../../../bridges/primitives/messages", default-features = false} + +# Cumulus +asset-test-utils = { path = "../../../../../../parachains/runtimes/assets/test-utils" } +parachains-common = { path = "../../../../../../parachains/common" } +cumulus-pallet-xcmp-queue = { path = "../../../../../../pallets/xcmp-queue", default-features = false} +cumulus-pallet-dmp-queue = { path = "../../../../../../pallets/dmp-queue", default-features = false} +bridge-hub-westend-runtime = { path = "../../../../../../parachains/runtimes/bridge-hubs/bridge-hub-westend", default-features = false } +emulated-integration-tests-common = { path = "../../../common", default-features = false} +westend-system-emulated-network ={ path = "../../../networks/westend-system" } diff --git a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/lib.rs b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/lib.rs new file mode 100644 index 00000000000..f406a73d18d --- /dev/null +++ b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/lib.rs @@ -0,0 +1,56 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +// Substrate +pub use frame_support::assert_ok; + +// Polkadot +pub use xcm::{ + prelude::{AccountId32 as AccountId32Junction, *}, + v3::{Error, NetworkId::Rococo as RococoId}, +}; + +// Bridges +pub use bp_messages::LaneId; + +// Cumulus +pub use emulated_integration_tests_common::{ + test_parachain_is_trusted_teleporter, + xcm_emulator::{ + assert_expected_events, bx, helpers::weight_within_threshold, Chain, Parachain as Para, + RelayChain as Relay, Test, TestArgs, TestContext, TestExt, + }, + xcm_helpers::{xcm_transact_paid_execution, xcm_transact_unpaid_execution}, + PROOF_SIZE_THRESHOLD, REF_TIME_THRESHOLD, XCM_V3, +}; +pub use parachains_common::{AccountId, Balance}; +pub use westend_system_emulated_network::{ + bridge_hub_westend_emulated_chain::{ + genesis::ED as BRIDGE_HUB_ROCOCO_ED, BridgeHubWestendParaPallet as BridgeHubWestendPallet, + }, + westend_emulated_chain::{genesis::ED as ROCOCO_ED, WestendRelayPallet as WestendPallet}, + AssetHubWestendPara as AssetHubWestend, AssetHubWestendParaReceiver as AssetHubWestendReceiver, + AssetHubWestendParaSender as AssetHubWestendSender, BridgeHubWestendPara as BridgeHubWestend, + BridgeHubWestendParaReceiver as BridgeHubWestendReceiver, + BridgeHubWestendParaSender as BridgeHubWestendSender, WestendRelay as Westend, + WestendRelayReceiver as WestendReceiver, WestendRelaySender as WestendSender, +}; + +pub const ASSET_ID: u32 = 1; +pub const ASSET_MIN_BALANCE: u128 = 1000; +pub const ASSETS_PALLET_ID: u8 = 50; + +#[cfg(test)] +mod tests; diff --git a/cumulus/parachains/integration-tests/emulated/bridges/bridge-hub-westend/src/tests/example.rs b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/example.rs similarity index 96% rename from cumulus/parachains/integration-tests/emulated/bridges/bridge-hub-westend/src/tests/example.rs rename to cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/example.rs index e15a0dedab9..1fdd9441e48 100644 --- a/cumulus/parachains/integration-tests/emulated/bridges/bridge-hub-westend/src/tests/example.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/example.rs @@ -30,7 +30,7 @@ fn example() { UnpaidExecution { weight_limit, check_origin }, ExportMessage { network: RococoId, - destination: X1(Parachain(AssetHubRococo::para_id().into())), + destination: X1(Parachain(AssetHubWestend::para_id().into())), xcm: remote_xcm, }, ])); diff --git a/cumulus/parachains/integration-tests/emulated/bridges/bridge-hub-westend/src/tests/mod.rs b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/mod.rs similarity index 100% rename from cumulus/parachains/integration-tests/emulated/bridges/bridge-hub-westend/src/tests/mod.rs rename to cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/mod.rs diff --git a/cumulus/parachains/integration-tests/emulated/bridges/bridge-hub-westend/src/tests/teleport.rs b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/teleport.rs similarity index 95% rename from cumulus/parachains/integration-tests/emulated/bridges/bridge-hub-westend/src/tests/teleport.rs rename to cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/teleport.rs index 8dff6c29295..32639b8614b 100644 --- 
a/cumulus/parachains/integration-tests/emulated/bridges/bridge-hub-westend/src/tests/teleport.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/teleport.rs @@ -18,7 +18,7 @@ use bridge_hub_westend_runtime::xcm_config::XcmConfig; #[test] fn teleport_to_other_system_parachains_works() { - let amount = BRIDGE_HUB_WESTEND_ED * 100; + let amount = BRIDGE_HUB_ROCOCO_ED * 100; let native_asset: MultiAssets = (Parent, amount).into(); test_parachain_is_trusted_teleporter!( diff --git a/cumulus/xcm/xcm-emulator/src/lib.rs b/cumulus/xcm/xcm-emulator/src/lib.rs index f11a3d5c5af..7ff5512d214 100644 --- a/cumulus/xcm/xcm-emulator/src/lib.rs +++ b/cumulus/xcm/xcm-emulator/src/lib.rs @@ -170,30 +170,21 @@ pub trait Network { relay_parent_number: u32, parent_head_data: HeadData, ) -> ParachainInherentData; -} - -pub trait NetworkComponent { - type Network: Network; - fn send_horizontal_messages)>>( to_para_id: u32, iter: I, ) { HORIZONTAL_MESSAGES.with(|b| { b.borrow_mut() - .get_mut(Self::Network::name()) + .get_mut(Self::name()) .unwrap() .push_back((to_para_id, iter.collect())) }); } fn send_upward_message(from_para_id: u32, msg: Vec) { - UPWARD_MESSAGES.with(|b| { - b.borrow_mut() - .get_mut(Self::Network::name()) - .unwrap() - .push_back((from_para_id, msg)) - }); + UPWARD_MESSAGES + .with(|b| b.borrow_mut().get_mut(Self::name()).unwrap().push_back((from_para_id, msg))); } fn send_downward_messages( @@ -202,19 +193,19 @@ pub trait NetworkComponent { ) { DOWNWARD_MESSAGES.with(|b| { b.borrow_mut() - .get_mut(Self::Network::name()) + .get_mut(Self::name()) .unwrap() .push_back((to_para_id, iter.collect())) }); } fn send_bridged_messages(msg: BridgeMessage) { - BRIDGED_MESSAGES - .with(|b| b.borrow_mut().get_mut(Self::Network::name()).unwrap().push_back(msg)); + BRIDGED_MESSAGES.with(|b| b.borrow_mut().get_mut(Self::name()).unwrap().push_back(msg)); } } -pub trait Chain: TestExt + NetworkComponent { +pub trait Chain: TestExt { + type Network: Network; type Runtime: SystemConfig; type RuntimeCall; type RuntimeOrigin; @@ -234,6 +225,8 @@ pub trait RelayChain: Chain { type SovereignAccountOf: ConvertLocation>; type MessageProcessor: ProcessMessage + ServiceQueues; + fn init(); + fn child_location_of(id: ParaId) -> MultiLocation { (Ancestor(0), ParachainJunction(id.into())).into() } @@ -348,7 +341,6 @@ macro_rules! decl_test_relay_chains { on_init = $on_init:expr, runtime = $runtime:ident, core = { - MessageProcessor: $mp:path, SovereignAccountOf: $sovereign_acc_of:path, }, pallets = { @@ -361,9 +353,10 @@ macro_rules! decl_test_relay_chains { ) => { $( #[derive(Clone)] - pub struct $name; + pub struct $name($crate::PhantomData); - impl $crate::Chain for $name { + impl $crate::Chain for $name { + type Network = N; type Runtime = $runtime::Runtime; type RuntimeCall = $runtime::RuntimeCall; type RuntimeOrigin = $runtime::RuntimeOrigin; @@ -382,27 +375,35 @@ macro_rules! decl_test_relay_chains { } } - impl $crate::RelayChain for $name { + impl $crate::RelayChain for $name { type SovereignAccountOf = $sovereign_acc_of; - type MessageProcessor = $mp; + type MessageProcessor = $crate::DefaultRelayMessageProcessor<$name>; + + fn init() { + use $crate::TestExt; + // Initialize the thread local variable + $crate::paste::paste! { + [].with(|v| *v.borrow_mut() = Self::build_new_ext($genesis)); + } + } } $crate::paste::paste! { - pub trait [<$name Pallet>] { + pub trait [<$name RelayPallet>] { $( type $pallet_name; )? 
} - impl [<$name Pallet>] for $name { + impl [<$name RelayPallet>] for $name { $( type $pallet_name = $pallet_path; )? } } - $crate::__impl_test_ext_for_relay_chain!($name, $genesis, $on_init, $api_version); - $crate::__impl_check_assertion!($name); + $crate::__impl_test_ext_for_relay_chain!($name, N, $genesis, $on_init, $api_version); + $crate::__impl_check_assertion!($name, N); )+ }; } @@ -410,10 +411,11 @@ macro_rules! decl_test_relay_chains { #[macro_export] macro_rules! __impl_test_ext_for_relay_chain { // entry point: generate ext name - ($name:ident, $genesis:expr, $on_init:expr, $api_version:tt) => { + ($name:ident, $network:ident, $genesis:expr, $on_init:expr, $api_version:tt) => { $crate::paste::paste! { $crate::__impl_test_ext_for_relay_chain!( @impl $name, + $network, $genesis, $on_init, [], @@ -423,10 +425,10 @@ macro_rules! __impl_test_ext_for_relay_chain { } }; // impl - (@impl $name:ident, $genesis:expr, $on_init:expr, $api_version:ident, $local_ext:ident, $global_ext:ident) => { + (@impl $name:ident, $network:ident, $genesis:expr, $on_init:expr, $api_version:ident, $local_ext:ident, $global_ext:ident) => { thread_local! { pub static $local_ext: $crate::RefCell<$crate::TestExternalities> - = $crate::RefCell::new(<$name as $crate::TestExt>::build_new_ext($genesis)); + = $crate::RefCell::new($crate::TestExternalities::new($genesis)); } $crate::lazy_static! { @@ -434,9 +436,9 @@ macro_rules! __impl_test_ext_for_relay_chain { = $crate::Mutex::new($crate::RefCell::new($crate::HashMap::new())); } - impl $crate::TestExt for $name { + impl<$network: $crate::Network> $crate::TestExt for $name<$network> { fn build_new_ext(storage: $crate::Storage) -> $crate::TestExternalities { - use $crate::{sp_tracing, NetworkComponent, Network, Chain, TestExternalities}; + use $crate::{sp_tracing, Network, Chain, TestExternalities}; let mut ext = TestExternalities::new(storage); @@ -453,7 +455,7 @@ macro_rules! __impl_test_ext_for_relay_chain { } fn new_ext() -> $crate::TestExternalities { - <$name>::build_new_ext($genesis) + Self::build_new_ext($genesis) } fn move_ext_out(id: &'static str) { @@ -504,13 +506,13 @@ macro_rules! __impl_test_ext_for_relay_chain { } fn reset_ext() { - $local_ext.with(|v| *v.borrow_mut() = <$name>::build_new_ext($genesis)); + $local_ext.with(|v| *v.borrow_mut() = Self::build_new_ext($genesis)); } fn execute_with(execute: impl FnOnce() -> R) -> R { - use $crate::{Chain, NetworkComponent, Network}; + use $crate::{Chain, Network}; // Make sure the Network is initialized - <$name as NetworkComponent>::Network::init(); + <$network>::init(); // Execute let r = $local_ext.with(|v| v.borrow_mut().execute_with(execute)); @@ -521,7 +523,7 @@ macro_rules! __impl_test_ext_for_relay_chain { use $crate::polkadot_primitives::runtime_api::runtime_decl_for_parachain_host::$api_version; //TODO: mark sent count & filter out sent msg - for para_id in<$name as NetworkComponent>::Network::para_ids() { + for para_id in <$network>::para_ids() { // downward messages let downward_messages = ::Runtime::dmq_contents(para_id.into()) .into_iter() @@ -529,7 +531,7 @@ macro_rules! __impl_test_ext_for_relay_chain { if downward_messages.len() == 0 { continue; } - <$name>::send_downward_messages(para_id, downward_messages.into_iter()); + <$network>::send_downward_messages(para_id, downward_messages.into_iter()); // Note: no need to handle horizontal messages, as the // simulator directly sends them to dest (not relayed). @@ -545,7 +547,7 @@ macro_rules! 
__impl_test_ext_for_relay_chain { }) }); - <$name as NetworkComponent>::Network::process_messages(); + <$network>::process_messages(); r } @@ -574,7 +576,7 @@ macro_rules! decl_test_parachains { XcmpMessageHandler: $xcmp_message_handler:path, LocationToAccountId: $location_to_account:path, ParachainInfo: $parachain_info:path, - MessageProcessor: $message_processor:path, + // MessageProcessor: $message_processor:path, }, pallets = { $($pallet_name:ident: $pallet_path:path,)* @@ -586,14 +588,15 @@ macro_rules! decl_test_parachains { ) => { $( #[derive(Clone)] - pub struct $name; + pub struct $name($crate::PhantomData); - impl $crate::Chain for $name { + impl $crate::Chain for $name { type Runtime = $runtime::Runtime; type RuntimeCall = $runtime::RuntimeCall; type RuntimeOrigin = $runtime::RuntimeOrigin; type RuntimeEvent = $runtime::RuntimeEvent; type System = $crate::SystemPallet::; + type Network = N; fn account_data_of(account: $crate::AccountIdOf) -> $crate::AccountData<$crate::Balance> { ::ext_wrapper(|| $crate::SystemPallet::::account(account).data.into()) @@ -607,17 +610,21 @@ macro_rules! decl_test_parachains { } } - impl $crate::Parachain for $name { + impl $crate::Parachain for $name { type XcmpMessageHandler = $xcmp_message_handler; type LocationToAccountId = $location_to_account; type ParachainSystem = $crate::ParachainSystemPallet<::Runtime>; type ParachainInfo = $parachain_info; - type MessageProcessor = $message_processor; + type MessageProcessor = $crate::DefaultParaMessageProcessor<$name>; // We run an empty block during initialisation to open HRMP channels // and have them ready for the next block fn init() { - use $crate::{Chain, HeadData, Network, NetworkComponent, Hooks, Encode, Parachain, TestExt}; + use $crate::{Chain, HeadData, Network, Hooks, Encode, Parachain, TestExt}; + // Initialize the thread local variable + $crate::paste::paste! { + [].with(|v| *v.borrow_mut() = Self::build_new_ext($genesis)); + } // Set the last block head for later use in the next block Self::set_last_head(); // Initialize a new block @@ -627,21 +634,21 @@ macro_rules! decl_test_parachains { } fn new_block() { - use $crate::{Chain, HeadData, Network, NetworkComponent, Hooks, Encode, Parachain, TestExt}; + use $crate::{Chain, HeadData, Network, Hooks, Encode, Parachain, TestExt}; let para_id = Self::para_id().into(); Self::ext_wrapper(|| { // Increase Relay Chain block number - let mut relay_block_number = <$name as NetworkComponent>::Network::relay_block_number(); + let mut relay_block_number = N::relay_block_number(); relay_block_number += 1; - <$name as NetworkComponent>::Network::set_relay_block_number(relay_block_number); + N::set_relay_block_number(relay_block_number); // Initialize a new Parachain block let mut block_number = ::System::block_number(); block_number += 1; let parent_head_data = $crate::LAST_HEAD.with(|b| b.borrow_mut() - .get_mut(::Network::name()) + .get_mut(N::name()) .expect("network not initialized?") .get(¶_id) .expect("network not initialized?") @@ -652,13 +659,13 @@ macro_rules! 
decl_test_parachains { let _ = ::ParachainSystem::set_validation_data( ::RuntimeOrigin::none(), - <$name as NetworkComponent>::Network::hrmp_channel_parachain_inherent_data(para_id, relay_block_number, parent_head_data), + N::hrmp_channel_parachain_inherent_data(para_id, relay_block_number, parent_head_data), ); }); } fn finalize_block() { - use $crate::{Chain, Encode, Hooks, Network, NetworkComponent, Parachain, TestExt}; + use $crate::{Chain, Encode, Hooks, Network, Parachain, TestExt}; Self::ext_wrapper(|| { let block_number = ::System::block_number(); @@ -670,7 +677,7 @@ macro_rules! decl_test_parachains { fn set_last_head() { - use $crate::{Chain, Encode, HeadData, Network, NetworkComponent, Parachain, TestExt}; + use $crate::{Chain, Encode, HeadData, Network, Parachain, TestExt}; let para_id = Self::para_id().into(); @@ -678,7 +685,7 @@ macro_rules! decl_test_parachains { // Store parent head data for use later. let created_header = ::System::finalize(); $crate::LAST_HEAD.with(|b| b.borrow_mut() - .get_mut(::Network::name()) + .get_mut(N::name()) .expect("network not initialized?") .insert(para_id, HeadData(created_header.encode())) ); @@ -687,21 +694,21 @@ macro_rules! decl_test_parachains { } $crate::paste::paste! { - pub trait [<$name Pallet>] { + pub trait [<$name ParaPallet>] { $( type $pallet_name; )* } - impl [<$name Pallet>] for $name { + impl [<$name ParaPallet>] for $name { $( type $pallet_name = $pallet_path; )* } } - $crate::__impl_test_ext_for_parachain!($name, $genesis, $on_init); - $crate::__impl_check_assertion!($name); + $crate::__impl_test_ext_for_parachain!($name, N, $genesis, $on_init); + $crate::__impl_check_assertion!($name, N); )+ }; } @@ -709,16 +716,16 @@ macro_rules! decl_test_parachains { #[macro_export] macro_rules! __impl_test_ext_for_parachain { // entry point: generate ext name - ($name:ident, $genesis:expr, $on_init:expr) => { + ($name:ident, $network:ident, $genesis:expr, $on_init:expr) => { $crate::paste::paste! { - $crate::__impl_test_ext_for_parachain!(@impl $name, $genesis, $on_init, [], []); + $crate::__impl_test_ext_for_parachain!(@impl $name, $network, $genesis, $on_init, [], []); } }; // impl - (@impl $name:ident, $genesis:expr, $on_init:expr, $local_ext:ident, $global_ext:ident) => { + (@impl $name:ident, $network:ident, $genesis:expr, $on_init:expr, $local_ext:ident, $global_ext:ident) => { thread_local! { pub static $local_ext: $crate::RefCell<$crate::TestExternalities> - = $crate::RefCell::new(<$name as $crate::TestExt>::build_new_ext($genesis)); + = $crate::RefCell::new($crate::TestExternalities::new($genesis)); } $crate::lazy_static! { @@ -726,7 +733,7 @@ macro_rules! __impl_test_ext_for_parachain { = $crate::Mutex::new($crate::RefCell::new($crate::HashMap::new())); } - impl $crate::TestExt for $name { + impl<$network: $crate::Network> $crate::TestExt for $name<$network> { fn build_new_ext(storage: $crate::Storage) -> $crate::TestExternalities { let mut ext = $crate::TestExternalities::new(storage); @@ -743,7 +750,7 @@ macro_rules! __impl_test_ext_for_parachain { } fn new_ext() -> $crate::TestExternalities { - <$name>::build_new_ext($genesis) + Self::build_new_ext($genesis) } fn move_ext_out(id: &'static str) { @@ -794,14 +801,14 @@ macro_rules! 
__impl_test_ext_for_parachain { } fn reset_ext() { - $local_ext.with(|v| *v.borrow_mut() = <$name>::build_new_ext($genesis)); + $local_ext.with(|v| *v.borrow_mut() = Self::build_new_ext($genesis)); } fn execute_with(execute: impl FnOnce() -> R) -> R { - use $crate::{Chain, Get, Hooks, NetworkComponent, Network, Parachain, Encode}; + use $crate::{Chain, Get, Hooks, Network, Parachain, Encode}; // Make sure the Network is initialized - <$name as NetworkComponent>::Network::init(); + <$network>::init(); // Initialize a new block Self::new_block(); @@ -812,7 +819,7 @@ macro_rules! __impl_test_ext_for_parachain { // Finalize the block Self::finalize_block(); - let para_id = <$name>::para_id().into(); + let para_id = Self::para_id().into(); // Send messages if needed $local_ext.with(|v| { @@ -828,27 +835,27 @@ macro_rules! __impl_test_ext_for_parachain { let collation_info = ::ParachainSystem::collect_collation_info(&mock_header); // send upward messages - let relay_block_number = <$name as NetworkComponent>::Network::relay_block_number(); + let relay_block_number = <$network>::relay_block_number(); for msg in collation_info.upward_messages.clone() { - <$name>::send_upward_message(para_id, msg); + <$network>::send_upward_message(para_id, msg); } // send horizontal messages for msg in collation_info.horizontal_messages { - <$name>::send_horizontal_messages( + <$network>::send_horizontal_messages( msg.recipient.into(), vec![(para_id.into(), relay_block_number, msg.data)].into_iter(), ); } // get bridge messages - type NetworkBridge = <<$name as NetworkComponent>::Network as $crate::Network>::Bridge; + type NetworkBridge<$network> = <$network as $crate::Network>::Bridge; - let bridge_messages = <::Handler as $crate::BridgeMessageHandler>::get_source_outbound_messages(); + let bridge_messages = < as $crate::Bridge>::Handler as $crate::BridgeMessageHandler>::get_source_outbound_messages(); // send bridged messages for msg in bridge_messages { - <$name>::send_bridged_messages(msg); + <$network>::send_bridged_messages(msg); } // log events @@ -864,7 +871,7 @@ macro_rules! __impl_test_ext_for_parachain { // provide inbound DMP/HRMP messages through a side-channel. // normally this would come through the `set_validation_data`, // but we go around that. - <$name as NetworkComponent>::Network::process_messages(); + <$network>::process_messages(); r } @@ -886,8 +893,8 @@ macro_rules! decl_test_networks { ( $( pub struct $name:ident { - relay_chain = $relay_chain:ty, - parachains = vec![ $( $parachain:ty, )* ], + relay_chain = $relay_chain:ident, + parachains = vec![ $( $parachain:ident, )* ], bridge = $bridge:ty } ), @@ -895,10 +902,11 @@ macro_rules! decl_test_networks { $(,)? ) => { $( + #[derive(Clone)] pub struct $name; impl $crate::Network for $name { - type Relay = $relay_chain; + type Relay = $relay_chain; type Bridge = $bridge; fn name() -> &'static str { @@ -916,8 +924,8 @@ macro_rules! decl_test_networks { $crate::BRIDGED_MESSAGES.with(|b| b.borrow_mut().remove(Self::name())); $crate::LAST_HEAD.with(|b| b.borrow_mut().remove(Self::name())); - <$relay_chain>::reset_ext(); - $( <$parachain>::reset_ext(); )* + <$relay_chain>::reset_ext(); + $( <$parachain>::reset_ext(); )* } fn init() { @@ -932,13 +940,14 @@ macro_rules! 
decl_test_networks { $crate::PARA_IDS.with(|b| b.borrow_mut().insert(Self::name().to_string(), Self::para_ids())); $crate::LAST_HEAD.with(|b| b.borrow_mut().insert(Self::name().to_string(), $crate::HashMap::new())); - $( <$parachain as $crate::Parachain>::init(); )* + <$relay_chain as $crate::RelayChain>::init(); + $( <$parachain as $crate::Parachain>::init(); )* } } fn para_ids() -> Vec { vec![$( - <$parachain as $crate::Parachain>::para_id().into(), + <$parachain as $crate::Parachain>::para_id().into(), )*] } @@ -976,7 +985,7 @@ macro_rules! decl_test_networks { while let Some((to_para_id, messages)) = $crate::DOWNWARD_MESSAGES.with(|b| b.borrow_mut().get_mut(Self::name()).unwrap().pop_front()) { $( - let para_id: u32 = <$parachain>::para_id().into(); + let para_id: u32 = <$parachain>::para_id().into(); if $crate::PARA_IDS.with(|b| b.borrow_mut().get_mut(Self::name()).unwrap().contains(&to_para_id)) && para_id == to_para_id { let mut msg_dedup: Vec<(RelayChainBlockNumber, Vec)> = Vec::new(); @@ -986,21 +995,21 @@ macro_rules! decl_test_networks { msg_dedup.dedup(); let msgs = msg_dedup.clone().into_iter().filter(|m| { - !$crate::DMP_DONE.with(|b| b.borrow().get(stringify!($name)) + !$crate::DMP_DONE.with(|b| b.borrow().get(Self::name()) .unwrap_or(&mut $crate::VecDeque::new()) .contains(&(to_para_id, m.0, m.1.clone())) ) }).collect::)>>(); - use $crate::{ProcessMessage, CumulusAggregateMessageOrigin, BoundedSlice, WeightMeter, TestExt}; + use $crate::{ProcessMessage, CumulusAggregateMessageOrigin, BoundedSlice, WeightMeter}; for (block, msg) in msgs.clone().into_iter() { let mut weight_meter = WeightMeter::new(); - <$parachain>::ext_wrapper(|| { - let _ = <$parachain as Parachain>::MessageProcessor::process_message( + <$parachain>::ext_wrapper(|| { + let _ = <$parachain as Parachain>::MessageProcessor::process_message( &msg[..], $crate::CumulusAggregateMessageOrigin::Parent, &mut weight_meter, - &mut msg.using_encoded(sp_core::blake2_256), + &mut msg.using_encoded($crate::blake2_256), ); }); $crate::log::debug!(target: concat!("dmp::", stringify!($name)) , "DMP messages processed {:?} to para_id {:?}", msgs.clone(), &to_para_id); @@ -1012,19 +1021,19 @@ macro_rules! decl_test_networks { } fn process_horizontal_messages() { - use $crate::{XcmpMessageHandler, ServiceQueues, Bounded}; + use $crate::{XcmpMessageHandler, ServiceQueues, Bounded, Parachain, TestExt}; while let Some((to_para_id, messages)) = $crate::HORIZONTAL_MESSAGES.with(|b| b.borrow_mut().get_mut(Self::name()).unwrap().pop_front()) { let iter = messages.iter().map(|(p, b, m)| (*p, *b, &m[..])).collect::>().into_iter(); $( - let para_id: u32 = <$parachain>::para_id().into(); + let para_id: u32 = <$parachain>::para_id().into(); if $crate::PARA_IDS.with(|b| b.borrow_mut().get_mut(Self::name()).unwrap().contains(&to_para_id)) && para_id == to_para_id { - <$parachain>::ext_wrapper(|| { - <$parachain as Parachain>::XcmpMessageHandler::handle_xcmp_messages(iter.clone(), $crate::Weight::MAX); + <$parachain>::ext_wrapper(|| { + <$parachain as Parachain>::XcmpMessageHandler::handle_xcmp_messages(iter.clone(), $crate::Weight::MAX); // Nudge the MQ pallet to process immediately instead of in the next block. 
- let _ = <$parachain as Parachain>::MessageProcessor::service_queues($crate::Weight::MAX); + let _ = <$parachain as Parachain>::MessageProcessor::service_queues($crate::Weight::MAX); }); $crate::log::debug!(target: concat!("hrmp::", stringify!($name)) , "HRMP messages processed {:?} to para_id {:?}", &messages, &to_para_id); } @@ -1037,8 +1046,8 @@ macro_rules! decl_test_networks { while let Some((from_para_id, msg)) = $crate::UPWARD_MESSAGES.with(|b| b.borrow_mut().get_mut(Self::name()).unwrap().pop_front()) { let mut weight_meter = WeightMeter::new(); - <$relay_chain>::ext_wrapper(|| { - let _ = <$relay_chain as $crate::RelayChain>::MessageProcessor::process_message( + <$relay_chain>::ext_wrapper(|| { + let _ = <$relay_chain as $crate::RelayChain>::MessageProcessor::process_message( &msg[..], from_para_id.into(), &mut weight_meter, @@ -1121,13 +1130,13 @@ macro_rules! decl_test_networks { } } - impl $crate::NetworkComponent for $relay_chain { - type Network = $name; + $crate::paste::paste! { + pub type [<$relay_chain Relay>] = $relay_chain<$name>; } $( - impl $crate::NetworkComponent for $parachain { - type Network = $name; + $crate::paste::paste! { + pub type [<$parachain Para>] = $parachain<$name>; } )* )+ @@ -1139,9 +1148,9 @@ macro_rules! decl_test_bridges { ( $( pub struct $name:ident { - source = $source:ty, - target = $target:ty, - handler = $handler:ty + source = $source:ident, + target = $target:ident, + handler = $handler:ident } ), + @@ -1157,10 +1166,10 @@ macro_rules! decl_test_bridges { type Handler = $handler; fn init() { - use $crate::{NetworkComponent, Network}; + use $crate::{Network, Parachain}; // Make sure source and target `Network` have been initialized - <$source as NetworkComponent>::Network::init(); - <$target as NetworkComponent>::Network::init(); + <$source as Chain>::Network::init(); + <$target as Chain>::Network::init(); } } )+ @@ -1169,10 +1178,11 @@ macro_rules! decl_test_bridges { #[macro_export] macro_rules! __impl_check_assertion { - ($chain:ident) => { - impl - $crate::CheckAssertion for $chain + ($chain:ident, $network:ident) => { + impl<$network, Origin, Destination, Hops, Args> + $crate::CheckAssertion for $chain<$network> where + $network: $crate::Network, Origin: $crate::Chain + Clone, Destination: $crate::Chain + Clone, Origin::RuntimeOrigin: @@ -1185,9 +1195,9 @@ macro_rules! __impl_check_assertion { fn check_assertion(test: $crate::Test) { use $crate::TestExt; - let chain_name = std::any::type_name::<$chain>(); + let chain_name = std::any::type_name::<$chain<$network>>(); - <$chain>::execute_with(|| { + <$chain<$network>>::execute_with(|| { if let Some(dispatchable) = test.hops_dispatchable.get(chain_name) { $crate::assert_ok!(dispatchable(test.clone())); } @@ -1213,7 +1223,6 @@ macro_rules! assert_expected_events { let mut event_message: Vec = Vec::new(); for (index, event) in events.iter().enumerate() { - $crate::log::debug!(target: concat!("events::", stringify!($chain)), "{:?}", event); // Have to reset the variable to override a previous partial match meet_conditions = true; match event { @@ -1259,7 +1268,14 @@ macro_rules! assert_expected_events { ) ); } else if !event_received { - message.push(format!("\n\n{}::\x1b[31m{}\x1b[0m was never received. All events:\n{:#?}", stringify!($chain), stringify!($event_pat), <$chain>::events())); + message.push( + format!( + "\n\n{}::\x1b[31m{}\x1b[0m was never received. 
All events:\n{:#?}", + stringify!($chain), + stringify!($event_pat), + <$chain as $crate::Chain>::events(), + ) + ); } else { // If we find a perfect match we remove the event to avoid being potentially assessed multiple times events.remove(index_match); -- GitLab From b8acc57c503f0b8fa12bb0c01517f63fc7ea6996 Mon Sep 17 00:00:00 2001 From: Michal Kucharczyk <1728078+michalkucharczyk@users.noreply.github.com> Date: Wed, 8 Nov 2023 17:31:37 +0100 Subject: [PATCH 011/199] `sc-chain-spec`: add support for custom host functions (#2190) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Genesis building in runtime may involve calling some custom host functions. This PR allows to pass `HostFunctions` into the `ChainSpec` struct, which in turn are passed to `WasmExecutor`. The `ChainSpec` now has extended host functions type parameter: ``` pub struct ChainSpec ``` which will be combined with the default set (`sp_io::SubstrateHostFunctions`) in an instance of `WasmExecutor` used to build the genesis config. Fix for #2188 --------- Co-authored-by: Davide Galassi Co-authored-by: Bastian Köcher --- .../bin/utils/chain-spec-builder/src/lib.rs | 3 +- substrate/client/chain-spec/src/chain_spec.rs | 86 ++++++++++++------- .../chain-spec/src/genesis_config_builder.rs | 27 ++++-- 3 files changed, 79 insertions(+), 37 deletions(-) diff --git a/substrate/bin/utils/chain-spec-builder/src/lib.rs b/substrate/bin/utils/chain-spec-builder/src/lib.rs index 50a8dd08996..6f21b68c368 100644 --- a/substrate/bin/utils/chain-spec-builder/src/lib.rs +++ b/substrate/bin/utils/chain-spec-builder/src/lib.rs @@ -338,7 +338,8 @@ pub fn generate_chain_spec_for_runtime(cmd: &RuntimeCmd) -> Result { - let caller = GenesisConfigBuilderRuntimeCaller::new(&code[..]); + let caller: GenesisConfigBuilderRuntimeCaller = + GenesisConfigBuilderRuntimeCaller::new(&code[..]); let default_config = caller .get_default_config() .map_err(|e| format!("getting default config from runtime should work: {e}"))?; diff --git a/substrate/client/chain-spec/src/chain_spec.rs b/substrate/client/chain-spec/src/chain_spec.rs index a44a3b4fbd9..8d97d941022 100644 --- a/substrate/client/chain-spec/src/chain_spec.rs +++ b/substrate/client/chain-spec/src/chain_spec.rs @@ -19,8 +19,8 @@ //! Substrate chain configurations. #![warn(missing_docs)] use crate::{ - extension::GetExtension, ChainType, GenesisConfigBuilderRuntimeCaller as RuntimeCaller, - Properties, RuntimeGenesis, + extension::GetExtension, genesis_config_builder::HostFunctions, ChainType, + GenesisConfigBuilderRuntimeCaller as RuntimeCaller, Properties, RuntimeGenesis, }; use sc_network::config::MultiaddrWithPeerId; use sc_telemetry::TelemetryEndpoints; @@ -122,7 +122,10 @@ impl GenesisSource { } } -impl BuildStorage for ChainSpec { +impl BuildStorage for ChainSpec +where + EHF: HostFunctions, +{ fn assimilate_storage(&self, storage: &mut Storage) -> Result<(), String> { match self.genesis.resolve()? { #[allow(deprecated)] @@ -158,7 +161,7 @@ impl BuildStorage for ChainSpec { json_blob: RuntimeGenesisConfigJson::Config(config), code, }) => { - RuntimeCaller::new(&code[..]) + RuntimeCaller::::new(&code[..]) .get_storage_for_config(config)? .assimilate_storage(storage)?; storage @@ -169,7 +172,7 @@ impl BuildStorage for ChainSpec { json_blob: RuntimeGenesisConfigJson::Patch(patch), code, }) => { - RuntimeCaller::new(&code[..]) + RuntimeCaller::::new(&code[..]) .get_storage_for_patch(patch)? 
.assimilate_storage(storage)?; storage @@ -322,7 +325,7 @@ struct ClientSpec { pub type NoExtension = Option<()>; /// Builder for creating [`ChainSpec`] instances. -pub struct ChainSpecBuilder { +pub struct ChainSpecBuilder { code: Vec, extensions: E, name: String, @@ -334,10 +337,10 @@ pub struct ChainSpecBuilder { protocol_id: Option, fork_id: Option, properties: Option, - _genesis: PhantomData, + _genesis: PhantomData<(G, EHF)>, } -impl ChainSpecBuilder { +impl ChainSpecBuilder { /// Creates a new builder instance with no defaults. pub fn new(code: &[u8], extensions: E) -> Self { Self { @@ -429,7 +432,7 @@ impl ChainSpecBuilder { } /// Builds a [`ChainSpec`] instance using the provided settings. - pub fn build(self) -> ChainSpec { + pub fn build(self) -> ChainSpec { let client_spec = ClientSpec { name: self.name, id: self.id, @@ -448,23 +451,33 @@ impl ChainSpecBuilder { ChainSpec { client_spec, genesis: GenesisSource::GenesisBuilderApi(self.genesis_build_action, self.code.into()), + _host_functions: Default::default(), } } } /// A configuration of a chain. Can be used to build a genesis block. -pub struct ChainSpec { +/// +/// The chain spec is generic over the native `RuntimeGenesisConfig` struct (`G`). It is also +/// possible to parametrize chain spec over the extended host functions (EHF). It should be use if +/// runtime is using the non-standard host function during genesis state creation. +pub struct ChainSpec { client_spec: ClientSpec, genesis: GenesisSource, + _host_functions: PhantomData, } -impl Clone for ChainSpec { +impl Clone for ChainSpec { fn clone(&self) -> Self { - ChainSpec { client_spec: self.client_spec.clone(), genesis: self.genesis.clone() } + ChainSpec { + client_spec: self.client_spec.clone(), + genesis: self.genesis.clone(), + _host_functions: self._host_functions, + } } } -impl ChainSpec { +impl ChainSpec { /// A list of bootnode addresses. pub fn boot_nodes(&self) -> &[MultiaddrWithPeerId] { &self.client_spec.boot_nodes @@ -553,6 +566,7 @@ impl ChainSpec { ChainSpec { client_spec, genesis: GenesisSource::Factory(Arc::new(constructor), code.into()), + _host_functions: Default::default(), } } @@ -562,19 +576,23 @@ impl ChainSpec { } /// Provides a `ChainSpec` builder. - pub fn builder(code: &[u8], extensions: E) -> ChainSpecBuilder { + pub fn builder(code: &[u8], extensions: E) -> ChainSpecBuilder { ChainSpecBuilder::new(code, extensions) } } -impl ChainSpec { +impl ChainSpec { /// Parse json content into a `ChainSpec` pub fn from_json_bytes(json: impl Into>) -> Result { let json = json.into(); let client_spec = json::from_slice(json.as_ref()) .map_err(|e| format!("Error parsing spec file: {}", e))?; - Ok(ChainSpec { client_spec, genesis: GenesisSource::Binary(json) }) + Ok(ChainSpec { + client_spec, + genesis: GenesisSource::Binary(json), + _host_functions: Default::default(), + }) } /// Parse json file into a `ChainSpec` @@ -593,7 +611,11 @@ impl ChainSpec { genesis: Genesis, } -impl ChainSpec { +impl ChainSpec +where + EHF: HostFunctions, +{ fn json_container(&self, raw: bool) -> Result, String> { let raw_genesis = match (raw, self.genesis.resolve()?) 
{ ( @@ -618,7 +643,8 @@ impl ChainSpec { code, }), ) => { - let mut storage = RuntimeCaller::new(&code[..]).get_storage_for_config(config)?; + let mut storage = + RuntimeCaller::::new(&code[..]).get_storage_for_config(config)?; storage.top.insert(sp_core::storage::well_known_keys::CODE.to_vec(), code); RawGenesis::from(storage) }, @@ -629,7 +655,8 @@ impl ChainSpec { code, }), ) => { - let mut storage = RuntimeCaller::new(&code[..]).get_storage_for_patch(patch)?; + let mut storage = + RuntimeCaller::::new(&code[..]).get_storage_for_patch(patch)?; storage.top.insert(sp_core::storage::well_known_keys::CODE.to_vec(), code); RawGenesis::from(storage) }, @@ -664,10 +691,11 @@ impl ChainSpec { } } -impl crate::ChainSpec for ChainSpec +impl crate::ChainSpec for ChainSpec where G: RuntimeGenesis + 'static, E: GetExtension + serde::Serialize + Clone + Send + Sync + 'static, + EHF: HostFunctions, { fn boot_nodes(&self) -> &[MultiaddrWithPeerId] { ChainSpec::boot_nodes(self) @@ -953,7 +981,7 @@ mod tests { #[docify::export] #[test] fn build_chain_spec_with_patch_works() { - let output: ChainSpec<()> = ChainSpec::builder( + let output = ChainSpec::<()>::builder( substrate_test_runtime::wasm_binary_unwrap().into(), Default::default(), ) @@ -986,7 +1014,7 @@ mod tests { #[docify::export] #[test] fn generate_chain_spec_with_patch_works() { - let output: ChainSpec<()> = ChainSpec::builder( + let output = ChainSpec::<()>::builder( substrate_test_runtime::wasm_binary_unwrap().into(), Default::default(), ) @@ -1033,7 +1061,7 @@ mod tests { #[test] fn generate_chain_spec_with_full_config_works() { let j = include_str!("../../../test-utils/runtime/res/default_genesis_config.json"); - let output: ChainSpec<()> = ChainSpec::builder( + let output = ChainSpec::<()>::builder( substrate_test_runtime::wasm_binary_unwrap().into(), Default::default(), ) @@ -1065,7 +1093,7 @@ mod tests { fn chain_spec_as_json_fails_with_invalid_config() { let j = include_str!("../../../test-utils/runtime/res/default_genesis_config_invalid_2.json"); - let output: ChainSpec<()> = ChainSpec::builder( + let output = ChainSpec::<()>::builder( substrate_test_runtime::wasm_binary_unwrap().into(), Default::default(), ) @@ -1083,7 +1111,7 @@ mod tests { #[test] fn chain_spec_as_json_fails_with_invalid_patch() { - let output: ChainSpec<()> = ChainSpec::builder( + let output = ChainSpec::<()>::builder( substrate_test_runtime::wasm_binary_unwrap().into(), Default::default(), ) @@ -1139,7 +1167,7 @@ mod tests { #[test] fn update_code_works_with_runtime_genesis_config() { let j = include_str!("../../../test-utils/runtime/res/default_genesis_config.json"); - let chain_spec: ChainSpec<()> = ChainSpec::builder( + let chain_spec = ChainSpec::<()>::builder( substrate_test_runtime::wasm_binary_unwrap().into(), Default::default(), ) @@ -1162,7 +1190,7 @@ mod tests { #[test] fn update_code_works_for_raw() { let j = include_str!("../../../test-utils/runtime/res/default_genesis_config.json"); - let chain_spec: ChainSpec<()> = ChainSpec::builder( + let chain_spec = ChainSpec::<()>::builder( substrate_test_runtime::wasm_binary_unwrap().into(), Default::default(), ) @@ -1184,7 +1212,7 @@ mod tests { #[test] fn update_code_works_with_runtime_genesis_patch() { - let chain_spec: ChainSpec<()> = ChainSpec::builder( + let chain_spec = ChainSpec::<()>::builder( substrate_test_runtime::wasm_binary_unwrap().into(), Default::default(), ) diff --git a/substrate/client/chain-spec/src/genesis_config_builder.rs b/substrate/client/chain-spec/src/genesis_config_builder.rs index 
c2c6249563f..9ccf6b4efb2 100644 --- a/substrate/client/chain-spec/src/genesis_config_builder.rs +++ b/substrate/client/chain-spec/src/genesis_config_builder.rs @@ -19,6 +19,7 @@ //! A helper module for calling the GenesisBuilder API from arbitrary runtime wasm blobs. use codec::{Decode, Encode}; +pub use sc_executor::sp_wasm_interface::HostFunctions; use sc_executor::{error::Result, WasmExecutor}; use serde_json::{from_slice, Value}; use sp_core::{ @@ -30,19 +31,31 @@ use sp_state_machine::BasicExternalities; use std::borrow::Cow; /// A utility that facilitates calling the GenesisBuilder API from the runtime wasm code blob. -pub struct GenesisConfigBuilderRuntimeCaller<'a> { +/// +/// `EHF` type allows to specify the extended host function required for building runtime's genesis +/// config. The type will be compbined with default `sp_io::SubstrateHostFunctions`. +pub struct GenesisConfigBuilderRuntimeCaller<'a, EHF = ()> +where + EHF: HostFunctions, +{ code: Cow<'a, [u8]>, code_hash: Vec, - executor: WasmExecutor, + executor: WasmExecutor<(sp_io::SubstrateHostFunctions, EHF)>, } -impl<'a> FetchRuntimeCode for GenesisConfigBuilderRuntimeCaller<'a> { +impl<'a, EHF> FetchRuntimeCode for GenesisConfigBuilderRuntimeCaller<'a, EHF> +where + EHF: HostFunctions, +{ fn fetch_runtime_code(&self) -> Option> { Some(self.code.as_ref().into()) } } -impl<'a> GenesisConfigBuilderRuntimeCaller<'a> { +impl<'a, EHF> GenesisConfigBuilderRuntimeCaller<'a, EHF> +where + EHF: HostFunctions, +{ /// Creates new instance using the provided code blob. /// /// This code is later referred to as `runtime`. @@ -50,7 +63,7 @@ impl<'a> GenesisConfigBuilderRuntimeCaller<'a> { GenesisConfigBuilderRuntimeCaller { code: code.into(), code_hash: sp_core::blake2_256(code).to_vec(), - executor: WasmExecutor::::builder() + executor: WasmExecutor::<(sp_io::SubstrateHostFunctions, EHF)>::builder() .with_allow_missing_host_functions(true) .build(), } @@ -134,7 +147,7 @@ mod tests { #[test] fn get_default_config_works() { let config = - GenesisConfigBuilderRuntimeCaller::new(substrate_test_runtime::wasm_binary_unwrap()) + ::new(substrate_test_runtime::wasm_binary_unwrap()) .get_default_config() .unwrap(); let expected = r#"{"system":{},"babe":{"authorities":[],"epochConfig":null},"substrateTest":{"authorities":[]},"balances":{"balances":[]}}"#; @@ -156,7 +169,7 @@ mod tests { }); let storage = - GenesisConfigBuilderRuntimeCaller::new(substrate_test_runtime::wasm_binary_unwrap()) + ::new(substrate_test_runtime::wasm_binary_unwrap()) .get_storage_for_patch(patch) .unwrap(); -- GitLab From eabf9fb89771d997e1cd9f93c429d2c505a3401d Mon Sep 17 00:00:00 2001 From: Michal Kucharczyk <1728078+michalkucharczyk@users.noreply.github.com> Date: Wed, 8 Nov 2023 18:10:40 +0100 Subject: [PATCH 012/199] integrations-test: `build_genesis_storage` name fix (#2232) Some legacy tests were mistakenly merged in #1256 for `emulated-integration-tests-common` crate. This PR fixes the function name `build_genesis_storage` (no need to use `legacy` suffix, even though the genesis is built from `RuntimeGenesisConfig`). 
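A condensed sketch of what each chain's `genesis()` helper looks like after the rename (genesis fields elided; the helper's signature itself is unchanged):

```rust
use emulated_integration_tests_common::build_genesis_storage;
use sp_core::storage::Storage;

pub fn genesis() -> Storage {
    // Build the chain's `RuntimeGenesisConfig` (fields elided in this sketch).
    let genesis_config = asset_hub_rococo_runtime::RuntimeGenesisConfig::default();
    // Only the `_legacy` suffix is gone; the helper still takes a
    // `&dyn BuildStorage` plus the runtime wasm blob.
    build_genesis_storage(
        &genesis_config,
        asset_hub_rococo_runtime::WASM_BINARY
            .expect("WASM binary was not built, please build it!"),
    )
}
```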
--- .../parachains/assets/asset-hub-rococo/src/genesis.rs | 4 ++-- .../parachains/assets/asset-hub-westend/src/genesis.rs | 4 ++-- .../parachains/bridges/bridge-hub-rococo/src/genesis.rs | 4 ++-- .../parachains/bridges/bridge-hub-westend/src/genesis.rs | 4 ++-- .../chains/parachains/testing/penpal/src/genesis.rs | 4 ++-- .../emulated/chains/relays/rococo/src/genesis.rs | 6 +++--- .../emulated/chains/relays/westend/src/genesis.rs | 4 ++-- .../parachains/integration-tests/emulated/common/src/lib.rs | 2 +- 8 files changed, 16 insertions(+), 16 deletions(-) diff --git a/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-rococo/src/genesis.rs b/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-rococo/src/genesis.rs index efc94722d90..c19d05ec730 100644 --- a/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-rococo/src/genesis.rs +++ b/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-rococo/src/genesis.rs @@ -18,7 +18,7 @@ use sp_core::storage::Storage; // Cumulus use emulated_integration_tests_common::{ - accounts, build_genesis_storage_legacy, collators, SAFE_XCM_VERSION, + accounts, build_genesis_storage, collators, SAFE_XCM_VERSION, }; use parachains_common::Balance; @@ -63,7 +63,7 @@ pub fn genesis() -> Storage { ..Default::default() }; - build_genesis_storage_legacy( + build_genesis_storage( &genesis_config, asset_hub_rococo_runtime::WASM_BINARY.expect("WASM binary was not built, please build it!"), ) diff --git a/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-westend/src/genesis.rs b/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-westend/src/genesis.rs index 19e7a60b030..78d6478fdf1 100644 --- a/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-westend/src/genesis.rs +++ b/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-westend/src/genesis.rs @@ -18,7 +18,7 @@ use sp_core::storage::Storage; // Cumulus use emulated_integration_tests_common::{ - accounts, build_genesis_storage_legacy, collators, SAFE_XCM_VERSION, + accounts, build_genesis_storage, collators, SAFE_XCM_VERSION, }; use parachains_common::Balance; @@ -59,7 +59,7 @@ pub fn genesis() -> Storage { ..Default::default() }; - build_genesis_storage_legacy( + build_genesis_storage( &genesis_config, asset_hub_westend_runtime::WASM_BINARY .expect("WASM binary was not built, please build it!"), diff --git a/cumulus/parachains/integration-tests/emulated/chains/parachains/bridges/bridge-hub-rococo/src/genesis.rs b/cumulus/parachains/integration-tests/emulated/chains/parachains/bridges/bridge-hub-rococo/src/genesis.rs index 299c5909670..4af84c82e98 100644 --- a/cumulus/parachains/integration-tests/emulated/chains/parachains/bridges/bridge-hub-rococo/src/genesis.rs +++ b/cumulus/parachains/integration-tests/emulated/chains/parachains/bridges/bridge-hub-rococo/src/genesis.rs @@ -18,7 +18,7 @@ use sp_core::{sr25519, storage::Storage}; // Cumulus use emulated_integration_tests_common::{ - accounts, build_genesis_storage_legacy, collators, get_account_id_from_seed, SAFE_XCM_VERSION, + accounts, build_genesis_storage, collators, get_account_id_from_seed, SAFE_XCM_VERSION, }; use parachains_common::Balance; @@ -75,7 +75,7 @@ pub fn genesis() -> Storage { ..Default::default() }; - build_genesis_storage_legacy( + build_genesis_storage( &genesis_config, bridge_hub_rococo_runtime::WASM_BINARY 
.expect("WASM binary was not built, please build it!"), diff --git a/cumulus/parachains/integration-tests/emulated/chains/parachains/bridges/bridge-hub-westend/src/genesis.rs b/cumulus/parachains/integration-tests/emulated/chains/parachains/bridges/bridge-hub-westend/src/genesis.rs index 5acd3eb2c60..cd578d6862f 100644 --- a/cumulus/parachains/integration-tests/emulated/chains/parachains/bridges/bridge-hub-westend/src/genesis.rs +++ b/cumulus/parachains/integration-tests/emulated/chains/parachains/bridges/bridge-hub-westend/src/genesis.rs @@ -18,7 +18,7 @@ use sp_core::{sr25519, storage::Storage}; // Cumulus use emulated_integration_tests_common::{ - accounts, build_genesis_storage_legacy, collators, get_account_id_from_seed, SAFE_XCM_VERSION, + accounts, build_genesis_storage, collators, get_account_id_from_seed, SAFE_XCM_VERSION, }; use parachains_common::Balance; @@ -67,7 +67,7 @@ pub fn genesis() -> Storage { ..Default::default() }; - build_genesis_storage_legacy( + build_genesis_storage( &genesis_config, bridge_hub_westend_runtime::WASM_BINARY .expect("WASM binary was not built, please build it!"), diff --git a/cumulus/parachains/integration-tests/emulated/chains/parachains/testing/penpal/src/genesis.rs b/cumulus/parachains/integration-tests/emulated/chains/parachains/testing/penpal/src/genesis.rs index 8b03b599d5f..9ab32a977d7 100644 --- a/cumulus/parachains/integration-tests/emulated/chains/parachains/testing/penpal/src/genesis.rs +++ b/cumulus/parachains/integration-tests/emulated/chains/parachains/testing/penpal/src/genesis.rs @@ -18,7 +18,7 @@ use sp_core::{sr25519, storage::Storage}; // Cumulus use emulated_integration_tests_common::{ - accounts, build_genesis_storage_legacy, collators, get_account_id_from_seed, SAFE_XCM_VERSION, + accounts, build_genesis_storage, collators, get_account_id_from_seed, SAFE_XCM_VERSION, }; use parachains_common::Balance; @@ -64,7 +64,7 @@ pub fn genesis(para_id: u32) -> Storage { ..Default::default() }; - build_genesis_storage_legacy( + build_genesis_storage( &genesis_config, penpal_runtime::WASM_BINARY.expect("WASM binary was not built, please build it!"), ) diff --git a/cumulus/parachains/integration-tests/emulated/chains/relays/rococo/src/genesis.rs b/cumulus/parachains/integration-tests/emulated/chains/relays/rococo/src/genesis.rs index 876df212e4c..6f5f3923ead 100644 --- a/cumulus/parachains/integration-tests/emulated/chains/relays/rococo/src/genesis.rs +++ b/cumulus/parachains/integration-tests/emulated/chains/relays/rococo/src/genesis.rs @@ -26,8 +26,8 @@ use polkadot_primitives::{AssignmentId, ValidatorId}; // Cumulus use emulated_integration_tests_common::{ - accounts, build_genesis_storage_legacy, get_account_id_from_seed, get_from_seed, - get_host_config, validators, + accounts, build_genesis_storage, get_account_id_from_seed, get_from_seed, get_host_config, + validators, }; use parachains_common::Balance; use rococo_runtime_constants::currency::UNITS as ROC; @@ -97,5 +97,5 @@ pub fn genesis() -> Storage { ..Default::default() }; - build_genesis_storage_legacy(&genesis_config, rococo_runtime::WASM_BINARY.unwrap()) + build_genesis_storage(&genesis_config, rococo_runtime::WASM_BINARY.unwrap()) } diff --git a/cumulus/parachains/integration-tests/emulated/chains/relays/westend/src/genesis.rs b/cumulus/parachains/integration-tests/emulated/chains/relays/westend/src/genesis.rs index 8bdc34f0eed..e87b85881d3 100644 --- a/cumulus/parachains/integration-tests/emulated/chains/relays/westend/src/genesis.rs +++ 
b/cumulus/parachains/integration-tests/emulated/chains/relays/westend/src/genesis.rs @@ -27,7 +27,7 @@ use polkadot_primitives::{AssignmentId, ValidatorId}; // Cumulus use emulated_integration_tests_common::{ - accounts, build_genesis_storage_legacy, get_from_seed, get_host_config, validators, + accounts, build_genesis_storage, get_from_seed, get_host_config, validators, }; use parachains_common::Balance; use westend_runtime_constants::currency::UNITS as WND; @@ -105,5 +105,5 @@ pub fn genesis() -> Storage { ..Default::default() }; - build_genesis_storage_legacy(&genesis_config, westend_runtime::WASM_BINARY.unwrap()) + build_genesis_storage(&genesis_config, westend_runtime::WASM_BINARY.unwrap()) } diff --git a/cumulus/parachains/integration-tests/emulated/common/src/lib.rs b/cumulus/parachains/integration-tests/emulated/common/src/lib.rs index 74db02db003..952b053f2aa 100644 --- a/cumulus/parachains/integration-tests/emulated/common/src/lib.rs +++ b/cumulus/parachains/integration-tests/emulated/common/src/lib.rs @@ -86,7 +86,7 @@ pub fn get_host_config() -> HostConfiguration { /// Helper function used in tests to build the genesis storage using given RuntimeGenesisConfig and /// code Used in `legacy_vs_json_check` submods to verify storage building with JSON patch against /// building with RuntimeGenesisConfig struct. -pub fn build_genesis_storage_legacy(builder: &dyn BuildStorage, code: &[u8]) -> Storage { +pub fn build_genesis_storage(builder: &dyn BuildStorage, code: &[u8]) -> Storage { let mut storage = builder.build_storage().unwrap(); storage .top -- GitLab From 69494ea70ba9082b2f8494695361b15db25ec29d Mon Sep 17 00:00:00 2001 From: Sebastian Kunert Date: Wed, 8 Nov 2023 18:33:45 +0100 Subject: [PATCH 013/199] Add prospective-parachain subsystem to minimal-relay-node + QoL improvements (#2223) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This PR contains some fixes and cleanups for parachain nodes: 1. When using async backing, node no longer complains about being unable to reach the prospective-parachain subsystem. 2. Parachain warp sync now informs users that the finalized para block has been retrieved. ``` 2023-11-08 13:24:42 [Parachain] 🎉 Received finalized parachain header #5747719 (0xa0aa…674b) from the relay chain. ``` 3. When a user supplied an invalid `--relay-chain-rpc-url`, we were crashing with a very verbose message. Removed the `expect` and improved the error message. ``` 2023-11-08 13:57:56 [Parachain] No valid RPC url found. Stopping RPC worker. 2023-11-08 13:57:56 [Parachain] Essential task `relay-chain-rpc-worker` failed. Shutting down service. Error: Service(Application(WorkerCommunicationError("RPC worker channel closed. This can hint and connectivity issues with the supplied RPC endpoints. 
Message: oneshot canceled"))) ``` --- Cargo.lock | 1 + .../client/relay-chain-minimal-node/Cargo.toml | 1 + .../src/collator_overseer.rs | 3 ++- .../client/relay-chain-minimal-node/src/lib.rs | 7 +------ .../src/rpc_client.rs | 2 +- cumulus/client/service/src/lib.rs | 18 +++++++++++------- 6 files changed, 17 insertions(+), 15 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 18eb00a13c1..54fa7c6485b 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4198,6 +4198,7 @@ dependencies = [ "polkadot-core-primitives", "polkadot-network-bridge", "polkadot-node-collation-generation", + "polkadot-node-core-prospective-parachains", "polkadot-node-core-runtime-api", "polkadot-node-network-protocol", "polkadot-node-subsystem-util", diff --git a/cumulus/client/relay-chain-minimal-node/Cargo.toml b/cumulus/client/relay-chain-minimal-node/Cargo.toml index 6518e09cbb5..8c196f66b72 100644 --- a/cumulus/client/relay-chain-minimal-node/Cargo.toml +++ b/cumulus/client/relay-chain-minimal-node/Cargo.toml @@ -18,6 +18,7 @@ polkadot-collator-protocol = { path = "../../../polkadot/node/network/collator-p polkadot-network-bridge = { path = "../../../polkadot/node/network/bridge" } polkadot-node-collation-generation = { path = "../../../polkadot/node/collation-generation" } polkadot-node-core-runtime-api = { path = "../../../polkadot/node/core/runtime-api" } +polkadot-node-core-prospective-parachains = { path = "../../../polkadot/node/core/prospective-parachains" } # substrate deps sc-authority-discovery = { path = "../../../substrate/client/authority-discovery" } diff --git a/cumulus/client/relay-chain-minimal-node/src/collator_overseer.rs b/cumulus/client/relay-chain-minimal-node/src/collator_overseer.rs index 216ca0d3965..379217e4a63 100644 --- a/cumulus/client/relay-chain-minimal-node/src/collator_overseer.rs +++ b/cumulus/client/relay-chain-minimal-node/src/collator_overseer.rs @@ -24,6 +24,7 @@ use polkadot_network_bridge::{ NetworkBridgeTx as NetworkBridgeTxSubsystem, }; use polkadot_node_collation_generation::CollationGenerationSubsystem; +use polkadot_node_core_prospective_parachains::ProspectiveParachainsSubsystem; use polkadot_node_core_runtime_api::RuntimeApiSubsystem; use polkadot_node_network_protocol::{ peer_set::PeerSetProtocolNames, @@ -144,7 +145,7 @@ fn build_overseer( spawner.clone(), )) .statement_distribution(DummySubsystem) - .prospective_parachains(DummySubsystem) + .prospective_parachains(ProspectiveParachainsSubsystem::new(Metrics::register(registry)?)) .approval_distribution(DummySubsystem) .approval_voting(DummySubsystem) .gossip_support(DummySubsystem) diff --git a/cumulus/client/relay-chain-minimal-node/src/lib.rs b/cumulus/client/relay-chain-minimal-node/src/lib.rs index c8fba923dde..8801f93640c 100644 --- a/cumulus/client/relay-chain-minimal-node/src/lib.rs +++ b/cumulus/client/relay-chain-minimal-node/src/lib.rs @@ -171,12 +171,7 @@ async fn new_minimal_relay_chain( ); } - let genesis_hash = relay_chain_rpc_client - .block_get_hash(Some(0)) - .await - .expect("Genesis block hash is always available; qed") - .unwrap_or_default(); - + let genesis_hash = relay_chain_rpc_client.block_get_hash(Some(0)).await?.unwrap_or_default(); let peer_set_protocol_names = PeerSetProtocolNames::new(genesis_hash, config.chain_spec.fork_id()); let is_authority = if role.is_authority() { IsAuthority::Yes } else { IsAuthority::No }; diff --git a/cumulus/client/relay-chain-rpc-interface/src/rpc_client.rs b/cumulus/client/relay-chain-rpc-interface/src/rpc_client.rs index 5924716adcb..90af334e133 100644 --- 
a/cumulus/client/relay-chain-rpc-interface/src/rpc_client.rs +++ b/cumulus/client/relay-chain-rpc-interface/src/rpc_client.rs @@ -201,7 +201,7 @@ impl RelayChainRpcClient { let value = rx.await.map_err(|err| { RelayChainError::WorkerCommunicationError(format!( - "Unexpected channel close on RPC worker side: {}", + "RPC worker channel closed. This can hint and connectivity issues with the supplied RPC endpoints. Message: {}", err )) })??; diff --git a/cumulus/client/service/src/lib.rs b/cumulus/client/service/src/lib.rs index 82890666eba..f8ebca11c8c 100644 --- a/cumulus/client/service/src/lib.rs +++ b/cumulus/client/service/src/lib.rs @@ -49,7 +49,7 @@ use sc_utils::mpsc::TracingUnboundedSender; use sp_api::ProvideRuntimeApi; use sp_blockchain::{HeaderBackend, HeaderMetadata}; use sp_core::{traits::SpawnNamed, Decode}; -use sp_runtime::traits::{Block as BlockT, BlockIdTo}; +use sp_runtime::traits::{Block as BlockT, BlockIdTo, Header}; use std::{sync::Arc, time::Duration}; // Given the sporadic nature of the explicit recovery operation and the @@ -505,11 +505,11 @@ where None, async move { log::debug!( - target: "cumulus-network", + target: LOG_TARGET_SYNC, "waiting for announce block in a background task...", ); - let _ = wait_for_target_block::(sender, para_id, relay_chain_interface) + let _ = wait_for_finalized_para_head::(sender, para_id, relay_chain_interface) .await .map_err(|e| { log::error!( @@ -527,7 +527,7 @@ where /// Waits for the relay chain to have finished syncing and then gets the parachain header that /// corresponds to the last finalized relay chain block. -async fn wait_for_target_block( +async fn wait_for_finalized_para_head( sender: oneshot::Sender<::Header>, para_id: ParaId, relay_chain_interface: RCInterface, @@ -560,11 +560,15 @@ where .map_err(|e| format!("{e:?}"))? .ok_or("Could not find parachain head in relay chain")?; - let target_block = B::Header::decode(&mut &validation_data.parent_head.0[..]) + let finalized_header = B::Header::decode(&mut &validation_data.parent_head.0[..]) .map_err(|e| format!("Failed to decode parachain head: {e}"))?; - log::debug!(target: LOG_TARGET_SYNC, "Target block reached {:?}", target_block); - let _ = sender.send(target_block); + log::info!( + "🎉 Received target parachain header #{} ({}) from the relay chain.", + finalized_header.number(), + finalized_header.hash() + ); + let _ = sender.send(finalized_header); return Ok(()) } } -- GitLab From 37bb02ef62cee28dd67a0075cd424aa6aa1c6b18 Mon Sep 17 00:00:00 2001 From: Francisco Aguirre Date: Wed, 8 Nov 2023 18:35:54 +0100 Subject: [PATCH 014/199] Make PalletInfo fields public (#2231) PalletInfo fields were private, preventing a user from actually using the QueryPallet instruction in a meaningful way since they couldn't read the received data. --- polkadot/xcm/src/v3/mod.rs | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/polkadot/xcm/src/v3/mod.rs b/polkadot/xcm/src/v3/mod.rs index 7ec3f6f40af..4217528f227 100644 --- a/polkadot/xcm/src/v3/mod.rs +++ b/polkadot/xcm/src/v3/mod.rs @@ -243,15 +243,15 @@ parameter_types! 
{ #[scale_info(replace_segment("staging_xcm", "xcm"))] pub struct PalletInfo { #[codec(compact)] - index: u32, - name: BoundedVec, - module_name: BoundedVec, + pub index: u32, + pub name: BoundedVec, + pub module_name: BoundedVec, #[codec(compact)] - major: u32, + pub major: u32, #[codec(compact)] - minor: u32, + pub minor: u32, #[codec(compact)] - patch: u32, + pub patch: u32, } impl PalletInfo { -- GitLab From 3f7c743d73afc72593ea1a3af139706e7e0782d9 Mon Sep 17 00:00:00 2001 From: Vincent Geddes Date: Wed, 8 Nov 2023 21:18:41 +0200 Subject: [PATCH 015/199] BridgeHub Runtimes: Change registration order of `MessageQueue` pallet (#2230) This PR changes the registration order of the `MessageQueue` pallet so that it is registered last. This is necessary so that the [on_initialize](https://github.com/Snowfork/snowbridge/blob/df8d5da82e517a65fb0858a4f2ead533290336b5/parachain/pallets/outbound-queue/src/lib.rs#L267) hooks for Snowbridge can run before `MessageQueue` delivers messages using its own `on_initialize`. Generally, I think this is preferable regardless of Snowbridge's particular requirements. Other pallets may want to do housekeeping before MessageQueue starts delivering messages. I'm hoping this PR, if accepted, can be included in the same release as https://github.com/paritytech/polkadot-sdk/pull/1246. As otherwise, changing the order of pallet registration is an ABI-breaking change. --- .../runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs | 5 ++++- .../runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs | 5 ++++- 2 files changed, 8 insertions(+), 2 deletions(-) diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs index b533f7adc2a..e5d38bcac23 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs @@ -524,7 +524,6 @@ construct_runtime!( PolkadotXcm: pallet_xcm::{Pallet, Call, Event, Origin, Config} = 31, CumulusXcm: cumulus_pallet_xcm::{Pallet, Event, Origin} = 32, DmpQueue: cumulus_pallet_dmp_queue::{Pallet, Call, Storage, Event} = 33, - MessageQueue: pallet_message_queue::{Pallet, Call, Storage, Event} = 34, // Handy utilities. Utility: pallet_utility::{Pallet, Call, Event} = 40, @@ -564,6 +563,10 @@ construct_runtime!( BridgeWestendMessages: pallet_bridge_messages::::{Pallet, Call, Storage, Event, Config} = 51, BridgeRelayers: pallet_bridge_relayers::{Pallet, Call, Storage, Event} = 47, + + // Message Queue. Importantly, is registered last so that messages are processed after + // the `on_initialize` hooks of bridging pallets. + MessageQueue: pallet_message_queue::{Pallet, Call, Storage, Event} = 250, } ); diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs index c0b0bf87180..458876ce46c 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs @@ -500,7 +500,6 @@ construct_runtime!( PolkadotXcm: pallet_xcm::{Pallet, Call, Event, Origin, Config} = 31, CumulusXcm: cumulus_pallet_xcm::{Pallet, Event, Origin} = 32, DmpQueue: cumulus_pallet_dmp_queue::{Pallet, Call, Storage, Event} = 33, - MessageQueue: pallet_message_queue::{Pallet, Call, Storage, Event} = 34, // Handy utilities. 
Utility: pallet_utility::{Pallet, Call, Event} = 40, @@ -511,6 +510,10 @@ construct_runtime!( BridgeRococoGrandpa: pallet_bridge_grandpa::::{Pallet, Call, Storage, Event, Config} = 42, BridgeRococoParachains: pallet_bridge_parachains::::{Pallet, Call, Storage, Event} = 43, BridgeRococoMessages: pallet_bridge_messages::::{Pallet, Call, Storage, Event, Config} = 44, + + // Message Queue. Importantly, is registered last so that messages are processed after + // the `on_initialize` hooks of bridging pallets. + MessageQueue: pallet_message_queue::{Pallet, Call, Storage, Event} = 250, } ); -- GitLab From e4f5f3c9c512d15193d99697795e4adfc641a6bb Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Wed, 8 Nov 2023 23:30:41 +0100 Subject: [PATCH 016/199] Rococo: Build two versions of the wasm binary (#2229) One for local networks with `fast-runtime` feature activated (1 minute sessions) and one without the feature activated that will be the default that runs with 1 hour long sessions. --- polkadot/node/service/src/chain_spec.rs | 2 +- polkadot/runtime/rococo/build.rs | 11 +++++++---- polkadot/runtime/rococo/constants/src/lib.rs | 7 ++++--- polkadot/runtime/rococo/src/lib.rs | 8 ++++++++ 4 files changed, 20 insertions(+), 8 deletions(-) diff --git a/polkadot/node/service/src/chain_spec.rs b/polkadot/node/service/src/chain_spec.rs index 04e142230b4..871d7e82911 100644 --- a/polkadot/node/service/src/chain_spec.rs +++ b/polkadot/node/service/src/chain_spec.rs @@ -1068,7 +1068,7 @@ fn rococo_local_testnet_genesis() -> serde_json::Value { #[cfg(feature = "rococo-native")] pub fn rococo_local_testnet_config() -> Result { Ok(RococoChainSpec::builder( - rococo::WASM_BINARY.ok_or("Rococo development wasm not available")?, + rococo::fast_runtime_binary::WASM_BINARY.ok_or("Rococo development wasm not available")?, Default::default(), ) .with_name("Rococo Local Testnet") diff --git a/polkadot/runtime/rococo/build.rs b/polkadot/runtime/rococo/build.rs index ed32d33105b..0b7ee77b0d0 100644 --- a/polkadot/runtime/rococo/build.rs +++ b/polkadot/runtime/rococo/build.rs @@ -16,16 +16,19 @@ #[cfg(feature = "std")] fn main() { - // note: needs to be synced with rococo-runtime-constants::time hard-coded string literal - const ROCOCO_EPOCH_DURATION_ENV: &str = "ROCOCO_EPOCH_DURATION"; - substrate_wasm_builder::WasmBuilder::new() .with_current_project() .import_memory() .export_heap_base() .build(); - println!("cargo:rerun-if-env-changed={}", ROCOCO_EPOCH_DURATION_ENV); + substrate_wasm_builder::WasmBuilder::new() + .with_current_project() + .set_file_name("fast_runtime_binary.rs") + .enable_feature("fast-runtime") + .import_memory() + .export_heap_base() + .build(); } #[cfg(not(feature = "std"))] diff --git a/polkadot/runtime/rococo/constants/src/lib.rs b/polkadot/runtime/rococo/constants/src/lib.rs index 84594cffcf3..2f641d60fc8 100644 --- a/polkadot/runtime/rococo/constants/src/lib.rs +++ b/polkadot/runtime/rococo/constants/src/lib.rs @@ -37,14 +37,15 @@ pub mod currency { /// Time and blocks. pub mod time { + use runtime_common::prod_or_fast; + use primitives::{BlockNumber, Moment}; pub const MILLISECS_PER_BLOCK: Moment = 6000; pub const SLOT_DURATION: Moment = MILLISECS_PER_BLOCK; frame_support::parameter_types! 
{ - pub storage EpochDurationInBlocks: BlockNumber = option_env!("ROCOCO_EPOCH_DURATION") - .map(|s| s.parse().expect("`ROCOCO_EPOCH_DURATION` is not a valid `BlockNumber`")) - .unwrap_or(1 * MINUTES); + pub EpochDurationInBlocks: BlockNumber = + prod_or_fast!(1 * HOURS, 1 * MINUTES, "ROCOCO_EPOCH_DURATION"); } // These time units are defined in number of blocks. diff --git a/polkadot/runtime/rococo/src/lib.rs b/polkadot/runtime/rococo/src/lib.rs index 257f364dca0..4f542354346 100644 --- a/polkadot/runtime/rococo/src/lib.rs +++ b/polkadot/runtime/rococo/src/lib.rs @@ -131,6 +131,14 @@ impl_runtime_weights!(rococo_runtime_constants); #[cfg(feature = "std")] include!(concat!(env!("OUT_DIR"), "/wasm_binary.rs")); +/// Provides the `WASM_BINARY` build with `fast-runtime` feature enabled. +/// +/// This is for example useful for local test chains. +#[cfg(feature = "std")] +pub mod fast_runtime_binary { + include!(concat!(env!("OUT_DIR"), "/fast_runtime_binary.rs")); +} + /// Runtime version (Rococo). #[sp_version::runtime_version] pub const VERSION: RuntimeVersion = RuntimeVersion { -- GitLab From 6a23c2316280d16986d59f7e1fdc3311a0bccf89 Mon Sep 17 00:00:00 2001 From: RadiumBlock Date: Wed, 8 Nov 2023 16:43:25 -0600 Subject: [PATCH 017/199] Add RadiumBlock Bootnodes for parachains (#2224) # Description We would like to add our bootnodes to the following parachains: Westend: Westmint, Bridgehub Kusama: Statemine, Bridgehub Polkadot: Statemint, Bridgehub, Collectives Thank you. --------- Co-authored-by: Oliver Tale-Yazdi --- cumulus/parachains/chain-specs/asset-hub-kusama.json | 4 +++- cumulus/parachains/chain-specs/asset-hub-polkadot.json | 4 +++- cumulus/parachains/chain-specs/asset-hub-westend.json | 4 +++- cumulus/parachains/chain-specs/bridge-hub-kusama.json | 4 +++- cumulus/parachains/chain-specs/bridge-hub-polkadot.json | 4 +++- cumulus/parachains/chain-specs/bridge-hub-westend.json | 4 +++- cumulus/parachains/chain-specs/collectives-polkadot.json | 4 +++- 7 files changed, 21 insertions(+), 7 deletions(-) diff --git a/cumulus/parachains/chain-specs/asset-hub-kusama.json b/cumulus/parachains/chain-specs/asset-hub-kusama.json index 519811f5ec8..946bfc5983d 100644 --- a/cumulus/parachains/chain-specs/asset-hub-kusama.json +++ b/cumulus/parachains/chain-specs/asset-hub-kusama.json @@ -20,7 +20,9 @@ "/dns/statemine.bootnode.amforc.com/tcp/30336/p2p/12D3KooWHmSyrBWsc6fdpq8HtCFWasmLVLYGKWA2a78m4xAHKyBq", "/dns/statemine.bootnode.amforc.com/tcp/30333/wss/p2p/12D3KooWHmSyrBWsc6fdpq8HtCFWasmLVLYGKWA2a78m4xAHKyBq", "/dns/statemine-boot-ng.dwellir.com/tcp/30343/p2p/12D3KooWQNJKBaNfW6Nn7HZDi5pSSEFmHL2Qz7chr9RksQUDR1Wk", - "/dns/statemine-boot-ng.dwellir.com/tcp/443/wss/p2p/12D3KooWQNJKBaNfW6Nn7HZDi5pSSEFmHL2Qz7chr9RksQUDR1Wk" + "/dns/statemine-boot-ng.dwellir.com/tcp/443/wss/p2p/12D3KooWQNJKBaNfW6Nn7HZDi5pSSEFmHL2Qz7chr9RksQUDR1Wk", + "/dns/statemine-bootnode.radiumblock.com/tcp/30333/p2p/12D3KooWCKUrE5uaXQ288ko3Ex3zCyozyJLG47KEYTopinnXNtYL", + "/dns/statemine-bootnode.radiumblock.com/tcp/30336/wss/p2p/12D3KooWCKUrE5uaXQ288ko3Ex3zCyozyJLG47KEYTopinnXNtYL" ], "telemetryEndpoints": null, "protocolId": null, diff --git a/cumulus/parachains/chain-specs/asset-hub-polkadot.json b/cumulus/parachains/chain-specs/asset-hub-polkadot.json index bbf90693ca3..c26506eb995 100644 --- a/cumulus/parachains/chain-specs/asset-hub-polkadot.json +++ b/cumulus/parachains/chain-specs/asset-hub-polkadot.json @@ -20,7 +20,9 @@ 
"/dns/statemint.bootnode.amforc.com/tcp/30341/p2p/12D3KooWByohP9FXn7ao8syS167qJsbFdpa7fY2Y24xbKtt3r7Ls", "/dns/statemint.bootnode.amforc.com/tcp/30333/wss/p2p/12D3KooWByohP9FXn7ao8syS167qJsbFdpa7fY2Y24xbKtt3r7Ls", "/dns/statemint-boot-ng.dwellir.com/tcp/30344/p2p/12D3KooWEFrNuNk8fPdQS2hf34Gmqi6dGSvrETshGJUrqrvfRDZr", - "/dns/statemint-boot-ng.dwellir.com/tcp/443/wss/p2p/12D3KooWEFrNuNk8fPdQS2hf34Gmqi6dGSvrETshGJUrqrvfRDZr" + "/dns/statemint-boot-ng.dwellir.com/tcp/443/wss/p2p/12D3KooWEFrNuNk8fPdQS2hf34Gmqi6dGSvrETshGJUrqrvfRDZr", + "/dns/statemint-bootnode.radiumblock.com/tcp/30336/wss/p2p/12D3KooWLKxHom7f3XawRJqrF8RwiKK5Sj3qZqz5c7hF6eJeXhTx", + "/dns/statemint-bootnode.radiumblock.com/tcp/30333/p2p/12D3KooWLKxHom7f3XawRJqrF8RwiKK5Sj3qZqz5c7hF6eJeXhTx" ], "telemetryEndpoints": null, "protocolId": null, diff --git a/cumulus/parachains/chain-specs/asset-hub-westend.json b/cumulus/parachains/chain-specs/asset-hub-westend.json index f6cdccee10a..f0e71981e7a 100644 --- a/cumulus/parachains/chain-specs/asset-hub-westend.json +++ b/cumulus/parachains/chain-specs/asset-hub-westend.json @@ -18,7 +18,9 @@ "/dns/westmint.bootnode.amforc.com/tcp/30339/p2p/12D3KooWNjKeaANaeZxBAPctmx8jugSYzuw4vnSCJmEDPB5mtRd6", "/dns/westmint.bootnode.amforc.com/tcp/30333/wss/p2p/12D3KooWNjKeaANaeZxBAPctmx8jugSYzuw4vnSCJmEDPB5mtRd6", "/dns/westmint-boot-ng.dwellir.com/tcp/30345/p2p/12D3KooWFZ9xqApB1wnFYkbe1qJ5Jqwxe2f3i8W25F3tKNXy59ux", - "/dns/westmint-boot-ng.dwellir.com/tcp/443/wss/p2p/12D3KooWFZ9xqApB1wnFYkbe1qJ5Jqwxe2f3i8W25F3tKNXy59ux" + "/dns/westmint-boot-ng.dwellir.com/tcp/443/wss/p2p/12D3KooWFZ9xqApB1wnFYkbe1qJ5Jqwxe2f3i8W25F3tKNXy59ux", + "/dns/westmint-bootnode.radiumblock.com/tcp/30336/wss/p2p/12D3KooWDoq4PVdWm5nzRSvEz3DSSKjVgRhWVUaKyi5JMKwJKYbk", + "/dns/westmint-bootnode.radiumblock.com/tcp/30333/p2p/12D3KooWDoq4PVdWm5nzRSvEz3DSSKjVgRhWVUaKyi5JMKwJKYbk" ], "telemetryEndpoints": null, "protocolId": null, diff --git a/cumulus/parachains/chain-specs/bridge-hub-kusama.json b/cumulus/parachains/chain-specs/bridge-hub-kusama.json index c4a7d6358f7..9daa60fa263 100644 --- a/cumulus/parachains/chain-specs/bridge-hub-kusama.json +++ b/cumulus/parachains/chain-specs/bridge-hub-kusama.json @@ -20,7 +20,9 @@ "/dns/bridge-hub-kusama.bootnode.amforc.com/tcp/30337/p2p/12D3KooWGNeQJ5rXnEJkVUuQqwHd8aV5GkTAheaRoCaK8ZwW94id", "/dns/bridge-hub-kusama.bootnode.amforc.com/tcp/30333/wss/p2p/12D3KooWGNeQJ5rXnEJkVUuQqwHd8aV5GkTAheaRoCaK8ZwW94id", "/dns/kusama-bridge-hub-boot-ng.dwellir.com/tcp/30337/p2p/12D3KooWBFskNCQDVjuUeBh6vrszWrUvYMBBhtZRLnoTZDdLYbW5", - "/dns/kusama-bridge-hub-boot-ng.dwellir.com/tcp/443/wss/p2p/12D3KooWBFskNCQDVjuUeBh6vrszWrUvYMBBhtZRLnoTZDdLYbW5" + "/dns/kusama-bridge-hub-boot-ng.dwellir.com/tcp/443/wss/p2p/12D3KooWBFskNCQDVjuUeBh6vrszWrUvYMBBhtZRLnoTZDdLYbW5", + "/dns/bridgehub-kusama-bootnode.radiumblock.com/tcp/30333/p2p/12D3KooWQMWofXj8v3RroDNnrhv1iURqm8vnaG98AdGnCn2YoDcW", + "/dns/bridgehub-kusama-bootnode.radiumblock.com/tcp/30336/wss/p2p/12D3KooWQMWofXj8v3RroDNnrhv1iURqm8vnaG98AdGnCn2YoDcW" ], "telemetryEndpoints": null, "protocolId": null, diff --git a/cumulus/parachains/chain-specs/bridge-hub-polkadot.json b/cumulus/parachains/chain-specs/bridge-hub-polkadot.json index e44a4dacd48..d3e884284b4 100644 --- a/cumulus/parachains/chain-specs/bridge-hub-polkadot.json +++ b/cumulus/parachains/chain-specs/bridge-hub-polkadot.json @@ -16,7 +16,9 @@ "/dns/boot.metaspan.io/tcp/16032/p2p/12D3KooWQTfRnrK3FfbrotpSP5RVJbjBHVBSu8VSzhj9qcvjaqnZ", 
"/dns/boot.metaspan.io/tcp/16036/wss/p2p/12D3KooWQTfRnrK3FfbrotpSP5RVJbjBHVBSu8VSzhj9qcvjaqnZ", "/dns/boot-node.helikon.io/tcp/8220/p2p/12D3KooWC38TZJA8ZBXZgAYVrceoJ56jNNLJPdpk3ojeFkTAwZVp", - "/dns/boot-node.helikon.io/tcp/8222/wss/p2p/12D3KooWC38TZJA8ZBXZgAYVrceoJ56jNNLJPdpk3ojeFkTAwZVp" + "/dns/boot-node.helikon.io/tcp/8222/wss/p2p/12D3KooWC38TZJA8ZBXZgAYVrceoJ56jNNLJPdpk3ojeFkTAwZVp", + "/dns/bridgehub-polkadot-bootnode.radiumblock.com/tcp/30336/wss/p2p/12D3KooWPNZm78tWUmKbta3SXdkqTPsquRc8ekEbJjZsGGi7YiRi", + "/dns/bridgehub-polkadot-bootnode.radiumblock.com/tcp/30333/p2p/12D3KooWPNZm78tWUmKbta3SXdkqTPsquRc8ekEbJjZsGGi7YiRi" ], "telemetryEndpoints": null, "protocolId": null, diff --git a/cumulus/parachains/chain-specs/bridge-hub-westend.json b/cumulus/parachains/chain-specs/bridge-hub-westend.json index 0a441b3a295..dde3d437f41 100644 --- a/cumulus/parachains/chain-specs/bridge-hub-westend.json +++ b/cumulus/parachains/chain-specs/bridge-hub-westend.json @@ -14,7 +14,9 @@ "/dns/boot.metaspan.io/tcp/36032/p2p/12D3KooWPaLsu3buByBnGFQnp5UP4q1S652dGVft92TFeChizFir", "/dns/boot.metaspan.io/tcp/36036/wss/p2p/12D3KooWPaLsu3buByBnGFQnp5UP4q1S652dGVft92TFeChizFir", "/dns/boot-node.helikon.io/tcp/9220/p2p/12D3KooWK3K1Mu5Jjg96Lt9DUzg84KsWnZo44V4KB7mvhGqi6xnp", - "/dns/boot-node.helikon.io/tcp/9222/wss/p2p/12D3KooWK3K1Mu5Jjg96Lt9DUzg84KsWnZo44V4KB7mvhGqi6xnp" + "/dns/boot-node.helikon.io/tcp/9222/wss/p2p/12D3KooWK3K1Mu5Jjg96Lt9DUzg84KsWnZo44V4KB7mvhGqi6xnp", + "/dns/bridgehub-westend-bootnode.radiumblock.com/tcp/30333/p2p/12D3KooWBsBArCMxmQyo3feCEqMWuwyhb2LTRK8hmCCJxgrNeMke", + "/dns/bridgehub-westend-bootnode.radiumblock.com/tcp/30336/wss/p2p/12D3KooWBsBArCMxmQyo3feCEqMWuwyhb2LTRK8hmCCJxgrNeMke" ], "telemetryEndpoints": null, "protocolId": null, diff --git a/cumulus/parachains/chain-specs/collectives-polkadot.json b/cumulus/parachains/chain-specs/collectives-polkadot.json index 974be338857..003c6373429 100644 --- a/cumulus/parachains/chain-specs/collectives-polkadot.json +++ b/cumulus/parachains/chain-specs/collectives-polkadot.json @@ -20,7 +20,9 @@ "/dns/collectives-polkadot.bootnode.amforc.com/tcp/30335/p2p/12D3KooWQeAjDnGkrPe5vtpfnB6ydZfWyMxyrXLkBFmA6o4k9aiU", "/dns/collectives-polkadot.bootnode.amforc.com/tcp/30333/wss/p2p/12D3KooWQeAjDnGkrPe5vtpfnB6ydZfWyMxyrXLkBFmA6o4k9aiU", "/dns/polkadot-collectives-boot-ng.dwellir.com/tcp/30341/p2p/12D3KooWDMFYCNRAQcSRNV7xu2xv8319goSEbSHW4TnXRz6EpPKc", - "/dns/polkadot-collectives-boot-ng.dwellir.com/tcp/443/wss/p2p/12D3KooWDMFYCNRAQcSRNV7xu2xv8319goSEbSHW4TnXRz6EpPKc" + "/dns/polkadot-collectives-boot-ng.dwellir.com/tcp/443/wss/p2p/12D3KooWDMFYCNRAQcSRNV7xu2xv8319goSEbSHW4TnXRz6EpPKc", + "/dns/collectives-polkadot-bootnode.radiumblock.com/tcp/30333/p2p/12D3KooWDumvnNwPbBg5inBEapgjKU7ECdMHHgwfYeGWUkzYUE1c", + "/dns/collectives-polkadot-bootnode.radiumblock.com/tcp/30336/wss/p2p/12D3KooWDumvnNwPbBg5inBEapgjKU7ECdMHHgwfYeGWUkzYUE1c" ], "telemetryEndpoints": null, "protocolId": null, -- GitLab From d347d68868a38841ba51f2e91cef945a7ea50dd4 Mon Sep 17 00:00:00 2001 From: Keith Yeung Date: Thu, 9 Nov 2023 16:30:37 +0800 Subject: [PATCH 018/199] Remove unnecessary map_error (#2239) This was discovered during a debugging session, and it only served to mask the underlying error, which was not great. 
--- polkadot/xcm/xcm-builder/src/routing.rs | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/polkadot/xcm/xcm-builder/src/routing.rs b/polkadot/xcm/xcm-builder/src/routing.rs index 39e9eab410b..f4c18adddb3 100644 --- a/polkadot/xcm/xcm-builder/src/routing.rs +++ b/polkadot/xcm/xcm-builder/src/routing.rs @@ -49,8 +49,7 @@ impl SendXcm for WithUniqueTopic { message.0.push(SetTopic(unique_id)); unique_id }; - let (ticket, assets) = Inner::validate(destination, &mut Some(message)) - .map_err(|_| SendError::NotApplicable)?; + let (ticket, assets) = Inner::validate(destination, &mut Some(message))?; Ok(((ticket, unique_id), assets)) } -- GitLab From 48ea86f0e86c0cef21a8df978307d93d7fbc19f6 Mon Sep 17 00:00:00 2001 From: Oliver Tale-Yazdi Date: Thu, 9 Nov 2023 16:37:28 +0100 Subject: [PATCH 019/199] Add descriptions to all published crates (#2029) Missing descriptions (47): - [x] `cumulus/client/collator/Cargo.toml` - [x] `cumulus/client/relay-chain-inprocess-interface/Cargo.toml` - [x] `cumulus/client/cli/Cargo.toml` - [x] `cumulus/client/service/Cargo.toml` - [x] `cumulus/client/relay-chain-rpc-interface/Cargo.toml` - [x] `cumulus/client/relay-chain-interface/Cargo.toml` - [x] `cumulus/client/relay-chain-minimal-node/Cargo.toml` - [x] `cumulus/parachains/pallets/parachain-info/Cargo.toml` - [x] `cumulus/parachains/pallets/ping/Cargo.toml` - [x] `cumulus/primitives/utility/Cargo.toml` - [x] `cumulus/primitives/aura/Cargo.toml` - [x] `cumulus/primitives/core/Cargo.toml` - [x] `cumulus/primitives/parachain-inherent/Cargo.toml` - [x] `cumulus/test/relay-sproof-builder/Cargo.toml` - [x] `cumulus/pallets/xcmp-queue/Cargo.toml` - [x] `cumulus/pallets/dmp-queue/Cargo.toml` - [x] `cumulus/pallets/xcm/Cargo.toml` - [x] `polkadot/erasure-coding/Cargo.toml` - [x] `polkadot/statement-table/Cargo.toml` - [x] `polkadot/primitives/Cargo.toml` - [x] `polkadot/rpc/Cargo.toml` - [x] `polkadot/node/service/Cargo.toml` - [x] `polkadot/node/core/parachains-inherent/Cargo.toml` - [x] `polkadot/node/core/approval-voting/Cargo.toml` - [x] `polkadot/node/core/dispute-coordinator/Cargo.toml` - [x] `polkadot/node/core/av-store/Cargo.toml` - [x] `polkadot/node/core/chain-api/Cargo.toml` - [x] `polkadot/node/core/prospective-parachains/Cargo.toml` - [x] `polkadot/node/core/backing/Cargo.toml` - [x] `polkadot/node/core/provisioner/Cargo.toml` - [x] `polkadot/node/core/runtime-api/Cargo.toml` - [x] `polkadot/node/core/bitfield-signing/Cargo.toml` - [x] `polkadot/node/network/dispute-distribution/Cargo.toml` - [x] `polkadot/node/network/bridge/Cargo.toml` - [x] `polkadot/node/network/collator-protocol/Cargo.toml` - [x] `polkadot/node/network/approval-distribution/Cargo.toml` - [x] `polkadot/node/network/availability-distribution/Cargo.toml` - [x] `polkadot/node/network/bitfield-distribution/Cargo.toml` - [x] `polkadot/node/network/gossip-support/Cargo.toml` - [x] `polkadot/node/network/availability-recovery/Cargo.toml` - [x] `polkadot/node/collation-generation/Cargo.toml` - [x] `polkadot/node/overseer/Cargo.toml` - [x] `polkadot/runtime/parachains/Cargo.toml` - [x] `polkadot/runtime/common/slot_range_helper/Cargo.toml` - [x] `polkadot/runtime/metrics/Cargo.toml` - [x] `polkadot/xcm/pallet-xcm-benchmarks/Cargo.toml` - [x] `polkadot/utils/generate-bags/Cargo.toml` - [x] `substrate/bin/minimal/runtime/Cargo.toml` --------- Signed-off-by: Oliver Tale-Yazdi Signed-off-by: alindima Co-authored-by: ordian Co-authored-by: Tsvetomir Dimitrov Co-authored-by: Marcin S Co-authored-by: alindima Co-authored-by: Sebastian 
Kunert Co-authored-by: Dmitry Markin Co-authored-by: joe petrowski <25483142+joepetrowski@users.noreply.github.com> Co-authored-by: Liam Aharon --- cumulus/client/cli/Cargo.toml | 1 + cumulus/client/collator/Cargo.toml | 1 + cumulus/client/relay-chain-inprocess-interface/Cargo.toml | 1 + cumulus/client/relay-chain-interface/Cargo.toml | 1 + cumulus/client/relay-chain-minimal-node/Cargo.toml | 1 + cumulus/client/relay-chain-rpc-interface/Cargo.toml | 1 + cumulus/client/service/Cargo.toml | 1 + cumulus/pallets/xcm/Cargo.toml | 1 + cumulus/pallets/xcmp-queue/Cargo.toml | 1 + cumulus/parachains/pallets/parachain-info/Cargo.toml | 1 + cumulus/parachains/pallets/ping/Cargo.toml | 1 + cumulus/primitives/aura/Cargo.toml | 1 + cumulus/primitives/core/Cargo.toml | 1 + cumulus/primitives/parachain-inherent/Cargo.toml | 1 + cumulus/primitives/utility/Cargo.toml | 1 + cumulus/test/relay-sproof-builder/Cargo.toml | 1 + polkadot/erasure-coding/Cargo.toml | 1 + polkadot/node/collation-generation/Cargo.toml | 1 + polkadot/node/core/approval-voting/Cargo.toml | 1 + polkadot/node/core/av-store/Cargo.toml | 1 + polkadot/node/core/backing/Cargo.toml | 1 + polkadot/node/core/bitfield-signing/Cargo.toml | 1 + polkadot/node/core/chain-api/Cargo.toml | 1 + polkadot/node/core/dispute-coordinator/Cargo.toml | 1 + polkadot/node/core/parachains-inherent/Cargo.toml | 1 + polkadot/node/core/prospective-parachains/Cargo.toml | 1 + polkadot/node/core/provisioner/Cargo.toml | 1 + polkadot/node/core/runtime-api/Cargo.toml | 1 + polkadot/node/network/approval-distribution/Cargo.toml | 1 + polkadot/node/network/availability-distribution/Cargo.toml | 1 + polkadot/node/network/availability-recovery/Cargo.toml | 1 + polkadot/node/network/bitfield-distribution/Cargo.toml | 1 + polkadot/node/network/bridge/Cargo.toml | 1 + polkadot/node/network/collator-protocol/Cargo.toml | 1 + polkadot/node/network/dispute-distribution/Cargo.toml | 1 + polkadot/node/network/gossip-support/Cargo.toml | 1 + polkadot/node/overseer/Cargo.toml | 1 + polkadot/node/service/Cargo.toml | 1 + polkadot/primitives/Cargo.toml | 1 + polkadot/rpc/Cargo.toml | 1 + polkadot/runtime/common/slot_range_helper/Cargo.toml | 1 + polkadot/runtime/metrics/Cargo.toml | 1 + polkadot/runtime/parachains/Cargo.toml | 1 + polkadot/statement-table/Cargo.toml | 1 + polkadot/utils/generate-bags/Cargo.toml | 1 + polkadot/xcm/pallet-xcm-benchmarks/Cargo.toml | 1 + substrate/bin/minimal/runtime/Cargo.toml | 6 +++++- 47 files changed, 51 insertions(+), 1 deletion(-) diff --git a/cumulus/client/cli/Cargo.toml b/cumulus/client/cli/Cargo.toml index 5d9752dbb20..0f942feb595 100644 --- a/cumulus/client/cli/Cargo.toml +++ b/cumulus/client/cli/Cargo.toml @@ -3,6 +3,7 @@ name = "cumulus-client-cli" version = "0.1.0" authors.workspace = true edition.workspace = true +description = "Parachain node CLI utilities." license = "GPL-3.0-or-later WITH Classpath-exception-2.0" [dependencies] diff --git a/cumulus/client/collator/Cargo.toml b/cumulus/client/collator/Cargo.toml index 30798f84800..ad9f01ed083 100644 --- a/cumulus/client/collator/Cargo.toml +++ b/cumulus/client/collator/Cargo.toml @@ -3,6 +3,7 @@ name = "cumulus-client-collator" version = "0.1.0" authors.workspace = true edition.workspace = true +description = "Common node-side functionality and glue code to collate parachain blocks." 
license = "GPL-3.0-or-later WITH Classpath-exception-2.0" [dependencies] diff --git a/cumulus/client/relay-chain-inprocess-interface/Cargo.toml b/cumulus/client/relay-chain-inprocess-interface/Cargo.toml index 19c99c5cb72..87f0eabd9b5 100644 --- a/cumulus/client/relay-chain-inprocess-interface/Cargo.toml +++ b/cumulus/client/relay-chain-inprocess-interface/Cargo.toml @@ -3,6 +3,7 @@ authors.workspace = true name = "cumulus-relay-chain-inprocess-interface" version = "0.1.0" edition.workspace = true +description = "Implementation of the RelayChainInterface trait for Polkadot full-nodes." license = "GPL-3.0-or-later WITH Classpath-exception-2.0" [dependencies] diff --git a/cumulus/client/relay-chain-interface/Cargo.toml b/cumulus/client/relay-chain-interface/Cargo.toml index 803df7d302b..c9d50afe8fa 100644 --- a/cumulus/client/relay-chain-interface/Cargo.toml +++ b/cumulus/client/relay-chain-interface/Cargo.toml @@ -3,6 +3,7 @@ authors.workspace = true name = "cumulus-relay-chain-interface" version = "0.1.0" edition.workspace = true +description = "Common interface for different relay chain datasources." license = "GPL-3.0-or-later WITH Classpath-exception-2.0" [dependencies] diff --git a/cumulus/client/relay-chain-minimal-node/Cargo.toml b/cumulus/client/relay-chain-minimal-node/Cargo.toml index 8c196f66b72..53173fb4118 100644 --- a/cumulus/client/relay-chain-minimal-node/Cargo.toml +++ b/cumulus/client/relay-chain-minimal-node/Cargo.toml @@ -3,6 +3,7 @@ authors.workspace = true name = "cumulus-relay-chain-minimal-node" version = "0.1.0" edition.workspace = true +description = "Minimal node implementation to be used in tandem with RPC or light-client mode." license = "GPL-3.0-or-later WITH Classpath-exception-2.0" [dependencies] diff --git a/cumulus/client/relay-chain-rpc-interface/Cargo.toml b/cumulus/client/relay-chain-rpc-interface/Cargo.toml index 9e5a8f1eccb..93fe57145b0 100644 --- a/cumulus/client/relay-chain-rpc-interface/Cargo.toml +++ b/cumulus/client/relay-chain-rpc-interface/Cargo.toml @@ -3,6 +3,7 @@ authors.workspace = true name = "cumulus-relay-chain-rpc-interface" version = "0.1.0" edition.workspace = true +description = "Implementation of the RelayChainInterface trait that connects to a remote RPC-node." license = "GPL-3.0-or-later WITH Classpath-exception-2.0" diff --git a/cumulus/client/service/Cargo.toml b/cumulus/client/service/Cargo.toml index 314aebdcb9c..f80c65128d5 100644 --- a/cumulus/client/service/Cargo.toml +++ b/cumulus/client/service/Cargo.toml @@ -3,6 +3,7 @@ name = "cumulus-client-service" version = "0.1.0" authors.workspace = true edition.workspace = true +description = "Common functions used to assemble the components of a parachain node." 
license = "GPL-3.0-or-later WITH Classpath-exception-2.0" [dependencies] diff --git a/cumulus/pallets/xcm/Cargo.toml b/cumulus/pallets/xcm/Cargo.toml index d79e57bceac..c8e819979bd 100644 --- a/cumulus/pallets/xcm/Cargo.toml +++ b/cumulus/pallets/xcm/Cargo.toml @@ -4,6 +4,7 @@ edition.workspace = true name = "cumulus-pallet-xcm" version = "0.1.0" license = "Apache-2.0" +description = "Pallet for stuff specific to parachains' usage of XCM" [dependencies] codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] } diff --git a/cumulus/pallets/xcmp-queue/Cargo.toml b/cumulus/pallets/xcmp-queue/Cargo.toml index e526a28d084..fba006e7986 100644 --- a/cumulus/pallets/xcmp-queue/Cargo.toml +++ b/cumulus/pallets/xcmp-queue/Cargo.toml @@ -3,6 +3,7 @@ name = "cumulus-pallet-xcmp-queue" version = "0.1.0" authors.workspace = true edition.workspace = true +description = "Pallet to queue outbound and inbound XCMP messages." license = "Apache-2.0" [dependencies] diff --git a/cumulus/parachains/pallets/parachain-info/Cargo.toml b/cumulus/parachains/pallets/parachain-info/Cargo.toml index 40f1a07c2dd..727182dfb8e 100644 --- a/cumulus/parachains/pallets/parachain-info/Cargo.toml +++ b/cumulus/parachains/pallets/parachain-info/Cargo.toml @@ -4,6 +4,7 @@ edition.workspace = true name = "staging-parachain-info" version = "0.1.0" license = "Apache-2.0" +description = "Pallet to store the parachain ID" [dependencies] codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] } diff --git a/cumulus/parachains/pallets/ping/Cargo.toml b/cumulus/parachains/pallets/ping/Cargo.toml index 0ea424e1a2d..0133befa855 100644 --- a/cumulus/parachains/pallets/ping/Cargo.toml +++ b/cumulus/parachains/pallets/ping/Cargo.toml @@ -4,6 +4,7 @@ edition.workspace = true name = "cumulus-ping" version = "0.1.0" license = "Apache-2.0" +description = "Ping Pallet for Cumulus XCM/UMP testing." 
[dependencies] codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] } diff --git a/cumulus/primitives/aura/Cargo.toml b/cumulus/primitives/aura/Cargo.toml index 168c85b2efb..19607eb7c18 100644 --- a/cumulus/primitives/aura/Cargo.toml +++ b/cumulus/primitives/aura/Cargo.toml @@ -4,6 +4,7 @@ version = "0.1.0" authors.workspace = true edition.workspace = true license = "Apache-2.0" +description = "Core primitives for Aura in Cumulus" [dependencies] codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = [ "derive" ] } diff --git a/cumulus/primitives/core/Cargo.toml b/cumulus/primitives/core/Cargo.toml index e7bf15252f2..23839a10e46 100644 --- a/cumulus/primitives/core/Cargo.toml +++ b/cumulus/primitives/core/Cargo.toml @@ -4,6 +4,7 @@ version = "0.1.0" authors.workspace = true edition.workspace = true license = "Apache-2.0" +description = "Cumulus related core primitive types and traits" [dependencies] codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = [ "derive" ] } diff --git a/cumulus/primitives/parachain-inherent/Cargo.toml b/cumulus/primitives/parachain-inherent/Cargo.toml index 5a448f65ada..46b5da57f38 100644 --- a/cumulus/primitives/parachain-inherent/Cargo.toml +++ b/cumulus/primitives/parachain-inherent/Cargo.toml @@ -3,6 +3,7 @@ name = "cumulus-primitives-parachain-inherent" version = "0.1.0" authors.workspace = true edition.workspace = true +description = "Inherent that needs to be present in every parachain block. Contains messages and a relay chain storage-proof." license = "Apache-2.0" [dependencies] diff --git a/cumulus/primitives/utility/Cargo.toml b/cumulus/primitives/utility/Cargo.toml index 23e2287d1fe..45ce6701988 100644 --- a/cumulus/primitives/utility/Cargo.toml +++ b/cumulus/primitives/utility/Cargo.toml @@ -4,6 +4,7 @@ version = "0.1.0" authors.workspace = true edition.workspace = true license = "Apache-2.0" +description = "Helper datatypes for Cumulus" [dependencies] codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = [ "derive" ] } diff --git a/cumulus/test/relay-sproof-builder/Cargo.toml b/cumulus/test/relay-sproof-builder/Cargo.toml index 8807e4e2858..b24ac308495 100644 --- a/cumulus/test/relay-sproof-builder/Cargo.toml +++ b/cumulus/test/relay-sproof-builder/Cargo.toml @@ -4,6 +4,7 @@ version = "0.1.0" authors.workspace = true edition.workspace = true license = "Apache-2.0" +description = "Mocked relay state proof builder for testing Cumulus." 
[dependencies] codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = [ "derive" ] } diff --git a/polkadot/erasure-coding/Cargo.toml b/polkadot/erasure-coding/Cargo.toml index d07b77ec4dd..ccfe7f14eb4 100644 --- a/polkadot/erasure-coding/Cargo.toml +++ b/polkadot/erasure-coding/Cargo.toml @@ -1,6 +1,7 @@ [package] name = "polkadot-erasure-coding" version = "1.0.0" +description = "Erasure coding used for Polkadot's availability system" authors.workspace = true edition.workspace = true license.workspace = true diff --git a/polkadot/node/collation-generation/Cargo.toml b/polkadot/node/collation-generation/Cargo.toml index b110540140f..c1848f47fc6 100644 --- a/polkadot/node/collation-generation/Cargo.toml +++ b/polkadot/node/collation-generation/Cargo.toml @@ -4,6 +4,7 @@ version = "1.0.0" authors.workspace = true edition.workspace = true license.workspace = true +description = "Collator-side subsystem that handles incoming candidate submissions from the parachain." [dependencies] futures = "0.3.21" diff --git a/polkadot/node/core/approval-voting/Cargo.toml b/polkadot/node/core/approval-voting/Cargo.toml index f7ea1d21077..59a6708f17e 100644 --- a/polkadot/node/core/approval-voting/Cargo.toml +++ b/polkadot/node/core/approval-voting/Cargo.toml @@ -4,6 +4,7 @@ version = "1.0.0" authors.workspace = true edition.workspace = true license.workspace = true +description = "Approval Voting Subsystem of the Polkadot node" [dependencies] futures = "0.3.21" diff --git a/polkadot/node/core/av-store/Cargo.toml b/polkadot/node/core/av-store/Cargo.toml index 955fe37d7c3..3fa81d064a8 100644 --- a/polkadot/node/core/av-store/Cargo.toml +++ b/polkadot/node/core/av-store/Cargo.toml @@ -1,5 +1,6 @@ [package] name = "polkadot-node-core-av-store" +description = "The Availability Store subsystem. Wrapper over the DB that stores availability data and chunks." version = "1.0.0" authors.workspace = true edition.workspace = true diff --git a/polkadot/node/core/backing/Cargo.toml b/polkadot/node/core/backing/Cargo.toml index e7e6358e8a4..7a6ce5de8cb 100644 --- a/polkadot/node/core/backing/Cargo.toml +++ b/polkadot/node/core/backing/Cargo.toml @@ -4,6 +4,7 @@ version = "1.0.0" authors.workspace = true edition.workspace = true license.workspace = true +description = "The Candidate Backing Subsystem. Tracks parachain candidates that can be backed, as well as the issuance of statements about candidates." [dependencies] futures = "0.3.21" diff --git a/polkadot/node/core/bitfield-signing/Cargo.toml b/polkadot/node/core/bitfield-signing/Cargo.toml index de38d18d970..712a01b46b1 100644 --- a/polkadot/node/core/bitfield-signing/Cargo.toml +++ b/polkadot/node/core/bitfield-signing/Cargo.toml @@ -4,6 +4,7 @@ version = "1.0.0" authors.workspace = true edition.workspace = true license.workspace = true +description = "Bitfield signing subsystem for the Polkadot node" [dependencies] futures = "0.3.21" diff --git a/polkadot/node/core/chain-api/Cargo.toml b/polkadot/node/core/chain-api/Cargo.toml index 2178be028bd..154fa20e75d 100644 --- a/polkadot/node/core/chain-api/Cargo.toml +++ b/polkadot/node/core/chain-api/Cargo.toml @@ -4,6 +4,7 @@ version = "1.0.0" authors.workspace = true edition.workspace = true license.workspace = true +description = "The Chain API subsystem provides access to chain related utility functions like block number to hash conversions." 
[dependencies] futures = "0.3.21" diff --git a/polkadot/node/core/dispute-coordinator/Cargo.toml b/polkadot/node/core/dispute-coordinator/Cargo.toml index 6061c52b7e8..e2086db708f 100644 --- a/polkadot/node/core/dispute-coordinator/Cargo.toml +++ b/polkadot/node/core/dispute-coordinator/Cargo.toml @@ -1,6 +1,7 @@ [package] name = "polkadot-node-core-dispute-coordinator" version = "1.0.0" +description = "The node-side components that participate in disputes" authors.workspace = true edition.workspace = true license.workspace = true diff --git a/polkadot/node/core/parachains-inherent/Cargo.toml b/polkadot/node/core/parachains-inherent/Cargo.toml index 18d91dcfb56..c783f21e24d 100644 --- a/polkadot/node/core/parachains-inherent/Cargo.toml +++ b/polkadot/node/core/parachains-inherent/Cargo.toml @@ -4,6 +4,7 @@ version = "1.0.0" authors.workspace = true edition.workspace = true license.workspace = true +description = "Parachains inherent data provider for Polkadot node" [dependencies] futures = "0.3.21" diff --git a/polkadot/node/core/prospective-parachains/Cargo.toml b/polkadot/node/core/prospective-parachains/Cargo.toml index 77a59d87f3f..9db1259e61d 100644 --- a/polkadot/node/core/prospective-parachains/Cargo.toml +++ b/polkadot/node/core/prospective-parachains/Cargo.toml @@ -4,6 +4,7 @@ version = "1.0.0" authors.workspace = true edition.workspace = true license.workspace = true +description = "The Prospective Parachains subsystem. Tracks and handles prospective parachain fragments." [dependencies] futures = "0.3.19" diff --git a/polkadot/node/core/provisioner/Cargo.toml b/polkadot/node/core/provisioner/Cargo.toml index 05ea92caa97..13ecb356f2c 100644 --- a/polkadot/node/core/provisioner/Cargo.toml +++ b/polkadot/node/core/provisioner/Cargo.toml @@ -1,6 +1,7 @@ [package] name = "polkadot-node-core-provisioner" version = "1.0.0" +description="Responsible for assembling a relay chain block from a set of available parachain candidates" authors.workspace = true edition.workspace = true license.workspace = true diff --git a/polkadot/node/core/runtime-api/Cargo.toml b/polkadot/node/core/runtime-api/Cargo.toml index b16a501686d..f324f1e79c4 100644 --- a/polkadot/node/core/runtime-api/Cargo.toml +++ b/polkadot/node/core/runtime-api/Cargo.toml @@ -1,6 +1,7 @@ [package] name = "polkadot-node-core-runtime-api" version = "1.0.0" +description="Wrapper around the parachain-related runtime APIs" authors.workspace = true edition.workspace = true license.workspace = true diff --git a/polkadot/node/network/approval-distribution/Cargo.toml b/polkadot/node/network/approval-distribution/Cargo.toml index 32b7f489efa..f8a7cc15f87 100644 --- a/polkadot/node/network/approval-distribution/Cargo.toml +++ b/polkadot/node/network/approval-distribution/Cargo.toml @@ -1,6 +1,7 @@ [package] name = "polkadot-approval-distribution" version = "1.0.0" +description = "Polkadot Approval Distribution subsystem for the distribution of assignments and approvals for approval checks on candidates over the network." authors.workspace = true edition.workspace = true license.workspace = true diff --git a/polkadot/node/network/availability-distribution/Cargo.toml b/polkadot/node/network/availability-distribution/Cargo.toml index c3c7aa4e0ea..91ed1026e41 100644 --- a/polkadot/node/network/availability-distribution/Cargo.toml +++ b/polkadot/node/network/availability-distribution/Cargo.toml @@ -1,5 +1,6 @@ [package] name = "polkadot-availability-distribution" +description = "The Availability Distribution subsystem. 
Requests the required availability data. Also distributes availability data and chunks to requesters." version = "1.0.0" authors.workspace = true edition.workspace = true diff --git a/polkadot/node/network/availability-recovery/Cargo.toml b/polkadot/node/network/availability-recovery/Cargo.toml index 42c3abef547..6048e6323cb 100644 --- a/polkadot/node/network/availability-recovery/Cargo.toml +++ b/polkadot/node/network/availability-recovery/Cargo.toml @@ -1,5 +1,6 @@ [package] name = "polkadot-availability-recovery" +description = "The Availability Recovery subsystem. Handles requests for recovering the availability data of included candidates." version = "1.0.0" authors.workspace = true edition.workspace = true diff --git a/polkadot/node/network/bitfield-distribution/Cargo.toml b/polkadot/node/network/bitfield-distribution/Cargo.toml index 32b0ba2545e..0e61e9cf620 100644 --- a/polkadot/node/network/bitfield-distribution/Cargo.toml +++ b/polkadot/node/network/bitfield-distribution/Cargo.toml @@ -1,6 +1,7 @@ [package] name = "polkadot-availability-bitfield-distribution" version = "1.0.0" +description = "Polkadot Bitfiled Distribution subsystem, which gossips signed availability bitfields used to compactly determine which backed candidates are available or not based on a 2/3+ quorum." authors.workspace = true edition.workspace = true license.workspace = true diff --git a/polkadot/node/network/bridge/Cargo.toml b/polkadot/node/network/bridge/Cargo.toml index df8e881234d..6ae765c252f 100644 --- a/polkadot/node/network/bridge/Cargo.toml +++ b/polkadot/node/network/bridge/Cargo.toml @@ -1,6 +1,7 @@ [package] name = "polkadot-network-bridge" version = "1.0.0" +description = "The Network Bridge Subsystem — protocol multiplexer for Polkadot." authors.workspace = true edition.workspace = true license.workspace = true diff --git a/polkadot/node/network/collator-protocol/Cargo.toml b/polkadot/node/network/collator-protocol/Cargo.toml index e5328cf1662..367a97f35d9 100644 --- a/polkadot/node/network/collator-protocol/Cargo.toml +++ b/polkadot/node/network/collator-protocol/Cargo.toml @@ -1,6 +1,7 @@ [package] name = "polkadot-collator-protocol" version = "1.0.0" +description = "Polkadot Collator Protocol subsystem. Allows collators and validators to talk to each other." authors.workspace = true edition.workspace = true license.workspace = true diff --git a/polkadot/node/network/dispute-distribution/Cargo.toml b/polkadot/node/network/dispute-distribution/Cargo.toml index 5d8e245d289..f4ea358c41b 100644 --- a/polkadot/node/network/dispute-distribution/Cargo.toml +++ b/polkadot/node/network/dispute-distribution/Cargo.toml @@ -1,6 +1,7 @@ [package] name = "polkadot-dispute-distribution" version = "1.0.0" +description = "Polkadot Dispute Distribution subsystem, which ensures all concerned validators are aware of a dispute and have the relevant votes." authors.workspace = true edition.workspace = true license.workspace = true diff --git a/polkadot/node/network/gossip-support/Cargo.toml b/polkadot/node/network/gossip-support/Cargo.toml index a5bfda73833..a9f68261add 100644 --- a/polkadot/node/network/gossip-support/Cargo.toml +++ b/polkadot/node/network/gossip-support/Cargo.toml @@ -1,6 +1,7 @@ [package] name = "polkadot-gossip-support" version = "1.0.0" +description = "Polkadot Gossip Support subsystem. Responsible for keeping track of session changes and issuing a connection request to the relevant validators on every new session." 
authors.workspace = true edition.workspace = true license.workspace = true diff --git a/polkadot/node/overseer/Cargo.toml b/polkadot/node/overseer/Cargo.toml index 376ebe0375b..ac1e4443f0c 100644 --- a/polkadot/node/overseer/Cargo.toml +++ b/polkadot/node/overseer/Cargo.toml @@ -4,6 +4,7 @@ version = "1.0.0" authors.workspace = true edition.workspace = true license.workspace = true +description = "System overseer of the Polkadot node" [dependencies] client = { package = "sc-client-api", path = "../../../substrate/client/api" } diff --git a/polkadot/node/service/Cargo.toml b/polkadot/node/service/Cargo.toml index 0ed50086f86..e7a4f4a825c 100644 --- a/polkadot/node/service/Cargo.toml +++ b/polkadot/node/service/Cargo.toml @@ -5,6 +5,7 @@ version = "1.0.0" authors.workspace = true edition.workspace = true license.workspace = true +description = "Utils to tie different Polkadot components together and allow instantiation of a node." [dependencies] # Substrate Client diff --git a/polkadot/primitives/Cargo.toml b/polkadot/primitives/Cargo.toml index c7c081e2f0e..b318c2d4be7 100644 --- a/polkadot/primitives/Cargo.toml +++ b/polkadot/primitives/Cargo.toml @@ -4,6 +4,7 @@ version = "1.0.0" authors.workspace = true edition.workspace = true license.workspace = true +description = "Shared primitives used by Polkadot runtime" [dependencies] bitvec = { version = "1.0.0", default-features = false, features = ["alloc"] } diff --git a/polkadot/rpc/Cargo.toml b/polkadot/rpc/Cargo.toml index b5f155b5a65..ce11b26e554 100644 --- a/polkadot/rpc/Cargo.toml +++ b/polkadot/rpc/Cargo.toml @@ -4,6 +4,7 @@ version = "1.0.0" authors.workspace = true edition.workspace = true license.workspace = true +description = "Polkadot specific RPC functionality." [dependencies] jsonrpsee = { version = "0.16.2", features = ["server"] } diff --git a/polkadot/runtime/common/slot_range_helper/Cargo.toml b/polkadot/runtime/common/slot_range_helper/Cargo.toml index f65717519d5..59c76a6cabb 100644 --- a/polkadot/runtime/common/slot_range_helper/Cargo.toml +++ b/polkadot/runtime/common/slot_range_helper/Cargo.toml @@ -4,6 +4,7 @@ version = "1.0.0" authors.workspace = true edition.workspace = true license.workspace = true +description = "Helper crate for generating slot ranges for the Polkadot runtime." [dependencies] paste = "1.0" diff --git a/polkadot/runtime/metrics/Cargo.toml b/polkadot/runtime/metrics/Cargo.toml index cfa6bf3dafb..cdfab82d00c 100644 --- a/polkadot/runtime/metrics/Cargo.toml +++ b/polkadot/runtime/metrics/Cargo.toml @@ -4,6 +4,7 @@ version = "1.0.0" authors.workspace = true edition.workspace = true license.workspace = true +description = "Runtime metric interface for the Polkadot node" [dependencies] sp-std = { package = "sp-std", path = "../../../substrate/primitives/std", default-features = false} diff --git a/polkadot/runtime/parachains/Cargo.toml b/polkadot/runtime/parachains/Cargo.toml index 004a0545d55..b6800fc0844 100644 --- a/polkadot/runtime/parachains/Cargo.toml +++ b/polkadot/runtime/parachains/Cargo.toml @@ -1,6 +1,7 @@ [package] name = "polkadot-runtime-parachains" version = "1.0.0" +description = "Relay Chain runtime code responsible for Parachains." 
authors.workspace = true edition.workspace = true license.workspace = true diff --git a/polkadot/statement-table/Cargo.toml b/polkadot/statement-table/Cargo.toml index 91ca2015af3..d2518591d26 100644 --- a/polkadot/statement-table/Cargo.toml +++ b/polkadot/statement-table/Cargo.toml @@ -4,6 +4,7 @@ version = "1.0.0" authors.workspace = true edition.workspace = true license.workspace = true +description = "Stores messages other authorities issue about candidates in Polkadot." [dependencies] parity-scale-codec = { version = "3.6.1", default-features = false, features = ["derive"] } diff --git a/polkadot/utils/generate-bags/Cargo.toml b/polkadot/utils/generate-bags/Cargo.toml index 95ca57ea728..ed29001aa4f 100644 --- a/polkadot/utils/generate-bags/Cargo.toml +++ b/polkadot/utils/generate-bags/Cargo.toml @@ -4,6 +4,7 @@ version = "1.0.0" authors.workspace = true edition.workspace = true license.workspace = true +description = "CLI to generate voter bags for Polkadot runtimes" [dependencies] clap = { version = "4.4.6", features = ["derive"] } diff --git a/polkadot/xcm/pallet-xcm-benchmarks/Cargo.toml b/polkadot/xcm/pallet-xcm-benchmarks/Cargo.toml index 88df81a3dad..5be0bbe4ae5 100644 --- a/polkadot/xcm/pallet-xcm-benchmarks/Cargo.toml +++ b/polkadot/xcm/pallet-xcm-benchmarks/Cargo.toml @@ -4,6 +4,7 @@ authors.workspace = true edition.workspace = true license.workspace = true version = "1.0.0" +description = "Benchmarks for the XCM pallet" [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] diff --git a/substrate/bin/minimal/runtime/Cargo.toml b/substrate/bin/minimal/runtime/Cargo.toml index b75816a5ea3..e7e67ce889a 100644 --- a/substrate/bin/minimal/runtime/Cargo.toml +++ b/substrate/bin/minimal/runtime/Cargo.toml @@ -1,7 +1,11 @@ [package] name = "minimal-runtime" version = "0.1.0" -edition = "2021" +authors.workspace = true +description = "A minimal Substrate example runtime" +edition.workspace = true +repository.workspace = true +license.workspace = true [dependencies] parity-scale-codec = { version = "3.0.0", default-features = false } -- GitLab From 0abbd60de1b9c65647c3962f368a5a34d31803d5 Mon Sep 17 00:00:00 2001 From: Lulu Date: Thu, 9 Nov 2023 20:23:34 +0100 Subject: [PATCH 020/199] Add license to tracking-allocator and add staging-prefix (#2259) --- polkadot/node/tracking-allocator/Cargo.toml | 1 + 1 file changed, 1 insertion(+) diff --git a/polkadot/node/tracking-allocator/Cargo.toml b/polkadot/node/tracking-allocator/Cargo.toml index 2ea3f69c7d0..a7842a3e62e 100644 --- a/polkadot/node/tracking-allocator/Cargo.toml +++ b/polkadot/node/tracking-allocator/Cargo.toml @@ -4,3 +4,4 @@ description = "Tracking allocator to control the amount of memory consumed by th version = "1.0.0" authors.workspace = true edition.workspace = true +license.workspace = true -- GitLab From e8029a776196ab2339eb47b6be4cc3ce4b041967 Mon Sep 17 00:00:00 2001 From: Lulu Date: Thu, 9 Nov 2023 21:17:48 +0100 Subject: [PATCH 021/199] Don't publish frame and deps (#2260) --- substrate/bin/minimal/runtime/Cargo.toml | 1 + substrate/frame/Cargo.toml | 1 + substrate/frame/examples/Cargo.toml | 1 + substrate/frame/examples/frame-crate/Cargo.toml | 1 + 4 files changed, 4 insertions(+) diff --git a/substrate/bin/minimal/runtime/Cargo.toml b/substrate/bin/minimal/runtime/Cargo.toml index e7e67ce889a..85d56d0638a 100644 --- a/substrate/bin/minimal/runtime/Cargo.toml +++ b/substrate/bin/minimal/runtime/Cargo.toml @@ -6,6 +6,7 @@ description = "A minimal Substrate example runtime" edition.workspace = true 
repository.workspace = true license.workspace = true +publish = false [dependencies] parity-scale-codec = { version = "3.0.0", default-features = false } diff --git a/substrate/frame/Cargo.toml b/substrate/frame/Cargo.toml index f7b35aba358..9f2f73ffb15 100644 --- a/substrate/frame/Cargo.toml +++ b/substrate/frame/Cargo.toml @@ -7,6 +7,7 @@ license = "Apache-2.0" homepage = "paritytech.github.io" repository.workspace = true description = "The single package to get you started with building frame pallets and runtimes" +publish = false [package.metadata.docs.rs] # enable `experimental` feature for docs diff --git a/substrate/frame/examples/Cargo.toml b/substrate/frame/examples/Cargo.toml index 98c4e51889b..1b215022715 100644 --- a/substrate/frame/examples/Cargo.toml +++ b/substrate/frame/examples/Cargo.toml @@ -7,6 +7,7 @@ license = "Apache-2.0" homepage = "https://substrate.io" repository.workspace = true description = "The single package with examples of various types of FRAME pallets" +publish = false [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] diff --git a/substrate/frame/examples/frame-crate/Cargo.toml b/substrate/frame/examples/frame-crate/Cargo.toml index 854ee8b55c8..ceb8c7bfb81 100644 --- a/substrate/frame/examples/frame-crate/Cargo.toml +++ b/substrate/frame/examples/frame-crate/Cargo.toml @@ -7,6 +7,7 @@ license = "MIT-0" homepage = "https://substrate.io" repository.workspace = true description = "FRAME example pallet with umbrella crate" +publish = false [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] -- GitLab From b0d0fb31d8758602387e4ba1cb712e90dff8b2a7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Thu, 9 Nov 2023 21:44:23 +0100 Subject: [PATCH 022/199] sc-state-db: Keep track of `LAST_PRUNED` after warp syncing (#2228) When warp syncing we import the target block with all its state. However, we didn't store the `LAST_PRUNED` block which would then lead to `pruning` to forget about the imported block after a restart of the node. We just set `LAST_PRUNED` to the parent block of the warp sync target block to fix this issue. --- substrate/client/state-db/src/pruning.rs | 42 ++++++++++++++++++++++-- 1 file changed, 40 insertions(+), 2 deletions(-) diff --git a/substrate/client/state-db/src/pruning.rs b/substrate/client/state-db/src/pruning.rs index 7bee2b1d99a..623d30b098b 100644 --- a/substrate/client/state-db/src/pruning.rs +++ b/substrate/client/state-db/src/pruning.rs @@ -406,12 +406,24 @@ impl RefWindow { commit: &mut CommitSet, ) -> Result<(), Error> { if self.base == 0 && self.is_empty() && number > 0 { - // assume that parent was canonicalized + // This branch is taken if the node imports the target block of a warp sync. + // assume that the block was canonicalized self.base = number; + // The parent of the block was the last block that got pruned. + commit + .meta + .inserted + .push((to_meta_key(LAST_PRUNED, &()), (number - 1).encode())); } else if (self.base + self.window_size()) != number { return Err(Error::StateDb(StateDbError::InvalidBlockNumber)) } - trace!(target: "state-db", "Adding to pruning window: {:?} ({} inserted, {} deleted)", hash, commit.data.inserted.len(), commit.data.deleted.len()); + trace!( + target: "state-db", + "Adding to pruning window: {:?} ({} inserted, {} deleted)", + hash, + commit.data.inserted.len(), + commit.data.deleted.len(), + ); let inserted = if matches!(self.queue, DeathRowQueue::Mem { .. 
}) { commit.data.inserted.iter().map(|(k, _)| k.clone()).collect() } else { @@ -869,4 +881,30 @@ mod tests { pruning.prune_one(&mut commit).unwrap(); db.commit(&commit); } + + /// Ensure that after warp syncing the state is stored correctly in the db. The warp sync target + /// block is imported with all its state at once. This test ensures that after a restart + /// `pruning` still knows that this block was imported. + #[test] + fn store_correct_state_after_warp_syncing() { + for count_insertions in [true, false] { + let mut db = make_db(&[]); + let mut pruning: RefWindow = + RefWindow::new(db.clone(), DEFAULT_MAX_BLOCK_CONSTRAINT, count_insertions).unwrap(); + let block = 10000; + + // import blocks + let mut commit = make_commit(&[], &[]); + pruning.note_canonical(&block, block, &mut commit).unwrap(); + push_last_canonicalized(block, &mut commit); + db.commit(&commit); + + // load a new queue from db + // `cache` should be the same + let pruning: RefWindow = + RefWindow::new(db, DEFAULT_MAX_BLOCK_CONSTRAINT, count_insertions).unwrap(); + + assert_eq!(HaveBlock::Yes, pruning.have_block(&block, block)); + } + } } -- GitLab From 03ee44d9e12ba1a88b4e55757b402039d80bed2e Mon Sep 17 00:00:00 2001 From: Lulu Date: Thu, 9 Nov 2023 23:27:08 +0100 Subject: [PATCH 023/199] Add license to tracking-allocator and add staging-prefix (#2261) The staging- rename commit was missing from the last PR for some reason. --- Cargo.lock | 10 +++++----- polkadot/node/core/pvf/prepare-worker/Cargo.toml | 2 +- polkadot/node/tracking-allocator/Cargo.toml | 2 +- 3 files changed, 7 insertions(+), 7 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 54fa7c6485b..5c1bc13b72f 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -12545,10 +12545,10 @@ dependencies = [ "sc-executor-common", "sc-executor-wasmtime", "sp-maybe-compressed-blob", + "staging-tracking-allocator", "tikv-jemalloc-ctl", "tikv-jemallocator", "tracing-gum", - "tracking-allocator", ] [[package]] @@ -18328,6 +18328,10 @@ dependencies = [ "sp-std 8.0.0", ] +[[package]] +name = "staging-tracking-allocator" +version = "1.0.0" + [[package]] name = "staging-xcm" version = "1.0.0" @@ -19622,10 +19626,6 @@ dependencies = [ "tracing-serde", ] -[[package]] -name = "tracking-allocator" -version = "1.0.0" - [[package]] name = "trie-bench" version = "0.38.0" diff --git a/polkadot/node/core/pvf/prepare-worker/Cargo.toml b/polkadot/node/core/pvf/prepare-worker/Cargo.toml index dae88afc555..e21583ecc8b 100644 --- a/polkadot/node/core/pvf/prepare-worker/Cargo.toml +++ b/polkadot/node/core/pvf/prepare-worker/Cargo.toml @@ -11,7 +11,7 @@ cfg-if = "1.0" gum = { package = "tracing-gum", path = "../../../gum" } libc = "0.2.139" rayon = "1.5.1" -tracking-allocator = { path = "../../../tracking-allocator" } +tracking-allocator = { package = "staging-tracking-allocator", path = "../../../tracking-allocator" } tikv-jemalloc-ctl = { version = "0.5.0", optional = true } tikv-jemallocator = { version = "0.5.0", optional = true } diff --git a/polkadot/node/tracking-allocator/Cargo.toml b/polkadot/node/tracking-allocator/Cargo.toml index a7842a3e62e..b1b33091344 100644 --- a/polkadot/node/tracking-allocator/Cargo.toml +++ b/polkadot/node/tracking-allocator/Cargo.toml @@ -1,5 +1,5 @@ [package] -name = "tracking-allocator" +name = "staging-tracking-allocator" description = "Tracking allocator to control the amount of memory consumed by the process" version = "1.0.0" authors.workspace = true -- GitLab From 64effd0e6f49f0c935af2a7790b230e46d46d4f8 Mon Sep 17 00:00:00 2001 From: PG Herveou 
Date: Fri, 10 Nov 2023 12:22:47 +0100 Subject: [PATCH 024/199] Contracts move fixtures to new crate (#2246) Small PR that introduce a new crate that will host RISC-V & wasm fixtures for testing pallet-contracts --- Cargo.lock | 10 +++++ Cargo.toml | 1 + substrate/frame/contracts/Cargo.toml | 2 + substrate/frame/contracts/fixtures/Cargo.toml | 18 ++++++++ .../account_reentrance_count_call.wat | 0 .../add_remove_delegate_dependency.wat | 0 .../contracts/fixtures/{ => data}/balance.wat | 0 .../contracts/fixtures/{ => data}/call.wat | 0 .../fixtures/{ => data}/call_return_code.wat | 0 .../fixtures/{ => data}/call_runtime.wat | 0 .../{ => data}/call_runtime_and_call.wat | 0 .../fixtures/{ => data}/call_with_limit.wat | 0 .../fixtures/{ => data}/caller_contract.wat | 0 .../fixtures/{ => data}/chain_extension.wat | 0 .../chain_extension_temp_storage.wat | 0 .../{ => data}/create_storage_and_call.wat | 0 .../create_storage_and_instantiate.wat | 0 .../fixtures/{ => data}/crypto_hashes.wat | 0 .../{ => data}/debug_message_invalid_utf8.wat | 0 .../debug_message_logging_disabled.wat | 0 .../{ => data}/debug_message_works.wat | 0 .../fixtures/{ => data}/delegate_call.wat | 0 .../fixtures/{ => data}/delegate_call_lib.wat | 0 .../{ => data}/delegate_call_simple.wat | 0 .../{ => data}/destroy_and_transfer.wat | 0 .../contracts/fixtures/{ => data}/drain.wat | 0 .../contracts/fixtures/{ => data}/dummy.wat | 0 .../fixtures/{ => data}/ecdsa_recover.wat | 0 .../{ => data}/event_and_return_on_deploy.wat | 0 .../fixtures/{ => data}/event_size.wat | 0 .../fixtures/{ => data}/float_instruction.wat | 0 .../{ => data}/instantiate_return_code.wat | 0 .../{ => data}/invalid_contract_no_call.wat | 0 .../{ => data}/invalid_contract_no_memory.wat | 0 .../fixtures/{ => data}/invalid_module.wat | 0 .../fixtures/{ => data}/multi_store.wat | 0 .../{ => data}/new_set_code_hash_contract.wat | 0 .../fixtures/{ => data}/ok_trap_revert.wat | 0 .../{ => data}/reentrance_count_call.wat | 0 .../reentrance_count_delegated_call.wat | 0 .../fixtures/{ => data}/return_with_data.wat | 0 .../fixtures/{ => data}/run_out_of_gas.wat | 0 .../{ => data}/run_out_of_gas_start_fn.wat | 0 .../fixtures/{ => data}/seal_input_noop.wat | 0 .../fixtures/{ => data}/seal_input_once.wat | 0 .../fixtures/{ => data}/seal_input_twice.wat | 0 .../fixtures/{ => data}/self_destruct.wat | 0 .../self_destructing_constructor.wat | 0 .../fixtures/{ => data}/set_code_hash.wat | 0 .../fixtures/{ => data}/set_empty_storage.wat | 0 .../fixtures/{ => data}/sr25519_verify.wat | 0 .../fixtures/{ => data}/storage_size.wat | 0 .../fixtures/{ => data}/store_call.wat | 0 .../fixtures/{ => data}/store_deploy.wat | 0 .../{ => data}/transfer_return_code.wat | 0 substrate/frame/contracts/fixtures/src/lib.rs | 42 +++++++++++++++++++ substrate/frame/contracts/src/tests.rs | 24 +---------- 57 files changed, 74 insertions(+), 23 deletions(-) create mode 100644 substrate/frame/contracts/fixtures/Cargo.toml rename substrate/frame/contracts/fixtures/{ => data}/account_reentrance_count_call.wat (100%) rename substrate/frame/contracts/fixtures/{ => data}/add_remove_delegate_dependency.wat (100%) rename substrate/frame/contracts/fixtures/{ => data}/balance.wat (100%) rename substrate/frame/contracts/fixtures/{ => data}/call.wat (100%) rename substrate/frame/contracts/fixtures/{ => data}/call_return_code.wat (100%) rename substrate/frame/contracts/fixtures/{ => data}/call_runtime.wat (100%) rename substrate/frame/contracts/fixtures/{ => data}/call_runtime_and_call.wat (100%) rename 
substrate/frame/contracts/fixtures/{ => data}/call_with_limit.wat (100%) rename substrate/frame/contracts/fixtures/{ => data}/caller_contract.wat (100%) rename substrate/frame/contracts/fixtures/{ => data}/chain_extension.wat (100%) rename substrate/frame/contracts/fixtures/{ => data}/chain_extension_temp_storage.wat (100%) rename substrate/frame/contracts/fixtures/{ => data}/create_storage_and_call.wat (100%) rename substrate/frame/contracts/fixtures/{ => data}/create_storage_and_instantiate.wat (100%) rename substrate/frame/contracts/fixtures/{ => data}/crypto_hashes.wat (100%) rename substrate/frame/contracts/fixtures/{ => data}/debug_message_invalid_utf8.wat (100%) rename substrate/frame/contracts/fixtures/{ => data}/debug_message_logging_disabled.wat (100%) rename substrate/frame/contracts/fixtures/{ => data}/debug_message_works.wat (100%) rename substrate/frame/contracts/fixtures/{ => data}/delegate_call.wat (100%) rename substrate/frame/contracts/fixtures/{ => data}/delegate_call_lib.wat (100%) rename substrate/frame/contracts/fixtures/{ => data}/delegate_call_simple.wat (100%) rename substrate/frame/contracts/fixtures/{ => data}/destroy_and_transfer.wat (100%) rename substrate/frame/contracts/fixtures/{ => data}/drain.wat (100%) rename substrate/frame/contracts/fixtures/{ => data}/dummy.wat (100%) rename substrate/frame/contracts/fixtures/{ => data}/ecdsa_recover.wat (100%) rename substrate/frame/contracts/fixtures/{ => data}/event_and_return_on_deploy.wat (100%) rename substrate/frame/contracts/fixtures/{ => data}/event_size.wat (100%) rename substrate/frame/contracts/fixtures/{ => data}/float_instruction.wat (100%) rename substrate/frame/contracts/fixtures/{ => data}/instantiate_return_code.wat (100%) rename substrate/frame/contracts/fixtures/{ => data}/invalid_contract_no_call.wat (100%) rename substrate/frame/contracts/fixtures/{ => data}/invalid_contract_no_memory.wat (100%) rename substrate/frame/contracts/fixtures/{ => data}/invalid_module.wat (100%) rename substrate/frame/contracts/fixtures/{ => data}/multi_store.wat (100%) rename substrate/frame/contracts/fixtures/{ => data}/new_set_code_hash_contract.wat (100%) rename substrate/frame/contracts/fixtures/{ => data}/ok_trap_revert.wat (100%) rename substrate/frame/contracts/fixtures/{ => data}/reentrance_count_call.wat (100%) rename substrate/frame/contracts/fixtures/{ => data}/reentrance_count_delegated_call.wat (100%) rename substrate/frame/contracts/fixtures/{ => data}/return_with_data.wat (100%) rename substrate/frame/contracts/fixtures/{ => data}/run_out_of_gas.wat (100%) rename substrate/frame/contracts/fixtures/{ => data}/run_out_of_gas_start_fn.wat (100%) rename substrate/frame/contracts/fixtures/{ => data}/seal_input_noop.wat (100%) rename substrate/frame/contracts/fixtures/{ => data}/seal_input_once.wat (100%) rename substrate/frame/contracts/fixtures/{ => data}/seal_input_twice.wat (100%) rename substrate/frame/contracts/fixtures/{ => data}/self_destruct.wat (100%) rename substrate/frame/contracts/fixtures/{ => data}/self_destructing_constructor.wat (100%) rename substrate/frame/contracts/fixtures/{ => data}/set_code_hash.wat (100%) rename substrate/frame/contracts/fixtures/{ => data}/set_empty_storage.wat (100%) rename substrate/frame/contracts/fixtures/{ => data}/sr25519_verify.wat (100%) rename substrate/frame/contracts/fixtures/{ => data}/storage_size.wat (100%) rename substrate/frame/contracts/fixtures/{ => data}/store_call.wat (100%) rename substrate/frame/contracts/fixtures/{ => data}/store_deploy.wat 
(100%) rename substrate/frame/contracts/fixtures/{ => data}/transfer_return_code.wat (100%) create mode 100644 substrate/frame/contracts/fixtures/src/lib.rs diff --git a/Cargo.lock b/Cargo.lock index 5c1bc13b72f..2a091ce6817 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -9701,6 +9701,7 @@ dependencies = [ "impl-trait-for-tuples", "log", "pallet-balances", + "pallet-contracts-fixtures", "pallet-contracts-primitives", "pallet-contracts-proc-macro", "pallet-insecure-randomness-collective-flip", @@ -9725,6 +9726,15 @@ dependencies = [ "wat", ] +[[package]] +name = "pallet-contracts-fixtures" +version = "1.0.0" +dependencies = [ + "frame-system", + "sp-runtime", + "wat", +] + [[package]] name = "pallet-contracts-primitives" version = "24.0.0" diff --git a/Cargo.toml b/Cargo.toml index 3b3469e5483..42bbac37a6c 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -293,6 +293,7 @@ members = [ "substrate/frame/child-bounties", "substrate/frame/collective", "substrate/frame/contracts", + "substrate/frame/contracts/fixtures", "substrate/frame/contracts/primitives", "substrate/frame/contracts/proc-macro", "substrate/frame/conviction-voting", diff --git a/substrate/frame/contracts/Cargo.toml b/substrate/frame/contracts/Cargo.toml index d80577f9d44..0eb50c2b0ba 100644 --- a/substrate/frame/contracts/Cargo.toml +++ b/substrate/frame/contracts/Cargo.toml @@ -54,6 +54,7 @@ assert_matches = "1" env_logger = "0.9" pretty_assertions = "1" wat = "1" +pallet-contracts-fixtures = { path = "./fixtures" } # Substrate Dependencies pallet-balances = { path = "../balances" } @@ -73,6 +74,7 @@ std = [ "frame-system/std", "log/std", "pallet-balances?/std", + "pallet-contracts-fixtures/std", "pallet-contracts-primitives/std", "pallet-contracts-proc-macro/full", "pallet-insecure-randomness-collective-flip/std", diff --git a/substrate/frame/contracts/fixtures/Cargo.toml b/substrate/frame/contracts/fixtures/Cargo.toml new file mode 100644 index 00000000000..b44f36f2a5f --- /dev/null +++ b/substrate/frame/contracts/fixtures/Cargo.toml @@ -0,0 +1,18 @@ +[package] +name = "pallet-contracts-fixtures" +publish = false +version = "1.0.0" +authors.workspace = true +edition.workspace = true +license.workspace = true +description = "Fixtures for testing contracts pallet." 
+ +[dependencies] +wat = "1" +frame-system = { path = "../../system", default-features = false} +sp-runtime = { path = "../../../primitives/runtime", default-features = false} + +[features] +default = [ "std" ] +std = [ "frame-system/std", "sp-runtime/std" ] + diff --git a/substrate/frame/contracts/fixtures/account_reentrance_count_call.wat b/substrate/frame/contracts/fixtures/data/account_reentrance_count_call.wat similarity index 100% rename from substrate/frame/contracts/fixtures/account_reentrance_count_call.wat rename to substrate/frame/contracts/fixtures/data/account_reentrance_count_call.wat diff --git a/substrate/frame/contracts/fixtures/add_remove_delegate_dependency.wat b/substrate/frame/contracts/fixtures/data/add_remove_delegate_dependency.wat similarity index 100% rename from substrate/frame/contracts/fixtures/add_remove_delegate_dependency.wat rename to substrate/frame/contracts/fixtures/data/add_remove_delegate_dependency.wat diff --git a/substrate/frame/contracts/fixtures/balance.wat b/substrate/frame/contracts/fixtures/data/balance.wat similarity index 100% rename from substrate/frame/contracts/fixtures/balance.wat rename to substrate/frame/contracts/fixtures/data/balance.wat diff --git a/substrate/frame/contracts/fixtures/call.wat b/substrate/frame/contracts/fixtures/data/call.wat similarity index 100% rename from substrate/frame/contracts/fixtures/call.wat rename to substrate/frame/contracts/fixtures/data/call.wat diff --git a/substrate/frame/contracts/fixtures/call_return_code.wat b/substrate/frame/contracts/fixtures/data/call_return_code.wat similarity index 100% rename from substrate/frame/contracts/fixtures/call_return_code.wat rename to substrate/frame/contracts/fixtures/data/call_return_code.wat diff --git a/substrate/frame/contracts/fixtures/call_runtime.wat b/substrate/frame/contracts/fixtures/data/call_runtime.wat similarity index 100% rename from substrate/frame/contracts/fixtures/call_runtime.wat rename to substrate/frame/contracts/fixtures/data/call_runtime.wat diff --git a/substrate/frame/contracts/fixtures/call_runtime_and_call.wat b/substrate/frame/contracts/fixtures/data/call_runtime_and_call.wat similarity index 100% rename from substrate/frame/contracts/fixtures/call_runtime_and_call.wat rename to substrate/frame/contracts/fixtures/data/call_runtime_and_call.wat diff --git a/substrate/frame/contracts/fixtures/call_with_limit.wat b/substrate/frame/contracts/fixtures/data/call_with_limit.wat similarity index 100% rename from substrate/frame/contracts/fixtures/call_with_limit.wat rename to substrate/frame/contracts/fixtures/data/call_with_limit.wat diff --git a/substrate/frame/contracts/fixtures/caller_contract.wat b/substrate/frame/contracts/fixtures/data/caller_contract.wat similarity index 100% rename from substrate/frame/contracts/fixtures/caller_contract.wat rename to substrate/frame/contracts/fixtures/data/caller_contract.wat diff --git a/substrate/frame/contracts/fixtures/chain_extension.wat b/substrate/frame/contracts/fixtures/data/chain_extension.wat similarity index 100% rename from substrate/frame/contracts/fixtures/chain_extension.wat rename to substrate/frame/contracts/fixtures/data/chain_extension.wat diff --git a/substrate/frame/contracts/fixtures/chain_extension_temp_storage.wat b/substrate/frame/contracts/fixtures/data/chain_extension_temp_storage.wat similarity index 100% rename from substrate/frame/contracts/fixtures/chain_extension_temp_storage.wat rename to substrate/frame/contracts/fixtures/data/chain_extension_temp_storage.wat diff 
--git a/substrate/frame/contracts/fixtures/create_storage_and_call.wat b/substrate/frame/contracts/fixtures/data/create_storage_and_call.wat similarity index 100% rename from substrate/frame/contracts/fixtures/create_storage_and_call.wat rename to substrate/frame/contracts/fixtures/data/create_storage_and_call.wat diff --git a/substrate/frame/contracts/fixtures/create_storage_and_instantiate.wat b/substrate/frame/contracts/fixtures/data/create_storage_and_instantiate.wat similarity index 100% rename from substrate/frame/contracts/fixtures/create_storage_and_instantiate.wat rename to substrate/frame/contracts/fixtures/data/create_storage_and_instantiate.wat diff --git a/substrate/frame/contracts/fixtures/crypto_hashes.wat b/substrate/frame/contracts/fixtures/data/crypto_hashes.wat similarity index 100% rename from substrate/frame/contracts/fixtures/crypto_hashes.wat rename to substrate/frame/contracts/fixtures/data/crypto_hashes.wat diff --git a/substrate/frame/contracts/fixtures/debug_message_invalid_utf8.wat b/substrate/frame/contracts/fixtures/data/debug_message_invalid_utf8.wat similarity index 100% rename from substrate/frame/contracts/fixtures/debug_message_invalid_utf8.wat rename to substrate/frame/contracts/fixtures/data/debug_message_invalid_utf8.wat diff --git a/substrate/frame/contracts/fixtures/debug_message_logging_disabled.wat b/substrate/frame/contracts/fixtures/data/debug_message_logging_disabled.wat similarity index 100% rename from substrate/frame/contracts/fixtures/debug_message_logging_disabled.wat rename to substrate/frame/contracts/fixtures/data/debug_message_logging_disabled.wat diff --git a/substrate/frame/contracts/fixtures/debug_message_works.wat b/substrate/frame/contracts/fixtures/data/debug_message_works.wat similarity index 100% rename from substrate/frame/contracts/fixtures/debug_message_works.wat rename to substrate/frame/contracts/fixtures/data/debug_message_works.wat diff --git a/substrate/frame/contracts/fixtures/delegate_call.wat b/substrate/frame/contracts/fixtures/data/delegate_call.wat similarity index 100% rename from substrate/frame/contracts/fixtures/delegate_call.wat rename to substrate/frame/contracts/fixtures/data/delegate_call.wat diff --git a/substrate/frame/contracts/fixtures/delegate_call_lib.wat b/substrate/frame/contracts/fixtures/data/delegate_call_lib.wat similarity index 100% rename from substrate/frame/contracts/fixtures/delegate_call_lib.wat rename to substrate/frame/contracts/fixtures/data/delegate_call_lib.wat diff --git a/substrate/frame/contracts/fixtures/delegate_call_simple.wat b/substrate/frame/contracts/fixtures/data/delegate_call_simple.wat similarity index 100% rename from substrate/frame/contracts/fixtures/delegate_call_simple.wat rename to substrate/frame/contracts/fixtures/data/delegate_call_simple.wat diff --git a/substrate/frame/contracts/fixtures/destroy_and_transfer.wat b/substrate/frame/contracts/fixtures/data/destroy_and_transfer.wat similarity index 100% rename from substrate/frame/contracts/fixtures/destroy_and_transfer.wat rename to substrate/frame/contracts/fixtures/data/destroy_and_transfer.wat diff --git a/substrate/frame/contracts/fixtures/drain.wat b/substrate/frame/contracts/fixtures/data/drain.wat similarity index 100% rename from substrate/frame/contracts/fixtures/drain.wat rename to substrate/frame/contracts/fixtures/data/drain.wat diff --git a/substrate/frame/contracts/fixtures/dummy.wat b/substrate/frame/contracts/fixtures/data/dummy.wat similarity index 100% rename from 
substrate/frame/contracts/fixtures/dummy.wat rename to substrate/frame/contracts/fixtures/data/dummy.wat diff --git a/substrate/frame/contracts/fixtures/ecdsa_recover.wat b/substrate/frame/contracts/fixtures/data/ecdsa_recover.wat similarity index 100% rename from substrate/frame/contracts/fixtures/ecdsa_recover.wat rename to substrate/frame/contracts/fixtures/data/ecdsa_recover.wat diff --git a/substrate/frame/contracts/fixtures/event_and_return_on_deploy.wat b/substrate/frame/contracts/fixtures/data/event_and_return_on_deploy.wat similarity index 100% rename from substrate/frame/contracts/fixtures/event_and_return_on_deploy.wat rename to substrate/frame/contracts/fixtures/data/event_and_return_on_deploy.wat diff --git a/substrate/frame/contracts/fixtures/event_size.wat b/substrate/frame/contracts/fixtures/data/event_size.wat similarity index 100% rename from substrate/frame/contracts/fixtures/event_size.wat rename to substrate/frame/contracts/fixtures/data/event_size.wat diff --git a/substrate/frame/contracts/fixtures/float_instruction.wat b/substrate/frame/contracts/fixtures/data/float_instruction.wat similarity index 100% rename from substrate/frame/contracts/fixtures/float_instruction.wat rename to substrate/frame/contracts/fixtures/data/float_instruction.wat diff --git a/substrate/frame/contracts/fixtures/instantiate_return_code.wat b/substrate/frame/contracts/fixtures/data/instantiate_return_code.wat similarity index 100% rename from substrate/frame/contracts/fixtures/instantiate_return_code.wat rename to substrate/frame/contracts/fixtures/data/instantiate_return_code.wat diff --git a/substrate/frame/contracts/fixtures/invalid_contract_no_call.wat b/substrate/frame/contracts/fixtures/data/invalid_contract_no_call.wat similarity index 100% rename from substrate/frame/contracts/fixtures/invalid_contract_no_call.wat rename to substrate/frame/contracts/fixtures/data/invalid_contract_no_call.wat diff --git a/substrate/frame/contracts/fixtures/invalid_contract_no_memory.wat b/substrate/frame/contracts/fixtures/data/invalid_contract_no_memory.wat similarity index 100% rename from substrate/frame/contracts/fixtures/invalid_contract_no_memory.wat rename to substrate/frame/contracts/fixtures/data/invalid_contract_no_memory.wat diff --git a/substrate/frame/contracts/fixtures/invalid_module.wat b/substrate/frame/contracts/fixtures/data/invalid_module.wat similarity index 100% rename from substrate/frame/contracts/fixtures/invalid_module.wat rename to substrate/frame/contracts/fixtures/data/invalid_module.wat diff --git a/substrate/frame/contracts/fixtures/multi_store.wat b/substrate/frame/contracts/fixtures/data/multi_store.wat similarity index 100% rename from substrate/frame/contracts/fixtures/multi_store.wat rename to substrate/frame/contracts/fixtures/data/multi_store.wat diff --git a/substrate/frame/contracts/fixtures/new_set_code_hash_contract.wat b/substrate/frame/contracts/fixtures/data/new_set_code_hash_contract.wat similarity index 100% rename from substrate/frame/contracts/fixtures/new_set_code_hash_contract.wat rename to substrate/frame/contracts/fixtures/data/new_set_code_hash_contract.wat diff --git a/substrate/frame/contracts/fixtures/ok_trap_revert.wat b/substrate/frame/contracts/fixtures/data/ok_trap_revert.wat similarity index 100% rename from substrate/frame/contracts/fixtures/ok_trap_revert.wat rename to substrate/frame/contracts/fixtures/data/ok_trap_revert.wat diff --git a/substrate/frame/contracts/fixtures/reentrance_count_call.wat 
b/substrate/frame/contracts/fixtures/data/reentrance_count_call.wat similarity index 100% rename from substrate/frame/contracts/fixtures/reentrance_count_call.wat rename to substrate/frame/contracts/fixtures/data/reentrance_count_call.wat diff --git a/substrate/frame/contracts/fixtures/reentrance_count_delegated_call.wat b/substrate/frame/contracts/fixtures/data/reentrance_count_delegated_call.wat similarity index 100% rename from substrate/frame/contracts/fixtures/reentrance_count_delegated_call.wat rename to substrate/frame/contracts/fixtures/data/reentrance_count_delegated_call.wat diff --git a/substrate/frame/contracts/fixtures/return_with_data.wat b/substrate/frame/contracts/fixtures/data/return_with_data.wat similarity index 100% rename from substrate/frame/contracts/fixtures/return_with_data.wat rename to substrate/frame/contracts/fixtures/data/return_with_data.wat diff --git a/substrate/frame/contracts/fixtures/run_out_of_gas.wat b/substrate/frame/contracts/fixtures/data/run_out_of_gas.wat similarity index 100% rename from substrate/frame/contracts/fixtures/run_out_of_gas.wat rename to substrate/frame/contracts/fixtures/data/run_out_of_gas.wat diff --git a/substrate/frame/contracts/fixtures/run_out_of_gas_start_fn.wat b/substrate/frame/contracts/fixtures/data/run_out_of_gas_start_fn.wat similarity index 100% rename from substrate/frame/contracts/fixtures/run_out_of_gas_start_fn.wat rename to substrate/frame/contracts/fixtures/data/run_out_of_gas_start_fn.wat diff --git a/substrate/frame/contracts/fixtures/seal_input_noop.wat b/substrate/frame/contracts/fixtures/data/seal_input_noop.wat similarity index 100% rename from substrate/frame/contracts/fixtures/seal_input_noop.wat rename to substrate/frame/contracts/fixtures/data/seal_input_noop.wat diff --git a/substrate/frame/contracts/fixtures/seal_input_once.wat b/substrate/frame/contracts/fixtures/data/seal_input_once.wat similarity index 100% rename from substrate/frame/contracts/fixtures/seal_input_once.wat rename to substrate/frame/contracts/fixtures/data/seal_input_once.wat diff --git a/substrate/frame/contracts/fixtures/seal_input_twice.wat b/substrate/frame/contracts/fixtures/data/seal_input_twice.wat similarity index 100% rename from substrate/frame/contracts/fixtures/seal_input_twice.wat rename to substrate/frame/contracts/fixtures/data/seal_input_twice.wat diff --git a/substrate/frame/contracts/fixtures/self_destruct.wat b/substrate/frame/contracts/fixtures/data/self_destruct.wat similarity index 100% rename from substrate/frame/contracts/fixtures/self_destruct.wat rename to substrate/frame/contracts/fixtures/data/self_destruct.wat diff --git a/substrate/frame/contracts/fixtures/self_destructing_constructor.wat b/substrate/frame/contracts/fixtures/data/self_destructing_constructor.wat similarity index 100% rename from substrate/frame/contracts/fixtures/self_destructing_constructor.wat rename to substrate/frame/contracts/fixtures/data/self_destructing_constructor.wat diff --git a/substrate/frame/contracts/fixtures/set_code_hash.wat b/substrate/frame/contracts/fixtures/data/set_code_hash.wat similarity index 100% rename from substrate/frame/contracts/fixtures/set_code_hash.wat rename to substrate/frame/contracts/fixtures/data/set_code_hash.wat diff --git a/substrate/frame/contracts/fixtures/set_empty_storage.wat b/substrate/frame/contracts/fixtures/data/set_empty_storage.wat similarity index 100% rename from substrate/frame/contracts/fixtures/set_empty_storage.wat rename to 
substrate/frame/contracts/fixtures/data/set_empty_storage.wat diff --git a/substrate/frame/contracts/fixtures/sr25519_verify.wat b/substrate/frame/contracts/fixtures/data/sr25519_verify.wat similarity index 100% rename from substrate/frame/contracts/fixtures/sr25519_verify.wat rename to substrate/frame/contracts/fixtures/data/sr25519_verify.wat diff --git a/substrate/frame/contracts/fixtures/storage_size.wat b/substrate/frame/contracts/fixtures/data/storage_size.wat similarity index 100% rename from substrate/frame/contracts/fixtures/storage_size.wat rename to substrate/frame/contracts/fixtures/data/storage_size.wat diff --git a/substrate/frame/contracts/fixtures/store_call.wat b/substrate/frame/contracts/fixtures/data/store_call.wat similarity index 100% rename from substrate/frame/contracts/fixtures/store_call.wat rename to substrate/frame/contracts/fixtures/data/store_call.wat diff --git a/substrate/frame/contracts/fixtures/store_deploy.wat b/substrate/frame/contracts/fixtures/data/store_deploy.wat similarity index 100% rename from substrate/frame/contracts/fixtures/store_deploy.wat rename to substrate/frame/contracts/fixtures/data/store_deploy.wat diff --git a/substrate/frame/contracts/fixtures/transfer_return_code.wat b/substrate/frame/contracts/fixtures/data/transfer_return_code.wat similarity index 100% rename from substrate/frame/contracts/fixtures/transfer_return_code.wat rename to substrate/frame/contracts/fixtures/data/transfer_return_code.wat diff --git a/substrate/frame/contracts/fixtures/src/lib.rs b/substrate/frame/contracts/fixtures/src/lib.rs new file mode 100644 index 00000000000..32f4023e644 --- /dev/null +++ b/substrate/frame/contracts/fixtures/src/lib.rs @@ -0,0 +1,42 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use sp_runtime::traits::Hash; +use std::{env::var, path::PathBuf}; + +fn fixtures_root_dir() -> PathBuf { + match (var("CARGO_MANIFEST_DIR"), var("CARGO_PKG_NAME")) { + // When `CARGO_MANIFEST_DIR` is not set, Rust resolves relative paths from the root folder + (Err(_), _) => "substrate/frame/contracts/fixtures/data".into(), + (Ok(path), Ok(s)) if s == "pallet-contracts" => PathBuf::from(path).join("fixtures/data"), + (Ok(_), pkg_name) => panic!("Failed to resolve fixture dir for tests from {pkg_name:?}."), + } +} + +/// Load a given wasm module represented by a .wat file and returns a wasm binary contents along +/// with it's hash. +/// +/// The fixture files are located under the `fixtures/` directory. 
+pub fn compile_module(fixture_name: &str) -> wat::Result<(Vec, ::Output)> +where + T: frame_system::Config, +{ + let fixture_path = fixtures_root_dir().join(format!("{fixture_name}.wat")); + let wasm_binary = wat::parse_file(fixture_path)?; + let code_hash = T::Hashing::hash(&wasm_binary); + Ok((wasm_binary, code_hash)) +} diff --git a/substrate/frame/contracts/src/tests.rs b/substrate/frame/contracts/src/tests.rs index 3cc5e7cf4a1..e7784b02b74 100644 --- a/substrate/frame/contracts/src/tests.rs +++ b/substrate/frame/contracts/src/tests.rs @@ -53,6 +53,7 @@ use frame_support::{ weights::{constants::WEIGHT_REF_TIME_PER_SECOND, Weight}, }; use frame_system::{EventRecord, Phase}; +use pallet_contracts_fixtures::compile_module; use pallet_contracts_primitives::CodeUploadReturnValue; use pretty_assertions::{assert_eq, assert_ne}; use sp_core::ByteArray; @@ -555,29 +556,6 @@ impl ExtBuilder { } } -/// Load a given wasm module represented by a .wat file and returns a wasm binary contents along -/// with it's hash. -/// -/// The fixture files are located under the `fixtures/` directory. -fn compile_module(fixture_name: &str) -> wat::Result<(Vec, ::Output)> -where - T: frame_system::Config, -{ - let fixture_path = [ - // When `CARGO_MANIFEST_DIR` is not set, Rust resolves relative paths from the root folder - std::env::var("CARGO_MANIFEST_DIR") - .as_deref() - .unwrap_or("substrate/frame/contracts"), - "/fixtures/", - fixture_name, - ".wat", - ] - .concat(); - let wasm_binary = wat::parse_file(fixture_path)?; - let code_hash = T::Hashing::hash(&wasm_binary); - Ok((wasm_binary, code_hash)) -} - fn initialize_block(number: u64) { System::reset_events(); System::initialize(&number, &[0u8; 32].into(), &Default::default()); -- GitLab From 3f0383a5b8b6861c0ee0c64a17bed78c07956ba5 Mon Sep 17 00:00:00 2001 From: Vincent Geddes Date: Fri, 10 Nov 2023 15:12:47 +0200 Subject: [PATCH 025/199] [pallet-message-queue] Implement impl_trait_for_tuples for QueuePausedQuery (#2227) These changes are required so that the bridgehub system runtimes can more easily be configured with multiple message processors. Example usage: ```rust use frame_support::traits::QueuePausedQuery; impl pallet_message_queue::Config for Runtime { type QueuePausedQuery = (A, B, C) } ``` --- substrate/frame/support/src/traits/messages.rs | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/substrate/frame/support/src/traits/messages.rs b/substrate/frame/support/src/traits/messages.rs index 654380dbb0a..58815b107c8 100644 --- a/substrate/frame/support/src/traits/messages.rs +++ b/substrate/frame/support/src/traits/messages.rs @@ -240,8 +240,14 @@ pub trait QueuePausedQuery { fn is_paused(origin: &Origin) -> bool; } -impl QueuePausedQuery for () { - fn is_paused(_: &Origin) -> bool { +#[impl_trait_for_tuples::impl_for_tuples(8)] +impl QueuePausedQuery for Tuple { + fn is_paused(origin: &Origin) -> bool { + for_tuples!( #( + if Tuple::is_paused(origin) { + return true; + } + )* ); false } } -- GitLab From 84ddbaf68445fed23a88a086ee37bdcdbcb7356a Mon Sep 17 00:00:00 2001 From: Liam Aharon Date: Fri, 10 Nov 2023 17:14:05 +0400 Subject: [PATCH 026/199] Improve `VersionedMigration` naming conventions (#2264) As suggested by @ggwpez (https://github.com/paritytech/polkadot-sdk/pull/2142#discussion_r1388145872), remove the `VersionChecked` prefix from version checked migrations (but leave `VersionUnchecked` prefixes) --------- Co-authored-by: command-bot <> --- .../common/src/assigned_slots/migration.rs | 10 +++++-----
.../common/src/paras_registrar/migration.rs | 15 +++++++-------- polkadot/runtime/rococo/src/lib.rs | 6 +++--- polkadot/runtime/westend/src/lib.rs | 4 ++-- polkadot/xcm/pallet-xcm/src/migration.rs | 4 ++-- substrate/frame/society/src/migrations.rs | 15 +++++++-------- substrate/frame/support/src/migrations.rs | 4 ++-- 7 files changed, 28 insertions(+), 30 deletions(-) diff --git a/polkadot/runtime/common/src/assigned_slots/migration.rs b/polkadot/runtime/common/src/assigned_slots/migration.rs index 0e88b27a1ff..ba3108c0aa3 100644 --- a/polkadot/runtime/common/src/assigned_slots/migration.rs +++ b/polkadot/runtime/common/src/assigned_slots/migration.rs @@ -25,8 +25,8 @@ use sp_std::vec::Vec; pub mod v1 { use super::*; - pub struct MigrateToV1(sp_std::marker::PhantomData); - impl OnRuntimeUpgrade for MigrateToV1 { + pub struct VersionUncheckedMigrateToV1(sp_std::marker::PhantomData); + impl OnRuntimeUpgrade for VersionUncheckedMigrateToV1 { #[cfg(feature = "try-runtime")] fn pre_upgrade() -> Result, sp_runtime::TryRuntimeError> { let onchain_version = Pallet::::on_chain_storage_version(); @@ -60,13 +60,13 @@ pub mod v1 { } } - /// [`MigrateToV1`] wrapped in a + /// [`VersionUncheckedMigrateToV1`] wrapped in a /// [`VersionedMigration`](frame_support::migrations::VersionedMigration), ensuring the /// migration is only performed when on-chain version is 0. - pub type VersionCheckedMigrateToV1 = frame_support::migrations::VersionedMigration< + pub type MigrateToV1 = frame_support::migrations::VersionedMigration< 0, 1, - MigrateToV1, + VersionUncheckedMigrateToV1, Pallet, ::DbWeight, >; diff --git a/polkadot/runtime/common/src/paras_registrar/migration.rs b/polkadot/runtime/common/src/paras_registrar/migration.rs index b767985489d..f977674a1e4 100644 --- a/polkadot/runtime/common/src/paras_registrar/migration.rs +++ b/polkadot/runtime/common/src/paras_registrar/migration.rs @@ -60,11 +60,10 @@ impl> OnRuntimeUpgrade } } -pub type VersionCheckedMigrateToV1 = - frame_support::migrations::VersionedMigration< - 0, - 1, - VersionUncheckedMigrateToV1, - super::Pallet, - ::DbWeight, - >; +pub type MigrateToV1 = frame_support::migrations::VersionedMigration< + 0, + 1, + VersionUncheckedMigrateToV1, + super::Pallet, + ::DbWeight, +>; diff --git a/polkadot/runtime/rococo/src/lib.rs b/polkadot/runtime/rococo/src/lib.rs index 4f542354346..697d22c311a 100644 --- a/polkadot/runtime/rococo/src/lib.rs +++ b/polkadot/runtime/rococo/src/lib.rs @@ -1472,14 +1472,14 @@ pub mod migrations { /// Unreleased migrations. 
Add new ones here: pub type Unreleased = ( - pallet_society::migrations::VersionCheckedMigrateToV2, + pallet_society::migrations::MigrateToV2, pallet_im_online::migration::v1::Migration, parachains_configuration::migration::v7::MigrateToV7, - assigned_slots::migration::v1::VersionCheckedMigrateToV1, + assigned_slots::migration::v1::MigrateToV1, parachains_scheduler::migration::v1::MigrateToV1, parachains_configuration::migration::v8::MigrateToV8, parachains_configuration::migration::v9::MigrateToV9, - paras_registrar::migration::VersionCheckedMigrateToV1, + paras_registrar::migration::MigrateToV1, pallet_referenda::migration::v1::MigrateV0ToV1, pallet_referenda::migration::v1::MigrateV0ToV1, diff --git a/polkadot/runtime/westend/src/lib.rs b/polkadot/runtime/westend/src/lib.rs index ec94973af4f..fe9ed22f437 100644 --- a/polkadot/runtime/westend/src/lib.rs +++ b/polkadot/runtime/westend/src/lib.rs @@ -1549,12 +1549,12 @@ pub mod migrations { pallet_im_online::migration::v1::Migration, parachains_configuration::migration::v7::MigrateToV7, pallet_staking::migrations::v14::MigrateToV14, - assigned_slots::migration::v1::VersionCheckedMigrateToV1, + assigned_slots::migration::v1::MigrateToV1, parachains_scheduler::migration::v1::MigrateToV1, parachains_configuration::migration::v8::MigrateToV8, UpgradeSessionKeys, parachains_configuration::migration::v9::MigrateToV9, - paras_registrar::migration::VersionCheckedMigrateToV1, + paras_registrar::migration::MigrateToV1, pallet_nomination_pools::migration::versioned_migrations::V5toV6, pallet_referenda::migration::v1::MigrateV0ToV1, pallet_nomination_pools::migration::versioned_migrations::V6ToV7, diff --git a/polkadot/xcm/pallet-xcm/src/migration.rs b/polkadot/xcm/pallet-xcm/src/migration.rs index ba3cdb5c51e..2793afcc910 100644 --- a/polkadot/xcm/pallet-xcm/src/migration.rs +++ b/polkadot/xcm/pallet-xcm/src/migration.rs @@ -31,7 +31,7 @@ pub mod v1 { /// checking, the version checking is not complete as it will begin failing after the upgrade is /// enacted on-chain. /// - /// Use experimental [`VersionCheckedMigrateToV1`] instead. + /// Use experimental [`MigrateToV1`] instead. pub struct VersionUncheckedMigrateToV1(sp_std::marker::PhantomData); impl OnRuntimeUpgrade for VersionUncheckedMigrateToV1 { fn on_runtime_upgrade() -> Weight { @@ -65,7 +65,7 @@ pub mod v1 { /// /// Wrapped in [`frame_support::migrations::VersionedMigration`] so the pre/post checks don't /// begin failing after the upgrade is enacted on-chain. - pub type VersionCheckedMigrateToV1 = frame_support::migrations::VersionedMigration< + pub type MigrateToV1 = frame_support::migrations::VersionedMigration< 0, 1, VersionUncheckedMigrateToV1, diff --git a/substrate/frame/society/src/migrations.rs b/substrate/frame/society/src/migrations.rs index 553eea1a795..a995c9d7be7 100644 --- a/substrate/frame/society/src/migrations.rs +++ b/substrate/frame/society/src/migrations.rs @@ -95,14 +95,13 @@ impl< /// [`VersionUncheckedMigrateToV2`] wrapped in a [`frame_support::migrations::VersionedMigration`], /// ensuring the migration is only performed when on-chain version is 0. 
-pub type VersionCheckedMigrateToV2 = - frame_support::migrations::VersionedMigration< - 0, - 2, - VersionUncheckedMigrateToV2, - crate::pallet::Pallet, - ::DbWeight, - >; +pub type MigrateToV2 = frame_support::migrations::VersionedMigration< + 0, + 2, + VersionUncheckedMigrateToV2, + crate::pallet::Pallet, + ::DbWeight, +>; pub(crate) mod old { use super::*; diff --git a/substrate/frame/support/src/migrations.rs b/substrate/frame/support/src/migrations.rs index 22471e883a7..a9eb460421f 100644 --- a/substrate/frame/support/src/migrations.rs +++ b/substrate/frame/support/src/migrations.rs @@ -51,7 +51,7 @@ use sp_std::marker::PhantomData; /// // OnRuntimeUpgrade implementation... /// } /// -/// pub type VersionCheckedMigrateV5ToV6 = +/// pub type MigrateV5ToV6 = /// VersionedMigration< /// 5, /// 6, @@ -63,7 +63,7 @@ use sp_std::marker::PhantomData; /// // Migrations tuple to pass to the Executive pallet: /// pub type Migrations = ( /// // other migrations... -/// VersionCheckedMigrateV5ToV6, +/// MigrateV5ToV6, /// // other migrations... /// ); /// ``` -- GitLab From 6b7be115fd523057d01cea46e9680a62a0b08d97 Mon Sep 17 00:00:00 2001 From: PG Herveou Date: Fri, 10 Nov 2023 16:38:24 +0100 Subject: [PATCH 027/199] Contracts: Add XCM traits to interface with contracts (#2086) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit We are introducing a new set of `XcmController` traits (final name yet to be determined). These traits are implemented by `pallet-xcm` and allow other pallets, such as `pallet_contracts`, to rely on these traits instead of tightly coupling them to `pallet-xcm`. Using only the existing Xcm traits would mean duplicating the logic from `pallet-xcm` in these other pallets, which we aim to avoid. Our objective is to ensure that when these APIs are called from `pallet-contracts`, they produce the exact same outcomes as if called directly from `pallet-xcm`. The other benefit is that we can also expose return values to `pallet-contracts` instead of just calling a `pallet-xcm` dispatchable and getting a `DispatchResult` back. See the traits integration in this PR https://github.com/paritytech/polkadot-sdk/pull/1248, where the traits are used as follows to define and implement the `pallet-contracts` Config. ```rs // Contracts config: pub trait Config: frame_system::Config { // ... /// A type that exposes XCM APIs, allowing contracts to interact with other parachains, and /// execute XCM programs. type Xcm: xcm_executor::traits::Controller< OriginFor, ::RuntimeCall, BlockNumberFor, >; } // implementation impl pallet_contracts::Config for Runtime { // ...
type Xcm = pallet_xcm::Pallet; } ``` --------- Co-authored-by: Alexander Theißen Co-authored-by: command-bot <> --- .../src/weights/pallet_xcm.rs | 187 ++++++---- .../src/weights/pallet_xcm.rs | 163 +++++---- .../src/weights/pallet_xcm.rs | 182 ++++++---- .../src/weights/pallet_xcm.rs | 161 +++++---- .../src/weights/pallet_xcm.rs | 151 +++++--- .../src/weights/pallet_xcm.rs | 159 +++++---- .../src/weights/pallet_xcm.rs | 163 +++++---- .../src/weights/pallet_xcm.rs | 27 ++ .../src/weights/pallet_xcm.rs | 215 ++++++----- .../runtime/rococo/src/weights/pallet_xcm.rs | 329 ++++++++--------- .../runtime/westend/src/weights/pallet_xcm.rs | 333 +++++++++--------- polkadot/xcm/pallet-xcm/Cargo.toml | 3 +- polkadot/xcm/pallet-xcm/src/benchmarking.rs | 27 ++ polkadot/xcm/pallet-xcm/src/lib.rs | 157 +++++++-- polkadot/xcm/xcm-builder/src/controller.rs | 187 ++++++++++ polkadot/xcm/xcm-builder/src/lib.rs | 6 + polkadot/xcm/xcm-builder/src/tests/mock.rs | 2 +- .../xcm-executor/src/traits/on_response.rs | 46 ++- prdoc/pr_2086.prdoc | 15 + 19 files changed, 1602 insertions(+), 911 deletions(-) create mode 100644 polkadot/xcm/xcm-builder/src/controller.rs create mode 100644 prdoc/pr_2086.prdoc diff --git a/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/pallet_xcm.rs b/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/pallet_xcm.rs index becfca7a891..1e4a723e10f 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/pallet_xcm.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/pallet_xcm.rs @@ -1,42 +1,41 @@ // Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 +// This file is part of Cumulus. -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . //! Autogenerated weights for `pallet_xcm` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-07-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-11-10, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-ynta1nyy-project-238-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! EXECUTION: ``, WASM-EXECUTION: `Compiled`, CHAIN: `Some("asset-hub-kusama-dev")`, DB CACHE: 1024 +//! HOSTNAME: `runner-yprdrvc7-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! 
WASM-EXECUTION: `Compiled`, CHAIN: `Some("asset-hub-kusama-dev")`, DB CACHE: 1024 // Executed Command: -// ./target/production/polkadot-parachain +// target/production/polkadot-parachain // benchmark // pallet -// --chain=asset-hub-kusama-dev -// --wasm-execution=compiled -// --pallet=pallet_xcm -// --no-storage-info -// --no-median-slopes -// --no-min-squares -// --extrinsic=* // --steps=50 // --repeat=20 -// --json -// --header=./file_header.txt -// --output=./parachains/runtimes/assets/asset-hub-kusama/src/weights/ +// --extrinsic=* +// --wasm-execution=compiled +// --heap-pages=4096 +// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json +// --pallet=pallet_xcm +// --chain=asset-hub-kusama-dev +// --header=./cumulus/file_header.txt +// --output=./cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/ #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -49,6 +48,8 @@ use core::marker::PhantomData; /// Weight functions for `pallet_xcm`. pub struct WeightInfo(PhantomData); impl pallet_xcm::WeightInfo for WeightInfo { + /// Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) + /// Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) @@ -61,12 +62,12 @@ impl pallet_xcm::WeightInfo for WeightInfo { /// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn send() -> Weight { // Proof Size summary in bytes: - // Measured: `109` - // Estimated: `3574` - // Minimum execution time: 30_015_000 picoseconds. - Weight::from_parts(30_576_000, 0) - .saturating_add(Weight::from_parts(0, 3574)) - .saturating_add(T::DbWeight::get().reads(5)) + // Measured: `145` + // Estimated: `3610` + // Minimum execution time: 25_043_000 picoseconds. + Weight::from_parts(25_670_000, 0) + .saturating_add(Weight::from_parts(0, 3610)) + .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(2)) } /// Storage: `ParachainInfo::ParachainId` (r:1 w:0) @@ -75,8 +76,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `1489` - // Minimum execution time: 24_785_000 picoseconds. - Weight::from_parts(25_097_000, 0) + // Minimum execution time: 18_893_000 picoseconds. + Weight::from_parts(19_261_000, 0) .saturating_add(Weight::from_parts(0, 1489)) .saturating_add(T::DbWeight::get().reads(1)) } @@ -86,8 +87,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `1489` - // Minimum execution time: 18_561_000 picoseconds. - Weight::from_parts(19_121_000, 0) + // Minimum execution time: 14_107_000 picoseconds. + Weight::from_parts(14_500_000, 0) .saturating_add(Weight::from_parts(0, 1489)) .saturating_add(T::DbWeight::get().reads(1)) } @@ -107,8 +108,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 9_298_000 picoseconds. - Weight::from_parts(9_721_000, 0) + // Minimum execution time: 7_175_000 picoseconds. 
+ Weight::from_parts(7_493_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -118,8 +119,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_912_000 picoseconds. - Weight::from_parts(3_262_000, 0) + // Minimum execution time: 2_162_000 picoseconds. + Weight::from_parts(2_278_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -127,6 +128,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { /// Proof: `PolkadotXcm::VersionNotifiers` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::QueryCounter` (r:1 w:1) /// Proof: `PolkadotXcm::QueryCounter` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) + /// Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) @@ -141,16 +144,18 @@ impl pallet_xcm::WeightInfo for WeightInfo { /// Proof: `PolkadotXcm::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`) fn force_subscribe_version_notify() -> Weight { // Proof Size summary in bytes: - // Measured: `109` - // Estimated: `3574` - // Minimum execution time: 35_127_000 picoseconds. - Weight::from_parts(36_317_000, 0) - .saturating_add(Weight::from_parts(0, 3574)) - .saturating_add(T::DbWeight::get().reads(7)) + // Measured: `145` + // Estimated: `3610` + // Minimum execution time: 29_144_000 picoseconds. + Weight::from_parts(30_134_000, 0) + .saturating_add(Weight::from_parts(0, 3610)) + .saturating_add(T::DbWeight::get().reads(8)) .saturating_add(T::DbWeight::get().writes(5)) } /// Storage: `PolkadotXcm::VersionNotifiers` (r:1 w:1) /// Proof: `PolkadotXcm::VersionNotifiers` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) + /// Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) @@ -165,12 +170,12 @@ impl pallet_xcm::WeightInfo for WeightInfo { /// Proof: `PolkadotXcm::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`) fn force_unsubscribe_version_notify() -> Weight { // Proof Size summary in bytes: - // Measured: `326` - // Estimated: `3791` - // Minimum execution time: 36_634_000 picoseconds. - Weight::from_parts(37_983_000, 0) - .saturating_add(Weight::from_parts(0, 3791)) - .saturating_add(T::DbWeight::get().reads(6)) + // Measured: `363` + // Estimated: `3828` + // Minimum execution time: 31_522_000 picoseconds. + Weight::from_parts(32_679_000, 0) + .saturating_add(Weight::from_parts(0, 3828)) + .saturating_add(T::DbWeight::get().reads(7)) .saturating_add(T::DbWeight::get().writes(4)) } /// Storage: `PolkadotXcm::XcmExecutionSuspended` (r:0 w:1) @@ -179,8 +184,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_940_000 picoseconds. 
- Weight::from_parts(3_085_000, 0) + // Minimum execution time: 2_338_000 picoseconds. + Weight::from_parts(2_494_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -190,8 +195,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `162` // Estimated: `11052` - // Minimum execution time: 17_400_000 picoseconds. - Weight::from_parts(17_759_000, 0) + // Minimum execution time: 17_315_000 picoseconds. + Weight::from_parts(17_787_000, 0) .saturating_add(Weight::from_parts(0, 11052)) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(2)) @@ -202,8 +207,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `166` // Estimated: `11056` - // Minimum execution time: 17_287_000 picoseconds. - Weight::from_parts(17_678_000, 0) + // Minimum execution time: 17_273_000 picoseconds. + Weight::from_parts(17_712_000, 0) .saturating_add(Weight::from_parts(0, 11056)) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(2)) @@ -214,13 +219,15 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `173` // Estimated: `13538` - // Minimum execution time: 18_941_000 picoseconds. - Weight::from_parts(19_285_000, 0) + // Minimum execution time: 18_395_000 picoseconds. + Weight::from_parts(19_095_000, 0) .saturating_add(Weight::from_parts(0, 13538)) .saturating_add(T::DbWeight::get().reads(5)) } /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:2 w:1) /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) + /// Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) @@ -233,12 +240,12 @@ impl pallet_xcm::WeightInfo for WeightInfo { /// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn notify_current_targets() -> Weight { // Proof Size summary in bytes: - // Measured: `176` - // Estimated: `6116` - // Minimum execution time: 32_668_000 picoseconds. - Weight::from_parts(33_533_000, 0) - .saturating_add(Weight::from_parts(0, 6116)) - .saturating_add(T::DbWeight::get().reads(7)) + // Measured: `212` + // Estimated: `6152` + // Minimum execution time: 27_343_000 picoseconds. + Weight::from_parts(28_068_000, 0) + .saturating_add(Weight::from_parts(0, 6152)) + .saturating_add(T::DbWeight::get().reads(8)) .saturating_add(T::DbWeight::get().writes(3)) } /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:3 w:0) @@ -247,8 +254,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `206` // Estimated: `8621` - // Minimum execution time: 9_182_000 picoseconds. - Weight::from_parts(9_498_000, 0) + // Minimum execution time: 9_156_000 picoseconds. + Weight::from_parts(9_552_000, 0) .saturating_add(Weight::from_parts(0, 8621)) .saturating_add(T::DbWeight::get().reads(3)) } @@ -258,14 +265,16 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `173` // Estimated: `11063` - // Minimum execution time: 17_519_000 picoseconds. 
- Weight::from_parts(17_943_000, 0) + // Minimum execution time: 17_454_000 picoseconds. + Weight::from_parts(17_831_000, 0) .saturating_add(Weight::from_parts(0, 11063)) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(2)) } /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:4 w:2) /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) + /// Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) @@ -278,12 +287,38 @@ impl pallet_xcm::WeightInfo for WeightInfo { /// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn migrate_and_notify_old_targets() -> Weight { // Proof Size summary in bytes: - // Measured: `179` - // Estimated: `11069` - // Minimum execution time: 38_680_000 picoseconds. - Weight::from_parts(39_984_000, 0) - .saturating_add(Weight::from_parts(0, 11069)) - .saturating_add(T::DbWeight::get().reads(9)) + // Measured: `215` + // Estimated: `11105` + // Minimum execution time: 34_299_000 picoseconds. + Weight::from_parts(35_156_000, 0) + .saturating_add(Weight::from_parts(0, 11105)) + .saturating_add(T::DbWeight::get().reads(10)) .saturating_add(T::DbWeight::get().writes(4)) } + /// Storage: `PolkadotXcm::QueryCounter` (r:1 w:1) + /// Proof: `PolkadotXcm::QueryCounter` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::Queries` (r:0 w:1) + /// Proof: `PolkadotXcm::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn new_query() -> Weight { + // Proof Size summary in bytes: + // Measured: `103` + // Estimated: `1588` + // Minimum execution time: 4_508_000 picoseconds. + Weight::from_parts(4_702_000, 0) + .saturating_add(Weight::from_parts(0, 1588)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `PolkadotXcm::Queries` (r:1 w:1) + /// Proof: `PolkadotXcm::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn take_response() -> Weight { + // Proof Size summary in bytes: + // Measured: `7740` + // Estimated: `11205` + // Minimum execution time: 26_557_000 picoseconds. + Weight::from_parts(26_980_000, 0) + .saturating_add(Weight::from_parts(0, 11205)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } } diff --git a/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/weights/pallet_xcm.rs b/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/weights/pallet_xcm.rs index 0d3fe0adb1b..27867e278ed 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/weights/pallet_xcm.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/weights/pallet_xcm.rs @@ -1,42 +1,41 @@ // Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 +// This file is part of Cumulus. -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . //! Autogenerated weights for `pallet_xcm` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-07-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-11-10, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-ynta1nyy-project-238-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! EXECUTION: ``, WASM-EXECUTION: `Compiled`, CHAIN: `Some("asset-hub-polkadot-dev")`, DB CACHE: 1024 +//! HOSTNAME: `runner-yprdrvc7-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("asset-hub-polkadot-dev")`, DB CACHE: 1024 // Executed Command: -// ./target/production/polkadot-parachain +// target/production/polkadot-parachain // benchmark // pallet -// --chain=asset-hub-polkadot-dev -// --wasm-execution=compiled -// --pallet=pallet_xcm -// --no-storage-info -// --no-median-slopes -// --no-min-squares -// --extrinsic=* // --steps=50 // --repeat=20 -// --json -// --header=./file_header.txt -// --output=./parachains/runtimes/assets/asset-hub-polkadot/src/weights/ +// --extrinsic=* +// --wasm-execution=compiled +// --heap-pages=4096 +// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json +// --pallet=pallet_xcm +// --chain=asset-hub-polkadot-dev +// --header=./cumulus/file_header.txt +// --output=./cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/weights/ #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -49,6 +48,8 @@ use core::marker::PhantomData; /// Weight functions for `pallet_xcm`. pub struct WeightInfo(PhantomData); impl pallet_xcm::WeightInfo for WeightInfo { + /// Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) + /// Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) @@ -63,10 +64,10 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `75` // Estimated: `3540` - // Minimum execution time: 28_284_000 picoseconds. - Weight::from_parts(29_186_000, 0) + // Minimum execution time: 25_203_000 picoseconds. 
+ Weight::from_parts(25_927_000, 0) .saturating_add(Weight::from_parts(0, 3540)) - .saturating_add(T::DbWeight::get().reads(5)) + .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(2)) } /// Storage: `ParachainInfo::ParachainId` (r:1 w:0) @@ -75,8 +76,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `1489` - // Minimum execution time: 24_830_000 picoseconds. - Weight::from_parts(26_312_000, 0) + // Minimum execution time: 20_113_000 picoseconds. + Weight::from_parts(20_439_000, 0) .saturating_add(Weight::from_parts(0, 1489)) .saturating_add(T::DbWeight::get().reads(1)) } @@ -86,8 +87,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `1489` - // Minimum execution time: 18_584_000 picoseconds. - Weight::from_parts(19_083_000, 0) + // Minimum execution time: 14_959_000 picoseconds. + Weight::from_parts(15_264_000, 0) .saturating_add(Weight::from_parts(0, 1489)) .saturating_add(T::DbWeight::get().reads(1)) } @@ -107,8 +108,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 9_415_000 picoseconds. - Weight::from_parts(9_821_000, 0) + // Minimum execution time: 7_399_000 picoseconds. + Weight::from_parts(7_674_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -118,8 +119,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_902_000 picoseconds. - Weight::from_parts(3_377_000, 0) + // Minimum execution time: 2_388_000 picoseconds. + Weight::from_parts(2_522_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -127,6 +128,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { /// Proof: `PolkadotXcm::VersionNotifiers` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::QueryCounter` (r:1 w:1) /// Proof: `PolkadotXcm::QueryCounter` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) + /// Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) @@ -143,14 +146,16 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `75` // Estimated: `3540` - // Minimum execution time: 32_730_000 picoseconds. - Weight::from_parts(33_879_000, 0) + // Minimum execution time: 28_791_000 picoseconds. 
+ Weight::from_parts(29_443_000, 0) .saturating_add(Weight::from_parts(0, 3540)) - .saturating_add(T::DbWeight::get().reads(7)) + .saturating_add(T::DbWeight::get().reads(8)) .saturating_add(T::DbWeight::get().writes(5)) } /// Storage: `PolkadotXcm::VersionNotifiers` (r:1 w:1) /// Proof: `PolkadotXcm::VersionNotifiers` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) + /// Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) @@ -165,12 +170,12 @@ impl pallet_xcm::WeightInfo for WeightInfo { /// Proof: `PolkadotXcm::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`) fn force_unsubscribe_version_notify() -> Weight { // Proof Size summary in bytes: - // Measured: `257` - // Estimated: `3722` - // Minimum execution time: 34_053_000 picoseconds. - Weight::from_parts(34_506_000, 0) - .saturating_add(Weight::from_parts(0, 3722)) - .saturating_add(T::DbWeight::get().reads(6)) + // Measured: `292` + // Estimated: `3757` + // Minimum execution time: 30_880_000 picoseconds. + Weight::from_parts(31_675_000, 0) + .saturating_add(Weight::from_parts(0, 3757)) + .saturating_add(T::DbWeight::get().reads(7)) .saturating_add(T::DbWeight::get().writes(4)) } /// Storage: `PolkadotXcm::XcmExecutionSuspended` (r:0 w:1) @@ -179,8 +184,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_824_000 picoseconds. - Weight::from_parts(2_986_000, 0) + // Minimum execution time: 2_365_000 picoseconds. + Weight::from_parts(2_550_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -190,8 +195,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `129` // Estimated: `11019` - // Minimum execution time: 17_011_000 picoseconds. - Weight::from_parts(17_488_000, 0) + // Minimum execution time: 17_185_000 picoseconds. + Weight::from_parts(17_680_000, 0) .saturating_add(Weight::from_parts(0, 11019)) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(2)) @@ -202,8 +207,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `133` // Estimated: `11023` - // Minimum execution time: 17_191_000 picoseconds. - Weight::from_parts(17_784_000, 0) + // Minimum execution time: 16_974_000 picoseconds. + Weight::from_parts(17_660_000, 0) .saturating_add(Weight::from_parts(0, 11023)) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(2)) @@ -214,13 +219,15 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `140` // Estimated: `13505` - // Minimum execution time: 18_625_000 picoseconds. - Weight::from_parts(19_177_000, 0) + // Minimum execution time: 18_536_000 picoseconds. 
+ Weight::from_parts(19_292_000, 0) .saturating_add(Weight::from_parts(0, 13505)) .saturating_add(T::DbWeight::get().reads(5)) } /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:2 w:1) /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) + /// Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) @@ -235,10 +242,10 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `142` // Estimated: `6082` - // Minimum execution time: 30_762_000 picoseconds. - Weight::from_parts(31_481_000, 0) + // Minimum execution time: 27_368_000 picoseconds. + Weight::from_parts(28_161_000, 0) .saturating_add(Weight::from_parts(0, 6082)) - .saturating_add(T::DbWeight::get().reads(7)) + .saturating_add(T::DbWeight::get().reads(8)) .saturating_add(T::DbWeight::get().writes(3)) } /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:3 w:0) @@ -247,8 +254,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `172` // Estimated: `8587` - // Minimum execution time: 9_025_000 picoseconds. - Weight::from_parts(9_423_000, 0) + // Minimum execution time: 9_553_000 picoseconds. + Weight::from_parts(9_899_000, 0) .saturating_add(Weight::from_parts(0, 8587)) .saturating_add(T::DbWeight::get().reads(3)) } @@ -258,14 +265,16 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `140` // Estimated: `11030` - // Minimum execution time: 17_550_000 picoseconds. - Weight::from_parts(17_939_000, 0) + // Minimum execution time: 17_445_000 picoseconds. + Weight::from_parts(18_206_000, 0) .saturating_add(Weight::from_parts(0, 11030)) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(2)) } /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:4 w:2) /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) + /// Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) @@ -280,10 +289,36 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `146` // Estimated: `11036` - // Minimum execution time: 36_922_000 picoseconds. - Weight::from_parts(37_709_000, 0) + // Minimum execution time: 34_200_000 picoseconds. 
+ Weight::from_parts(35_198_000, 0) .saturating_add(Weight::from_parts(0, 11036)) - .saturating_add(T::DbWeight::get().reads(9)) + .saturating_add(T::DbWeight::get().reads(10)) .saturating_add(T::DbWeight::get().writes(4)) } + /// Storage: `PolkadotXcm::QueryCounter` (r:1 w:1) + /// Proof: `PolkadotXcm::QueryCounter` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::Queries` (r:0 w:1) + /// Proof: `PolkadotXcm::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn new_query() -> Weight { + // Proof Size summary in bytes: + // Measured: `69` + // Estimated: `1554` + // Minimum execution time: 4_679_000 picoseconds. + Weight::from_parts(4_841_000, 0) + .saturating_add(Weight::from_parts(0, 1554)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `PolkadotXcm::Queries` (r:1 w:1) + /// Proof: `PolkadotXcm::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn take_response() -> Weight { + // Proof Size summary in bytes: + // Measured: `7706` + // Estimated: `11171` + // Minimum execution time: 27_281_000 picoseconds. + Weight::from_parts(27_694_000, 0) + .saturating_add(Weight::from_parts(0, 11171)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } } diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/pallet_xcm.rs b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/pallet_xcm.rs index 909d7f28907..afe85fdaf28 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/pallet_xcm.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/pallet_xcm.rs @@ -17,27 +17,25 @@ //! Autogenerated weights for `pallet_xcm` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-07-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-11-10, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-ynta1nyy-project-238-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! EXECUTION: ``, WASM-EXECUTION: `Compiled`, CHAIN: `Some("asset-hub-rococo-dev")`, DB CACHE: 1024 +//! HOSTNAME: `runner-yprdrvc7-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("asset-hub-rococo-dev")`, DB CACHE: 1024 // Executed Command: -// ./target/production/polkadot-parachain +// target/production/polkadot-parachain // benchmark // pallet -// --chain=asset-hub-rococo-dev -// --wasm-execution=compiled -// --pallet=pallet_xcm -// --no-storage-info -// --no-median-slopes -// --no-min-squares -// --extrinsic=* // --steps=50 // --repeat=20 -// --json -// --header=./file_header.txt -// --output=./parachains/runtimes/assets/asset-hub-rococo/src/weights/ +// --extrinsic=* +// --wasm-execution=compiled +// --heap-pages=4096 +// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json +// --pallet=pallet_xcm +// --chain=asset-hub-rococo-dev +// --header=./cumulus/file_header.txt +// --output=./cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/ #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -50,6 +48,8 @@ use core::marker::PhantomData; /// Weight functions for `pallet_xcm`. 
pub struct WeightInfo(PhantomData); impl pallet_xcm::WeightInfo for WeightInfo { + /// Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) + /// Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) @@ -62,35 +62,39 @@ impl pallet_xcm::WeightInfo for WeightInfo { /// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn send() -> Weight { // Proof Size summary in bytes: - // Measured: `109` - // Estimated: `3574` - // Minimum execution time: 30_015_000 picoseconds. - Weight::from_parts(30_576_000, 0) - .saturating_add(Weight::from_parts(0, 3574)) - .saturating_add(T::DbWeight::get().reads(5)) + // Measured: `145` + // Estimated: `3610` + // Minimum execution time: 24_498_000 picoseconds. + Weight::from_parts(25_385_000, 0) + .saturating_add(Weight::from_parts(0, 3610)) + .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(2)) } + /// Storage: UNKNOWN KEY `0x48297505634037ef48c848c99c0b1f1b` (r:1 w:0) + /// Proof: UNKNOWN KEY `0x48297505634037ef48c848c99c0b1f1b` (r:1 w:0) /// Storage: `ParachainInfo::ParachainId` (r:1 w:0) /// Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) fn teleport_assets() -> Weight { // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `1489` - // Minimum execution time: 24_785_000 picoseconds. - Weight::from_parts(25_097_000, 0) - .saturating_add(Weight::from_parts(0, 1489)) - .saturating_add(T::DbWeight::get().reads(1)) + // Measured: `39` + // Estimated: `3504` + // Minimum execution time: 19_746_000 picoseconds. + Weight::from_parts(20_535_000, 0) + .saturating_add(Weight::from_parts(0, 3504)) + .saturating_add(T::DbWeight::get().reads(2)) } + /// Storage: UNKNOWN KEY `0x48297505634037ef48c848c99c0b1f1b` (r:1 w:0) + /// Proof: UNKNOWN KEY `0x48297505634037ef48c848c99c0b1f1b` (r:1 w:0) /// Storage: `ParachainInfo::ParachainId` (r:1 w:0) /// Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) fn reserve_transfer_assets() -> Weight { // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `1489` - // Minimum execution time: 18_561_000 picoseconds. - Weight::from_parts(19_121_000, 0) - .saturating_add(Weight::from_parts(0, 1489)) - .saturating_add(T::DbWeight::get().reads(1)) + // Measured: `39` + // Estimated: `3504` + // Minimum execution time: 15_059_000 picoseconds. + Weight::from_parts(15_386_000, 0) + .saturating_add(Weight::from_parts(0, 3504)) + .saturating_add(T::DbWeight::get().reads(2)) } /// Storage: `Benchmark::Override` (r:0 w:0) /// Proof: `Benchmark::Override` (`max_values`: None, `max_size`: None, mode: `Measured`) @@ -108,8 +112,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 9_298_000 picoseconds. - Weight::from_parts(9_721_000, 0) + // Minimum execution time: 7_108_000 picoseconds. 
+ Weight::from_parts(7_458_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -119,8 +123,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_912_000 picoseconds. - Weight::from_parts(3_262_000, 0) + // Minimum execution time: 2_205_000 picoseconds. + Weight::from_parts(2_360_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -128,6 +132,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { /// Proof: `PolkadotXcm::VersionNotifiers` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::QueryCounter` (r:1 w:1) /// Proof: `PolkadotXcm::QueryCounter` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) + /// Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) @@ -142,16 +148,18 @@ impl pallet_xcm::WeightInfo for WeightInfo { /// Proof: `PolkadotXcm::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`) fn force_subscribe_version_notify() -> Weight { // Proof Size summary in bytes: - // Measured: `109` - // Estimated: `3574` - // Minimum execution time: 35_127_000 picoseconds. - Weight::from_parts(36_317_000, 0) - .saturating_add(Weight::from_parts(0, 3574)) - .saturating_add(T::DbWeight::get().reads(7)) + // Measured: `145` + // Estimated: `3610` + // Minimum execution time: 29_099_000 picoseconds. + Weight::from_parts(29_580_000, 0) + .saturating_add(Weight::from_parts(0, 3610)) + .saturating_add(T::DbWeight::get().reads(8)) .saturating_add(T::DbWeight::get().writes(5)) } /// Storage: `PolkadotXcm::VersionNotifiers` (r:1 w:1) /// Proof: `PolkadotXcm::VersionNotifiers` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) + /// Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) @@ -166,12 +174,12 @@ impl pallet_xcm::WeightInfo for WeightInfo { /// Proof: `PolkadotXcm::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`) fn force_unsubscribe_version_notify() -> Weight { // Proof Size summary in bytes: - // Measured: `326` - // Estimated: `3791` - // Minimum execution time: 36_634_000 picoseconds. - Weight::from_parts(37_983_000, 0) - .saturating_add(Weight::from_parts(0, 3791)) - .saturating_add(T::DbWeight::get().reads(6)) + // Measured: `363` + // Estimated: `3828` + // Minimum execution time: 31_161_000 picoseconds. + Weight::from_parts(31_933_000, 0) + .saturating_add(Weight::from_parts(0, 3828)) + .saturating_add(T::DbWeight::get().reads(7)) .saturating_add(T::DbWeight::get().writes(4)) } /// Storage: `PolkadotXcm::XcmExecutionSuspended` (r:0 w:1) @@ -180,8 +188,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_940_000 picoseconds. 
- Weight::from_parts(3_085_000, 0) + // Minimum execution time: 2_158_000 picoseconds. + Weight::from_parts(2_316_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -191,8 +199,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `162` // Estimated: `11052` - // Minimum execution time: 17_400_000 picoseconds. - Weight::from_parts(17_759_000, 0) + // Minimum execution time: 16_934_000 picoseconds. + Weight::from_parts(17_655_000, 0) .saturating_add(Weight::from_parts(0, 11052)) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(2)) @@ -203,8 +211,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `166` // Estimated: `11056` - // Minimum execution time: 17_287_000 picoseconds. - Weight::from_parts(17_678_000, 0) + // Minimum execution time: 17_658_000 picoseconds. + Weight::from_parts(17_973_000, 0) .saturating_add(Weight::from_parts(0, 11056)) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(2)) @@ -215,13 +223,15 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `173` // Estimated: `13538` - // Minimum execution time: 18_941_000 picoseconds. - Weight::from_parts(19_285_000, 0) + // Minimum execution time: 18_673_000 picoseconds. + Weight::from_parts(19_027_000, 0) .saturating_add(Weight::from_parts(0, 13538)) .saturating_add(T::DbWeight::get().reads(5)) } /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:2 w:1) /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) + /// Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) @@ -234,12 +244,12 @@ impl pallet_xcm::WeightInfo for WeightInfo { /// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn notify_current_targets() -> Weight { // Proof Size summary in bytes: - // Measured: `176` - // Estimated: `6116` - // Minimum execution time: 32_668_000 picoseconds. - Weight::from_parts(33_533_000, 0) - .saturating_add(Weight::from_parts(0, 6116)) - .saturating_add(T::DbWeight::get().reads(7)) + // Measured: `212` + // Estimated: `6152` + // Minimum execution time: 27_171_000 picoseconds. + Weight::from_parts(27_802_000, 0) + .saturating_add(Weight::from_parts(0, 6152)) + .saturating_add(T::DbWeight::get().reads(8)) .saturating_add(T::DbWeight::get().writes(3)) } /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:3 w:0) @@ -248,8 +258,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `206` // Estimated: `8621` - // Minimum execution time: 9_182_000 picoseconds. - Weight::from_parts(9_498_000, 0) + // Minimum execution time: 9_423_000 picoseconds. + Weight::from_parts(9_636_000, 0) .saturating_add(Weight::from_parts(0, 8621)) .saturating_add(T::DbWeight::get().reads(3)) } @@ -259,14 +269,16 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `173` // Estimated: `11063` - // Minimum execution time: 17_519_000 picoseconds. 
- Weight::from_parts(17_943_000, 0) + // Minimum execution time: 17_442_000 picoseconds. + Weight::from_parts(17_941_000, 0) .saturating_add(Weight::from_parts(0, 11063)) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(2)) } /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:4 w:2) /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) + /// Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) @@ -279,12 +291,38 @@ impl pallet_xcm::WeightInfo for WeightInfo { /// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn migrate_and_notify_old_targets() -> Weight { // Proof Size summary in bytes: - // Measured: `179` - // Estimated: `11069` - // Minimum execution time: 38_680_000 picoseconds. - Weight::from_parts(39_984_000, 0) - .saturating_add(Weight::from_parts(0, 11069)) - .saturating_add(T::DbWeight::get().reads(9)) + // Measured: `215` + // Estimated: `11105` + // Minimum execution time: 34_340_000 picoseconds. + Weight::from_parts(34_934_000, 0) + .saturating_add(Weight::from_parts(0, 11105)) + .saturating_add(T::DbWeight::get().reads(10)) .saturating_add(T::DbWeight::get().writes(4)) } + /// Storage: `PolkadotXcm::QueryCounter` (r:1 w:1) + /// Proof: `PolkadotXcm::QueryCounter` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::Queries` (r:0 w:1) + /// Proof: `PolkadotXcm::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn new_query() -> Weight { + // Proof Size summary in bytes: + // Measured: `103` + // Estimated: `1588` + // Minimum execution time: 5_496_000 picoseconds. + Weight::from_parts(5_652_000, 0) + .saturating_add(Weight::from_parts(0, 1588)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `PolkadotXcm::Queries` (r:1 w:1) + /// Proof: `PolkadotXcm::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn take_response() -> Weight { + // Proof Size summary in bytes: + // Measured: `7740` + // Estimated: `11205` + // Minimum execution time: 26_140_000 picoseconds. + Weight::from_parts(26_824_000, 0) + .saturating_add(Weight::from_parts(0, 11205)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } } diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/pallet_xcm.rs b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/pallet_xcm.rs index 5c97d358591..340edafb0b0 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/pallet_xcm.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/pallet_xcm.rs @@ -1,42 +1,41 @@ // Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 +// This file is part of Cumulus. -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . //! Autogenerated weights for `pallet_xcm` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-07-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-11-10, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-ynta1nyy-project-238-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! EXECUTION: ``, WASM-EXECUTION: `Compiled`, CHAIN: `Some("asset-hub-westend-dev")`, DB CACHE: 1024 +//! HOSTNAME: `runner-yprdrvc7-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("asset-hub-westend-dev")`, DB CACHE: 1024 // Executed Command: -// ./target/production/polkadot-parachain +// target/production/polkadot-parachain // benchmark // pallet -// --chain=asset-hub-westend-dev -// --wasm-execution=compiled -// --pallet=pallet_xcm -// --no-storage-info -// --no-median-slopes -// --no-min-squares -// --extrinsic=* // --steps=50 // --repeat=20 -// --json -// --header=./file_header.txt -// --output=./parachains/runtimes/assets/asset-hub-westend/src/weights/ +// --extrinsic=* +// --wasm-execution=compiled +// --heap-pages=4096 +// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json +// --pallet=pallet_xcm +// --chain=asset-hub-westend-dev +// --header=./cumulus/file_header.txt +// --output=./cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/ #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -49,6 +48,8 @@ use core::marker::PhantomData; /// Weight functions for `pallet_xcm`. pub struct WeightInfo(PhantomData); impl pallet_xcm::WeightInfo for WeightInfo { + /// Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) + /// Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) @@ -63,10 +64,10 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `145` // Estimated: `3610` - // Minimum execution time: 29_833_000 picoseconds. - Weight::from_parts(30_472_000, 0) + // Minimum execution time: 25_534_000 picoseconds. 
+ Weight::from_parts(26_413_000, 0) .saturating_add(Weight::from_parts(0, 3610)) - .saturating_add(T::DbWeight::get().reads(5)) + .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(2)) } /// Storage: `ParachainInfo::ParachainId` (r:1 w:0) @@ -75,8 +76,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `1489` - // Minimum execution time: 22_922_000 picoseconds. - Weight::from_parts(23_650_000, 0) + // Minimum execution time: 20_513_000 picoseconds. + Weight::from_parts(20_837_000, 0) .saturating_add(Weight::from_parts(0, 1489)) .saturating_add(T::DbWeight::get().reads(1)) } @@ -86,8 +87,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `1489` - // Minimum execution time: 17_468_000 picoseconds. - Weight::from_parts(18_068_000, 0) + // Minimum execution time: 14_977_000 picoseconds. + Weight::from_parts(15_207_000, 0) .saturating_add(Weight::from_parts(0, 1489)) .saturating_add(T::DbWeight::get().reads(1)) } @@ -95,8 +96,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 8_780_000 picoseconds. - Weight::from_parts(9_201_000, 0) + // Minimum execution time: 7_440_000 picoseconds. + Weight::from_parts(7_651_000, 0) .saturating_add(Weight::from_parts(0, 0)) } /// Storage: `PolkadotXcm::SupportedVersion` (r:0 w:1) @@ -105,8 +106,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 8_886_000 picoseconds. - Weight::from_parts(9_102_000, 0) + // Minimum execution time: 7_253_000 picoseconds. + Weight::from_parts(7_584_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -116,8 +117,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_665_000 picoseconds. - Weight::from_parts(2_884_000, 0) + // Minimum execution time: 2_299_000 picoseconds. + Weight::from_parts(2_435_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -125,6 +126,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { /// Proof: `PolkadotXcm::VersionNotifiers` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::QueryCounter` (r:1 w:1) /// Proof: `PolkadotXcm::QueryCounter` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) + /// Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) @@ -141,14 +144,16 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `145` // Estimated: `3610` - // Minimum execution time: 34_513_000 picoseconds. - Weight::from_parts(36_207_000, 0) + // Minimum execution time: 29_440_000 picoseconds. 
+ Weight::from_parts(30_675_000, 0) .saturating_add(Weight::from_parts(0, 3610)) - .saturating_add(T::DbWeight::get().reads(7)) + .saturating_add(T::DbWeight::get().reads(8)) .saturating_add(T::DbWeight::get().writes(5)) } /// Storage: `PolkadotXcm::VersionNotifiers` (r:1 w:1) /// Proof: `PolkadotXcm::VersionNotifiers` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) + /// Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) @@ -165,10 +170,10 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `363` // Estimated: `3828` - // Minimum execution time: 35_770_000 picoseconds. - Weight::from_parts(36_462_000, 0) + // Minimum execution time: 31_876_000 picoseconds. + Weight::from_parts(32_588_000, 0) .saturating_add(Weight::from_parts(0, 3828)) - .saturating_add(T::DbWeight::get().reads(6)) + .saturating_add(T::DbWeight::get().reads(7)) .saturating_add(T::DbWeight::get().writes(4)) } /// Storage: `PolkadotXcm::XcmExecutionSuspended` (r:0 w:1) @@ -177,8 +182,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_763_000 picoseconds. - Weight::from_parts(3_079_000, 0) + // Minimum execution time: 2_385_000 picoseconds. + Weight::from_parts(2_607_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -188,8 +193,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `162` // Estimated: `11052` - // Minimum execution time: 17_170_000 picoseconds. - Weight::from_parts(17_674_000, 0) + // Minimum execution time: 16_927_000 picoseconds. + Weight::from_parts(17_554_000, 0) .saturating_add(Weight::from_parts(0, 11052)) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(2)) @@ -200,8 +205,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `166` // Estimated: `11056` - // Minimum execution time: 16_857_000 picoseconds. - Weight::from_parts(17_407_000, 0) + // Minimum execution time: 16_965_000 picoseconds. + Weight::from_parts(17_807_000, 0) .saturating_add(Weight::from_parts(0, 11056)) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(2)) @@ -212,13 +217,15 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `173` // Estimated: `13538` - // Minimum execution time: 19_040_000 picoseconds. - Weight::from_parts(19_550_000, 0) + // Minimum execution time: 18_763_000 picoseconds. 
+ Weight::from_parts(19_359_000, 0) .saturating_add(Weight::from_parts(0, 13538)) .saturating_add(T::DbWeight::get().reads(5)) } /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:2 w:1) /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) + /// Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) @@ -233,10 +240,10 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `212` // Estimated: `6152` - // Minimum execution time: 31_623_000 picoseconds. - Weight::from_parts(32_646_000, 0) + // Minimum execution time: 27_371_000 picoseconds. + Weight::from_parts(28_185_000, 0) .saturating_add(Weight::from_parts(0, 6152)) - .saturating_add(T::DbWeight::get().reads(7)) + .saturating_add(T::DbWeight::get().reads(8)) .saturating_add(T::DbWeight::get().writes(3)) } /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:3 w:0) @@ -245,8 +252,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `206` // Estimated: `8621` - // Minimum execution time: 9_148_000 picoseconds. - Weight::from_parts(9_402_000, 0) + // Minimum execution time: 9_165_000 picoseconds. + Weight::from_parts(9_539_000, 0) .saturating_add(Weight::from_parts(0, 8621)) .saturating_add(T::DbWeight::get().reads(3)) } @@ -256,14 +263,16 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `173` // Estimated: `11063` - // Minimum execution time: 17_630_000 picoseconds. - Weight::from_parts(17_941_000, 0) + // Minimum execution time: 17_384_000 picoseconds. + Weight::from_parts(17_777_000, 0) .saturating_add(Weight::from_parts(0, 11063)) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(2)) } /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:4 w:2) /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) + /// Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) @@ -278,10 +287,36 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `215` // Estimated: `11105` - // Minimum execution time: 38_425_000 picoseconds. - Weight::from_parts(39_219_000, 0) + // Minimum execution time: 34_260_000 picoseconds. 
+ Weight::from_parts(35_428_000, 0) .saturating_add(Weight::from_parts(0, 11105)) - .saturating_add(T::DbWeight::get().reads(9)) + .saturating_add(T::DbWeight::get().reads(10)) .saturating_add(T::DbWeight::get().writes(4)) } + /// Storage: `PolkadotXcm::QueryCounter` (r:1 w:1) + /// Proof: `PolkadotXcm::QueryCounter` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::Queries` (r:0 w:1) + /// Proof: `PolkadotXcm::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn new_query() -> Weight { + // Proof Size summary in bytes: + // Measured: `103` + // Estimated: `1588` + // Minimum execution time: 4_710_000 picoseconds. + Weight::from_parts(4_900_000, 0) + .saturating_add(Weight::from_parts(0, 1588)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `PolkadotXcm::Queries` (r:1 w:1) + /// Proof: `PolkadotXcm::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn take_response() -> Weight { + // Proof Size summary in bytes: + // Measured: `7740` + // Estimated: `11205` + // Minimum execution time: 26_843_000 picoseconds. + Weight::from_parts(27_404_000, 0) + .saturating_add(Weight::from_parts(0, 11205)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } } diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/weights/pallet_xcm.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/weights/pallet_xcm.rs index 730bc492684..7f4c2026f2b 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/weights/pallet_xcm.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/weights/pallet_xcm.rs @@ -1,42 +1,41 @@ // Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 +// This file is part of Cumulus. -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . //! Autogenerated weights for `pallet_xcm` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-07-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-11-09, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-ynta1nyy-project-238-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! 
EXECUTION: ``, WASM-EXECUTION: `Compiled`, CHAIN: `Some("bridge-hub-kusama-dev")`, DB CACHE: 1024 +//! HOSTNAME: `runner-yprdrvc7-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("bridge-hub-kusama-dev")`, DB CACHE: 1024 // Executed Command: -// ./target/production/polkadot-parachain +// target/production/polkadot-parachain // benchmark // pallet -// --chain=bridge-hub-kusama-dev -// --wasm-execution=compiled -// --pallet=pallet_xcm -// --no-storage-info -// --no-median-slopes -// --no-min-squares -// --extrinsic=* // --steps=50 // --repeat=20 -// --json -// --header=./file_header.txt -// --output=./parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/weights/ +// --extrinsic=* +// --wasm-execution=compiled +// --heap-pages=4096 +// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json +// --pallet=pallet_xcm +// --chain=bridge-hub-kusama-dev +// --header=./cumulus/file_header.txt +// --output=./cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/weights/ #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -49,6 +48,8 @@ use core::marker::PhantomData; /// Weight functions for `pallet_xcm`. pub struct WeightInfo(PhantomData); impl pallet_xcm::WeightInfo for WeightInfo { + /// Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) + /// Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) @@ -63,10 +64,10 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `38` // Estimated: `3503` - // Minimum execution time: 27_523_000 picoseconds. - Weight::from_parts(28_238_000, 0) + // Minimum execution time: 22_520_000 picoseconds. + Weight::from_parts(23_167_000, 0) .saturating_add(Weight::from_parts(0, 3503)) - .saturating_add(T::DbWeight::get().reads(5)) + .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(2)) } /// Storage: `ParachainInfo::ParachainId` (r:1 w:0) @@ -75,8 +76,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `32` // Estimated: `1489` - // Minimum execution time: 24_139_000 picoseconds. - Weight::from_parts(24_806_000, 0) + // Minimum execution time: 19_639_000 picoseconds. + Weight::from_parts(20_230_000, 0) .saturating_add(Weight::from_parts(0, 1489)) .saturating_add(T::DbWeight::get().reads(1)) } @@ -106,8 +107,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 8_988_000 picoseconds. - Weight::from_parts(9_227_000, 0) + // Minimum execution time: 7_175_000 picoseconds. + Weight::from_parts(7_496_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -117,8 +118,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_571_000 picoseconds. - Weight::from_parts(2_667_000, 0) + // Minimum execution time: 2_126_000 picoseconds. 
+ Weight::from_parts(2_359_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -126,6 +127,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { /// Proof: `PolkadotXcm::VersionNotifiers` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::QueryCounter` (r:1 w:1) /// Proof: `PolkadotXcm::QueryCounter` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) + /// Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) @@ -142,14 +145,16 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `38` // Estimated: `3503` - // Minimum execution time: 33_194_000 picoseconds. - Weight::from_parts(34_089_000, 0) + // Minimum execution time: 27_229_000 picoseconds. + Weight::from_parts(27_673_000, 0) .saturating_add(Weight::from_parts(0, 3503)) - .saturating_add(T::DbWeight::get().reads(7)) + .saturating_add(T::DbWeight::get().reads(8)) .saturating_add(T::DbWeight::get().writes(5)) } /// Storage: `PolkadotXcm::VersionNotifiers` (r:1 w:1) /// Proof: `PolkadotXcm::VersionNotifiers` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) + /// Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) @@ -166,10 +171,10 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `255` // Estimated: `3720` - // Minimum execution time: 35_413_000 picoseconds. - Weight::from_parts(36_359_000, 0) + // Minimum execution time: 29_812_000 picoseconds. + Weight::from_parts(30_649_000, 0) .saturating_add(Weight::from_parts(0, 3720)) - .saturating_add(T::DbWeight::get().reads(6)) + .saturating_add(T::DbWeight::get().reads(7)) .saturating_add(T::DbWeight::get().writes(4)) } /// Storage: `PolkadotXcm::XcmExecutionSuspended` (r:0 w:1) @@ -178,8 +183,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_679_000 picoseconds. - Weight::from_parts(2_823_000, 0) + // Minimum execution time: 2_212_000 picoseconds. + Weight::from_parts(2_367_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -189,8 +194,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `95` // Estimated: `10985` - // Minimum execution time: 15_117_000 picoseconds. - Weight::from_parts(15_603_000, 0) + // Minimum execution time: 14_768_000 picoseconds. + Weight::from_parts(15_036_000, 0) .saturating_add(Weight::from_parts(0, 10985)) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(2)) @@ -201,8 +206,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `99` // Estimated: `10989` - // Minimum execution time: 14_978_000 picoseconds. 
- Weight::from_parts(15_370_000, 0) + // Minimum execution time: 14_662_000 picoseconds. + Weight::from_parts(15_155_000, 0) .saturating_add(Weight::from_parts(0, 10989)) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(2)) @@ -213,13 +218,15 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `106` // Estimated: `13471` - // Minimum execution time: 16_549_000 picoseconds. - Weight::from_parts(16_944_000, 0) + // Minimum execution time: 16_198_000 picoseconds. + Weight::from_parts(16_456_000, 0) .saturating_add(Weight::from_parts(0, 13471)) .saturating_add(T::DbWeight::get().reads(5)) } /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:2 w:1) /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) + /// Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) @@ -234,10 +241,10 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `106` // Estimated: `6046` - // Minimum execution time: 30_111_000 picoseconds. - Weight::from_parts(30_795_000, 0) + // Minimum execution time: 25_825_000 picoseconds. + Weight::from_parts(26_744_000, 0) .saturating_add(Weight::from_parts(0, 6046)) - .saturating_add(T::DbWeight::get().reads(7)) + .saturating_add(T::DbWeight::get().reads(8)) .saturating_add(T::DbWeight::get().writes(3)) } /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:3 w:0) @@ -247,7 +254,7 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Measured: `136` // Estimated: `8551` // Minimum execution time: 8_622_000 picoseconds. - Weight::from_parts(8_865_000, 0) + Weight::from_parts(8_931_000, 0) .saturating_add(Weight::from_parts(0, 8551)) .saturating_add(T::DbWeight::get().reads(3)) } @@ -257,14 +264,16 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `106` // Estimated: `10996` - // Minimum execution time: 15_194_000 picoseconds. - Weight::from_parts(15_646_000, 0) + // Minimum execution time: 15_397_000 picoseconds. + Weight::from_parts(15_650_000, 0) .saturating_add(Weight::from_parts(0, 10996)) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(2)) } /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:4 w:2) /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) + /// Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) @@ -279,10 +288,36 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `112` // Estimated: `11002` - // Minimum execution time: 36_625_000 picoseconds. - Weight::from_parts(37_571_000, 0) + // Minimum execution time: 32_330_000 picoseconds. 
+ Weight::from_parts(33_255_000, 0) .saturating_add(Weight::from_parts(0, 11002)) - .saturating_add(T::DbWeight::get().reads(9)) + .saturating_add(T::DbWeight::get().reads(10)) .saturating_add(T::DbWeight::get().writes(4)) } + /// Storage: `PolkadotXcm::QueryCounter` (r:1 w:1) + /// Proof: `PolkadotXcm::QueryCounter` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::Queries` (r:0 w:1) + /// Proof: `PolkadotXcm::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn new_query() -> Weight { + // Proof Size summary in bytes: + // Measured: `32` + // Estimated: `1517` + // Minimum execution time: 4_142_000 picoseconds. + Weight::from_parts(4_308_000, 0) + .saturating_add(Weight::from_parts(0, 1517)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `PolkadotXcm::Queries` (r:1 w:1) + /// Proof: `PolkadotXcm::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn take_response() -> Weight { + // Proof Size summary in bytes: + // Measured: `7669` + // Estimated: `11134` + // Minimum execution time: 25_814_000 picoseconds. + Weight::from_parts(26_213_000, 0) + .saturating_add(Weight::from_parts(0, 11134)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } } diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/weights/pallet_xcm.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/weights/pallet_xcm.rs index 98dd7e36f07..b73c009cbda 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/weights/pallet_xcm.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/weights/pallet_xcm.rs @@ -1,42 +1,41 @@ // Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 +// This file is part of Cumulus. -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . //! Autogenerated weights for `pallet_xcm` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-07-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-11-10, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-ynta1nyy-project-238-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! 
EXECUTION: ``, WASM-EXECUTION: `Compiled`, CHAIN: `Some("bridge-hub-polkadot-dev")`, DB CACHE: 1024 +//! HOSTNAME: `runner-yprdrvc7-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("bridge-hub-polkadot-dev")`, DB CACHE: 1024 // Executed Command: -// ./target/production/polkadot-parachain +// target/production/polkadot-parachain // benchmark // pallet -// --chain=bridge-hub-polkadot-dev -// --wasm-execution=compiled -// --pallet=pallet_xcm -// --no-storage-info -// --no-median-slopes -// --no-min-squares -// --extrinsic=* // --steps=50 // --repeat=20 -// --json -// --header=./file_header.txt -// --output=./parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/weights/ +// --extrinsic=* +// --wasm-execution=compiled +// --heap-pages=4096 +// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json +// --pallet=pallet_xcm +// --chain=bridge-hub-polkadot-dev +// --header=./cumulus/file_header.txt +// --output=./cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/weights/ #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -49,6 +48,8 @@ use core::marker::PhantomData; /// Weight functions for `pallet_xcm`. pub struct WeightInfo(PhantomData); impl pallet_xcm::WeightInfo for WeightInfo { + /// Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) + /// Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) @@ -63,10 +64,10 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `38` // Estimated: `3503` - // Minimum execution time: 25_510_000 picoseconds. - Weight::from_parts(25_755_000, 0) + // Minimum execution time: 22_442_000 picoseconds. + Weight::from_parts(23_346_000, 0) .saturating_add(Weight::from_parts(0, 3503)) - .saturating_add(T::DbWeight::get().reads(5)) + .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(2)) } /// Storage: `ParachainInfo::ParachainId` (r:1 w:0) @@ -75,8 +76,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `32` // Estimated: `1489` - // Minimum execution time: 24_125_000 picoseconds. - Weight::from_parts(25_559_000, 0) + // Minimum execution time: 19_655_000 picoseconds. + Weight::from_parts(20_086_000, 0) .saturating_add(Weight::from_parts(0, 1489)) .saturating_add(T::DbWeight::get().reads(1)) } @@ -106,8 +107,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 8_625_000 picoseconds. - Weight::from_parts(9_232_000, 0) + // Minimum execution time: 6_858_000 picoseconds. + Weight::from_parts(7_225_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -117,8 +118,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_690_000 picoseconds. - Weight::from_parts(2_906_000, 0) + // Minimum execution time: 2_099_000 picoseconds. 
+ Weight::from_parts(2_190_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -126,6 +127,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { /// Proof: `PolkadotXcm::VersionNotifiers` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::QueryCounter` (r:1 w:1) /// Proof: `PolkadotXcm::QueryCounter` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) + /// Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) @@ -142,14 +145,16 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `38` // Estimated: `3503` - // Minimum execution time: 30_131_000 picoseconds. - Weight::from_parts(31_138_000, 0) + // Minimum execution time: 27_073_000 picoseconds. + Weight::from_parts(27_584_000, 0) .saturating_add(Weight::from_parts(0, 3503)) - .saturating_add(T::DbWeight::get().reads(7)) + .saturating_add(T::DbWeight::get().reads(8)) .saturating_add(T::DbWeight::get().writes(5)) } /// Storage: `PolkadotXcm::VersionNotifiers` (r:1 w:1) /// Proof: `PolkadotXcm::VersionNotifiers` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) + /// Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) @@ -164,12 +169,12 @@ impl pallet_xcm::WeightInfo for WeightInfo { /// Proof: `PolkadotXcm::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`) fn force_unsubscribe_version_notify() -> Weight { // Proof Size summary in bytes: - // Measured: `220` - // Estimated: `3685` - // Minimum execution time: 32_411_000 picoseconds. - Weight::from_parts(33_009_000, 0) - .saturating_add(Weight::from_parts(0, 3685)) - .saturating_add(T::DbWeight::get().reads(6)) + // Measured: `255` + // Estimated: `3720` + // Minimum execution time: 29_949_000 picoseconds. + Weight::from_parts(30_760_000, 0) + .saturating_add(Weight::from_parts(0, 3720)) + .saturating_add(T::DbWeight::get().reads(7)) .saturating_add(T::DbWeight::get().writes(4)) } /// Storage: `PolkadotXcm::XcmExecutionSuspended` (r:0 w:1) @@ -178,8 +183,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_548_000 picoseconds. - Weight::from_parts(2_727_000, 0) + // Minimum execution time: 2_192_000 picoseconds. + Weight::from_parts(2_276_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -189,8 +194,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `95` // Estimated: `10985` - // Minimum execution time: 15_298_000 picoseconds. - Weight::from_parts(15_964_000, 0) + // Minimum execution time: 14_681_000 picoseconds. 
+ Weight::from_parts(15_131_000, 0) .saturating_add(Weight::from_parts(0, 10985)) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(2)) @@ -201,8 +206,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `99` // Estimated: `10989` - // Minimum execution time: 14_927_000 picoseconds. - Weight::from_parts(15_528_000, 0) + // Minimum execution time: 14_523_000 picoseconds. + Weight::from_parts(15_113_000, 0) .saturating_add(Weight::from_parts(0, 10989)) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(2)) @@ -213,13 +218,15 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `106` // Estimated: `13471` - // Minimum execution time: 16_409_000 picoseconds. - Weight::from_parts(16_960_000, 0) + // Minimum execution time: 15_989_000 picoseconds. + Weight::from_parts(16_518_000, 0) .saturating_add(Weight::from_parts(0, 13471)) .saturating_add(T::DbWeight::get().reads(5)) } /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:2 w:1) /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) + /// Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) @@ -234,10 +241,10 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `106` // Estimated: `6046` - // Minimum execution time: 28_204_000 picoseconds. - Weight::from_parts(28_641_000, 0) + // Minimum execution time: 25_127_000 picoseconds. + Weight::from_parts(25_773_000, 0) .saturating_add(Weight::from_parts(0, 6046)) - .saturating_add(T::DbWeight::get().reads(7)) + .saturating_add(T::DbWeight::get().reads(8)) .saturating_add(T::DbWeight::get().writes(3)) } /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:3 w:0) @@ -246,8 +253,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `136` // Estimated: `8551` - // Minimum execution time: 8_576_000 picoseconds. - Weight::from_parts(8_895_000, 0) + // Minimum execution time: 8_352_000 picoseconds. + Weight::from_parts(8_592_000, 0) .saturating_add(Weight::from_parts(0, 8551)) .saturating_add(T::DbWeight::get().reads(3)) } @@ -257,14 +264,16 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `106` // Estimated: `10996` - // Minimum execution time: 15_263_000 picoseconds. - Weight::from_parts(15_726_000, 0) + // Minimum execution time: 14_658_000 picoseconds. 
+ Weight::from_parts(15_345_000, 0) .saturating_add(Weight::from_parts(0, 10996)) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(2)) } /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:4 w:2) /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) + /// Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) @@ -279,10 +288,36 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `112` // Estimated: `11002` - // Minimum execution time: 34_186_000 picoseconds. - Weight::from_parts(35_204_000, 0) + // Minimum execution time: 31_478_000 picoseconds. + Weight::from_parts(32_669_000, 0) .saturating_add(Weight::from_parts(0, 11002)) - .saturating_add(T::DbWeight::get().reads(9)) + .saturating_add(T::DbWeight::get().reads(10)) .saturating_add(T::DbWeight::get().writes(4)) } + /// Storage: `PolkadotXcm::QueryCounter` (r:1 w:1) + /// Proof: `PolkadotXcm::QueryCounter` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::Queries` (r:0 w:1) + /// Proof: `PolkadotXcm::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn new_query() -> Weight { + // Proof Size summary in bytes: + // Measured: `32` + // Estimated: `1517` + // Minimum execution time: 4_066_000 picoseconds. + Weight::from_parts(4_267_000, 0) + .saturating_add(Weight::from_parts(0, 1517)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `PolkadotXcm::Queries` (r:1 w:1) + /// Proof: `PolkadotXcm::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn take_response() -> Weight { + // Proof Size summary in bytes: + // Measured: `7669` + // Estimated: `11134` + // Minimum execution time: 25_260_000 picoseconds. + Weight::from_parts(25_570_000, 0) + .saturating_add(Weight::from_parts(0, 11134)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } } diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_xcm.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_xcm.rs index a6e093c4b94..5aa4999c624 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_xcm.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_xcm.rs @@ -1,42 +1,41 @@ // Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 +// This file is part of Cumulus. -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
+// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . //! Autogenerated weights for `pallet_xcm` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-07-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-11-10, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-ynta1nyy-project-238-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! EXECUTION: ``, WASM-EXECUTION: `Compiled`, CHAIN: `Some("bridge-hub-rococo-dev")`, DB CACHE: 1024 +//! HOSTNAME: `runner-yprdrvc7-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("bridge-hub-rococo-dev")`, DB CACHE: 1024 // Executed Command: -// ./target/production/polkadot-parachain +// target/production/polkadot-parachain // benchmark // pallet -// --chain=bridge-hub-rococo-dev -// --wasm-execution=compiled -// --pallet=pallet_xcm -// --no-storage-info -// --no-median-slopes -// --no-min-squares -// --extrinsic=* // --steps=50 // --repeat=20 -// --json -// --header=./file_header.txt -// --output=./parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/ +// --extrinsic=* +// --wasm-execution=compiled +// --heap-pages=4096 +// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json +// --pallet=pallet_xcm +// --chain=bridge-hub-rococo-dev +// --header=./cumulus/file_header.txt +// --output=./cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/ #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -49,6 +48,8 @@ use core::marker::PhantomData; /// Weight functions for `pallet_xcm`. pub struct WeightInfo(PhantomData); impl pallet_xcm::WeightInfo for WeightInfo { + /// Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) + /// Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) @@ -63,22 +64,24 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `75` // Estimated: `3540` - // Minimum execution time: 29_724_000 picoseconds. - Weight::from_parts(30_440_000, 0) + // Minimum execution time: 24_179_000 picoseconds. 
+ Weight::from_parts(24_684_000, 0) .saturating_add(Weight::from_parts(0, 3540)) - .saturating_add(T::DbWeight::get().reads(5)) + .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(2)) } + /// Storage: UNKNOWN KEY `0x48297505634037ef48c848c99c0b1f1b` (r:1 w:0) + /// Proof: UNKNOWN KEY `0x48297505634037ef48c848c99c0b1f1b` (r:1 w:0) /// Storage: `ParachainInfo::ParachainId` (r:1 w:0) /// Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) fn teleport_assets() -> Weight { // Proof Size summary in bytes: - // Measured: `32` - // Estimated: `1489` - // Minimum execution time: 26_779_000 picoseconds. - Weight::from_parts(27_249_000, 0) - .saturating_add(Weight::from_parts(0, 1489)) - .saturating_add(T::DbWeight::get().reads(1)) + // Measured: `38` + // Estimated: `3503` + // Minimum execution time: 21_093_000 picoseconds. + Weight::from_parts(21_523_000, 0) + .saturating_add(Weight::from_parts(0, 3503)) + .saturating_add(T::DbWeight::get().reads(2)) } /// Storage: `Benchmark::Override` (r:0 w:0) /// Proof: `Benchmark::Override` (`max_values`: None, `max_size`: None, mode: `Measured`) @@ -106,8 +109,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 9_170_000 picoseconds. - Weight::from_parts(9_629_000, 0) + // Minimum execution time: 6_938_000 picoseconds. + Weight::from_parts(7_243_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -117,8 +120,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_769_000 picoseconds. - Weight::from_parts(2_933_000, 0) + // Minimum execution time: 2_159_000 picoseconds. + Weight::from_parts(2_290_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -126,6 +129,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { /// Proof: `PolkadotXcm::VersionNotifiers` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::QueryCounter` (r:1 w:1) /// Proof: `PolkadotXcm::QueryCounter` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) + /// Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) @@ -142,14 +147,16 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `75` // Estimated: `3540` - // Minimum execution time: 34_547_000 picoseconds. - Weight::from_parts(35_653_000, 0) + // Minimum execution time: 28_337_000 picoseconds. 
+ Weight::from_parts(29_265_000, 0) .saturating_add(Weight::from_parts(0, 3540)) - .saturating_add(T::DbWeight::get().reads(7)) + .saturating_add(T::DbWeight::get().reads(8)) .saturating_add(T::DbWeight::get().writes(5)) } /// Storage: `PolkadotXcm::VersionNotifiers` (r:1 w:1) /// Proof: `PolkadotXcm::VersionNotifiers` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) + /// Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) @@ -166,10 +173,10 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `292` // Estimated: `3757` - // Minimum execution time: 36_274_000 picoseconds. - Weight::from_parts(37_281_000, 0) + // Minimum execution time: 30_599_000 picoseconds. + Weight::from_parts(31_272_000, 0) .saturating_add(Weight::from_parts(0, 3757)) - .saturating_add(T::DbWeight::get().reads(6)) + .saturating_add(T::DbWeight::get().reads(7)) .saturating_add(T::DbWeight::get().writes(4)) } /// Storage: `PolkadotXcm::XcmExecutionSuspended` (r:0 w:1) @@ -178,8 +185,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_749_000 picoseconds. - Weight::from_parts(2_917_000, 0) + // Minimum execution time: 2_132_000 picoseconds. + Weight::from_parts(2_280_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -189,8 +196,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `187` // Estimated: `11077` - // Minimum execution time: 17_649_000 picoseconds. - Weight::from_parts(17_964_000, 0) + // Minimum execution time: 18_262_000 picoseconds. + Weight::from_parts(18_640_000, 0) .saturating_add(Weight::from_parts(0, 11077)) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(2)) @@ -201,8 +208,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `191` // Estimated: `11081` - // Minimum execution time: 17_551_000 picoseconds. - Weight::from_parts(18_176_000, 0) + // Minimum execution time: 18_512_000 picoseconds. + Weight::from_parts(18_888_000, 0) .saturating_add(Weight::from_parts(0, 11081)) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(2)) @@ -213,13 +220,15 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `198` // Estimated: `13563` - // Minimum execution time: 19_261_000 picoseconds. - Weight::from_parts(19_714_000, 0) + // Minimum execution time: 19_362_000 picoseconds. 
+ Weight::from_parts(20_056_000, 0) .saturating_add(Weight::from_parts(0, 13563)) .saturating_add(T::DbWeight::get().reads(5)) } /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:2 w:1) /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) + /// Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) @@ -234,10 +243,10 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `142` // Estimated: `6082` - // Minimum execution time: 31_630_000 picoseconds. - Weight::from_parts(32_340_000, 0) + // Minimum execution time: 27_318_000 picoseconds. + Weight::from_parts(28_075_000, 0) .saturating_add(Weight::from_parts(0, 6082)) - .saturating_add(T::DbWeight::get().reads(7)) + .saturating_add(T::DbWeight::get().reads(8)) .saturating_add(T::DbWeight::get().writes(3)) } /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:3 w:0) @@ -246,8 +255,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `172` // Estimated: `8587` - // Minimum execution time: 9_218_000 picoseconds. - Weight::from_parts(9_558_000, 0) + // Minimum execution time: 9_930_000 picoseconds. + Weight::from_parts(10_192_000, 0) .saturating_add(Weight::from_parts(0, 8587)) .saturating_add(T::DbWeight::get().reads(3)) } @@ -257,14 +266,16 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `198` // Estimated: `11088` - // Minimum execution time: 18_133_000 picoseconds. - Weight::from_parts(18_663_000, 0) + // Minimum execution time: 18_305_000 picoseconds. + Weight::from_parts(18_738_000, 0) .saturating_add(Weight::from_parts(0, 11088)) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(2)) } /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:4 w:2) /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) + /// Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) @@ -279,10 +290,36 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `204` // Estimated: `11094` - // Minimum execution time: 38_878_000 picoseconds. - Weight::from_parts(39_779_000, 0) + // Minimum execution time: 34_559_000 picoseconds. 
+ Weight::from_parts(35_241_000, 0) .saturating_add(Weight::from_parts(0, 11094)) - .saturating_add(T::DbWeight::get().reads(9)) + .saturating_add(T::DbWeight::get().reads(10)) .saturating_add(T::DbWeight::get().writes(4)) } + /// Storage: `PolkadotXcm::QueryCounter` (r:1 w:1) + /// Proof: `PolkadotXcm::QueryCounter` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::Queries` (r:0 w:1) + /// Proof: `PolkadotXcm::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn new_query() -> Weight { + // Proof Size summary in bytes: + // Measured: `69` + // Estimated: `1554` + // Minimum execution time: 4_512_000 picoseconds. + Weight::from_parts(4_671_000, 0) + .saturating_add(Weight::from_parts(0, 1554)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `PolkadotXcm::Queries` (r:1 w:1) + /// Proof: `PolkadotXcm::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn take_response() -> Weight { + // Proof Size summary in bytes: + // Measured: `7706` + // Estimated: `11171` + // Minimum execution time: 26_473_000 picoseconds. + Weight::from_parts(26_960_000, 0) + .saturating_add(Weight::from_parts(0, 11171)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } } diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/pallet_xcm.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/pallet_xcm.rs index 72bdb282585..9f17d327024 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/pallet_xcm.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/pallet_xcm.rs @@ -286,4 +286,31 @@ impl pallet_xcm::WeightInfo for WeightInfo { .saturating_add(T::DbWeight::get().reads(9)) .saturating_add(T::DbWeight::get().writes(4)) } + /// Storage: `PolkadotXcm::QueryCounter` (r:1 w:1) + /// Proof: `PolkadotXcm::QueryCounter` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::Queries` (r:0 w:1) + /// Proof: `PolkadotXcm::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn new_query() -> Weight { + // Proof Size summary in bytes: + // Measured: `32` + // Estimated: `1517` + // Minimum execution time: 4_142_000 picoseconds. + Weight::from_parts(4_308_000, 0) + .saturating_add(Weight::from_parts(0, 1517)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `PolkadotXcm::Queries` (r:1 w:1) + /// Proof: `PolkadotXcm::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn take_response() -> Weight { + // Proof Size summary in bytes: + // Measured: `7669` + // Estimated: `11134` + // Minimum execution time: 25_814_000 picoseconds. + Weight::from_parts(26_213_000, 0) + .saturating_add(Weight::from_parts(0, 11134)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } } + diff --git a/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/pallet_xcm.rs b/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/pallet_xcm.rs index 26e668854f2..57e50284147 100644 --- a/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/pallet_xcm.rs +++ b/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/pallet_xcm.rs @@ -1,42 +1,41 @@ // Copyright (C) Parity Technologies (UK) Ltd. 
-// SPDX-License-Identifier: Apache-2.0 +// This file is part of Cumulus. -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . //! Autogenerated weights for `pallet_xcm` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-07-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-11-10, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-ynta1nyy-project-238-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! EXECUTION: ``, WASM-EXECUTION: `Compiled`, CHAIN: `Some("collectives-polkadot-dev")`, DB CACHE: 1024 +//! HOSTNAME: `runner-yprdrvc7-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("collectives-polkadot-dev")`, DB CACHE: 1024 // Executed Command: -// ./target/production/polkadot-parachain +// target/production/polkadot-parachain // benchmark // pallet -// --chain=collectives-polkadot-dev -// --wasm-execution=compiled -// --pallet=pallet_xcm -// --no-storage-info -// --no-median-slopes -// --no-min-squares -// --extrinsic=* // --steps=50 // --repeat=20 -// --json -// --header=./file_header.txt -// --output=./parachains/runtimes/collectives/collectives-polkadot/src/weights/ +// --extrinsic=* +// --wasm-execution=compiled +// --heap-pages=4096 +// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json +// --pallet=pallet_xcm +// --chain=collectives-polkadot-dev +// --header=./cumulus/file_header.txt +// --output=./cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/ #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -49,6 +48,8 @@ use core::marker::PhantomData; /// Weight functions for `pallet_xcm`. 
pub struct WeightInfo(PhantomData); impl pallet_xcm::WeightInfo for WeightInfo { + /// Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) + /// Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) @@ -61,22 +62,22 @@ impl pallet_xcm::WeightInfo for WeightInfo { /// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn send() -> Weight { // Proof Size summary in bytes: - // Measured: `111` - // Estimated: `3576` - // Minimum execution time: 27_795_000 picoseconds. - Weight::from_parts(28_215_000, 0) - .saturating_add(Weight::from_parts(0, 3576)) - .saturating_add(T::DbWeight::get().reads(5)) + // Measured: `145` + // Estimated: `3610` + // Minimum execution time: 25_050_000 picoseconds. + Weight::from_parts(26_382_000, 0) + .saturating_add(Weight::from_parts(0, 3610)) + .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(2)) } /// Storage: `ParachainInfo::ParachainId` (r:1 w:0) /// Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) fn teleport_assets() -> Weight { // Proof Size summary in bytes: - // Measured: `32` + // Measured: `69` // Estimated: `1489` - // Minimum execution time: 23_847_000 picoseconds. - Weight::from_parts(24_332_000, 0) + // Minimum execution time: 21_625_000 picoseconds. + Weight::from_parts(22_076_000, 0) .saturating_add(Weight::from_parts(0, 1489)) .saturating_add(T::DbWeight::get().reads(1)) } @@ -106,8 +107,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 8_885_000 picoseconds. - Weight::from_parts(9_128_000, 0) + // Minimum execution time: 7_076_000 picoseconds. + Weight::from_parts(7_378_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -117,8 +118,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_670_000 picoseconds. - Weight::from_parts(2_815_000, 0) + // Minimum execution time: 2_327_000 picoseconds. 
+ Weight::from_parts(2_454_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -126,6 +127,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { /// Proof: `PolkadotXcm::VersionNotifiers` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::QueryCounter` (r:1 w:1) /// Proof: `PolkadotXcm::QueryCounter` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) + /// Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) @@ -140,16 +143,18 @@ impl pallet_xcm::WeightInfo for WeightInfo { /// Proof: `PolkadotXcm::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`) fn force_subscribe_version_notify() -> Weight { // Proof Size summary in bytes: - // Measured: `111` - // Estimated: `3576` - // Minimum execution time: 32_214_000 picoseconds. - Weight::from_parts(32_989_000, 0) - .saturating_add(Weight::from_parts(0, 3576)) - .saturating_add(T::DbWeight::get().reads(7)) + // Measured: `145` + // Estimated: `3610` + // Minimum execution time: 29_080_000 picoseconds. + Weight::from_parts(29_886_000, 0) + .saturating_add(Weight::from_parts(0, 3610)) + .saturating_add(T::DbWeight::get().reads(8)) .saturating_add(T::DbWeight::get().writes(5)) } /// Storage: `PolkadotXcm::VersionNotifiers` (r:1 w:1) /// Proof: `PolkadotXcm::VersionNotifiers` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) + /// Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) @@ -164,12 +169,12 @@ impl pallet_xcm::WeightInfo for WeightInfo { /// Proof: `PolkadotXcm::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`) fn force_unsubscribe_version_notify() -> Weight { // Proof Size summary in bytes: - // Measured: `294` - // Estimated: `3759` - // Minimum execution time: 33_638_000 picoseconds. - Weight::from_parts(34_206_000, 0) - .saturating_add(Weight::from_parts(0, 3759)) - .saturating_add(T::DbWeight::get().reads(6)) + // Measured: `363` + // Estimated: `3828` + // Minimum execution time: 30_746_000 picoseconds. + Weight::from_parts(31_631_000, 0) + .saturating_add(Weight::from_parts(0, 3828)) + .saturating_add(T::DbWeight::get().reads(7)) .saturating_add(T::DbWeight::get().writes(4)) } /// Storage: `PolkadotXcm::XcmExecutionSuspended` (r:0 w:1) @@ -178,8 +183,8 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_602_000 picoseconds. - Weight::from_parts(2_730_000, 0) + // Minimum execution time: 2_208_000 picoseconds. 
+ Weight::from_parts(2_341_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -187,11 +192,11 @@ impl pallet_xcm::WeightInfo for WeightInfo { /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) fn migrate_supported_version() -> Weight { // Proof Size summary in bytes: - // Measured: `129` - // Estimated: `11019` - // Minimum execution time: 16_199_000 picoseconds. - Weight::from_parts(16_833_000, 0) - .saturating_add(Weight::from_parts(0, 11019)) + // Measured: `162` + // Estimated: `11052` + // Minimum execution time: 16_239_000 picoseconds. + Weight::from_parts(16_881_000, 0) + .saturating_add(Weight::from_parts(0, 11052)) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(2)) } @@ -199,11 +204,11 @@ impl pallet_xcm::WeightInfo for WeightInfo { /// Proof: `PolkadotXcm::VersionNotifiers` (`max_values`: None, `max_size`: None, mode: `Measured`) fn migrate_version_notifiers() -> Weight { // Proof Size summary in bytes: - // Measured: `133` - // Estimated: `11023` - // Minimum execution time: 16_561_000 picoseconds. - Weight::from_parts(16_872_000, 0) - .saturating_add(Weight::from_parts(0, 11023)) + // Measured: `166` + // Estimated: `11056` + // Minimum execution time: 16_711_000 picoseconds. + Weight::from_parts(16_944_000, 0) + .saturating_add(Weight::from_parts(0, 11056)) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(2)) } @@ -211,15 +216,17 @@ impl pallet_xcm::WeightInfo for WeightInfo { /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) fn already_notified_target() -> Weight { // Proof Size summary in bytes: - // Measured: `140` - // Estimated: `13505` - // Minimum execution time: 17_812_000 picoseconds. - Weight::from_parts(20_036_000, 0) - .saturating_add(Weight::from_parts(0, 13505)) + // Measured: `173` + // Estimated: `13538` + // Minimum execution time: 18_142_000 picoseconds. + Weight::from_parts(18_470_000, 0) + .saturating_add(Weight::from_parts(0, 13538)) .saturating_add(T::DbWeight::get().reads(5)) } /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:2 w:1) /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) + /// Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) @@ -232,39 +239,41 @@ impl pallet_xcm::WeightInfo for WeightInfo { /// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn notify_current_targets() -> Weight { // Proof Size summary in bytes: - // Measured: `178` - // Estimated: `6118` - // Minimum execution time: 30_153_000 picoseconds. - Weight::from_parts(31_366_000, 0) - .saturating_add(Weight::from_parts(0, 6118)) - .saturating_add(T::DbWeight::get().reads(7)) + // Measured: `212` + // Estimated: `6152` + // Minimum execution time: 27_687_000 picoseconds. 
+ Weight::from_parts(28_250_000, 0) + .saturating_add(Weight::from_parts(0, 6152)) + .saturating_add(T::DbWeight::get().reads(8)) .saturating_add(T::DbWeight::get().writes(3)) } /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:3 w:0) /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) fn notify_target_migration_fail() -> Weight { // Proof Size summary in bytes: - // Measured: `172` - // Estimated: `8587` - // Minimum execution time: 9_465_000 picoseconds. - Weight::from_parts(9_743_000, 0) - .saturating_add(Weight::from_parts(0, 8587)) + // Measured: `206` + // Estimated: `8621` + // Minimum execution time: 9_675_000 picoseconds. + Weight::from_parts(9_992_000, 0) + .saturating_add(Weight::from_parts(0, 8621)) .saturating_add(T::DbWeight::get().reads(3)) } /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:4 w:2) /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) fn migrate_version_notify_targets() -> Weight { // Proof Size summary in bytes: - // Measured: `140` - // Estimated: `11030` - // Minimum execution time: 16_954_000 picoseconds. - Weight::from_parts(19_772_000, 0) - .saturating_add(Weight::from_parts(0, 11030)) + // Measured: `173` + // Estimated: `11063` + // Minimum execution time: 16_597_000 picoseconds. + Weight::from_parts(17_248_000, 0) + .saturating_add(Weight::from_parts(0, 11063)) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(2)) } /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:4 w:2) /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) + /// Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) @@ -277,12 +286,38 @@ impl pallet_xcm::WeightInfo for WeightInfo { /// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn migrate_and_notify_old_targets() -> Weight { // Proof Size summary in bytes: - // Measured: `182` - // Estimated: `11072` - // Minimum execution time: 37_302_000 picoseconds. - Weight::from_parts(38_124_000, 0) - .saturating_add(Weight::from_parts(0, 11072)) - .saturating_add(T::DbWeight::get().reads(9)) + // Measured: `215` + // Estimated: `11105` + // Minimum execution time: 34_649_000 picoseconds. + Weight::from_parts(35_475_000, 0) + .saturating_add(Weight::from_parts(0, 11105)) + .saturating_add(T::DbWeight::get().reads(10)) .saturating_add(T::DbWeight::get().writes(4)) } + /// Storage: `PolkadotXcm::QueryCounter` (r:1 w:1) + /// Proof: `PolkadotXcm::QueryCounter` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::Queries` (r:0 w:1) + /// Proof: `PolkadotXcm::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn new_query() -> Weight { + // Proof Size summary in bytes: + // Measured: `103` + // Estimated: `1588` + // Minimum execution time: 4_619_000 picoseconds. 
+ Weight::from_parts(4_756_000, 0) + .saturating_add(Weight::from_parts(0, 1588)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `PolkadotXcm::Queries` (r:1 w:1) + /// Proof: `PolkadotXcm::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn take_response() -> Weight { + // Proof Size summary in bytes: + // Measured: `7740` + // Estimated: `11205` + // Minimum execution time: 26_721_000 picoseconds. + Weight::from_parts(27_412_000, 0) + .saturating_add(Weight::from_parts(0, 11205)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } } diff --git a/polkadot/runtime/rococo/src/weights/pallet_xcm.rs b/polkadot/runtime/rococo/src/weights/pallet_xcm.rs index 43b4358b890..aafded3f731 100644 --- a/polkadot/runtime/rococo/src/weights/pallet_xcm.rs +++ b/polkadot/runtime/rococo/src/weights/pallet_xcm.rs @@ -17,10 +17,10 @@ //! Autogenerated weights for `pallet_xcm` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-05-26, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-11-07, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `bm5`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` -//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("rococo-dev"), DB CACHE: 1024 +//! HOSTNAME: `runner-yprdrvc7-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("rococo-dev")`, DB CACHE: 1024 // Executed Command: // target/production/polkadot @@ -29,14 +29,13 @@ // --steps=50 // --repeat=20 // --extrinsic=* -// --execution=wasm // --wasm-execution=compiled // --heap-pages=4096 -// --json-file=/var/lib/gitlab-runner/builds/zyw4fam_/0/parity/mirrors/polkadot/.git/.artifacts/bench.json +// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json // --pallet=pallet_xcm // --chain=rococo-dev -// --header=./file_header.txt -// --output=./runtime/rococo/src/weights/ +// --header=./polkadot/file_header.txt +// --output=./polkadot/runtime/rococo/src/weights/ #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -49,58 +48,56 @@ use core::marker::PhantomData; /// Weight functions for `pallet_xcm`. 
pub struct WeightInfo(PhantomData); impl pallet_xcm::WeightInfo for WeightInfo { - /// Storage: Configuration ActiveConfig (r:1 w:0) - /// Proof Skipped: Configuration ActiveConfig (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Dmp DeliveryFeeFactor (r:1 w:0) - /// Proof Skipped: Dmp DeliveryFeeFactor (max_values: None, max_size: None, mode: Measured) - /// Storage: XcmPallet SupportedVersion (r:1 w:0) - /// Proof Skipped: XcmPallet SupportedVersion (max_values: None, max_size: None, mode: Measured) - /// Storage: Dmp DownwardMessageQueues (r:1 w:1) - /// Proof Skipped: Dmp DownwardMessageQueues (max_values: None, max_size: None, mode: Measured) - /// Storage: Dmp DownwardMessageQueueHeads (r:1 w:1) - /// Proof Skipped: Dmp DownwardMessageQueueHeads (max_values: None, max_size: None, mode: Measured) + /// Storage: `Dmp::DeliveryFeeFactor` (r:1 w:0) + /// Proof: `Dmp::DeliveryFeeFactor` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `XcmPallet::SupportedVersion` (r:1 w:0) + /// Proof: `XcmPallet::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueues` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueues` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueueHeads` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueueHeads` (`max_values`: None, `max_size`: None, mode: `Measured`) fn send() -> Weight { // Proof Size summary in bytes: - // Measured: `565` - // Estimated: `4030` - // Minimum execution time: 37_039_000 picoseconds. - Weight::from_parts(37_605_000, 0) - .saturating_add(Weight::from_parts(0, 4030)) - .saturating_add(T::DbWeight::get().reads(7)) - .saturating_add(T::DbWeight::get().writes(3)) + // Measured: `142` + // Estimated: `3607` + // Minimum execution time: 27_328_000 picoseconds. + Weight::from_parts(27_976_000, 0) + .saturating_add(Weight::from_parts(0, 3607)) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(2)) } fn teleport_assets() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 21_646_000 picoseconds. - Weight::from_parts(22_119_000, 0) + // Minimum execution time: 16_280_000 picoseconds. + Weight::from_parts(16_904_000, 0) .saturating_add(Weight::from_parts(0, 0)) } fn reserve_transfer_assets() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 21_353_000 picoseconds. - Weight::from_parts(21_768_000, 0) + // Minimum execution time: 15_869_000 picoseconds. + Weight::from_parts(16_264_000, 0) .saturating_add(Weight::from_parts(0, 0)) } fn execute() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 9_942_000 picoseconds. - Weight::from_parts(10_110_000, 0) + // Minimum execution time: 6_923_000 picoseconds. + Weight::from_parts(7_432_000, 0) .saturating_add(Weight::from_parts(0, 0)) } - /// Storage: XcmPallet SupportedVersion (r:0 w:1) - /// Proof Skipped: XcmPallet SupportedVersion (max_values: None, max_size: None, mode: Measured) + /// Storage: `XcmPallet::SupportedVersion` (r:0 w:1) + /// Proof: `XcmPallet::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) fn force_xcm_version() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 9_951_000 picoseconds. 
- Weight::from_parts(10_182_000, 0) + // Minimum execution time: 7_333_000 picoseconds. + Weight::from_parts(7_566_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -108,171 +105,189 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 3_163_000 picoseconds. - Weight::from_parts(3_298_000, 0) + // Minimum execution time: 2_219_000 picoseconds. + Weight::from_parts(2_375_000, 0) .saturating_add(Weight::from_parts(0, 0)) } - /// Storage: XcmPallet VersionNotifiers (r:1 w:1) - /// Proof Skipped: XcmPallet VersionNotifiers (max_values: None, max_size: None, mode: Measured) - /// Storage: XcmPallet QueryCounter (r:1 w:1) - /// Proof Skipped: XcmPallet QueryCounter (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Configuration ActiveConfig (r:1 w:0) - /// Proof Skipped: Configuration ActiveConfig (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Dmp DeliveryFeeFactor (r:1 w:0) - /// Proof Skipped: Dmp DeliveryFeeFactor (max_values: None, max_size: None, mode: Measured) - /// Storage: XcmPallet SupportedVersion (r:1 w:0) - /// Proof Skipped: XcmPallet SupportedVersion (max_values: None, max_size: None, mode: Measured) - /// Storage: Dmp DownwardMessageQueues (r:1 w:1) - /// Proof Skipped: Dmp DownwardMessageQueues (max_values: None, max_size: None, mode: Measured) - /// Storage: Dmp DownwardMessageQueueHeads (r:1 w:1) - /// Proof Skipped: Dmp DownwardMessageQueueHeads (max_values: None, max_size: None, mode: Measured) - /// Storage: XcmPallet Queries (r:0 w:1) - /// Proof Skipped: XcmPallet Queries (max_values: None, max_size: None, mode: Measured) + /// Storage: `XcmPallet::VersionNotifiers` (r:1 w:1) + /// Proof: `XcmPallet::VersionNotifiers` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `XcmPallet::QueryCounter` (r:1 w:1) + /// Proof: `XcmPallet::QueryCounter` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DeliveryFeeFactor` (r:1 w:0) + /// Proof: `Dmp::DeliveryFeeFactor` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `XcmPallet::SupportedVersion` (r:1 w:0) + /// Proof: `XcmPallet::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueues` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueues` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueueHeads` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueueHeads` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `XcmPallet::Queries` (r:0 w:1) + /// Proof: `XcmPallet::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`) fn force_subscribe_version_notify() -> Weight { // Proof Size summary in bytes: - // Measured: `565` - // Estimated: `4030` - // Minimum execution time: 41_207_000 picoseconds. - Weight::from_parts(41_879_000, 0) - .saturating_add(Weight::from_parts(0, 4030)) - .saturating_add(T::DbWeight::get().reads(9)) - .saturating_add(T::DbWeight::get().writes(6)) + // Measured: `142` + // Estimated: `3607` + // Minimum execution time: 30_650_000 picoseconds. 
+ Weight::from_parts(31_683_000, 0) + .saturating_add(Weight::from_parts(0, 3607)) + .saturating_add(T::DbWeight::get().reads(6)) + .saturating_add(T::DbWeight::get().writes(5)) } - /// Storage: XcmPallet VersionNotifiers (r:1 w:1) - /// Proof Skipped: XcmPallet VersionNotifiers (max_values: None, max_size: None, mode: Measured) - /// Storage: Configuration ActiveConfig (r:1 w:0) - /// Proof Skipped: Configuration ActiveConfig (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Dmp DeliveryFeeFactor (r:1 w:0) - /// Proof Skipped: Dmp DeliveryFeeFactor (max_values: None, max_size: None, mode: Measured) - /// Storage: XcmPallet SupportedVersion (r:1 w:0) - /// Proof Skipped: XcmPallet SupportedVersion (max_values: None, max_size: None, mode: Measured) - /// Storage: Dmp DownwardMessageQueues (r:1 w:1) - /// Proof Skipped: Dmp DownwardMessageQueues (max_values: None, max_size: None, mode: Measured) - /// Storage: Dmp DownwardMessageQueueHeads (r:1 w:1) - /// Proof Skipped: Dmp DownwardMessageQueueHeads (max_values: None, max_size: None, mode: Measured) - /// Storage: XcmPallet Queries (r:0 w:1) - /// Proof Skipped: XcmPallet Queries (max_values: None, max_size: None, mode: Measured) + /// Storage: `XcmPallet::VersionNotifiers` (r:1 w:1) + /// Proof: `XcmPallet::VersionNotifiers` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DeliveryFeeFactor` (r:1 w:0) + /// Proof: `Dmp::DeliveryFeeFactor` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `XcmPallet::SupportedVersion` (r:1 w:0) + /// Proof: `XcmPallet::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueues` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueues` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueueHeads` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueueHeads` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `XcmPallet::Queries` (r:0 w:1) + /// Proof: `XcmPallet::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`) fn force_unsubscribe_version_notify() -> Weight { // Proof Size summary in bytes: - // Measured: `837` - // Estimated: `4302` - // Minimum execution time: 44_763_000 picoseconds. - Weight::from_parts(45_368_000, 0) - .saturating_add(Weight::from_parts(0, 4302)) - .saturating_add(T::DbWeight::get().reads(8)) - .saturating_add(T::DbWeight::get().writes(5)) + // Measured: `322` + // Estimated: `3787` + // Minimum execution time: 37_666_000 picoseconds. + Weight::from_parts(38_920_000, 0) + .saturating_add(Weight::from_parts(0, 3787)) + .saturating_add(T::DbWeight::get().reads(5)) + .saturating_add(T::DbWeight::get().writes(4)) } - /// Storage: XcmPallet XcmExecutionSuspended (r:0 w:1) - /// Proof Skipped: XcmPallet XcmExecutionSuspended (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: `XcmPallet::XcmExecutionSuspended` (r:0 w:1) + /// Proof: `XcmPallet::XcmExecutionSuspended` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn force_suspension() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 3_089_000 picoseconds. - Weight::from_parts(3_246_000, 0) + // Minimum execution time: 2_244_000 picoseconds. 
+ Weight::from_parts(2_425_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: XcmPallet SupportedVersion (r:4 w:2) - /// Proof Skipped: XcmPallet SupportedVersion (max_values: None, max_size: None, mode: Measured) + /// Storage: `XcmPallet::SupportedVersion` (r:4 w:2) + /// Proof: `XcmPallet::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) fn migrate_supported_version() -> Weight { // Proof Size summary in bytes: - // Measured: `229` - // Estimated: `11119` - // Minimum execution time: 16_733_000 picoseconds. - Weight::from_parts(17_354_000, 0) - .saturating_add(Weight::from_parts(0, 11119)) + // Measured: `26` + // Estimated: `10916` + // Minimum execution time: 14_710_000 picoseconds. + Weight::from_parts(15_156_000, 0) + .saturating_add(Weight::from_parts(0, 10916)) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: XcmPallet VersionNotifiers (r:4 w:2) - /// Proof Skipped: XcmPallet VersionNotifiers (max_values: None, max_size: None, mode: Measured) + /// Storage: `XcmPallet::VersionNotifiers` (r:4 w:2) + /// Proof: `XcmPallet::VersionNotifiers` (`max_values`: None, `max_size`: None, mode: `Measured`) fn migrate_version_notifiers() -> Weight { // Proof Size summary in bytes: - // Measured: `233` - // Estimated: `11123` - // Minimum execution time: 16_959_000 picoseconds. - Weight::from_parts(17_306_000, 0) - .saturating_add(Weight::from_parts(0, 11123)) + // Measured: `30` + // Estimated: `10920` + // Minimum execution time: 14_630_000 picoseconds. + Weight::from_parts(15_290_000, 0) + .saturating_add(Weight::from_parts(0, 10920)) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: XcmPallet VersionNotifyTargets (r:5 w:0) - /// Proof Skipped: XcmPallet VersionNotifyTargets (max_values: None, max_size: None, mode: Measured) + /// Storage: `XcmPallet::VersionNotifyTargets` (r:5 w:0) + /// Proof: `XcmPallet::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) fn already_notified_target() -> Weight { // Proof Size summary in bytes: - // Measured: `243` - // Estimated: `13608` - // Minimum execution time: 17_964_000 picoseconds. - Weight::from_parts(18_548_000, 0) - .saturating_add(Weight::from_parts(0, 13608)) + // Measured: `40` + // Estimated: `13405` + // Minimum execution time: 16_686_000 picoseconds. 
+ Weight::from_parts(17_332_000, 0) + .saturating_add(Weight::from_parts(0, 13405)) .saturating_add(T::DbWeight::get().reads(5)) } - /// Storage: XcmPallet VersionNotifyTargets (r:2 w:1) - /// Proof Skipped: XcmPallet VersionNotifyTargets (max_values: None, max_size: None, mode: Measured) - /// Storage: Configuration ActiveConfig (r:1 w:0) - /// Proof Skipped: Configuration ActiveConfig (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Dmp DeliveryFeeFactor (r:1 w:0) - /// Proof Skipped: Dmp DeliveryFeeFactor (max_values: None, max_size: None, mode: Measured) - /// Storage: XcmPallet SupportedVersion (r:1 w:0) - /// Proof Skipped: XcmPallet SupportedVersion (max_values: None, max_size: None, mode: Measured) - /// Storage: Dmp DownwardMessageQueues (r:1 w:1) - /// Proof Skipped: Dmp DownwardMessageQueues (max_values: None, max_size: None, mode: Measured) - /// Storage: Dmp DownwardMessageQueueHeads (r:1 w:1) - /// Proof Skipped: Dmp DownwardMessageQueueHeads (max_values: None, max_size: None, mode: Measured) + /// Storage: `XcmPallet::VersionNotifyTargets` (r:2 w:1) + /// Proof: `XcmPallet::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DeliveryFeeFactor` (r:1 w:0) + /// Proof: `Dmp::DeliveryFeeFactor` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `XcmPallet::SupportedVersion` (r:1 w:0) + /// Proof: `XcmPallet::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueues` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueues` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueueHeads` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueueHeads` (`max_values`: None, `max_size`: None, mode: `Measured`) fn notify_current_targets() -> Weight { // Proof Size summary in bytes: - // Measured: `635` - // Estimated: `6575` - // Minimum execution time: 39_436_000 picoseconds. - Weight::from_parts(39_669_000, 0) - .saturating_add(Weight::from_parts(0, 6575)) - .saturating_add(T::DbWeight::get().reads(9)) - .saturating_add(T::DbWeight::get().writes(4)) + // Measured: `178` + // Estimated: `6118` + // Minimum execution time: 30_180_000 picoseconds. + Weight::from_parts(31_351_000, 0) + .saturating_add(Weight::from_parts(0, 6118)) + .saturating_add(T::DbWeight::get().reads(6)) + .saturating_add(T::DbWeight::get().writes(3)) } - /// Storage: XcmPallet VersionNotifyTargets (r:3 w:0) - /// Proof Skipped: XcmPallet VersionNotifyTargets (max_values: None, max_size: None, mode: Measured) + /// Storage: `XcmPallet::VersionNotifyTargets` (r:3 w:0) + /// Proof: `XcmPallet::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) fn notify_target_migration_fail() -> Weight { // Proof Size summary in bytes: - // Measured: `272` - // Estimated: `8687` - // Minimum execution time: 8_991_000 picoseconds. - Weight::from_parts(9_248_000, 0) - .saturating_add(Weight::from_parts(0, 8687)) + // Measured: `69` + // Estimated: `8484` + // Minimum execution time: 9_624_000 picoseconds. 
+ Weight::from_parts(10_029_000, 0) + .saturating_add(Weight::from_parts(0, 8484)) .saturating_add(T::DbWeight::get().reads(3)) } - /// Storage: XcmPallet VersionNotifyTargets (r:4 w:2) - /// Proof Skipped: XcmPallet VersionNotifyTargets (max_values: None, max_size: None, mode: Measured) + /// Storage: `XcmPallet::VersionNotifyTargets` (r:4 w:2) + /// Proof: `XcmPallet::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) fn migrate_version_notify_targets() -> Weight { // Proof Size summary in bytes: - // Measured: `240` - // Estimated: `11130` - // Minimum execution time: 17_614_000 picoseconds. - Weight::from_parts(17_948_000, 0) - .saturating_add(Weight::from_parts(0, 11130)) + // Measured: `37` + // Estimated: `10927` + // Minimum execution time: 15_139_000 picoseconds. + Weight::from_parts(15_575_000, 0) + .saturating_add(Weight::from_parts(0, 10927)) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: XcmPallet VersionNotifyTargets (r:4 w:2) - /// Proof Skipped: XcmPallet VersionNotifyTargets (max_values: None, max_size: None, mode: Measured) - /// Storage: Configuration ActiveConfig (r:1 w:0) - /// Proof Skipped: Configuration ActiveConfig (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Dmp DeliveryFeeFactor (r:1 w:0) - /// Proof Skipped: Dmp DeliveryFeeFactor (max_values: None, max_size: None, mode: Measured) - /// Storage: XcmPallet SupportedVersion (r:1 w:0) - /// Proof Skipped: XcmPallet SupportedVersion (max_values: None, max_size: None, mode: Measured) - /// Storage: Dmp DownwardMessageQueues (r:1 w:1) - /// Proof Skipped: Dmp DownwardMessageQueues (max_values: None, max_size: None, mode: Measured) - /// Storage: Dmp DownwardMessageQueueHeads (r:1 w:1) - /// Proof Skipped: Dmp DownwardMessageQueueHeads (max_values: None, max_size: None, mode: Measured) + /// Storage: `XcmPallet::VersionNotifyTargets` (r:4 w:2) + /// Proof: `XcmPallet::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DeliveryFeeFactor` (r:1 w:0) + /// Proof: `Dmp::DeliveryFeeFactor` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `XcmPallet::SupportedVersion` (r:1 w:0) + /// Proof: `XcmPallet::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueues` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueues` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueueHeads` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueueHeads` (`max_values`: None, `max_size`: None, mode: `Measured`) fn migrate_and_notify_old_targets() -> Weight { // Proof Size summary in bytes: - // Measured: `639` - // Estimated: `11529` - // Minimum execution time: 45_531_000 picoseconds. - Weight::from_parts(46_533_000, 0) - .saturating_add(Weight::from_parts(0, 11529)) - .saturating_add(T::DbWeight::get().reads(11)) - .saturating_add(T::DbWeight::get().writes(5)) + // Measured: `182` + // Estimated: `11072` + // Minimum execution time: 37_871_000 picoseconds. 
+ Weight::from_parts(38_940_000, 0) + .saturating_add(Weight::from_parts(0, 11072)) + .saturating_add(T::DbWeight::get().reads(8)) + .saturating_add(T::DbWeight::get().writes(4)) + } + /// Storage: `XcmPallet::QueryCounter` (r:1 w:1) + /// Proof: `XcmPallet::QueryCounter` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `XcmPallet::Queries` (r:0 w:1) + /// Proof: `XcmPallet::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn new_query() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `1485` + // Minimum execution time: 2_732_000 picoseconds. + Weight::from_parts(2_892_000, 0) + .saturating_add(Weight::from_parts(0, 1485)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `XcmPallet::Queries` (r:1 w:1) + /// Proof: `XcmPallet::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn take_response() -> Weight { + // Proof Size summary in bytes: + // Measured: `7576` + // Estimated: `11041` + // Minimum execution time: 23_813_000 picoseconds. + Weight::from_parts(24_201_000, 0) + .saturating_add(Weight::from_parts(0, 11041)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) } } diff --git a/polkadot/runtime/westend/src/weights/pallet_xcm.rs b/polkadot/runtime/westend/src/weights/pallet_xcm.rs index 7f2a1de44e9..cca4bdbd91e 100644 --- a/polkadot/runtime/westend/src/weights/pallet_xcm.rs +++ b/polkadot/runtime/westend/src/weights/pallet_xcm.rs @@ -17,15 +17,16 @@ //! Autogenerated weights for `pallet_xcm` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-06-14, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-11-07, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner--ss9ysm1-project-163-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("westend-dev"), DB CACHE: 1024 +//! HOSTNAME: `runner-yprdrvc7-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("westend-dev")`, DB CACHE: 1024 // Executed Command: -// target/production/polkadot +// ./target/production/polkadot // benchmark // pallet +// --chain=westend-dev // --steps=50 // --repeat=20 // --no-storage-info @@ -35,12 +36,8 @@ // --extrinsic=* // --execution=wasm // --wasm-execution=compiled -// --heap-pages=4096 -// --json-file=/var/lib/gitlab-runner/builds/zyw4fam_/0/parity/mirrors/polkadot/.git/.artifacts/bench.json -// --pallet=pallet_xcm -// --chain=westend-dev -// --header=./file_header.txt -// --output=./runtime/westend/src/weights/ +// --header=./polkadot/file_header.txt +// --output=./polkadot/runtime/westend/src/weights/ #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -53,44 +50,42 @@ use core::marker::PhantomData; /// Weight functions for `pallet_xcm`. 
pub struct WeightInfo(PhantomData); impl pallet_xcm::WeightInfo for WeightInfo { - /// Storage: unknown `0x3a696e747261626c6f636b5f656e74726f7079` (r:1 w:1) - /// Proof Skipped: unknown `0x3a696e747261626c6f636b5f656e74726f7079` (r:1 w:1) - /// Storage: Dmp DeliveryFeeFactor (r:1 w:0) - /// Proof Skipped: Dmp DeliveryFeeFactor (max_values: None, max_size: None, mode: Measured) - /// Storage: XcmPallet SupportedVersion (r:1 w:0) - /// Proof Skipped: XcmPallet SupportedVersion (max_values: None, max_size: None, mode: Measured) - /// Storage: Dmp DownwardMessageQueues (r:1 w:1) - /// Proof Skipped: Dmp DownwardMessageQueues (max_values: None, max_size: None, mode: Measured) - /// Storage: Dmp DownwardMessageQueueHeads (r:1 w:1) - /// Proof Skipped: Dmp DownwardMessageQueueHeads (max_values: None, max_size: None, mode: Measured) + /// Storage: `Dmp::DeliveryFeeFactor` (r:1 w:0) + /// Proof: `Dmp::DeliveryFeeFactor` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `XcmPallet::SupportedVersion` (r:1 w:0) + /// Proof: `XcmPallet::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueues` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueues` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueueHeads` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueueHeads` (`max_values`: None, `max_size`: None, mode: `Measured`) fn send() -> Weight { // Proof Size summary in bytes: - // Measured: `169` - // Estimated: `3634` - // Minimum execution time: 33_628_000 picoseconds. - Weight::from_parts(34_633_000, 0) - .saturating_add(Weight::from_parts(0, 3634)) - .saturating_add(T::DbWeight::get().reads(7)) - .saturating_add(T::DbWeight::get().writes(4)) + // Measured: `109` + // Estimated: `3574` + // Minimum execution time: 28_098_000 picoseconds. + Weight::from_parts(28_887_000, 0) + .saturating_add(Weight::from_parts(0, 3574)) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(2)) } fn teleport_assets() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 21_535_000 picoseconds. - Weight::from_parts(21_936_000, 0) + // Minimum execution time: 17_609_000 picoseconds. + Weight::from_parts(18_000_000, 0) .saturating_add(Weight::from_parts(0, 0)) } fn reserve_transfer_assets() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 21_576_000 picoseconds. - Weight::from_parts(21_942_000, 0) + // Minimum execution time: 17_007_000 picoseconds. 
+ Weight::from_parts(17_471_000, 0) .saturating_add(Weight::from_parts(0, 0)) } - /// Storage: Benchmark Override (r:0 w:0) - /// Proof Skipped: Benchmark Override (max_values: None, max_size: None, mode: Measured) + /// Storage: `Benchmark::Override` (r:0 w:0) + /// Proof: `Benchmark::Override` (`max_values`: None, `max_size`: None, mode: `Measured`) fn execute() -> Weight { // Proof Size summary in bytes: // Measured: `0` @@ -99,14 +94,14 @@ impl pallet_xcm::WeightInfo for WeightInfo { Weight::from_parts(18_446_744_073_709_551_000, 0) .saturating_add(Weight::from_parts(0, 0)) } - /// Storage: XcmPallet SupportedVersion (r:0 w:1) - /// Proof Skipped: XcmPallet SupportedVersion (max_values: None, max_size: None, mode: Measured) + /// Storage: `XcmPallet::SupportedVersion` (r:0 w:1) + /// Proof: `XcmPallet::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) fn force_xcm_version() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 9_764_000 picoseconds. - Weight::from_parts(9_927_000, 0) + // Minimum execution time: 7_444_000 picoseconds. + Weight::from_parts(7_671_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -114,171 +109,189 @@ impl pallet_xcm::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_824_000 picoseconds. - Weight::from_parts(2_935_000, 0) + // Minimum execution time: 2_126_000 picoseconds. + Weight::from_parts(2_253_000, 0) .saturating_add(Weight::from_parts(0, 0)) } - /// Storage: XcmPallet VersionNotifiers (r:1 w:1) - /// Proof Skipped: XcmPallet VersionNotifiers (max_values: None, max_size: None, mode: Measured) - /// Storage: XcmPallet QueryCounter (r:1 w:1) - /// Proof Skipped: XcmPallet QueryCounter (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: unknown `0x3a696e747261626c6f636b5f656e74726f7079` (r:1 w:1) - /// Proof Skipped: unknown `0x3a696e747261626c6f636b5f656e74726f7079` (r:1 w:1) - /// Storage: Dmp DeliveryFeeFactor (r:1 w:0) - /// Proof Skipped: Dmp DeliveryFeeFactor (max_values: None, max_size: None, mode: Measured) - /// Storage: XcmPallet SupportedVersion (r:1 w:0) - /// Proof Skipped: XcmPallet SupportedVersion (max_values: None, max_size: None, mode: Measured) - /// Storage: Dmp DownwardMessageQueues (r:1 w:1) - /// Proof Skipped: Dmp DownwardMessageQueues (max_values: None, max_size: None, mode: Measured) - /// Storage: Dmp DownwardMessageQueueHeads (r:1 w:1) - /// Proof Skipped: Dmp DownwardMessageQueueHeads (max_values: None, max_size: None, mode: Measured) - /// Storage: XcmPallet Queries (r:0 w:1) - /// Proof Skipped: XcmPallet Queries (max_values: None, max_size: None, mode: Measured) + /// Storage: `XcmPallet::VersionNotifiers` (r:1 w:1) + /// Proof: `XcmPallet::VersionNotifiers` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `XcmPallet::QueryCounter` (r:1 w:1) + /// Proof: `XcmPallet::QueryCounter` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DeliveryFeeFactor` (r:1 w:0) + /// Proof: `Dmp::DeliveryFeeFactor` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `XcmPallet::SupportedVersion` (r:1 w:0) + /// Proof: `XcmPallet::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueues` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueues` (`max_values`: None, `max_size`: None, mode: 
`Measured`) + /// Storage: `Dmp::DownwardMessageQueueHeads` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueueHeads` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `XcmPallet::Queries` (r:0 w:1) + /// Proof: `XcmPallet::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`) fn force_subscribe_version_notify() -> Weight { // Proof Size summary in bytes: - // Measured: `169` - // Estimated: `3634` - // Minimum execution time: 38_436_000 picoseconds. - Weight::from_parts(39_300_000, 0) - .saturating_add(Weight::from_parts(0, 3634)) - .saturating_add(T::DbWeight::get().reads(9)) - .saturating_add(T::DbWeight::get().writes(7)) + // Measured: `109` + // Estimated: `3574` + // Minimum execution time: 31_318_000 picoseconds. + Weight::from_parts(32_413_000, 0) + .saturating_add(Weight::from_parts(0, 3574)) + .saturating_add(T::DbWeight::get().reads(6)) + .saturating_add(T::DbWeight::get().writes(5)) } - /// Storage: XcmPallet VersionNotifiers (r:1 w:1) - /// Proof Skipped: XcmPallet VersionNotifiers (max_values: None, max_size: None, mode: Measured) - /// Storage: unknown `0x3a696e747261626c6f636b5f656e74726f7079` (r:1 w:1) - /// Proof Skipped: unknown `0x3a696e747261626c6f636b5f656e74726f7079` (r:1 w:1) - /// Storage: Dmp DeliveryFeeFactor (r:1 w:0) - /// Proof Skipped: Dmp DeliveryFeeFactor (max_values: None, max_size: None, mode: Measured) - /// Storage: XcmPallet SupportedVersion (r:1 w:0) - /// Proof Skipped: XcmPallet SupportedVersion (max_values: None, max_size: None, mode: Measured) - /// Storage: Dmp DownwardMessageQueues (r:1 w:1) - /// Proof Skipped: Dmp DownwardMessageQueues (max_values: None, max_size: None, mode: Measured) - /// Storage: Dmp DownwardMessageQueueHeads (r:1 w:1) - /// Proof Skipped: Dmp DownwardMessageQueueHeads (max_values: None, max_size: None, mode: Measured) - /// Storage: XcmPallet Queries (r:0 w:1) - /// Proof Skipped: XcmPallet Queries (max_values: None, max_size: None, mode: Measured) + /// Storage: `XcmPallet::VersionNotifiers` (r:1 w:1) + /// Proof: `XcmPallet::VersionNotifiers` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DeliveryFeeFactor` (r:1 w:0) + /// Proof: `Dmp::DeliveryFeeFactor` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `XcmPallet::SupportedVersion` (r:1 w:0) + /// Proof: `XcmPallet::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueues` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueues` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueueHeads` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueueHeads` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `XcmPallet::Queries` (r:0 w:1) + /// Proof: `XcmPallet::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`) fn force_unsubscribe_version_notify() -> Weight { // Proof Size summary in bytes: - // Measured: `361` - // Estimated: `3826` - // Minimum execution time: 41_600_000 picoseconds. - Weight::from_parts(42_703_000, 0) - .saturating_add(Weight::from_parts(0, 3826)) - .saturating_add(T::DbWeight::get().reads(8)) - .saturating_add(T::DbWeight::get().writes(6)) + // Measured: `289` + // Estimated: `3754` + // Minimum execution time: 35_282_000 picoseconds. 
+ Weight::from_parts(35_969_000, 0) + .saturating_add(Weight::from_parts(0, 3754)) + .saturating_add(T::DbWeight::get().reads(5)) + .saturating_add(T::DbWeight::get().writes(4)) } - /// Storage: XcmPallet XcmExecutionSuspended (r:0 w:1) - /// Proof Skipped: XcmPallet XcmExecutionSuspended (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: `XcmPallet::XcmExecutionSuspended` (r:0 w:1) + /// Proof: `XcmPallet::XcmExecutionSuspended` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn force_suspension() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_792_000 picoseconds. - Weight::from_parts(2_958_000, 0) + // Minimum execution time: 2_247_000 picoseconds. + Weight::from_parts(2_381_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: XcmPallet SupportedVersion (r:4 w:2) - /// Proof Skipped: XcmPallet SupportedVersion (max_values: None, max_size: None, mode: Measured) + /// Storage: `XcmPallet::SupportedVersion` (r:4 w:2) + /// Proof: `XcmPallet::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) fn migrate_supported_version() -> Weight { // Proof Size summary in bytes: - // Measured: `229` - // Estimated: `11119` - // Minimum execution time: 17_640_000 picoseconds. - Weight::from_parts(18_011_000, 0) - .saturating_add(Weight::from_parts(0, 11119)) + // Measured: `26` + // Estimated: `10916` + // Minimum execution time: 14_512_000 picoseconds. + Weight::from_parts(15_042_000, 0) + .saturating_add(Weight::from_parts(0, 10916)) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: XcmPallet VersionNotifiers (r:4 w:2) - /// Proof Skipped: XcmPallet VersionNotifiers (max_values: None, max_size: None, mode: Measured) + /// Storage: `XcmPallet::VersionNotifiers` (r:4 w:2) + /// Proof: `XcmPallet::VersionNotifiers` (`max_values`: None, `max_size`: None, mode: `Measured`) fn migrate_version_notifiers() -> Weight { // Proof Size summary in bytes: - // Measured: `233` - // Estimated: `11123` - // Minimum execution time: 17_325_000 picoseconds. - Weight::from_parts(17_896_000, 0) - .saturating_add(Weight::from_parts(0, 11123)) + // Measured: `30` + // Estimated: `10920` + // Minimum execution time: 14_659_000 picoseconds. + Weight::from_parts(15_164_000, 0) + .saturating_add(Weight::from_parts(0, 10920)) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: XcmPallet VersionNotifyTargets (r:5 w:0) - /// Proof Skipped: XcmPallet VersionNotifyTargets (max_values: None, max_size: None, mode: Measured) + /// Storage: `XcmPallet::VersionNotifyTargets` (r:5 w:0) + /// Proof: `XcmPallet::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) fn already_notified_target() -> Weight { // Proof Size summary in bytes: - // Measured: `243` - // Estimated: `13608` - // Minimum execution time: 19_295_000 picoseconds. - Weight::from_parts(19_840_000, 0) - .saturating_add(Weight::from_parts(0, 13608)) + // Measured: `40` + // Estimated: `13405` + // Minimum execution time: 16_261_000 picoseconds. 
+ Weight::from_parts(16_986_000, 0) + .saturating_add(Weight::from_parts(0, 13405)) .saturating_add(T::DbWeight::get().reads(5)) } - /// Storage: XcmPallet VersionNotifyTargets (r:2 w:1) - /// Proof Skipped: XcmPallet VersionNotifyTargets (max_values: None, max_size: None, mode: Measured) - /// Storage: unknown `0x3a696e747261626c6f636b5f656e74726f7079` (r:1 w:1) - /// Proof Skipped: unknown `0x3a696e747261626c6f636b5f656e74726f7079` (r:1 w:1) - /// Storage: Dmp DeliveryFeeFactor (r:1 w:0) - /// Proof Skipped: Dmp DeliveryFeeFactor (max_values: None, max_size: None, mode: Measured) - /// Storage: XcmPallet SupportedVersion (r:1 w:0) - /// Proof Skipped: XcmPallet SupportedVersion (max_values: None, max_size: None, mode: Measured) - /// Storage: Dmp DownwardMessageQueues (r:1 w:1) - /// Proof Skipped: Dmp DownwardMessageQueues (max_values: None, max_size: None, mode: Measured) - /// Storage: Dmp DownwardMessageQueueHeads (r:1 w:1) - /// Proof Skipped: Dmp DownwardMessageQueueHeads (max_values: None, max_size: None, mode: Measured) + /// Storage: `XcmPallet::VersionNotifyTargets` (r:2 w:1) + /// Proof: `XcmPallet::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DeliveryFeeFactor` (r:1 w:0) + /// Proof: `Dmp::DeliveryFeeFactor` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `XcmPallet::SupportedVersion` (r:1 w:0) + /// Proof: `XcmPallet::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueues` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueues` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueueHeads` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueueHeads` (`max_values`: None, `max_size`: None, mode: `Measured`) fn notify_current_targets() -> Weight { // Proof Size summary in bytes: - // Measured: `239` - // Estimated: `6179` - // Minimum execution time: 35_819_000 picoseconds. - Weight::from_parts(36_708_000, 0) - .saturating_add(Weight::from_parts(0, 6179)) - .saturating_add(T::DbWeight::get().reads(9)) - .saturating_add(T::DbWeight::get().writes(5)) + // Measured: `145` + // Estimated: `6085` + // Minimum execution time: 30_539_000 picoseconds. + Weight::from_parts(31_117_000, 0) + .saturating_add(Weight::from_parts(0, 6085)) + .saturating_add(T::DbWeight::get().reads(6)) + .saturating_add(T::DbWeight::get().writes(3)) } - /// Storage: XcmPallet VersionNotifyTargets (r:3 w:0) - /// Proof Skipped: XcmPallet VersionNotifyTargets (max_values: None, max_size: None, mode: Measured) + /// Storage: `XcmPallet::VersionNotifyTargets` (r:3 w:0) + /// Proof: `XcmPallet::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) fn notify_target_migration_fail() -> Weight { // Proof Size summary in bytes: - // Measured: `272` - // Estimated: `8687` - // Minimum execution time: 9_572_000 picoseconds. - Weight::from_parts(9_907_000, 0) - .saturating_add(Weight::from_parts(0, 8687)) + // Measured: `69` + // Estimated: `8484` + // Minimum execution time: 9_463_000 picoseconds. 
+ Weight::from_parts(9_728_000, 0) + .saturating_add(Weight::from_parts(0, 8484)) .saturating_add(T::DbWeight::get().reads(3)) } - /// Storage: XcmPallet VersionNotifyTargets (r:4 w:2) - /// Proof Skipped: XcmPallet VersionNotifyTargets (max_values: None, max_size: None, mode: Measured) + /// Storage: `XcmPallet::VersionNotifyTargets` (r:4 w:2) + /// Proof: `XcmPallet::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) fn migrate_version_notify_targets() -> Weight { // Proof Size summary in bytes: - // Measured: `240` - // Estimated: `11130` - // Minimum execution time: 17_376_000 picoseconds. - Weight::from_parts(17_870_000, 0) - .saturating_add(Weight::from_parts(0, 11130)) + // Measured: `37` + // Estimated: `10927` + // Minimum execution time: 15_169_000 picoseconds. + Weight::from_parts(15_694_000, 0) + .saturating_add(Weight::from_parts(0, 10927)) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: XcmPallet VersionNotifyTargets (r:4 w:2) - /// Proof Skipped: XcmPallet VersionNotifyTargets (max_values: None, max_size: None, mode: Measured) - /// Storage: unknown `0x3a696e747261626c6f636b5f656e74726f7079` (r:1 w:1) - /// Proof Skipped: unknown `0x3a696e747261626c6f636b5f656e74726f7079` (r:1 w:1) - /// Storage: Dmp DeliveryFeeFactor (r:1 w:0) - /// Proof Skipped: Dmp DeliveryFeeFactor (max_values: None, max_size: None, mode: Measured) - /// Storage: XcmPallet SupportedVersion (r:1 w:0) - /// Proof Skipped: XcmPallet SupportedVersion (max_values: None, max_size: None, mode: Measured) - /// Storage: Dmp DownwardMessageQueues (r:1 w:1) - /// Proof Skipped: Dmp DownwardMessageQueues (max_values: None, max_size: None, mode: Measured) - /// Storage: Dmp DownwardMessageQueueHeads (r:1 w:1) - /// Proof Skipped: Dmp DownwardMessageQueueHeads (max_values: None, max_size: None, mode: Measured) + /// Storage: `XcmPallet::VersionNotifyTargets` (r:4 w:2) + /// Proof: `XcmPallet::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DeliveryFeeFactor` (r:1 w:0) + /// Proof: `Dmp::DeliveryFeeFactor` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `XcmPallet::SupportedVersion` (r:1 w:0) + /// Proof: `XcmPallet::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueues` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueues` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueueHeads` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueueHeads` (`max_values`: None, `max_size`: None, mode: `Measured`) fn migrate_and_notify_old_targets() -> Weight { // Proof Size summary in bytes: - // Measured: `243` - // Estimated: `11133` - // Minimum execution time: 43_468_000 picoseconds. - Weight::from_parts(44_327_000, 0) - .saturating_add(Weight::from_parts(0, 11133)) - .saturating_add(T::DbWeight::get().reads(11)) - .saturating_add(T::DbWeight::get().writes(6)) + // Measured: `149` + // Estimated: `11039` + // Minimum execution time: 37_549_000 picoseconds. 
+ Weight::from_parts(38_203_000, 0) + .saturating_add(Weight::from_parts(0, 11039)) + .saturating_add(T::DbWeight::get().reads(8)) + .saturating_add(T::DbWeight::get().writes(4)) + } + /// Storage: `XcmPallet::QueryCounter` (r:1 w:1) + /// Proof: `XcmPallet::QueryCounter` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `XcmPallet::Queries` (r:0 w:1) + /// Proof: `XcmPallet::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn new_query() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `1485` + // Minimum execution time: 2_947_000 picoseconds. + Weight::from_parts(3_117_000, 0) + .saturating_add(Weight::from_parts(0, 1485)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `XcmPallet::Queries` (r:1 w:1) + /// Proof: `XcmPallet::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn take_response() -> Weight { + // Proof Size summary in bytes: + // Measured: `7576` + // Estimated: `11041` + // Minimum execution time: 24_595_000 picoseconds. + Weight::from_parts(24_907_000, 0) + .saturating_add(Weight::from_parts(0, 11041)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) } } diff --git a/polkadot/xcm/pallet-xcm/Cargo.toml b/polkadot/xcm/pallet-xcm/Cargo.toml index da472fbe6db..6b5d5e75de8 100644 --- a/polkadot/xcm/pallet-xcm/Cargo.toml +++ b/polkadot/xcm/pallet-xcm/Cargo.toml @@ -23,12 +23,12 @@ sp-std = { path = "../../../substrate/primitives/std", default-features = false} xcm = { package = "staging-xcm", path = "..", default-features = false } xcm-executor = { package = "staging-xcm-executor", path = "../xcm-executor", default-features = false } +xcm-builder = { package = "staging-xcm-builder", path = "../xcm-builder", default-features = false } [dev-dependencies] pallet-balances = { path = "../../../substrate/frame/balances" } polkadot-runtime-parachains = { path = "../../runtime/parachains" } polkadot-parachain-primitives = { path = "../../parachain" } -xcm-builder = { package = "staging-xcm-builder", path = "../xcm-builder" } [features] default = [ "std" ] @@ -45,6 +45,7 @@ std = [ "sp-io/std", "sp-runtime/std", "sp-std/std", + "xcm-builder/std", "xcm-executor/std", "xcm/std", ] diff --git a/polkadot/xcm/pallet-xcm/src/benchmarking.rs b/polkadot/xcm/pallet-xcm/src/benchmarking.rs index aca4bd1fb3f..3eecbfec518 100644 --- a/polkadot/xcm/pallet-xcm/src/benchmarking.rs +++ b/polkadot/xcm/pallet-xcm/src/benchmarking.rs @@ -190,6 +190,33 @@ benchmarks! { Pallet::::check_xcm_version_change(VersionMigrationStage::MigrateAndNotifyOldTargets, Weight::zero()); } + new_query { + let responder = MultiLocation::from(Parent); + let timeout = 1u32.into(); + let match_querier = MultiLocation::from(Here); + }: { + Pallet::::new_query(responder, timeout, match_querier); + } + + take_response { + let responder = MultiLocation::from(Parent); + let timeout = 1u32.into(); + let match_querier = MultiLocation::from(Here); + let query_id = Pallet::::new_query(responder, timeout, match_querier); + let infos = (0 .. 
xcm::v3::MaxPalletsInfo::get()).map(|_| PalletInfo::new( + u32::MAX, + (0..xcm::v3::MaxPalletNameLen::get()).map(|_| 97u8).collect::>().try_into().unwrap(), + (0..xcm::v3::MaxPalletNameLen::get()).map(|_| 97u8).collect::>().try_into().unwrap(), + u32::MAX, + u32::MAX, + u32::MAX, + ).unwrap()).collect::>(); + Pallet::::expect_response(query_id, Response::PalletsInfo(infos.try_into().unwrap())); + + }: { + as QueryHandler>::take_response(query_id); + } + impl_benchmark_test_suite!( Pallet, crate::mock::new_test_ext_with_balances(Vec::new()), diff --git a/polkadot/xcm/pallet-xcm/src/lib.rs b/polkadot/xcm/pallet-xcm/src/lib.rs index 321bb294b88..2d969fb870c 100644 --- a/polkadot/xcm/pallet-xcm/src/lib.rs +++ b/polkadot/xcm/pallet-xcm/src/lib.rs @@ -28,9 +28,17 @@ mod tests; pub mod migration; use codec::{Decode, Encode, EncodeLike, MaxEncodedLen}; -use frame_support::traits::{ - Contains, ContainsPair, Currency, Defensive, EnsureOrigin, Get, LockableCurrency, OriginTrait, +use frame_support::{ + dispatch::GetDispatchInfo, + pallet_prelude::*, + traits::{ + Contains, ContainsPair, Currency, Defensive, EnsureOrigin, Get, LockableCurrency, + OriginTrait, WithdrawReasons, + }, + PalletId, }; +use frame_system::pallet_prelude::{BlockNumberFor, *}; +pub use pallet::*; use scale_info::TypeInfo; use sp_runtime::{ traits::{ @@ -41,17 +49,15 @@ use sp_runtime::{ }; use sp_std::{boxed::Box, marker::PhantomData, prelude::*, result::Result, vec}; use xcm::{latest::QueryResponseInfo, prelude::*}; -use xcm_executor::traits::{ConvertOrigin, Properties}; - -use frame_support::{ - dispatch::GetDispatchInfo, pallet_prelude::*, traits::WithdrawReasons, PalletId, +use xcm_builder::{ + ExecuteController, ExecuteControllerWeightInfo, QueryController, QueryControllerWeightInfo, + SendController, SendControllerWeightInfo, }; -use frame_system::pallet_prelude::*; -pub use pallet::*; use xcm_executor::{ traits::{ - CheckSuspension, ClaimAssets, ConvertLocation, DropAssets, MatchesFungible, OnResponse, - QueryHandler, QueryResponseStatus, VersionChangeNotifier, WeightBounds, + CheckSuspension, ClaimAssets, ConvertLocation, ConvertOrigin, DropAssets, MatchesFungible, + OnResponse, Properties, QueryHandler, QueryResponseStatus, VersionChangeNotifier, + WeightBounds, }, Assets, }; @@ -73,6 +79,8 @@ pub trait WeightInfo { fn notify_target_migration_fail() -> Weight; fn migrate_version_notify_targets() -> Weight; fn migrate_and_notify_old_targets() -> Weight; + fn new_query() -> Weight; + fn take_response() -> Weight; } /// fallback implementation @@ -141,6 +149,14 @@ impl WeightInfo for TestWeightInfo { fn migrate_and_notify_old_targets() -> Weight { Weight::from_parts(100_000_000, 0) } + + fn new_query() -> Weight { + Weight::from_parts(100_000_000, 0) + } + + fn take_response() -> Weight { + Weight::from_parts(100_000_000, 0) + } } #[frame_support::pallet] @@ -267,6 +283,93 @@ pub mod pallet { type ReachableDest: Get>; } + impl ExecuteControllerWeightInfo for Pallet { + fn execute() -> Weight { + T::WeightInfo::execute() + } + } + + impl ExecuteController, ::RuntimeCall> for Pallet { + type WeightInfo = Self; + fn execute( + origin: OriginFor, + message: Box::RuntimeCall>>, + max_weight: Weight, + ) -> Result { + let origin_location = T::ExecuteXcmOrigin::ensure_origin(origin)?; + let hash = message.using_encoded(sp_io::hashing::blake2_256); + let message = (*message).try_into().map_err(|()| Error::::BadVersion)?; + let value = (origin_location, message); + ensure!(T::XcmExecuteFilter::contains(&value), 
Error::::Filtered); + let (origin_location, message) = value; + let outcome = T::XcmExecutor::execute_xcm_in_credit( + origin_location, + message, + hash, + max_weight, + max_weight, + ); + Self::deposit_event(Event::Attempted { outcome: outcome.clone() }); + Ok(outcome) + } + } + + impl SendControllerWeightInfo for Pallet { + fn send() -> Weight { + T::WeightInfo::send() + } + } + + impl SendController> for Pallet { + type WeightInfo = Self; + fn send( + origin: OriginFor, + dest: Box, + message: Box>, + ) -> Result { + let origin_location = T::SendXcmOrigin::ensure_origin(origin)?; + let interior: Junctions = + origin_location.try_into().map_err(|_| Error::::InvalidOrigin)?; + let dest = MultiLocation::try_from(*dest).map_err(|()| Error::::BadVersion)?; + let message: Xcm<()> = (*message).try_into().map_err(|()| Error::::BadVersion)?; + + let message_id = + Self::send_xcm(interior, dest, message.clone()).map_err(Error::::from)?; + let e = Event::Sent { origin: origin_location, destination: dest, message, message_id }; + Self::deposit_event(e); + Ok(message_id) + } + } + + impl QueryControllerWeightInfo for Pallet { + fn query() -> Weight { + T::WeightInfo::new_query() + } + fn take_response() -> Weight { + T::WeightInfo::take_response() + } + } + + impl QueryController, BlockNumberFor> for Pallet { + type WeightInfo = Self; + + fn query( + origin: OriginFor, + timeout: BlockNumberFor, + match_querier: VersionedMultiLocation, + ) -> Result { + let responder = ::ExecuteXcmOrigin::ensure_origin(origin)?; + let query_id = ::new_query( + responder, + timeout, + MultiLocation::try_from(match_querier) + .map_err(|_| Into::::into(Error::::BadVersion))?, + ); + + Ok(query_id) + } + } + #[pallet::event] #[pallet::generate_deposit(pub(super) fn deposit_event)] pub enum Event { @@ -771,16 +874,7 @@ pub mod pallet { dest: Box, message: Box>, ) -> DispatchResult { - let origin_location = T::SendXcmOrigin::ensure_origin(origin)?; - let interior: Junctions = - origin_location.try_into().map_err(|_| Error::::InvalidOrigin)?; - let dest = MultiLocation::try_from(*dest).map_err(|()| Error::::BadVersion)?; - let message: Xcm<()> = (*message).try_into().map_err(|()| Error::::BadVersion)?; - - let message_id = - Self::send_xcm(interior, dest, message.clone()).map_err(Error::::from)?; - let e = Event::Sent { origin: origin_location, destination: dest, message, message_id }; - Self::deposit_event(e); + >::send(origin, dest, message)?; Ok(()) } @@ -896,7 +990,7 @@ pub mod pallet { /// execution attempt will be made. /// /// NOTE: A successful return to this does *not* imply that the `msg` was executed - /// successfully to completion; only that *some* of it was executed. + /// successfully to completion; only that it was attempted. 
#[pallet::call_index(3)] #[pallet::weight(max_weight.saturating_add(T::WeightInfo::execute()))] pub fn execute( @@ -904,23 +998,8 @@ pub mod pallet { message: Box::RuntimeCall>>, max_weight: Weight, ) -> DispatchResultWithPostInfo { - let origin_location = T::ExecuteXcmOrigin::ensure_origin(origin)?; - let hash = message.using_encoded(sp_io::hashing::blake2_256); - let message = (*message).try_into().map_err(|()| Error::::BadVersion)?; - let value = (origin_location, message); - ensure!(T::XcmExecuteFilter::contains(&value), Error::::Filtered); - let (origin_location, message) = value; - let outcome = T::XcmExecutor::execute_xcm_in_credit( - origin_location, - message, - hash, - max_weight, - max_weight, - ); - let result = - Ok(Some(outcome.weight_used().saturating_add(T::WeightInfo::execute())).into()); - Self::deposit_event(Event::Attempted { outcome }); - result + let outcome = >::execute(origin, message, max_weight)?; + Ok(Some(outcome.weight_used().saturating_add(T::WeightInfo::execute())).into()) } /// Extoll that a particular destination can be communicated with through a particular @@ -1145,7 +1224,7 @@ impl QueryHandler for Pallet { timeout: BlockNumberFor, match_querier: impl Into, ) -> Self::QueryId { - Self::do_new_query(responder, None, timeout, match_querier).into() + Self::do_new_query(responder, None, timeout, match_querier) } /// To check the status of the query, use `fn query()` passing the resultant `QueryId` diff --git a/polkadot/xcm/xcm-builder/src/controller.rs b/polkadot/xcm/xcm-builder/src/controller.rs new file mode 100644 index 00000000000..0ee638b73e1 --- /dev/null +++ b/polkadot/xcm/xcm-builder/src/controller.rs @@ -0,0 +1,187 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! A set of traits that define how a pallet interface with XCM. +//! Controller traits defined in this module are high-level traits that will rely on other traits +//! from `xcm-executor` to perform their tasks. + +use frame_support::pallet_prelude::DispatchError; +use sp_std::boxed::Box; +use xcm::prelude::*; +use xcm_executor::traits::QueryHandler; + +/// Umbrella trait for all Controller traits. +pub trait Controller: + ExecuteController + SendController + QueryController +{ +} + +impl Controller for T where + T: ExecuteController + + SendController + + QueryController +{ +} + +/// Weight functions needed for [`ExecuteController`]. +pub trait ExecuteControllerWeightInfo { + /// Weight for [`ExecuteController::execute`] + fn execute() -> Weight; +} + +/// Execute an XCM locally, for a given origin. +/// +/// An implementation of that trait will handle the low-level details of the execution, such as: +/// - Validating and Converting the origin to a MultiLocation. +/// - Handling versioning. +/// - Calling the internal executor, which implements [`ExecuteXcm`]. +pub trait ExecuteController { + /// Weight information for ExecuteController functions. 
+ type WeightInfo: ExecuteControllerWeightInfo; + + /// Attempt to execute an XCM locally, and return the outcome. + /// + /// # Parameters + /// + /// - `origin`: the origin of the call. + /// - `message`: the XCM program to be executed. + /// - `max_weight`: the maximum weight that can be consumed by the execution. + fn execute( + origin: Origin, + message: Box>, + max_weight: Weight, + ) -> Result; +} + +/// Weight functions needed for [`SendController`]. +pub trait SendControllerWeightInfo { + /// Weight for [`SendController::send`] + fn send() -> Weight; +} + +/// Send an XCM from a given origin. +/// +/// An implementation of that trait will handle the low-level details of dispatching an XCM, such +/// as: +/// - Validating and Converting the origin to an interior location. +/// - Handling versioning. +/// - Calling the internal router, which implements [`SendXcm`]. +pub trait SendController { + /// Weight information for SendController functions. + type WeightInfo: SendControllerWeightInfo; + + /// Send an XCM to be executed by a remote location. + /// + /// # Parameters + /// + /// - `origin`: the origin of the call. + /// - `dest`: the destination of the message. + /// - `msg`: the XCM to be sent. + fn send( + origin: Origin, + dest: Box, + message: Box>, + ) -> Result; +} + +/// Weight functions needed for [`QueryController`]. +pub trait QueryControllerWeightInfo { + /// Weight for [`QueryController::query`] + fn query() -> Weight; + + /// Weight for [`QueryHandler::take_response`] + fn take_response() -> Weight; +} + +/// Query a remote location, from a given origin. +/// +/// An implementation of that trait will handle the low-level details of querying a remote location, +/// such as: +/// - Validating and Converting the origin to an interior location. +/// - Handling versioning. +/// - Calling the [`QueryHandler`] to register the query. +pub trait QueryController: QueryHandler { + /// Weight information for QueryController functions. + type WeightInfo: QueryControllerWeightInfo; + + /// Query a remote location. + /// + /// # Parameters + /// + /// - `origin`: the origin of the call, used to determine the responder. + /// - `timeout`: the maximum block number that the query should be responded to. + /// - `match_querier`: the querier that the query should be responded to. 
+ fn query( + origin: Origin, + timeout: Timeout, + match_querier: VersionedMultiLocation, + ) -> Result; +} + +impl ExecuteController for () { + type WeightInfo = (); + fn execute( + _origin: Origin, + _message: Box>, + _max_weight: Weight, + ) -> Result { + Ok(Outcome::Error(XcmError::Unimplemented)) + } +} + +impl ExecuteControllerWeightInfo for () { + fn execute() -> Weight { + Weight::zero() + } +} + +impl SendController for () { + type WeightInfo = (); + fn send( + _origin: Origin, + _dest: Box, + _message: Box>, + ) -> Result { + Ok(Default::default()) + } +} + +impl SendControllerWeightInfo for () { + fn send() -> Weight { + Weight::zero() + } +} + +impl QueryControllerWeightInfo for () { + fn query() -> Weight { + Weight::zero() + } + fn take_response() -> Weight { + Weight::zero() + } +} + +impl QueryController for () { + type WeightInfo = (); + + fn query( + _origin: Origin, + _timeout: Timeout, + _match_querier: VersionedMultiLocation, + ) -> Result { + Ok(Default::default()) + } +} diff --git a/polkadot/xcm/xcm-builder/src/lib.rs b/polkadot/xcm/xcm-builder/src/lib.rs index 0a74b3f579a..35f95b85c89 100644 --- a/polkadot/xcm/xcm-builder/src/lib.rs +++ b/polkadot/xcm/xcm-builder/src/lib.rs @@ -115,3 +115,9 @@ pub use origin_aliases::AliasForeignAccountId32; mod pay; pub use pay::{FixedLocation, LocatableAssetId, PayAccountId32OnChainOverXcm, PayOverXcm}; + +mod controller; +pub use controller::{ + Controller, ExecuteController, ExecuteControllerWeightInfo, QueryController, + QueryControllerWeightInfo, SendController, SendControllerWeightInfo, +}; diff --git a/polkadot/xcm/xcm-builder/src/tests/mock.rs b/polkadot/xcm/xcm-builder/src/tests/mock.rs index 543b00e0118..189274eb5f5 100644 --- a/polkadot/xcm/xcm-builder/src/tests/mock.rs +++ b/polkadot/xcm/xcm-builder/src/tests/mock.rs @@ -414,7 +414,7 @@ pub fn response(query_id: u64) -> Option { /// Mock implementation of the [`QueryHandler`] trait for creating XCM success queries and expecting /// responses. pub struct TestQueryHandler(core::marker::PhantomData<(T, BlockNumber)>); -impl QueryHandler +impl QueryHandler for TestQueryHandler { type QueryId = u64; diff --git a/polkadot/xcm/xcm-executor/src/traits/on_response.rs b/polkadot/xcm/xcm-executor/src/traits/on_response.rs index 3558160dc87..ea41f242a97 100644 --- a/polkadot/xcm/xcm-executor/src/traits/on_response.rs +++ b/polkadot/xcm/xcm-executor/src/traits/on_response.rs @@ -14,10 +14,13 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . -use crate::Xcm; +use crate::{Junctions::Here, Xcm}; use core::result; -use frame_support::pallet_prelude::{Get, TypeInfo}; -use parity_scale_codec::{FullCodec, MaxEncodedLen}; +use frame_support::{ + pallet_prelude::{Get, TypeInfo}, + parameter_types, +}; +use parity_scale_codec::{Decode, Encode, FullCodec, MaxEncodedLen}; use sp_arithmetic::traits::Zero; use sp_std::fmt::Debug; use xcm::latest::{ @@ -103,7 +106,7 @@ impl VersionChangeNotifier for () { } /// The possible state of an XCM query response. -#[derive(Debug, PartialEq, Eq)] +#[derive(Debug, PartialEq, Eq, Encode, Decode)] pub enum QueryResponseStatus { /// The response has arrived, and includes the inner Response and the block number it arrived /// at. 
@@ -129,7 +132,7 @@ pub trait QueryHandler { + PartialEq + Debug + Copy; - type BlockNumber: Zero; + type BlockNumber: Zero + Encode; type Error; type UniversalLocation: Get; @@ -165,3 +168,36 @@ pub trait QueryHandler { #[cfg(feature = "runtime-benchmarks")] fn expect_response(id: Self::QueryId, response: Response); } + +parameter_types! { + pub UniversalLocation: InteriorMultiLocation = Here; +} + +impl QueryHandler for () { + type BlockNumber = u64; + type Error = (); + type QueryId = u64; + type UniversalLocation = UniversalLocation; + + fn take_response(_query_id: Self::QueryId) -> QueryResponseStatus { + QueryResponseStatus::NotFound + } + fn new_query( + _responder: impl Into, + _timeout: Self::BlockNumber, + _match_querier: impl Into, + ) -> Self::QueryId { + 0u64 + } + + fn report_outcome( + _message: &mut Xcm<()>, + _responder: impl Into, + _timeout: Self::BlockNumber, + ) -> Result { + Err(()) + } + + #[cfg(feature = "runtime-benchmarks")] + fn expect_response(_id: Self::QueryId, _response: crate::Response) {} +} diff --git a/prdoc/pr_2086.prdoc b/prdoc/pr_2086.prdoc new file mode 100644 index 00000000000..a9bbd0729d5 --- /dev/null +++ b/prdoc/pr_2086.prdoc @@ -0,0 +1,15 @@ +title: "Contracts: Add XCM traits to interface with contracts" + +doc: + - audience: Core Dev + description: | + We are introducing a new set of `XcmController` traits in `pallet-xcm`. + These traits extract functionality from `pallet-xcm` and provide high-level interaction with XCM. + They enable other pallets, like `pallet_contracts`, to rely on these traits instead of tight coupling to `pallet-xcm` itself. + +crates: + - name: "pallet-xcm" + semver: patch + - name: "xcm-executor" + semver: patch + -- GitLab From 0c5dcca9e3cef6b2f456fccefd9f6c5e43444053 Mon Sep 17 00:00:00 2001 From: Daniel Olano Date: Sat, 11 Nov 2023 20:34:08 +0100 Subject: [PATCH 028/199] Add `s` utility function to frame support (#2275) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit A utility function I consider quite useful to declare string literals that are backed by an array. --------- Co-authored-by: Bastian Köcher Co-authored-by: Davide Galassi --- substrate/primitives/runtime/src/lib.rs | 26 +++++++++++++++++++++++++ 1 file changed, 26 insertions(+) diff --git a/substrate/primitives/runtime/src/lib.rs b/substrate/primitives/runtime/src/lib.rs index 0e1d4c31fd7..ddf92554c83 100644 --- a/substrate/primitives/runtime/src/lib.rs +++ b/substrate/primitives/runtime/src/lib.rs @@ -954,6 +954,32 @@ pub fn print(print: impl traits::Printable) { print.print(); } +/// Utility function to declare string literals backed by an array of length N. +/// +/// The input can be shorter than N, in that case the end of the array is padded with zeros. +/// +/// [`str_array`] is useful when converting strings that end up in the storage as fixed size arrays +/// or in const contexts where static data types have strings that could also end up in the storage. +/// +/// # Example +/// +/// ```rust +/// # use sp_runtime::str_array; +/// const MY_STR: [u8; 6] = str_array("data"); +/// assert_eq!(MY_STR, *b"data\0\0"); +/// ``` +pub const fn str_array(s: &str) -> [u8; N] { + debug_assert!(s.len() <= N, "String literal doesn't fit in array"); + let mut i = 0; + let mut arr = [0; N]; + let s = s.as_bytes(); + while i < s.len() { + arr[i] = s[i]; + i += 1; + } + arr +} + /// Describes on what should happen with a storage transaction. pub enum TransactionOutcome { /// Commit the transaction. 
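[editor note] A short usage sketch for the `str_array` helper added above (constant names are illustrative, and `sp-runtime` with this helper is assumed as a dependency): because the function is `const`, it can build fixed-size, zero-padded identifiers at compile time, e.g. for values stored on-chain as `[u8; N]`.

```rust
use sp_runtime::str_array;

// Shorter inputs are zero-padded up to the requested length N.
const POOL_NAME: [u8; 8] = str_array("staking");
const EMPTY: [u8; 8] = str_array("");

fn main() {
    assert_eq!(POOL_NAME, *b"staking\0");
    assert_eq!(EMPTY, [0u8; 8]);
}
```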
-- GitLab From 951bcceba085f1d3f5d022b1c211e5150fe8d2b2 Mon Sep 17 00:00:00 2001 From: Dmitry Markin Date: Mon, 13 Nov 2023 07:33:37 +0200 Subject: [PATCH 029/199] Unify `ChainSync` actions under one enum (#2180) All `ChainSync` actions that `SyncingEngine` should perform are unified under one `ChainSyncAction`. Processing of these actions put into a single place after `select!` in `SyncingEngine::run` instead of multiple places where calling `ChainSync` methods. --- .../client/network/sync/src/chain_sync.rs | 277 +++++++----------- .../network/sync/src/chain_sync/test.rs | 202 ++++++++----- substrate/client/network/sync/src/engine.rs | 163 +++++------ 3 files changed, 314 insertions(+), 328 deletions(-) diff --git a/substrate/client/network/sync/src/chain_sync.rs b/substrate/client/network/sync/src/chain_sync.rs index 858125f93f1..2adc6d42341 100644 --- a/substrate/client/network/sync/src/chain_sync.rs +++ b/substrate/client/network/sync/src/chain_sync.rs @@ -184,90 +184,26 @@ struct GapSync { target: NumberFor, } -/// Action that the parent of [`ChainSync`] should perform after reporting imported blocks with -/// [`ChainSync::on_blocks_processed`]. -pub enum BlockRequestAction { +/// Action that the parent of [`ChainSync`] should perform after reporting a network or block event. +#[derive(Debug)] +pub enum ChainSyncAction { /// Send block request to peer. Always implies dropping a stale block request to the same peer. - SendRequest { peer_id: PeerId, request: BlockRequest }, + SendBlockRequest { peer_id: PeerId, request: BlockRequest }, /// Drop stale block request. - RemoveStale { peer_id: PeerId }, -} - -/// Action that the parent of [`ChainSync`] should perform if we want to import blocks. -#[derive(Debug, Clone, PartialEq, Eq)] -pub struct ImportBlocksAction { - pub origin: BlockOrigin, - pub blocks: Vec>, -} - -/// Action that the parent of [`ChainSync`] should perform if we want to import justifications. -pub struct ImportJustificationsAction { - pub peer_id: PeerId, - pub hash: B::Hash, - pub number: NumberFor, - pub justifications: Justifications, -} - -/// Result of [`ChainSync::on_block_data`]. -#[derive(Debug, Clone, PartialEq, Eq)] -enum OnBlockData { - /// The block should be imported. - Import(ImportBlocksAction), - /// A new block request needs to be made to the given peer. - Request(PeerId, BlockRequest), - /// Continue processing events. - Continue, -} - -/// Result of [`ChainSync::on_block_justification`]. -#[derive(Debug, Clone, PartialEq, Eq)] -enum OnBlockJustification { - /// The justification needs no further handling. - Nothing, - /// The justification should be imported. - Import { + CancelBlockRequest { peer_id: PeerId }, + /// Peer misbehaved. Disconnect, report it and cancel the block request to it. + DropPeer(BadPeer), + /// Import blocks. + ImportBlocks { origin: BlockOrigin, blocks: Vec> }, + /// Import justifications. + ImportJustifications { peer_id: PeerId, - hash: Block::Hash, - number: NumberFor, + hash: B::Hash, + number: NumberFor, justifications: Justifications, }, } -// Result of [`ChainSync::on_state_data`]. -#[derive(Debug)] -enum OnStateData { - /// The block and state that should be imported. - Import(BlockOrigin, IncomingBlock), - /// A new state request needs to be made to the given peer. - Continue, -} - -/// Action that the parent of [`ChainSync`] should perform after reporting block response with -/// [`ChainSync::on_block_response`]. -pub enum OnBlockResponse { - /// Nothing to do. - Nothing, - /// Perform block request. 
- SendBlockRequest { peer_id: PeerId, request: BlockRequest }, - /// Import blocks. - ImportBlocks(ImportBlocksAction), - /// Import justifications. - ImportJustifications(ImportJustificationsAction), - /// Invalid block response, the peer should be disconnected and reported. - DisconnectPeer(BadPeer), -} - -/// Action that the parent of [`ChainSync`] should perform after reporting state response with -/// [`ChainSync::on_state_response`]. -pub enum OnStateResponse { - /// Nothing to do. - Nothing, - /// Import blocks. - ImportBlocks(ImportBlocksAction), - /// Invalid state response, the peer should be disconnected and reported. - DisconnectPeer(BadPeer), -} - /// The main data structure which contains all the state for a chains /// active syncing strategy. pub struct ChainSync { @@ -313,6 +249,8 @@ pub struct ChainSync { import_existing: bool, /// Gap download process. gap_sync: Option>, + /// Pending actions. + actions: Vec>, } /// All the data we have about a Peer that we are trying to sync with @@ -427,6 +365,7 @@ where gap_sync: None, warp_sync_config, warp_sync_target_block_header: None, + actions: Vec::new(), }; sync.reset_sync_start_point()?; @@ -509,8 +448,17 @@ where } /// Notify syncing state machine that a new sync peer has connected. + pub fn new_peer(&mut self, peer_id: PeerId, best_hash: B::Hash, best_number: NumberFor) { + match self.new_peer_inner(peer_id, best_hash, best_number) { + Ok(Some(request)) => + self.actions.push(ChainSyncAction::SendBlockRequest { peer_id, request }), + Ok(None) => {}, + Err(bad_peer) => self.actions.push(ChainSyncAction::DropPeer(bad_peer)), + } + } + #[must_use] - pub fn new_peer( + fn new_peer_inner( &mut self, peer_id: PeerId, best_hash: B::Hash, @@ -727,7 +675,7 @@ where peer_id: &PeerId, request: Option>, response: BlockResponse, - ) -> Result, BadPeer> { + ) -> Result<(), BadPeer> { self.downloaded_blocks += response.blocks.len(); let mut gap = false; let new_blocks: Vec> = if let Some(peer) = self.peers.get_mut(peer_id) { @@ -892,10 +840,12 @@ where start: *start, state: next_state, }; - return Ok(OnBlockData::Request( - *peer_id, - ancestry_request::(next_num), - )) + let request = ancestry_request::(next_num); + self.actions.push(ChainSyncAction::SendBlockRequest { + peer_id: *peer_id, + request, + }); + return Ok(()) } else { // Ancestry search is complete. Check if peer is on a stale fork unknown // to us and add it to sync targets if necessary. @@ -929,7 +879,7 @@ where .insert(*peer_id); } peer.state = PeerSyncState::Available; - Vec::new() + return Ok(()) } }, PeerSyncState::DownloadingWarpTargetBlock => { @@ -940,8 +890,7 @@ where match warp_sync.import_target_block( blocks.pop().expect("`blocks` len checked above."), ) { - warp::TargetBlockImportResult::Success => - return Ok(OnBlockData::Continue), + warp::TargetBlockImportResult::Success => return Ok(()), warp::TargetBlockImportResult::BadResponse => return Err(BadPeer(*peer_id, rep::VERIFICATION_FAIL)), } @@ -963,7 +912,7 @@ where "Logic error: we think we are downloading warp target block from {}, but no warp sync is happening.", peer_id, ); - return Ok(OnBlockData::Continue) + return Ok(()) } }, PeerSyncState::Available | @@ -1000,7 +949,9 @@ where return Err(BadPeer(*peer_id, rep::NOT_REQUESTED)) }; - Ok(OnBlockData::Import(self.validate_and_queue_blocks(new_blocks, gap))) + self.validate_and_queue_blocks(new_blocks, gap); + + Ok(()) } /// Submit a justification response for processing. 
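[editor note] The handlers above no longer return results for the caller to interpret; they push `ChainSyncAction`s onto an internal queue that the owner drains in one place. A reduced, self-contained sketch of that pattern (the `Sync`/`Action` names are simplified stand-ins, not the real `ChainSync` types):

```rust
#[derive(Debug)]
enum Action {
    SendRequest { peer: u32 },
    DropPeer { peer: u32 },
}

#[derive(Default)]
struct Sync {
    actions: Vec<Action>,
}

impl Sync {
    // An event handler no longer returns something to act on; it queues actions.
    fn on_peer_misbehaved(&mut self, peer: u32) {
        self.actions.push(Action::DropPeer { peer });
    }

    // The owner drains all pending actions in a single place after handling events.
    fn take_actions(&mut self) -> impl Iterator<Item = Action> {
        std::mem::take(&mut self.actions).into_iter()
    }
}

fn main() {
    let mut sync = Sync::default();
    sync.on_peer_misbehaved(7);
    for action in sync.take_actions() {
        println!("processing {action:?}");
    }
}
```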
@@ -1009,7 +960,7 @@ where &mut self, peer_id: PeerId, response: BlockResponse, - ) -> Result, BadPeer> { + ) -> Result<(), BadPeer> { let peer = if let Some(peer) = self.peers.get_mut(&peer_id) { peer } else { @@ -1017,7 +968,7 @@ where target: LOG_TARGET, "💔 Called on_block_justification with a peer ID of an unknown peer", ); - return Ok(OnBlockJustification::Nothing) + return Ok(()) }; self.allowed_requests.add(&peer_id); @@ -1054,11 +1005,17 @@ where if let Some((peer_id, hash, number, justifications)) = self.extra_justifications.on_response(peer_id, justification) { - return Ok(OnBlockJustification::Import { peer_id, hash, number, justifications }) + self.actions.push(ChainSyncAction::ImportJustifications { + peer_id, + hash, + number, + justifications, + }); + return Ok(()) } } - Ok(OnBlockJustification::Nothing) + Ok(()) } /// Report a justification import (successful or not). @@ -1196,8 +1153,7 @@ where } /// Notify that a sync peer has disconnected. - #[must_use] - pub fn peer_disconnected(&mut self, peer_id: &PeerId) -> Option> { + pub fn peer_disconnected(&mut self, peer_id: &PeerId) { self.blocks.clear_peer_download(peer_id); if let Some(gap_sync) = &mut self.gap_sync { gap_sync.blocks.clear_peer_download(peer_id) @@ -1212,7 +1168,9 @@ where let blocks = self.ready_blocks(); - (!blocks.is_empty()).then(|| self.validate_and_queue_blocks(blocks, false)) + if !blocks.is_empty() { + self.validate_and_queue_blocks(blocks, false); + } } /// Get prometheus metrics. @@ -1259,11 +1217,7 @@ where } } - fn validate_and_queue_blocks( - &mut self, - mut new_blocks: Vec>, - gap: bool, - ) -> ImportBlocksAction { + fn validate_and_queue_blocks(&mut self, mut new_blocks: Vec>, gap: bool) { let orig_len = new_blocks.len(); new_blocks.retain(|b| !self.queue_blocks.contains(&b.hash)); if new_blocks.len() != orig_len { @@ -1295,7 +1249,7 @@ where } self.queue_blocks.extend(new_blocks.iter().map(|b| b.hash)); - ImportBlocksAction { origin, blocks: new_blocks } + self.actions.push(ChainSyncAction::ImportBlocks { origin, blocks: new_blocks }) } fn update_peer_common_number(&mut self, peer_id: &PeerId, new_common: NumberFor) { @@ -1346,7 +1300,7 @@ where /// Restart the sync process. This will reset all pending block requests and return an iterator /// of new block requests to make to peers. Peers that were downloading finality data (i.e. /// their state was `DownloadingJustification`) are unaffected and will stay in the same state. - fn restart(&mut self) -> impl Iterator, BadPeer>> + '_ { + fn restart(&mut self) { self.blocks.clear(); if let Err(e) = self.reset_sync_start_point() { warn!(target: LOG_TARGET, "💔 Unable to restart sync: {e}"); @@ -1360,7 +1314,7 @@ where ); let old_peers = std::mem::take(&mut self.peers); - old_peers.into_iter().filter_map(move |(peer_id, mut p)| { + old_peers.into_iter().for_each(|(peer_id, mut p)| { // peers that were downloading justifications // should be kept in that state. if let PeerSyncState::DownloadingJustification(_) = p.state { @@ -1374,19 +1328,21 @@ where ); p.common_number = self.best_queued_number; self.peers.insert(peer_id, p); - return None + return } // handle peers that were in other states. 
- match self.new_peer(peer_id, p.best_hash, p.best_number) { + let action = match self.new_peer_inner(peer_id, p.best_hash, p.best_number) { // since the request is not a justification, remove it from pending responses - Ok(None) => Some(Ok(BlockRequestAction::RemoveStale { peer_id })), + Ok(None) => ChainSyncAction::CancelBlockRequest { peer_id }, // update the request if the new one is available - Ok(Some(request)) => Some(Ok(BlockRequestAction::SendRequest { peer_id, request })), + Ok(Some(request)) => ChainSyncAction::SendBlockRequest { peer_id, request }, // this implies that we need to drop pending response from the peer - Err(e) => Some(Err(e)), - } - }) + Err(bad_peer) => ChainSyncAction::DropPeer(bad_peer), + }; + + self.actions.push(action); + }); } /// Find a block to start sync from. If we sync with state, that's the latest block we have @@ -1534,13 +1490,12 @@ where } /// Submit blocks received in a response. - #[must_use] pub fn on_block_response( &mut self, peer_id: PeerId, request: BlockRequest, blocks: Vec>, - ) -> OnBlockResponse { + ) { let block_response = BlockResponse:: { id: request.id, blocks }; let blocks_range = || match ( @@ -1563,41 +1518,21 @@ where blocks_range(), ); - if request.fields == BlockAttributes::JUSTIFICATION { - match self.on_block_justification(peer_id, block_response) { - Ok(OnBlockJustification::Nothing) => OnBlockResponse::Nothing, - Ok(OnBlockJustification::Import { peer_id, hash, number, justifications }) => - OnBlockResponse::ImportJustifications(ImportJustificationsAction { - peer_id, - hash, - number, - justifications, - }), - Err(bad_peer) => OnBlockResponse::DisconnectPeer(bad_peer), - } + let res = if request.fields == BlockAttributes::JUSTIFICATION { + self.on_block_justification(peer_id, block_response) } else { - match self.on_block_data(&peer_id, Some(request), block_response) { - Ok(OnBlockData::Import(action)) => OnBlockResponse::ImportBlocks(action), - Ok(OnBlockData::Request(peer_id, request)) => - OnBlockResponse::SendBlockRequest { peer_id, request }, - Ok(OnBlockData::Continue) => OnBlockResponse::Nothing, - Err(bad_peer) => OnBlockResponse::DisconnectPeer(bad_peer), - } + self.on_block_data(&peer_id, Some(request), block_response) + }; + + if let Err(bad_peer) = res { + self.actions.push(ChainSyncAction::DropPeer(bad_peer)); } } /// Submit a state received in a response. - #[must_use] - pub fn on_state_response( - &mut self, - peer_id: PeerId, - response: OpaqueStateResponse, - ) -> OnStateResponse { - match self.on_state_data(&peer_id, response) { - Ok(OnStateData::Import(origin, block)) => - OnStateResponse::ImportBlocks(ImportBlocksAction { origin, blocks: vec![block] }), - Ok(OnStateData::Continue) => OnStateResponse::Nothing, - Err(bad_peer) => OnStateResponse::DisconnectPeer(bad_peer), + pub fn on_state_response(&mut self, peer_id: PeerId, response: OpaqueStateResponse) { + if let Err(bad_peer) = self.on_state_data(&peer_id, response) { + self.actions.push(ChainSyncAction::DropPeer(bad_peer)); } } @@ -1833,11 +1768,12 @@ where None } + #[must_use] fn on_state_data( &mut self, peer_id: &PeerId, response: OpaqueStateResponse, - ) -> Result, BadPeer> { + ) -> Result<(), BadPeer> { let response: Box = response.0.downcast().map_err(|_error| { error!( target: LOG_TARGET, @@ -1892,9 +1828,10 @@ where state: Some(state), }; debug!(target: LOG_TARGET, "State download is complete. 
Import is queued"); - Ok(OnStateData::Import(origin, block)) + self.actions.push(ChainSyncAction::ImportBlocks { origin, blocks: vec![block] }); + Ok(()) }, - ImportResult::Continue => Ok(OnStateData::Continue), + ImportResult::Continue => Ok(()), ImportResult::BadResponse => { debug!(target: LOG_TARGET, "Bad state data received from {peer_id}"); Err(BadPeer(*peer_id, rep::BAD_BLOCK)) @@ -1903,12 +1840,7 @@ where } /// Submit a warp proof response received. - #[must_use] - pub fn on_warp_sync_response( - &mut self, - peer_id: &PeerId, - response: EncodedProof, - ) -> Result<(), BadPeer> { + pub fn on_warp_sync_response(&mut self, peer_id: &PeerId, response: EncodedProof) { if let Some(peer) = self.peers.get_mut(peer_id) { if let PeerSyncState::DownloadingWarpProof = peer.state { peer.state = PeerSyncState::Available; @@ -1925,14 +1857,16 @@ where sync.import_warp_proof(response) } else { debug!(target: LOG_TARGET, "Ignored obsolete warp sync response from {peer_id}"); - return Err(BadPeer(*peer_id, rep::NOT_REQUESTED)) + self.actions + .push(ChainSyncAction::DropPeer(BadPeer(*peer_id, rep::NOT_REQUESTED))); + return }; match import_result { - WarpProofImportResult::Success => Ok(()), + WarpProofImportResult::Success => {}, WarpProofImportResult::BadResponse => { debug!(target: LOG_TARGET, "Bad proof data received from {peer_id}"); - Err(BadPeer(*peer_id, rep::BAD_BLOCK)) + self.actions.push(ChainSyncAction::DropPeer(BadPeer(*peer_id, rep::BAD_BLOCK))); }, } } @@ -1942,17 +1876,14 @@ where /// Call this when a batch of blocks have been processed by the import /// queue, with or without errors. If an error is returned, the pending response /// from the peer must be dropped. - #[must_use] pub fn on_blocks_processed( &mut self, imported: usize, count: usize, results: Vec<(Result>, BlockImportError>, B::Hash)>, - ) -> Box, BadPeer>>> { + ) { trace!(target: LOG_TARGET, "Imported {imported} of {count}"); - let mut output = Vec::new(); - let mut has_error = false; for (_, hash) in &results { self.queue_blocks.remove(hash); @@ -1993,7 +1924,10 @@ where if aux.bad_justification { if let Some(ref peer) = peer_id { warn!("💔 Sent block with bad justification to import"); - output.push(Err(BadPeer(*peer, rep::BAD_JUSTIFICATION))); + self.actions.push(ChainSyncAction::DropPeer(BadPeer( + *peer, + rep::BAD_JUSTIFICATION, + ))); } } @@ -2010,7 +1944,7 @@ where ); self.state_sync = None; self.mode = SyncMode::Full; - output.extend(self.restart()); + self.restart(); } let warp_sync_complete = self .warp_sync @@ -2024,7 +1958,7 @@ where ); self.warp_sync = None; self.mode = SyncMode::Full; - output.extend(self.restart()); + self.restart(); } let gap_sync_complete = self.gap_sync.as_ref().map_or(false, |s| s.target == number); @@ -2042,8 +1976,9 @@ where target: LOG_TARGET, "💔 Peer sent block with incomplete header to import", ); - output.push(Err(BadPeer(peer, rep::INCOMPLETE_HEADER))); - output.extend(self.restart()); + self.actions + .push(ChainSyncAction::DropPeer(BadPeer(peer, rep::INCOMPLETE_HEADER))); + self.restart(); }, Err(BlockImportError::VerificationFailed(peer_id, e)) => { let extra_message = peer_id @@ -2055,10 +1990,11 @@ where ); if let Some(peer) = peer_id { - output.push(Err(BadPeer(peer, rep::VERIFICATION_FAIL))); + self.actions + .push(ChainSyncAction::DropPeer(BadPeer(peer, rep::VERIFICATION_FAIL))); } - output.extend(self.restart()); + self.restart(); }, Err(BlockImportError::BadBlock(peer_id)) => if let Some(peer) = peer_id { @@ -2066,7 +2002,7 @@ where target: LOG_TARGET, "💔 Block 
{hash:?} received from peer {peer} has been blacklisted", ); - output.push(Err(BadPeer(peer, rep::BAD_BLOCK))); + self.actions.push(ChainSyncAction::DropPeer(BadPeer(peer, rep::BAD_BLOCK))); }, Err(BlockImportError::MissingState) => { // This may happen if the chain we were requesting upon has been discarded @@ -2078,14 +2014,19 @@ where warn!(target: LOG_TARGET, "💔 Error importing block {hash:?}: {}", e.unwrap_err()); self.state_sync = None; self.warp_sync = None; - output.extend(self.restart()); + self.restart(); }, Err(BlockImportError::Cancelled) => {}, }; } self.allowed_requests.set_all(); - Box::new(output.into_iter()) + } + + /// Get pending actions to perform. + #[must_use] + pub fn take_actions(&mut self) -> impl Iterator> { + std::mem::take(&mut self.actions).into_iter() } } diff --git a/substrate/client/network/sync/src/chain_sync/test.rs b/substrate/client/network/sync/src/chain_sync/test.rs index 2eefd2ad13e..15b2a95a07c 100644 --- a/substrate/client/network/sync/src/chain_sync/test.rs +++ b/substrate/client/network/sync/src/chain_sync/test.rs @@ -53,7 +53,7 @@ fn processes_empty_response_on_justification_request_for_unknown_block() { }; // add a new peer with the same best block - sync.new_peer(peer_id, a1_hash, a1_number).unwrap(); + sync.new_peer(peer_id, a1_hash, a1_number); // and request a justification for the block sync.request_justification(&a1_hash, a1_number); @@ -74,10 +74,8 @@ fn processes_empty_response_on_justification_request_for_unknown_block() { // if the peer replies with an empty response (i.e. it doesn't know the block), // the active request should be cleared. - assert_eq!( - sync.on_block_justification(peer_id, BlockResponse:: { id: 0, blocks: vec![] }), - Ok(OnBlockJustification::Nothing), - ); + sync.on_block_justification(peer_id, BlockResponse:: { id: 0, blocks: vec![] }) + .unwrap(); // there should be no in-flight requests assert_eq!(sync.extra_justifications.active_requests().count(), 0); @@ -119,8 +117,8 @@ fn restart_doesnt_affect_peers_downloading_finality_data() { let (b1_hash, b1_number) = new_blocks(50); // add 2 peers at blocks that we don't have locally - sync.new_peer(peer_id1, Hash::random(), 42).unwrap(); - sync.new_peer(peer_id2, Hash::random(), 10).unwrap(); + sync.new_peer(peer_id1, Hash::random(), 42); + sync.new_peer(peer_id2, Hash::random(), 10); // we wil send block requests to these peers // for these blocks we don't know about @@ -130,7 +128,7 @@ fn restart_doesnt_affect_peers_downloading_finality_data() { .all(|(p, _)| { p == peer_id1 || p == peer_id2 })); // add a new peer at a known block - sync.new_peer(peer_id3, b1_hash, b1_number).unwrap(); + sync.new_peer(peer_id3, b1_hash, b1_number); // we request a justification for a block we have locally sync.request_justification(&b1_hash, b1_number); @@ -148,14 +146,19 @@ fn restart_doesnt_affect_peers_downloading_finality_data() { PeerSyncState::DownloadingJustification(b1_hash), ); + // clear old actions + let _ = sync.take_actions(); + // we restart the sync state - let block_requests = sync.restart(); + sync.restart(); + let actions = sync.take_actions().collect::>(); // which should make us send out block requests to the first two peers - assert!(block_requests.map(|r| r.unwrap()).all(|event| match event { - BlockRequestAction::SendRequest { peer_id, .. } => - peer_id == peer_id1 || peer_id == peer_id2, - BlockRequestAction::RemoveStale { .. 
} => false, + assert_eq!(actions.len(), 2); + assert!(actions.iter().all(|action| match action { + ChainSyncAction::SendBlockRequest { peer_id, .. } => + peer_id == &peer_id1 || peer_id == &peer_id2, + _ => false, })); // peer 3 should be unaffected it was downloading finality data @@ -166,7 +169,7 @@ fn restart_doesnt_affect_peers_downloading_finality_data() { // Set common block to something that we don't have (e.g. failed import) sync.peers.get_mut(&peer_id3).unwrap().common_number = 100; - let _ = sync.restart().count(); + sync.restart(); assert_eq!(sync.peers.get(&peer_id3).unwrap().common_number, 50); } @@ -280,9 +283,8 @@ fn do_ancestor_search_when_common_block_to_best_qeued_gap_is_to_big() { let best_block = blocks.last().unwrap().clone(); let max_blocks_to_request = sync.max_blocks_per_request; // Connect the node we will sync from - sync.new_peer(peer_id1, best_block.hash(), *best_block.header().number()) - .unwrap(); - sync.new_peer(peer_id2, info.best_hash, 0).unwrap(); + sync.new_peer(peer_id1, best_block.hash(), *best_block.header().number()); + sync.new_peer(peer_id2, info.best_hash, 0); let mut best_block_num = 0; while best_block_num < MAX_DOWNLOAD_AHEAD { @@ -300,11 +302,17 @@ fn do_ancestor_search_when_common_block_to_best_qeued_gap_is_to_big() { let response = create_block_response(resp_blocks.clone()); - let res = sync.on_block_data(&peer_id1, Some(request), response).unwrap(); + // Clear old actions to not deal with them + let _ = sync.take_actions(); + + sync.on_block_data(&peer_id1, Some(request), response).unwrap(); + + let actions = sync.take_actions().collect::>(); + assert_eq!(actions.len(), 1); assert!(matches!( - res, - OnBlockData::Import(ImportBlocksAction{ origin: _, blocks }) if blocks.len() == max_blocks_to_request as usize - ),); + &actions[0], + ChainSyncAction::ImportBlocks{ origin: _, blocks } if blocks.len() == max_blocks_to_request as usize, + )); best_block_num += max_blocks_to_request as u32; @@ -356,11 +364,14 @@ fn do_ancestor_search_when_common_block_to_best_qeued_gap_is_to_big() { assert_eq!(FromBlock::Number(best_block_num as u64), peer2_req.from); let response = create_block_response(vec![blocks[(best_block_num - 1) as usize].clone()]); - let res = sync.on_block_data(&peer_id2, Some(peer2_req), response).unwrap(); - assert!(matches!( - res, - OnBlockData::Import(ImportBlocksAction{ origin: _, blocks }) if blocks.is_empty() - ),); + + // Clear old actions to not deal with them + let _ = sync.take_actions(); + + sync.on_block_data(&peer_id2, Some(peer2_req), response).unwrap(); + + let actions = sync.take_actions().collect::>(); + assert!(actions.is_empty()); let peer1_from = unwrap_from_block_number(peer1_req.unwrap().from); @@ -421,25 +432,34 @@ fn can_sync_huge_fork() { let common_block = blocks[MAX_BLOCKS_TO_LOOK_BACKWARDS as usize / 2].clone(); // Connect the node we will sync from - sync.new_peer(peer_id1, common_block.hash(), *common_block.header().number()) - .unwrap(); + sync.new_peer(peer_id1, common_block.hash(), *common_block.header().number()); send_block_announce(fork_blocks.last().unwrap().header().clone(), peer_id1, &mut sync); let mut request = get_block_request(&mut sync, FromBlock::Number(info.best_number), 1, &peer_id1); + // Discard old actions we are not interested in + let _ = sync.take_actions(); + // Do the ancestor search loop { let block = &fork_blocks[unwrap_from_block_number(request.from.clone()) as usize - 1]; let response = create_block_response(vec![block.clone()]); - let on_block_data = 
sync.on_block_data(&peer_id1, Some(request), response).unwrap(); - request = if let OnBlockData::Request(_peer, request) = on_block_data { - request - } else { + sync.on_block_data(&peer_id1, Some(request), response).unwrap(); + + let actions = sync.take_actions().collect::>(); + + request = if actions.is_empty() { // We found the ancenstor break + } else { + assert_eq!(actions.len(), 1); + match &actions[0] { + ChainSyncAction::SendBlockRequest { peer_id: _, request } => request.clone(), + action @ _ => panic!("Unexpected action: {action:?}"), + } }; log::trace!(target: LOG_TARGET, "Request: {request:?}"); @@ -463,15 +483,18 @@ fn can_sync_huge_fork() { let response = create_block_response(resp_blocks.clone()); - let res = sync.on_block_data(&peer_id1, Some(request), response).unwrap(); + sync.on_block_data(&peer_id1, Some(request), response).unwrap(); + + let actions = sync.take_actions().collect::>(); + assert_eq!(actions.len(), 1); assert!(matches!( - res, - OnBlockData::Import(ImportBlocksAction{ origin: _, blocks }) if blocks.len() == sync.max_blocks_per_request as usize - ),); + &actions[0], + ChainSyncAction::ImportBlocks{ origin: _, blocks } if blocks.len() == sync.max_blocks_per_request as usize + )); best_block_num += sync.max_blocks_per_request as u32; - let _ = sync.on_blocks_processed( + sync.on_blocks_processed( max_blocks_to_request as usize, max_blocks_to_request as usize, resp_blocks @@ -490,6 +513,9 @@ fn can_sync_huge_fork() { .collect(), ); + // Discard pending actions + let _ = sync.take_actions(); + resp_blocks .into_iter() .rev() @@ -539,25 +565,34 @@ fn syncs_fork_without_duplicate_requests() { let common_block = blocks[MAX_BLOCKS_TO_LOOK_BACKWARDS as usize / 2].clone(); // Connect the node we will sync from - sync.new_peer(peer_id1, common_block.hash(), *common_block.header().number()) - .unwrap(); + sync.new_peer(peer_id1, common_block.hash(), *common_block.header().number()); send_block_announce(fork_blocks.last().unwrap().header().clone(), peer_id1, &mut sync); let mut request = get_block_request(&mut sync, FromBlock::Number(info.best_number), 1, &peer_id1); + // Discard pending actions + let _ = sync.take_actions(); + // Do the ancestor search loop { let block = &fork_blocks[unwrap_from_block_number(request.from.clone()) as usize - 1]; let response = create_block_response(vec![block.clone()]); - let on_block_data = sync.on_block_data(&peer_id1, Some(request), response).unwrap(); - request = if let OnBlockData::Request(_peer, request) = on_block_data { - request - } else { + sync.on_block_data(&peer_id1, Some(request), response).unwrap(); + + let actions = sync.take_actions().collect::>(); + + request = if actions.is_empty() { // We found the ancenstor break + } else { + assert_eq!(actions.len(), 1); + match &actions[0] { + ChainSyncAction::SendBlockRequest { peer_id: _, request } => request.clone(), + action @ _ => panic!("Unexpected action: {action:?}"), + } }; log::trace!(target: LOG_TARGET, "Request: {request:?}"); @@ -582,11 +617,17 @@ fn syncs_fork_without_duplicate_requests() { let response = create_block_response(resp_blocks.clone()); - let res = sync.on_block_data(&peer_id1, Some(request.clone()), response).unwrap(); + // Discard old actions + let _ = sync.take_actions(); + + sync.on_block_data(&peer_id1, Some(request.clone()), response).unwrap(); + + let actions = sync.take_actions().collect::>(); + assert_eq!(actions.len(), 1); assert!(matches!( - res, - OnBlockData::Import(ImportBlocksAction{ origin: _, blocks }) if blocks.len() == 
max_blocks_to_request as usize - ),); + &actions[0], + ChainSyncAction::ImportBlocks{ origin: _, blocks } if blocks.len() == max_blocks_to_request as usize + )); best_block_num += max_blocks_to_request as u32; @@ -653,8 +694,7 @@ fn removes_target_fork_on_disconnect() { let peer_id1 = PeerId::random(); let common_block = blocks[1].clone(); // Connect the node we will sync from - sync.new_peer(peer_id1, common_block.hash(), *common_block.header().number()) - .unwrap(); + sync.new_peer(peer_id1, common_block.hash(), *common_block.header().number()); // Create a "new" header and announce it let mut header = blocks[0].header().clone(); @@ -678,8 +718,7 @@ fn can_import_response_with_missing_blocks() { let peer_id1 = PeerId::random(); let best_block = blocks[3].clone(); - sync.new_peer(peer_id1, best_block.hash(), *best_block.header().number()) - .unwrap(); + sync.new_peer(peer_id1, best_block.hash(), *best_block.header().number()); sync.peers.get_mut(&peer_id1).unwrap().state = PeerSyncState::Available; sync.peers.get_mut(&peer_id1).unwrap().common_number = 0; @@ -730,7 +769,7 @@ fn sync_restart_removes_block_but_not_justification_requests() { let (b1_hash, b1_number) = new_blocks(50); // add new peer and request blocks from them - sync.new_peer(peers[0], Hash::random(), 42).unwrap(); + sync.new_peer(peers[0], Hash::random(), 42); // we don't actually perform any requests, just keep track of peers waiting for a response let mut pending_responses = HashSet::new(); @@ -743,7 +782,7 @@ fn sync_restart_removes_block_but_not_justification_requests() { } // add a new peer at a known block - sync.new_peer(peers[1], b1_hash, b1_number).unwrap(); + sync.new_peer(peers[1], b1_hash, b1_number); // we request a justification for a block we have locally sync.request_justification(&b1_hash, b1_number); @@ -766,24 +805,29 @@ fn sync_restart_removes_block_but_not_justification_requests() { ); assert_eq!(pending_responses.len(), 2); + // discard old actions + let _ = sync.take_actions(); + // restart sync - let request_events = sync.restart().collect::>(); - for event in request_events.iter() { - match event.as_ref().unwrap() { - BlockRequestAction::RemoveStale { peer_id } => { + sync.restart(); + let actions = sync.take_actions().collect::>(); + for action in actions.iter() { + match action { + ChainSyncAction::CancelBlockRequest { peer_id } => { pending_responses.remove(&peer_id); }, - BlockRequestAction::SendRequest { peer_id, .. } => { + ChainSyncAction::SendBlockRequest { peer_id, .. } => { // we drop obsolete response, but don't register a new request, it's checked in // the `assert!` below pending_responses.remove(&peer_id); }, + action @ _ => panic!("Unexpected action: {action:?}"), } } - assert!(request_events.iter().any(|event| { - match event.as_ref().unwrap() { - BlockRequestAction::RemoveStale { .. } => false, - BlockRequestAction::SendRequest { peer_id, .. } => peer_id == &peers[0], + assert!(actions.iter().any(|action| { + match action { + ChainSyncAction::SendBlockRequest { peer_id, .. } => peer_id == &peers[0], + _ => false, } })); @@ -848,11 +892,9 @@ fn request_across_forks() { // Add the peers, all at the common ancestor 100. 
let common_block = blocks.last().unwrap(); let peer_id1 = PeerId::random(); - sync.new_peer(peer_id1, common_block.hash(), *common_block.header().number()) - .unwrap(); + sync.new_peer(peer_id1, common_block.hash(), *common_block.header().number()); let peer_id2 = PeerId::random(); - sync.new_peer(peer_id2, common_block.hash(), *common_block.header().number()) - .unwrap(); + sync.new_peer(peer_id2, common_block.hash(), *common_block.header().number()); // Peer 1 announces 107 from fork 1, 100-107 get downloaded. { @@ -864,11 +906,17 @@ fn request_across_forks() { let mut resp_blocks = fork_a_blocks[100_usize..107_usize].to_vec(); resp_blocks.reverse(); let response = create_block_response(resp_blocks.clone()); - let res = sync.on_block_data(&peer, Some(request), response).unwrap(); + + // Drop old actions + let _ = sync.take_actions(); + + sync.on_block_data(&peer, Some(request), response).unwrap(); + let actions = sync.take_actions().collect::>(); + assert_eq!(actions.len(), 1); assert!(matches!( - res, - OnBlockData::Import(ImportBlocksAction{ origin: _, blocks }) if blocks.len() == 7_usize - ),); + &actions[0], + ChainSyncAction::ImportBlocks{ origin: _, blocks } if blocks.len() == 7_usize + )); assert_eq!(sync.best_queued_number, 107); assert_eq!(sync.best_queued_hash, block.hash()); assert!(sync.is_known(&block.header.parent_hash())); @@ -903,11 +951,17 @@ fn request_across_forks() { // block is announced. let request = get_block_request(&mut sync, FromBlock::Hash(block.hash()), 1, &peer); let response = create_block_response(vec![block.clone()]); - let res = sync.on_block_data(&peer, Some(request), response).unwrap(); + + // Drop old actions we are not going to check + let _ = sync.take_actions(); + + sync.on_block_data(&peer, Some(request), response).unwrap(); + let actions = sync.take_actions().collect::>(); + assert_eq!(actions.len(), 1); assert!(matches!( - res, - OnBlockData::Import(ImportBlocksAction{ origin: _, blocks }) if blocks.len() == 1_usize - ),); + &actions[0], + ChainSyncAction::ImportBlocks{ origin: _, blocks } if blocks.len() == 1_usize + )); assert!(sync.is_known(&block.header.parent_hash())); } } diff --git a/substrate/client/network/sync/src/engine.rs b/substrate/client/network/sync/src/engine.rs index 560887132e3..58a9fdc49f2 100644 --- a/substrate/client/network/sync/src/engine.rs +++ b/substrate/client/network/sync/src/engine.rs @@ -25,10 +25,7 @@ use crate::{ }, block_relay_protocol::{BlockDownloader, BlockResponseError}, block_request_handler::MAX_BLOCKS_IN_RESPONSE, - chain_sync::{ - BlockRequestAction, ChainSync, ImportBlocksAction, ImportJustificationsAction, - OnBlockResponse, OnStateResponse, - }, + chain_sync::{ChainSync, ChainSyncAction}, pending_responses::{PendingResponses, ResponseEvent}, schema::v1::{StateRequest, StateResponse}, service::{ @@ -58,7 +55,7 @@ use schnellru::{ByLength, LruMap}; use tokio::time::{Interval, MissedTickBehavior}; use sc_client_api::{BlockBackend, HeaderBackend, ProofProvider}; -use sc_consensus::import_queue::ImportQueueService; +use sc_consensus::{import_queue::ImportQueueService, IncomingBlock}; use sc_network::{ config::{ FullNetworkConfiguration, NonDefaultSetConfig, NonReservedPeerMode, NotificationHandshake, @@ -74,8 +71,11 @@ use sc_network_common::{ }; use sc_utils::mpsc::{tracing_unbounded, TracingUnboundedReceiver, TracingUnboundedSender}; use sp_blockchain::{Error as ClientError, HeaderMetadata}; -use sp_consensus::block_validation::BlockAnnounceValidator; -use sp_runtime::traits::{Block as BlockT, Header, 
NumberFor, Zero}; +use sp_consensus::{block_validation::BlockAnnounceValidator, BlockOrigin}; +use sp_runtime::{ + traits::{Block as BlockT, Header, NumberFor, Zero}, + Justifications, +}; use std::{ collections::{HashMap, HashSet}, @@ -713,11 +713,67 @@ where self.is_major_syncing .store(self.chain_sync.status().state.is_major_syncing(), Ordering::Relaxed); + // Process actions requested by `ChainSync` during `select!`. + self.process_chain_sync_actions(); + // Send outbound requests on `ChanSync`'s behalf. self.send_chain_sync_requests(); } } + fn process_chain_sync_actions(&mut self) { + self.chain_sync.take_actions().for_each(|action| match action { + ChainSyncAction::SendBlockRequest { peer_id, request } => { + // Sending block request implies dropping obsolete pending response as we are not + // interested in it anymore (see [`ChainSyncAction::SendBlockRequest`]). + // Furthermore, only one request at a time is allowed to any peer. + let removed = self.pending_responses.remove(&peer_id); + self.send_block_request(peer_id, request.clone()); + + trace!( + target: LOG_TARGET, + "Processed `ChainSyncAction::SendBlockRequest` to {} with {:?}, stale response removed: {}.", + peer_id, + request, + removed, + ) + }, + ChainSyncAction::CancelBlockRequest { peer_id } => { + let removed = self.pending_responses.remove(&peer_id); + + trace!(target: LOG_TARGET, "Processed {action:?}., response removed: {removed}."); + }, + ChainSyncAction::DropPeer(BadPeer(peer_id, rep)) => { + self.pending_responses.remove(&peer_id); + self.network_service + .disconnect_peer(peer_id, self.block_announce_protocol_name.clone()); + self.network_service.report_peer(peer_id, rep); + + trace!(target: LOG_TARGET, "Processed {action:?}."); + }, + ChainSyncAction::ImportBlocks { origin, blocks } => { + let count = blocks.len(); + self.import_blocks(origin, blocks); + + trace!( + target: LOG_TARGET, + "Processed `ChainSyncAction::ImportBlocks` with {count} blocks.", + ); + }, + ChainSyncAction::ImportJustifications { peer_id, hash, number, justifications } => { + self.import_justifications(peer_id, hash, number, justifications); + + trace!( + target: LOG_TARGET, + "Processed `ChainSyncAction::ImportJustifications` from peer {} for block {} ({}).", + peer_id, + hash, + number, + ) + }, + }); + } + fn perform_periodic_actions(&mut self) { self.report_metrics(); @@ -766,28 +822,7 @@ where ToServiceCommand::ClearJustificationRequests => self.chain_sync.clear_justification_requests(), ToServiceCommand::BlocksProcessed(imported, count, results) => { - for result in self.chain_sync.on_blocks_processed(imported, count, results) { - match result { - Ok(action) => match action { - BlockRequestAction::SendRequest { peer_id, request } => { - // drop obsolete pending response first - self.pending_responses.remove(&peer_id); - self.send_block_request(peer_id, request); - }, - BlockRequestAction::RemoveStale { peer_id } => { - self.pending_responses.remove(&peer_id); - }, - }, - Err(BadPeer(peer_id, repu)) => { - self.pending_responses.remove(&peer_id); - self.network_service.disconnect_peer( - peer_id, - self.block_announce_protocol_name.clone(), - ); - self.network_service.report_peer(peer_id, repu) - }, - } - } + self.chain_sync.on_blocks_processed(imported, count, results); }, ToServiceCommand::JustificationImported(peer_id, hash, number, success) => { self.chain_sync.on_justification_import(hash, number, success); @@ -940,9 +975,7 @@ where } } - if let Some(import_blocks_action) = self.chain_sync.peer_disconnected(&peer_id) { - 
self.import_blocks(import_blocks_action) - } + self.chain_sync.peer_disconnected(&peer_id); self.pending_responses.remove(&peer_id); self.event_streams.retain(|stream| { @@ -1053,17 +1086,7 @@ where inbound, }; - let req = if peer.info.roles.is_full() { - match self.chain_sync.new_peer(peer_id, peer.info.best_hash, peer.info.best_number) { - Ok(req) => req, - Err(BadPeer(id, repu)) => { - self.network_service.report_peer(id, repu); - return Err(()) - }, - } - } else { - None - }; + self.chain_sync.new_peer(peer_id, peer.info.best_hash, peer.info.best_number); log::debug!(target: LOG_TARGET, "Connected {peer_id}"); @@ -1075,10 +1098,6 @@ where self.num_in_peers += 1; } - if let Some(req) = req { - self.send_block_request(peer_id, req); - } - self.event_streams .retain(|stream| stream.unbounded_send(SyncEvent::PeerConnected(peer_id)).is_ok()); @@ -1202,22 +1221,7 @@ where PeerRequest::Block(req) => { match self.block_downloader.block_response_into_blocks(&req, resp) { Ok(blocks) => { - match self.chain_sync.on_block_response(peer_id, req, blocks) { - OnBlockResponse::SendBlockRequest { peer_id, request } => - self.send_block_request(peer_id, request), - OnBlockResponse::ImportBlocks(import_blocks_action) => - self.import_blocks(import_blocks_action), - OnBlockResponse::ImportJustifications(action) => - self.import_justifications(action), - OnBlockResponse::Nothing => {}, - OnBlockResponse::DisconnectPeer(BadPeer(peer_id, rep)) => { - self.network_service.disconnect_peer( - peer_id, - self.block_announce_protocol_name.clone(), - ); - self.network_service.report_peer(peer_id, rep); - }, - } + self.chain_sync.on_block_response(peer_id, req, blocks); }, Err(BlockResponseError::DecodeFailed(e)) => { debug!( @@ -1262,27 +1266,10 @@ where }, }; - match self.chain_sync.on_state_response(peer_id, response) { - OnStateResponse::ImportBlocks(import_blocks_action) => - self.import_blocks(import_blocks_action), - OnStateResponse::DisconnectPeer(BadPeer(peer_id, rep)) => { - self.network_service.disconnect_peer( - peer_id, - self.block_announce_protocol_name.clone(), - ); - self.network_service.report_peer(peer_id, rep); - }, - OnStateResponse::Nothing => {}, - } + self.chain_sync.on_state_response(peer_id, response); }, PeerRequest::WarpProof => { - if let Err(BadPeer(peer_id, rep)) = - self.chain_sync.on_warp_sync_response(&peer_id, EncodedProof(resp)) - { - self.network_service - .disconnect_peer(peer_id, self.block_announce_protocol_name.clone()); - self.network_service.report_peer(peer_id, rep); - } + self.chain_sync.on_warp_sync_response(&peer_id, EncodedProof(resp)); }, }, Ok(Err(e)) => { @@ -1388,7 +1375,7 @@ where } /// Import blocks. - fn import_blocks(&mut self, ImportBlocksAction { origin, blocks }: ImportBlocksAction) { + fn import_blocks(&mut self, origin: BlockOrigin, blocks: Vec>) { if let Some(metrics) = &self.metrics { metrics.import_queue_blocks_submitted.inc(); } @@ -1397,13 +1384,17 @@ where } /// Import justifications. 
- fn import_justifications(&mut self, action: ImportJustificationsAction) { + fn import_justifications( + &mut self, + peer_id: PeerId, + hash: B::Hash, + number: NumberFor, + justifications: Justifications, + ) { if let Some(metrics) = &self.metrics { metrics.import_queue_justifications_submitted.inc(); } - let ImportJustificationsAction { peer_id, hash, number, justifications } = action; - self.import_queue.import_justifications(peer_id, hash, number, justifications); } } -- GitLab From 5f4ce8026693b537beaee3aea5161ee34bac7ace Mon Sep 17 00:00:00 2001 From: Marcin S Date: Mon, 13 Nov 2023 11:21:16 +0100 Subject: [PATCH 030/199] PVF host: Make unavailable security features print a warning (#2244) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Co-authored-by: Bastian Köcher --- polkadot/node/core/pvf/src/host.rs | 19 +- polkadot/node/core/pvf/src/security.rs | 275 +++++++++++++++++-------- 2 files changed, 189 insertions(+), 105 deletions(-) diff --git a/polkadot/node/core/pvf/src/host.rs b/polkadot/node/core/pvf/src/host.rs index dd0bd858198..7b383e8034a 100644 --- a/polkadot/node/core/pvf/src/host.rs +++ b/polkadot/node/core/pvf/src/host.rs @@ -29,12 +29,11 @@ use crate::{ use always_assert::never; use futures::{ channel::{mpsc, oneshot}, - join, Future, FutureExt, SinkExt, StreamExt, + Future, FutureExt, SinkExt, StreamExt, }; use polkadot_node_core_pvf_common::{ error::{PrepareError, PrepareResult}, pvf::PvfPrepData, - SecurityStatus, }; use polkadot_parachain_primitives::primitives::ValidationResult; use std::{ @@ -208,21 +207,7 @@ pub async fn start(config: Config, metrics: Metrics) -> (ValidationHost, impl Fu gum::debug!(target: LOG_TARGET, ?config, "starting PVF validation host"); // Run checks for supported security features once per host startup. Warn here if not enabled. - let security_status = { - // TODO: add check that syslog is available and that seccomp violations are logged? - let (can_enable_landlock, can_enable_seccomp, can_unshare_user_namespace_and_change_root) = join!( - security::check_landlock(&config.prepare_worker_program_path), - security::check_seccomp(&config.prepare_worker_program_path), - security::check_can_unshare_user_namespace_and_change_root( - &config.prepare_worker_program_path - ) - ); - SecurityStatus { - can_enable_landlock, - can_enable_seccomp, - can_unshare_user_namespace_and_change_root, - } - }; + let security_status = security::check_security_status(&config).await; let (to_host_tx, to_host_rx) = mpsc::channel(10); diff --git a/polkadot/node/core/pvf/src/security.rs b/polkadot/node/core/pvf/src/security.rs index decd321e415..295dd7df94d 100644 --- a/polkadot/node/core/pvf/src/security.rs +++ b/polkadot/node/core/pvf/src/security.rs @@ -14,22 +14,142 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . -use crate::LOG_TARGET; -use std::path::Path; +use crate::{Config, SecurityStatus, LOG_TARGET}; +use futures::join; +use std::{fmt, path::Path}; use tokio::{ fs::{File, OpenOptions}, io::{AsyncReadExt, AsyncSeekExt, SeekFrom}, }; -/// Check if we can sandbox the root and emit a warning if not. +const SECURE_MODE_ANNOUNCEMENT: &'static str = + "In the next release this will be a hard error by default. + \nMore information: https://wiki.polkadot.network/docs/maintain-guides-secure-validator#secure-validator-mode"; + +/// Run checks for supported security features. 
+pub async fn check_security_status(config: &Config) -> SecurityStatus { + let Config { prepare_worker_program_path, .. } = config; + + // TODO: add check that syslog is available and that seccomp violations are logged? + let (landlock, seccomp, change_root) = join!( + check_landlock(prepare_worker_program_path), + check_seccomp(prepare_worker_program_path), + check_can_unshare_user_namespace_and_change_root(prepare_worker_program_path) + ); + + let security_status = SecurityStatus { + can_enable_landlock: landlock.is_ok(), + can_enable_seccomp: seccomp.is_ok(), + can_unshare_user_namespace_and_change_root: change_root.is_ok(), + }; + + let errs: Vec = [landlock, seccomp, change_root] + .into_iter() + .filter_map(|result| result.err()) + .collect(); + let err_occurred = print_secure_mode_message(errs); + if err_occurred { + gum::error!( + target: LOG_TARGET, + "{}", + SECURE_MODE_ANNOUNCEMENT, + ); + } + + security_status +} + +type SecureModeResult = std::result::Result<(), SecureModeError>; + +/// Errors related to enabling Secure Validator Mode. +#[derive(Debug)] +enum SecureModeError { + CannotEnableLandlock(String), + CannotEnableSeccomp(String), + CannotUnshareUserNamespaceAndChangeRoot(String), +} + +impl SecureModeError { + /// Whether this error is allowed with Secure Validator Mode enabled. + fn is_allowed_in_secure_mode(&self) -> bool { + use SecureModeError::*; + match self { + CannotEnableLandlock(_) => true, + CannotEnableSeccomp(_) => false, + CannotUnshareUserNamespaceAndChangeRoot(_) => false, + } + } +} + +impl fmt::Display for SecureModeError { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + use SecureModeError::*; + match self { + CannotEnableLandlock(err) => write!(f, "Cannot enable landlock, a Linux 5.13+ kernel security feature: {err}"), + CannotEnableSeccomp(err) => write!(f, "Cannot enable seccomp, a Linux-specific kernel security feature: {err}"), + CannotUnshareUserNamespaceAndChangeRoot(err) => write!(f, "Cannot unshare user namespace and change root, which are Linux-specific kernel security features: {err}"), + } + } +} + +/// Errors if Secure Validator Mode and some mandatory errors occurred, warn otherwise. +/// +/// # Returns +/// +/// `true` if an error was printed, `false` otherwise. +fn print_secure_mode_message(errs: Vec) -> bool { + // Trying to run securely and some mandatory errors occurred. + const SECURE_MODE_ERROR: &'static str = "🚨 Your system cannot securely run a validator. \ + \nRunning validation of malicious PVF code has a higher risk of compromising this machine."; + // Some errors occurred when running insecurely, or some optional errors occurred when running + // securely. + const SECURE_MODE_WARNING: &'static str = "🚨 Some security issues have been detected. \ + \nRunning validation of malicious PVF code has a higher risk of compromising this machine."; + + if errs.is_empty() { + return false + } + + let errs_allowed = errs.iter().all(|err| err.is_allowed_in_secure_mode()); + let errs_string: String = errs + .iter() + .map(|err| { + format!( + "\n - {}{}", + if err.is_allowed_in_secure_mode() { "Optional: " } else { "" }, + err + ) + }) + .collect(); + + if errs_allowed { + gum::warn!( + target: LOG_TARGET, + "{}{}", + SECURE_MODE_WARNING, + errs_string, + ); + false + } else { + gum::error!( + target: LOG_TARGET, + "{}{}", + SECURE_MODE_ERROR, + errs_string, + ); + true + } +} + +/// Check if we can change root to a new, sandboxed root and return an error if not. 
/// /// We do this check by spawning a new process and trying to sandbox it. To get as close as possible /// to running the check in a worker, we try it... in a worker. The expected return status is 0 on /// success and -1 on failure. -pub async fn check_can_unshare_user_namespace_and_change_root( +async fn check_can_unshare_user_namespace_and_change_root( #[cfg_attr(not(target_os = "linux"), allow(unused_variables))] prepare_worker_program_path: &Path, -) -> bool { +) -> SecureModeResult { cfg_if::cfg_if! { if #[cfg(target_os = "linux")] { match tokio::process::Command::new(prepare_worker_program_path) @@ -37,50 +157,37 @@ pub async fn check_can_unshare_user_namespace_and_change_root( .output() .await { - Ok(output) if output.status.success() => true, + Ok(output) if output.status.success() => Ok(()), Ok(output) => { let stderr = std::str::from_utf8(&output.stderr) .expect("child process writes a UTF-8 string to stderr; qed") .trim(); - gum::warn!( - target: LOG_TARGET, - ?prepare_worker_program_path, - // Docs say to always print status using `Display` implementation. - status = %output.status, - %stderr, - "Cannot unshare user namespace and change root, which are Linux-specific kernel security features. Running validation of malicious PVF code has a higher risk of compromising this machine. Consider running with support for unsharing user namespaces for maximum security." - ); - false - }, - Err(err) => { - gum::warn!( - target: LOG_TARGET, - ?prepare_worker_program_path, - "Could not start child process: {}", - err - ); - false + Err(SecureModeError::CannotUnshareUserNamespaceAndChangeRoot( + format!("not available: {}", stderr) + )) }, + Err(err) => + Err(SecureModeError::CannotUnshareUserNamespaceAndChangeRoot( + format!("could not start child process: {}", err) + )), } } else { - gum::warn!( - target: LOG_TARGET, - "Cannot unshare user namespace and change root, which are Linux-specific kernel security features. Running validation of malicious PVF code has a higher risk of compromising this machine. Consider running on Linux with support for unsharing user namespaces for maximum security." - ); - false + Err(SecureModeError::CannotUnshareUserNamespaceAndChangeRoot( + "only available on Linux".into() + )) } } } -/// Check if landlock is supported and emit a warning if not. +/// Check if landlock is supported and return an error if not. /// /// We do this check by spawning a new process and trying to sandbox it. To get as close as possible /// to running the check in a worker, we try it... in a worker. The expected return status is 0 on /// success and -1 on failure. -pub async fn check_landlock( +async fn check_landlock( #[cfg_attr(not(target_os = "linux"), allow(unused_variables))] prepare_worker_program_path: &Path, -) -> bool { +) -> SecureModeResult { cfg_if::cfg_if! { if #[cfg(target_os = "linux")] { match tokio::process::Command::new(prepare_worker_program_path) @@ -88,81 +195,73 @@ pub async fn check_landlock( .status() .await { - Ok(status) if status.success() => true, - Ok(status) => { + Ok(status) if status.success() => Ok(()), + Ok(_status) => { let abi = polkadot_node_core_pvf_common::worker::security::landlock::LANDLOCK_ABI as u8; - gum::warn!( - target: LOG_TARGET, - ?prepare_worker_program_path, - ?status, - %abi, - "Cannot fully enable landlock, a Linux-specific kernel security feature. Running validation of malicious PVF code has a higher risk of compromising this machine. Consider upgrading the kernel version for maximum security." 
- ); - false - }, - Err(err) => { - gum::warn!( - target: LOG_TARGET, - ?prepare_worker_program_path, - "Could not start child process: {}", - err - ); - false + Err(SecureModeError::CannotEnableLandlock( + format!("landlock ABI {} not available", abi) + )) }, + Err(err) => + Err(SecureModeError::CannotEnableLandlock( + format!("could not start child process: {}", err) + )), } } else { - gum::warn!( - target: LOG_TARGET, - "Cannot enable landlock, a Linux-specific kernel security feature. Running validation of malicious PVF code has a higher risk of compromising this machine. Consider running on Linux with landlock support for maximum security." - ); - false + Err(SecureModeError::CannotEnableLandlock( + "only available on Linux".into() + )) } } } -/// Check if seccomp is supported and emit a warning if not. +/// Check if seccomp is supported and return an error if not. /// /// We do this check by spawning a new process and trying to sandbox it. To get as close as possible /// to running the check in a worker, we try it... in a worker. The expected return status is 0 on /// success and -1 on failure. -pub async fn check_seccomp( +async fn check_seccomp( #[cfg_attr(not(target_os = "linux"), allow(unused_variables))] prepare_worker_program_path: &Path, -) -> bool { +) -> SecureModeResult { cfg_if::cfg_if! { if #[cfg(target_os = "linux")] { - match tokio::process::Command::new(prepare_worker_program_path) - .arg("--check-can-enable-seccomp") - .status() - .await - { - Ok(status) if status.success() => true, - Ok(status) => { - gum::warn!( - target: LOG_TARGET, - ?prepare_worker_program_path, - ?status, - "Cannot fully enable seccomp, a Linux-specific kernel security feature. Running validation of malicious PVF code has a higher risk of compromising this machine. Consider upgrading the kernel version for maximum security." - ); - false - }, - Err(err) => { - gum::warn!( - target: LOG_TARGET, - ?prepare_worker_program_path, - "Could not start child process: {}", - err - ); - false - }, + cfg_if::cfg_if! { + if #[cfg(target_arch = "x86_64")] { + match tokio::process::Command::new(prepare_worker_program_path) + .arg("--check-can-enable-seccomp") + .status() + .await + { + Ok(status) if status.success() => Ok(()), + Ok(_status) => + Err(SecureModeError::CannotEnableSeccomp( + "not available".into() + )), + Err(err) => + Err(SecureModeError::CannotEnableSeccomp( + format!("could not start child process: {}", err) + )), + } + } else { + Err(SecureModeError::CannotEnableSeccomp( + "only supported on CPUs from the x86_64 family (usually Intel or AMD)".into() + )) + } } } else { - gum::warn!( - target: LOG_TARGET, - "Cannot enable seccomp, a Linux-specific kernel security feature. Running validation of malicious PVF code has a higher risk of compromising this machine. Consider running on Linux with seccomp support for maximum security." - ); - false + cfg_if::cfg_if! { + if #[cfg(target_arch = "x86_64")] { + Err(SecureModeError::CannotEnableSeccomp( + "only supported on Linux".into() + )) + } else { + Err(SecureModeError::CannotEnableSeccomp( + "only supported on Linux and on CPUs from the x86_64 family (usually Intel or AMD).".into() + )) + } + } } } } -- GitLab From 604704a84ce690eca93fd053abec1bb0a2b0ee32 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Mon, 13 Nov 2023 13:57:52 +0100 Subject: [PATCH 031/199] wasm-builder: Optimize `rerun-if-changed` logic (#2282) Optimizes the `rerun-if-changed` logic by ignoring `dev-dependencies` and also not outputting paths. 
Because outputting paths could lead to include unwanted crates in the rerun checks. --- substrate/utils/wasm-builder/src/wasm_project.rs | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/substrate/utils/wasm-builder/src/wasm_project.rs b/substrate/utils/wasm-builder/src/wasm_project.rs index c41e0935d75..2e6f671c45e 100644 --- a/substrate/utils/wasm-builder/src/wasm_project.rs +++ b/substrate/utils/wasm-builder/src/wasm_project.rs @@ -18,7 +18,7 @@ use crate::{write_file_if_changed, CargoCommandVersioned, OFFLINE}; use build_helper::rerun_if_changed; -use cargo_metadata::{CargoOpt, Metadata, MetadataCommand}; +use cargo_metadata::{DependencyKind, Metadata, MetadataCommand}; use parity_wasm::elements::{deserialize_buffer, Module}; use std::{ borrow::ToOwned, @@ -89,8 +89,7 @@ fn crate_metadata(cargo_manifest: &Path) -> Metadata { cargo_manifest.to_path_buf() }; - let mut crate_metadata_command = create_metadata_command(cargo_manifest); - crate_metadata_command.features(CargoOpt::AllFeatures); + let crate_metadata_command = create_metadata_command(cargo_manifest); let crate_metadata = crate_metadata_command .exec() @@ -915,6 +914,11 @@ fn generate_rerun_if_changed_instructions( packages.insert(DeduplicatePackage::from(package)); while let Some(dependency) = dependencies.pop() { + // Ignore all dev dependencies + if dependency.kind == DependencyKind::Development { + continue; + } + let path_or_git_dep = dependency.source.as_ref().map(|s| s.starts_with("git+")).unwrap_or(true); @@ -967,9 +971,7 @@ fn package_rerun_if_changed(package: &DeduplicatePackage) { p.path() == manifest_path || !p.path().is_dir() || !p.path().join("Cargo.toml").exists() }) .filter_map(|p| p.ok().map(|p| p.into_path())) - .filter(|p| { - p.is_dir() || p.extension().map(|e| e == "rs" || e == "toml").unwrap_or_default() - }) + .filter(|p| p.extension().map(|e| e == "rs" || e == "toml").unwrap_or_default()) .for_each(rerun_if_changed); } -- GitLab From ebcf0a0f1cab2d43718ba96d26e5687f4d14580a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Mon, 13 Nov 2023 14:32:02 +0100 Subject: [PATCH 032/199] pallet-grandpa: Remove `GRANDPA_AUTHORITIES_KEY` (#2181) Remove the `GRANDPA_AUTHORITIES_KEY` key and its usage. Apparently this was used in the early days to communicate the grandpa authorities to the node. However, we have now a runtime api that does this for us. So, this pull request is moving from the custom managed storage item to a FRAME managed storage item. This pr also includes a migration for doing the switch on a running chain. 
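In short, the change swaps a raw `storage::unhashed` read under the well-known `:grandpa_authorities` key for a pallet-managed `StorageValue`, with `MigrateV4ToV5` moving existing chains over. A condensed before/after sketch (lifted from the diff below; not a standalone compilable module):

```rust
// Before: the authority list lives under a hard-coded, unhashed storage key.
const GRANDPA_AUTHORITIES_KEY: &[u8] = b":grandpa_authorities";

fn grandpa_authorities_old() -> AuthorityList {
    storage::unhashed::get_or_default::<VersionedAuthorityList>(GRANDPA_AUTHORITIES_KEY).into()
}

// After: a bounded, FRAME-managed storage item; reads become `Authorities::<T>::get()`
// and the old key is cleared by the `MigrateV4ToV5` versioned migration.
#[pallet::storage]
pub(crate) type Authorities<T: Config> =
    StorageValue<_, BoundedAuthorityList<T>, ValueQuery>;
```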
--------- Co-authored-by: Davide Galassi --- polkadot/runtime/rococo/src/lib.rs | 2 + polkadot/runtime/westend/src/lib.rs | 1 + substrate/client/consensus/grandpa/src/lib.rs | 3 - substrate/frame/grandpa/src/lib.rs | 37 +++---- substrate/frame/grandpa/src/migrations.rs | 3 + substrate/frame/grandpa/src/migrations/v5.rs | 96 +++++++++++++++++++ .../primitives/consensus/grandpa/src/lib.rs | 65 +------------ 7 files changed, 124 insertions(+), 83 deletions(-) create mode 100644 substrate/frame/grandpa/src/migrations/v5.rs diff --git a/polkadot/runtime/rococo/src/lib.rs b/polkadot/runtime/rococo/src/lib.rs index 697d22c311a..57767b70d23 100644 --- a/polkadot/runtime/rococo/src/lib.rs +++ b/polkadot/runtime/rococo/src/lib.rs @@ -1497,6 +1497,8 @@ pub mod migrations { frame_support::migrations::RemovePallet::DbWeight>, frame_support::migrations::RemovePallet::DbWeight>, frame_support::migrations::RemovePallet::DbWeight>, + + pallet_grandpa::migrations::MigrateV4ToV5, ); } diff --git a/polkadot/runtime/westend/src/lib.rs b/polkadot/runtime/westend/src/lib.rs index fe9ed22f437..1c97e54da48 100644 --- a/polkadot/runtime/westend/src/lib.rs +++ b/polkadot/runtime/westend/src/lib.rs @@ -1558,6 +1558,7 @@ pub mod migrations { pallet_nomination_pools::migration::versioned_migrations::V5toV6, pallet_referenda::migration::v1::MigrateV0ToV1, pallet_nomination_pools::migration::versioned_migrations::V6ToV7, + pallet_grandpa::migrations::MigrateV4ToV5, ); } diff --git a/substrate/client/consensus/grandpa/src/lib.rs b/substrate/client/consensus/grandpa/src/lib.rs index da621abd254..a4584e6fc80 100644 --- a/substrate/client/consensus/grandpa/src/lib.rs +++ b/substrate/client/consensus/grandpa/src/lib.rs @@ -471,9 +471,6 @@ where Client: ExecutorProvider + HeaderBackend, { fn get(&self) -> Result { - // This implementation uses the Grandpa runtime API instead of reading directly from the - // `GRANDPA_AUTHORITIES_KEY` as the data may have been migrated since the genesis block of - // the chain, whereas the runtime API is backwards compatible. self.executor() .call( self.expect_block_hash_from_id(&BlockId::Number(Zero::zero()))?, diff --git a/substrate/frame/grandpa/src/lib.rs b/substrate/frame/grandpa/src/lib.rs index 95d1c8aa609..0b9f2b35827 100644 --- a/substrate/frame/grandpa/src/lib.rs +++ b/substrate/frame/grandpa/src/lib.rs @@ -30,14 +30,13 @@ // Re-export since this is necessary for `impl_apis` in runtime. pub use sp_consensus_grandpa::{ - self as fg_primitives, AuthorityId, AuthorityList, AuthorityWeight, VersionedAuthorityList, + self as fg_primitives, AuthorityId, AuthorityList, AuthorityWeight, }; -use codec::{self as codec, Decode, Encode, MaxEncodedLen}; +use codec::{Decode, Encode, MaxEncodedLen}; use frame_support::{ dispatch::{DispatchResultWithPostInfo, Pays}, pallet_prelude::Get, - storage, traits::OneSessionHandler, weights::Weight, WeakBoundedVec, @@ -45,8 +44,8 @@ use frame_support::{ use frame_system::pallet_prelude::BlockNumberFor; use scale_info::TypeInfo; use sp_consensus_grandpa::{ - ConsensusLog, EquivocationProof, ScheduledChange, SetId, GRANDPA_AUTHORITIES_KEY, - GRANDPA_ENGINE_ID, RUNTIME_LOG_TARGET as LOG_TARGET, + ConsensusLog, EquivocationProof, ScheduledChange, SetId, GRANDPA_ENGINE_ID, + RUNTIME_LOG_TARGET as LOG_TARGET, }; use sp_runtime::{generic::DigestItem, traits::Zero, DispatchResult}; use sp_session::{GetSessionNumber, GetValidatorCount}; @@ -75,7 +74,7 @@ pub mod pallet { use frame_system::pallet_prelude::*; /// The current storage version. 
- const STORAGE_VERSION: StorageVersion = StorageVersion::new(4); + const STORAGE_VERSION: StorageVersion = StorageVersion::new(5); #[pallet::pallet] #[pallet::storage_version(STORAGE_VERSION)] @@ -145,7 +144,7 @@ pub mod pallet { // enact the change if we've reached the enacting block if block_number == pending_change.scheduled_at + pending_change.delay { - Self::set_grandpa_authorities(&pending_change.next_authorities); + Authorities::::put(&pending_change.next_authorities); Self::deposit_event(Event::NewAuthorities { authority_set: pending_change.next_authorities.into_inner(), }); @@ -342,6 +341,11 @@ pub mod pallet { #[pallet::getter(fn session_for_set)] pub(super) type SetIdSession = StorageMap<_, Twox64Concat, SetId, SessionIndex>; + /// The current list of authorities. + #[pallet::storage] + pub(crate) type Authorities = + StorageValue<_, BoundedAuthorityList, ValueQuery>; + #[derive(frame_support::DefaultNoBound)] #[pallet::genesis_config] pub struct GenesisConfig { @@ -354,7 +358,7 @@ pub mod pallet { impl BuildGenesisConfig for GenesisConfig { fn build(&self) { CurrentSetId::::put(SetId::default()); - Pallet::::initialize(&self.authorities) + Pallet::::initialize(self.authorities.clone()) } } @@ -428,12 +432,7 @@ pub enum StoredState { impl Pallet { /// Get the current set of authorities, along with their respective weights. pub fn grandpa_authorities() -> AuthorityList { - storage::unhashed::get_or_default::(GRANDPA_AUTHORITIES_KEY).into() - } - - /// Set the current set of authorities, along with their respective weights. - fn set_grandpa_authorities(authorities: &AuthorityList) { - storage::unhashed::put(GRANDPA_AUTHORITIES_KEY, &VersionedAuthorityList::from(authorities)); + Authorities::::get().into_inner() } /// Schedule GRANDPA to pause starting in the given number of blocks. @@ -522,10 +521,14 @@ impl Pallet { // Perform module initialization, abstracted so that it can be called either through genesis // config builder or through `on_genesis_session`. - fn initialize(authorities: &AuthorityList) { + fn initialize(authorities: AuthorityList) { if !authorities.is_empty() { assert!(Self::grandpa_authorities().is_empty(), "Authorities are already initialized!"); - Self::set_grandpa_authorities(authorities); + Authorities::::put( + &BoundedAuthorityList::::try_from(authorities).expect( + "Grandpa: `Config::MaxAuthorities` is smaller than the number of genesis authorities!", + ), + ); } // NOTE: initialize first session of first set. this is necessary for @@ -568,7 +571,7 @@ where I: Iterator, { let authorities = validators.map(|(_, k)| (k, 1)).collect::>(); - Self::initialize(&authorities); + Self::initialize(authorities); } fn on_new_session<'a, I: 'a>(changed: bool, validators: I, _queued_validators: I) diff --git a/substrate/frame/grandpa/src/migrations.rs b/substrate/frame/grandpa/src/migrations.rs index 6307cbdd3b0..3a484eb60d2 100644 --- a/substrate/frame/grandpa/src/migrations.rs +++ b/substrate/frame/grandpa/src/migrations.rs @@ -22,8 +22,11 @@ use frame_support::{ use crate::{Config, CurrentSetId, SetIdSession, LOG_TARGET}; +pub use v5::MigrateV4ToV5; + /// Version 4. 
pub mod v4; +mod v5; /// This migration will clean up all stale set id -> session entries from the /// `SetIdSession` storage map, only the latest `max_set_id_session_entries` diff --git a/substrate/frame/grandpa/src/migrations/v5.rs b/substrate/frame/grandpa/src/migrations/v5.rs new file mode 100644 index 00000000000..24cfc34104b --- /dev/null +++ b/substrate/frame/grandpa/src/migrations/v5.rs @@ -0,0 +1,96 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use crate::{BoundedAuthorityList, Pallet}; +use codec::Decode; +use frame_support::{ + migrations::VersionedMigration, + storage, + traits::{Get, OnRuntimeUpgrade}, + weights::Weight, +}; +use sp_consensus_grandpa::AuthorityList; +use sp_std::{marker::PhantomData, vec::Vec}; + +const GRANDPA_AUTHORITIES_KEY: &[u8] = b":grandpa_authorities"; + +fn load_authority_list() -> AuthorityList { + storage::unhashed::get_raw(GRANDPA_AUTHORITIES_KEY).map_or_else( + || Vec::new(), + |l| <(u8, AuthorityList)>::decode(&mut &l[..]).unwrap_or_default().1, + ) +} + +/// Actual implementation of [`MigrateV4ToV5`]. +pub struct MigrateImpl(PhantomData); + +impl OnRuntimeUpgrade for MigrateImpl { + #[cfg(feature = "try-runtime")] + fn pre_upgrade() -> Result, sp_runtime::TryRuntimeError> { + use codec::Encode; + + let authority_list_len = load_authority_list().len() as u32; + + if authority_list_len > T::MaxAuthorities::get() { + return Err( + "Grandpa: `Config::MaxAuthorities` is smaller than the actual number of authorities.".into() + ) + } + + if authority_list_len == 0 { + return Err("Grandpa: Authority list is empty!".into()) + } + + Ok(authority_list_len.encode()) + } + + #[cfg(feature = "try-runtime")] + fn post_upgrade(state: Vec) -> Result<(), sp_runtime::TryRuntimeError> { + let len = u32::decode(&mut &state[..]).unwrap(); + + frame_support::ensure!( + len == crate::Pallet::::grandpa_authorities().len() as u32, + "Grandpa: pre-migrated and post-migrated list should have the same length" + ); + + frame_support::ensure!( + load_authority_list().is_empty(), + "Old authority list shouldn't exist anymore" + ); + + Ok(()) + } + + fn on_runtime_upgrade() -> Weight { + crate::Authorities::::put( + &BoundedAuthorityList::::force_from( + load_authority_list(), + Some("Grandpa: `Config::MaxAuthorities` is smaller than the actual number of authorities.") + ) + ); + + storage::unhashed::kill(GRANDPA_AUTHORITIES_KEY); + + T::DbWeight::get().reads_writes(1, 2) + } +} + +/// Migrate the storage from V4 to V5. +/// +/// Switches from `GRANDPA_AUTHORITIES_KEY` to a normal FRAME storage item. 
+pub type MigrateV4ToV5 = + VersionedMigration<4, 5, MigrateImpl, Pallet, ::DbWeight>; diff --git a/substrate/primitives/consensus/grandpa/src/lib.rs b/substrate/primitives/consensus/grandpa/src/lib.rs index baeaee4738e..1cf5504c5e7 100644 --- a/substrate/primitives/consensus/grandpa/src/lib.rs +++ b/substrate/primitives/consensus/grandpa/src/lib.rs @@ -19,13 +19,10 @@ #![cfg_attr(not(feature = "std"), no_std)] -#[cfg(not(feature = "std"))] -extern crate alloc; - #[cfg(feature = "serde")] use serde::Serialize; -use codec::{Codec, Decode, Encode, Input}; +use codec::{Codec, Decode, Encode}; use scale_info::TypeInfo; #[cfg(feature = "std")] use sp_keystore::KeystorePtr; @@ -33,7 +30,7 @@ use sp_runtime::{ traits::{Header as HeaderT, NumberFor}, ConsensusEngineId, RuntimeDebug, }; -use sp_std::{borrow::Cow, vec::Vec}; +use sp_std::vec::Vec; /// The log target to be used by client code. pub const CLIENT_LOG_TARGET: &str = "grandpa"; @@ -62,10 +59,6 @@ pub type AuthoritySignature = app::Signature; /// The `ConsensusEngineId` of GRANDPA. pub const GRANDPA_ENGINE_ID: ConsensusEngineId = *b"FRNK"; -/// The storage key for the current set of weighted Grandpa authorities. -/// The value stored is an encoded VersionedAuthorityList. -pub const GRANDPA_AUTHORITIES_KEY: &[u8] = b":grandpa_authorities"; - /// The weight of an authority. pub type AuthorityWeight = u64; @@ -464,60 +457,6 @@ where Some(grandpa::SignedMessage { message, signature, id: public }) } -/// WASM function call to check for pending changes. -pub const PENDING_CHANGE_CALL: &str = "grandpa_pending_change"; -/// WASM function call to get current GRANDPA authorities. -pub const AUTHORITIES_CALL: &str = "grandpa_authorities"; - -/// The current version of the stored AuthorityList type. The encoding version MUST be updated any -/// time the AuthorityList type changes. -const AUTHORITIES_VERSION: u8 = 1; - -/// An AuthorityList that is encoded with a version specifier. The encoding version is updated any -/// time the AuthorityList type changes. This ensures that encodings of different versions of an -/// AuthorityList are differentiable. Attempting to decode an authority list with an unknown -/// version will fail. -#[derive(Default)] -pub struct VersionedAuthorityList<'a>(Cow<'a, AuthorityList>); - -impl<'a> From for VersionedAuthorityList<'a> { - fn from(authorities: AuthorityList) -> Self { - VersionedAuthorityList(Cow::Owned(authorities)) - } -} - -impl<'a> From<&'a AuthorityList> for VersionedAuthorityList<'a> { - fn from(authorities: &'a AuthorityList) -> Self { - VersionedAuthorityList(Cow::Borrowed(authorities)) - } -} - -impl<'a> Into for VersionedAuthorityList<'a> { - fn into(self) -> AuthorityList { - self.0.into_owned() - } -} - -impl<'a> Encode for VersionedAuthorityList<'a> { - fn size_hint(&self) -> usize { - (AUTHORITIES_VERSION, self.0.as_ref()).size_hint() - } - - fn using_encoded R>(&self, f: F) -> R { - (AUTHORITIES_VERSION, self.0.as_ref()).using_encoded(f) - } -} - -impl<'a> Decode for VersionedAuthorityList<'a> { - fn decode(value: &mut I) -> Result { - let (version, authorities): (u8, AuthorityList) = Decode::decode(value)?; - if version != AUTHORITIES_VERSION { - return Err("unknown Grandpa authorities version".into()) - } - Ok(authorities.into()) - } -} - /// An opaque type used to represent the key ownership proof at the runtime API /// boundary. The inner value is an encoded representation of the actual key /// ownership proof which will be parameterized when defining the runtime. 
At -- GitLab From 60c77a2e9a0e611a227fd2aa82bba1491ea8ba9c Mon Sep 17 00:00:00 2001 From: gupnik <17176722+gupnik@users.noreply.github.com> Date: Mon, 13 Nov 2023 19:14:41 +0530 Subject: [PATCH 033/199] Adds syntax for marking calls feeless (#1926) Fixes https://github.com/paritytech/polkadot-sdk/issues/1725 This PR adds the following changes: 1. An attribute `pallet::feeless_if` that can be optionally attached to a call like so: ```rust #[pallet::feeless_if(|_origin: &OriginFor, something: &u32| -> bool { *something == 0 })] pub fn do_something(origin: OriginFor, something: u32) -> DispatchResult { .... } ``` The closure passed accepts references to arguments as specified in the call fn. It returns a boolean that denotes the conditions required for this call to be "feeless". 2. A signed extension `SkipCheckIfFeeless` that wraps a transaction payment processor such as `pallet_transaction_payment::ChargeTransactionPayment`. It checks for all calls annotated with `pallet::feeless_if` to see if the conditions are met. If so, the wrapped signed extension is not called, essentially making the call feeless. In order to use this, you can simply replace your existing signed extension that manages transaction payment like so: ```diff - pallet_transaction_payment::ChargeTransactionPayment, + pallet_skip_feeless_payment::SkipCheckIfFeeless< + Runtime, + pallet_transaction_payment::ChargeTransactionPayment, + >, ``` ### Todo - [x] Tests - [x] Docs - [x] Prdoc --------- Co-authored-by: Nikhil Gupta <> Co-authored-by: Oliver Tale-Yazdi Co-authored-by: Francisco Aguirre Co-authored-by: Liam Aharon --- Cargo.lock | 15 ++ Cargo.toml | 1 + prdoc/pr_1926.prdoc | 30 ++++ substrate/bin/node/cli/Cargo.toml | 3 + substrate/bin/node/cli/src/service.rs | 38 +++-- substrate/bin/node/runtime/Cargo.toml | 4 + substrate/bin/node/runtime/src/lib.rs | 16 +- substrate/bin/node/testing/Cargo.toml | 1 + substrate/bin/node/testing/src/keyring.rs | 4 +- .../frame/examples/kitchensink/src/lib.rs | 4 + .../src/construct_runtime/expand/call.rs | 12 ++ substrate/frame/support/procedural/src/lib.rs | 30 ++++ .../procedural/src/pallet/expand/call.rs | 27 ++++ .../procedural/src/pallet/parse/call.rs | 147 +++++++++++++++--- substrate/frame/support/src/dispatch.rs | 14 ++ substrate/frame/support/src/lib.rs | 7 +- .../call_feeless_invalid_closure_arg1.rs | 37 +++++ .../call_feeless_invalid_closure_arg1.stderr | 5 + .../call_feeless_invalid_closure_arg2.rs | 37 +++++ .../call_feeless_invalid_closure_arg2.stderr | 5 + .../call_feeless_invalid_closure_arg3.rs | 37 +++++ .../call_feeless_invalid_closure_arg3.stderr | 5 + .../call_feeless_invalid_closure_return.rs | 37 +++++ ...call_feeless_invalid_closure_return.stderr | 5 + .../pallet_ui/call_feeless_invalid_type.rs | 37 +++++ .../call_feeless_invalid_type.stderr | 11 ++ .../tests/pallet_ui/call_invalid_attr.stderr | 2 +- .../pallet_ui/call_invalid_origin_type.stderr | 6 - .../test/tests/pallet_ui/pass/feeless_call.rs | 37 +++++ .../skip-feeless-payment/Cargo.toml | 44 ++++++ .../skip-feeless-payment/src/lib.rs | 145 +++++++++++++++++ .../skip-feeless-payment/src/mock.rs | 92 +++++++++++ .../skip-feeless-payment/src/tests.rs | 33 ++++ 33 files changed, 874 insertions(+), 54 deletions(-) create mode 100644 prdoc/pr_1926.prdoc create mode 100644 substrate/frame/support/test/tests/pallet_ui/call_feeless_invalid_closure_arg1.rs create mode 100644 substrate/frame/support/test/tests/pallet_ui/call_feeless_invalid_closure_arg1.stderr create mode 100644 
substrate/frame/support/test/tests/pallet_ui/call_feeless_invalid_closure_arg2.rs create mode 100644 substrate/frame/support/test/tests/pallet_ui/call_feeless_invalid_closure_arg2.stderr create mode 100644 substrate/frame/support/test/tests/pallet_ui/call_feeless_invalid_closure_arg3.rs create mode 100644 substrate/frame/support/test/tests/pallet_ui/call_feeless_invalid_closure_arg3.stderr create mode 100644 substrate/frame/support/test/tests/pallet_ui/call_feeless_invalid_closure_return.rs create mode 100644 substrate/frame/support/test/tests/pallet_ui/call_feeless_invalid_closure_return.stderr create mode 100644 substrate/frame/support/test/tests/pallet_ui/call_feeless_invalid_type.rs create mode 100644 substrate/frame/support/test/tests/pallet_ui/call_feeless_invalid_type.stderr create mode 100644 substrate/frame/support/test/tests/pallet_ui/pass/feeless_call.rs create mode 100644 substrate/frame/transaction-payment/skip-feeless-payment/Cargo.toml create mode 100644 substrate/frame/transaction-payment/skip-feeless-payment/src/lib.rs create mode 100644 substrate/frame/transaction-payment/skip-feeless-payment/src/mock.rs create mode 100644 substrate/frame/transaction-payment/skip-feeless-payment/src/tests.rs diff --git a/Cargo.lock b/Cargo.lock index 2a091ce6817..dbdc2f856c4 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -7224,6 +7224,7 @@ dependencies = [ "pallet-scheduler", "pallet-session", "pallet-session-benchmarking", + "pallet-skip-feeless-payment", "pallet-society", "pallet-staking", "pallet-staking-reward-curve", @@ -8853,6 +8854,7 @@ dependencies = [ "pallet-asset-conversion-tx-payment", "pallet-asset-tx-payment", "pallet-assets", + "pallet-skip-feeless-payment", "parity-scale-codec", "sc-block-builder", "sc-client-api", @@ -10795,6 +10797,18 @@ dependencies = [ "sp-std 8.0.0", ] +[[package]] +name = "pallet-skip-feeless-payment" +version = "1.0.0-dev" +dependencies = [ + "frame-support", + "frame-system", + "parity-scale-codec", + "scale-info", + "sp-runtime", + "sp-std 8.0.0", +] + [[package]] name = "pallet-society" version = "4.0.0-dev" @@ -18203,6 +18217,7 @@ dependencies = [ "pallet-assets", "pallet-balances", "pallet-im-online", + "pallet-skip-feeless-payment", "pallet-timestamp", "parity-scale-codec", "platforms", diff --git a/Cargo.toml b/Cargo.toml index 42bbac37a6c..30445bd5945 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -382,6 +382,7 @@ members = [ "substrate/frame/transaction-payment/asset-tx-payment", "substrate/frame/transaction-payment/rpc", "substrate/frame/transaction-payment/rpc/runtime-api", + "substrate/frame/transaction-payment/skip-feeless-payment", "substrate/frame/transaction-storage", "substrate/frame/treasury", "substrate/frame/try-runtime", diff --git a/prdoc/pr_1926.prdoc b/prdoc/pr_1926.prdoc new file mode 100644 index 00000000000..9dc656f1260 --- /dev/null +++ b/prdoc/pr_1926.prdoc @@ -0,0 +1,30 @@ +title: Adds syntax for marking calls feeless + +doc: + - audience: Core Dev + description: | + 1. Adds an attribute `#[pallet::feeless_if]` that can be optionally attached to a `pallet::call`. + 2. Adds a signed extension SkipCheckIfFeeless that wraps a transaction + payment processor to potentially skip payment fees for such calls. + Note that both the attribute and the signed extension are needed to make the call feeless. 
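For illustration, here is how the two pieces fit together in a minimal sketch; the pallet, call name (`do_something`), argument and weight value are placeholders, and the real wiring for this repository is in the runtime and kitchensink diffs further down:

```rust
// In the pallet: mark the dispatchable as feeless when the closure returns true.
// The closure takes a reference to the origin plus references to the call arguments.
#[pallet::call]
impl<T: Config> Pallet<T> {
    #[pallet::call_index(0)]
    #[pallet::weight(Weight::from_parts(10_000, 0))]
    #[pallet::feeless_if(|_origin: &OriginFor<T>, something: &u32| -> bool { *something == 0 })]
    pub fn do_something(origin: OriginFor<T>, something: u32) -> DispatchResult {
        let _who = ensure_signed(origin)?;
        Ok(())
    }
}

// In the runtime: wrap the existing payment extension so the feeless check is honoured.
pub type SignedExtra = (
    // ... the usual frame_system checks ...
    pallet_skip_feeless_payment::SkipCheckIfFeeless<
        Runtime,
        pallet_transaction_payment::ChargeTransactionPayment<Runtime>,
    >,
);
```

Only signed dispatchables benefit, and only when the runtime actually includes the wrapping extension; without it the attribute is ignored and the call is charged as usual.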
+ +migrations: + db: [] + + runtime: [] + +crates: + - name: "frame-support-procedural" + semver: minor + - name: "pallet-skip-feeless-payment" + semver: major + - pallet-example-kitchensink + semver: patch + - kitchensink-runtime + semver: major + - node-testing + semver: patch + - node-cli + semver: patch + +host_functions: [] diff --git a/substrate/bin/node/cli/Cargo.toml b/substrate/bin/node/cli/Cargo.toml index 5e7ffebaa8e..8f3c2185deb 100644 --- a/substrate/bin/node/cli/Cargo.toml +++ b/substrate/bin/node/cli/Cargo.toml @@ -96,6 +96,7 @@ pallet-assets = { path = "../../../frame/assets" } pallet-asset-conversion-tx-payment = { path = "../../../frame/transaction-payment/asset-conversion-tx-payment" } pallet-asset-tx-payment = { path = "../../../frame/transaction-payment/asset-tx-payment" } pallet-im-online = { path = "../../../frame/im-online", default-features = false} +pallet-skip-feeless-payment = { path = "../../../frame/transaction-payment/skip-feeless-payment", default-features = false} # node-specific dependencies kitchensink-runtime = { path = "../runtime" } @@ -168,6 +169,7 @@ runtime-benchmarks = [ "pallet-assets/runtime-benchmarks", "pallet-balances/runtime-benchmarks", "pallet-im-online/runtime-benchmarks", + "pallet-skip-feeless-payment/runtime-benchmarks", "pallet-timestamp/runtime-benchmarks", "sc-client-db/runtime-benchmarks", "sc-service/runtime-benchmarks", @@ -183,6 +185,7 @@ try-runtime = [ "pallet-assets/try-runtime", "pallet-balances/try-runtime", "pallet-im-online/try-runtime", + "pallet-skip-feeless-payment/try-runtime", "pallet-timestamp/try-runtime", "sp-runtime/try-runtime", "substrate-cli-test-utils/try-runtime", diff --git a/substrate/bin/node/cli/src/service.rs b/substrate/bin/node/cli/src/service.rs index 153dda5c0a5..1c71b5a3956 100644 --- a/substrate/bin/node/cli/src/service.rs +++ b/substrate/bin/node/cli/src/service.rs @@ -91,21 +91,24 @@ pub fn create_extrinsic( .map(|c| c / 2) .unwrap_or(2) as u64; let tip = 0; - let extra: kitchensink_runtime::SignedExtra = ( - frame_system::CheckNonZeroSender::::new(), - frame_system::CheckSpecVersion::::new(), - frame_system::CheckTxVersion::::new(), - frame_system::CheckGenesis::::new(), - frame_system::CheckEra::::from(generic::Era::mortal( - period, - best_block.saturated_into(), - )), - frame_system::CheckNonce::::from(nonce), - frame_system::CheckWeight::::new(), - pallet_asset_conversion_tx_payment::ChargeAssetTxPayment::::from( - tip, None, - ), - ); + let extra: kitchensink_runtime::SignedExtra = + ( + frame_system::CheckNonZeroSender::::new(), + frame_system::CheckSpecVersion::::new(), + frame_system::CheckTxVersion::::new(), + frame_system::CheckGenesis::::new(), + frame_system::CheckEra::::from(generic::Era::mortal( + period, + best_block.saturated_into(), + )), + frame_system::CheckNonce::::from(nonce), + frame_system::CheckWeight::::new(), + pallet_skip_feeless_payment::SkipCheckIfFeeless::from( + pallet_asset_conversion_tx_payment::ChargeAssetTxPayment::< + kitchensink_runtime::Runtime, + >::from(tip, None), + ), + ); let raw_payload = kitchensink_runtime::SignedPayload::from_raw( function.clone(), @@ -879,8 +882,9 @@ mod tests { let check_era = frame_system::CheckEra::from(Era::Immortal); let check_nonce = frame_system::CheckNonce::from(index); let check_weight = frame_system::CheckWeight::new(); - let tx_payment = - pallet_asset_conversion_tx_payment::ChargeAssetTxPayment::from(0, None); + let tx_payment = pallet_skip_feeless_payment::SkipCheckIfFeeless::from( + 
pallet_asset_conversion_tx_payment::ChargeAssetTxPayment::from(0, None), + ); let extra = ( check_non_zero_sender, check_spec_version, diff --git a/substrate/bin/node/runtime/Cargo.toml b/substrate/bin/node/runtime/Cargo.toml index 836f90e7654..2414358b60b 100644 --- a/substrate/bin/node/runtime/Cargo.toml +++ b/substrate/bin/node/runtime/Cargo.toml @@ -129,6 +129,7 @@ pallet-transaction-payment = { path = "../../../frame/transaction-payment", defa pallet-transaction-payment-rpc-runtime-api = { path = "../../../frame/transaction-payment/rpc/runtime-api", default-features = false} pallet-asset-conversion-tx-payment = { path = "../../../frame/transaction-payment/asset-conversion-tx-payment", default-features = false} pallet-asset-tx-payment = { path = "../../../frame/transaction-payment/asset-tx-payment", default-features = false} +pallet-skip-feeless-payment = { path = "../../../frame/transaction-payment/skip-feeless-payment", default-features = false} pallet-transaction-storage = { path = "../../../frame/transaction-storage", default-features = false} pallet-uniques = { path = "../../../frame/uniques", default-features = false} pallet-vesting = { path = "../../../frame/vesting", default-features = false} @@ -212,6 +213,7 @@ std = [ "pallet-scheduler/std", "pallet-session-benchmarking?/std", "pallet-session/std", + "pallet-skip-feeless-payment/std", "pallet-society/std", "pallet-staking-runtime-api/std", "pallet-staking/std", @@ -308,6 +310,7 @@ runtime-benchmarks = [ "pallet-salary/runtime-benchmarks", "pallet-scheduler/runtime-benchmarks", "pallet-session-benchmarking/runtime-benchmarks", + "pallet-skip-feeless-payment/runtime-benchmarks", "pallet-society/runtime-benchmarks", "pallet-staking/runtime-benchmarks", "pallet-state-trie-migration/runtime-benchmarks", @@ -381,6 +384,7 @@ try-runtime = [ "pallet-salary/try-runtime", "pallet-scheduler/try-runtime", "pallet-session/try-runtime", + "pallet-skip-feeless-payment/try-runtime", "pallet-society/try-runtime", "pallet-staking/try-runtime", "pallet-state-trie-migration/try-runtime", diff --git a/substrate/bin/node/runtime/src/lib.rs b/substrate/bin/node/runtime/src/lib.rs index 620e89a65e5..e9adc48ff9c 100644 --- a/substrate/bin/node/runtime/src/lib.rs +++ b/substrate/bin/node/runtime/src/lib.rs @@ -569,6 +569,10 @@ impl pallet_asset_conversion_tx_payment::Config for Runtime { pallet_asset_conversion_tx_payment::AssetConversionAdapter; } +impl pallet_skip_feeless_payment::Config for Runtime { + type RuntimeEvent = RuntimeEvent; +} + parameter_types! 
{ pub const MinimumPeriod: Moment = SLOT_DURATION / 2; } @@ -1394,7 +1398,11 @@ where frame_system::CheckEra::::from(era), frame_system::CheckNonce::::from(nonce), frame_system::CheckWeight::::new(), - pallet_asset_conversion_tx_payment::ChargeAssetTxPayment::::from(tip, None), + pallet_skip_feeless_payment::SkipCheckIfFeeless::from( + pallet_asset_conversion_tx_payment::ChargeAssetTxPayment::::from( + tip, None, + ), + ), ); let raw_payload = SignedPayload::new(call, extra) .map_err(|e| { @@ -2134,6 +2142,7 @@ construct_runtime!( Statement: pallet_statement, Broker: pallet_broker, Mixnet: pallet_mixnet, + SkipFeelessPayment: pallet_skip_feeless_payment, } ); @@ -2160,7 +2169,10 @@ pub type SignedExtra = ( frame_system::CheckEra, frame_system::CheckNonce, frame_system::CheckWeight, - pallet_asset_conversion_tx_payment::ChargeAssetTxPayment, + pallet_skip_feeless_payment::SkipCheckIfFeeless< + Runtime, + pallet_asset_conversion_tx_payment::ChargeAssetTxPayment, + >, ); /// Unchecked extrinsic type as expected by this runtime. diff --git a/substrate/bin/node/testing/Cargo.toml b/substrate/bin/node/testing/Cargo.toml index 68f80ab6e83..e4fb06b5dcd 100644 --- a/substrate/bin/node/testing/Cargo.toml +++ b/substrate/bin/node/testing/Cargo.toml @@ -26,6 +26,7 @@ pallet-asset-conversion = { path = "../../../frame/asset-conversion" } pallet-assets = { path = "../../../frame/assets" } pallet-asset-conversion-tx-payment = { path = "../../../frame/transaction-payment/asset-conversion-tx-payment" } pallet-asset-tx-payment = { path = "../../../frame/transaction-payment/asset-tx-payment" } +pallet-skip-feeless-payment = { path = "../../../frame/transaction-payment/skip-feeless-payment" } sc-block-builder = { path = "../../../client/block-builder" } sc-client-api = { path = "../../../client/api" } sc-client-db = { path = "../../../client/db", features = ["rocksdb"]} diff --git a/substrate/bin/node/testing/src/keyring.rs b/substrate/bin/node/testing/src/keyring.rs index 22a8f5deb19..9940077c9da 100644 --- a/substrate/bin/node/testing/src/keyring.rs +++ b/substrate/bin/node/testing/src/keyring.rs @@ -78,7 +78,9 @@ pub fn signed_extra(nonce: Nonce, extra_fee: Balance) -> SignedExtra { frame_system::CheckEra::from(Era::mortal(256, 0)), frame_system::CheckNonce::from(nonce), frame_system::CheckWeight::new(), - pallet_asset_conversion_tx_payment::ChargeAssetTxPayment::from(extra_fee, None), + pallet_skip_feeless_payment::SkipCheckIfFeeless::from( + pallet_asset_conversion_tx_payment::ChargeAssetTxPayment::from(extra_fee, None), + ), ) } diff --git a/substrate/frame/examples/kitchensink/src/lib.rs b/substrate/frame/examples/kitchensink/src/lib.rs index 56117c59dc6..89759dd0bf6 100644 --- a/substrate/frame/examples/kitchensink/src/lib.rs +++ b/substrate/frame/examples/kitchensink/src/lib.rs @@ -206,6 +206,10 @@ pub mod pallet { impl Pallet { #[pallet::call_index(0)] #[pallet::weight(T::WeightInfo::set_foo_benchmark())] + /// Marks this call as feeless if `new_foo` is zero. 
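	/// The closure takes a reference to the origin followed by references to the call's
	/// arguments, in the same order as the dispatchable's signature, and must return `bool`.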
+ #[pallet::feeless_if(|_origin: &OriginFor, new_foo: &u32, _other_compact: &u128| -> bool { + *new_foo == 0 + })] pub fn set_foo( _: OriginFor, new_foo: u32, diff --git a/substrate/frame/support/procedural/src/construct_runtime/expand/call.rs b/substrate/frame/support/procedural/src/construct_runtime/expand/call.rs index 859b9a327e4..ce2aa094279 100644 --- a/substrate/frame/support/procedural/src/construct_runtime/expand/call.rs +++ b/substrate/frame/support/procedural/src/construct_runtime/expand/call.rs @@ -124,6 +124,18 @@ pub fn expand_outer_dispatch( } } + impl #scrate::dispatch::CheckIfFeeless for RuntimeCall { + type Origin = #system_path::pallet_prelude::OriginFor<#runtime>; + fn is_feeless(&self, origin: &Self::Origin) -> bool { + match self { + #( + #pallet_attrs + #variant_patterns => call.is_feeless(origin), + )* + } + } + } + impl #scrate::traits::GetCallMetadata for RuntimeCall { fn get_call_metadata(&self) -> #scrate::traits::CallMetadata { use #scrate::traits::GetCallName; diff --git a/substrate/frame/support/procedural/src/lib.rs b/substrate/frame/support/procedural/src/lib.rs index 68bf3e4874b..ec411891885 100644 --- a/substrate/frame/support/procedural/src/lib.rs +++ b/substrate/frame/support/procedural/src/lib.rs @@ -1157,6 +1157,36 @@ pub fn call_index(_: TokenStream, _: TokenStream) -> TokenStream { pallet_macro_stub() } +/// Each dispatchable may be annotated with the `#[pallet::feeless_if($closure)]` attribute, +/// which explicitly defines the condition for the dispatchable to be feeless. +/// +/// The arguments for the closure must be the referenced arguments of the dispatchable function. +/// +/// The closure must return `bool`. +/// +/// ### Example +/// ```ignore +/// #[pallet::feeless_if(|_origin: &OriginFor, something: &u32| -> bool { +/// *something == 0 +/// })] +/// pub fn do_something(origin: OriginFor, something: u32) -> DispatchResult { +/// .... +/// } +/// ``` +/// +/// Please note that this only works for signed dispatchables and requires a signed extension +/// such as `SkipCheckIfFeeless` as defined in `pallet-skip-feeless-payment` to wrap the existing +/// payment extension. Else, this is completely ignored and the dispatchable is still charged. +/// +/// ### Macro expansion +/// +/// The macro implements the `CheckIfFeeless` trait on the dispatchable and calls the corresponding +/// closure in the implementation. +#[proc_macro_attribute] +pub fn feeless_if(_: TokenStream, _: TokenStream) -> TokenStream { + pallet_macro_stub() +} + /// Allows you to define some extra constants to be added into constant metadata. 
/// /// Item must be defined as: diff --git a/substrate/frame/support/procedural/src/pallet/expand/call.rs b/substrate/frame/support/procedural/src/pallet/expand/call.rs index ed6335159cd..cf302faafc7 100644 --- a/substrate/frame/support/procedural/src/pallet/expand/call.rs +++ b/substrate/frame/support/procedural/src/pallet/expand/call.rs @@ -241,6 +241,16 @@ pub fn expand_call(def: &mut Def) -> proc_macro2::TokenStream { }) .collect::>(); + let feeless_check = methods.iter().map(|method| &method.feeless_check).collect::>(); + let feeless_check_result = + feeless_check.iter().zip(args_name.iter()).map(|(feeless_check, arg_name)| { + if let Some(feeless_check) = feeless_check { + quote::quote!(#feeless_check(origin, #( #arg_name, )*)) + } else { + quote::quote!(false) + } + }); + quote::quote_spanned!(span => mod warnings { #( @@ -347,6 +357,23 @@ pub fn expand_call(def: &mut Def) -> proc_macro2::TokenStream { } } + impl<#type_impl_gen> #frame_support::dispatch::CheckIfFeeless for #call_ident<#type_use_gen> + #where_clause + { + type Origin = #frame_system::pallet_prelude::OriginFor; + #[allow(unused_variables)] + fn is_feeless(&self, origin: &Self::Origin) -> bool { + match *self { + #( + Self::#fn_name { #( #args_name_pattern_ref, )* } => { + #feeless_check_result + }, + )* + Self::__Ignore(_, _) => unreachable!("__Ignore cannot be used"), + } + } + } + impl<#type_impl_gen> #frame_support::traits::GetCallName for #call_ident<#type_use_gen> #where_clause { diff --git a/substrate/frame/support/procedural/src/pallet/parse/call.rs b/substrate/frame/support/procedural/src/pallet/parse/call.rs index 90631f264b9..519e1e61895 100644 --- a/substrate/frame/support/procedural/src/pallet/parse/call.rs +++ b/substrate/frame/support/procedural/src/pallet/parse/call.rs @@ -17,9 +17,10 @@ use super::{helper, InheritedCallWeightAttr}; use frame_support_procedural_tools::get_doc_literals; +use proc_macro2::Span; use quote::ToTokens; use std::collections::HashMap; -use syn::spanned::Spanned; +use syn::{spanned::Spanned, ExprClosure}; /// List of additional token to be used for parsing. mod keyword { @@ -30,6 +31,7 @@ mod keyword { syn::custom_keyword!(compact); syn::custom_keyword!(T); syn::custom_keyword!(pallet); + syn::custom_keyword!(feeless_if); } /// Definition of dispatchables typically `impl Pallet { ... }` @@ -82,13 +84,18 @@ pub struct CallVariantDef { pub docs: Vec, /// Attributes annotated at the top of the dispatchable function. pub attrs: Vec, + /// The optional `feeless_if` attribute on the `pallet::call`. + pub feeless_check: Option, } /// Attributes for functions in call impl block. 
-/// Parse for `#[pallet::weight(expr)]` or `#[pallet::call_index(expr)] pub enum FunctionAttr { + /// Parse for `#[pallet::call_index(expr)]` CallIndex(u8), + /// Parse for `#[pallet::weight(expr)]` Weight(syn::Expr), + /// Parse for `#[pallet::feeless_if(expr)]` + FeelessIf(Span, syn::ExprClosure), } impl syn::parse::Parse for FunctionAttr { @@ -115,6 +122,19 @@ impl syn::parse::Parse for FunctionAttr { return Err(syn::Error::new(index.span(), msg)) } Ok(FunctionAttr::CallIndex(index.base10_parse()?)) + } else if lookahead.peek(keyword::feeless_if) { + content.parse::()?; + let closure_content; + syn::parenthesized!(closure_content in content); + Ok(FunctionAttr::FeelessIf( + closure_content.span(), + closure_content.parse::().map_err(|e| { + let msg = "Invalid feeless_if attribute: expected a closure"; + let mut err = syn::Error::new(closure_content.span(), msg); + err.combine(e); + err + })?, + )) } else { Err(lookahead.error()) } @@ -138,28 +158,33 @@ impl syn::parse::Parse for ArgAttrIsCompact { } } -/// Check the syntax is `OriginFor` -pub fn check_dispatchable_first_arg_type(ty: &syn::Type) -> syn::Result<()> { - pub struct CheckDispatchableFirstArg; +/// Check the syntax is `OriginFor` or `&OriginFor`. +pub fn check_dispatchable_first_arg_type(ty: &syn::Type, is_ref: bool) -> syn::Result<()> { + pub struct CheckDispatchableFirstArg(bool); impl syn::parse::Parse for CheckDispatchableFirstArg { fn parse(input: syn::parse::ParseStream) -> syn::Result { + let is_ref = input.parse::().is_ok(); input.parse::()?; input.parse::()?; input.parse::()?; input.parse::]>()?; - Ok(Self) + Ok(Self(is_ref)) } } - syn::parse2::(ty.to_token_stream()).map_err(|e| { - let msg = "Invalid type: expected `OriginFor`"; - let mut err = syn::Error::new(ty.span(), msg); - err.combine(e); - err - })?; - - Ok(()) + let result = syn::parse2::(ty.to_token_stream()); + return match result { + Ok(CheckDispatchableFirstArg(has_ref)) if is_ref == has_ref => Ok(()), + _ => { + let msg = if is_ref { + "Invalid type: expected `&OriginFor`" + } else { + "Invalid type: expected `OriginFor`" + }; + return Err(syn::Error::new(ty.span(), msg)) + }, + } } impl CallDef { @@ -215,7 +240,7 @@ impl CallDef { return Err(syn::Error::new(method.sig.span(), msg)) }, Some(syn::FnArg::Typed(arg)) => { - check_dispatchable_first_arg_type(&arg.ty)?; + check_dispatchable_first_arg_type(&arg.ty, false)?; }, } @@ -227,16 +252,22 @@ impl CallDef { return Err(syn::Error::new(method.sig.span(), msg)) } - let (mut weight_attrs, mut call_idx_attrs): (Vec, Vec) = - helper::take_item_pallet_attrs(&mut method.attrs)?.into_iter().partition( - |attr| { - if let FunctionAttr::Weight(_) = attr { - true - } else { - false - } + let mut call_idx_attrs = vec![]; + let mut weight_attrs = vec![]; + let mut feeless_attrs = vec![]; + for attr in helper::take_item_pallet_attrs(&mut method.attrs)?.into_iter() { + match attr { + FunctionAttr::CallIndex(_) => { + call_idx_attrs.push(attr); }, - ); + FunctionAttr::Weight(_) => { + weight_attrs.push(attr); + }, + FunctionAttr::FeelessIf(span, _) => { + feeless_attrs.push((span, attr)); + }, + } + } if weight_attrs.is_empty() && dev_mode { // inject a default O(1) weight when dev mode is enabled and no weight has @@ -323,6 +354,73 @@ impl CallDef { let docs = get_doc_literals(&method.attrs); + if feeless_attrs.len() > 1 { + let msg = "Invalid pallet::call, there can only be one feeless_if attribute"; + return Err(syn::Error::new(feeless_attrs[1].0, msg)) + } + let feeless_check: Option = + 
feeless_attrs.pop().map(|(_, attr)| match attr { + FunctionAttr::FeelessIf(_, closure) => closure, + _ => unreachable!("checked during creation of the let binding"), + }); + + if let Some(ref feeless_check) = feeless_check { + if feeless_check.inputs.len() != args.len() + 1 { + let msg = "Invalid pallet::call, feeless_if closure must have same \ + number of arguments as the dispatchable function"; + return Err(syn::Error::new(feeless_check.span(), msg)) + } + + match feeless_check.inputs.first() { + None => { + let msg = "Invalid pallet::call, feeless_if closure must have at least origin arg"; + return Err(syn::Error::new(feeless_check.span(), msg)) + }, + Some(syn::Pat::Type(arg)) => { + check_dispatchable_first_arg_type(&arg.ty, true)?; + }, + _ => { + let msg = "Invalid pallet::call, feeless_if closure first argument must be a typed argument, \ + e.g. `origin: OriginFor`"; + return Err(syn::Error::new(feeless_check.span(), msg)) + }, + } + + for (feeless_arg, arg) in feeless_check.inputs.iter().skip(1).zip(args.iter()) { + let feeless_arg_type = + if let syn::Pat::Type(syn::PatType { ty, .. }) = feeless_arg.clone() { + if let syn::Type::Reference(pat) = *ty { + pat.elem.clone() + } else { + let msg = "Invalid pallet::call, feeless_if closure argument must be a reference"; + return Err(syn::Error::new(ty.span(), msg)) + } + } else { + let msg = "Invalid pallet::call, feeless_if closure argument must be a type ascription pattern"; + return Err(syn::Error::new(feeless_arg.span(), msg)) + }; + + if feeless_arg_type != arg.2 { + let msg = + "Invalid pallet::call, feeless_if closure argument must have \ + a reference to the same type as the dispatchable function argument"; + return Err(syn::Error::new(feeless_arg.span(), msg)) + } + } + + let valid_return = match &feeless_check.output { + syn::ReturnType::Type(_, type_) => match *(type_.clone()) { + syn::Type::Path(syn::TypePath { path, .. }) => path.is_ident("bool"), + _ => false, + }, + _ => false, + }; + if !valid_return { + let msg = "Invalid pallet::call, feeless_if closure must return `bool`"; + return Err(syn::Error::new(feeless_check.output.span(), msg)) + } + } + methods.push(CallVariantDef { name: method.sig.ident.clone(), weight, @@ -331,6 +429,7 @@ impl CallDef { args, docs, attrs: method.attrs.clone(), + feeless_check, }); } else { let msg = "Invalid pallet::call, only method accepted"; diff --git a/substrate/frame/support/src/dispatch.rs b/substrate/frame/support/src/dispatch.rs index e6a090ebcae..e57227f9b40 100644 --- a/substrate/frame/support/src/dispatch.rs +++ b/substrate/frame/support/src/dispatch.rs @@ -54,6 +54,20 @@ pub trait Callable { // https://github.com/rust-lang/rust/issues/51331 pub type CallableCallFor = >::RuntimeCall; +/// Means to checks if the dispatchable is feeless. +/// +/// This is automatically implemented for all dispatchables during pallet expansion. +/// If a call is marked by [`#[pallet::feeless_if]`](`macro@frame_support_procedural::feeless_if`) +/// attribute, the corresponding closure is checked. +pub trait CheckIfFeeless { + /// The Origin type of the runtime. + type Origin; + + /// Checks if the dispatchable satisfies the feeless condition as defined by + /// [`#[pallet::feeless_if]`](`macro@frame_support_procedural::feeless_if`) + fn is_feeless(&self, origin: &Self::Origin) -> bool; +} + /// Origin for the System pallet. 
#[derive(PartialEq, Eq, Clone, RuntimeDebug, Encode, Decode, TypeInfo, MaxEncodedLen)] pub enum RawOrigin { diff --git a/substrate/frame/support/src/lib.rs b/substrate/frame/support/src/lib.rs index a01f3a01593..2ec3b24db0c 100644 --- a/substrate/frame/support/src/lib.rs +++ b/substrate/frame/support/src/lib.rs @@ -2227,9 +2227,10 @@ pub use frame_support_procedural::pallet; pub mod pallet_macros { pub use frame_support_procedural::{ call_index, compact, composite_enum, config, disable_frame_system_supertrait_check, error, - event, extra_constants, generate_deposit, generate_store, getter, hooks, import_section, - inherent, no_default, no_default_bounds, origin, pallet_section, storage_prefix, - storage_version, type_value, unbounded, validate_unsigned, weight, whitelist_storage, + event, extra_constants, feeless_if, generate_deposit, generate_store, getter, hooks, + import_section, inherent, no_default, no_default_bounds, origin, pallet_section, + storage_prefix, storage_version, type_value, unbounded, validate_unsigned, weight, + whitelist_storage, }; /// Allows you to define the genesis configuration for the pallet. diff --git a/substrate/frame/support/test/tests/pallet_ui/call_feeless_invalid_closure_arg1.rs b/substrate/frame/support/test/tests/pallet_ui/call_feeless_invalid_closure_arg1.rs new file mode 100644 index 00000000000..08aaf06a7ef --- /dev/null +++ b/substrate/frame/support/test/tests/pallet_ui/call_feeless_invalid_closure_arg1.rs @@ -0,0 +1,37 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#[frame_support::pallet(dev_mode)] +mod pallet { + use frame_support::pallet_prelude::DispatchResult; + use frame_system::pallet_prelude::OriginFor; + + #[pallet::config] + pub trait Config: frame_system::Config {} + + #[pallet::pallet] + pub struct Pallet(core::marker::PhantomData); + + #[pallet::call] + impl Pallet { + #[pallet::feeless_if(|| -> bool { true })] + pub fn foo(_: OriginFor) -> DispatchResult { Ok(()) } + } +} + +fn main() { +} diff --git a/substrate/frame/support/test/tests/pallet_ui/call_feeless_invalid_closure_arg1.stderr b/substrate/frame/support/test/tests/pallet_ui/call_feeless_invalid_closure_arg1.stderr new file mode 100644 index 00000000000..9c13d59d793 --- /dev/null +++ b/substrate/frame/support/test/tests/pallet_ui/call_feeless_invalid_closure_arg1.stderr @@ -0,0 +1,5 @@ +error: Invalid pallet::call, feeless_if closure must have same number of arguments as the dispatchable function + --> tests/pallet_ui/call_feeless_invalid_closure_arg1.rs:31:24 + | +31 | #[pallet::feeless_if(|| -> bool { true })] + | ^ diff --git a/substrate/frame/support/test/tests/pallet_ui/call_feeless_invalid_closure_arg2.rs b/substrate/frame/support/test/tests/pallet_ui/call_feeless_invalid_closure_arg2.rs new file mode 100644 index 00000000000..b16b4b3ffd9 --- /dev/null +++ b/substrate/frame/support/test/tests/pallet_ui/call_feeless_invalid_closure_arg2.rs @@ -0,0 +1,37 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#[frame_support::pallet(dev_mode)] +mod pallet { + use frame_support::pallet_prelude::DispatchResult; + use frame_system::pallet_prelude::OriginFor; + + #[pallet::config] + pub trait Config: frame_system::Config {} + + #[pallet::pallet] + pub struct Pallet(core::marker::PhantomData); + + #[pallet::call] + impl Pallet { + #[pallet::feeless_if(|_: bool| -> bool { true })] + pub fn foo(_: OriginFor) -> DispatchResult { Ok(()) } + } +} + +fn main() { +} diff --git a/substrate/frame/support/test/tests/pallet_ui/call_feeless_invalid_closure_arg2.stderr b/substrate/frame/support/test/tests/pallet_ui/call_feeless_invalid_closure_arg2.stderr new file mode 100644 index 00000000000..1c38ec23683 --- /dev/null +++ b/substrate/frame/support/test/tests/pallet_ui/call_feeless_invalid_closure_arg2.stderr @@ -0,0 +1,5 @@ +error: Invalid type: expected `&OriginFor` + --> tests/pallet_ui/call_feeless_invalid_closure_arg2.rs:31:28 + | +31 | #[pallet::feeless_if(|_: bool| -> bool { true })] + | ^^^^ diff --git a/substrate/frame/support/test/tests/pallet_ui/call_feeless_invalid_closure_arg3.rs b/substrate/frame/support/test/tests/pallet_ui/call_feeless_invalid_closure_arg3.rs new file mode 100644 index 00000000000..5f2230744ff --- /dev/null +++ b/substrate/frame/support/test/tests/pallet_ui/call_feeless_invalid_closure_arg3.rs @@ -0,0 +1,37 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. 
+// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#[frame_support::pallet(dev_mode)] +mod pallet { + use frame_support::pallet_prelude::DispatchResult; + use frame_system::pallet_prelude::OriginFor; + + #[pallet::config] + pub trait Config: frame_system::Config {} + + #[pallet::pallet] + pub struct Pallet(core::marker::PhantomData); + + #[pallet::call] + impl Pallet { + #[pallet::feeless_if(|_: &OriginFor, _s: &u32| -> bool { true })] + pub fn foo(_: OriginFor, _something: u64) -> DispatchResult { Ok(()) } + } +} + +fn main() { +} diff --git a/substrate/frame/support/test/tests/pallet_ui/call_feeless_invalid_closure_arg3.stderr b/substrate/frame/support/test/tests/pallet_ui/call_feeless_invalid_closure_arg3.stderr new file mode 100644 index 00000000000..1ad9588cead --- /dev/null +++ b/substrate/frame/support/test/tests/pallet_ui/call_feeless_invalid_closure_arg3.stderr @@ -0,0 +1,5 @@ +error: Invalid pallet::call, feeless_if closure argument must have a reference to the same type as the dispatchable function argument + --> tests/pallet_ui/call_feeless_invalid_closure_arg3.rs:31:43 + | +31 | #[pallet::feeless_if(|_: &OriginFor, _s: &u32| -> bool { true })] + | ^^ diff --git a/substrate/frame/support/test/tests/pallet_ui/call_feeless_invalid_closure_return.rs b/substrate/frame/support/test/tests/pallet_ui/call_feeless_invalid_closure_return.rs new file mode 100644 index 00000000000..1f0399a123c --- /dev/null +++ b/substrate/frame/support/test/tests/pallet_ui/call_feeless_invalid_closure_return.rs @@ -0,0 +1,37 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#[frame_support::pallet(dev_mode)] +mod pallet { + use frame_support::pallet_prelude::DispatchResult; + use frame_system::pallet_prelude::OriginFor; + + #[pallet::config] + pub trait Config: frame_system::Config {} + + #[pallet::pallet] + pub struct Pallet(core::marker::PhantomData); + + #[pallet::call] + impl Pallet { + #[pallet::feeless_if(|_: &OriginFor| -> u32 { 0 })] + pub fn foo(_: OriginFor) -> DispatchResult { Ok(()) } + } +} + +fn main() { +} diff --git a/substrate/frame/support/test/tests/pallet_ui/call_feeless_invalid_closure_return.stderr b/substrate/frame/support/test/tests/pallet_ui/call_feeless_invalid_closure_return.stderr new file mode 100644 index 00000000000..a8c05242bde --- /dev/null +++ b/substrate/frame/support/test/tests/pallet_ui/call_feeless_invalid_closure_return.stderr @@ -0,0 +1,5 @@ +error: Invalid pallet::call, feeless_if closure must return `bool` + --> tests/pallet_ui/call_feeless_invalid_closure_return.rs:31:43 + | +31 | #[pallet::feeless_if(|_: &OriginFor| -> u32 { 0 })] + | ^ diff --git a/substrate/frame/support/test/tests/pallet_ui/call_feeless_invalid_type.rs b/substrate/frame/support/test/tests/pallet_ui/call_feeless_invalid_type.rs new file mode 100644 index 00000000000..26bd8a600ab --- /dev/null +++ b/substrate/frame/support/test/tests/pallet_ui/call_feeless_invalid_type.rs @@ -0,0 +1,37 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#[frame_support::pallet(dev_mode)] +mod pallet { + use frame_support::pallet_prelude::DispatchResult; + use frame_system::pallet_prelude::OriginFor; + + #[pallet::config] + pub trait Config: frame_system::Config {} + + #[pallet::pallet] + pub struct Pallet(core::marker::PhantomData); + + #[pallet::call] + impl Pallet { + #[pallet::feeless_if(0)] + pub fn foo(_: OriginFor) -> DispatchResult { Ok(()) } + } +} + +fn main() { +} diff --git a/substrate/frame/support/test/tests/pallet_ui/call_feeless_invalid_type.stderr b/substrate/frame/support/test/tests/pallet_ui/call_feeless_invalid_type.stderr new file mode 100644 index 00000000000..add3decbf16 --- /dev/null +++ b/substrate/frame/support/test/tests/pallet_ui/call_feeless_invalid_type.stderr @@ -0,0 +1,11 @@ +error: Invalid feeless_if attribute: expected a closure + --> tests/pallet_ui/call_feeless_invalid_type.rs:31:24 + | +31 | #[pallet::feeless_if(0)] + | ^ + +error: expected `|` + --> tests/pallet_ui/call_feeless_invalid_type.rs:31:24 + | +31 | #[pallet::feeless_if(0)] + | ^ diff --git a/substrate/frame/support/test/tests/pallet_ui/call_invalid_attr.stderr b/substrate/frame/support/test/tests/pallet_ui/call_invalid_attr.stderr index eec5e33ccbd..1809fcb6ed9 100644 --- a/substrate/frame/support/test/tests/pallet_ui/call_invalid_attr.stderr +++ b/substrate/frame/support/test/tests/pallet_ui/call_invalid_attr.stderr @@ -1,4 +1,4 @@ -error: expected `weight` or `call_index` +error: expected one of: `weight`, `call_index`, `feeless_if` --> tests/pallet_ui/call_invalid_attr.rs:31:13 | 31 | #[pallet::weird_attr] diff --git a/substrate/frame/support/test/tests/pallet_ui/call_invalid_origin_type.stderr b/substrate/frame/support/test/tests/pallet_ui/call_invalid_origin_type.stderr index 99146c0563a..c04729a2438 100644 --- a/substrate/frame/support/test/tests/pallet_ui/call_invalid_origin_type.stderr +++ b/substrate/frame/support/test/tests/pallet_ui/call_invalid_origin_type.stderr @@ -3,9 +3,3 @@ error: Invalid type: expected `OriginFor` | 34 | pub fn foo(origin: u8) {} | ^^ - -error: expected `OriginFor` - --> tests/pallet_ui/call_invalid_origin_type.rs:34:22 - | -34 | pub fn foo(origin: u8) {} - | ^^ diff --git a/substrate/frame/support/test/tests/pallet_ui/pass/feeless_call.rs b/substrate/frame/support/test/tests/pallet_ui/pass/feeless_call.rs new file mode 100644 index 00000000000..566b7c65cc7 --- /dev/null +++ b/substrate/frame/support/test/tests/pallet_ui/pass/feeless_call.rs @@ -0,0 +1,37 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#[frame_support::pallet(dev_mode)] +mod pallet { + use frame_support::pallet_prelude::DispatchResult; + use frame_system::pallet_prelude::OriginFor; + + #[pallet::config] + pub trait Config: frame_system::Config {} + + #[pallet::pallet] + pub struct Pallet(core::marker::PhantomData); + + #[pallet::call] + impl Pallet { + #[pallet::feeless_if(|_: &OriginFor| -> bool { true })] + pub fn foo(_: OriginFor) -> DispatchResult { Ok(()) } + } +} + +fn main() { +} diff --git a/substrate/frame/transaction-payment/skip-feeless-payment/Cargo.toml b/substrate/frame/transaction-payment/skip-feeless-payment/Cargo.toml new file mode 100644 index 00000000000..cfb814e2e38 --- /dev/null +++ b/substrate/frame/transaction-payment/skip-feeless-payment/Cargo.toml @@ -0,0 +1,44 @@ +[package] +name = "pallet-skip-feeless-payment" +version = "1.0.0-dev" +authors.workspace = true +edition.workspace = true +license.workspace = true +repository.workspace = true +description = "Pallet to skip payments for calls annotated with `feeless_if` if the respective conditions are satisfied." + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] + +[dependencies] +# Substrate dependencies +sp-runtime = { path = "../../../primitives/runtime", default-features = false} +sp-std = { path = "../../../primitives/std", default-features = false} + +frame-support = { path = "../../support", default-features = false} +frame-system = { path = "../../system", default-features = false} + +# Other dependencies +codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } +scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } + +[features] +default = [ "std" ] +std = [ + "codec/std", + "frame-support/std", + "frame-system/std", + "scale-info/std", + "sp-runtime/std", + "sp-std/std", +] +runtime-benchmarks = [ + "frame-support/runtime-benchmarks", + "frame-system/runtime-benchmarks", + "sp-runtime/runtime-benchmarks", +] +try-runtime = [ + "frame-support/try-runtime", + "frame-system/try-runtime", + "sp-runtime/try-runtime", +] diff --git a/substrate/frame/transaction-payment/skip-feeless-payment/src/lib.rs b/substrate/frame/transaction-payment/skip-feeless-payment/src/lib.rs new file mode 100644 index 00000000000..923c7e7ebc2 --- /dev/null +++ b/substrate/frame/transaction-payment/skip-feeless-payment/src/lib.rs @@ -0,0 +1,145 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +//! # Skip Feeless Payment Pallet +//! +//! This pallet allows runtimes that include it to skip payment of transaction fees for +//! dispatchables marked by [`#[pallet::feeless_if]`](`macro@ +//! frame_support::pallet_prelude::feeless_if`). +//! +//! ## Overview +//! +//! It does this by wrapping an existing [`SignedExtension`] implementation (e.g. +//! [`pallet-transaction-payment`]) and checking if the dispatchable is feeless before applying the +//! wrapped extension. 
If the dispatchable is indeed feeless, the extension is skipped and a custom +//! event is emitted instead. Otherwise, the extension is applied as usual. +//! +//! +//! ## Integration +//! +//! This pallet wraps an existing transaction payment pallet. This means you should both pallets +//! in your `construct_runtime` macro and include this pallet's +//! [`SignedExtension`] ([`SkipCheckIfFeeless`]) that would accept the existing one as an argument. + +#![cfg_attr(not(feature = "std"), no_std)] + +use codec::{Decode, Encode}; +use frame_support::{ + dispatch::{CheckIfFeeless, DispatchResult}, + traits::{IsType, OriginTrait}, +}; +use scale_info::TypeInfo; +use sp_runtime::{ + traits::{DispatchInfoOf, PostDispatchInfoOf, SignedExtension}, + transaction_validity::TransactionValidityError, +}; + +#[cfg(test)] +mod mock; +#[cfg(test)] +mod tests; + +pub use pallet::*; + +#[frame_support::pallet] +pub mod pallet { + use super::*; + + #[pallet::config] + pub trait Config: frame_system::Config { + /// The overarching event type. + type RuntimeEvent: From> + IsType<::RuntimeEvent>; + } + + #[pallet::pallet] + pub struct Pallet(_); + + #[pallet::event] + #[pallet::generate_deposit(pub(super) fn deposit_event)] + pub enum Event { + /// A transaction fee was skipped. + FeeSkipped { who: T::AccountId }, + } +} + +/// A [`SignedExtension`] that skips the wrapped extension if the dispatchable is feeless. +#[derive(Encode, Decode, Clone, Eq, PartialEq, TypeInfo)] +#[scale_info(skip_type_params(T))] +pub struct SkipCheckIfFeeless(pub S, sp_std::marker::PhantomData); + +impl sp_std::fmt::Debug for SkipCheckIfFeeless { + #[cfg(feature = "std")] + fn fmt(&self, f: &mut sp_std::fmt::Formatter) -> sp_std::fmt::Result { + write!(f, "SkipCheckIfFeeless<{:?}>", self.0.encode()) + } + #[cfg(not(feature = "std"))] + fn fmt(&self, _: &mut sp_std::fmt::Formatter) -> sp_std::fmt::Result { + Ok(()) + } +} + +impl SkipCheckIfFeeless { + /// utility constructor. Used only in client/factory code. + pub fn from(s: S) -> Self { + Self(s, sp_std::marker::PhantomData) + } +} + +impl> SignedExtension + for SkipCheckIfFeeless +where + S::Call: CheckIfFeeless>, +{ + type AccountId = T::AccountId; + type Call = S::Call; + type AdditionalSigned = S::AdditionalSigned; + type Pre = (Self::AccountId, Option<::Pre>); + const IDENTIFIER: &'static str = "SkipCheckIfFeeless"; + + fn additional_signed(&self) -> Result { + self.0.additional_signed() + } + + fn pre_dispatch( + self, + who: &Self::AccountId, + call: &Self::Call, + info: &DispatchInfoOf, + len: usize, + ) -> Result { + if call.is_feeless(&::RuntimeOrigin::signed(who.clone())) { + Ok((who.clone(), None)) + } else { + Ok((who.clone(), Some(self.0.pre_dispatch(who, call, info, len)?))) + } + } + + fn post_dispatch( + pre: Option, + info: &DispatchInfoOf, + post_info: &PostDispatchInfoOf, + len: usize, + result: &DispatchResult, + ) -> Result<(), TransactionValidityError> { + if let Some(pre) = pre { + if let Some(pre) = pre.1 { + S::post_dispatch(Some(pre), info, post_info, len, result)?; + } else { + Pallet::::deposit_event(Event::::FeeSkipped { who: pre.0 }); + } + } + Ok(()) + } +} diff --git a/substrate/frame/transaction-payment/skip-feeless-payment/src/mock.rs b/substrate/frame/transaction-payment/skip-feeless-payment/src/mock.rs new file mode 100644 index 00000000000..5c540c3e459 --- /dev/null +++ b/substrate/frame/transaction-payment/skip-feeless-payment/src/mock.rs @@ -0,0 +1,92 @@ +// Copyright (C) Parity Technologies (UK) Ltd. 
+// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use super::*; +use crate as pallet_skip_feeless_payment; + +use frame_support::{derive_impl, parameter_types}; +use frame_system as system; + +type Block = frame_system::mocking::MockBlock; +type AccountId = u64; + +#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] +impl frame_system::Config for Runtime { + type Block = Block; +} + +impl Config for Runtime { + type RuntimeEvent = RuntimeEvent; +} + +parameter_types! { + pub static PreDispatchCount: u32 = 0; +} + +#[derive(Clone, Eq, PartialEq, Debug, Encode, Decode, TypeInfo)] +pub struct DummyExtension; + +impl SignedExtension for DummyExtension { + type AccountId = AccountId; + type Call = RuntimeCall; + type AdditionalSigned = (); + type Pre = (); + const IDENTIFIER: &'static str = "DummyExtension"; + fn additional_signed(&self) -> sp_std::result::Result<(), TransactionValidityError> { + Ok(()) + } + fn pre_dispatch( + self, + _who: &Self::AccountId, + _call: &Self::Call, + _info: &DispatchInfoOf, + _len: usize, + ) -> Result { + PreDispatchCount::mutate(|c| *c += 1); + Ok(()) + } +} + +#[frame_support::pallet(dev_mode)] +pub mod pallet_dummy { + use frame_support::pallet_prelude::*; + use frame_system::pallet_prelude::*; + + #[pallet::pallet] + pub struct Pallet(_); + + #[pallet::config] + pub trait Config: frame_system::Config {} + + #[pallet::call] + impl Pallet { + #[pallet::feeless_if(|_origin: &OriginFor, data: &u32| -> bool { + *data == 0 + })] + pub fn aux(_origin: OriginFor, #[pallet::compact] _data: u32) -> DispatchResult { + unreachable!() + } + } +} + +impl pallet_dummy::Config for Runtime {} + +frame_support::construct_runtime!( + pub struct Runtime { + System: system, + SkipFeeless: pallet_skip_feeless_payment, + DummyPallet: pallet_dummy, + } +); diff --git a/substrate/frame/transaction-payment/skip-feeless-payment/src/tests.rs b/substrate/frame/transaction-payment/skip-feeless-payment/src/tests.rs new file mode 100644 index 00000000000..4b4dd699741 --- /dev/null +++ b/substrate/frame/transaction-payment/skip-feeless-payment/src/tests.rs @@ -0,0 +1,33 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +use super::*; +use crate::mock::{pallet_dummy::Call, DummyExtension, PreDispatchCount, Runtime, RuntimeCall}; +use frame_support::dispatch::DispatchInfo; + +#[test] +fn skip_feeless_payment_works() { + let call = RuntimeCall::DummyPallet(Call::::aux { data: 1 }); + SkipCheckIfFeeless::::from(DummyExtension) + .pre_dispatch(&0, &call, &DispatchInfo::default(), 0) + .unwrap(); + assert_eq!(PreDispatchCount::get(), 1); + + let call = RuntimeCall::DummyPallet(Call::::aux { data: 0 }); + SkipCheckIfFeeless::::from(DummyExtension) + .pre_dispatch(&0, &call, &DispatchInfo::default(), 0) + .unwrap(); + assert_eq!(PreDispatchCount::get(), 1); +} -- GitLab From 29654a4d7154296e8e6cb2a067896490d67d96a1 Mon Sep 17 00:00:00 2001 From: gupnik <17176722+gupnik@users.noreply.github.com> Date: Mon, 13 Nov 2023 19:56:33 +0530 Subject: [PATCH 034/199] Skip zombienet CI job until PolkadotJS includes `SkipCheckIfFeeless` extension (#2294) --- .gitlab/pipeline/zombienet/substrate.yml | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) diff --git a/.gitlab/pipeline/zombienet/substrate.yml b/.gitlab/pipeline/zombienet/substrate.yml index 9fb2f161ad7..9e14ebe0852 100644 --- a/.gitlab/pipeline/zombienet/substrate.yml +++ b/.gitlab/pipeline/zombienet/substrate.yml @@ -38,13 +38,14 @@ tags: - zombienet-polkadot-integration-test -zombienet-substrate-0000-block-building: - extends: - - .zombienet-substrate-common - script: - - /home/nonroot/zombie-net/scripts/ci/run-test-local-env-manager.sh - --local-dir="${LOCAL_DIR}/0000-block-building" - --test="block-building.zndsl" +# Skip this one until PolkadotJS includes `SkipCheckIfFeeless` extension +# zombienet-substrate-0000-block-building: +# extends: +# - .zombienet-substrate-common +# script: +# - /home/nonroot/zombie-net/scripts/ci/run-test-local-env-manager.sh +# --local-dir="${LOCAL_DIR}/0000-block-building" +# --test="block-building.zndsl" zombienet-substrate-0001-basic-warp-sync: extends: -- GitLab From 18257373b313fcfad4f7a2ab2cb02cb8bfc5b8f1 Mon Sep 17 00:00:00 2001 From: Adrian Catangiu Date: Mon, 13 Nov 2023 17:16:55 +0200 Subject: [PATCH 035/199] pallet-xcm: enhance `reserve_transfer_assets` to support remote reserves (#1672) ## Motivation `pallet-xcm` is the main user-facing interface for XCM functionality, including assets manipulation functions like `teleportAssets()` and `reserve_transfer_assets()` calls. While `teleportAsset()` works both ways, `reserve_transfer_assets()` works only for sending reserve-based assets to a remote destination and beneficiary when the reserve is the _local chain_. ## Solution This PR enhances `pallet_xcm::(limited_)reserve_withdraw_assets` to support transfers when reserves are other chains. This will allow complete, **bi-directional** reserve-based asset transfers user stories using `pallet-xcm`. 
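For a rough sense of the resulting call shape, a sketch follows; the `PolkadotXcm` pallet alias, `sender`, `destination`, `beneficiary` and the amount are illustrative placeholders and not taken from this patch, while the extrinsic signature itself is the existing one:

```rust
// Sketch only: placeholder names; the same `limited_reserve_transfer_assets`
// signature is used regardless of where the reserve for the assets lives.
let assets: MultiAssets = (Parent, 1_000_000_000u128).into();
PolkadotXcm::limited_reserve_transfer_assets(
    RuntimeOrigin::signed(sender),
    Box::new(destination.into()),   // VersionedMultiLocation of the destination chain
    Box::new(beneficiary.into()),   // VersionedMultiLocation of the beneficiary account
    Box::new(assets.into()),        // VersionedMultiAssets to transfer
    0,                              // fee_asset_item: index into `assets` of the fee asset
    WeightLimit::Unlimited,
)?;
```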
Enables following scenarios: - transferring assets with local reserve (was previously supported iff asset used as fee also had local reserve - now it works in all cases), - transferring assets with reserve on destination, - transferring assets with reserve on remote/third-party chain (iff assets and fees have same remote reserve), - transferring assets with reserve different than the reserve of the asset to be used as fees - meaning can be used to transfer random asset with local/dest reserve while using DOT for fees on all involved chains, even if DOT local/dest reserve doesn't match asset reserve, - transferring assets with any type of local/dest reserve while using fees which can be teleported between involved chains. All of the above is done by pallet inner logic without the user having to specify which scenario/reserves/teleports/etc. The correct scenario and corresponding XCM programs are identified, and respectively, built automatically based on runtime configuration of trusted teleporters and trusted reserves. #### Current limitations: - while `fees` and "non-fee" `assets` CAN have different reserves (or fees CAN be teleported), the remaining "non-fee" `assets` CANNOT, among themselves, have different reserve locations (this is also implicitly enforced by `MAX_ASSETS_FOR_TRANSFER=2`, but this can be safely increased in the future). - `fees` and "non-fee" `assets` CANNOT have **different remote** reserves (this could also be supported in the future, but adds even more complexity while possibly not being worth it - we'll see what the future holds). Fixes https://github.com/paritytech/polkadot-sdk/issues/1584 Fixes https://github.com/paritytech/polkadot-sdk/issues/2055 --------- Co-authored-by: Francisco Aguirre Co-authored-by: Branislav Kontur --- Cargo.lock | 5 + .../runtime/src/xcm_config.rs | 7 - .../assets/asset-hub-rococo/src/lib.rs | 6 +- .../assets/asset-hub-westend/src/lib.rs | 6 +- .../assets/asset-hub-wococo/src/lib.rs | 6 +- .../bridges/bridge-hub-rococo/src/lib.rs | 2 +- .../bridges/bridge-hub-westend/src/lib.rs | 2 +- .../bridges/bridge-hub-wococo/src/lib.rs | 2 +- .../parachains/testing/penpal/Cargo.toml | 1 + .../parachains/testing/penpal/src/lib.rs | 12 +- .../emulated/chains/relays/westend/src/lib.rs | 2 +- .../emulated/common/src/impls.rs | 14 +- .../tests/assets/asset-hub-rococo/Cargo.toml | 4 +- .../tests/assets/asset-hub-rococo/src/lib.rs | 25 +- .../src/tests/reserve_transfer.rs | 495 +++--- .../asset-hub-rococo/src/tests/teleport.rs | 18 +- .../tests/assets/asset-hub-westend/src/lib.rs | 4 +- .../src/tests/reserve_transfer.rs | 359 +---- .../asset-hub-westend/src/tests/teleport.rs | 2 +- .../assets/asset-hub-kusama/src/lib.rs | 36 +- .../assets/asset-hub-kusama/src/xcm_config.rs | 7 - .../assets/asset-hub-kusama/tests/tests.rs | 40 +- .../assets/asset-hub-polkadot/src/lib.rs | 36 +- .../asset-hub-polkadot/src/xcm_config.rs | 7 - .../assets/asset-hub-polkadot/tests/tests.rs | 40 +- .../assets/asset-hub-rococo/src/lib.rs | 36 +- .../assets/asset-hub-rococo/src/xcm_config.rs | 7 - .../assets/asset-hub-rococo/tests/tests.rs | 35 +- .../assets/asset-hub-westend/src/lib.rs | 36 +- .../asset-hub-westend/src/xcm_config.rs | 7 - .../assets/asset-hub-westend/tests/tests.rs | 35 +- .../runtimes/assets/test-utils/src/lib.rs | 15 + .../assets/test-utils/src/test_cases.rs | 288 +++- .../test-utils/src/test_cases_over_bridge.rs | 88 +- .../parachains/runtimes/bridge-hubs/README.md | 16 +- .../bridge-hubs/bridge-hub-kusama/src/lib.rs | 26 +- 
.../bridge-hub-kusama/src/xcm_config.rs | 7 - .../bridge-hub-kusama/tests/tests.rs | 6 - .../bridge-hub-polkadot/src/lib.rs | 26 +- .../bridge-hub-polkadot/src/xcm_config.rs | 7 - .../bridge-hub-polkadot/tests/tests.rs | 6 - .../bridge-hubs/bridge-hub-rococo/src/lib.rs | 26 +- .../bridge-hub-rococo/src/xcm_config.rs | 7 - .../bridge-hub-rococo/tests/tests.rs | 12 - .../bridge-hubs/bridge-hub-westend/src/lib.rs | 26 +- .../bridge-hub-westend/src/xcm_config.rs | 7 - .../bridge-hub-westend/tests/tests.rs | 6 - .../collectives-polkadot/src/lib.rs | 26 +- .../collectives-polkadot/src/xcm_config.rs | 7 - .../contracts/contracts-rococo/src/lib.rs | 27 +- .../contracts-rococo/src/xcm_config.rs | 7 - .../runtimes/testing/penpal/src/lib.rs | 11 +- .../runtimes/testing/penpal/src/xcm_config.rs | 84 +- .../testing/rococo-parachain/src/lib.rs | 7 - cumulus/scripts/bridges_rococo_westend.sh | 28 +- cumulus/xcm/xcm-emulator/src/lib.rs | 8 +- polkadot/runtime/parachains/src/paras/mod.rs | 2 +- polkadot/runtime/rococo/src/lib.rs | 32 +- polkadot/runtime/rococo/src/xcm_config.rs | 7 - .../runtime/test-runtime/src/xcm_config.rs | 7 - polkadot/runtime/westend/src/lib.rs | 28 +- polkadot/runtime/westend/src/xcm_config.rs | 7 - polkadot/xcm/pallet-xcm/Cargo.toml | 10 +- polkadot/xcm/pallet-xcm/src/benchmarking.rs | 165 +- polkadot/xcm/pallet-xcm/src/lib.rs | 551 ++++++- polkadot/xcm/pallet-xcm/src/mock.rs | 203 ++- .../pallet-xcm/src/tests/assets_transfer.rs | 1405 +++++++++++++++++ .../pallet-xcm/src/{tests.rs => tests/mod.rs} | 375 +---- polkadot/xcm/src/v3/multilocation.rs | 66 + polkadot/xcm/xcm-builder/src/barriers.rs | 13 +- .../xcm/xcm-builder/src/tests/pay/mock.rs | 7 - polkadot/xcm/xcm-builder/tests/mock/mod.rs | 7 - polkadot/xcm/xcm-executor/Cargo.toml | 2 + polkadot/xcm/xcm-executor/src/lib.rs | 8 +- .../xcm-executor/src/traits/asset_transfer.rs | 90 ++ polkadot/xcm/xcm-executor/src/traits/mod.rs | 6 +- .../xcm-simulator/example/src/parachain.rs | 7 - .../xcm-simulator/example/src/relay_chain.rs | 7 - .../xcm/xcm-simulator/fuzzer/src/parachain.rs | 7 - .../xcm-simulator/fuzzer/src/relay_chain.rs | 7 - 80 files changed, 3650 insertions(+), 1437 deletions(-) create mode 100644 polkadot/xcm/pallet-xcm/src/tests/assets_transfer.rs rename polkadot/xcm/pallet-xcm/src/{tests.rs => tests/mod.rs} (68%) create mode 100644 polkadot/xcm/xcm-executor/src/traits/asset_transfer.rs diff --git a/Cargo.lock b/Cargo.lock index dbdc2f856c4..4a45d5c602e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -901,9 +901,11 @@ dependencies = [ "pallet-asset-conversion", "pallet-assets", "pallet-balances", + "pallet-message-queue", "pallet-xcm", "parachains-common", "parity-scale-codec", + "penpal-runtime", "rococo-runtime", "rococo-system-emulated-network", "sp-runtime", @@ -11183,6 +11185,7 @@ dependencies = [ "frame-support", "frame-system", "log", + "pallet-assets", "pallet-balances", "parity-scale-codec", "polkadot-parachain-primitives", @@ -11630,6 +11633,7 @@ dependencies = [ "frame-support", "parachains-common", "penpal-runtime", + "rococo-emulated-chain", "serde_json", "sp-core", "sp-runtime", @@ -18416,6 +18420,7 @@ dependencies = [ "impl-trait-for-tuples", "log", "parity-scale-codec", + "scale-info", "sp-arithmetic", "sp-core", "sp-io", diff --git a/cumulus/parachain-template/runtime/src/xcm_config.rs b/cumulus/parachain-template/runtime/src/xcm_config.rs index 353f68d22e3..752137c96f1 100644 --- a/cumulus/parachain-template/runtime/src/xcm_config.rs +++ b/cumulus/parachain-template/runtime/src/xcm_config.rs @@ -150,11 
+150,6 @@ pub type XcmRouter = WithUniqueTopic<( XcmpQueue, )>; -#[cfg(feature = "runtime-benchmarks")] -parameter_types! { - pub ReachableDest: Option = Some(Parent.into()); -} - impl pallet_xcm::Config for Runtime { type RuntimeEvent = RuntimeEvent; type SendXcmOrigin = EnsureXcmOrigin; @@ -180,8 +175,6 @@ impl pallet_xcm::Config for Runtime { type SovereignAccountOf = LocationToAccountId; type MaxLockers = ConstU32<8>; type WeightInfo = pallet_xcm::TestWeightInfo; - #[cfg(feature = "runtime-benchmarks")] - type ReachableDest = ReachableDest; type AdminOrigin = EnsureRoot; type MaxRemoteLockConsumers = ConstU32<0>; type RemoteLockConsumerIdentifier = (); diff --git a/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-rococo/src/lib.rs b/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-rococo/src/lib.rs index 0580d61eae9..f94c4c3d255 100644 --- a/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-rococo/src/lib.rs +++ b/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-rococo/src/lib.rs @@ -21,7 +21,7 @@ use frame_support::traits::OnInitialize; // Cumulus use emulated_integration_tests_common::{ impl_accounts_helpers_for_parachain, impl_assert_events_helpers_for_parachain, - impl_assets_helpers_for_system_parachain, xcm_emulator::decl_test_parachains, + impl_assets_helpers_for_parachain, xcm_emulator::decl_test_parachains, }; use rococo_emulated_chain::Rococo; @@ -51,5 +51,5 @@ decl_test_parachains! { // AssetHubRococo implementation impl_accounts_helpers_for_parachain!(AssetHubRococo); -impl_assert_events_helpers_for_parachain!(AssetHubRococo); -impl_assets_helpers_for_system_parachain!(AssetHubRococo, Rococo); +impl_assert_events_helpers_for_parachain!(AssetHubRococo, false); +impl_assets_helpers_for_parachain!(AssetHubRococo, Rococo); diff --git a/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-westend/src/lib.rs b/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-westend/src/lib.rs index 804b727c33f..73d777247a5 100644 --- a/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-westend/src/lib.rs +++ b/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-westend/src/lib.rs @@ -21,7 +21,7 @@ use frame_support::traits::OnInitialize; // Cumulus use emulated_integration_tests_common::{ impl_accounts_helpers_for_parachain, impl_assert_events_helpers_for_parachain, - impl_assets_helpers_for_system_parachain, xcm_emulator::decl_test_parachains, + impl_assets_helpers_for_parachain, xcm_emulator::decl_test_parachains, }; use westend_emulated_chain::Westend; @@ -51,5 +51,5 @@ decl_test_parachains! 
{ // AssetHubWestend implementation impl_accounts_helpers_for_parachain!(AssetHubWestend); -impl_assert_events_helpers_for_parachain!(AssetHubWestend); -impl_assets_helpers_for_system_parachain!(AssetHubWestend, Westend); +impl_assert_events_helpers_for_parachain!(AssetHubWestend, false); +impl_assets_helpers_for_parachain!(AssetHubWestend, Westend); diff --git a/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-wococo/src/lib.rs b/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-wococo/src/lib.rs index 677ca1763cf..38a6ece3472 100644 --- a/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-wococo/src/lib.rs +++ b/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-wococo/src/lib.rs @@ -19,7 +19,7 @@ use frame_support::traits::OnInitialize; // Cumulus use emulated_integration_tests_common::{ impl_accounts_helpers_for_parachain, impl_assert_events_helpers_for_parachain, - impl_assets_helpers_for_system_parachain, xcm_emulator::decl_test_parachains, + impl_assets_helpers_for_parachain, xcm_emulator::decl_test_parachains, }; use wococo_emulated_chain::Wococo; @@ -49,5 +49,5 @@ decl_test_parachains! { // AssetHubWococo implementation impl_accounts_helpers_for_parachain!(AssetHubWococo); -impl_assert_events_helpers_for_parachain!(AssetHubWococo); -impl_assets_helpers_for_system_parachain!(AssetHubWococo, Wococo); +impl_assert_events_helpers_for_parachain!(AssetHubWococo, false); +impl_assets_helpers_for_parachain!(AssetHubWococo, Wococo); diff --git a/cumulus/parachains/integration-tests/emulated/chains/parachains/bridges/bridge-hub-rococo/src/lib.rs b/cumulus/parachains/integration-tests/emulated/chains/parachains/bridges/bridge-hub-rococo/src/lib.rs index d7630954c86..f4557021f62 100644 --- a/cumulus/parachains/integration-tests/emulated/chains/parachains/bridges/bridge-hub-rococo/src/lib.rs +++ b/cumulus/parachains/integration-tests/emulated/chains/parachains/bridges/bridge-hub-rococo/src/lib.rs @@ -46,4 +46,4 @@ decl_test_parachains! { // BridgeHubRococo implementation impl_accounts_helpers_for_parachain!(BridgeHubRococo); -impl_assert_events_helpers_for_parachain!(BridgeHubRococo); +impl_assert_events_helpers_for_parachain!(BridgeHubRococo, false); diff --git a/cumulus/parachains/integration-tests/emulated/chains/parachains/bridges/bridge-hub-westend/src/lib.rs b/cumulus/parachains/integration-tests/emulated/chains/parachains/bridges/bridge-hub-westend/src/lib.rs index 436b65cb916..1f1126d4565 100644 --- a/cumulus/parachains/integration-tests/emulated/chains/parachains/bridges/bridge-hub-westend/src/lib.rs +++ b/cumulus/parachains/integration-tests/emulated/chains/parachains/bridges/bridge-hub-westend/src/lib.rs @@ -46,4 +46,4 @@ decl_test_parachains! { // BridgeHubWestend implementation impl_accounts_helpers_for_parachain!(BridgeHubWestend); -impl_assert_events_helpers_for_parachain!(BridgeHubWestend); +impl_assert_events_helpers_for_parachain!(BridgeHubWestend, false); diff --git a/cumulus/parachains/integration-tests/emulated/chains/parachains/bridges/bridge-hub-wococo/src/lib.rs b/cumulus/parachains/integration-tests/emulated/chains/parachains/bridges/bridge-hub-wococo/src/lib.rs index 6807a2ab8c8..e643f104aa3 100644 --- a/cumulus/parachains/integration-tests/emulated/chains/parachains/bridges/bridge-hub-wococo/src/lib.rs +++ b/cumulus/parachains/integration-tests/emulated/chains/parachains/bridges/bridge-hub-wococo/src/lib.rs @@ -44,4 +44,4 @@ decl_test_parachains! 
{ // BridgeHubWococo implementation impl_accounts_helpers_for_parachain!(BridgeHubWococo); -impl_assert_events_helpers_for_parachain!(BridgeHubWococo); +impl_assert_events_helpers_for_parachain!(BridgeHubWococo, false); diff --git a/cumulus/parachains/integration-tests/emulated/chains/parachains/testing/penpal/Cargo.toml b/cumulus/parachains/integration-tests/emulated/chains/parachains/testing/penpal/Cargo.toml index 42aaee3f102..c55b10d7180 100644 --- a/cumulus/parachains/integration-tests/emulated/chains/parachains/testing/penpal/Cargo.toml +++ b/cumulus/parachains/integration-tests/emulated/chains/parachains/testing/penpal/Cargo.toml @@ -22,3 +22,4 @@ parachains-common = { path = "../../../../../../../parachains/common" } cumulus-primitives-core = { path = "../../../../../../../primitives/core", default-features = false } emulated-integration-tests-common = { path = "../../../../common", default-features = false } penpal-runtime = { path = "../../../../../../runtimes/testing/penpal" } +rococo-emulated-chain = { path = "../../../relays/rococo" } diff --git a/cumulus/parachains/integration-tests/emulated/chains/parachains/testing/penpal/src/lib.rs b/cumulus/parachains/integration-tests/emulated/chains/parachains/testing/penpal/src/lib.rs index 8709d4e9196..537f96f45b4 100644 --- a/cumulus/parachains/integration-tests/emulated/chains/parachains/testing/penpal/src/lib.rs +++ b/cumulus/parachains/integration-tests/emulated/chains/parachains/testing/penpal/src/lib.rs @@ -21,8 +21,10 @@ use frame_support::traits::OnInitialize; // Cumulus use emulated_integration_tests_common::{ - impl_assert_events_helpers_for_parachain, xcm_emulator::decl_test_parachains, + impl_accounts_helpers_for_parachain, impl_assert_events_helpers_for_parachain, + impl_assets_helpers_for_parachain, xcm_emulator::decl_test_parachains, }; +use rococo_emulated_chain::Rococo; // Penpal Parachain declaration decl_test_parachains! { @@ -40,6 +42,7 @@ decl_test_parachains! { pallets = { PolkadotXcm: penpal_runtime::PolkadotXcm, Assets: penpal_runtime::Assets, + Balances: penpal_runtime::Balances, } }, pub struct PenpalB { @@ -56,10 +59,13 @@ decl_test_parachains! { pallets = { PolkadotXcm: penpal_runtime::PolkadotXcm, Assets: penpal_runtime::Assets, + Balances: penpal_runtime::Balances, } }, } // Penpal implementation -impl_assert_events_helpers_for_parachain!(PenpalA); -impl_assert_events_helpers_for_parachain!(PenpalB); +impl_accounts_helpers_for_parachain!(PenpalA); +impl_assets_helpers_for_parachain!(PenpalA, Rococo); +impl_assert_events_helpers_for_parachain!(PenpalA, true); +impl_assert_events_helpers_for_parachain!(PenpalB, true); diff --git a/cumulus/parachains/integration-tests/emulated/chains/relays/westend/src/lib.rs b/cumulus/parachains/integration-tests/emulated/chains/relays/westend/src/lib.rs index af45d8db4e6..d4ba1b6cfe7 100644 --- a/cumulus/parachains/integration-tests/emulated/chains/relays/westend/src/lib.rs +++ b/cumulus/parachains/integration-tests/emulated/chains/relays/westend/src/lib.rs @@ -30,7 +30,7 @@ decl_test_relay_chains! 
{ on_init = (), runtime = westend_runtime, core = { - SovereignAccountOf: westend_runtime::xcm_config::LocationConverter, //TODO: rename to SovereignAccountOf, + SovereignAccountOf: westend_runtime::xcm_config::LocationConverter, }, pallets = { XcmPallet: westend_runtime::XcmPallet, diff --git a/cumulus/parachains/integration-tests/emulated/common/src/impls.rs b/cumulus/parachains/integration-tests/emulated/common/src/impls.rs index 6c99c1614db..82f27b93200 100644 --- a/cumulus/parachains/integration-tests/emulated/common/src/impls.rs +++ b/cumulus/parachains/integration-tests/emulated/common/src/impls.rs @@ -399,7 +399,7 @@ macro_rules! impl_accounts_helpers_for_parachain { #[macro_export] macro_rules! impl_assert_events_helpers_for_parachain { - ( $chain:ident ) => { + ( $chain:ident, $ignore_weight:expr ) => { $crate::impls::paste::paste! { type [<$chain RuntimeEvent>] = <$chain as $crate::impls::Chain>::RuntimeEvent; @@ -412,7 +412,7 @@ macro_rules! impl_assert_events_helpers_for_parachain { [<$chain RuntimeEvent>]::::PolkadotXcm( $crate::impls::pallet_xcm::Event::Attempted { outcome: $crate::impls::Outcome::Complete(weight) } ) => { - weight: $crate::impls::weight_within_threshold( + weight: $ignore_weight || $crate::impls::weight_within_threshold( ($crate::impls::REF_TIME_THRESHOLD, $crate::impls::PROOF_SIZE_THRESHOLD), expected_weight.unwrap_or(*weight), *weight @@ -434,7 +434,7 @@ macro_rules! impl_assert_events_helpers_for_parachain { [<$chain RuntimeEvent>]::::PolkadotXcm( $crate::impls::pallet_xcm::Event::Attempted { outcome: $crate::impls::Outcome::Incomplete(weight, error) } ) => { - weight: $crate::impls::weight_within_threshold( + weight: $ignore_weight || $crate::impls::weight_within_threshold( ($crate::impls::REF_TIME_THRESHOLD, $crate::impls::PROOF_SIZE_THRESHOLD), expected_weight.unwrap_or(*weight), *weight @@ -490,7 +490,7 @@ macro_rules! impl_assert_events_helpers_for_parachain { [<$chain RuntimeEvent>]::::MessageQueue($crate::impls::pallet_message_queue::Event::Processed { success: true, weight_used: weight, .. }) => { - weight: $crate::impls::weight_within_threshold( + weight: $ignore_weight || $crate::impls::weight_within_threshold( ($crate::impls::REF_TIME_THRESHOLD, $crate::impls::PROOF_SIZE_THRESHOLD), expected_weight.unwrap_or(*weight), *weight @@ -510,7 +510,7 @@ macro_rules! impl_assert_events_helpers_for_parachain { [<$chain RuntimeEvent>]::::MessageQueue($crate::impls::pallet_message_queue::Event::Processed { success: false, weight_used: weight, .. }) => { - weight: $crate::impls::weight_within_threshold( + weight: $ignore_weight || $crate::impls::weight_within_threshold( ($crate::impls::REF_TIME_THRESHOLD, $crate::impls::PROOF_SIZE_THRESHOLD), expected_weight.unwrap_or(*weight), *weight @@ -541,7 +541,7 @@ macro_rules! impl_assert_events_helpers_for_parachain { vec![ [<$chain RuntimeEvent>]::::MessageQueue($crate::impls::pallet_message_queue::Event::Processed { success: true, weight_used: weight, .. } ) => { - weight: $crate::impls::weight_within_threshold( + weight: $ignore_weight || $crate::impls::weight_within_threshold( ($crate::impls::REF_TIME_THRESHOLD, $crate::impls::PROOF_SIZE_THRESHOLD), expected_weight.unwrap_or(*weight), *weight @@ -556,7 +556,7 @@ macro_rules! impl_assert_events_helpers_for_parachain { } #[macro_export] -macro_rules! impl_assets_helpers_for_system_parachain { +macro_rules! impl_assets_helpers_for_parachain { ( $chain:ident, $relay_chain:ident ) => { $crate::impls::paste::paste! 
{ impl $chain { diff --git a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/Cargo.toml b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/Cargo.toml index 6e592f04ba1..23f80f33f78 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/Cargo.toml +++ b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/Cargo.toml @@ -17,6 +17,7 @@ frame-support = { path = "../../../../../../../substrate/frame/support", default pallet-balances = { path = "../../../../../../../substrate/frame/balances", default-features = false} pallet-assets = { path = "../../../../../../../substrate/frame/assets", default-features = false} pallet-asset-conversion = { path = "../../../../../../../substrate/frame/asset-conversion", default-features = false} +pallet-message-queue = { path = "../../../../../../../substrate/frame/message-queue", default-features = false } # Polkadot xcm = { package = "staging-xcm", path = "../../../../../../../polkadot/xcm", default-features = false} @@ -28,5 +29,6 @@ rococo-runtime = { path = "../../../../../../../polkadot/runtime/rococo" } asset-test-utils = { path = "../../../../../runtimes/assets/test-utils" } parachains-common = { path = "../../../../../../parachains/common" } asset-hub-rococo-runtime = { path = "../../../../../runtimes/assets/asset-hub-rococo" } -emulated-integration-tests-common = { path = "../../../common", default-features = false} +emulated-integration-tests-common = { path = "../../../common", default-features = false } +penpal-runtime = { path = "../../../../../runtimes/testing/penpal" } rococo-system-emulated-network ={ path = "../../../networks/rococo-system" } diff --git a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/lib.rs b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/lib.rs index 11380cd1e2d..3ff8c37c646 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/lib.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/lib.rs @@ -61,18 +61,20 @@ pub const ASSET_MIN_BALANCE: u128 = 1000; pub const ASSETS_PALLET_ID: u8 = 50; pub type RelayToSystemParaTest = Test; +pub type RelayToParaTest = Test; pub type SystemParaToRelayTest = Test; pub type SystemParaToParaTest = Test; +pub type ParaToSystemParaTest = Test; -/// Returns a `TestArgs` instance to de used for the Relay Chain accross integraton tests -pub fn relay_test_args(amount: Balance) -> TestArgs { +/// Returns a `TestArgs` instance to be used for the Relay Chain across integration tests +pub fn relay_test_args( + dest: MultiLocation, + beneficiary_id: AccountId32, + amount: Balance, +) -> TestArgs { TestArgs { - dest: Rococo::child_location_of(AssetHubRococo::para_id()), - beneficiary: AccountId32Junction { - network: None, - id: AssetHubRococoReceiver::get().into(), - } - .into(), + dest, + beneficiary: AccountId32Junction { network: None, id: beneficiary_id.into() }.into(), amount, assets: (Here, amount).into(), asset_id: None, @@ -81,13 +83,14 @@ pub fn relay_test_args(amount: Balance) -> TestArgs { } } -/// Returns a `TestArgs` instance to de used for the System Parachain accross integraton tests -pub fn system_para_test_args( +/// Returns a `TestArgs` instance to be used by parachains across integration tests +pub fn para_test_args( dest: MultiLocation, beneficiary_id: AccountId32, amount: Balance, assets: MultiAssets, asset_id: Option, + 
fee_asset_item: u32, ) -> TestArgs { TestArgs { dest, @@ -95,7 +98,7 @@ pub fn system_para_test_args( amount, assets, asset_id, - fee_asset_item: 0, + fee_asset_item, weight_limit: WeightLimit::Unlimited, } } diff --git a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/reserve_transfer.rs b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/reserve_transfer.rs index 76d93b2dbdb..d0e9b72176b 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/reserve_transfer.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/reserve_transfer.rs @@ -15,37 +15,45 @@ use crate::*; use asset_hub_rococo_runtime::xcm_config::XcmConfig as AssetHubRococoXcmConfig; +use penpal_runtime::xcm_config::XcmConfig as PenpalRococoXcmConfig; use rococo_runtime::xcm_config::XcmConfig as RococoXcmConfig; -fn relay_origin_assertions(t: RelayToSystemParaTest) { +fn relay_to_para_sender_assertions(t: RelayToParaTest) { type RuntimeEvent = ::RuntimeEvent; - Rococo::assert_xcm_pallet_attempted_complete(Some(Weight::from_parts(630_092_000, 6_196))); + Rococo::assert_xcm_pallet_attempted_complete(Some(Weight::from_parts(864_610_000, 8_799))); assert_expected_events!( Rococo, vec![ - // Amount to reserve transfer is transferred to System Parachain's Sovereign account - RuntimeEvent::Balances(pallet_balances::Event::Transfer { from, to, amount }) => { + // Amount to reserve transfer is transferred to Parachain's Sovereign account + RuntimeEvent::Balances( + pallet_balances::Event::Transfer { from, to, amount } + ) => { from: *from == t.sender.account_id, to: *to == Rococo::sovereign_account_id_of( t.args.dest ), - amount: *amount == t.args.amount, + amount: *amount == t.args.amount, }, ] ); } -fn system_para_dest_assertions_incomplete(_t: RelayToSystemParaTest) { - AssetHubRococo::assert_dmp_queue_incomplete(Some(Weight::from_parts(57_185_000, 3504))); -} - -fn system_para_to_relay_assertions(_t: SystemParaToRelayTest) { - AssetHubRococo::assert_xcm_pallet_attempted_error(Some(XcmError::Barrier)) +fn relay_to_para_receiver_assertions(_: Test) { + type RuntimeEvent = ::RuntimeEvent; + assert_expected_events!( + PenpalA, + vec![ + RuntimeEvent::Balances(pallet_balances::Event::Deposit { .. }) => {}, + RuntimeEvent::MessageQueue( + pallet_message_queue::Event::Processed { success: true, .. } + ) => {}, + ] + ); } -fn system_para_to_para_assertions(t: SystemParaToParaTest) { +fn system_para_to_para_sender_assertions(t: SystemParaToParaTest) { type RuntimeEvent = ::RuntimeEvent; AssetHubRococo::assert_xcm_pallet_attempted_complete(Some(Weight::from_parts( @@ -56,7 +64,7 @@ fn system_para_to_para_assertions(t: SystemParaToParaTest) { assert_expected_events!( AssetHubRococo, vec![ - // Amount to reserve transfer is transferred to Parachain's Sovereing account + // Amount to reserve transfer is transferred to Parachain's Sovereign account RuntimeEvent::Balances( pallet_balances::Event::Transfer { from, to, amount } ) => { @@ -70,7 +78,64 @@ fn system_para_to_para_assertions(t: SystemParaToParaTest) { ); } -fn system_para_to_para_assets_assertions(t: SystemParaToParaTest) { +fn system_para_to_para_receiver_assertions(_: Test) { + type RuntimeEvent = ::RuntimeEvent; + assert_expected_events!( + PenpalA, + vec![ + RuntimeEvent::Balances(pallet_balances::Event::Deposit { .. }) => {}, + RuntimeEvent::MessageQueue( + pallet_message_queue::Event::Processed { success: true, .. 
} + ) => {}, + ] + ); +} + +fn para_to_system_para_sender_assertions(t: ParaToSystemParaTest) { + type RuntimeEvent = ::RuntimeEvent; + + PenpalA::assert_xcm_pallet_attempted_complete(Some(Weight::from_parts(864_610_000, 8_799))); + + assert_expected_events!( + PenpalA, + vec![ + // Amount to reserve transfer is transferred to Parachain's Sovereign account + RuntimeEvent::Balances( + pallet_balances::Event::Withdraw { who, amount } + ) => { + who: *who == t.sender.account_id, + amount: *amount == t.args.amount, + }, + ] + ); +} + +fn para_to_system_para_receiver_assertions(t: ParaToSystemParaTest) { + type RuntimeEvent = ::RuntimeEvent; + + let sov_penpal_on_ahr = AssetHubRococo::sovereign_account_id_of( + AssetHubRococo::sibling_location_of(PenpalA::para_id()), + ); + + assert_expected_events!( + AssetHubRococo, + vec![ + // Amount to reserve transfer is transferred to Parachain's Sovereign account + RuntimeEvent::Balances( + pallet_balances::Event::Withdraw { who, amount } + ) => { + who: *who == sov_penpal_on_ahr.clone().into(), + amount: *amount == t.args.amount, + }, + RuntimeEvent::Balances(pallet_balances::Event::Deposit { .. }) => {}, + RuntimeEvent::MessageQueue( + pallet_message_queue::Event::Processed { success: true, .. } + ) => {}, + ] + ); +} + +fn system_para_to_para_assets_sender_assertions(t: SystemParaToParaTest) { type RuntimeEvent = ::RuntimeEvent; AssetHubRococo::assert_xcm_pallet_attempted_complete(Some(Weight::from_parts( @@ -81,7 +146,7 @@ fn system_para_to_para_assets_assertions(t: SystemParaToParaTest) { assert_expected_events!( AssetHubRococo, vec![ - // Amount to reserve transfer is transferred to Parachain's Sovereing account + // Amount to reserve transfer is transferred to Parachain's Sovereign account RuntimeEvent::Assets( pallet_assets::Event::Transferred { asset_id, from, to, amount } ) => { @@ -96,29 +161,22 @@ fn system_para_to_para_assets_assertions(t: SystemParaToParaTest) { ); } -fn relay_limited_reserve_transfer_assets(t: RelayToSystemParaTest) -> DispatchResult { - ::XcmPallet::limited_reserve_transfer_assets( - t.signed_origin, - bx!(t.args.dest.into()), - bx!(t.args.beneficiary.into()), - bx!(t.args.assets.into()), - t.args.fee_asset_item, - t.args.weight_limit, - ) -} - -fn relay_reserve_transfer_assets(t: RelayToSystemParaTest) -> DispatchResult { - ::XcmPallet::reserve_transfer_assets( - t.signed_origin, - bx!(t.args.dest.into()), - bx!(t.args.beneficiary.into()), - bx!(t.args.assets.into()), - t.args.fee_asset_item, - ) +fn system_para_to_para_assets_receiver_assertions(_: Test) { + type RuntimeEvent = ::RuntimeEvent; + assert_expected_events!( + PenpalA, + vec![ + RuntimeEvent::Balances(pallet_balances::Event::Deposit { .. }) => {}, + RuntimeEvent::Assets(pallet_assets::Event::Issued { .. }) => {}, + RuntimeEvent::MessageQueue( + pallet_message_queue::Event::Processed { success: true, .. 
} + ) => {}, + ] + ); } -fn system_para_limited_reserve_transfer_assets(t: SystemParaToRelayTest) -> DispatchResult { - ::PolkadotXcm::limited_reserve_transfer_assets( +fn relay_to_para_limited_reserve_transfer_assets(t: RelayToParaTest) -> DispatchResult { + ::XcmPallet::limited_reserve_transfer_assets( t.signed_origin, bx!(t.args.dest.into()), bx!(t.args.beneficiary.into()), @@ -128,16 +186,6 @@ fn system_para_limited_reserve_transfer_assets(t: SystemParaToRelayTest) -> Disp ) } -fn system_para_reserve_transfer_assets(t: SystemParaToRelayTest) -> DispatchResult { - ::PolkadotXcm::reserve_transfer_assets( - t.signed_origin, - bx!(t.args.dest.into()), - bx!(t.args.beneficiary.into()), - bx!(t.args.assets.into()), - t.args.fee_asset_item, - ) -} - fn system_para_to_para_limited_reserve_transfer_assets(t: SystemParaToParaTest) -> DispatchResult { ::PolkadotXcm::limited_reserve_transfer_assets( t.signed_origin, @@ -149,101 +197,108 @@ fn system_para_to_para_limited_reserve_transfer_assets(t: SystemParaToParaTest) ) } -fn system_para_to_para_reserve_transfer_assets(t: SystemParaToParaTest) -> DispatchResult { - ::PolkadotXcm::reserve_transfer_assets( +fn para_to_system_para_limited_reserve_transfer_assets(t: ParaToSystemParaTest) -> DispatchResult { + ::PolkadotXcm::limited_reserve_transfer_assets( t.signed_origin, bx!(t.args.dest.into()), bx!(t.args.beneficiary.into()), bx!(t.args.assets.into()), t.args.fee_asset_item, + t.args.weight_limit, ) } -/// Limited Reserve Transfers of native asset from Relay Chain to the System Parachain shouldn't -/// work +/// Reserve Transfers of native asset from Relay Chain to the System Parachain shouldn't work #[test] -fn limited_reserve_transfer_native_asset_from_relay_to_system_para_fails() { - // Init values for Relay Chain +fn reserve_transfer_native_asset_from_relay_to_system_para_fails() { + let signed_origin = ::RuntimeOrigin::signed(RococoSender::get().into()); + let destination = Rococo::child_location_of(AssetHubRococo::para_id()); + let beneficiary: MultiLocation = + AccountId32Junction { network: None, id: AssetHubRococoReceiver::get().into() }.into(); let amount_to_send: Balance = ROCOCO_ED * 1000; - let test_args = TestContext { - sender: RococoSender::get(), - receiver: AssetHubRococoReceiver::get(), - args: relay_test_args(amount_to_send), - }; - - let mut test = RelayToSystemParaTest::new(test_args); - - let sender_balance_before = test.sender.balance; - let receiver_balance_before = test.receiver.balance; - - test.set_assertion::(relay_origin_assertions); - test.set_assertion::(system_para_dest_assertions_incomplete); - test.set_dispatchable::(relay_limited_reserve_transfer_assets); - test.assert(); - - let delivery_fees = Rococo::execute_with(|| { - xcm_helpers::transfer_assets_delivery_fees::< - ::XcmSender, - >(test.args.assets.clone(), 0, test.args.weight_limit, test.args.beneficiary, test.args.dest) + let assets: MultiAssets = (Here, amount_to_send).into(); + let fee_asset_item = 0; + + // this should fail + Rococo::execute_with(|| { + let result = ::XcmPallet::limited_reserve_transfer_assets( + signed_origin, + bx!(destination.into()), + bx!(beneficiary.into()), + bx!(assets.into()), + fee_asset_item, + WeightLimit::Unlimited, + ); + assert_err!( + result, + DispatchError::Module(sp_runtime::ModuleError { + index: 99, + error: [2, 0, 0, 0], + message: Some("Filtered") + }) + ); }); - - let sender_balance_after = test.sender.balance; - let receiver_balance_after = test.receiver.balance; - - assert_eq!(sender_balance_before - 
amount_to_send - delivery_fees, sender_balance_after); - assert_eq!(receiver_balance_before, receiver_balance_after); } -/// Limited Reserve Transfers of native asset from System Parachain to Relay Chain shoudln't work +/// Reserve Transfers of native asset from System Parachain to Relay Chain shouldn't work #[test] -fn limited_reserve_transfer_native_asset_from_system_para_to_relay_fails() { +fn reserve_transfer_native_asset_from_system_para_to_relay_fails() { // Init values for System Parachain + let signed_origin = + ::RuntimeOrigin::signed(AssetHubRococoSender::get().into()); let destination = AssetHubRococo::parent_location(); let beneficiary_id = RococoReceiver::get(); + let beneficiary: MultiLocation = + AccountId32Junction { network: None, id: beneficiary_id.into() }.into(); let amount_to_send: Balance = ASSET_HUB_ROCOCO_ED * 1000; - let assets = (Parent, amount_to_send).into(); - - let test_args = TestContext { - sender: AssetHubRococoSender::get(), - receiver: RococoReceiver::get(), - args: system_para_test_args(destination, beneficiary_id, amount_to_send, assets, None), - }; - - let mut test = SystemParaToRelayTest::new(test_args); - let sender_balance_before = test.sender.balance; - let receiver_balance_before = test.receiver.balance; - - test.set_assertion::(system_para_to_relay_assertions); - test.set_dispatchable::(system_para_limited_reserve_transfer_assets); - test.assert(); - - let sender_balance_after = test.sender.balance; - let receiver_balance_after = test.receiver.balance; - - assert_eq!(sender_balance_before, sender_balance_after); - assert_eq!(receiver_balance_before, receiver_balance_after); + let assets: MultiAssets = (Parent, amount_to_send).into(); + let fee_asset_item = 0; + + // this should fail + AssetHubRococo::execute_with(|| { + let result = + ::PolkadotXcm::limited_reserve_transfer_assets( + signed_origin, + bx!(destination.into()), + bx!(beneficiary.into()), + bx!(assets.into()), + fee_asset_item, + WeightLimit::Unlimited, + ); + assert_err!( + result, + DispatchError::Module(sp_runtime::ModuleError { + index: 31, + error: [2, 0, 0, 0], + message: Some("Filtered") + }) + ); + }); } -/// Reserve Transfers of native asset from Relay Chain to the System Parachain shouldn't work +/// Reserve Transfers of native asset from Relay to Parachain should work #[test] -fn reserve_transfer_native_asset_from_relay_to_system_para_fails() { - // Init values for Relay Chain +fn reserve_transfer_native_asset_from_relay_to_para() { + // Init values for Relay + let destination = Rococo::child_location_of(PenpalA::para_id()); + let beneficiary_id = PenpalAReceiver::get(); let amount_to_send: Balance = ROCOCO_ED * 1000; + let test_args = TestContext { sender: RococoSender::get(), - receiver: AssetHubRococoReceiver::get(), - args: relay_test_args(amount_to_send), + receiver: PenpalAReceiver::get(), + args: relay_test_args(destination, beneficiary_id, amount_to_send), }; - let mut test = RelayToSystemParaTest::new(test_args); + let mut test = RelayToParaTest::new(test_args); let sender_balance_before = test.sender.balance; let receiver_balance_before = test.receiver.balance; - test.set_assertion::(relay_origin_assertions); - test.set_assertion::(system_para_dest_assertions_incomplete); - test.set_dispatchable::(relay_reserve_transfer_assets); + test.set_assertion::(relay_to_para_sender_assertions); + test.set_assertion::(relay_to_para_receiver_assertions); + test.set_dispatchable::(relay_to_para_limited_reserve_transfer_assets); test.assert(); let delivery_fees = 
Rococo::execute_with(|| { @@ -255,44 +310,15 @@ fn reserve_transfer_native_asset_from_relay_to_system_para_fails() { let sender_balance_after = test.sender.balance; let receiver_balance_after = test.receiver.balance; + // Sender's balance is reduced assert_eq!(sender_balance_before - amount_to_send - delivery_fees, sender_balance_after); - assert_eq!(receiver_balance_before, receiver_balance_after); + // Receiver's balance is increased + assert!(receiver_balance_after > receiver_balance_before); } -/// Reserve Transfers of native asset from System Parachain to Relay Chain shouldn't work -#[test] -fn reserve_transfer_native_asset_from_system_para_to_relay_fails() { - // Init values for System Parachain - let destination = AssetHubRococo::parent_location(); - let beneficiary_id = RococoReceiver::get(); - let amount_to_send: Balance = ASSET_HUB_ROCOCO_ED * 1000; - let assets = (Parent, amount_to_send).into(); - - let test_args = TestContext { - sender: AssetHubRococoSender::get(), - receiver: RococoReceiver::get(), - args: system_para_test_args(destination, beneficiary_id, amount_to_send, assets, None), - }; - - let mut test = SystemParaToRelayTest::new(test_args); - - let sender_balance_before = test.sender.balance; - let receiver_balance_before = test.receiver.balance; - - test.set_assertion::(system_para_to_relay_assertions); - test.set_dispatchable::(system_para_reserve_transfer_assets); - test.assert(); - - let sender_balance_after = test.sender.balance; - let receiver_balance_after = test.receiver.balance; - - assert_eq!(sender_balance_before, sender_balance_after); - assert_eq!(receiver_balance_before, receiver_balance_after); -} - -/// Limited Reserve Transfers of native asset from System Parachain to Parachain should work +/// Reserve Transfers of native asset from System Parachain to Parachain should work #[test] -fn limited_reserve_transfer_native_asset_from_system_para_to_para() { +fn reserve_transfer_native_asset_from_system_para_to_para() { // Init values for System Parachain let destination = AssetHubRococo::sibling_location_of(PenpalA::para_id()); let beneficiary_id = PenpalAReceiver::get(); @@ -302,20 +328,21 @@ fn limited_reserve_transfer_native_asset_from_system_para_to_para() { let test_args = TestContext { sender: AssetHubRococoSender::get(), receiver: PenpalAReceiver::get(), - args: system_para_test_args(destination, beneficiary_id, amount_to_send, assets, None), + args: para_test_args(destination, beneficiary_id, amount_to_send, assets, None, 0), }; let mut test = SystemParaToParaTest::new(test_args); let sender_balance_before = test.sender.balance; + let receiver_balance_before = test.receiver.balance; - test.set_assertion::(system_para_to_para_assertions); - // TODO: Add assertion for Penpal runtime. 
Right now message is failing with - // `UntrustedReserveLocation` + test.set_assertion::(system_para_to_para_sender_assertions); + test.set_assertion::(system_para_to_para_receiver_assertions); test.set_dispatchable::(system_para_to_para_limited_reserve_transfer_assets); test.assert(); let sender_balance_after = test.sender.balance; + let receiver_balance_after = test.receiver.balance; let delivery_fees = AssetHubRococo::execute_with(|| { xcm_helpers::transfer_assets_delivery_fees::< @@ -323,117 +350,153 @@ fn limited_reserve_transfer_native_asset_from_system_para_to_para() { >(test.args.assets.clone(), 0, test.args.weight_limit, test.args.beneficiary, test.args.dest) }); + // Sender's balance is reduced assert_eq!(sender_balance_before - amount_to_send - delivery_fees, sender_balance_after); - // TODO: Check receiver balance when Penpal runtime is improved to propery handle reserve - // transfers + // Receiver's balance is increased + assert!(receiver_balance_after > receiver_balance_before); } -/// Reserve Transfers of native asset from System Parachain to Parachain should work +/// Reserve Transfers of native asset from Parachain to System Parachain should work #[test] -fn reserve_transfer_native_asset_from_system_para_to_para() { - // Init values for System Parachain - let destination = AssetHubRococo::sibling_location_of(PenpalA::para_id()); - let beneficiary_id = PenpalAReceiver::get(); +fn reserve_transfer_native_asset_from_para_to_system_para() { + // Init values for Penpal Parachain + let destination = PenpalA::sibling_location_of(AssetHubRococo::para_id()); + let beneficiary_id = AssetHubRococoReceiver::get(); let amount_to_send: Balance = ASSET_HUB_ROCOCO_ED * 1000; let assets = (Parent, amount_to_send).into(); let test_args = TestContext { - sender: AssetHubRococoSender::get(), - receiver: PenpalAReceiver::get(), - args: system_para_test_args(destination, beneficiary_id, amount_to_send, assets, None), + sender: PenpalASender::get(), + receiver: AssetHubRococoReceiver::get(), + args: para_test_args(destination, beneficiary_id, amount_to_send, assets, None, 0), }; - let mut test = SystemParaToParaTest::new(test_args); + let mut test = ParaToSystemParaTest::new(test_args); let sender_balance_before = test.sender.balance; + let receiver_balance_before = test.receiver.balance; + + let penpal_location_as_seen_by_ahr = AssetHubRococo::sibling_location_of(PenpalA::para_id()); + let sov_penpal_on_ahr = AssetHubRococo::sovereign_account_id_of(penpal_location_as_seen_by_ahr); - test.set_assertion::(system_para_to_para_assertions); - // TODO: Add assertion for Penpal runtime. 
Right now message is failing with - // `UntrustedReserveLocation` - test.set_dispatchable::(system_para_to_para_reserve_transfer_assets); + // fund the Penpal's SA on AHR with the native tokens held in reserve + AssetHubRococo::fund_accounts(vec![(sov_penpal_on_ahr.into(), amount_to_send * 2)]); + + test.set_assertion::(para_to_system_para_sender_assertions); + test.set_assertion::(para_to_system_para_receiver_assertions); + test.set_dispatchable::(para_to_system_para_limited_reserve_transfer_assets); test.assert(); let sender_balance_after = test.sender.balance; + let receiver_balance_after = test.receiver.balance; - let delivery_fees = AssetHubRococo::execute_with(|| { + let delivery_fees = PenpalA::execute_with(|| { xcm_helpers::transfer_assets_delivery_fees::< - ::XcmSender, + ::XcmSender, >(test.args.assets.clone(), 0, test.args.weight_limit, test.args.beneficiary, test.args.dest) }); + // Sender's balance is reduced assert_eq!(sender_balance_before - amount_to_send - delivery_fees, sender_balance_after); - // TODO: Check receiver balance when Penpal runtime is improved to propery handle reserve - // transfers + // Receiver's balance is increased + assert!(receiver_balance_after > receiver_balance_before); } -/// Limited Reserve Transfers of a local asset from System Parachain to Parachain should work +/// Reserve Transfers of a local asset and native asset from System Parachain to Parachain should +/// work #[test] -fn limited_reserve_transfer_asset_from_system_para_to_para() { - // Force create asset from Relay Chain and mint assets for System Parachain's sender account +fn reserve_transfer_assets_from_system_para_to_para() { + // Force create asset on AssetHubRococo and PenpalA from Relay Chain AssetHubRococo::force_create_and_mint_asset( ASSET_ID, ASSET_MIN_BALANCE, - true, + false, AssetHubRococoSender::get(), Some(Weight::from_parts(1_019_445_000, 200_000)), - ASSET_MIN_BALANCE * 1000000, + ASSET_MIN_BALANCE * 1_000_000, + ); + PenpalA::force_create_and_mint_asset( + ASSET_ID, + ASSET_MIN_BALANCE, + false, + PenpalASender::get(), + Some(Weight::from_parts(1_019_445_000, 200_000)), + 0, ); // Init values for System Parachain let destination = AssetHubRococo::sibling_location_of(PenpalA::para_id()); let beneficiary_id = PenpalAReceiver::get(); - let amount_to_send = ASSET_MIN_BALANCE * 1000; - let assets = - (X2(PalletInstance(ASSETS_PALLET_ID), GeneralIndex(ASSET_ID.into())), amount_to_send) - .into(); - - let system_para_test_args = TestContext { + let fee_amount_to_send = ASSET_HUB_ROCOCO_ED * 1000; + let asset_amount_to_send = ASSET_MIN_BALANCE * 1000; + let assets: MultiAssets = vec![ + (Parent, fee_amount_to_send).into(), + (X2(PalletInstance(ASSETS_PALLET_ID), GeneralIndex(ASSET_ID.into())), asset_amount_to_send) + .into(), + ] + .into(); + let fee_asset_index = assets + .inner() + .iter() + .position(|r| r == &(Parent, fee_amount_to_send).into()) + .unwrap() as u32; + + let para_test_args = TestContext { sender: AssetHubRococoSender::get(), receiver: PenpalAReceiver::get(), - args: system_para_test_args(destination, beneficiary_id, amount_to_send, assets, None), + args: para_test_args( + destination, + beneficiary_id, + asset_amount_to_send, + assets, + None, + fee_asset_index, + ), }; - let mut system_para_test = SystemParaToParaTest::new(system_para_test_args); + let mut test = SystemParaToParaTest::new(para_test_args); - system_para_test.set_assertion::(system_para_to_para_assets_assertions); - // TODO: Add assertions when Penpal is able to manage assets - 
system_para_test - .set_dispatchable::(system_para_to_para_limited_reserve_transfer_assets); - system_para_test.assert(); -} + // Create SA-of-Penpal-on-AHR with ED. + let penpal_location = AssetHubRococo::sibling_location_of(PenpalA::para_id()); + let sov_penpal_on_ahr = AssetHubRococo::sovereign_account_id_of(penpal_location); + AssetHubRococo::fund_accounts(vec![(sov_penpal_on_ahr.into(), ROCOCO_ED)]); -/// Reserve Transfers of a local asset from System Parachain to Parachain should work -#[test] -fn reserve_transfer_asset_from_system_para_to_para() { - // Force create asset from Relay Chain and mint assets for System Parachain's sender account - AssetHubRococo::force_create_and_mint_asset( - ASSET_ID, - ASSET_MIN_BALANCE, - true, - AssetHubRococoSender::get(), - Some(Weight::from_parts(1_019_445_000, 200_000)), - ASSET_MIN_BALANCE * 1000000, - ); + let sender_balance_before = test.sender.balance; + let receiver_balance_before = test.receiver.balance; - // Init values for System Parachain - let destination = AssetHubRococo::sibling_location_of(PenpalA::para_id()); - let beneficiary_id = PenpalAReceiver::get(); - let amount_to_send = ASSET_MIN_BALANCE * 1000; - let assets = - (X2(PalletInstance(ASSETS_PALLET_ID), GeneralIndex(ASSET_ID.into())), amount_to_send) - .into(); + let sender_assets_before = AssetHubRococo::execute_with(|| { + type Assets = ::Assets; + >::balance(ASSET_ID, &AssetHubRococoSender::get()) + }); + let receiver_assets_before = PenpalA::execute_with(|| { + type Assets = ::Assets; + >::balance(ASSET_ID, &PenpalAReceiver::get()) + }); - let system_para_test_args = TestContext { - sender: AssetHubRococoSender::get(), - receiver: PenpalAReceiver::get(), - args: system_para_test_args(destination, beneficiary_id, amount_to_send, assets, None), - }; + test.set_assertion::(system_para_to_para_assets_sender_assertions); + test.set_assertion::(system_para_to_para_assets_receiver_assertions); + test.set_dispatchable::(system_para_to_para_limited_reserve_transfer_assets); + test.assert(); - let mut system_para_test = SystemParaToParaTest::new(system_para_test_args); + let sender_balance_after = test.sender.balance; + let receiver_balance_after = test.receiver.balance; + + // Sender's balance is reduced + assert!(sender_balance_after < sender_balance_before); + // Receiver's balance is increased + assert!(receiver_balance_after > receiver_balance_before); + + let sender_assets_after = AssetHubRococo::execute_with(|| { + type Assets = ::Assets; + >::balance(ASSET_ID, &AssetHubRococoSender::get()) + }); + let receiver_assets_after = PenpalA::execute_with(|| { + type Assets = ::Assets; + >::balance(ASSET_ID, &PenpalAReceiver::get()) + }); - system_para_test.set_assertion::(system_para_to_para_assets_assertions); - // TODO: Add assertions when Penpal is able to manage assets - system_para_test - .set_dispatchable::(system_para_to_para_reserve_transfer_assets); - system_para_test.assert(); + // Sender's balance is reduced + assert_eq!(sender_assets_before - asset_amount_to_send, sender_assets_after); + // Receiver's balance is increased + assert!(receiver_assets_after > receiver_assets_before); } diff --git a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/teleport.rs b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/teleport.rs index 0d2ca685247..f8017f7a1c5 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/teleport.rs +++ 
b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/teleport.rs
@@ -51,7 +51,7 @@ fn relay_dest_assertions(t: SystemParaToRelayTest) {
 	assert_expected_events!(
 		Rococo,
 		vec![
-			// Amount is witdrawn from Relay Chain's `CheckAccount`
+			// Amount is withdrawn from Relay Chain's `CheckAccount`
 			RuntimeEvent::Balances(pallet_balances::Event::Withdraw { who, amount }) => {
 				who: *who == ::XcmPallet::check_account(),
 				amount: *amount == t.args.amount,
@@ -157,10 +157,12 @@ fn system_para_teleport_assets(t: SystemParaToRelayTest) -> DispatchResult {
 fn limited_teleport_native_assets_from_relay_to_system_para_works() {
 	// Init values for Relay Chain
 	let amount_to_send: Balance = ROCOCO_ED * 1000;
+	let dest = Rococo::child_location_of(AssetHubRococo::para_id());
+	let beneficiary_id = AssetHubRococoReceiver::get();
 	let test_args = TestContext {
 		sender: RococoSender::get(),
 		receiver: AssetHubRococoReceiver::get(),
-		args: relay_test_args(amount_to_send),
+		args: relay_test_args(dest, beneficiary_id, amount_to_send),
 	};
 	let mut test = RelayToSystemParaTest::new(test_args);
@@ -204,7 +206,7 @@ fn limited_teleport_native_assets_back_from_system_para_to_relay_works() {
 	let test_args = TestContext {
 		sender: AssetHubRococoSender::get(),
 		receiver: RococoReceiver::get(),
-		args: system_para_test_args(destination, beneficiary_id, amount_to_send, assets, None),
+		args: para_test_args(destination, beneficiary_id, amount_to_send, assets, None, 0),
 	};
 	let mut test = SystemParaToRelayTest::new(test_args);
@@ -245,7 +247,7 @@ fn limited_teleport_native_assets_from_system_para_to_relay_fails() {
 	let test_args = TestContext {
 		sender: AssetHubRococoSender::get(),
 		receiver: RococoReceiver::get(),
-		args: system_para_test_args(destination, beneficiary_id, amount_to_send, assets, None),
+		args: para_test_args(destination, beneficiary_id, amount_to_send, assets, None, 0),
 	};
 	let mut test = SystemParaToRelayTest::new(test_args);
@@ -278,10 +280,12 @@ fn limited_teleport_native_assets_from_system_para_to_relay_fails() {
 fn teleport_native_assets_from_relay_to_system_para_works() {
 	// Init values for Relay Chain
 	let amount_to_send: Balance = ROCOCO_ED * 1000;
+	let dest = Rococo::child_location_of(AssetHubRococo::para_id());
+	let beneficiary_id = AssetHubRococoReceiver::get();
 	let test_args = TestContext {
 		sender: RococoSender::get(),
 		receiver: AssetHubRococoReceiver::get(),
-		args: relay_test_args(amount_to_send),
+		args: relay_test_args(dest, beneficiary_id, amount_to_send),
 	};
 	let mut test = RelayToSystemParaTest::new(test_args);
@@ -325,7 +329,7 @@ fn teleport_native_assets_back_from_system_para_to_relay_works() {
 	let test_args = TestContext {
 		sender: AssetHubRococoSender::get(),
 		receiver: RococoReceiver::get(),
-		args: system_para_test_args(destination, beneficiary_id, amount_to_send, assets, None),
+		args: para_test_args(destination, beneficiary_id, amount_to_send, assets, None, 0),
 	};
 	let mut test = SystemParaToRelayTest::new(test_args);
@@ -366,7 +370,7 @@ fn teleport_native_assets_from_system_para_to_relay_fails() {
 	let test_args = TestContext {
 		sender: AssetHubRococoSender::get(),
 		receiver: RococoReceiver::get(),
-		args: system_para_test_args(destination, beneficiary_id, amount_to_send, assets, None),
+		args: para_test_args(destination, beneficiary_id, amount_to_send, assets, None, 0),
 	};
 	let mut test = SystemParaToRelayTest::new(test_args);
diff --git a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/lib.rs 
b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/lib.rs index e52ad448c0b..83a867e6ae3 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/lib.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/lib.rs @@ -65,7 +65,7 @@ pub type RelayToSystemParaTest = Test; pub type SystemParaToRelayTest = Test; pub type SystemParaToParaTest = Test; -/// Returns a `TestArgs` instance to de used for the Relay Chain accross integraton tests +/// Returns a `TestArgs` instance to be used for the Relay Chain across integration tests pub fn relay_test_args(amount: Balance) -> TestArgs { TestArgs { dest: Westend::child_location_of(AssetHubWestend::para_id()), @@ -82,7 +82,7 @@ pub fn relay_test_args(amount: Balance) -> TestArgs { } } -/// Returns a `TestArgs` instance to de used for the System Parachain accross integraton tests +/// Returns a `TestArgs` instance to be used for the System Parachain across integration tests pub fn system_para_test_args( dest: MultiLocation, beneficiary_id: AccountId32, diff --git a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/reserve_transfer.rs b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/reserve_transfer.rs index 19a203897ad..5b2c648b7b0 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/reserve_transfer.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/reserve_transfer.rs @@ -15,37 +15,8 @@ use crate::*; use asset_hub_westend_runtime::xcm_config::XcmConfig; -use westend_runtime::xcm_config::XcmConfig as WestendXcmConfig; -fn relay_origin_assertions(t: RelayToSystemParaTest) { - type RuntimeEvent = ::RuntimeEvent; - - Westend::assert_xcm_pallet_attempted_complete(Some(Weight::from_parts(629_384_000, 6_196))); - - assert_expected_events!( - Westend, - vec![ - // Amount to reserve transfer is transferred to System Parachain's Sovereign account - RuntimeEvent::Balances(pallet_balances::Event::Transfer { from, to, amount }) => { - from: *from == t.sender.account_id, - to: *to == Westend::sovereign_account_id_of( - t.args.dest - ), - amount: *amount == t.args.amount, - }, - ] - ); -} - -fn system_para_dest_assertions(_t: RelayToSystemParaTest) { - AssetHubWestend::assert_dmp_queue_incomplete(Some(Weight::from_parts(31_352_000, 1489))); -} - -fn system_para_to_relay_assertions(_t: SystemParaToRelayTest) { - AssetHubWestend::assert_xcm_pallet_attempted_error(Some(XcmError::Barrier)) -} - -fn system_para_to_para_assertions(t: SystemParaToParaTest) { +fn system_para_to_para_sender_assertions(t: SystemParaToParaTest) { type RuntimeEvent = ::RuntimeEvent; AssetHubWestend::assert_xcm_pallet_attempted_complete(Some(Weight::from_parts( @@ -56,7 +27,7 @@ fn system_para_to_para_assertions(t: SystemParaToParaTest) { assert_expected_events!( AssetHubWestend, vec![ - // Amount to reserve transfer is transferred to Parachain's Sovereing account + // Amount to reserve transfer is transferred to Parachain's Sovereign account RuntimeEvent::Balances( pallet_balances::Event::Transfer { from, to, amount } ) => { @@ -70,6 +41,19 @@ fn system_para_to_para_assertions(t: SystemParaToParaTest) { ); } +fn para_receiver_assertions(_: Test) { + type RuntimeEvent = ::RuntimeEvent; + assert_expected_events!( + PenpalA, + vec![ + RuntimeEvent::Balances(pallet_balances::Event::Deposit { .. 
}) => {}, + RuntimeEvent::MessageQueue( + pallet_message_queue::Event::Processed { success: true, .. } + ) => {}, + ] + ); +} + fn system_para_to_para_assets_assertions(t: SystemParaToParaTest) { type RuntimeEvent = ::RuntimeEvent; @@ -81,7 +65,7 @@ fn system_para_to_para_assets_assertions(t: SystemParaToParaTest) { assert_expected_events!( AssetHubWestend, vec![ - // Amount to reserve transfer is transferred to Parachain's Sovereing account + // Amount to reserve transfer is transferred to Parachain's Sovereign account RuntimeEvent::Assets( pallet_assets::Event::Transferred { asset_id, from, to, amount } ) => { @@ -96,48 +80,6 @@ fn system_para_to_para_assets_assertions(t: SystemParaToParaTest) { ); } -fn relay_limited_reserve_transfer_assets(t: RelayToSystemParaTest) -> DispatchResult { - ::XcmPallet::limited_reserve_transfer_assets( - t.signed_origin, - bx!(t.args.dest.into()), - bx!(t.args.beneficiary.into()), - bx!(t.args.assets.into()), - t.args.fee_asset_item, - t.args.weight_limit, - ) -} - -fn relay_reserve_transfer_assets(t: RelayToSystemParaTest) -> DispatchResult { - ::XcmPallet::reserve_transfer_assets( - t.signed_origin, - bx!(t.args.dest.into()), - bx!(t.args.beneficiary.into()), - bx!(t.args.assets.into()), - t.args.fee_asset_item, - ) -} - -fn system_para_limited_reserve_transfer_assets(t: SystemParaToRelayTest) -> DispatchResult { - ::PolkadotXcm::limited_reserve_transfer_assets( - t.signed_origin, - bx!(t.args.dest.into()), - bx!(t.args.beneficiary.into()), - bx!(t.args.assets.into()), - t.args.fee_asset_item, - t.args.weight_limit, - ) -} - -fn system_para_reserve_transfer_assets(t: SystemParaToRelayTest) -> DispatchResult { - ::PolkadotXcm::reserve_transfer_assets( - t.signed_origin, - bx!(t.args.dest.into()), - bx!(t.args.beneficiary.into()), - bx!(t.args.assets.into()), - t.args.fee_asset_item, - ) -} - fn system_para_to_para_limited_reserve_transfer_assets(t: SystemParaToParaTest) -> DispatchResult { ::PolkadotXcm::limited_reserve_transfer_assets( t.signed_origin, @@ -149,187 +91,72 @@ fn system_para_to_para_limited_reserve_transfer_assets(t: SystemParaToParaTest) ) } -fn system_para_to_para_reserve_transfer_assets(t: SystemParaToParaTest) -> DispatchResult { - ::PolkadotXcm::reserve_transfer_assets( - t.signed_origin, - bx!(t.args.dest.into()), - bx!(t.args.beneficiary.into()), - bx!(t.args.assets.into()), - t.args.fee_asset_item, - ) -} - -/// Limited Reserve Transfers of native asset from Relay Chain to the System Parachain shouldn't -/// work -#[test] -fn limited_reserve_transfer_native_asset_from_relay_to_system_para_fails() { - // Init values for Relay Chain - let amount_to_send: Balance = WESTEND_ED * 1000; - let test_args = TestContext { - sender: WestendSender::get(), - receiver: AssetHubWestendReceiver::get(), - args: relay_test_args(amount_to_send), - }; - - let mut test = RelayToSystemParaTest::new(test_args); - - let sender_balance_before = test.sender.balance; - let receiver_balance_before = test.receiver.balance; - - test.set_assertion::(relay_origin_assertions); - test.set_assertion::(system_para_dest_assertions); - test.set_dispatchable::(relay_limited_reserve_transfer_assets); - test.assert(); - - let delivery_fees = Westend::execute_with(|| { - xcm_helpers::transfer_assets_delivery_fees::< - ::XcmSender, - >(test.args.assets.clone(), 0, test.args.weight_limit, test.args.beneficiary, test.args.dest) - }); - - let sender_balance_after = test.sender.balance; - let receiver_balance_after = test.receiver.balance; - - assert_eq!(sender_balance_before - 
amount_to_send - delivery_fees, sender_balance_after); - assert_eq!(receiver_balance_before, receiver_balance_after); -} - -/// Limited Reserve Transfers of native asset from System Parachain to Relay Chain shoudln't work -#[test] -fn limited_reserve_transfer_native_asset_from_system_para_to_relay_fails() { - // Init values for System Parachain - let destination = AssetHubWestend::parent_location(); - let beneficiary_id = WestendReceiver::get(); - let amount_to_send: Balance = ASSET_HUB_WESTEND_ED * 1000; - let assets = (Parent, amount_to_send).into(); - - let test_args = TestContext { - sender: AssetHubWestendSender::get(), - receiver: WestendReceiver::get(), - args: system_para_test_args(destination, beneficiary_id, amount_to_send, assets, None), - }; - - let mut test = SystemParaToRelayTest::new(test_args); - - let sender_balance_before = test.sender.balance; - let receiver_balance_before = test.receiver.balance; - - test.set_assertion::(system_para_to_relay_assertions); - test.set_dispatchable::(system_para_limited_reserve_transfer_assets); - test.assert(); - - let sender_balance_after = test.sender.balance; - let receiver_balance_after = test.receiver.balance; - - assert_eq!(sender_balance_before, sender_balance_after); - assert_eq!(receiver_balance_before, receiver_balance_after); -} - /// Reserve Transfers of native asset from Relay Chain to the System Parachain shouldn't work #[test] fn reserve_transfer_native_asset_from_relay_to_system_para_fails() { - // Init values for Relay Chain + let signed_origin = ::RuntimeOrigin::signed(WestendSender::get().into()); + let destination = Westend::child_location_of(AssetHubWestend::para_id()); + let beneficiary: MultiLocation = + AccountId32Junction { network: None, id: AssetHubWestendReceiver::get().into() }.into(); let amount_to_send: Balance = WESTEND_ED * 1000; - let test_args = TestContext { - sender: WestendSender::get(), - receiver: AssetHubWestendReceiver::get(), - args: relay_test_args(amount_to_send), - }; - - let mut test = RelayToSystemParaTest::new(test_args); - - let sender_balance_before = test.sender.balance; - let receiver_balance_before = test.receiver.balance; - - test.set_assertion::(relay_origin_assertions); - test.set_assertion::(system_para_dest_assertions); - test.set_dispatchable::(relay_reserve_transfer_assets); - test.assert(); - - let delivery_fees = Westend::execute_with(|| { - xcm_helpers::transfer_assets_delivery_fees::< - ::XcmSender, - >(test.args.assets.clone(), 0, test.args.weight_limit, test.args.beneficiary, test.args.dest) + let assets: MultiAssets = (Here, amount_to_send).into(); + let fee_asset_item = 0; + + // this should fail + Westend::execute_with(|| { + let result = ::XcmPallet::limited_reserve_transfer_assets( + signed_origin, + bx!(destination.into()), + bx!(beneficiary.into()), + bx!(assets.into()), + fee_asset_item, + WeightLimit::Unlimited, + ); + assert_err!( + result, + DispatchError::Module(sp_runtime::ModuleError { + index: 99, + error: [2, 0, 0, 0], + message: Some("Filtered") + }) + ); }); - - let sender_balance_after = test.sender.balance; - let receiver_balance_after = test.receiver.balance; - - assert_eq!(sender_balance_before - amount_to_send - delivery_fees, sender_balance_after); - assert_eq!(receiver_balance_before, receiver_balance_after); } /// Reserve Transfers of native asset from System Parachain to Relay Chain shouldn't work #[test] fn reserve_transfer_native_asset_from_system_para_to_relay_fails() { // Init values for System Parachain + let signed_origin = + 
::RuntimeOrigin::signed(AssetHubWestendSender::get().into()); let destination = AssetHubWestend::parent_location(); let beneficiary_id = WestendReceiver::get(); + let beneficiary: MultiLocation = + AccountId32Junction { network: None, id: beneficiary_id.into() }.into(); let amount_to_send: Balance = ASSET_HUB_WESTEND_ED * 1000; - let assets = (Parent, amount_to_send).into(); - - let test_args = TestContext { - sender: AssetHubWestendSender::get(), - receiver: WestendReceiver::get(), - args: system_para_test_args(destination, beneficiary_id, amount_to_send, assets, None), - }; - - let mut test = SystemParaToRelayTest::new(test_args); - - let sender_balance_before = test.sender.balance; - let receiver_balance_before = test.receiver.balance; - - test.set_assertion::(system_para_to_relay_assertions); - test.set_dispatchable::(system_para_reserve_transfer_assets); - test.assert(); - - let sender_balance_after = test.sender.balance; - let receiver_balance_after = test.receiver.balance; - - assert_eq!(sender_balance_before, sender_balance_after); - assert_eq!(receiver_balance_before, receiver_balance_after); -} - -/// Limited Reserve Transfers of native asset from System Parachain to Parachain should work -#[test] -fn limited_reserve_transfer_native_asset_from_system_para_to_para() { - // Init values for System Parachain - let destination = AssetHubWestend::sibling_location_of(PenpalA::para_id()); - let beneficiary_id = PenpalAReceiver::get(); - let amount_to_send: Balance = ASSET_HUB_WESTEND_ED * 1000; - let assets = (Parent, amount_to_send).into(); - - let test_args = TestContext { - sender: AssetHubWestendSender::get(), - receiver: PenpalAReceiver::get(), - args: system_para_test_args(destination, beneficiary_id, amount_to_send, assets, None), - }; - - let mut test = SystemParaToParaTest::new(test_args); - - let sender_balance_before = test.sender.balance; - - test.set_assertion::(system_para_to_para_assertions); - // TODO: Add assertion for Penpal runtime. 
Right now message is failing with - // `UntrustedReserveLocation` - test.set_dispatchable::(system_para_to_para_limited_reserve_transfer_assets); - test.assert(); - - let sender_balance_after = test.sender.balance; - - let delivery_fees = AssetHubWestend::execute_with(|| { - xcm_helpers::transfer_assets_delivery_fees::<::XcmSender>( - test.args.assets.clone(), - 0, - test.args.weight_limit, - test.args.beneficiary, - test.args.dest, - ) + let assets: MultiAssets = (Parent, amount_to_send).into(); + let fee_asset_item = 0; + + // this should fail + AssetHubWestend::execute_with(|| { + let result = + ::PolkadotXcm::limited_reserve_transfer_assets( + signed_origin, + bx!(destination.into()), + bx!(beneficiary.into()), + bx!(assets.into()), + fee_asset_item, + WeightLimit::Unlimited, + ); + assert_err!( + result, + DispatchError::Module(sp_runtime::ModuleError { + index: 31, + error: [2, 0, 0, 0], + message: Some("Filtered") + }) + ); }); - - assert_eq!(sender_balance_before - amount_to_send - delivery_fees, sender_balance_after); - // TODO: Check receiver balance when Penpal runtime is improved to propery handle reserve - // transfers } /// Reserve Transfers of native asset from System Parachain to Parachain should work @@ -350,14 +177,15 @@ fn reserve_transfer_native_asset_from_system_para_to_para() { let mut test = SystemParaToParaTest::new(test_args); let sender_balance_before = test.sender.balance; + let receiver_balance_before = test.receiver.balance; - test.set_assertion::(system_para_to_para_assertions); - // TODO: Add assertion for Penpal runtime. Right now message is failing with - // `UntrustedReserveLocation` - test.set_dispatchable::(system_para_to_para_reserve_transfer_assets); + test.set_assertion::(system_para_to_para_sender_assertions); + test.set_assertion::(para_receiver_assertions); + test.set_dispatchable::(system_para_to_para_limited_reserve_transfer_assets); test.assert(); let sender_balance_after = test.sender.balance; + let receiver_balance_after = test.receiver.balance; let delivery_fees = AssetHubWestend::execute_with(|| { xcm_helpers::transfer_assets_delivery_fees::<::XcmSender>( @@ -369,45 +197,10 @@ fn reserve_transfer_native_asset_from_system_para_to_para() { ) }); + // Sender's balance is reduced assert_eq!(sender_balance_before - amount_to_send - delivery_fees, sender_balance_after); - // TODO: Check receiver balance when Penpal runtime is improved to propery handle reserve - // transfers -} - -/// Limited Reserve Transfers of a local asset from System Parachain to Parachain should work -#[test] -fn limited_reserve_transfer_asset_from_system_para_to_para() { - // Force create asset from Relay Chain and mint assets for System Parachain's sender account - AssetHubWestend::force_create_and_mint_asset( - ASSET_ID, - ASSET_MIN_BALANCE, - true, - AssetHubWestendSender::get(), - Some(Weight::from_parts(1_019_445_000, 200_000)), - ASSET_MIN_BALANCE * 1000000, - ); - - // Init values for System Parachain - let destination = AssetHubWestend::sibling_location_of(PenpalA::para_id()); - let beneficiary_id = PenpalAReceiver::get(); - let amount_to_send = ASSET_MIN_BALANCE * 1000; - let assets = - (X2(PalletInstance(ASSETS_PALLET_ID), GeneralIndex(ASSET_ID.into())), amount_to_send) - .into(); - - let system_para_test_args = TestContext { - sender: AssetHubWestendSender::get(), - receiver: PenpalAReceiver::get(), - args: system_para_test_args(destination, beneficiary_id, amount_to_send, assets, None), - }; - - let mut system_para_test = 
SystemParaToParaTest::new(system_para_test_args); - - system_para_test.set_assertion::(system_para_to_para_assets_assertions); - // TODO: Add assertions when Penpal is able to manage assets - system_para_test - .set_dispatchable::(system_para_to_para_limited_reserve_transfer_assets); - system_para_test.assert(); + // Receiver's balance is increased + assert!(receiver_balance_after > receiver_balance_before); } /// Reserve Transfers of a local asset from System Parachain to Parachain should work @@ -442,6 +235,6 @@ fn reserve_transfer_asset_from_system_para_to_para() { system_para_test.set_assertion::(system_para_to_para_assets_assertions); // TODO: Add assertions when Penpal is able to manage assets system_para_test - .set_dispatchable::(system_para_to_para_reserve_transfer_assets); + .set_dispatchable::(system_para_to_para_limited_reserve_transfer_assets); system_para_test.assert(); } diff --git a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/teleport.rs b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/teleport.rs index 57e1b93f349..d618cd2fe04 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/teleport.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/teleport.rs @@ -51,7 +51,7 @@ fn relay_dest_assertions(t: SystemParaToRelayTest) { assert_expected_events!( Westend, vec![ - // Amount is witdrawn from Relay Chain's `CheckAccount` + // Amount is withdrawn from Relay Chain's `CheckAccount` RuntimeEvent::Balances(pallet_balances::Event::Withdraw { who, amount }) => { who: *who == ::XcmPallet::check_account(), amount: *amount == t.args.amount, diff --git a/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/lib.rs b/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/lib.rs index 52ad3241e51..e4ed77884bf 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/lib.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/lib.rs @@ -962,7 +962,7 @@ mod benches { [cumulus_pallet_xcmp_queue, XcmpQueue] [cumulus_pallet_dmp_queue, DmpQueue] // XCM - [pallet_xcm, PolkadotXcm] + [pallet_xcm, PalletXcmExtrinsiscsBenchmark::] // NOTE: Make sure you point to the individual modules below. [pallet_xcm_benchmarks::fungible, XcmBalances] [pallet_xcm_benchmarks::generic, XcmGeneric] @@ -1200,6 +1200,7 @@ impl_runtime_apis! { use frame_support::traits::StorageInfoTrait; use frame_system_benchmarking::Pallet as SystemBench; use cumulus_pallet_session_benchmarking::Pallet as SessionBench; + use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsiscsBenchmark; // This is defined once again in dispatch_benchmark, because list_benchmarks! // and add_benchmarks! are macros exported by define_benchmarks! macros and those types @@ -1243,6 +1244,39 @@ impl_runtime_apis! { use cumulus_pallet_session_benchmarking::Pallet as SessionBench; impl cumulus_pallet_session_benchmarking::Config for Runtime {} + use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsiscsBenchmark; + impl pallet_xcm::benchmarking::Config for Runtime { + fn reachable_dest() -> Option { + Some(Parent.into()) + } + + fn teleportable_asset_and_dest() -> Option<(MultiAsset, MultiLocation)> { + // Relay/native token can be teleported between AH and Relay. 
+ Some(( + MultiAsset { + fun: Fungible(EXISTENTIAL_DEPOSIT), + id: Concrete(Parent.into()) + }, + Parent.into(), + )) + } + + fn reserve_transferable_asset_and_dest() -> Option<(MultiAsset, MultiLocation)> { + // AH can reserve transfer native token to some random parachain. + let random_para_id = 43211234; + ParachainSystem::open_outbound_hrmp_channel_for_benchmarks_or_tests( + random_para_id.into() + ); + Some(( + MultiAsset { + fun: Fungible(EXISTENTIAL_DEPOSIT), + id: Concrete(Parent.into()) + }, + ParentThen(Parachain(random_para_id).into()).into(), + )) + } + } + use xcm::latest::prelude::*; use xcm_config::{KsmLocation, MaxAssetsIntoHolding}; use pallet_xcm_benchmarks::asset_instance_from; diff --git a/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/xcm_config.rs b/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/xcm_config.rs index d63c126f082..05262e07410 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/xcm_config.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/xcm_config.rs @@ -552,11 +552,6 @@ pub type XcmRouter = WithUniqueTopic<( XcmpQueue, )>; -#[cfg(feature = "runtime-benchmarks")] -parameter_types! { - pub ReachableDest: Option = Some(Parent.into()); -} - impl pallet_xcm::Config for Runtime { type RuntimeEvent = RuntimeEvent; // We want to disallow users sending (arbitrary) XCMs from this chain. @@ -586,8 +581,6 @@ impl pallet_xcm::Config for Runtime { type SovereignAccountOf = LocationToAccountId; type MaxLockers = ConstU32<8>; type WeightInfo = crate::weights::pallet_xcm::WeightInfo; - #[cfg(feature = "runtime-benchmarks")] - type ReachableDest = ReachableDest; type AdminOrigin = EnsureRoot; type MaxRemoteLockConsumers = ConstU32<0>; type RemoteLockConsumerIdentifier = (); diff --git a/cumulus/parachains/runtimes/assets/asset-hub-kusama/tests/tests.rs b/cumulus/parachains/runtimes/assets/asset-hub-kusama/tests/tests.rs index 7d49b56e461..cdd4290770f 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-kusama/tests/tests.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-kusama/tests/tests.rs @@ -18,13 +18,14 @@ //! Tests for the Statemine (Kusama Assets Hub) chain. 
use asset_hub_kusama_runtime::xcm_config::{ - AssetFeeAsExistentialDepositMultiplierFeeCharger, KsmLocation, TrustBackedAssetsPalletLocation, + AssetFeeAsExistentialDepositMultiplierFeeCharger, KsmLocation, LocationToAccountId, + TrustBackedAssetsPalletLocation, }; pub use asset_hub_kusama_runtime::{ xcm_config::{CheckingAccount, ForeignCreatorsSovereignAccountOf, XcmConfig}, AllPalletsWithoutSystem, AssetDeposit, Assets, Balances, ExistentialDeposit, ForeignAssets, ForeignAssetsInstance, MetadataDepositBase, MetadataDepositPerByte, ParachainSystem, Runtime, - RuntimeCall, RuntimeEvent, SessionKeys, System, TrustBackedAssetsInstance, + RuntimeCall, RuntimeEvent, SessionKeys, System, TrustBackedAssetsInstance, XcmpQueue, }; use asset_test_utils::{CollatorSessionKeys, ExtBuilder}; use codec::{Decode, Encode}; @@ -518,12 +519,6 @@ asset_test_utils::include_teleports_for_native_asset_works!( _ => None, } }), - Box::new(|runtime_event_encoded: Vec| { - match RuntimeEvent::decode(&mut &runtime_event_encoded[..]) { - Ok(RuntimeEvent::XcmpQueue(event)) => Some(event), - _ => None, - } - }), 1000 ); @@ -632,3 +627,32 @@ asset_test_utils::include_create_and_manage_foreign_assets_for_local_consensus_p assert_eq!(ForeignAssets::asset_ids().collect::>().len(), 1); }) ); + +#[test] +fn reserve_transfer_native_asset_to_non_teleport_para_works() { + asset_test_utils::test_cases::reserve_transfer_native_asset_to_non_teleport_para_works::< + Runtime, + AllPalletsWithoutSystem, + XcmConfig, + ParachainSystem, + XcmpQueue, + LocationToAccountId, + >( + collator_session_keys(), + ExistentialDeposit::get(), + AccountId::from(ALICE), + Box::new(|runtime_event_encoded: Vec| { + match RuntimeEvent::decode(&mut &runtime_event_encoded[..]) { + Ok(RuntimeEvent::PolkadotXcm(event)) => Some(event), + _ => None, + } + }), + Box::new(|runtime_event_encoded: Vec| { + match RuntimeEvent::decode(&mut &runtime_event_encoded[..]) { + Ok(RuntimeEvent::XcmpQueue(event)) => Some(event), + _ => None, + } + }), + WeightLimit::Unlimited, + ); +} diff --git a/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/lib.rs b/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/lib.rs index 57a1150bc88..6f853b6f56e 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/lib.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/lib.rs @@ -868,7 +868,7 @@ mod benches { [cumulus_pallet_xcmp_queue, XcmpQueue] [cumulus_pallet_dmp_queue, DmpQueue] // XCM - [pallet_xcm, PolkadotXcm] + [pallet_xcm, PalletXcmExtrinsiscsBenchmark::] // NOTE: Make sure you point to the individual modules below. [pallet_xcm_benchmarks::fungible, XcmBalances] [pallet_xcm_benchmarks::generic, XcmGeneric] @@ -1082,6 +1082,7 @@ impl_runtime_apis! { use frame_support::traits::StorageInfoTrait; use frame_system_benchmarking::Pallet as SystemBench; use cumulus_pallet_session_benchmarking::Pallet as SessionBench; + use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsiscsBenchmark; // This is defined once again in dispatch_benchmark, because list_benchmarks! // and add_benchmarks! are macros exported by define_benchmarks! macros and those types @@ -1124,6 +1125,39 @@ impl_runtime_apis! 
{ use cumulus_pallet_session_benchmarking::Pallet as SessionBench; impl cumulus_pallet_session_benchmarking::Config for Runtime {} + use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsiscsBenchmark; + impl pallet_xcm::benchmarking::Config for Runtime { + fn reachable_dest() -> Option { + Some(Parent.into()) + } + + fn teleportable_asset_and_dest() -> Option<(MultiAsset, MultiLocation)> { + // Relay/native token can be teleported between AH and Relay. + Some(( + MultiAsset { + fun: Fungible(EXISTENTIAL_DEPOSIT), + id: Concrete(Parent.into()) + }, + Parent.into(), + )) + } + + fn reserve_transferable_asset_and_dest() -> Option<(MultiAsset, MultiLocation)> { + // AH can reserve transfer native token to some random parachain. + let random_para_id = 43211234; + ParachainSystem::open_outbound_hrmp_channel_for_benchmarks_or_tests( + random_para_id.into() + ); + Some(( + MultiAsset { + fun: Fungible(EXISTENTIAL_DEPOSIT), + id: Concrete(Parent.into()) + }, + ParentThen(Parachain(random_para_id).into()).into(), + )) + } + } + use xcm::latest::prelude::*; use xcm_config::{DotLocation, MaxAssetsIntoHolding}; use pallet_xcm_benchmarks::asset_instance_from; diff --git a/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/xcm_config.rs b/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/xcm_config.rs index 6035789a1ae..b3c2ce4da76 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/xcm_config.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/xcm_config.rs @@ -476,11 +476,6 @@ pub type XcmRouter = WithUniqueTopic<( XcmpQueue, )>; -#[cfg(feature = "runtime-benchmarks")] -parameter_types! { - pub ReachableDest: Option = Some(Parent.into()); -} - impl pallet_xcm::Config for Runtime { type RuntimeEvent = RuntimeEvent; // We want to disallow users sending (arbitrary) XCMs from this chain. 
@@ -510,8 +505,6 @@ impl pallet_xcm::Config for Runtime { type SovereignAccountOf = LocationToAccountId; type MaxLockers = ConstU32<8>; type WeightInfo = crate::weights::pallet_xcm::WeightInfo; - #[cfg(feature = "runtime-benchmarks")] - type ReachableDest = ReachableDest; type AdminOrigin = EnsureRoot; type MaxRemoteLockConsumers = ConstU32<0>; type RemoteLockConsumerIdentifier = (); diff --git a/cumulus/parachains/runtimes/assets/asset-hub-polkadot/tests/tests.rs b/cumulus/parachains/runtimes/assets/asset-hub-polkadot/tests/tests.rs index 7200ebc16a2..b7e44646ece 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-polkadot/tests/tests.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-polkadot/tests/tests.rs @@ -19,12 +19,13 @@ use asset_hub_polkadot_runtime::xcm_config::{ AssetFeeAsExistentialDepositMultiplierFeeCharger, CheckingAccount, DotLocation, - ForeignCreatorsSovereignAccountOf, TrustBackedAssetsPalletLocation, XcmConfig, + ForeignCreatorsSovereignAccountOf, LocationToAccountId, TrustBackedAssetsPalletLocation, + XcmConfig, }; pub use asset_hub_polkadot_runtime::{ AllPalletsWithoutSystem, AssetDeposit, Assets, Balances, ExistentialDeposit, ForeignAssets, ForeignAssetsInstance, MetadataDepositBase, MetadataDepositPerByte, ParachainSystem, Runtime, - RuntimeCall, RuntimeEvent, SessionKeys, System, TrustBackedAssetsInstance, + RuntimeCall, RuntimeEvent, SessionKeys, System, TrustBackedAssetsInstance, XcmpQueue, }; use asset_test_utils::{CollatorSessionKeys, ExtBuilder}; use codec::{Decode, Encode}; @@ -531,12 +532,6 @@ asset_test_utils::include_teleports_for_native_asset_works!( _ => None, } }), - Box::new(|runtime_event_encoded: Vec| { - match RuntimeEvent::decode(&mut &runtime_event_encoded[..]) { - Ok(RuntimeEvent::XcmpQueue(event)) => Some(event), - _ => None, - } - }), 1000 ); @@ -657,3 +652,32 @@ asset_test_utils::include_create_and_manage_foreign_assets_for_local_consensus_p assert_eq!(ForeignAssets::asset_ids().collect::>().len(), 1); }) ); + +#[test] +fn reserve_transfer_native_asset_to_non_teleport_para_works() { + asset_test_utils::test_cases::reserve_transfer_native_asset_to_non_teleport_para_works::< + Runtime, + AllPalletsWithoutSystem, + XcmConfig, + ParachainSystem, + XcmpQueue, + LocationToAccountId, + >( + collator_session_keys(), + ExistentialDeposit::get(), + AccountId::from(ALICE), + Box::new(|runtime_event_encoded: Vec| { + match RuntimeEvent::decode(&mut &runtime_event_encoded[..]) { + Ok(RuntimeEvent::PolkadotXcm(event)) => Some(event), + _ => None, + } + }), + Box::new(|runtime_event_encoded: Vec| { + match RuntimeEvent::decode(&mut &runtime_event_encoded[..]) { + Ok(RuntimeEvent::XcmpQueue(event)) => Some(event), + _ => None, + } + }), + WeightLimit::Unlimited, + ); +} diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs index f649ebedeff..06dcfb99a65 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs @@ -1077,7 +1077,7 @@ mod benches { [pallet_xcm_bridge_hub_router, ToWestend] [pallet_xcm_bridge_hub_router, ToRococo] // XCM - [pallet_xcm, PolkadotXcm] + [pallet_xcm, PalletXcmExtrinsiscsBenchmark::] // NOTE: Make sure you point to the individual modules below. [pallet_xcm_benchmarks::fungible, XcmBalances] [pallet_xcm_benchmarks::generic, XcmGeneric] @@ -1315,6 +1315,7 @@ impl_runtime_apis! 
{ use frame_support::traits::StorageInfoTrait; use frame_system_benchmarking::Pallet as SystemBench; use cumulus_pallet_session_benchmarking::Pallet as SessionBench; + use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsiscsBenchmark; use pallet_xcm_bridge_hub_router::benchmarking::Pallet as XcmBridgeHubRouterBench; // This is defined once again in dispatch_benchmark, because list_benchmarks! @@ -1368,6 +1369,39 @@ impl_runtime_apis! { Config as XcmBridgeHubRouterConfig, }; + use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsiscsBenchmark; + impl pallet_xcm::benchmarking::Config for Runtime { + fn reachable_dest() -> Option { + Some(Parent.into()) + } + + fn teleportable_asset_and_dest() -> Option<(MultiAsset, MultiLocation)> { + // Relay/native token can be teleported between AH and Relay. + Some(( + MultiAsset { + fun: Fungible(EXISTENTIAL_DEPOSIT), + id: Concrete(Parent.into()) + }, + Parent.into(), + )) + } + + fn reserve_transferable_asset_and_dest() -> Option<(MultiAsset, MultiLocation)> { + // AH can reserve transfer native token to some random parachain. + let random_para_id = 43211234; + ParachainSystem::open_outbound_hrmp_channel_for_benchmarks_or_tests( + random_para_id.into() + ); + Some(( + MultiAsset { + fun: Fungible(EXISTENTIAL_DEPOSIT), + id: Concrete(Parent.into()) + }, + ParentThen(Parachain(random_para_id).into()).into(), + )) + } + } + impl XcmBridgeHubRouterConfig for Runtime { fn make_congested() { cumulus_pallet_xcmp_queue::bridging::suspend_channel_for_benchmarks::( diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/xcm_config.rs b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/xcm_config.rs index ff37ff74e75..b0bf9e82729 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/xcm_config.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/xcm_config.rs @@ -667,11 +667,6 @@ pub type XcmRouter = WithUniqueTopic<( ToRococoXcmRouter, )>; -#[cfg(feature = "runtime-benchmarks")] -parameter_types! { - pub ReachableDest: Option = Some(Parent.into()); -} - impl pallet_xcm::Config for Runtime { type RuntimeEvent = RuntimeEvent; // We want to disallow users sending (arbitrary) XCMs from this chain. 
@@ -701,8 +696,6 @@ impl pallet_xcm::Config for Runtime { type SovereignAccountOf = LocationToAccountId; type MaxLockers = ConstU32<8>; type WeightInfo = crate::weights::pallet_xcm::WeightInfo; - #[cfg(feature = "runtime-benchmarks")] - type ReachableDest = ReachableDest; type AdminOrigin = EnsureRoot; type MaxRemoteLockConsumers = ConstU32<0>; type RemoteLockConsumerIdentifier = (); diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/tests/tests.rs b/cumulus/parachains/runtimes/assets/asset-hub-rococo/tests/tests.rs index c3d3c4abbbb..b4f4e828dde 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-rococo/tests/tests.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/tests/tests.rs @@ -529,12 +529,6 @@ asset_test_utils::include_teleports_for_native_asset_works!( _ => None, } }), - Box::new(|runtime_event_encoded: Vec| { - match RuntimeEvent::decode(&mut &runtime_event_encoded[..]) { - Ok(RuntimeEvent::XcmpQueue(event)) => Some(event), - _ => None, - } - }), 1000 ); @@ -930,6 +924,35 @@ mod asset_hub_rococo_tests { actual ); } + + #[test] + fn reserve_transfer_native_asset_to_non_teleport_para_works() { + asset_test_utils::test_cases::reserve_transfer_native_asset_to_non_teleport_para_works::< + Runtime, + AllPalletsWithoutSystem, + XcmConfig, + ParachainSystem, + XcmpQueue, + LocationToAccountId, + >( + collator_session_keys(), + ExistentialDeposit::get(), + AccountId::from(ALICE), + Box::new(|runtime_event_encoded: Vec| { + match RuntimeEvent::decode(&mut &runtime_event_encoded[..]) { + Ok(RuntimeEvent::PolkadotXcm(event)) => Some(event), + _ => None, + } + }), + Box::new(|runtime_event_encoded: Vec| { + match RuntimeEvent::decode(&mut &runtime_event_encoded[..]) { + Ok(RuntimeEvent::XcmpQueue(event)) => Some(event), + _ => None, + } + }), + WeightLimit::Unlimited, + ); + } } mod asset_hub_wococo_tests { diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs index f1a15265b90..d88aa2607e2 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs @@ -1013,7 +1013,7 @@ mod benches { [cumulus_pallet_dmp_queue, DmpQueue] [pallet_xcm_bridge_hub_router, ToRococo] // XCM - [pallet_xcm, PolkadotXcm] + [pallet_xcm, PalletXcmExtrinsiscsBenchmark::] // NOTE: Make sure you point to the individual modules below. [pallet_xcm_benchmarks::fungible, XcmBalances] [pallet_xcm_benchmarks::generic, XcmGeneric] @@ -1297,6 +1297,7 @@ impl_runtime_apis! { use frame_support::traits::StorageInfoTrait; use frame_system_benchmarking::Pallet as SystemBench; use cumulus_pallet_session_benchmarking::Pallet as SessionBench; + use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsiscsBenchmark; use pallet_xcm_bridge_hub_router::benchmarking::Pallet as XcmBridgeHubRouterBench; // This is defined once again in dispatch_benchmark, because list_benchmarks! @@ -1343,6 +1344,39 @@ impl_runtime_apis! { use cumulus_pallet_session_benchmarking::Pallet as SessionBench; impl cumulus_pallet_session_benchmarking::Config for Runtime {} + use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsiscsBenchmark; + impl pallet_xcm::benchmarking::Config for Runtime { + fn reachable_dest() -> Option { + Some(Parent.into()) + } + + fn teleportable_asset_and_dest() -> Option<(MultiAsset, MultiLocation)> { + // Relay/native token can be teleported between AH and Relay. 
+ Some(( + MultiAsset { + fun: Fungible(EXISTENTIAL_DEPOSIT), + id: Concrete(Parent.into()) + }, + Parent.into(), + )) + } + + fn reserve_transferable_asset_and_dest() -> Option<(MultiAsset, MultiLocation)> { + // AH can reserve transfer native token to some random parachain. + let random_para_id = 43211234; + ParachainSystem::open_outbound_hrmp_channel_for_benchmarks_or_tests( + random_para_id.into() + ); + Some(( + MultiAsset { + fun: Fungible(EXISTENTIAL_DEPOSIT), + id: Concrete(Parent.into()) + }, + ParentThen(Parachain(random_para_id).into()).into(), + )) + } + } + use pallet_xcm_bridge_hub_router::benchmarking::{ Pallet as XcmBridgeHubRouterBench, Config as XcmBridgeHubRouterConfig, diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/xcm_config.rs b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/xcm_config.rs index ec42618513a..17312c0f46e 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/xcm_config.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/xcm_config.rs @@ -636,11 +636,6 @@ pub type XcmRouter = WithUniqueTopic<( ToRococoXcmRouter, )>; -#[cfg(feature = "runtime-benchmarks")] -parameter_types! { - pub ReachableDest: Option = Some(Parent.into()); -} - impl pallet_xcm::Config for Runtime { type RuntimeEvent = RuntimeEvent; type SendXcmOrigin = EnsureXcmOrigin; @@ -666,8 +661,6 @@ impl pallet_xcm::Config for Runtime { type SovereignAccountOf = LocationToAccountId; type MaxLockers = ConstU32<8>; type WeightInfo = crate::weights::pallet_xcm::WeightInfo; - #[cfg(feature = "runtime-benchmarks")] - type ReachableDest = ReachableDest; type AdminOrigin = EnsureRoot; type MaxRemoteLockConsumers = ConstU32<0>; type RemoteLockConsumerIdentifier = (); diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/tests/tests.rs b/cumulus/parachains/runtimes/assets/asset-hub-westend/tests/tests.rs index de87a98fb0b..7922b04e807 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-westend/tests/tests.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/tests/tests.rs @@ -525,12 +525,6 @@ asset_test_utils::include_teleports_for_native_asset_works!( _ => None, } }), - Box::new(|runtime_event_encoded: Vec| { - match RuntimeEvent::decode(&mut &runtime_event_encoded[..]) { - Ok(RuntimeEvent::XcmpQueue(event)) => Some(event), - _ => None, - } - }), 1000 ); @@ -815,3 +809,32 @@ fn change_xcm_bridge_hub_router_byte_fee_by_governance_works() { }, ) } + +#[test] +fn reserve_transfer_native_asset_to_non_teleport_para_works() { + asset_test_utils::test_cases::reserve_transfer_native_asset_to_non_teleport_para_works::< + Runtime, + AllPalletsWithoutSystem, + XcmConfig, + ParachainSystem, + XcmpQueue, + LocationToAccountId, + >( + collator_session_keys(), + ExistentialDeposit::get(), + AccountId::from(ALICE), + Box::new(|runtime_event_encoded: Vec| { + match RuntimeEvent::decode(&mut &runtime_event_encoded[..]) { + Ok(RuntimeEvent::PolkadotXcm(event)) => Some(event), + _ => None, + } + }), + Box::new(|runtime_event_encoded: Vec| { + match RuntimeEvent::decode(&mut &runtime_event_encoded[..]) { + Ok(RuntimeEvent::XcmpQueue(event)) => Some(event), + _ => None, + } + }), + WeightLimit::Unlimited, + ); +} diff --git a/cumulus/parachains/runtimes/assets/test-utils/src/lib.rs b/cumulus/parachains/runtimes/assets/test-utils/src/lib.rs index 471b1f09b56..872ad06ddd5 100644 --- a/cumulus/parachains/runtimes/assets/test-utils/src/lib.rs +++ b/cumulus/parachains/runtimes/assets/test-utils/src/lib.rs @@ -27,6 +27,21 @@ use 
std::fmt::Debug;
 use xcm::latest::prelude::*;
 use xcm_builder::{CreateMatcher, MatchXcm};
+/// Given a message, a sender, and a destination, it returns the delivery fees
+fn get_fungible_delivery_fees<S: SendXcm>(destination: MultiLocation, message: Xcm<()>) -> u128 {
+	let Ok((_, delivery_fees)) = validate_send::<S>(destination, message) else {
+		unreachable!("message can be sent; qed")
+	};
+	if let Some(delivery_fee) = delivery_fees.inner().first() {
+		let Fungible(delivery_fee_amount) = delivery_fee.fun else {
+			unreachable!("asset is fungible; qed");
+		};
+		delivery_fee_amount
+	} else {
+		0
+	}
+}
+
 /// Helper function to verify `xcm` contains all relevant instructions expected on destination
 /// chain as part of a reserve-asset-transfer.
 pub(crate) fn assert_matches_reserve_asset_deposited_instructions(
diff --git a/cumulus/parachains/runtimes/assets/test-utils/src/test_cases.rs b/cumulus/parachains/runtimes/assets/test-utils/src/test_cases.rs
index 5fb34e7a571..f1cc76350a0 100644
--- a/cumulus/parachains/runtimes/assets/test-utils/src/test_cases.rs
+++ b/cumulus/parachains/runtimes/assets/test-utils/src/test_cases.rs
@@ -16,25 +16,28 @@
 //! Module contains predefined test-case scenarios for `Runtime` with various assets.
 use super::xcm_helpers;
+use crate::{assert_matches_reserve_asset_deposited_instructions, get_fungible_delivery_fees};
 use codec::Encode;
+use cumulus_primitives_core::XcmpMessageSource;
 use frame_support::{
 	assert_noop, assert_ok,
 	traits::{
-		fungible::Mutate, fungibles::InspectEnumerable, Get, OnFinalize, OnInitialize, OriginTrait,
+		fungible::Mutate, fungibles::InspectEnumerable, Currency, Get, OnFinalize, OnInitialize,
+		OriginTrait,
 	},
 	weights::Weight,
 };
 use frame_system::pallet_prelude::BlockNumberFor;
 use parachains_common::{AccountId, Balance};
 use parachains_runtimes_test_utils::{
-	assert_metadata, assert_total, AccountIdOf, BalanceOf, CollatorSessionKeys, ExtBuilder,
-	ValidatorIdOf, XcmReceivedFrom,
+	assert_metadata, assert_total, mock_open_hrmp_channel, AccountIdOf, BalanceOf,
+	CollatorSessionKeys, ExtBuilder, ValidatorIdOf, XcmReceivedFrom,
 };
 use sp_runtime::{
 	traits::{MaybeEquivalence, StaticLookup, Zero},
 	DispatchError, Saturating,
 };
-use xcm::latest::prelude::*;
+use xcm::{latest::prelude::*, VersionedMultiAssets};
 use xcm_executor::{traits::ConvertLocation, XcmExecutor};
 type RuntimeHelper =
@@ -43,8 +46,8 @@ type RuntimeHelper =
 // Re-export test_case from `parachains-runtimes-test-utils`
 pub use parachains_runtimes_test_utils::test_cases::change_storage_constant_by_governance_works;
-/// Test-case makes sure that `Runtime` can receive native asset from relay chain
-/// and can teleport it back and to the other parachains
+/// Test-case makes sure that `Runtime` can receive native asset from relay chain and can teleport
+/// it back
 pub fn teleports_for_native_asset_works<
 	Runtime,
 	AllPalletsWithoutSystem,
@@ -57,9 +60,6 @@ pub fn teleports_for_native_asset_works<
 	existential_deposit: BalanceOf,
 	target_account: AccountIdOf,
 	unwrap_pallet_xcm_event: Box) -> Option>>,
-	unwrap_xcmp_queue_event: Box<
-		dyn Fn(Vec) -> Option>,
-	>,
 	runtime_para_id: u32,
 ) where
 	Runtime: frame_system::Config
@@ -164,12 +164,13 @@ pub fn teleports_for_native_asset_works<
 	// 2. 
try to teleport asset back to the relaychain { let dest = MultiLocation::parent(); - let dest_beneficiary = MultiLocation::parent() + let mut dest_beneficiary = MultiLocation::parent() .appended_with(AccountId32 { network: None, id: sp_runtime::AccountId32::new([3; 32]).into(), }) .unwrap(); + dest_beneficiary.reanchor(&dest, XcmConfig::UniversalLocation::get()).unwrap(); let target_account_balance_before_teleport = >::free_balance(&target_account); @@ -223,65 +224,53 @@ pub fn teleports_for_native_asset_works< ); } - // 3. try to teleport asset away to other parachain (1234) + // 3. try to teleport assets away to other parachain (2345): should not work as we don't + // trust `IsTeleporter` for `(relay-native-asset, para(2345))` pair { - let other_para_id = 1234; + let other_para_id = 2345; let dest = MultiLocation::new(1, X1(Parachain(other_para_id))); - let dest_beneficiary = MultiLocation::new(1, X1(Parachain(other_para_id))) + let mut dest_beneficiary = MultiLocation::new(1, X1(Parachain(other_para_id))) .appended_with(AccountId32 { network: None, id: sp_runtime::AccountId32::new([3; 32]).into(), }) .unwrap(); + dest_beneficiary.reanchor(&dest, XcmConfig::UniversalLocation::get()).unwrap(); let target_account_balance_before_teleport = >::free_balance(&target_account); + let native_asset_to_teleport_away = native_asset_amount_unit * 3.into(); assert!( native_asset_to_teleport_away < target_account_balance_before_teleport - existential_deposit ); - - assert_ok!(RuntimeHelper::::do_teleport_assets::( - RuntimeHelper::::origin_of(target_account.clone()), - dest, - dest_beneficiary, - (native_asset_id, native_asset_to_teleport_away.into()), - Some((runtime_para_id, other_para_id)), - included_head, - &alice, - )); - - let delivery_fees = - xcm_helpers::transfer_assets_delivery_fees::( - (native_asset_id, native_asset_to_teleport_away.into()).into(), - 0, - Unlimited, - dest_beneficiary, + assert_eq!( + RuntimeHelper::::do_teleport_assets::( + RuntimeHelper::::origin_of(target_account.clone()), dest, - ); + dest_beneficiary, + (native_asset_id, native_asset_to_teleport_away.into()), + Some((runtime_para_id, other_para_id)), + included_head, + &alice, + ), + Err(DispatchError::Module(sp_runtime::ModuleError { + index: 31, + error: [2, 0, 0, 0,], + message: Some("Filtered",), + },),) + ); // check balances assert_eq!( >::free_balance(&target_account), - target_account_balance_before_teleport - - native_asset_to_teleport_away - - delivery_fees.into() + target_account_balance_before_teleport ); assert_eq!( >::free_balance(&CheckingAccount::get()), 0.into() ); - - // check events - RuntimeHelper::::assert_pallet_xcm_event_outcome( - &unwrap_pallet_xcm_event, - |outcome| { - assert_ok!(outcome.ensure_complete()); - }, - ); - assert!(RuntimeHelper::::xcmp_queue_message_sent(unwrap_xcmp_queue_event) - .is_some()); } }) } @@ -298,7 +287,6 @@ macro_rules! include_teleports_for_native_asset_works( $collator_session_key:expr, $existential_deposit:expr, $unwrap_pallet_xcm_event:expr, - $unwrap_xcmp_queue_event:expr, $runtime_para_id:expr ) => { #[test] @@ -318,15 +306,14 @@ macro_rules! 
include_teleports_for_native_asset_works( $existential_deposit, target_account, $unwrap_pallet_xcm_event, - $unwrap_xcmp_queue_event, $runtime_para_id ) } } ); -/// Test-case makes sure that `Runtime` can receive teleported assets from sibling parachain relay -/// chain +/// Test-case makes sure that `Runtime` can receive teleported assets from sibling parachain, and +/// can teleport it back pub fn teleports_for_foreign_assets_works< Runtime, AllPalletsWithoutSystem, @@ -381,7 +368,7 @@ pub fn teleports_for_foreign_assets_works< ::AccountId: From, ForeignAssetsPalletInstance: 'static, { - // foreign parachain with the same consenus currency as asset + // foreign parachain with the same consensus currency as asset let foreign_para_id = 2222; let foreign_asset_id_multilocation = MultiLocation { parents: 1, @@ -473,7 +460,7 @@ pub fn teleports_for_foreign_assets_works< >(foreign_asset_id_multilocation, 0, 0); assert!(teleported_foreign_asset_amount > asset_minimum_asset_balance); - // 1. process received teleported assets from relaychain + // 1. process received teleported assets from sibling parachain (foreign_para_id) let xcm = Xcm(vec![ // BuyExecution with relaychain native token WithdrawAsset(buy_execution_fee.clone().into()), @@ -551,12 +538,13 @@ pub fn teleports_for_foreign_assets_works< // 2. try to teleport asset back to source parachain (foreign_para_id) { let dest = MultiLocation::new(1, X1(Parachain(foreign_para_id))); - let dest_beneficiary = MultiLocation::new(1, X1(Parachain(foreign_para_id))) + let mut dest_beneficiary = MultiLocation::new(1, X1(Parachain(foreign_para_id))) .appended_with(AccountId32 { network: None, id: sp_runtime::AccountId32::new([3; 32]).into(), }) .unwrap(); + dest_beneficiary.reanchor(&dest, XcmConfig::UniversalLocation::get()).unwrap(); let target_account_balance_before_teleport = >::balance( @@ -1108,7 +1096,7 @@ pub fn create_and_manage_foreign_assets_for_local_consensus_parachain_assets_wor AssetId: Clone + Copy, AssetIdConverter: MaybeEquivalence, { - // foreign parachain with the same consenus currency as asset + // foreign parachain with the same consensus currency as asset let foreign_asset_id_multilocation = MultiLocation { parents: 1, interior: X2(Parachain(2222), GeneralIndex(1234567)) }; let asset_id = AssetIdConverter::convert(&foreign_asset_id_multilocation).unwrap(); @@ -1388,3 +1376,199 @@ macro_rules! 
include_create_and_manage_foreign_assets_for_local_consensus_parach } } ); + +/// Test-case makes sure that `Runtime` can reserve-transfer asset to other parachains (where +/// teleport is not trusted) +pub fn reserve_transfer_native_asset_to_non_teleport_para_works< + Runtime, + AllPalletsWithoutSystem, + XcmConfig, + HrmpChannelOpener, + HrmpChannelSource, + LocationToAccountId, +>( + collator_session_keys: CollatorSessionKeys, + existential_deposit: BalanceOf, + alice_account: AccountIdOf, + unwrap_pallet_xcm_event: Box) -> Option>>, + unwrap_xcmp_queue_event: Box< + dyn Fn(Vec) -> Option>, + >, + weight_limit: WeightLimit, +) where + Runtime: frame_system::Config + + pallet_balances::Config + + pallet_session::Config + + pallet_xcm::Config + + parachain_info::Config + + pallet_collator_selection::Config + + cumulus_pallet_parachain_system::Config + + cumulus_pallet_xcmp_queue::Config, + AllPalletsWithoutSystem: + OnInitialize> + OnFinalize>, + AccountIdOf: Into<[u8; 32]>, + ValidatorIdOf: From>, + BalanceOf: From, + ::Balance: From + Into, + XcmConfig: xcm_executor::Config, + LocationToAccountId: ConvertLocation>, + ::AccountId: + Into<<::RuntimeOrigin as OriginTrait>::AccountId>, + <::Lookup as StaticLookup>::Source: + From<::AccountId>, + ::AccountId: From, + HrmpChannelOpener: frame_support::inherent::ProvideInherent< + Call = cumulus_pallet_parachain_system::Call, + >, + HrmpChannelSource: XcmpMessageSource, +{ + let runtime_para_id = 1000; + ExtBuilder::::default() + .with_collators(collator_session_keys.collators()) + .with_session_keys(collator_session_keys.session_keys()) + .with_tracing() + .with_safe_xcm_version(3) + .with_para_id(runtime_para_id.into()) + .build() + .execute_with(|| { + let mut alice = [0u8; 32]; + alice[0] = 1; + let included_head = RuntimeHelper::::run_to_block( + 2, + AccountId::from(alice).into(), + ); + + // reserve-transfer native asset with local reserve to remote parachain (2345) + + let other_para_id = 2345; + let native_asset = MultiLocation::parent(); + let dest = MultiLocation::new(1, X1(Parachain(other_para_id))); + let mut dest_beneficiary = MultiLocation::new(1, X1(Parachain(other_para_id))) + .appended_with(AccountId32 { + network: None, + id: sp_runtime::AccountId32::new([3; 32]).into(), + }) + .unwrap(); + dest_beneficiary.reanchor(&dest, XcmConfig::UniversalLocation::get()).unwrap(); + + let reserve_account = LocationToAccountId::convert_location(&dest) + .expect("Sovereign account for reserves"); + let balance_to_transfer = 1_000_000_000_000_u128; + + // open HRMP to other parachain + mock_open_hrmp_channel::( + runtime_para_id.into(), + other_para_id.into(), + included_head, + &alice, + ); + + // we calculate exact delivery fees _after_ sending the message by weighing the sent + // xcm, and this delivery fee varies for different runtimes, so just add enough buffer, + // then verify the arithmetics check out on final balance. 
+ let delivery_fees_buffer = 40_000_000_000u128; + // drip 2xED + transfer_amount + delivery_fees_buffer to Alice account + let alice_account_init_balance = existential_deposit.saturating_mul(2.into()) + + balance_to_transfer.into() + + delivery_fees_buffer.into(); + let _ = >::deposit_creating( + &alice_account, + alice_account_init_balance, + ); + // SA of target location needs to have at least ED, otherwise making reserve fails + let _ = >::deposit_creating( + &reserve_account, + existential_deposit, + ); + + // we just check here, that user retains enough balance after withdrawal + // and also we check if `balance_to_transfer` is more than `existential_deposit`, + assert!( + (>::free_balance(&alice_account) - + balance_to_transfer.into()) >= + existential_deposit + ); + // SA has just ED + assert_eq!( + >::free_balance(&reserve_account), + existential_deposit + ); + + // local native asset (pallet_balances) + let asset_to_transfer = MultiAsset { + fun: Fungible(balance_to_transfer.into()), + id: Concrete(native_asset), + }; + + // pallet_xcm call reserve transfer + assert_ok!(>::limited_reserve_transfer_assets( + RuntimeHelper::::origin_of(alice_account.clone()), + Box::new(dest.into_versioned()), + Box::new(dest_beneficiary.into_versioned()), + Box::new(VersionedMultiAssets::from(MultiAssets::from(asset_to_transfer))), + 0, + weight_limit, + )); + + // check events + // check pallet_xcm attempted + RuntimeHelper::::assert_pallet_xcm_event_outcome( + &unwrap_pallet_xcm_event, + |outcome| { + assert_ok!(outcome.ensure_complete()); + }, + ); + + // check that xcm was sent + let xcm_sent_message_hash = >::events() + .into_iter() + .filter_map(|e| unwrap_xcmp_queue_event(e.event.encode())) + .find_map(|e| match e { + cumulus_pallet_xcmp_queue::Event::XcmpMessageSent { message_hash } => + Some(message_hash), + _ => None, + }); + + // read xcm + let xcm_sent = RuntimeHelper::::take_xcm( + other_para_id.into(), + ) + .unwrap(); + + let delivery_fees = get_fungible_delivery_fees::< + ::XcmSender, + >(dest, Xcm::try_from(xcm_sent.clone()).unwrap()); + + assert_eq!( + xcm_sent_message_hash, + Some(xcm_sent.using_encoded(sp_io::hashing::blake2_256)) + ); + let mut xcm_sent: Xcm<()> = xcm_sent.try_into().expect("versioned xcm"); + + // check sent XCM Program to other parachain + println!("reserve_transfer_native_asset_works sent xcm: {:?}", xcm_sent); + let reserve_assets_deposited = MultiAssets::from(vec![MultiAsset { + id: Concrete(MultiLocation { parents: 1, interior: Here }), + fun: Fungible(1000000000000), + }]); + + assert_matches_reserve_asset_deposited_instructions( + &mut xcm_sent, + &reserve_assets_deposited, + &dest_beneficiary, + ); + + // check alice account decreased by balance_to_transfer ( + delivery_fees) + assert_eq!( + >::free_balance(&alice_account), + alice_account_init_balance - balance_to_transfer.into() - delivery_fees.into() + ); + + // check reserve account + // check reserve account increased by balance_to_transfer + assert_eq!( + >::free_balance(&reserve_account), + existential_deposit + balance_to_transfer.into() + ); + }) +} diff --git a/cumulus/parachains/runtimes/assets/test-utils/src/test_cases_over_bridge.rs b/cumulus/parachains/runtimes/assets/test-utils/src/test_cases_over_bridge.rs index 6c8ac8c6452..851fcd5c7d6 100644 --- a/cumulus/parachains/runtimes/assets/test-utils/src/test_cases_over_bridge.rs +++ b/cumulus/parachains/runtimes/assets/test-utils/src/test_cases_over_bridge.rs @@ -16,7 +16,7 @@ //! 
Module contains predefined test-case scenarios for `Runtime` with various assets transferred //! over a bridge. -use crate::assert_matches_reserve_asset_deposited_instructions; +use crate::{assert_matches_reserve_asset_deposited_instructions, get_fungible_delivery_fees}; use codec::Encode; use cumulus_primitives_core::XcmpMessageSource; use frame_support::{ @@ -32,10 +32,7 @@ use parachains_runtimes_test_utils::{ use sp_runtime::{traits::StaticLookup, Saturating}; use xcm::{latest::prelude::*, VersionedMultiAssets}; use xcm_builder::{CreateMatcher, MatchXcm}; -use xcm_executor::{ - traits::{ConvertLocation, TransactAsset}, - XcmExecutor, -}; +use xcm_executor::{traits::ConvertLocation, XcmExecutor}; pub struct TestBridgingConfig { pub bridged_network: NetworkId, @@ -129,8 +126,13 @@ pub fn limited_reserve_transfer_assets_for_native_asset_works< &alice, ); - // drip ED to account - let alice_account_init_balance = existential_deposit + balance_to_transfer.into(); + // we calculate exact delivery fees _after_ sending the message by weighing the sent + // xcm, and this delivery fee varies for different runtimes, so just add enough buffer, + // then verify the arithmetics check out on final balance. + let delivery_fees_buffer = 8_000_000_000_000u128; + // drip ED + transfer_amount + delivery_fees_buffer to Alice account + let alice_account_init_balance = + existential_deposit + balance_to_transfer.into() + delivery_fees_buffer.into(); let _ = >::deposit_creating( &alice_account, alice_account_init_balance, @@ -183,56 +185,6 @@ pub fn limited_reserve_transfer_assets_for_native_asset_works< let expected_beneficiary = target_destination_account; - // Make sure sender has enough funds for paying delivery fees - let handling_delivery_fees = { - // Probable XCM with `ReserveAssetDeposited`. - let mut expected_reserve_asset_deposited_message = Xcm(vec![ - ReserveAssetDeposited(MultiAssets::from(expected_assets.clone())), - ClearOrigin, - BuyExecution { - fees: MultiAsset { - id: Concrete(Default::default()), - fun: Fungible(balance_to_transfer), - }, - weight_limit: Unlimited, - }, - DepositAsset { assets: Wild(AllCounted(1)), beneficiary: expected_beneficiary }, - SetTopic([ - 220, 188, 144, 32, 213, 83, 111, 175, 44, 210, 111, 19, 90, 165, 191, 112, - 140, 247, 192, 124, 42, 17, 153, 141, 114, 34, 189, 20, 83, 69, 237, 173, - ]), - ]); - assert_matches_reserve_asset_deposited_instructions( - &mut expected_reserve_asset_deposited_message, - &expected_assets, - &expected_beneficiary, - ); - - // Call `SendXcm::validate` to get delivery fees. - let (_, delivery_fees): (_, MultiAssets) = XcmConfig::XcmSender::validate( - &mut Some(target_location_from_different_consensus), - &mut Some(expected_reserve_asset_deposited_message), - ) - .expect("validate passes"); - // Drip delivery fee to Alice account. - let mut delivery_fees_added = false; - for delivery_fee in delivery_fees.inner() { - assert_ok!(::deposit_asset( - &delivery_fee, - &MultiLocation { - parents: 0, - interior: X1(AccountId32 { - network: None, - id: alice_account.clone().into(), - }), - }, - None, - )); - delivery_fees_added = true; - } - delivery_fees_added - }; - // do pallet_xcm call reserve transfer assert_ok!(>::limited_reserve_transfer_assets( RuntimeHelper::::origin_of(alice_account.clone()), @@ -275,6 +227,7 @@ pub fn limited_reserve_transfer_assets_for_native_asset_works< // check sent XCM ExportMessage to BridgeHub + let mut delivery_fees = 0; // 1. 
check paid or unpaid if let Some(expected_fee_asset_id) = maybe_paid_export_message { xcm_sent @@ -315,6 +268,10 @@ pub fn limited_reserve_transfer_assets_for_native_asset_works< .split_global() .expect("split works"); assert_eq!(destination, &target_location_junctions_without_global_consensus); + // Call `SendXcm::validate` to get delivery fees. + delivery_fees = get_fungible_delivery_fees::< + ::XcmSender, + >(target_location_from_different_consensus, inner_xcm.clone()); assert_matches_reserve_asset_deposited_instructions( inner_xcm, &expected_assets, @@ -330,8 +287,8 @@ pub fn limited_reserve_transfer_assets_for_native_asset_works< assert_eq!( >::free_balance(&alice_account), alice_account_init_balance - .saturating_sub(existential_deposit) .saturating_sub(balance_to_transfer.into()) + .saturating_sub(delivery_fees.into()) ); // check reserve account increased by balance_to_transfer @@ -341,14 +298,13 @@ pub fn limited_reserve_transfer_assets_for_native_asset_works< ); // check dedicated account increased by delivery fees (if configured) - if handling_delivery_fees { - if let Some(delivery_fees_account) = delivery_fees_account { - let delivery_fees_account_balance_after = - >::free_balance(&delivery_fees_account); - assert!( - delivery_fees_account_balance_after > delivery_fees_account_balance_before - ); - } + if let Some(delivery_fees_account) = delivery_fees_account { + let delivery_fees_account_balance_after = + >::free_balance(&delivery_fees_account); + assert!( + delivery_fees_account_balance_after - delivery_fees.into() >= + delivery_fees_account_balance_before + ); } }) } diff --git a/cumulus/parachains/runtimes/bridge-hubs/README.md b/cumulus/parachains/runtimes/bridge-hubs/README.md index 9bd6557f350..b2a14a0405d 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/README.md +++ b/cumulus/parachains/runtimes/bridge-hubs/README.md @@ -270,7 +270,7 @@ cd ### Send messages - transfer asset over bridge (ROCs/WNDs) -Do (asset) transfers: +Do reserve-backed transfers: ``` cd @@ -291,6 +291,20 @@ cd - AssetHubWestend (see `foreignAssets.Issued`, `xcmpQueue.Success`) https://polkadot.js.org/apps/?rpc=ws://127.0.0.1:9010#/explorer - BridgeHubRocococ (see `bridgeWestendMessages.MessagesDelivered`) https://polkadot.js.org/apps/?rpc=ws://127.0.0.1:8943#/explorer +Do reserve withdraw transfers: (when previous is finished) +``` +cd + +# wrappedWNDs from Rococo's Asset Hub to Westend's. +./cumulus/scripts/bridges_rococo_westend.sh withdraw-reserve-assets-from-asset-hub-rococo-local +``` +``` +cd + +# wrappedROCs from Westend's Asset Hub to Rococo's. +./cumulus/scripts/bridges_rococo_westend.sh withdraw-reserve-assets-from-asset-hub-westend-local +``` + ### Claim relayer's rewards on BridgeHubRococo and BridgeHubWestend **Accounts of BridgeHub parachains:** diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/lib.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/lib.rs index bd95ec3fda7..b3750700084 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/lib.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/lib.rs @@ -490,7 +490,7 @@ mod benches { [cumulus_pallet_xcmp_queue, XcmpQueue] [cumulus_pallet_dmp_queue, DmpQueue] // XCM - [pallet_xcm, PolkadotXcm] + [pallet_xcm, PalletXcmExtrinsiscsBenchmark::] // NOTE: Make sure you point to the individual modules below. [pallet_xcm_benchmarks::fungible, XcmBalances] [pallet_xcm_benchmarks::generic, XcmGeneric] @@ -670,6 +670,7 @@ impl_runtime_apis! 
{ use frame_support::traits::StorageInfoTrait; use frame_system_benchmarking::Pallet as SystemBench; use cumulus_pallet_session_benchmarking::Pallet as SessionBench; + use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsiscsBenchmark; // This is defined once again in dispatch_benchmark, because list_benchmarks! // and add_benchmarks! are macros exported by define_benchmarks! macros and those types @@ -705,6 +706,29 @@ impl_runtime_apis! { use cumulus_pallet_session_benchmarking::Pallet as SessionBench; impl cumulus_pallet_session_benchmarking::Config for Runtime {} + use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsiscsBenchmark; + impl pallet_xcm::benchmarking::Config for Runtime { + fn reachable_dest() -> Option { + Some(Parent.into()) + } + + fn teleportable_asset_and_dest() -> Option<(MultiAsset, MultiLocation)> { + // Relay/native token can be teleported between BH and Relay. + Some(( + MultiAsset { + fun: Fungible(EXISTENTIAL_DEPOSIT), + id: Concrete(Parent.into()) + }, + Parent.into(), + )) + } + + fn reserve_transferable_asset_and_dest() -> Option<(MultiAsset, MultiLocation)> { + // Reserve transfers are disabled on BH. + None + } + } + use xcm::latest::prelude::*; use xcm_config::KsmRelayLocation; diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/xcm_config.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/xcm_config.rs index 727735c9285..b3703eee830 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/xcm_config.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/xcm_config.rs @@ -241,11 +241,6 @@ pub type XcmRouter = WithUniqueTopic<( XcmpQueue, )>; -#[cfg(feature = "runtime-benchmarks")] -parameter_types! { - pub ReachableDest: Option = Some(Parent.into()); -} - impl pallet_xcm::Config for Runtime { type RuntimeEvent = RuntimeEvent; // We want to disallow users sending (arbitrary) XCMs from this chain. 
@@ -274,8 +269,6 @@ impl pallet_xcm::Config for Runtime { type SovereignAccountOf = LocationToAccountId; type MaxLockers = ConstU32<8>; type WeightInfo = crate::weights::pallet_xcm::WeightInfo; - #[cfg(feature = "runtime-benchmarks")] - type ReachableDest = ReachableDest; type AdminOrigin = EnsureRoot; type MaxRemoteLockConsumers = ConstU32<0>; type RemoteLockConsumerIdentifier = (); diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/tests/tests.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/tests/tests.rs index 893524e12f6..36d8f0846af 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/tests/tests.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/tests/tests.rs @@ -47,11 +47,5 @@ bridge_hub_test_utils::test_cases::include_teleports_for_native_asset_works!( _ => None, } }), - Box::new(|runtime_event_encoded: Vec| { - match RuntimeEvent::decode(&mut &runtime_event_encoded[..]) { - Ok(RuntimeEvent::XcmpQueue(event)) => Some(event), - _ => None, - } - }), 1002 ); diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/lib.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/lib.rs index 4744dc08e8e..841bb4ee861 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/lib.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/lib.rs @@ -491,7 +491,7 @@ mod benches { [cumulus_pallet_xcmp_queue, XcmpQueue] [cumulus_pallet_dmp_queue, DmpQueue] // XCM - [pallet_xcm, PolkadotXcm] + [pallet_xcm, PalletXcmExtrinsiscsBenchmark::] // NOTE: Make sure you point to the individual modules below. [pallet_xcm_benchmarks::fungible, XcmBalances] [pallet_xcm_benchmarks::generic, XcmGeneric] @@ -671,6 +671,7 @@ impl_runtime_apis! { use frame_support::traits::StorageInfoTrait; use frame_system_benchmarking::Pallet as SystemBench; use cumulus_pallet_session_benchmarking::Pallet as SessionBench; + use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsiscsBenchmark; // This is defined once again in dispatch_benchmark, because list_benchmarks! // and add_benchmarks! are macros exported by define_benchmarks! macros and those types @@ -706,6 +707,29 @@ impl_runtime_apis! { use cumulus_pallet_session_benchmarking::Pallet as SessionBench; impl cumulus_pallet_session_benchmarking::Config for Runtime {} + use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsiscsBenchmark; + impl pallet_xcm::benchmarking::Config for Runtime { + fn reachable_dest() -> Option { + Some(Parent.into()) + } + + fn teleportable_asset_and_dest() -> Option<(MultiAsset, MultiLocation)> { + // Relay/native token can be teleported between BH and Relay. + Some(( + MultiAsset { + fun: Fungible(EXISTENTIAL_DEPOSIT), + id: Concrete(Parent.into()) + }, + Parent.into(), + )) + } + + fn reserve_transferable_asset_and_dest() -> Option<(MultiAsset, MultiLocation)> { + // Reserve transfers are disabled on BH. + None + } + } + use xcm::latest::prelude::*; use xcm_config::DotRelayLocation; diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/xcm_config.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/xcm_config.rs index ac7e00fc427..61eee1c4c5a 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/xcm_config.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/xcm_config.rs @@ -245,11 +245,6 @@ pub type XcmRouter = WithUniqueTopic<( XcmpQueue, )>; -#[cfg(feature = "runtime-benchmarks")] -parameter_types! 
{ - pub ReachableDest: Option = Some(Parent.into()); -} - impl pallet_xcm::Config for Runtime { type RuntimeEvent = RuntimeEvent; // We want to disallow users sending (arbitrary) XCMs from this chain. @@ -278,8 +273,6 @@ impl pallet_xcm::Config for Runtime { type SovereignAccountOf = LocationToAccountId; type MaxLockers = ConstU32<8>; type WeightInfo = crate::weights::pallet_xcm::WeightInfo; - #[cfg(feature = "runtime-benchmarks")] - type ReachableDest = ReachableDest; type AdminOrigin = EnsureRoot; type MaxRemoteLockConsumers = ConstU32<0>; type RemoteLockConsumerIdentifier = (); diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/tests/tests.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/tests/tests.rs index 0be87bd46fa..3156a5fe68e 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/tests/tests.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/tests/tests.rs @@ -47,11 +47,5 @@ bridge_hub_test_utils::test_cases::include_teleports_for_native_asset_works!( _ => None, } }), - Box::new(|runtime_event_encoded: Vec| { - match RuntimeEvent::decode(&mut &runtime_event_encoded[..]) { - Ok(RuntimeEvent::XcmpQueue(event)) => Some(event), - _ => None, - } - }), 1002 ); diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs index e5d38bcac23..b17d308b891 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs @@ -595,7 +595,7 @@ mod benches { [cumulus_pallet_xcmp_queue, XcmpQueue] [cumulus_pallet_dmp_queue, DmpQueue] // XCM - [pallet_xcm, PolkadotXcm] + [pallet_xcm, PalletXcmExtrinsiscsBenchmark::] // NOTE: Make sure you point to the individual modules below. [pallet_xcm_benchmarks::fungible, XcmBalances] [pallet_xcm_benchmarks::generic, XcmGeneric] @@ -933,6 +933,7 @@ impl_runtime_apis! { use frame_support::traits::StorageInfoTrait; use frame_system_benchmarking::Pallet as SystemBench; use cumulus_pallet_session_benchmarking::Pallet as SessionBench; + use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsiscsBenchmark; // This is defined once again in dispatch_benchmark, because list_benchmarks! // and add_benchmarks! are macros exported by define_benchmarks! macros and those types @@ -980,6 +981,29 @@ impl_runtime_apis! { use cumulus_pallet_session_benchmarking::Pallet as SessionBench; impl cumulus_pallet_session_benchmarking::Config for Runtime {} + use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsiscsBenchmark; + impl pallet_xcm::benchmarking::Config for Runtime { + fn reachable_dest() -> Option { + Some(Parent.into()) + } + + fn teleportable_asset_and_dest() -> Option<(MultiAsset, MultiLocation)> { + // Relay/native token can be teleported between BH and Relay. + Some(( + MultiAsset { + fun: Fungible(EXISTENTIAL_DEPOSIT), + id: Concrete(Parent.into()) + }, + Parent.into(), + )) + } + + fn reserve_transferable_asset_and_dest() -> Option<(MultiAsset, MultiLocation)> { + // Reserve transfers are disabled on BH. 
+ None + } + } + use xcm::latest::prelude::*; use xcm_config::TokenLocation; diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/xcm_config.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/xcm_config.rs index d98012e061b..1b1e6f8ba71 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/xcm_config.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/xcm_config.rs @@ -349,11 +349,6 @@ pub type XcmRouter = WithUniqueTopic<( XcmpQueue, )>; -#[cfg(feature = "runtime-benchmarks")] -parameter_types! { - pub ReachableDest: Option = Some(Parent.into()); -} - impl pallet_xcm::Config for Runtime { type RuntimeEvent = RuntimeEvent; type XcmRouter = XcmRouter; @@ -381,8 +376,6 @@ impl pallet_xcm::Config for Runtime { type SovereignAccountOf = LocationToAccountId; type MaxLockers = ConstU32<8>; type WeightInfo = crate::weights::pallet_xcm::WeightInfo; - #[cfg(feature = "runtime-benchmarks")] - type ReachableDest = ReachableDest; type AdminOrigin = EnsureRoot; type MaxRemoteLockConsumers = ConstU32<0>; type RemoteLockConsumerIdentifier = (); diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/tests/tests.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/tests/tests.rs index 65cca67dac1..39ee2576f5b 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/tests/tests.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/tests/tests.rs @@ -133,12 +133,6 @@ mod bridge_hub_rococo_tests { _ => None, } }), - Box::new(|runtime_event_encoded: Vec| { - match RuntimeEvent::decode(&mut &runtime_event_encoded[..]) { - Ok(RuntimeEvent::XcmpQueue(event)) => Some(event), - _ => None, - } - }), bp_bridge_hub_rococo::BRIDGE_HUB_ROCOCO_PARACHAIN_ID ); @@ -517,12 +511,6 @@ mod bridge_hub_wococo_tests { _ => None, } }), - Box::new(|runtime_event_encoded: Vec| { - match RuntimeEvent::decode(&mut &runtime_event_encoded[..]) { - Ok(RuntimeEvent::XcmpQueue(event)) => Some(event), - _ => None, - } - }), bp_bridge_hub_wococo::BRIDGE_HUB_WOCOCO_PARACHAIN_ID ); diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs index 458876ce46c..9e8fd84e712 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs @@ -544,7 +544,7 @@ mod benches { [pallet_collator_selection, CollatorSelection] [cumulus_pallet_xcmp_queue, XcmpQueue] // XCM - [pallet_xcm, PolkadotXcm] + [pallet_xcm, PalletXcmExtrinsiscsBenchmark::] // NOTE: Make sure you point to the individual modules below. [pallet_xcm_benchmarks::fungible, XcmBalances] [pallet_xcm_benchmarks::generic, XcmGeneric] @@ -772,6 +772,7 @@ impl_runtime_apis! { use frame_support::traits::StorageInfoTrait; use frame_system_benchmarking::Pallet as SystemBench; use cumulus_pallet_session_benchmarking::Pallet as SessionBench; + use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsiscsBenchmark; // This is defined once again in dispatch_benchmark, because list_benchmarks! // and add_benchmarks! are macros exported by define_benchmarks! macros and those types @@ -813,6 +814,29 @@ impl_runtime_apis! 
{ use cumulus_pallet_session_benchmarking::Pallet as SessionBench; impl cumulus_pallet_session_benchmarking::Config for Runtime {} + use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsiscsBenchmark; + impl pallet_xcm::benchmarking::Config for Runtime { + fn reachable_dest() -> Option { + Some(Parent.into()) + } + + fn teleportable_asset_and_dest() -> Option<(MultiAsset, MultiLocation)> { + // Relay/native token can be teleported between BH and Relay. + Some(( + MultiAsset { + fun: Fungible(EXISTENTIAL_DEPOSIT), + id: Concrete(Parent.into()) + }, + Parent.into(), + )) + } + + fn reserve_transferable_asset_and_dest() -> Option<(MultiAsset, MultiLocation)> { + // Reserve transfers are disabled on BH. + None + } + } + use xcm::latest::prelude::*; use xcm_config::WestendLocation; diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/xcm_config.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/xcm_config.rs index a6abca42215..7084882c41f 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/xcm_config.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/xcm_config.rs @@ -284,11 +284,6 @@ pub type XcmRouter = WithUniqueTopic<( XcmpQueue, )>; -#[cfg(feature = "runtime-benchmarks")] -parameter_types! { - pub ReachableDest: Option = Some(Parent.into()); -} - impl pallet_xcm::Config for Runtime { type RuntimeEvent = RuntimeEvent; type XcmRouter = XcmRouter; @@ -316,8 +311,6 @@ impl pallet_xcm::Config for Runtime { type SovereignAccountOf = LocationToAccountId; type MaxLockers = ConstU32<8>; type WeightInfo = crate::weights::pallet_xcm::WeightInfo; - #[cfg(feature = "runtime-benchmarks")] - type ReachableDest = ReachableDest; type AdminOrigin = EnsureRoot; type MaxRemoteLockConsumers = ConstU32<0>; type RemoteLockConsumerIdentifier = (); diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/tests/tests.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/tests/tests.rs index 16dcd10a2ca..4d477e1413e 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/tests/tests.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/tests/tests.rs @@ -118,12 +118,6 @@ bridge_hub_test_utils::test_cases::include_teleports_for_native_asset_works!( _ => None, } }), - Box::new(|runtime_event_encoded: Vec| { - match RuntimeEvent::decode(&mut &runtime_event_encoded[..]) { - Ok(RuntimeEvent::XcmpQueue(event)) => Some(event), - _ => None, - } - }), bp_bridge_hub_westend::BRIDGE_HUB_WESTEND_PARACHAIN_ID ); diff --git a/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/lib.rs b/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/lib.rs index b7bfc9f9c6a..206f4614060 100644 --- a/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/lib.rs +++ b/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/lib.rs @@ -751,7 +751,7 @@ mod benches { [cumulus_pallet_dmp_queue, DmpQueue] [pallet_alliance, Alliance] [pallet_collective, AllianceMotion] - [pallet_xcm, PolkadotXcm] + [pallet_xcm, PalletXcmExtrinsiscsBenchmark::] [pallet_preimage, Preimage] [pallet_scheduler, Scheduler] [pallet_referenda, FellowshipReferenda] @@ -939,6 +939,7 @@ impl_runtime_apis! 
{ use frame_support::traits::StorageInfoTrait; use frame_system_benchmarking::Pallet as SystemBench; use cumulus_pallet_session_benchmarking::Pallet as SessionBench; + use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsiscsBenchmark; let mut list = Vec::::new(); list_benchmarks!(list, extra); @@ -968,6 +969,29 @@ impl_runtime_apis! { use cumulus_pallet_session_benchmarking::Pallet as SessionBench; impl cumulus_pallet_session_benchmarking::Config for Runtime {} + use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsiscsBenchmark; + impl pallet_xcm::benchmarking::Config for Runtime { + fn reachable_dest() -> Option { + Some(Parent.into()) + } + + fn teleportable_asset_and_dest() -> Option<(MultiAsset, MultiLocation)> { + // Relay/native token can be teleported between Collectives and Relay. + Some(( + MultiAsset { + fun: Fungible(EXISTENTIAL_DEPOSIT), + id: Concrete(Parent.into()) + }.into(), + Parent.into(), + )) + } + + fn reserve_transferable_asset_and_dest() -> Option<(MultiAsset, MultiLocation)> { + // Reserve transfers are disabled on Collectives. + None + } + } + let whitelist: Vec = vec![ // Block Number hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef702a5c1b19ab7a04f536c519aca4983ac").to_vec().into(), diff --git a/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/xcm_config.rs b/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/xcm_config.rs index c0b3108d2fb..71845650bd6 100644 --- a/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/xcm_config.rs +++ b/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/xcm_config.rs @@ -293,11 +293,6 @@ pub type XcmRouter = WithUniqueTopic<( XcmpQueue, )>; -#[cfg(feature = "runtime-benchmarks")] -parameter_types! { - pub ReachableDest: Option = Some(Parent.into()); -} - /// Type to convert the Fellows origin to a Plurality `MultiLocation` value. pub type FellowsToPlurality = OriginToPluralityVoice; @@ -325,8 +320,6 @@ impl pallet_xcm::Config for Runtime { type SovereignAccountOf = LocationToAccountId; type MaxLockers = ConstU32<8>; type WeightInfo = crate::weights::pallet_xcm::WeightInfo; - #[cfg(feature = "runtime-benchmarks")] - type ReachableDest = ReachableDest; type AdminOrigin = EnsureRoot; type MaxRemoteLockConsumers = ConstU32<0>; type RemoteLockConsumerIdentifier = (); diff --git a/cumulus/parachains/runtimes/contracts/contracts-rococo/src/lib.rs b/cumulus/parachains/runtimes/contracts/contracts-rococo/src/lib.rs index e41db7d9213..2a2f4141033 100644 --- a/cumulus/parachains/runtimes/contracts/contracts-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/contracts/contracts-rococo/src/lib.rs @@ -433,7 +433,7 @@ mod benches { [pallet_timestamp, Timestamp] [pallet_collator_selection, CollatorSelection] [pallet_contracts, Contracts] - [pallet_xcm, PolkadotXcm] + [pallet_xcm, PalletXcmExtrinsiscsBenchmark::] ); } @@ -678,6 +678,7 @@ impl_runtime_apis! { use frame_support::traits::StorageInfoTrait; use frame_system_benchmarking::Pallet as SystemBench; use cumulus_pallet_session_benchmarking::Pallet as SessionBench; + use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsiscsBenchmark; let mut list = Vec::::new(); list_benchmarks!(list, extra); @@ -707,6 +708,30 @@ impl_runtime_apis! 
{ use cumulus_pallet_session_benchmarking::Pallet as SessionBench; impl cumulus_pallet_session_benchmarking::Config for Runtime {} + use xcm::latest::prelude::*; + use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsiscsBenchmark; + impl pallet_xcm::benchmarking::Config for Runtime { + fn reachable_dest() -> Option { + Some(Parent.into()) + } + + fn teleportable_asset_and_dest() -> Option<(MultiAsset, MultiLocation)> { + // Relay/native token can be teleported between Contracts-System-Para and Relay. + Some(( + MultiAsset { + fun: Fungible(EXISTENTIAL_DEPOSIT), + id: Concrete(Parent.into()) + }, + Parent.into(), + )) + } + + fn reserve_transferable_asset_and_dest() -> Option<(MultiAsset, MultiLocation)> { + // Reserve transfers are disabled on Contracts-System-Para. + None + } + } + let whitelist: Vec = vec![ // Block Number hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef702a5c1b19ab7a04f536c519aca4983ac").to_vec().into(), diff --git a/cumulus/parachains/runtimes/contracts/contracts-rococo/src/xcm_config.rs b/cumulus/parachains/runtimes/contracts/contracts-rococo/src/xcm_config.rs index 4c9f357e111..faee1c68fe6 100644 --- a/cumulus/parachains/runtimes/contracts/contracts-rococo/src/xcm_config.rs +++ b/cumulus/parachains/runtimes/contracts/contracts-rococo/src/xcm_config.rs @@ -227,11 +227,6 @@ pub type XcmRouter = WithUniqueTopic<( XcmpQueue, )>; -#[cfg(feature = "runtime-benchmarks")] -parameter_types! { - pub ReachableDest: Option = Some(Parent.into()); -} - impl pallet_xcm::Config for Runtime { type RuntimeEvent = RuntimeEvent; // We want to disallow users sending (arbitrary) XCMs from this chain. @@ -258,8 +253,6 @@ impl pallet_xcm::Config for Runtime { type MaxLockers = ConstU32<8>; // FIXME: Replace with benchmarked weight info type WeightInfo = pallet_xcm::TestWeightInfo; - #[cfg(feature = "runtime-benchmarks")] - type ReachableDest = ReachableDest; type AdminOrigin = EnsureRoot; type MaxRemoteLockConsumers = ConstU32<0>; type RemoteLockConsumerIdentifier = (); diff --git a/cumulus/parachains/runtimes/testing/penpal/src/lib.rs b/cumulus/parachains/runtimes/testing/penpal/src/lib.rs index 5ef9af7c712..1ddad31920a 100644 --- a/cumulus/parachains/runtimes/testing/penpal/src/lib.rs +++ b/cumulus/parachains/runtimes/testing/penpal/src/lib.rs @@ -434,7 +434,7 @@ parameter_types! { // pub type AssetsForceOrigin = // EnsureOneOf, EnsureXcm>>; -impl pallet_assets::Config for Runtime { +impl pallet_assets::Config for Runtime { type RuntimeEvent = RuntimeEvent; type Balance = Balance; type AssetId = AssetId; @@ -577,7 +577,12 @@ impl pallet_asset_tx_payment::Config for Runtime { type RuntimeEvent = RuntimeEvent; type Fungibles = Assets; type OnChargeAssetTransaction = pallet_asset_tx_payment::FungiblesAdapter< - pallet_assets::BalanceToAssetBalance, + pallet_assets::BalanceToAssetBalance< + Balances, + Runtime, + ConvertInto, + pallet_assets::Instance1, + >, AssetsToBlockAuthor, >; } @@ -619,7 +624,7 @@ construct_runtime!( MessageQueue: pallet_message_queue::{Pallet, Call, Storage, Event} = 34, // The main stage. 
- Assets: pallet_assets::{Pallet, Call, Storage, Event} = 50, + Assets: pallet_assets::::{Pallet, Call, Storage, Event} = 50, Sudo: pallet_sudo::{Pallet, Call, Storage, Event, Config} = 255, } diff --git a/cumulus/parachains/runtimes/testing/penpal/src/xcm_config.rs b/cumulus/parachains/runtimes/testing/penpal/src/xcm_config.rs index 710dfd79877..74d9a0b071d 100644 --- a/cumulus/parachains/runtimes/testing/penpal/src/xcm_config.rs +++ b/cumulus/parachains/runtimes/testing/penpal/src/xcm_config.rs @@ -38,6 +38,7 @@ use frame_support::{ }; use frame_system::EnsureRoot; use pallet_asset_tx_payment::HandleCredit; +use pallet_assets::Instance1; use pallet_xcm::XcmPassthrough; use polkadot_parachain_primitives::primitives::Sibling; use polkadot_runtime_common::impls::ToAuthor; @@ -48,9 +49,10 @@ use xcm_builder::{ AllowSubscriptionsFrom, AllowTopLevelPaidExecutionFrom, AsPrefixedGeneralIndex, ConvertedConcreteId, CurrencyAdapter, DenyReserveTransferToRelayChain, DenyThenTry, EnsureXcmOrigin, FixedWeightBounds, FungiblesAdapter, IsConcrete, LocalMint, NativeAsset, - ParentIsPreset, RelayChainAsNative, SiblingParachainAsNative, SiblingParachainConvertsVia, - SignedAccountId32AsNative, SignedToAccountId32, SovereignSignedViaLocation, TakeWeightCredit, - TrailingSetTopicAsId, UsingComponents, WithComputedOrigin, WithUniqueTopic, + ParentAsSuperuser, ParentIsPreset, RelayChainAsNative, SiblingParachainAsNative, + SiblingParachainConvertsVia, SignedAccountId32AsNative, SignedToAccountId32, + SovereignSignedViaLocation, TakeWeightCredit, TrailingSetTopicAsId, UsingComponents, + WithComputedOrigin, WithUniqueTopic, }; use xcm_executor::{traits::JustTry, XcmExecutor}; @@ -126,6 +128,9 @@ pub type XcmOriginToTransactDispatchOrigin = ( // Native converter for sibling Parachains; will convert to a `SiblingPara` origin when // recognized. SiblingParachainAsNative, + // Superuser converter for the Relay-chain (Parent) location. This will allow it to issue a + // transaction from the Root origin. + ParentAsSuperuser, // Native signed account converter; this just converts an `AccountId32` origin into a normal // `RuntimeOrigin::Signed` origin of the same 32-byte value. SignedAccountId32AsNative, @@ -182,14 +187,25 @@ pub type Barrier = TrailingSetTopicAsId< /// Type alias to conveniently refer to `frame_system`'s `Config::AccountId`. pub type AccountIdOf = ::AccountId; -/// Asset filter that allows all assets from a certain location. +/// Asset filter that allows all assets from a certain location matching asset id. pub struct AssetsFrom(PhantomData); impl> ContainsPair for AssetsFrom { fn contains(asset: &MultiAsset, origin: &MultiLocation) -> bool { let loc = T::get(); &loc == origin && matches!(asset, MultiAsset { id: AssetId::Concrete(asset_loc), fun: Fungible(_a) } - if asset_loc.match_and_split(&loc).is_some()) + if asset_loc.starts_with(&loc)) + } +} + +/// Asset filter that allows native/relay asset if coming from a certain location. +pub struct NativeAssetFrom(PhantomData); +impl> ContainsPair for NativeAssetFrom { + fn contains(asset: &MultiAsset, origin: &MultiLocation) -> bool { + let loc = T::get(); + &loc == origin && + matches!(asset, MultiAsset { id: AssetId::Concrete(asset_loc), fun: Fungible(_a) } + if *asset_loc == MultiLocation::from(Parent)) } } @@ -208,56 +224,19 @@ where /// A `HandleCredit` implementation that naively transfers the fees to the block author. /// Will drop and burn the assets in case the transfer fails. 
pub struct AssetsToBlockAuthor(PhantomData); -impl HandleCredit, pallet_assets::Pallet> for AssetsToBlockAuthor +impl HandleCredit, pallet_assets::Pallet> for AssetsToBlockAuthor where - R: pallet_authorship::Config + pallet_assets::Config, + R: pallet_authorship::Config + pallet_assets::Config, AccountIdOf: From + Into, { - fn handle_credit(credit: Credit, pallet_assets::Pallet>) { + fn handle_credit(credit: Credit, pallet_assets::Pallet>) { if let Some(author) = pallet_authorship::Pallet::::author() { // In case of error: Will drop the result triggering the `OnDrop` of the imbalance. - let _ = pallet_assets::Pallet::::resolve(&author, credit); + let _ = pallet_assets::Pallet::::resolve(&author, credit); } } } -pub trait Reserve { - /// Returns assets reserve location. - fn reserve(&self) -> Option; -} - -// Takes the chain part of a MultiAsset -impl Reserve for MultiAsset { - fn reserve(&self) -> Option { - if let AssetId::Concrete(location) = self.id { - let first_interior = location.first_interior(); - let parents = location.parent_count(); - match (parents, first_interior) { - (0, Some(Parachain(id))) => Some(MultiLocation::new(0, X1(Parachain(*id)))), - (1, Some(Parachain(id))) => Some(MultiLocation::new(1, X1(Parachain(*id)))), - (1, _) => Some(MultiLocation::parent()), - _ => None, - } - } else { - None - } - } -} - -/// A `FilterAssetLocation` implementation. Filters multi native assets whose -/// reserve is same with `origin`. -pub struct MultiNativeAsset; -impl ContainsPair for MultiNativeAsset { - fn contains(asset: &MultiAsset, origin: &MultiLocation) -> bool { - if let Some(ref reserve) = asset.reserve() { - if reserve == origin { - return true - } - } - false - } -} - parameter_types! { /// The location that this chain recognizes as the Relay network's Asset Hub. pub SystemAssetHubLocation: MultiLocation = MultiLocation::new(1, X1(Parachain(1000))); @@ -268,7 +247,8 @@ parameter_types! { pub CheckingAccount: AccountId = PolkadotXcm::check_account(); } -pub type Reserves = (NativeAsset, AssetsFrom); +pub type Reserves = + (NativeAsset, AssetsFrom, NativeAssetFrom); pub struct XcmConfig; impl xcm_executor::Config for XcmConfig { @@ -277,7 +257,8 @@ impl xcm_executor::Config for XcmConfig { // How to withdraw and deposit an asset. type AssetTransactor = AssetTransactors; type OriginConverter = XcmOriginToTransactDispatchOrigin; - type IsReserve = MultiNativeAsset; // TODO: maybe needed to be replaced by Reserves + type IsReserve = Reserves; + // no teleport trust established with other chains type IsTeleporter = NativeAsset; type UniversalLocation = UniversalLocation; type Barrier = Barrier; @@ -312,11 +293,6 @@ pub type XcmRouter = WithUniqueTopic<( XcmpQueue, )>; -#[cfg(feature = "runtime-benchmarks")] -parameter_types! 
{ - pub ReachableDest: Option = Some(Parent.into()); -} - impl pallet_xcm::Config for Runtime { type RuntimeEvent = RuntimeEvent; type SendXcmOrigin = EnsureXcmOrigin; @@ -342,8 +318,6 @@ impl pallet_xcm::Config for Runtime { type SovereignAccountOf = LocationToAccountId; type MaxLockers = ConstU32<8>; type WeightInfo = pallet_xcm::TestWeightInfo; - #[cfg(feature = "runtime-benchmarks")] - type ReachableDest = ReachableDest; type AdminOrigin = EnsureRoot; type MaxRemoteLockConsumers = ConstU32<0>; type RemoteLockConsumerIdentifier = (); diff --git a/cumulus/parachains/runtimes/testing/rococo-parachain/src/lib.rs b/cumulus/parachains/runtimes/testing/rococo-parachain/src/lib.rs index 4cb83ccf820..6df00d43e8d 100644 --- a/cumulus/parachains/runtimes/testing/rococo-parachain/src/lib.rs +++ b/cumulus/parachains/runtimes/testing/rococo-parachain/src/lib.rs @@ -492,11 +492,6 @@ pub type XcmRouter = WithUniqueTopic<( XcmpQueue, )>; -#[cfg(feature = "runtime-benchmarks")] -parameter_types! { - pub ReachableDest: Option = Some(Parent.into()); -} - impl pallet_xcm::Config for Runtime { type RuntimeEvent = RuntimeEvent; type SendXcmOrigin = EnsureXcmOrigin; @@ -518,8 +513,6 @@ impl pallet_xcm::Config for Runtime { type SovereignAccountOf = LocationToAccountId; type MaxLockers = ConstU32<8>; type WeightInfo = pallet_xcm::TestWeightInfo; - #[cfg(feature = "runtime-benchmarks")] - type ReachableDest = ReachableDest; type AdminOrigin = EnsureRoot; type MaxRemoteLockConsumers = ConstU32<0>; type RemoteLockConsumerIdentifier = (); diff --git a/cumulus/scripts/bridges_rococo_westend.sh b/cumulus/scripts/bridges_rococo_westend.sh index ce8480685aa..82b5f1942b2 100755 --- a/cumulus/scripts/bridges_rococo_westend.sh +++ b/cumulus/scripts/bridges_rococo_westend.sh @@ -301,9 +301,21 @@ case "$1" in 0 \ "Unlimited" ;; + withdraw-reserve-assets-from-asset-hub-rococo-local) + ensure_polkadot_js_api + # send back only 100000000000 wrappedWNDs to Alice account on AHW + limited_reserve_transfer_assets \ + "ws://127.0.0.1:9910" \ + "//Alice" \ + "$(jq --null-input '{ "V3": { "parents": 2, "interior": { "X2": [ { "GlobalConsensus": "Westend" }, { "Parachain": 1000 } ] } } }')" \ + "$(jq --null-input '{ "V3": { "parents": 0, "interior": { "X1": { "AccountId32": { "id": [212, 53, 147, 199, 21, 253, 211, 28, 97, 20, 26, 189, 4, 169, 159, 214, 130, 44, 133, 88, 133, 76, 205, 227, 154, 86, 132, 231, 165, 109, 162, 125] } } } } }')" \ + "$(jq --null-input '{ "V3": [ { "id": { "Concrete": { "parents": 2, "interior": { "X1": { "GlobalConsensus": "Westend" } } } }, "fun": { "Fungible": 140000000000 } } ] }')" \ + 0 \ + "Unlimited" + ;; reserve-transfer-assets-from-asset-hub-westend-local) ensure_polkadot_js_api - # send WOCs to Alice account on AHR + # send WNDs to Alice account on AHR limited_reserve_transfer_assets \ "ws://127.0.0.1:9010" \ "//Alice" \ @@ -313,6 +325,18 @@ case "$1" in 0 \ "Unlimited" ;; + withdraw-reserve-assets-from-asset-hub-westend-local) + ensure_polkadot_js_api + # send back only 100000000000 wrappedROCs to Alice account on AHR + limited_reserve_transfer_assets \ + "ws://127.0.0.1:9010" \ + "//Alice" \ + "$(jq --null-input '{ "V3": { "parents": 2, "interior": { "X2": [ { "GlobalConsensus": "Rococo" }, { "Parachain": 1000 } ] } } }')" \ + "$(jq --null-input '{ "V3": { "parents": 0, "interior": { "X1": { "AccountId32": { "id": [212, 53, 147, 199, 21, 253, 211, 28, 97, 20, 26, 189, 4, 169, 159, 214, 130, 44, 133, 88, 133, 76, 205, 227, 154, 86, 132, 231, 165, 109, 162, 125] } } } } }')" \ + "$(jq 
--null-input '{ "V3": [ { "id": { "Concrete": { "parents": 2, "interior": { "X1": { "GlobalConsensus": "Rococo" } } } }, "fun": { "Fungible": 100000000000 } } ] }')" \ + 0 \ + "Unlimited" + ;; claim-rewards-bridge-hub-rococo-local) ensure_polkadot_js_api # bhwd -> [62, 68, 77, 64] -> 0x62687764 @@ -360,7 +384,9 @@ case "$1" in - init-asset-hub-westend-local - init-bridge-hub-westend-local - reserve-transfer-assets-from-asset-hub-rococo-local + - withdraw-reserve-assets-from-asset-hub-rococo-local - reserve-transfer-assets-from-asset-hub-westend-local + - withdraw-reserve-assets-from-asset-hub-westend-local - claim-rewards-bridge-hub-rococo-local - claim-rewards-bridge-hub-westend-local"; exit 1 diff --git a/cumulus/xcm/xcm-emulator/src/lib.rs b/cumulus/xcm/xcm-emulator/src/lib.rs index 7ff5512d214..f2e4ff397c4 100644 --- a/cumulus/xcm/xcm-emulator/src/lib.rs +++ b/cumulus/xcm/xcm-emulator/src/lib.rs @@ -1443,9 +1443,9 @@ pub struct TestContext { /// These arguments can be easily reused and shared between the assertion functions /// and dispatchable functions, which are also stored in `Test`. /// `Origin` corresponds to the chain where the XCM interaction starts with an initial execution. -/// `Destination` corresponds to the last chain where an effect of the intial execution is expected -/// happen. `Hops` refer all the ordered intermediary chains an initial XCM execution can provoke -/// some effect. +/// `Destination` corresponds to the last chain where an effect of the initial execution is expected +/// to happen. `Hops` refer to all the ordered intermediary chains an initial XCM execution can +/// provoke some effect on. #[derive(Clone)] pub struct Test where @@ -1499,7 +1499,7 @@ where let chain_name = std::any::type_name::(); self.hops_assertion.insert(chain_name.to_string(), assertion); } - /// Stores an assertion in a particular Chain + /// Stores a dispatchable in a particular Chain pub fn set_dispatchable(&mut self, dispatchable: fn(Self) -> DispatchResult) { let chain_name = std::any::type_name::(); self.hops_dispatchable.insert(chain_name.to_string(), dispatchable); diff --git a/polkadot/runtime/parachains/src/paras/mod.rs b/polkadot/runtime/parachains/src/paras/mod.rs index cd73d23bdad..ef9dfedd735 100644 --- a/polkadot/runtime/parachains/src/paras/mod.rs +++ b/polkadot/runtime/parachains/src/paras/mod.rs @@ -2064,7 +2064,7 @@ impl Pallet { } /// Submits a given PVF check statement with corresponding signature as an unsigned transaction - /// into the memory pool. Ultimately, that disseminates the transaction accross the network. + /// into the memory pool. Ultimately, that disseminates the transaction across the network. /// /// This function expects an offchain context and cannot be callable from the on-chain logic. /// diff --git a/polkadot/runtime/rococo/src/lib.rs b/polkadot/runtime/rococo/src/lib.rs index 57767b70d23..40ef22107a7 100644 --- a/polkadot/runtime/rococo/src/lib.rs +++ b/polkadot/runtime/rococo/src/lib.rs @@ -1588,7 +1588,7 @@ mod benches { [pallet_asset_rate, AssetRate] [pallet_whitelist, Whitelist] // XCM - [pallet_xcm, XcmPallet] + [pallet_xcm, PalletXcmExtrinsiscsBenchmark::] [pallet_xcm_benchmarks::fungible, pallet_xcm_benchmarks::fungible::Pallet::] [pallet_xcm_benchmarks::generic, pallet_xcm_benchmarks::generic::Pallet::] ); @@ -2064,6 +2064,8 @@ sp_api::impl_runtime_apis! 
{ use frame_system_benchmarking::Pallet as SystemBench; use frame_benchmarking::baseline::Pallet as Baseline; + use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsiscsBenchmark; + let mut list = Vec::::new(); list_benchmarks!(list, extra); @@ -2081,6 +2083,7 @@ sp_api::impl_runtime_apis! { use frame_benchmarking::{Benchmarking, BenchmarkBatch, BenchmarkError}; use frame_system_benchmarking::Pallet as SystemBench; use frame_benchmarking::baseline::Pallet as Baseline; + use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsiscsBenchmark; use sp_storage::TrackedStorageKey; use xcm::latest::prelude::*; use xcm_config::{ @@ -2097,6 +2100,33 @@ sp_api::impl_runtime_apis! { impl frame_system_benchmarking::Config for Runtime {} impl frame_benchmarking::baseline::Config for Runtime {} + impl pallet_xcm::benchmarking::Config for Runtime { + fn reachable_dest() -> Option { + Some(crate::xcm_config::AssetHub::get()) + } + + fn teleportable_asset_and_dest() -> Option<(MultiAsset, MultiLocation)> { + // Relay/native token can be teleported to/from AH. + Some(( + MultiAsset { + fun: Fungible(EXISTENTIAL_DEPOSIT), + id: Concrete(Here.into()) + }, + crate::xcm_config::AssetHub::get(), + )) + } + + fn reserve_transferable_asset_and_dest() -> Option<(MultiAsset, MultiLocation)> { + // Relay can reserve transfer native token to some random parachain. + Some(( + MultiAsset { + fun: Fungible(EXISTENTIAL_DEPOSIT), + id: Concrete(Here.into()) + }, + Parachain(43211234).into(), + )) + } + } impl pallet_xcm_benchmarks::Config for Runtime { type XcmConfig = XcmConfig; type AccountIdConverter = LocationConverter; diff --git a/polkadot/runtime/rococo/src/xcm_config.rs b/polkadot/runtime/rococo/src/xcm_config.rs index 0814b77414f..c8f8f59dae9 100644 --- a/polkadot/runtime/rococo/src/xcm_config.rs +++ b/polkadot/runtime/rococo/src/xcm_config.rs @@ -211,11 +211,6 @@ parameter_types! { pub const FellowsBodyId: BodyId = BodyId::Technical; } -#[cfg(feature = "runtime-benchmarks")] -parameter_types! { - pub ReachableDest: Option = Some(Parachain(ASSET_HUB_ID).into()); -} - /// Type to convert an `Origin` type value into a `MultiLocation` value which represents an interior /// location of this chain. pub type LocalOriginToLocation = ( @@ -269,7 +264,5 @@ impl pallet_xcm::Config for Runtime { type MaxRemoteLockConsumers = ConstU32<0>; type RemoteLockConsumerIdentifier = (); type WeightInfo = crate::weights::pallet_xcm::WeightInfo; - #[cfg(feature = "runtime-benchmarks")] - type ReachableDest = ReachableDest; type AdminOrigin = EnsureRoot; } diff --git a/polkadot/runtime/test-runtime/src/xcm_config.rs b/polkadot/runtime/test-runtime/src/xcm_config.rs index 400658b1386..ae4faecf700 100644 --- a/polkadot/runtime/test-runtime/src/xcm_config.rs +++ b/polkadot/runtime/test-runtime/src/xcm_config.rs @@ -127,11 +127,6 @@ impl xcm_executor::Config for XcmConfig { type Aliasers = Nothing; } -#[cfg(feature = "runtime-benchmarks")] -parameter_types! { - pub ReachableDest: Option = Some(xcm::latest::Junctions::Here.into()); -} - impl pallet_xcm::Config for crate::Runtime { // The config types here are entirely configurable, since the only one that is sorely needed // is `XcmExecutor`, which will be used in unit tests located in xcm-executor. 
@@ -157,7 +152,5 @@ impl pallet_xcm::Config for crate::Runtime { type MaxRemoteLockConsumers = frame_support::traits::ConstU32<0>; type RemoteLockConsumerIdentifier = (); type WeightInfo = pallet_xcm::TestWeightInfo; - #[cfg(feature = "runtime-benchmarks")] - type ReachableDest = ReachableDest; type AdminOrigin = EnsureRoot; } diff --git a/polkadot/runtime/westend/src/lib.rs b/polkadot/runtime/westend/src/lib.rs index 1c97e54da48..d80640c016f 100644 --- a/polkadot/runtime/westend/src/lib.rs +++ b/polkadot/runtime/westend/src/lib.rs @@ -1626,7 +1626,7 @@ mod benches { [pallet_whitelist, Whitelist] [pallet_asset_rate, AssetRate] // XCM - [pallet_xcm, XcmPallet] + [pallet_xcm, PalletXcmExtrinsiscsBenchmark::] // NOTE: Make sure you point to the individual modules below. [pallet_xcm_benchmarks::fungible, XcmBalances] [pallet_xcm_benchmarks::generic, XcmGeneric] @@ -2145,6 +2145,7 @@ sp_api::impl_runtime_apis! { use pallet_session_benchmarking::Pallet as SessionBench; use pallet_offences_benchmarking::Pallet as OffencesBench; use pallet_election_provider_support_benchmarking::Pallet as ElectionProviderBench; + use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsiscsBenchmark; use frame_system_benchmarking::Pallet as SystemBench; use pallet_nomination_pools_benchmarking::Pallet as NominationPoolsBench; @@ -2172,12 +2173,37 @@ sp_api::impl_runtime_apis! { use pallet_session_benchmarking::Pallet as SessionBench; use pallet_offences_benchmarking::Pallet as OffencesBench; use pallet_election_provider_support_benchmarking::Pallet as ElectionProviderBench; + use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsiscsBenchmark; use frame_system_benchmarking::Pallet as SystemBench; use pallet_nomination_pools_benchmarking::Pallet as NominationPoolsBench; impl pallet_session_benchmarking::Config for Runtime {} impl pallet_offences_benchmarking::Config for Runtime {} impl pallet_election_provider_support_benchmarking::Config for Runtime {} + impl pallet_xcm::benchmarking::Config for Runtime { + fn reachable_dest() -> Option { + Some(crate::xcm_config::AssetHub::get()) + } + + fn teleportable_asset_and_dest() -> Option<(MultiAsset, MultiLocation)> { + // Relay/native token can be teleported to/from AH. + Some(( + MultiAsset { fun: Fungible(EXISTENTIAL_DEPOSIT), id: Concrete(Here.into()) }, + crate::xcm_config::AssetHub::get(), + )) + } + + fn reserve_transferable_asset_and_dest() -> Option<(MultiAsset, MultiLocation)> { + // Relay can reserve transfer native token to some random parachain. + Some(( + MultiAsset { + fun: Fungible(EXISTENTIAL_DEPOSIT), + id: Concrete(Here.into()) + }, + crate::Junction::Parachain(43211234).into(), + )) + } + } impl frame_system_benchmarking::Config for Runtime {} impl pallet_nomination_pools_benchmarking::Config for Runtime {} impl runtime_parachains::disputes::slashing::benchmarking::Config for Runtime {} diff --git a/polkadot/runtime/westend/src/xcm_config.rs b/polkadot/runtime/westend/src/xcm_config.rs index 64e07317fc7..9ab6470f6da 100644 --- a/polkadot/runtime/westend/src/xcm_config.rs +++ b/polkadot/runtime/westend/src/xcm_config.rs @@ -119,11 +119,6 @@ parameter_types! { pub const MaxAssetsIntoHolding: u32 = 64; } -#[cfg(feature = "runtime-benchmarks")] -parameter_types! 
{ - pub ReachableDest: Option = Some(Parachain(ASSET_HUB_ID).into()); -} - pub type TrustedTeleporters = ( xcm_builder::Case, xcm_builder::Case, @@ -265,7 +260,5 @@ impl pallet_xcm::Config for Runtime { type MaxRemoteLockConsumers = ConstU32<0>; type RemoteLockConsumerIdentifier = (); type WeightInfo = crate::weights::pallet_xcm::WeightInfo; - #[cfg(feature = "runtime-benchmarks")] - type ReachableDest = ReachableDest; type AdminOrigin = EnsureRoot; } diff --git a/polkadot/xcm/pallet-xcm/Cargo.toml b/polkadot/xcm/pallet-xcm/Cargo.toml index 6b5d5e75de8..cc5d7d97c45 100644 --- a/polkadot/xcm/pallet-xcm/Cargo.toml +++ b/polkadot/xcm/pallet-xcm/Cargo.toml @@ -13,7 +13,6 @@ scale-info = { version = "2.10.0", default-features = false, features = ["derive serde = { version = "1.0.188", optional = true, features = ["derive"] } log = { version = "0.4.17", default-features = false } -frame-benchmarking = { path = "../../../substrate/frame/benchmarking", default-features = false, optional = true } frame-support = { path = "../../../substrate/frame/support", default-features = false} frame-system = { path = "../../../substrate/frame/system", default-features = false} sp-core = { path = "../../../substrate/primitives/core", default-features = false} @@ -25,8 +24,12 @@ xcm = { package = "staging-xcm", path = "..", default-features = false } xcm-executor = { package = "staging-xcm-executor", path = "../xcm-executor", default-features = false } xcm-builder = { package = "staging-xcm-builder", path = "../xcm-builder", default-features = false } +# marked optional, used in benchmarking +frame-benchmarking = { path = "../../../substrate/frame/benchmarking", default-features = false, optional = true } +pallet-balances = { path = "../../../substrate/frame/balances", default-features = false, optional = true } + [dev-dependencies] -pallet-balances = { path = "../../../substrate/frame/balances" } +pallet-assets = { path = "../../../substrate/frame/assets" } polkadot-runtime-parachains = { path = "../../runtime/parachains" } polkadot-parachain-primitives = { path = "../../parachain" } @@ -39,6 +42,7 @@ std = [ "frame-support/std", "frame-system/std", "log/std", + "pallet-balances/std", "scale-info/std", "serde", "sp-core/std", @@ -53,6 +57,7 @@ runtime-benchmarks = [ "frame-benchmarking/runtime-benchmarks", "frame-support/runtime-benchmarks", "frame-system/runtime-benchmarks", + "pallet-assets/runtime-benchmarks", "pallet-balances/runtime-benchmarks", "polkadot-parachain-primitives/runtime-benchmarks", "polkadot-runtime-parachains/runtime-benchmarks", @@ -63,6 +68,7 @@ runtime-benchmarks = [ try-runtime = [ "frame-support/try-runtime", "frame-system/try-runtime", + "pallet-assets/try-runtime", "pallet-balances/try-runtime", "polkadot-runtime-parachains/try-runtime", "sp-runtime/try-runtime", diff --git a/polkadot/xcm/pallet-xcm/src/benchmarking.rs b/polkadot/xcm/pallet-xcm/src/benchmarking.rs index 3eecbfec518..3aca24791fc 100644 --- a/polkadot/xcm/pallet-xcm/src/benchmarking.rs +++ b/polkadot/xcm/pallet-xcm/src/benchmarking.rs @@ -16,15 +16,56 @@ use super::*; use bounded_collections::{ConstU32, WeakBoundedVec}; -use frame_benchmarking::{benchmarks, BenchmarkError, BenchmarkResult}; -use frame_support::weights::Weight; +use frame_benchmarking::{benchmarks, whitelisted_caller, BenchmarkError, BenchmarkResult}; +use frame_support::{traits::Currency, weights::Weight}; use frame_system::RawOrigin; use sp_std::prelude::*; use xcm::{latest::prelude::*, v2}; type RuntimeOrigin = ::RuntimeOrigin; +// existential 
deposit multiplier +const ED_MULTIPLIER: u32 = 100; + +/// Pallet we're benchmarking here. +pub struct Pallet(crate::Pallet); + +/// Trait that must be implemented by runtime to be able to benchmark pallet properly. +pub trait Config: crate::Config { + /// A `MultiLocation` that can be reached via `XcmRouter`. Used only in benchmarks. + /// + /// If `None`, the benchmarks that depend on a reachable destination will be skipped. + fn reachable_dest() -> Option { + None + } + + /// A `(MultiAsset, MultiLocation)` pair representing asset and the destination it can be + /// teleported to. Used only in benchmarks. + /// + /// Implementation should also make sure `dest` is reachable/connected. + /// + /// If `None`, the benchmarks that depend on this will be skipped. + fn teleportable_asset_and_dest() -> Option<(MultiAsset, MultiLocation)> { + None + } + + /// A `(MultiAsset, MultiLocation)` pair representing asset and the destination it can be + /// reserve-transferred to. Used only in benchmarks. + /// + /// Implementation should also make sure `dest` is reachable/connected. + /// + /// If `None`, the benchmarks that depend on this will be skipped. + fn reserve_transferable_asset_and_dest() -> Option<(MultiAsset, MultiLocation)> { + None + } +} + benchmarks! { + where_clause { + where + T: pallet_balances::Config, + ::Balance: From + Into, + } send { let send_origin = T::SendXcmOrigin::try_successful_origin().map_err(|_| BenchmarkError::Weightless)?; @@ -32,7 +73,7 @@ benchmarks! { return Err(BenchmarkError::Override(BenchmarkResult::from_weight(Weight::MAX))) } let msg = Xcm(vec![ClearOrigin]); - let versioned_dest: VersionedMultiLocation = T::ReachableDest::get().ok_or( + let versioned_dest: VersionedMultiLocation = T::reachable_dest().ok_or( BenchmarkError::Override(BenchmarkResult::from_weight(Weight::MAX)), )? .into(); @@ -40,44 +81,82 @@ benchmarks! 
{ }: _>(send_origin, Box::new(versioned_dest), Box::new(versioned_msg)) teleport_assets { - let asset: MultiAsset = (Here, 10).into(); - let send_origin = - T::ExecuteXcmOrigin::try_successful_origin().map_err(|_| BenchmarkError::Weightless)?; - let origin_location = T::ExecuteXcmOrigin::try_origin(send_origin.clone()) + let (asset, destination) = T::teleportable_asset_and_dest().ok_or( + BenchmarkError::Override(BenchmarkResult::from_weight(Weight::MAX)), + )?; + + let transferred_amount = match &asset.fun { + Fungible(amount) => *amount, + _ => return Err(BenchmarkError::Stop("Benchmark asset not fungible")), + }.into(); + let assets: MultiAssets = asset.into(); + + let existential_deposit = T::ExistentialDeposit::get(); + let caller = whitelisted_caller(); + + // Give some multiple of the existential deposit + let balance = existential_deposit.saturating_mul(ED_MULTIPLIER.into()); + assert!(balance >= transferred_amount); + let _ = as Currency<_>>::make_free_balance_be(&caller, balance); + // verify initial balance + assert_eq!(pallet_balances::Pallet::::free_balance(&caller), balance); + + let send_origin = RawOrigin::Signed(caller.clone()); + let origin_location = T::ExecuteXcmOrigin::try_origin(send_origin.clone().into()) .map_err(|_| BenchmarkError::Override(BenchmarkResult::from_weight(Weight::MAX)))?; - if !T::XcmTeleportFilter::contains(&(origin_location, vec![asset.clone()])) { + if !T::XcmTeleportFilter::contains(&(origin_location, assets.clone().into_inner())) { return Err(BenchmarkError::Override(BenchmarkResult::from_weight(Weight::MAX))) } let recipient = [0u8; 32]; - let versioned_dest: VersionedMultiLocation = T::ReachableDest::get().ok_or( - BenchmarkError::Override(BenchmarkResult::from_weight(Weight::MAX)), - )? - .into(); + let versioned_dest: VersionedMultiLocation = destination.into(); let versioned_beneficiary: VersionedMultiLocation = AccountId32 { network: None, id: recipient.into() }.into(); - let versioned_assets: VersionedMultiAssets = asset.into(); - }: _>(send_origin, Box::new(versioned_dest), Box::new(versioned_beneficiary), Box::new(versioned_assets), 0) + let versioned_assets: VersionedMultiAssets = assets.into(); + }: _>(send_origin.into(), Box::new(versioned_dest), Box::new(versioned_beneficiary), Box::new(versioned_assets), 0) + verify { + // verify balance after transfer, decreased by transferred amount (+ maybe XCM delivery fees) + assert!(pallet_balances::Pallet::::free_balance(&caller) <= balance - transferred_amount); + } reserve_transfer_assets { - let asset: MultiAsset = (Here, 10).into(); - let send_origin = - T::ExecuteXcmOrigin::try_successful_origin().map_err(|_| BenchmarkError::Weightless)?; - let origin_location = T::ExecuteXcmOrigin::try_origin(send_origin.clone()) + let (asset, destination) = T::reserve_transferable_asset_and_dest().ok_or( + BenchmarkError::Override(BenchmarkResult::from_weight(Weight::MAX)), + )?; + + let transferred_amount = match &asset.fun { + Fungible(amount) => *amount, + _ => return Err(BenchmarkError::Stop("Benchmark asset not fungible")), + }.into(); + let assets: MultiAssets = asset.into(); + + let existential_deposit = T::ExistentialDeposit::get(); + let caller = whitelisted_caller(); + + // Give some multiple of the existential deposit + let balance = existential_deposit.saturating_mul(ED_MULTIPLIER.into()); + assert!(balance >= transferred_amount); + let _ = as Currency<_>>::make_free_balance_be(&caller, balance); + // verify initial balance + assert_eq!(pallet_balances::Pallet::::free_balance(&caller), 
balance); + + let send_origin = RawOrigin::Signed(caller.clone()); + let origin_location = T::ExecuteXcmOrigin::try_origin(send_origin.clone().into()) .map_err(|_| BenchmarkError::Override(BenchmarkResult::from_weight(Weight::MAX)))?; - if !T::XcmReserveTransferFilter::contains(&(origin_location, vec![asset.clone()])) { + if !T::XcmReserveTransferFilter::contains(&(origin_location, assets.clone().into_inner())) { return Err(BenchmarkError::Override(BenchmarkResult::from_weight(Weight::MAX))) } let recipient = [0u8; 32]; - let versioned_dest: VersionedMultiLocation = T::ReachableDest::get().ok_or( - BenchmarkError::Override(BenchmarkResult::from_weight(Weight::MAX)), - )? - .into(); + let versioned_dest: VersionedMultiLocation = destination.into(); let versioned_beneficiary: VersionedMultiLocation = AccountId32 { network: None, id: recipient.into() }.into(); - let versioned_assets: VersionedMultiAssets = asset.into(); - }: _>(send_origin, Box::new(versioned_dest), Box::new(versioned_beneficiary), Box::new(versioned_assets), 0) + let versioned_assets: VersionedMultiAssets = assets.into(); + }: _>(send_origin.into(), Box::new(versioned_dest), Box::new(versioned_beneficiary), Box::new(versioned_assets), 0) + verify { + // verify balance after transfer, decreased by transferred amount (+ maybe XCM delivery fees) + assert!(pallet_balances::Pallet::::free_balance(&caller) <= balance - transferred_amount); + } execute { let execute_origin = @@ -92,7 +171,7 @@ benchmarks! { }: _>(execute_origin, Box::new(versioned_msg), Weight::zero()) force_xcm_version { - let loc = T::ReachableDest::get().ok_or( + let loc = T::reachable_dest().ok_or( BenchmarkError::Override(BenchmarkResult::from_weight(Weight::MAX)), )?; let xcm_version = 2; @@ -101,18 +180,18 @@ benchmarks! { force_default_xcm_version {}: _(RawOrigin::Root, Some(2)) force_subscribe_version_notify { - let versioned_loc: VersionedMultiLocation = T::ReachableDest::get().ok_or( + let versioned_loc: VersionedMultiLocation = T::reachable_dest().ok_or( BenchmarkError::Override(BenchmarkResult::from_weight(Weight::MAX)), )? .into(); }: _(RawOrigin::Root, Box::new(versioned_loc)) force_unsubscribe_version_notify { - let loc = T::ReachableDest::get().ok_or( + let loc = T::reachable_dest().ok_or( BenchmarkError::Override(BenchmarkResult::from_weight(Weight::MAX)), )?; let versioned_loc: VersionedMultiLocation = loc.into(); - let _ = Pallet::::request_version_notify(loc); + let _ = crate::Pallet::::request_version_notify(loc); }: _(RawOrigin::Root, Box::new(versioned_loc)) force_suspension {}: _(RawOrigin::Root, true) @@ -122,7 +201,7 @@ benchmarks! { let loc = VersionedMultiLocation::from(MultiLocation::from(Parent)); SupportedVersion::::insert(old_version, loc, old_version); }: { - Pallet::::check_xcm_version_change(VersionMigrationStage::MigrateSupportedVersion, Weight::zero()); + crate::Pallet::::check_xcm_version_change(VersionMigrationStage::MigrateSupportedVersion, Weight::zero()); } migrate_version_notifiers { @@ -130,22 +209,22 @@ benchmarks! 
{ let loc = VersionedMultiLocation::from(MultiLocation::from(Parent)); VersionNotifiers::::insert(old_version, loc, 0); }: { - Pallet::::check_xcm_version_change(VersionMigrationStage::MigrateVersionNotifiers, Weight::zero()); + crate::Pallet::::check_xcm_version_change(VersionMigrationStage::MigrateVersionNotifiers, Weight::zero()); } already_notified_target { - let loc = T::ReachableDest::get().ok_or( + let loc = T::reachable_dest().ok_or( BenchmarkError::Override(BenchmarkResult::from_weight(T::DbWeight::get().reads(1))), )?; let loc = VersionedMultiLocation::from(loc); let current_version = T::AdvertisedXcmVersion::get(); VersionNotifyTargets::::insert(current_version, loc, (0, Weight::zero(), current_version)); }: { - Pallet::::check_xcm_version_change(VersionMigrationStage::NotifyCurrentTargets(None), Weight::zero()); + crate::Pallet::::check_xcm_version_change(VersionMigrationStage::NotifyCurrentTargets(None), Weight::zero()); } notify_current_targets { - let loc = T::ReachableDest::get().ok_or( + let loc = T::reachable_dest().ok_or( BenchmarkError::Override(BenchmarkResult::from_weight(T::DbWeight::get().reads_writes(1, 3))), )?; let loc = VersionedMultiLocation::from(loc); @@ -153,7 +232,7 @@ benchmarks! { let old_version = current_version - 1; VersionNotifyTargets::::insert(current_version, loc, (0, Weight::zero(), old_version)); }: { - Pallet::::check_xcm_version_change(VersionMigrationStage::NotifyCurrentTargets(None), Weight::zero()); + crate::Pallet::::check_xcm_version_change(VersionMigrationStage::NotifyCurrentTargets(None), Weight::zero()); } notify_target_migration_fail { @@ -167,7 +246,7 @@ benchmarks! { let current_version = T::AdvertisedXcmVersion::get(); VersionNotifyTargets::::insert(current_version, bad_loc, (0, Weight::zero(), current_version)); }: { - Pallet::::check_xcm_version_change(VersionMigrationStage::MigrateAndNotifyOldTargets, Weight::zero()); + crate::Pallet::::check_xcm_version_change(VersionMigrationStage::MigrateAndNotifyOldTargets, Weight::zero()); } migrate_version_notify_targets { @@ -176,18 +255,18 @@ benchmarks! { let loc = VersionedMultiLocation::from(MultiLocation::from(Parent)); VersionNotifyTargets::::insert(old_version, loc, (0, Weight::zero(), current_version)); }: { - Pallet::::check_xcm_version_change(VersionMigrationStage::MigrateAndNotifyOldTargets, Weight::zero()); + crate::Pallet::::check_xcm_version_change(VersionMigrationStage::MigrateAndNotifyOldTargets, Weight::zero()); } migrate_and_notify_old_targets { - let loc = T::ReachableDest::get().ok_or( + let loc = T::reachable_dest().ok_or( BenchmarkError::Override(BenchmarkResult::from_weight(T::DbWeight::get().reads_writes(1, 3))), )?; let loc = VersionedMultiLocation::from(loc); let old_version = T::AdvertisedXcmVersion::get() - 1; VersionNotifyTargets::::insert(old_version, loc, (0, Weight::zero(), old_version)); }: { - Pallet::::check_xcm_version_change(VersionMigrationStage::MigrateAndNotifyOldTargets, Weight::zero()); + crate::Pallet::::check_xcm_version_change(VersionMigrationStage::MigrateAndNotifyOldTargets, Weight::zero()); } new_query { @@ -195,14 +274,14 @@ benchmarks! 
{ let timeout = 1u32.into(); let match_querier = MultiLocation::from(Here); }: { - Pallet::::new_query(responder, timeout, match_querier); + crate::Pallet::::new_query(responder, timeout, match_querier); } take_response { let responder = MultiLocation::from(Parent); let timeout = 1u32.into(); let match_querier = MultiLocation::from(Here); - let query_id = Pallet::::new_query(responder, timeout, match_querier); + let query_id = crate::Pallet::::new_query(responder, timeout, match_querier); let infos = (0 .. xcm::v3::MaxPalletsInfo::get()).map(|_| PalletInfo::new( u32::MAX, (0..xcm::v3::MaxPalletNameLen::get()).map(|_| 97u8).collect::>().try_into().unwrap(), @@ -211,10 +290,10 @@ benchmarks! { u32::MAX, u32::MAX, ).unwrap()).collect::>(); - Pallet::::expect_response(query_id, Response::PalletsInfo(infos.try_into().unwrap())); + crate::Pallet::::expect_response(query_id, Response::PalletsInfo(infos.try_into().unwrap())); }: { - as QueryHandler>::take_response(query_id); + as QueryHandler>::take_response(query_id); } impl_benchmark_test_suite!( diff --git a/polkadot/xcm/pallet-xcm/src/lib.rs b/polkadot/xcm/pallet-xcm/src/lib.rs index 2d969fb870c..8157620465f 100644 --- a/polkadot/xcm/pallet-xcm/src/lib.rs +++ b/polkadot/xcm/pallet-xcm/src/lib.rs @@ -19,7 +19,7 @@ #![cfg_attr(not(feature = "std"), no_std)] #[cfg(feature = "runtime-benchmarks")] -mod benchmarking; +pub mod benchmarking; #[cfg(test)] mod mock; #[cfg(test)] @@ -55,9 +55,9 @@ use xcm_builder::{ }; use xcm_executor::{ traits::{ - CheckSuspension, ClaimAssets, ConvertLocation, ConvertOrigin, DropAssets, MatchesFungible, - OnResponse, Properties, QueryHandler, QueryResponseStatus, VersionChangeNotifier, - WeightBounds, + AssetTransferError, CheckSuspension, ClaimAssets, ConvertLocation, ConvertOrigin, + DropAssets, MatchesFungible, OnResponse, Properties, QueryHandler, QueryResponseStatus, + TransactAsset, TransferType, VersionChangeNotifier, WeightBounds, XcmAssetTransfers, }, Assets, }; @@ -222,7 +222,7 @@ pub mod pallet { type XcmExecuteFilter: Contains<(MultiLocation, Xcm<::RuntimeCall>)>; /// Something to execute an XCM message. - type XcmExecutor: ExecuteXcm<::RuntimeCall>; + type XcmExecutor: ExecuteXcm<::RuntimeCall> + XcmAssetTransfers; /// Our XCM filter which messages to be teleported using the dedicated extrinsic must pass. type XcmTeleportFilter: Contains<(MultiLocation, Vec)>; @@ -275,12 +275,6 @@ pub mod pallet { /// Weight information for extrinsics in this pallet. type WeightInfo: WeightInfo; - - /// A `MultiLocation` that can be reached via `XcmRouter`. Used only in benchmarks. - /// - /// If `None`, the benchmarks that depend on a reachable destination will be skipped. - #[cfg(feature = "runtime-benchmarks")] - type ReachableDest: Get>; } impl ExecuteControllerWeightInfo for Pallet { @@ -531,8 +525,8 @@ pub mod pallet { NoSubscription, /// The location is invalid since it already has a subscription from us. AlreadySubscribed, - /// Invalid asset for the operation. - InvalidAsset, + /// Could not check-out the assets for teleportation to the destination chain. + CannotCheckOutTeleport, /// The owner does not own (all) of the asset that they wish to do the operation on. LowBalance, /// The asset owner has too many locks on the asset. @@ -545,6 +539,16 @@ pub mod pallet { LockNotFound, /// The unlock operation cannot succeed because there are still consumers of the lock. InUse, + /// Invalid non-concrete asset. + InvalidAssetNotConcrete, + /// Invalid asset, reserve chain could not be determined for it. 
+		InvalidAssetUnknownReserve,
+		/// Invalid asset: remote asset reserves with different fee reserves are not supported.
+		InvalidAssetUnsupportedReserve,
+		/// Too many assets with different reserve locations have been attempted for transfer.
+		TooManyReserves,
+		/// Local XCM execution of asset transfer incomplete.
+		LocalExecutionIncomplete,
 	}
 
 	impl<T: Config> From<SendError> for Error<T> {
@@ -557,6 +561,15 @@ pub mod pallet {
 		}
 	}
 
+	impl<T: Config> From<AssetTransferError> for Error<T> {
+		fn from(e: AssetTransferError) -> Self {
+			match e {
+				AssetTransferError::NotConcrete => Error::<T>::InvalidAssetNotConcrete,
+				AssetTransferError::UnknownReserve => Error::<T>::InvalidAssetUnknownReserve,
+			}
+		}
+	}
+
 	/// The status of a query.
 	#[derive(Clone, Eq, PartialEq, Encode, Decode, RuntimeDebug, TypeInfo)]
 	pub enum QueryStatus<BlockNumber> {
@@ -907,11 +920,7 @@ pub mod pallet {
 					let mut message = Xcm(vec![
 						WithdrawAsset(assets),
 						SetFeesMode { jit_withdraw: true },
-						InitiateTeleport {
-							assets: Wild(AllCounted(count)),
-							dest,
-							xcm: Xcm(vec![]),
-						},
+						InitiateTeleport { assets: Wild(AllCounted(count)), dest, xcm: Xcm(vec![]) },
 					]);
 					T::Weigher::weight(&mut message).map_or(Weight::MAX, |w| T::WeightInfo::teleport_assets().saturating_add(w))
 				}
@@ -954,6 +963,8 @@ pub mod pallet {
 			match (maybe_assets, maybe_dest) {
 				(Ok(assets), Ok(dest)) => {
 					use sp_std::vec;
+					// heaviest version of locally executed XCM program: equivalent in weight to
+					// transfer assets to SA, reanchor them, extend XCM program, and send onward XCM
 					let mut message = Xcm(vec![
 						SetFeesMode { jit_withdraw: true },
 						TransferReserveAsset { assets, dest, xcm: Xcm(vec![]) }
@@ -1114,6 +1125,8 @@ pub mod pallet {
 			match (maybe_assets, maybe_dest) {
 				(Ok(assets), Ok(dest)) => {
 					use sp_std::vec;
+					// heaviest version of locally executed XCM program: equivalent in weight to
+					// transfer assets to SA, reanchor them, extend XCM program, and send onward XCM
 					let mut message = Xcm(vec![
 						SetFeesMode { jit_withdraw: true },
 						TransferReserveAsset { assets, dest, xcm: Xcm(vec![]) }
@@ -1273,6 +1286,33 @@ impl<T: Config> QueryHandler for Pallet<T> {
 }
 
 impl<T: Config> Pallet<T> {
+	/// Validate `assets` to be reserve-transferred and return their reserve location.
+	fn validate_assets_and_find_reserve(
+		assets: &[MultiAsset],
+		dest: &MultiLocation,
+	) -> Result<TransferType, Error<T>> {
+		let mut reserve = None;
+		for asset in assets.iter() {
+			if let Fungible(x) = asset.fun {
+				// If fungible asset, ensure non-zero amount.
+				ensure!(!x.is_zero(), Error::<T>::Empty);
+			}
+			let transfer_type =
+				T::XcmExecutor::determine_for(&asset, dest).map_err(Error::<T>::from)?;
+			// Ensure asset is not teleportable to `dest`.
+ ensure!(transfer_type != TransferType::Teleport, Error::::Filtered); + if let Some(reserve) = reserve.as_ref() { + // Ensure transfer for multiple assets uses same reserve location (only fee may have + // different reserve location) + ensure!(reserve == &transfer_type, Error::::TooManyReserves); + } else { + // asset reserve identified + reserve = Some(transfer_type); + } + } + reserve.ok_or(Error::::Empty) + } + fn do_reserve_transfer_assets( origin: OriginFor, dest: Box, @@ -1286,35 +1326,75 @@ impl Pallet { let beneficiary: MultiLocation = (*beneficiary).try_into().map_err(|()| Error::::BadVersion)?; let assets: MultiAssets = (*assets).try_into().map_err(|()| Error::::BadVersion)?; + log::trace!( + target: "xcm::pallet_xcm::do_reserve_transfer_assets", + "origin {:?}, dest {:?}, beneficiary {:?}, assets {:?}, fee-idx {:?}", + origin_location, dest, beneficiary, assets, fee_asset_item, + ); ensure!(assets.len() <= MAX_ASSETS_FOR_TRANSFER, Error::::TooManyAssets); let value = (origin_location, assets.into_inner()); ensure!(T::XcmReserveTransferFilter::contains(&value), Error::::Filtered); - let (origin_location, assets) = value; - let context = T::UniversalLocation::get(); - let fees = assets - .get(fee_asset_item as usize) - .ok_or(Error::::Empty)? - .clone() - .reanchored(&dest, context) - .map_err(|_| Error::::CannotReanchor)?; - let max_assets = assets.len() as u32; - let assets: MultiAssets = assets.into(); - let xcm = Xcm(vec![ - BuyExecution { fees, weight_limit }, - DepositAsset { assets: Wild(AllCounted(max_assets)), beneficiary }, - ]); - let mut message = Xcm(vec![ - SetFeesMode { jit_withdraw: true }, - TransferReserveAsset { assets, dest, xcm }, - ]); - let weight = - T::Weigher::weight(&mut message).map_err(|()| Error::::UnweighableMessage)?; - let hash = message.using_encoded(sp_io::hashing::blake2_256); - let outcome = - T::XcmExecutor::execute_xcm_in_credit(origin_location, message, hash, weight, weight); - Self::deposit_event(Event::Attempted { outcome }); - Ok(()) + let (origin_location, mut assets) = value; + + if fee_asset_item as usize >= assets.len() { + return Err(Error::::Empty.into()) + } + let fees = assets.swap_remove(fee_asset_item as usize); + let fees_transfer_type = + T::XcmExecutor::determine_for(&fees, &dest).map_err(Error::::from)?; + let assets_transfer_type = if assets.is_empty() { + // Single asset to transfer (one used for fees where transfer type is determined above). + ensure!(fees_transfer_type != TransferType::Teleport, Error::::Filtered); + fees_transfer_type + } else { + // Find reserve for non-fee assets. + Self::validate_assets_and_find_reserve(&assets, &dest)? + }; + + // local and remote XCM programs to potentially handle fees separately + let separate_fees_instructions: Option<(Xcm<::RuntimeCall>, Xcm<()>)>; + if fees_transfer_type == assets_transfer_type { + // Same reserve location (fees not teleportable), we can batch together fees and assets + // in same reserve-based-transfer. + assets.push(fees.clone()); + // no need for custom fees instructions, fees are batched with assets + separate_fees_instructions = None; + } else { + // Disallow _remote reserves_ unless assets & fees have same remote reserve (covered by + // branch above). The reason for this is that we'd need to send XCMs to separate chains + // with no guarantee of delivery order on final destination; therefore we cannot + // guarantee to have fees in place on final destination chain to pay for assets + // transfer. 
+ ensure!( + !matches!(assets_transfer_type, TransferType::RemoteReserve(_)), + Error::::InvalidAssetUnsupportedReserve + ); + let fees = fees.clone(); + let weight_limit = weight_limit.clone(); + // build fees transfer instructions to be added to assets transfers XCM programs + separate_fees_instructions = Some(match fees_transfer_type { + TransferType::LocalReserve => + Self::local_reserve_fees_instructions(dest, fees, weight_limit)?, + TransferType::DestinationReserve => + Self::destination_reserve_fees_instructions(dest, fees, weight_limit)?, + TransferType::Teleport => + Self::teleport_fees_instructions(dest, fees, weight_limit)?, + TransferType::RemoteReserve(_) => + return Err(Error::::InvalidAssetUnsupportedReserve.into()), + }); + }; + + Self::build_and_execute_xcm_transfer_type( + origin_location, + dest, + beneficiary, + assets, + assets_transfer_type, + fees, + separate_fees_instructions, + weight_limit, + ) } fn do_teleport_assets( @@ -1335,31 +1415,384 @@ impl Pallet { let value = (origin_location, assets.into_inner()); ensure!(T::XcmTeleportFilter::contains(&value), Error::::Filtered); let (origin_location, assets) = value; + for asset in assets.iter() { + let transfer_type = + T::XcmExecutor::determine_for(asset, &dest).map_err(Error::::from)?; + ensure!(matches!(transfer_type, TransferType::Teleport), Error::::Filtered); + } + let fees = assets.get(fee_asset_item as usize).ok_or(Error::::Empty)?.clone(); + + Self::build_and_execute_xcm_transfer_type( + origin_location, + dest, + beneficiary, + assets, + TransferType::Teleport, + fees, + None, + weight_limit, + ) + } + + fn build_and_execute_xcm_transfer_type( + origin: MultiLocation, + dest: MultiLocation, + beneficiary: MultiLocation, + assets: Vec, + transfer_type: TransferType, + fees: MultiAsset, + separate_fees_instructions: Option<(Xcm<::RuntimeCall>, Xcm<()>)>, + weight_limit: WeightLimit, + ) -> DispatchResult { + log::trace!( + target: "xcm::pallet_xcm::build_and_execute_xcm_transfer_type", + "origin {:?}, dest {:?}, beneficiary {:?}, assets {:?}, transfer_type {:?}, \ + fees {:?}, fees_xcm: {:?}, weight_limit: {:?}", + origin, dest, beneficiary, assets, transfer_type, fees, separate_fees_instructions, weight_limit, + ); + let (mut local_xcm, remote_xcm) = match transfer_type { + TransferType::LocalReserve => { + let (local, remote) = Self::local_reserve_transfer_programs( + dest, + beneficiary, + assets, + fees, + separate_fees_instructions, + weight_limit, + )?; + (local, Some(remote)) + }, + TransferType::DestinationReserve => { + let (local, remote) = Self::destination_reserve_transfer_programs( + dest, + beneficiary, + assets, + fees, + separate_fees_instructions, + weight_limit, + )?; + (local, Some(remote)) + }, + TransferType::RemoteReserve(reserve) => ( + Self::remote_reserve_transfer_program( + reserve, + dest, + beneficiary, + assets, + fees, + weight_limit, + )?, + None, + ), + TransferType::Teleport => ( + Self::teleport_assets_program(dest, beneficiary, assets, fees, weight_limit)?, + None, + ), + }; + let weight = + T::Weigher::weight(&mut local_xcm).map_err(|()| Error::::UnweighableMessage)?; + let hash = local_xcm.using_encoded(sp_io::hashing::blake2_256); + let outcome = + T::XcmExecutor::execute_xcm_in_credit(origin, local_xcm, hash, weight, weight); + Self::deposit_event(Event::Attempted { outcome: outcome.clone() }); + if let Some(remote_xcm) = remote_xcm { + outcome.ensure_complete().map_err(|_| Error::::LocalExecutionIncomplete)?; + + let (ticket, price) = validate_send::(dest, 
remote_xcm.clone()) + .map_err(Error::::from)?; + if origin != Here.into_location() { + Self::charge_fees(origin, price).map_err(|_| Error::::FeesNotMet)?; + } + let message_id = T::XcmRouter::deliver(ticket).map_err(Error::::from)?; + + let e = Event::Sent { origin, destination: dest, message: remote_xcm, message_id }; + Self::deposit_event(e); + } + Ok(()) + } + + fn local_reserve_fees_instructions( + dest: MultiLocation, + fees: MultiAsset, + weight_limit: WeightLimit, + ) -> Result<(Xcm<::RuntimeCall>, Xcm<()>), Error> { let context = T::UniversalLocation::get(); - let fees = assets - .get(fee_asset_item as usize) - .ok_or(Error::::Empty)? + let reanchored_fees = fees .clone() .reanchored(&dest, context) .map_err(|_| Error::::CannotReanchor)?; - let max_assets = assets.len() as u32; + + let local_execute_xcm = Xcm(vec![ + // move `fees` to `dest`s local sovereign account + TransferAsset { assets: fees.into(), beneficiary: dest }, + ]); + let xcm_on_dest = Xcm(vec![ + // let (dest) chain know `fees` are in its SA on reserve + ReserveAssetDeposited(reanchored_fees.clone().into()), + // buy exec using `fees` in holding deposited in above instruction + BuyExecution { fees: reanchored_fees, weight_limit }, + ]); + Ok((local_execute_xcm, xcm_on_dest)) + } + + fn local_reserve_transfer_programs( + dest: MultiLocation, + beneficiary: MultiLocation, + assets: Vec, + fees: MultiAsset, + separate_fees_instructions: Option<(Xcm<::RuntimeCall>, Xcm<()>)>, + weight_limit: WeightLimit, + ) -> Result<(Xcm<::RuntimeCall>, Xcm<()>), Error> { + // max assets is `assets` (+ potentially separately handled fee) + let max_assets = + assets.len() as u32 + separate_fees_instructions.as_ref().map(|_| 1).unwrap_or(0); + let assets: MultiAssets = assets.into(); + let context = T::UniversalLocation::get(); + let mut reanchored_assets = assets.clone(); + reanchored_assets + .reanchor(&dest, context) + .map_err(|_| Error::::CannotReanchor)?; + + // fees are either handled through dedicated instructions, or batched together with assets + let fees_already_handled = separate_fees_instructions.is_some(); + let (fees_local_xcm, fees_remote_xcm) = separate_fees_instructions + .map(|(local, remote)| (local.into_inner(), remote.into_inner())) + .unwrap_or_default(); + + // start off with any necessary local fees specific instructions + let mut local_execute_xcm = fees_local_xcm; + // move `assets` to `dest`s local sovereign account + local_execute_xcm.push(TransferAsset { assets, beneficiary: dest }); + + // on destination chain, start off with custom fee instructions + let mut xcm_on_dest = fees_remote_xcm; + // continue with rest of assets + xcm_on_dest.extend_from_slice(&[ + // let (dest) chain know assets are in its SA on reserve + ReserveAssetDeposited(reanchored_assets), + // following instructions are not exec'ed on behalf of origin chain anymore + ClearOrigin, + ]); + if !fees_already_handled { + // no custom fees instructions, they are batched together with `assets` transfer; + // BuyExecution happens after receiving all `assets` + let reanchored_fees = + fees.reanchored(&dest, context).map_err(|_| Error::::CannotReanchor)?; + // buy execution using `fees` batched together with above `reanchored_assets` + xcm_on_dest.push(BuyExecution { fees: reanchored_fees, weight_limit }); + } + // deposit all remaining assets in holding to `beneficiary` location + xcm_on_dest.push(DepositAsset { assets: Wild(AllCounted(max_assets)), beneficiary }); + + Ok((Xcm(local_execute_xcm), Xcm(xcm_on_dest))) + } + + fn 
destination_reserve_fees_instructions( + dest: MultiLocation, + fees: MultiAsset, + weight_limit: WeightLimit, + ) -> Result<(Xcm<::RuntimeCall>, Xcm<()>), Error> { + let context = T::UniversalLocation::get(); + let reanchored_fees = fees + .clone() + .reanchored(&dest, context) + .map_err(|_| Error::::CannotReanchor)?; + let fees: MultiAssets = fees.into(); + + let local_execute_xcm = Xcm(vec![ + // withdraw reserve-based fees (derivatives) + WithdrawAsset(fees.clone()), + // burn derivatives + BurnAsset(fees), + ]); + let xcm_on_dest = Xcm(vec![ + // withdraw `fees` from origin chain's sovereign account + WithdrawAsset(reanchored_fees.clone().into()), + // buy exec using `fees` in holding withdrawn in above instruction + BuyExecution { fees: reanchored_fees, weight_limit }, + ]); + Ok((local_execute_xcm, xcm_on_dest)) + } + + fn destination_reserve_transfer_programs( + dest: MultiLocation, + beneficiary: MultiLocation, + assets: Vec, + fees: MultiAsset, + separate_fees_instructions: Option<(Xcm<::RuntimeCall>, Xcm<()>)>, + weight_limit: WeightLimit, + ) -> Result<(Xcm<::RuntimeCall>, Xcm<()>), Error> { + // max assets is `assets` (+ potentially separately handled fee) + let max_assets = + assets.len() as u32 + separate_fees_instructions.as_ref().map(|_| 1).unwrap_or(0); let assets: MultiAssets = assets.into(); - let xcm = Xcm(vec![ + let context = T::UniversalLocation::get(); + let mut reanchored_assets = assets.clone(); + reanchored_assets + .reanchor(&dest, context) + .map_err(|_| Error::::CannotReanchor)?; + + // fees are either handled through dedicated instructions, or batched together with assets + let fees_already_handled = separate_fees_instructions.is_some(); + let (fees_local_xcm, fees_remote_xcm) = separate_fees_instructions + .map(|(local, remote)| (local.into_inner(), remote.into_inner())) + .unwrap_or_default(); + + // start off with any necessary local fees specific instructions + let mut local_execute_xcm = fees_local_xcm; + // continue with rest of assets + local_execute_xcm.extend_from_slice(&[ + // withdraw reserve-based assets + WithdrawAsset(assets.clone()), + // burn reserve-based assets + BurnAsset(assets), + ]); + + // on destination chain, start off with custom fee instructions + let mut xcm_on_dest = fees_remote_xcm; + // continue with rest of assets + xcm_on_dest.extend_from_slice(&[ + // withdraw `assets` from origin chain's sovereign account + WithdrawAsset(reanchored_assets), + // following instructions are not exec'ed on behalf of origin chain anymore + ClearOrigin, + ]); + if !fees_already_handled { + // no custom fees instructions, they are batched together with `assets` transfer; + // BuyExecution happens after receiving all `assets` + let reanchored_fees = + fees.reanchored(&dest, context).map_err(|_| Error::::CannotReanchor)?; + // buy execution using `fees` batched together with above `reanchored_assets` + xcm_on_dest.push(BuyExecution { fees: reanchored_fees, weight_limit }); + } + // deposit all remaining assets in holding to `beneficiary` location + xcm_on_dest.push(DepositAsset { assets: Wild(AllCounted(max_assets)), beneficiary }); + + Ok((Xcm(local_execute_xcm), Xcm(xcm_on_dest))) + } + + // function assumes fees and assets have the same remote reserve + fn remote_reserve_transfer_program( + reserve: MultiLocation, + dest: MultiLocation, + beneficiary: MultiLocation, + assets: Vec, + fees: MultiAsset, + weight_limit: WeightLimit, + ) -> Result::RuntimeCall>, Error> { + let max_assets = assets.len() as u32; + let context = 
T::UniversalLocation::get();
+		// we spend up to half of fees for execution on reserve and other half for execution on
+		// destination
+		let (fees_half_1, fees_half_2) = Self::halve_fees(fees)?;
+		// identifies fee item as seen by `reserve` - to be used at reserve chain
+		let reserve_fees = fees_half_1
+			.reanchored(&reserve, context)
+			.map_err(|_| Error::<T>::CannotReanchor)?;
+		// identifies fee item as seen by `dest` - to be used at destination chain
+		let dest_fees =
+			fees_half_2.reanchored(&dest, context).map_err(|_| Error::<T>::CannotReanchor)?;
+		// identifies `dest` as seen by `reserve`
+		let dest = dest.reanchored(&reserve, context).map_err(|_| Error::<T>::CannotReanchor)?;
+		// xcm to be executed at dest
+		let xcm_on_dest = Xcm(vec![
+			BuyExecution { fees: dest_fees, weight_limit: weight_limit.clone() },
+			DepositAsset { assets: Wild(AllCounted(max_assets)), beneficiary },
+		]);
+		// xcm to be executed on reserve
+		let xcm_on_reserve = Xcm(vec![
+			BuyExecution { fees: reserve_fees, weight_limit },
+			DepositReserveAsset { assets: Wild(AllCounted(max_assets)), dest, xcm: xcm_on_dest },
+		]);
+		Ok(Xcm(vec![
+			WithdrawAsset(assets.into()),
+			InitiateReserveWithdraw {
+				assets: Wild(AllCounted(max_assets)),
+				reserve,
+				xcm: xcm_on_reserve,
+			},
+		]))
+	}
+
+	fn teleport_fees_instructions(
+		dest: MultiLocation,
+		fees: MultiAsset,
+		weight_limit: WeightLimit,
+	) -> Result<(Xcm<<T as Config>::RuntimeCall>, Xcm<()>), Error<T>> {
+		let context = T::UniversalLocation::get();
+		let reanchored_fees = fees
+			.clone()
+			.reanchored(&dest, context)
+			.map_err(|_| Error::<T>::CannotReanchor)?;
+
+		// XcmContext is irrelevant in teleport checks
+		let dummy_context =
+			XcmContext { origin: None, message_id: Default::default(), topic: None };
+		// We should check that the asset can actually be teleported out (for this to
+		// be in error, there would need to be an accounting violation by ourselves,
+		// so it's unlikely, but we don't want to allow that kind of bug to leak into
+		// a trusted chain).
+ ::AssetTransactor::can_check_out( + &dest, + &fees, + &dummy_context, + ) + .map_err(|_| Error::::CannotCheckOutTeleport)?; + ::AssetTransactor::check_out( + &dest, + &fees, + &dummy_context, + ); + + let fees: MultiAssets = fees.into(); + let local_execute_xcm = Xcm(vec![ + // withdraw fees + WithdrawAsset(fees.clone()), + // burn fees + BurnAsset(fees), + ]); + let xcm_on_dest = Xcm(vec![ + // (dest) chain receive teleported assets burned on origin chain + ReceiveTeleportedAsset(reanchored_fees.clone().into()), + // buy exec using `fees` in holding received in above instruction + BuyExecution { fees: reanchored_fees, weight_limit }, + ]); + Ok((local_execute_xcm, xcm_on_dest)) + } + + fn teleport_assets_program( + dest: MultiLocation, + beneficiary: MultiLocation, + assets: Vec, + mut fees: MultiAsset, + weight_limit: WeightLimit, + ) -> Result::RuntimeCall>, Error> { + let context = T::UniversalLocation::get(); + fees.reanchor(&dest, context).map_err(|_| Error::::CannotReanchor)?; + let max_assets = assets.len() as u32; + let xcm_on_dest = Xcm(vec![ BuyExecution { fees, weight_limit }, DepositAsset { assets: Wild(AllCounted(max_assets)), beneficiary }, ]); - let mut message = Xcm(vec![ - WithdrawAsset(assets), + Ok(Xcm(vec![ + WithdrawAsset(assets.into()), SetFeesMode { jit_withdraw: true }, - InitiateTeleport { assets: Wild(AllCounted(max_assets)), dest, xcm }, - ]); - let weight = - T::Weigher::weight(&mut message).map_err(|()| Error::::UnweighableMessage)?; - let hash = message.using_encoded(sp_io::hashing::blake2_256); - let outcome = - T::XcmExecutor::execute_xcm_in_credit(origin_location, message, hash, weight, weight); - Self::deposit_event(Event::Attempted { outcome }); - Ok(()) + InitiateTeleport { assets: Wild(AllCounted(max_assets)), dest, xcm: xcm_on_dest }, + ])) + } + + /// Halve `fees` fungible amount. 
+ pub(crate) fn halve_fees(fees: MultiAsset) -> Result<(MultiAsset, MultiAsset), Error> { + match fees.fun { + Fungible(amount) => { + let fee1 = amount.saturating_div(2); + let fee2 = amount.saturating_sub(fee1); + ensure!(fee1 > 0, Error::::FeesNotMet); + ensure!(fee2 > 0, Error::::FeesNotMet); + Ok((MultiAsset::from((fees.id, fee1)), MultiAsset::from((fees.id, fee2)))) + }, + NonFungible(_) => Err(Error::::FeesNotMet), + } } /// Will always make progress, and will do its best not to use much more than `weight_cutoff` diff --git a/polkadot/xcm/pallet-xcm/src/mock.rs b/polkadot/xcm/pallet-xcm/src/mock.rs index 3b41ad90ec9..026838993f1 100644 --- a/polkadot/xcm/pallet-xcm/src/mock.rs +++ b/polkadot/xcm/pallet-xcm/src/mock.rs @@ -17,7 +17,9 @@ use codec::Encode; use frame_support::{ construct_runtime, match_types, parameter_types, - traits::{ConstU32, Everything, EverythingBut, Nothing}, + traits::{ + AsEnsureOriginWithArg, ConstU128, ConstU32, Equals, Everything, EverythingBut, Nothing, + }, weights::Weight, }; use frame_system::EnsureRoot; @@ -32,11 +34,15 @@ use xcm::prelude::*; use xcm_builder::{ AccountId32Aliases, AllowKnownQueryResponses, AllowSubscriptionsFrom, AllowTopLevelPaidExecutionFrom, Case, ChildParachainAsNative, ChildParachainConvertsVia, - ChildSystemParachainAsSuperuser, CurrencyAdapter as XcmCurrencyAdapter, FixedRateOfFungible, - FixedWeightBounds, IsConcrete, SignedAccountId32AsNative, SignedToAccountId32, + ChildSystemParachainAsSuperuser, CurrencyAdapter as XcmCurrencyAdapter, DescribeAllTerminal, + FixedRateOfFungible, FixedWeightBounds, FungiblesAdapter, HashedDescription, IsConcrete, + MatchedConvertedConcreteId, NoChecking, SignedAccountId32AsNative, SignedToAccountId32, SovereignSignedViaLocation, TakeWeightCredit, XcmFeeManagerFromComponents, XcmFeeToAccount, }; -use xcm_executor::XcmExecutor; +use xcm_executor::{ + traits::{Identity, JustTry}, + XcmExecutor, +}; use crate::{self as pallet_xcm, TestWeightInfo}; @@ -137,6 +143,7 @@ construct_runtime!( { System: frame_system::{Pallet, Call, Storage, Config, Event}, Balances: pallet_balances::{Pallet, Call, Storage, Config, Event}, + Assets: pallet_assets::{Pallet, Call, Storage, Config, Event}, ParasOrigin: origin::{Pallet, Origin}, XcmPallet: pallet_xcm::{Pallet, Call, Storage, Event, Origin, Config}, TestNotifier: pallet_test_notifier::{Pallet, Call, Event}, @@ -179,13 +186,13 @@ impl SendXcm for TestSendXcmErrX8 { type Ticket = (MultiLocation, Xcm<()>); fn validate( dest: &mut Option, - msg: &mut Option>, + _: &mut Option>, ) -> SendResult<(MultiLocation, Xcm<()>)> { - let (dest, msg) = (dest.take().unwrap(), msg.take().unwrap()); - if dest.len() == 8 { + if dest.as_ref().unwrap().len() == 8 { + dest.take(); Err(SendError::Transport("Destination location full")) } else { - Ok(((dest, msg), MultiAssets::new())) + Err(SendError::NotApplicable) } } fn deliver(pair: (MultiLocation, Xcm<()>)) -> Result { @@ -280,18 +287,135 @@ impl pallet_balances::Config for Test { type MaxFreezes = ConstU32<0>; } +#[cfg(feature = "runtime-benchmarks")] +/// Simple conversion of `u32` into an `AssetId` for use in benchmarking. 
+pub struct XcmBenchmarkHelper; +#[cfg(feature = "runtime-benchmarks")] +impl pallet_assets::BenchmarkHelper for XcmBenchmarkHelper { + fn create_asset_id_parameter(id: u32) -> MultiLocation { + MultiLocation { parents: 1, interior: X1(Parachain(id)) } + } +} + +impl pallet_assets::Config for Test { + type RuntimeEvent = RuntimeEvent; + type Balance = Balance; + type AssetId = MultiLocation; + type AssetIdParameter = MultiLocation; + type Currency = Balances; + type CreateOrigin = AsEnsureOriginWithArg>; + type ForceOrigin = EnsureRoot; + type AssetDeposit = ConstU128<1>; + type AssetAccountDeposit = ConstU128<10>; + type MetadataDepositBase = ConstU128<1>; + type MetadataDepositPerByte = ConstU128<1>; + type ApprovalDeposit = ConstU128<1>; + type StringLimit = ConstU32<50>; + type Freezer = (); + type WeightInfo = (); + type CallbackHandle = (); + type Extra = (); + type RemoveItemsLimit = ConstU32<5>; + #[cfg(feature = "runtime-benchmarks")] + type BenchmarkHelper = XcmBenchmarkHelper; +} + +// This child parachain is a system parachain trusted to teleport native token. +pub const SOME_SYSTEM_PARA: u32 = 1001; + +// This child parachain acts as trusted reserve for its assets in tests. +// USDT allowed to teleport to/from here. +pub const FOREIGN_ASSET_RESERVE_PARA_ID: u32 = 2001; +// Inner junction of reserve asset on `FOREIGN_ASSET_RESERVE_PARA_ID`. +pub const FOREIGN_ASSET_INNER_JUNCTION: Junction = GeneralIndex(1234567); + +// This child parachain acts as trusted reserve for say.. USDC that can be used for fees. +pub const USDC_RESERVE_PARA_ID: u32 = 2002; +// Inner junction of reserve asset on `USDC_RESERVE_PARA_ID`. +pub const USDC_INNER_JUNCTION: Junction = PalletInstance(42); + +// This child parachain is a trusted teleporter for say.. USDT (T from Teleport :)). +// We'll use USDT in tests that teleport fees. +pub const USDT_PARA_ID: u32 = 2003; + +// This child parachain is not configured as trusted reserve or teleport location for any assets. +pub const OTHER_PARA_ID: u32 = 2009; + parameter_types! 
{ pub const RelayLocation: MultiLocation = Here.into_location(); + pub const NativeAsset: MultiAsset = MultiAsset { + fun: Fungible(10), + id: Concrete(Here.into_location()), + }; + pub const SystemParachainLocation: MultiLocation = MultiLocation { + parents: 0, + interior: X1(Parachain(SOME_SYSTEM_PARA)) + }; + pub const ForeignReserveLocation: MultiLocation = MultiLocation { + parents: 0, + interior: X1(Parachain(FOREIGN_ASSET_RESERVE_PARA_ID)) + }; + pub const ForeignAsset: MultiAsset = MultiAsset { + fun: Fungible(10), + id: Concrete(MultiLocation { + parents: 0, + interior: X2(Parachain(FOREIGN_ASSET_RESERVE_PARA_ID), FOREIGN_ASSET_INNER_JUNCTION), + }), + }; + pub const UsdcReserveLocation: MultiLocation = MultiLocation { + parents: 0, + interior: X1(Parachain(USDC_RESERVE_PARA_ID)) + }; + pub const Usdc: MultiAsset = MultiAsset { + fun: Fungible(10), + id: Concrete(MultiLocation { + parents: 0, + interior: X2(Parachain(USDC_RESERVE_PARA_ID), USDC_INNER_JUNCTION), + }), + }; + pub const UsdtTeleportLocation: MultiLocation = MultiLocation { + parents: 0, + interior: X1(Parachain(USDT_PARA_ID)) + }; + pub const Usdt: MultiAsset = MultiAsset { + fun: Fungible(10), + id: Concrete(MultiLocation { + parents: 0, + interior: X1(Parachain(USDT_PARA_ID)), + }), + }; pub const AnyNetwork: Option = None; pub UniversalLocation: InteriorMultiLocation = Here; pub UnitWeightCost: u64 = 1_000; + pub CheckingAccount: AccountId = XcmPallet::check_account(); } -pub type SovereignAccountOf = - (ChildParachainConvertsVia, AccountId32Aliases); +pub type SovereignAccountOf = ( + ChildParachainConvertsVia, + AccountId32Aliases, + HashedDescription, +); -pub type LocalAssetTransactor = - XcmCurrencyAdapter, SovereignAccountOf, AccountId, ()>; +pub type ForeignAssetsConvertedConcreteId = MatchedConvertedConcreteId< + MultiLocation, + Balance, + // Excludes relay/parent chain currency + EverythingBut<(Equals,)>, + Identity, + JustTry, +>; + +pub type AssetTransactors = ( + XcmCurrencyAdapter, SovereignAccountOf, AccountId, ()>, + FungiblesAdapter< + Assets, + ForeignAssetsConvertedConcreteId, + SovereignAccountOf, + AccountId, + NoChecking, + CheckingAccount, + >, +); type LocalOriginConverter = ( SovereignSignedViaLocation, @@ -303,7 +427,12 @@ type LocalOriginConverter = ( parameter_types! 
{ pub const BaseXcmWeight: Weight = Weight::from_parts(1_000, 1_000); pub CurrencyPerSecondPerByte: (AssetId, u128, u128) = (Concrete(RelayLocation::get()), 1, 1); - pub TrustedAssets: (MultiAssetFilter, MultiLocation) = (All.into(), Here.into()); + pub TrustedLocal: (MultiAssetFilter, MultiLocation) = (All.into(), Here.into()); + pub TrustedSystemPara: (MultiAssetFilter, MultiLocation) = (NativeAsset::get().into(), SystemParachainLocation::get()); + pub TrustedUsdt: (MultiAssetFilter, MultiLocation) = (Usdt::get().into(), UsdtTeleportLocation::get()); + pub TeleportUsdtToForeign: (MultiAssetFilter, MultiLocation) = (Usdt::get().into(), ForeignReserveLocation::get()); + pub TrustedForeign: (MultiAssetFilter, MultiLocation) = (ForeignAsset::get().into(), ForeignReserveLocation::get()); + pub TrustedUsdc: (MultiAssetFilter, MultiLocation) = (Usdc::get().into(), UsdcReserveLocation::get()); pub const MaxInstructions: u32 = 100; pub const MaxAssetsIntoHolding: u32 = 64; pub XcmFeesTargetAccount: AccountId = AccountId::new([167u8; 32]); @@ -323,14 +452,21 @@ pub type Barrier = ( AllowSubscriptionsFrom, ); +pub type XcmRouter = (TestPaidForPara3000SendXcm, TestSendXcmErrX8, TestSendXcm); + pub struct XcmConfig; impl xcm_executor::Config for XcmConfig { type RuntimeCall = RuntimeCall; - type XcmSender = (TestPaidForPara3000SendXcm, TestSendXcm); - type AssetTransactor = LocalAssetTransactor; + type XcmSender = XcmRouter; + type AssetTransactor = AssetTransactors; type OriginConverter = LocalOriginConverter; - type IsReserve = (); - type IsTeleporter = Case; + type IsReserve = (Case, Case); + type IsTeleporter = ( + Case, + Case, + Case, + Case, + ); type UniversalLocation = UniversalLocation; type Barrier = Barrier; type Weigher = FixedWeightBounds; @@ -360,15 +496,10 @@ parameter_types! { pub static AdvertisedXcmVersion: pallet_xcm::XcmVersion = 3; } -#[cfg(feature = "runtime-benchmarks")] -parameter_types! 
{ - pub ReachableDest: Option = Some(Parachain(1000).into()); -} - impl pallet_xcm::Config for Test { type RuntimeEvent = RuntimeEvent; type SendXcmOrigin = xcm_builder::EnsureXcmOrigin; - type XcmRouter = (TestSendXcmErrX8, TestPaidForPara3000SendXcm, TestSendXcm); + type XcmRouter = XcmRouter; type ExecuteXcmOrigin = xcm_builder::EnsureXcmOrigin; type XcmExecuteFilter = Everything; type XcmExecutor = XcmExecutor; @@ -380,6 +511,7 @@ impl pallet_xcm::Config for Test { type RuntimeCall = RuntimeCall; const VERSION_DISCOVERY_QUEUE_SIZE: u32 = 100; type AdvertisedXcmVersion = AdvertisedXcmVersion; + type AdminOrigin = EnsureRoot; type TrustedLockers = (); type SovereignAccountOf = AccountId32Aliases<(), AccountId32>; type Currency = Balances; @@ -388,9 +520,6 @@ impl pallet_xcm::Config for Test { type MaxRemoteLockConsumers = frame_support::traits::ConstU32<0>; type RemoteLockConsumerIdentifier = (); type WeightInfo = TestWeightInfo; - #[cfg(feature = "runtime-benchmarks")] - type ReachableDest = ReachableDest; - type AdminOrigin = EnsureRoot; } impl origin::Config for Test {} @@ -401,6 +530,24 @@ impl pallet_test_notifier::Config for Test { type RuntimeCall = RuntimeCall; } +#[cfg(feature = "runtime-benchmarks")] +impl super::benchmarking::Config for Test { + fn reachable_dest() -> Option { + Some(Parachain(1000).into()) + } + + fn teleportable_asset_and_dest() -> Option<(MultiAsset, MultiLocation)> { + Some((NativeAsset::get(), SystemParachainLocation::get())) + } + + fn reserve_transferable_asset_and_dest() -> Option<(MultiAsset, MultiLocation)> { + Some(( + MultiAsset { fun: Fungible(10), id: Concrete(Here.into_location()) }, + Parachain(OTHER_PARA_ID).into(), + )) + } +} + pub(crate) fn last_event() -> RuntimeEvent { System::events().pop().expect("RuntimeEvent expected").event } @@ -416,10 +563,10 @@ pub(crate) fn buy_execution(fees: impl Into) -> Instruction { pub(crate) fn buy_limited_execution( fees: impl Into, - weight: Weight, + weight_limit: WeightLimit, ) -> Instruction { use xcm::latest::prelude::*; - BuyExecution { fees: fees.into(), weight_limit: Limited(weight) } + BuyExecution { fees: fees.into(), weight_limit } } pub(crate) fn new_test_ext_with_balances( diff --git a/polkadot/xcm/pallet-xcm/src/tests/assets_transfer.rs b/polkadot/xcm/pallet-xcm/src/tests/assets_transfer.rs new file mode 100644 index 00000000000..b02b0fd33c3 --- /dev/null +++ b/polkadot/xcm/pallet-xcm/src/tests/assets_transfer.rs @@ -0,0 +1,1405 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . 
+ +#![cfg(test)] + +use crate::{ + mock::*, + tests::{ALICE, BOB, FEE_AMOUNT, INITIAL_BALANCE, SEND_AMOUNT}, +}; +use frame_support::{ + assert_ok, + traits::{tokens::fungibles::Inspect, Currency}, + weights::Weight, +}; +use polkadot_parachain_primitives::primitives::Id as ParaId; +use sp_runtime::{traits::AccountIdConversion, DispatchError, ModuleError}; +use xcm::prelude::*; +use xcm_executor::traits::ConvertLocation; + +// Helper function to deduplicate testing different teleport types. +fn do_test_and_verify_teleport_assets( + expected_beneficiary: MultiLocation, + call: Call, + expected_weight_limit: WeightLimit, +) { + let balances = vec![ + (ALICE, INITIAL_BALANCE), + (ParaId::from(OTHER_PARA_ID).into_account_truncating(), INITIAL_BALANCE), + ]; + new_test_ext_with_balances(balances).execute_with(|| { + let weight = BaseXcmWeight::get() * 3; + assert_eq!(Balances::total_balance(&ALICE), INITIAL_BALANCE); + // call extrinsic + call(); + assert_eq!(Balances::total_balance(&ALICE), INITIAL_BALANCE - SEND_AMOUNT); + assert_eq!( + sent_xcm(), + vec![( + RelayLocation::get().into(), + Xcm(vec![ + ReceiveTeleportedAsset((Here, SEND_AMOUNT).into()), + ClearOrigin, + buy_limited_execution((Here, SEND_AMOUNT), expected_weight_limit), + DepositAsset { + assets: AllCounted(1).into(), + beneficiary: expected_beneficiary + }, + ]), + )] + ); + let versioned_sent = VersionedXcm::from(sent_xcm().into_iter().next().unwrap().1); + let _check_v2_ok: xcm::v2::Xcm<()> = versioned_sent.try_into().unwrap(); + assert_eq!( + last_event(), + RuntimeEvent::XcmPallet(crate::Event::Attempted { outcome: Outcome::Complete(weight) }) + ); + }); +} + +/// Test `teleport_assets` +/// +/// Asserts that the sender's balance is decreased as a result of execution of +/// local effects. +#[test] +fn teleport_assets_works() { + let beneficiary: MultiLocation = AccountId32 { network: None, id: BOB.into() }.into(); + do_test_and_verify_teleport_assets( + beneficiary, + || { + assert_ok!(XcmPallet::teleport_assets( + RuntimeOrigin::signed(ALICE), + Box::new(RelayLocation::get().into()), + Box::new(beneficiary.into()), + Box::new((Here, SEND_AMOUNT).into()), + 0, + )); + }, + Unlimited, + ); +} + +/// Test `limited_teleport_assets` +/// +/// Asserts that the sender's balance is decreased as a result of execution of +/// local effects. +#[test] +fn limited_teleport_assets_works() { + let beneficiary: MultiLocation = AccountId32 { network: None, id: BOB.into() }.into(); + let weight_limit = WeightLimit::Limited(Weight::from_parts(5000, 5000)); + let expected_weight_limit = weight_limit.clone(); + do_test_and_verify_teleport_assets( + beneficiary, + || { + assert_ok!(XcmPallet::limited_teleport_assets( + RuntimeOrigin::signed(ALICE), + Box::new(RelayLocation::get().into()), + Box::new(beneficiary.into()), + Box::new((Here, SEND_AMOUNT).into()), + 0, + weight_limit, + )); + }, + expected_weight_limit, + ); +} + +/// Test `reserve_transfer_assets_with_paid_router_works` +/// +/// Asserts that the sender's balance is decreased and the beneficiary's balance +/// is increased. Verifies the correct message is sent and event is emitted. +/// Verifies that XCM router fees (`SendXcm::validate` -> `MultiAssets`) are withdrawn from correct +/// user account and deposited to a correct target account (`XcmFeesTargetAccount`). 
+#[test]
+fn reserve_transfer_assets_with_paid_router_works() {
+	let user_account = AccountId::from(XCM_FEES_NOT_WAIVED_USER_ACCOUNT);
+	let paid_para_id = Para3000::get();
+	let balances = vec![
+		(user_account.clone(), INITIAL_BALANCE),
+		(ParaId::from(paid_para_id).into_account_truncating(), INITIAL_BALANCE),
+		(XcmFeesTargetAccount::get(), INITIAL_BALANCE),
+	];
+	new_test_ext_with_balances(balances).execute_with(|| {
+		let xcm_router_fee_amount = Para3000PaymentAmount::get();
+		let weight = BaseXcmWeight::get();
+		let dest: MultiLocation =
+			Junction::AccountId32 { network: None, id: user_account.clone().into() }.into();
+		assert_eq!(Balances::total_balance(&user_account), INITIAL_BALANCE);
+		assert_ok!(XcmPallet::reserve_transfer_assets(
+			RuntimeOrigin::signed(user_account.clone()),
+			Box::new(Parachain(paid_para_id).into()),
+			Box::new(dest.into()),
+			Box::new((Here, SEND_AMOUNT).into()),
+			0,
+		));
+
+		// XCM_FEES_NOT_WAIVED_USER_ACCOUNT spent amount
+		assert_eq!(
+			Balances::free_balance(user_account),
+			INITIAL_BALANCE - SEND_AMOUNT - xcm_router_fee_amount
+		);
+
+		// Destination account (parachain account) has amount
+		let para_acc: AccountId = ParaId::from(paid_para_id).into_account_truncating();
+		assert_eq!(Balances::free_balance(para_acc), INITIAL_BALANCE + SEND_AMOUNT);
+
+		// XcmFeesTargetAccount should have received the xcm_router_fee_amount
+		assert_eq!(
+			Balances::free_balance(XcmFeesTargetAccount::get()),
+			INITIAL_BALANCE + xcm_router_fee_amount
+		);
+
+		let dest_para: MultiLocation = Parachain(paid_para_id).into();
+		assert_eq!(
+			sent_xcm(),
+			vec![(
+				dest_para,
+				Xcm(vec![
+					ReserveAssetDeposited((Parent, SEND_AMOUNT).into()),
+					ClearOrigin,
+					buy_execution((Parent, SEND_AMOUNT)),
+					DepositAsset { assets: AllCounted(1).into(), beneficiary: dest },
+				]),
+			)]
+		);
+		let mut last_events = last_events(5).into_iter();
+		assert_eq!(
+			last_events.next().unwrap(),
+			RuntimeEvent::XcmPallet(crate::Event::Attempted { outcome: Outcome::Complete(weight) })
+		);
+		// balances events
+		last_events.next().unwrap();
+		last_events.next().unwrap();
+		assert_eq!(
+			last_events.next().unwrap(),
+			RuntimeEvent::XcmPallet(crate::Event::FeesPaid {
+				paying: dest,
+				fees: Para3000PaymentMultiAssets::get(),
+			})
+		);
+		assert!(matches!(
+			last_events.next().unwrap(),
+			RuntimeEvent::XcmPallet(crate::Event::Sent { .. })
+		));
+	});
+}
+
+fn set_up_foreign_asset(
+	reserve_para_id: u32,
+	inner_junction: Option<Junction>,
+	initial_amount: u128,
+	is_sufficient: bool,
+) -> (MultiLocation, AccountId, MultiLocation) {
+	let reserve_location =
+		RelayLocation::get().pushed_with_interior(Parachain(reserve_para_id)).unwrap();
+	let reserve_sovereign_account =
+		SovereignAccountOf::convert_location(&reserve_location).unwrap();
+
+	let foreign_asset_id_multilocation = if let Some(junction) = inner_junction {
+		reserve_location.pushed_with_interior(junction).unwrap()
+	} else {
+		reserve_location
+	};
+
+	// create sufficient (to be used as fees as well) foreign asset (0 total issuance)
+	assert_ok!(Assets::force_create(
+		RuntimeOrigin::root(),
+		foreign_asset_id_multilocation,
+		BOB,
+		is_sufficient,
+		1
+	));
+	// this asset should have been teleported/reserve-transferred in, but for this test we just
+	// mint it locally.
+ assert_ok!(Assets::mint( + RuntimeOrigin::signed(BOB), + foreign_asset_id_multilocation, + ALICE, + initial_amount + )); + + (reserve_location, reserve_sovereign_account, foreign_asset_id_multilocation) +} + +// Helper function that provides correct `fee_index` after `sort()` done by +// `vec![MultiAsset, MultiAsset].into()`. +fn into_multiassets_checked( + fee_asset: MultiAsset, + transfer_asset: MultiAsset, +) -> (MultiAssets, usize, MultiAsset, MultiAsset) { + let assets: MultiAssets = vec![fee_asset.clone(), transfer_asset.clone()].into(); + let fee_index = if assets.get(0).unwrap().eq(&fee_asset) { 0 } else { 1 }; + (assets, fee_index, fee_asset, transfer_asset) +} + +/// Test `limited_reserve_transfer_assets` with local asset reserve and local fee reserve. +/// +/// Transferring native asset (local reserve) to some `OTHER_PARA_ID` (no teleport trust). +/// Using native asset for fees as well. +/// +/// ```nocompile +/// Here (source) OTHER_PARA_ID (destination) +/// | `assets` reserve +/// | `fees` reserve +/// | +/// | 1. execute `TransferReserveAsset(assets_and_fees_batched_together)` +/// | \--> sends `ReserveAssetDeposited(both), ClearOrigin, BuyExecution(fees), DepositAsset` +/// \------------------------------------------> +/// ``` +#[test] +fn limited_reserve_transfer_assets_with_local_asset_reserve_and_local_fee_reserve_works() { + let balances = vec![ + (ALICE, INITIAL_BALANCE), + (ParaId::from(OTHER_PARA_ID).into_account_truncating(), INITIAL_BALANCE), + ]; + + let beneficiary: MultiLocation = + Junction::AccountId32 { network: None, id: ALICE.into() }.into(); + let weight_limit = WeightLimit::Limited(Weight::from_parts(5000, 5000)); + let expected_weight_limit = weight_limit.clone(); + let expected_beneficiary = beneficiary; + let dest: MultiLocation = Parachain(OTHER_PARA_ID).into(); + + new_test_ext_with_balances(balances).execute_with(|| { + let weight = BaseXcmWeight::get(); + assert_eq!(Balances::total_balance(&ALICE), INITIAL_BALANCE); + // call extrinsic + assert_ok!(XcmPallet::limited_reserve_transfer_assets( + RuntimeOrigin::signed(ALICE), + Box::new(dest.into()), + Box::new(beneficiary.into()), + Box::new((Here, SEND_AMOUNT).into()), + 0, + weight_limit, + )); + // Alice spent amount + assert_eq!(Balances::free_balance(ALICE), INITIAL_BALANCE - SEND_AMOUNT); + // Destination account (parachain account) has amount + let para_acc: AccountId = ParaId::from(OTHER_PARA_ID).into_account_truncating(); + assert_eq!(Balances::free_balance(para_acc), INITIAL_BALANCE + SEND_AMOUNT); + assert_eq!( + sent_xcm(), + vec![( + dest, + Xcm(vec![ + ReserveAssetDeposited((Parent, SEND_AMOUNT).into()), + ClearOrigin, + buy_limited_execution((Parent, SEND_AMOUNT), expected_weight_limit), + DepositAsset { + assets: AllCounted(1).into(), + beneficiary: expected_beneficiary + }, + ]), + )] + ); + let mut last_events = last_events(3).into_iter(); + assert_eq!( + last_events.next().unwrap(), + RuntimeEvent::XcmPallet(crate::Event::Attempted { outcome: Outcome::Complete(weight) }) + ); + assert_eq!( + last_events.next().unwrap(), + RuntimeEvent::XcmPallet(crate::Event::FeesPaid { + paying: expected_beneficiary, + fees: MultiAssets::new(), + }) + ); + assert!(matches!( + last_events.next().unwrap(), + RuntimeEvent::XcmPallet(crate::Event::Sent { .. }) + )); + }); +} + +/// Test `reserve_transfer_assets` with destination asset reserve and local fee reserve. 
+/// +/// Transferring foreign asset (`FOREIGN_ASSET_RESERVE_PARA_ID` reserve) to +/// `FOREIGN_ASSET_RESERVE_PARA_ID` (no teleport trust). +/// Using native asset (local reserve) for fees. +/// +/// ```nocompile +/// Here (source) FOREIGN_ASSET_RESERVE_PARA_ID (destination) +/// | `fees` reserve `assets` reserve +/// | +/// | 1. execute `TransferReserveAsset(fees)` +/// | \-> sends `ReserveAssetDeposited(fees), ClearOrigin, BuyExecution(fees), DepositAsset` +/// | 2. execute `InitiateReserveWithdraw(assets)` +/// | \--> sends `WithdrawAsset(assets), ClearOrigin, BuyExecution(fees), DepositAsset` +/// \------------------------------------------> +/// ``` +/// +/// Asserts that the sender's balance is decreased and the beneficiary's balance +/// is increased. Verifies the correct message is sent and event is emitted. +#[test] +fn reserve_transfer_assets_with_destination_asset_reserve_and_local_fee_reserve_works() { + let weight = BaseXcmWeight::get() * 3; + let balances = vec![(ALICE, INITIAL_BALANCE)]; + let beneficiary: MultiLocation = + Junction::AccountId32 { network: None, id: ALICE.into() }.into(); + new_test_ext_with_balances(balances).execute_with(|| { + // create non-sufficient foreign asset BLA (0 total issuance) + let foreign_initial_amount = 142; + let (reserve_location, reserve_sovereign_account, foreign_asset_id_multilocation) = + set_up_foreign_asset( + FOREIGN_ASSET_RESERVE_PARA_ID, + Some(FOREIGN_ASSET_INNER_JUNCTION), + foreign_initial_amount, + false, + ); + + // transfer destination is reserve location (no teleport trust) + let dest = reserve_location; + + let (assets, fee_index, fee_asset, xfer_asset) = into_multiassets_checked( + // native asset for fee - local reserve + (MultiLocation::here(), FEE_AMOUNT).into(), + // foreign asset to transfer - destination reserve + (foreign_asset_id_multilocation, SEND_AMOUNT).into(), + ); + + // reanchor according to test-case + let context = UniversalLocation::get(); + let expected_fee = fee_asset.reanchored(&dest, context).unwrap(); + let expected_asset = xfer_asset.reanchored(&dest, context).unwrap(); + + // balances checks before + assert_eq!(Assets::balance(foreign_asset_id_multilocation, ALICE), foreign_initial_amount); + assert_eq!(Balances::free_balance(ALICE), INITIAL_BALANCE); + + // do the transfer + assert_ok!(XcmPallet::limited_reserve_transfer_assets( + RuntimeOrigin::signed(ALICE), + Box::new(dest.into()), + Box::new(beneficiary.into()), + Box::new(assets.into()), + fee_index as u32, + Unlimited, + )); + + let mut last_events = last_events(3).into_iter(); + assert_eq!( + last_events.next().unwrap(), + RuntimeEvent::XcmPallet(crate::Event::Attempted { outcome: Outcome::Complete(weight) }) + ); + + // Alice spent (transferred) amount + assert_eq!( + Assets::balance(foreign_asset_id_multilocation, ALICE), + foreign_initial_amount - SEND_AMOUNT + ); + // Alice used native asset for fees + assert_eq!(Balances::free_balance(ALICE), INITIAL_BALANCE - FEE_AMOUNT); + // Destination account (parachain account) added native reserve used as fee to balances + assert_eq!(Balances::free_balance(reserve_sovereign_account.clone()), FEE_AMOUNT); + assert_eq!(Assets::balance(foreign_asset_id_multilocation, reserve_sovereign_account), 0); + // Verify total and active issuance of foreign BLA have decreased (burned on + // reserve-withdraw) + let expected_issuance = foreign_initial_amount - SEND_AMOUNT; + assert_eq!(Assets::total_issuance(foreign_asset_id_multilocation), expected_issuance); + 
assert_eq!(Assets::active_issuance(foreign_asset_id_multilocation), expected_issuance); + + // Verify sent XCM program + assert_eq!( + sent_xcm(), + vec![( + dest, + // `fees` are being sent through local-reserve transfer because fee reserve is + // local chain; `assets` are burned on source and withdrawn from SA here + Xcm(vec![ + ReserveAssetDeposited((Parent, FEE_AMOUNT).into()), + buy_limited_execution(expected_fee, Unlimited), + WithdrawAsset(expected_asset.into()), + ClearOrigin, + DepositAsset { assets: AllCounted(2).into(), beneficiary }, + ]) + )] + ); + assert_eq!( + last_events.next().unwrap(), + RuntimeEvent::XcmPallet(crate::Event::FeesPaid { + paying: beneficiary, + fees: MultiAssets::new(), + }) + ); + assert!(matches!( + last_events.next().unwrap(), + RuntimeEvent::XcmPallet(crate::Event::Sent { .. }) + )); + }); +} + +/// Test `reserve_transfer_assets` with remote asset reserve and local fee reserve. +/// +/// Transferring foreign asset (reserve on `FOREIGN_ASSET_RESERVE_PARA_ID`) to `OTHER_PARA_ID`. +/// Using native (local reserve) as fee should be disallowed. +#[test] +fn reserve_transfer_assets_with_remote_asset_reserve_and_local_fee_reserve_disallowed() { + let balances = vec![(ALICE, INITIAL_BALANCE)]; + let beneficiary: MultiLocation = + Junction::AccountId32 { network: None, id: ALICE.into() }.into(); + new_test_ext_with_balances(balances).execute_with(|| { + // create non-sufficient foreign asset BLA (0 total issuance) + let foreign_initial_amount = 142; + let (_, _, foreign_asset_id_multilocation) = set_up_foreign_asset( + FOREIGN_ASSET_RESERVE_PARA_ID, + Some(FOREIGN_ASSET_INNER_JUNCTION), + foreign_initial_amount, + false, + ); + + // transfer destination is OTHER_PARA_ID (foreign asset needs to go through its reserve + // chain) + let dest = RelayLocation::get().pushed_with_interior(Parachain(OTHER_PARA_ID)).unwrap(); + + let (assets, fee_index, _, _) = into_multiassets_checked( + // native asset for fee - local reserve + (MultiLocation::here(), FEE_AMOUNT).into(), + // foreign asset to transfer - remote reserve + (foreign_asset_id_multilocation, SEND_AMOUNT).into(), + ); + + // balances checks before + assert_eq!(Assets::balance(foreign_asset_id_multilocation, ALICE), foreign_initial_amount); + assert_eq!(Balances::free_balance(ALICE), INITIAL_BALANCE); + + // try the transfer + let result = XcmPallet::limited_reserve_transfer_assets( + RuntimeOrigin::signed(ALICE), + Box::new(dest.into()), + Box::new(beneficiary.into()), + Box::new(assets.into()), + fee_index as u32, + Unlimited, + ); + assert_eq!( + result, + Err(DispatchError::Module(ModuleError { + index: 4, + error: [22, 0, 0, 0], + message: Some("InvalidAssetUnsupportedReserve") + })) + ); + + // Alice transferred nothing + assert_eq!(Assets::balance(foreign_asset_id_multilocation, ALICE), foreign_initial_amount); + // Alice spent native asset for fees + assert_eq!(Balances::free_balance(ALICE), INITIAL_BALANCE); + // Verify total and active issuance of foreign BLA asset have decreased (burned on + // reserve-withdraw) + let expected_issuance = foreign_initial_amount; + assert_eq!(Assets::total_issuance(foreign_asset_id_multilocation), expected_issuance); + assert_eq!(Assets::active_issuance(foreign_asset_id_multilocation), expected_issuance); + }); +} + +/// Test `reserve_transfer_assets` with local asset reserve and destination fee reserve. +/// +/// Transferring native asset (local reserve) to `USDC_RESERVE_PARA_ID` (no teleport trust). 
Using +/// foreign asset (`USDC_RESERVE_PARA_ID` reserve) for fees. +/// +/// ```nocompile +/// Here (source) USDC_RESERVE_PARA_ID (destination) +/// | `assets` reserve `fees` reserve +/// | +/// | 1. execute `InitiateReserveWithdraw(fees)` +/// | \--> sends `WithdrawAsset(fees), ClearOrigin, BuyExecution(fees), DepositAsset` +/// | 2. execute `TransferReserveAsset(assets)` +/// | \-> sends `ReserveAssetDeposited(assets), ClearOrigin, BuyExecution(fees), DepositAsset` +/// \------------------------------------------> +/// ``` +#[test] +fn reserve_transfer_assets_with_local_asset_reserve_and_destination_fee_reserve_works() { + let balances = vec![(ALICE, INITIAL_BALANCE)]; + let beneficiary: MultiLocation = + Junction::AccountId32 { network: None, id: ALICE.into() }.into(); + new_test_ext_with_balances(balances).execute_with(|| { + // create sufficient foreign asset USDC (0 total issuance) + let usdc_initial_local_amount = 142; + let (usdc_reserve_location, usdc_chain_sovereign_account, usdc_id_multilocation) = + set_up_foreign_asset( + USDC_RESERVE_PARA_ID, + Some(USDC_INNER_JUNCTION), + usdc_initial_local_amount, + true, + ); + + // native assets transfer to fee reserve location (no teleport trust) + let dest = usdc_reserve_location; + + let (assets, fee_index, fee_asset, xfer_asset) = into_multiassets_checked( + // usdc for fees (is sufficient on local chain too) - destination reserve + (usdc_id_multilocation, FEE_AMOUNT).into(), + // native asset to transfer (not used for fees) - local reserve + (MultiLocation::here(), SEND_AMOUNT).into(), + ); + + // reanchor according to test-case + let context = UniversalLocation::get(); + let expected_fee = fee_asset.reanchored(&dest, context).unwrap(); + let expected_asset = xfer_asset.reanchored(&dest, context).unwrap(); + + // balances checks before + assert_eq!(Assets::balance(usdc_id_multilocation, ALICE), usdc_initial_local_amount); + assert_eq!(Balances::free_balance(ALICE), INITIAL_BALANCE); + + // do the transfer + assert_ok!(XcmPallet::limited_reserve_transfer_assets( + RuntimeOrigin::signed(ALICE), + Box::new(dest.into()), + Box::new(beneficiary.into()), + Box::new(assets.into()), + fee_index as u32, + Unlimited, + )); + let weight = BaseXcmWeight::get() * 3; + let mut last_events = last_events(3).into_iter(); + assert_eq!( + last_events.next().unwrap(), + RuntimeEvent::XcmPallet(crate::Event::Attempted { outcome: Outcome::Complete(weight) }) + ); + assert_eq!( + last_events.next().unwrap(), + RuntimeEvent::XcmPallet(crate::Event::FeesPaid { + paying: beneficiary, + fees: MultiAssets::new(), + }) + ); + assert!(matches!( + last_events.next().unwrap(), + RuntimeEvent::XcmPallet(crate::Event::Sent { .. 
}) + )); + + // Alice spent (fees) amount + assert_eq!( + Assets::balance(usdc_id_multilocation, ALICE), + usdc_initial_local_amount - FEE_AMOUNT + ); + // Alice used native asset for transfer + assert_eq!(Balances::free_balance(ALICE), INITIAL_BALANCE - SEND_AMOUNT); + // Sovereign account of dest parachain holds `SEND_AMOUNT` native asset in local reserve + assert_eq!(Balances::free_balance(usdc_chain_sovereign_account.clone()), SEND_AMOUNT); + assert_eq!(Assets::balance(usdc_id_multilocation, usdc_chain_sovereign_account), 0); + // Verify total and active issuance of USDC have decreased (burned on reserve-withdraw) + let expected_issuance = usdc_initial_local_amount - FEE_AMOUNT; + assert_eq!(Assets::total_issuance(usdc_id_multilocation), expected_issuance); + assert_eq!(Assets::active_issuance(usdc_id_multilocation), expected_issuance); + + // Verify sent XCM program + assert_eq!( + sent_xcm(), + vec![( + dest, + Xcm(vec![ + // fees are being sent through destination-reserve transfer because fee reserve + // is destination chain + WithdrawAsset(expected_fee.clone().into()), + buy_limited_execution(expected_fee, Unlimited), + // transfer is through local-reserve transfer because `assets` (native asset) + // have local reserve + ReserveAssetDeposited(expected_asset.into()), + ClearOrigin, + DepositAsset { assets: AllCounted(2).into(), beneficiary }, + ]) + )] + ); + }); +} + +/// Test `reserve_transfer_assets` with destination asset reserve and destination fee reserve. +/// +/// ```nocompile +/// Here (source) FOREIGN_ASSET_RESERVE_PARA_ID (destination) +/// | `fees` reserve +/// | `assets` reserve +/// | +/// | 1. execute `InitiateReserveWithdraw(assets_and_fees_batched_together)` +/// | \--> sends `WithdrawAsset(batch), ClearOrigin, BuyExecution(fees), DepositAsset` +/// \------------------------------------------> +/// ``` +#[test] +fn reserve_transfer_assets_with_destination_asset_reserve_and_destination_fee_reserve_works() { + let balances = vec![(ALICE, INITIAL_BALANCE)]; + let beneficiary: MultiLocation = + Junction::AccountId32 { network: None, id: ALICE.into() }.into(); + new_test_ext_with_balances(balances).execute_with(|| { + // we'll send just this foreign asset back to its reserve location and use it for fees as + // well + let foreign_initial_amount = 142; + let (reserve_location, reserve_sovereign_account, foreign_asset_id_multilocation) = + set_up_foreign_asset( + FOREIGN_ASSET_RESERVE_PARA_ID, + Some(FOREIGN_ASSET_INNER_JUNCTION), + foreign_initial_amount, + true, + ); + + // transfer destination is reserve location + let dest = reserve_location; + let assets: MultiAssets = vec![(foreign_asset_id_multilocation, SEND_AMOUNT).into()].into(); + let fee_index = 0; + + // reanchor according to test-case + let mut expected_assets = assets.clone(); + expected_assets.reanchor(&dest, UniversalLocation::get()).unwrap(); + + // balances checks before + assert_eq!(Assets::balance(foreign_asset_id_multilocation, ALICE), foreign_initial_amount); + assert_eq!(Balances::free_balance(ALICE), INITIAL_BALANCE); + + // do the transfer + assert_ok!(XcmPallet::limited_reserve_transfer_assets( + RuntimeOrigin::signed(ALICE), + Box::new(dest.into()), + Box::new(beneficiary.into()), + Box::new(assets.into()), + fee_index, + Unlimited, + )); + + let weight = BaseXcmWeight::get() * 2; + let mut last_events = last_events(3).into_iter(); + assert_eq!( + last_events.next().unwrap(), + RuntimeEvent::XcmPallet(crate::Event::Attempted { outcome: Outcome::Complete(weight) }) + ); + assert_eq!( + 
last_events.next().unwrap(), + RuntimeEvent::XcmPallet(crate::Event::FeesPaid { + paying: beneficiary, + fees: MultiAssets::new(), + }) + ); + assert!(matches!( + last_events.next().unwrap(), + RuntimeEvent::XcmPallet(crate::Event::Sent { .. }) + )); + + // Alice spent (transferred) amount + assert_eq!( + Assets::balance(foreign_asset_id_multilocation, ALICE), + foreign_initial_amount - SEND_AMOUNT + ); + // Alice's native asset balance is untouched + assert_eq!(Balances::free_balance(ALICE), INITIAL_BALANCE); + // Reserve sovereign account has same balances + assert_eq!(Balances::free_balance(reserve_sovereign_account.clone()), 0); + assert_eq!(Assets::balance(foreign_asset_id_multilocation, reserve_sovereign_account), 0); + // Verify total and active issuance of foreign BLA have decreased (burned on + // reserve-withdraw) + let expected_issuance = foreign_initial_amount - SEND_AMOUNT; + assert_eq!(Assets::total_issuance(foreign_asset_id_multilocation), expected_issuance); + assert_eq!(Assets::active_issuance(foreign_asset_id_multilocation), expected_issuance); + + // Verify sent XCM program + assert_eq!( + sent_xcm(), + vec![( + Parachain(FOREIGN_ASSET_RESERVE_PARA_ID).into(), + Xcm(vec![ + WithdrawAsset(expected_assets.clone()), + ClearOrigin, + buy_limited_execution(expected_assets.get(0).unwrap().clone(), Unlimited), + DepositAsset { assets: AllCounted(1).into(), beneficiary }, + ]), + )] + ); + }); +} + +/// Test `reserve_transfer_assets` with remote asset reserve and destination fee reserve is +/// disallowed. +/// +/// Transferring foreign asset (reserve on `FOREIGN_ASSET_RESERVE_PARA_ID`) to +/// `USDC_RESERVE_PARA_ID`. Using USDC (destination reserve) as fee. +#[test] +fn reserve_transfer_assets_with_remote_asset_reserve_and_destination_fee_reserve_disallowed() { + let balances = vec![(ALICE, INITIAL_BALANCE)]; + let beneficiary: MultiLocation = + Junction::AccountId32 { network: None, id: ALICE.into() }.into(); + new_test_ext_with_balances(balances).execute_with(|| { + // create sufficient foreign asset USDC (0 total issuance) + let usdc_initial_local_amount = 42; + let (usdc_chain, _, usdc_id_multilocation) = set_up_foreign_asset( + USDC_RESERVE_PARA_ID, + Some(USDC_INNER_JUNCTION), + usdc_initial_local_amount, + true, + ); + + // create non-sufficient foreign asset BLA (0 total issuance) + let foreign_initial_amount = 142; + let (_, _, foreign_asset_id_multilocation) = set_up_foreign_asset( + FOREIGN_ASSET_RESERVE_PARA_ID, + Some(FOREIGN_ASSET_INNER_JUNCTION), + foreign_initial_amount, + false, + ); + + // transfer destination is USDC chain (foreign asset BLA needs to go through its separate + // reserve chain) + let dest = usdc_chain; + + let (assets, fee_index, _, _) = into_multiassets_checked( + // USDC for fees (is sufficient on local chain too) - destination reserve + (usdc_id_multilocation, FEE_AMOUNT).into(), + // foreign asset to transfer (not used for fees) - remote reserve + (foreign_asset_id_multilocation, SEND_AMOUNT).into(), + ); + + // balances checks before + assert_eq!(Assets::balance(usdc_id_multilocation, ALICE), usdc_initial_local_amount); + assert_eq!(Assets::balance(foreign_asset_id_multilocation, ALICE), foreign_initial_amount); + assert_eq!(Balances::free_balance(ALICE), INITIAL_BALANCE); + + // do the transfer + let result = XcmPallet::limited_reserve_transfer_assets( + RuntimeOrigin::signed(ALICE), + Box::new(dest.into()), + Box::new(beneficiary.into()), + Box::new(assets.into()), + fee_index as u32, + Unlimited, + ); + assert_eq!( + result, + 
Err(DispatchError::Module(ModuleError { + index: 4, + error: [22, 0, 0, 0], + message: Some("InvalidAssetUnsupportedReserve") + })) + ); + + // Alice native asset untouched + assert_eq!(Balances::free_balance(ALICE), INITIAL_BALANCE); + assert_eq!(Assets::balance(usdc_id_multilocation, ALICE), usdc_initial_local_amount); + assert_eq!(Assets::balance(foreign_asset_id_multilocation, ALICE), foreign_initial_amount); + let expected_usdc_issuance = usdc_initial_local_amount; + assert_eq!(Assets::total_issuance(usdc_id_multilocation), expected_usdc_issuance); + assert_eq!(Assets::active_issuance(usdc_id_multilocation), expected_usdc_issuance); + let expected_bla_issuance = foreign_initial_amount; + assert_eq!(Assets::total_issuance(foreign_asset_id_multilocation), expected_bla_issuance); + assert_eq!(Assets::active_issuance(foreign_asset_id_multilocation), expected_bla_issuance); + }); +} + +/// Test `reserve_transfer_assets` with local asset reserve and remote fee reserve is disallowed. +/// +/// Transferring native asset (local reserve) to `OTHER_PARA_ID` (no teleport trust). Using foreign +/// asset (`USDC_RESERVE_PARA_ID` remote reserve) for fees. +#[test] +fn reserve_transfer_assets_with_local_asset_reserve_and_remote_fee_reserve_disallowed() { + let balances = vec![(ALICE, INITIAL_BALANCE)]; + let beneficiary: MultiLocation = + Junction::AccountId32 { network: None, id: ALICE.into() }.into(); + new_test_ext_with_balances(balances).execute_with(|| { + // create sufficient foreign asset USDC (0 total issuance) + let usdc_initial_local_amount = 142; + let (_, usdc_chain_sovereign_account, usdc_id_multilocation) = set_up_foreign_asset( + USDC_RESERVE_PARA_ID, + Some(USDC_INNER_JUNCTION), + usdc_initial_local_amount, + true, + ); + + // transfer destination is some other parachain != fee reserve location (no teleport trust) + let dest = RelayLocation::get().pushed_with_interior(Parachain(OTHER_PARA_ID)).unwrap(); + let dest_sovereign_account = SovereignAccountOf::convert_location(&dest).unwrap(); + + let (assets, fee_index, _, _) = into_multiassets_checked( + // USDC for fees (is sufficient on local chain too) - remote reserve + (usdc_id_multilocation, FEE_AMOUNT).into(), + // native asset to transfer (not used for fees) - local reserve + (MultiLocation::here(), SEND_AMOUNT).into(), + ); + + // balances checks before + assert_eq!(Assets::balance(usdc_id_multilocation, ALICE), usdc_initial_local_amount); + assert_eq!(Balances::free_balance(ALICE), INITIAL_BALANCE); + + // do the transfer + let result = XcmPallet::limited_reserve_transfer_assets( + RuntimeOrigin::signed(ALICE), + Box::new(dest.into()), + Box::new(beneficiary.into()), + Box::new(assets.into()), + fee_index as u32, + Unlimited, + ); + assert_eq!( + result, + Err(DispatchError::Module(ModuleError { + index: 4, + error: [22, 0, 0, 0], + message: Some("InvalidAssetUnsupportedReserve") + })) + ); + assert_eq!(Assets::balance(usdc_id_multilocation, ALICE), usdc_initial_local_amount); + assert_eq!(Balances::free_balance(ALICE), INITIAL_BALANCE); + // Sovereign account of reserve parachain is unchanged + assert_eq!(Balances::free_balance(usdc_chain_sovereign_account.clone()), 0); + assert_eq!(Assets::balance(usdc_id_multilocation, usdc_chain_sovereign_account), 0); + assert_eq!(Balances::free_balance(dest_sovereign_account), 0); + let expected_usdc_issuance = usdc_initial_local_amount; + assert_eq!(Assets::total_issuance(usdc_id_multilocation), expected_usdc_issuance); + assert_eq!(Assets::active_issuance(usdc_id_multilocation), 
expected_usdc_issuance); + }); +} + +/// Test `reserve_transfer_assets` with destination asset reserve and remote fee reserve is +/// disallowed. +/// +/// Transferring native asset (local reserve) to `OTHER_PARA_ID` (no teleport trust). Using foreign +/// asset (`USDC_RESERVE_PARA_ID` remote reserve) for fees. +#[test] +fn reserve_transfer_assets_with_destination_asset_reserve_and_remote_fee_reserve_disallowed() { + let balances = vec![(ALICE, INITIAL_BALANCE)]; + let beneficiary: MultiLocation = + Junction::AccountId32 { network: None, id: ALICE.into() }.into(); + new_test_ext_with_balances(balances).execute_with(|| { + // create sufficient foreign asset USDC (0 total issuance) + let usdc_initial_local_amount = 42; + let (_, usdc_chain_sovereign_account, usdc_id_multilocation) = set_up_foreign_asset( + USDC_RESERVE_PARA_ID, + Some(USDC_INNER_JUNCTION), + usdc_initial_local_amount, + true, + ); + + // create non-sufficient foreign asset BLA (0 total issuance) + let foreign_initial_amount = 142; + let (reserve_location, foreign_sovereign_account, foreign_asset_id_multilocation) = + set_up_foreign_asset( + FOREIGN_ASSET_RESERVE_PARA_ID, + Some(FOREIGN_ASSET_INNER_JUNCTION), + foreign_initial_amount, + false, + ); + + // transfer destination is asset reserve location + let dest = reserve_location; + let dest_sovereign_account = foreign_sovereign_account; + + let (assets, fee_index, _, _) = into_multiassets_checked( + // USDC for fees (is sufficient on local chain too) - remote reserve + (usdc_id_multilocation, FEE_AMOUNT).into(), + // foreign asset to transfer (not used for fees) - destination reserve + (foreign_asset_id_multilocation, SEND_AMOUNT).into(), + ); + + // balances checks before + assert_eq!(Assets::balance(usdc_id_multilocation, ALICE), usdc_initial_local_amount); + assert_eq!(Balances::free_balance(ALICE), INITIAL_BALANCE); + + // do the transfer + let result = XcmPallet::limited_reserve_transfer_assets( + RuntimeOrigin::signed(ALICE), + Box::new(dest.into()), + Box::new(beneficiary.into()), + Box::new(assets.into()), + fee_index as u32, + Unlimited, + ); + assert_eq!( + result, + Err(DispatchError::Module(ModuleError { + index: 4, + error: [22, 0, 0, 0], + message: Some("InvalidAssetUnsupportedReserve") + })) + ); + // Alice native asset untouched + assert_eq!(Balances::free_balance(ALICE), INITIAL_BALANCE); + assert_eq!(Assets::balance(usdc_id_multilocation, ALICE), usdc_initial_local_amount); + assert_eq!(Assets::balance(foreign_asset_id_multilocation, ALICE), foreign_initial_amount); + assert_eq!(Balances::free_balance(usdc_chain_sovereign_account.clone()), 0); + assert_eq!(Assets::balance(usdc_id_multilocation, usdc_chain_sovereign_account), 0); + assert_eq!(Balances::free_balance(dest_sovereign_account.clone()), 0); + assert_eq!(Assets::balance(foreign_asset_id_multilocation, dest_sovereign_account), 0); + let expected_usdc_issuance = usdc_initial_local_amount; + assert_eq!(Assets::total_issuance(usdc_id_multilocation), expected_usdc_issuance); + assert_eq!(Assets::active_issuance(usdc_id_multilocation), expected_usdc_issuance); + let expected_bla_issuance = foreign_initial_amount; + assert_eq!(Assets::total_issuance(foreign_asset_id_multilocation), expected_bla_issuance); + assert_eq!(Assets::active_issuance(foreign_asset_id_multilocation), expected_bla_issuance); + }); +} + +/// Test `reserve_transfer_assets` with remote asset reserve and (same) remote fee reserve. +/// +/// Transferring native asset (local reserve) to `OTHER_PARA_ID` (no teleport trust). 
Using foreign +/// asset (`USDC_RESERVE_PARA_ID` remote reserve) for fees. +/// +/// ```nocompile +/// | chain `A` | chain `C` | chain `B` +/// | Here (source) | USDC_RESERVE_PARA_ID | OTHER_PARA_ID (destination) +/// | | `fees` reserve | +/// | | `assets` reserve | +/// | +/// | 1. `A` executes `InitiateReserveWithdraw(both)` dest `C` +/// | -----------------> `C` executes `DepositReserveAsset(both)` dest `B` +/// | --------------------------> `DepositAsset(both)` +/// ``` +#[test] +fn reserve_transfer_assets_with_remote_asset_reserve_and_remote_fee_reserve_works() { + let balances = vec![(ALICE, INITIAL_BALANCE)]; + let beneficiary: MultiLocation = + Junction::AccountId32 { network: None, id: ALICE.into() }.into(); + new_test_ext_with_balances(balances).execute_with(|| { + // create sufficient foreign asset USDC (0 total issuance) + let usdc_initial_local_amount = 142; + let (usdc_chain, usdc_chain_sovereign_account, usdc_id_multilocation) = + set_up_foreign_asset( + USDC_RESERVE_PARA_ID, + Some(USDC_INNER_JUNCTION), + usdc_initial_local_amount, + true, + ); + + // transfer destination is some other parachain + let dest = RelayLocation::get().pushed_with_interior(Parachain(OTHER_PARA_ID)).unwrap(); + + let assets: MultiAssets = vec![(usdc_id_multilocation, SEND_AMOUNT).into()].into(); + let fee_index = 0; + + // reanchor according to test-case + let context = UniversalLocation::get(); + let expected_dest_on_reserve = dest.reanchored(&usdc_chain, context).unwrap(); + let fees = assets.get(fee_index).unwrap().clone(); + let (fees_half_1, fees_half_2) = XcmPallet::halve_fees(fees).unwrap(); + let mut expected_assets_on_reserve = assets.clone(); + expected_assets_on_reserve.reanchor(&usdc_chain, context).unwrap(); + let expected_fee_on_reserve = fees_half_1.reanchored(&usdc_chain, context).unwrap(); + let expected_fee_on_dest = fees_half_2.reanchored(&dest, context).unwrap(); + + // balances checks before + assert_eq!(Assets::balance(usdc_id_multilocation, ALICE), usdc_initial_local_amount); + assert_eq!(Balances::free_balance(ALICE), INITIAL_BALANCE); + + // do the transfer + assert_ok!(XcmPallet::limited_reserve_transfer_assets( + RuntimeOrigin::signed(ALICE), + Box::new(dest.into()), + Box::new(beneficiary.into()), + Box::new(assets.into()), + fee_index as u32, + Unlimited, + )); + assert!(matches!( + last_event(), + RuntimeEvent::XcmPallet(crate::Event::Attempted { outcome: Outcome::Complete(_) }) + )); + + // Alice spent (transferred) amount + assert_eq!( + Assets::balance(usdc_id_multilocation, ALICE), + usdc_initial_local_amount - SEND_AMOUNT + ); + // Alice's native asset balance is untouched + assert_eq!(Balances::free_balance(ALICE), INITIAL_BALANCE); + // Destination account (parachain account) has expected (same) balances + assert_eq!(Balances::free_balance(usdc_chain_sovereign_account.clone()), 0); + assert_eq!(Assets::balance(usdc_id_multilocation, usdc_chain_sovereign_account), 0); + // Verify total and active issuance of USDC have decreased (burned on reserve-withdraw) + let expected_usdc_issuance = usdc_initial_local_amount - SEND_AMOUNT; + assert_eq!(Assets::total_issuance(usdc_id_multilocation), expected_usdc_issuance); + assert_eq!(Assets::active_issuance(usdc_id_multilocation), expected_usdc_issuance); + + // Verify sent XCM program + assert_eq!( + sent_xcm(), + vec![( + // first message sent to reserve chain + usdc_chain, + Xcm(vec![ + WithdrawAsset(expected_assets_on_reserve), + ClearOrigin, + BuyExecution { fees: expected_fee_on_reserve, weight_limit: Unlimited }, + 
DepositReserveAsset { + assets: Wild(AllCounted(1)), + // final destination is `dest` as seen by `reserve` + dest: expected_dest_on_reserve, + // message sent onward to `dest` + xcm: Xcm(vec![ + buy_limited_execution(expected_fee_on_dest, Unlimited), + DepositAsset { assets: AllCounted(1).into(), beneficiary } + ]) + } + ]) + )], + ); + }); +} + +/// Test `reserve_transfer_assets` with local asset reserve and teleported fee. +/// +/// Transferring native asset (local reserve) to `USDT_PARA_ID`. Using teleport-trusted USDT for +/// fees. +/// +/// ```nocompile +/// Here (source) USDT_PARA_ID (destination) +/// | `assets` reserve `fees` teleport-trust +/// | +/// | 1. execute `InitiateTeleport(fees)` +/// | \--> sends `ReceiveTeleportedAsset(fees), .., DepositAsset(fees)` +/// | 2. execute `TransferReserveAsset(assets)` +/// | \-> sends `ReserveAssetDeposited(assets), ClearOrigin, BuyExecution(fees), DepositAsset` +/// \------------------------------------------> +/// ``` +#[test] +fn reserve_transfer_assets_with_local_asset_reserve_and_teleported_fee_works() { + let balances = vec![(ALICE, INITIAL_BALANCE)]; + let beneficiary: MultiLocation = + Junction::AccountId32 { network: None, id: ALICE.into() }.into(); + new_test_ext_with_balances(balances).execute_with(|| { + // create sufficient foreign asset USDT (0 total issuance) + let usdt_initial_local_amount = 42; + let (usdt_chain, usdt_chain_sovereign_account, usdt_id_multilocation) = + set_up_foreign_asset(USDT_PARA_ID, None, usdt_initial_local_amount, true); + + // native assets transfer destination is USDT chain (teleport trust only for USDT) + let dest = usdt_chain; + + let (assets, fee_index, fee_asset, xfer_asset) = into_multiassets_checked( + // USDT for fees (is sufficient on local chain too) - teleported + (usdt_id_multilocation, FEE_AMOUNT).into(), + // native asset to transfer (not used for fees) - local reserve + (MultiLocation::here(), SEND_AMOUNT).into(), + ); + + // reanchor according to test-case + let context = UniversalLocation::get(); + let expected_fee = fee_asset.reanchored(&dest, context).unwrap(); + let expected_asset = xfer_asset.reanchored(&dest, context).unwrap(); + + // balances checks before + assert_eq!(Assets::balance(usdt_id_multilocation, ALICE), usdt_initial_local_amount); + assert_eq!(Balances::free_balance(ALICE), INITIAL_BALANCE); + + // do the transfer + assert_ok!(XcmPallet::limited_reserve_transfer_assets( + RuntimeOrigin::signed(ALICE), + Box::new(dest.into()), + Box::new(beneficiary.into()), + Box::new(assets.into()), + fee_index as u32, + Unlimited, + )); + let weight = BaseXcmWeight::get() * 3; + let mut last_events = last_events(3).into_iter(); + assert_eq!( + last_events.next().unwrap(), + RuntimeEvent::XcmPallet(crate::Event::Attempted { outcome: Outcome::Complete(weight) }) + ); + assert_eq!( + last_events.next().unwrap(), + RuntimeEvent::XcmPallet(crate::Event::FeesPaid { + paying: beneficiary, + fees: MultiAssets::new(), + }) + ); + assert!(matches!( + last_events.next().unwrap(), + RuntimeEvent::XcmPallet(crate::Event::Sent { .. 
}) + )); + // Alice spent (fees) amount + assert_eq!( + Assets::balance(usdt_id_multilocation, ALICE), + usdt_initial_local_amount - FEE_AMOUNT + ); + // Alice used native asset for transfer + assert_eq!(Balances::free_balance(ALICE), INITIAL_BALANCE - SEND_AMOUNT); + // Sovereign account of dest parachain holds `SEND_AMOUNT` native asset in local reserve + assert_eq!(Balances::free_balance(usdt_chain_sovereign_account.clone()), SEND_AMOUNT); + assert_eq!(Assets::balance(usdt_id_multilocation, usdt_chain_sovereign_account), 0); + // Verify total and active issuance have decreased (teleported) + let expected_usdt_issuance = usdt_initial_local_amount - FEE_AMOUNT; + assert_eq!(Assets::total_issuance(usdt_id_multilocation), expected_usdt_issuance); + assert_eq!(Assets::active_issuance(usdt_id_multilocation), expected_usdt_issuance); + + // Verify sent XCM program + assert_eq!( + sent_xcm(), + vec![( + dest, + Xcm(vec![ + // fees are teleported to destination chain + ReceiveTeleportedAsset(expected_fee.clone().into()), + buy_limited_execution(expected_fee, Unlimited), + // transfer is through local-reserve transfer because `assets` (native + // asset) have local reserve + ReserveAssetDeposited(expected_asset.into()), + ClearOrigin, + DepositAsset { assets: AllCounted(2).into(), beneficiary }, + ]) + )] + ); + }); +} + +/// Test `reserve_transfer_assets` with destination asset reserve and teleported fee. +/// +/// Transferring foreign asset (destination reserve) to `FOREIGN_ASSET_RESERVE_PARA_ID`. Using +/// teleport-trusted USDT for fees. +/// +/// ```nocompile +/// Here (source) FOREIGN_ASSET_RESERVE_PARA_ID (destination) +/// | `fees` (USDT) teleport-trust +/// | `assets` reserve +/// | +/// | 1. execute `InitiateTeleport(fees)` +/// | \--> sends `ReceiveTeleportedAsset(fees), .., DepositAsset(fees)` +/// | 2. 
execute `InitiateReserveWithdraw(assets)` +/// | \--> sends `WithdrawAsset(asset), ClearOrigin, BuyExecution(fees), DepositAsset` +/// \------------------------------------------> +/// ``` +#[test] +fn reserve_transfer_assets_with_destination_asset_reserve_and_teleported_fee_works() { + let balances = vec![(ALICE, INITIAL_BALANCE)]; + let beneficiary: MultiLocation = + Junction::AccountId32 { network: None, id: ALICE.into() }.into(); + new_test_ext_with_balances(balances).execute_with(|| { + // create sufficient foreign asset USDT (0 total issuance) + let usdt_initial_local_amount = 42; + let (_, usdt_chain_sovereign_account, usdt_id_multilocation) = + set_up_foreign_asset(USDT_PARA_ID, None, usdt_initial_local_amount, true); + + // create non-sufficient foreign asset BLA (0 total issuance) + let foreign_initial_amount = 142; + let (reserve_location, foreign_sovereign_account, foreign_asset_id_multilocation) = + set_up_foreign_asset( + FOREIGN_ASSET_RESERVE_PARA_ID, + Some(FOREIGN_ASSET_INNER_JUNCTION), + foreign_initial_amount, + false, + ); + + // transfer destination is asset reserve location + let dest = reserve_location; + let dest_sovereign_account = foreign_sovereign_account; + + let (assets, fee_index, fee_asset, xfer_asset) = into_multiassets_checked( + // USDT for fees (is sufficient on local chain too) - teleported + (usdt_id_multilocation, FEE_AMOUNT).into(), + // foreign asset to transfer (not used for fees) - destination reserve + (foreign_asset_id_multilocation, SEND_AMOUNT).into(), + ); + + // reanchor according to test-case + let context = UniversalLocation::get(); + let expected_fee = fee_asset.reanchored(&dest, context).unwrap(); + let expected_asset = xfer_asset.reanchored(&dest, context).unwrap(); + + // balances checks before + assert_eq!(Assets::balance(usdt_id_multilocation, ALICE), usdt_initial_local_amount); + assert_eq!(Balances::free_balance(ALICE), INITIAL_BALANCE); + + // do the transfer + assert_ok!(XcmPallet::limited_reserve_transfer_assets( + RuntimeOrigin::signed(ALICE), + Box::new(dest.into()), + Box::new(beneficiary.into()), + Box::new(assets.into()), + fee_index as u32, + Unlimited, + )); + let weight = BaseXcmWeight::get() * 4; + let mut last_events = last_events(3).into_iter(); + assert_eq!( + last_events.next().unwrap(), + RuntimeEvent::XcmPallet(crate::Event::Attempted { outcome: Outcome::Complete(weight) }) + ); + assert_eq!( + last_events.next().unwrap(), + RuntimeEvent::XcmPallet(crate::Event::FeesPaid { + paying: beneficiary, + fees: MultiAssets::new(), + }) + ); + assert!(matches!( + last_events.next().unwrap(), + RuntimeEvent::XcmPallet(crate::Event::Sent { .. 
}) + )); + // Alice native asset untouched + assert_eq!(Balances::free_balance(ALICE), INITIAL_BALANCE); + // Alice spent USDT for fees + assert_eq!( + Assets::balance(usdt_id_multilocation, ALICE), + usdt_initial_local_amount - FEE_AMOUNT + ); + // Alice transferred BLA + assert_eq!( + Assets::balance(foreign_asset_id_multilocation, ALICE), + foreign_initial_amount - SEND_AMOUNT + ); + // Verify balances of USDT reserve parachain + assert_eq!(Balances::free_balance(usdt_chain_sovereign_account.clone()), 0); + assert_eq!(Assets::balance(usdt_id_multilocation, usdt_chain_sovereign_account), 0); + // Verify balances of transferred-asset reserve parachain + assert_eq!(Balances::free_balance(dest_sovereign_account.clone()), 0); + assert_eq!(Assets::balance(foreign_asset_id_multilocation, dest_sovereign_account), 0); + // Verify total and active issuance of USDT have decreased (teleported) + let expected_usdt_issuance = usdt_initial_local_amount - FEE_AMOUNT; + assert_eq!(Assets::total_issuance(usdt_id_multilocation), expected_usdt_issuance); + assert_eq!(Assets::active_issuance(usdt_id_multilocation), expected_usdt_issuance); + // Verify total and active issuance of foreign BLA asset have decreased (burned on + // reserve-withdraw) + let expected_bla_issuance = foreign_initial_amount - SEND_AMOUNT; + assert_eq!(Assets::total_issuance(foreign_asset_id_multilocation), expected_bla_issuance); + assert_eq!(Assets::active_issuance(foreign_asset_id_multilocation), expected_bla_issuance); + + // Verify sent XCM program + assert_eq!( + sent_xcm(), + vec![( + dest, + Xcm(vec![ + // fees are teleported to destination chain + ReceiveTeleportedAsset(expected_fee.clone().into()), + buy_limited_execution(expected_fee, Unlimited), + // assets are withdrawn from origin's local SA + WithdrawAsset(expected_asset.into()), + ClearOrigin, + DepositAsset { assets: AllCounted(2).into(), beneficiary }, + ]) + )] + ); + }); +} + +/// Test `reserve_transfer_assets` with remote asset reserve and teleported fee is disallowed. +/// +/// Transferring foreign asset (reserve on `FOREIGN_ASSET_RESERVE_PARA_ID`) to `USDT_PARA_ID`. +/// Using teleport-trusted USDT for fees. 
+#[test] +fn reserve_transfer_assets_with_remote_asset_reserve_and_teleported_fee_disallowed() { + let balances = vec![(ALICE, INITIAL_BALANCE)]; + let beneficiary: MultiLocation = + Junction::AccountId32 { network: None, id: ALICE.into() }.into(); + new_test_ext_with_balances(balances).execute_with(|| { + // create sufficient foreign asset USDT (0 total issuance) + let usdt_initial_local_amount = 42; + let (usdt_chain, usdt_chain_sovereign_account, usdt_id_multilocation) = + set_up_foreign_asset(USDT_PARA_ID, None, usdt_initial_local_amount, true); + + // create non-sufficient foreign asset BLA (0 total issuance) + let foreign_initial_amount = 142; + let (_, reserve_sovereign_account, foreign_asset_id_multilocation) = set_up_foreign_asset( + FOREIGN_ASSET_RESERVE_PARA_ID, + Some(FOREIGN_ASSET_INNER_JUNCTION), + foreign_initial_amount, + false, + ); + + // transfer destination is USDT chain (foreign asset needs to go through its reserve chain) + let dest = usdt_chain; + + let (assets, fee_index, _, _) = into_multiassets_checked( + // USDT for fees (is sufficient on local chain too) - teleported + (usdt_id_multilocation, FEE_AMOUNT).into(), + // foreign asset to transfer (not used for fees) - remote reserve + (foreign_asset_id_multilocation, SEND_AMOUNT).into(), + ); + + // balances checks before + assert_eq!(Assets::balance(usdt_id_multilocation, ALICE), usdt_initial_local_amount); + assert_eq!(Balances::free_balance(ALICE), INITIAL_BALANCE); + + // do the transfer + let result = XcmPallet::limited_reserve_transfer_assets( + RuntimeOrigin::signed(ALICE), + Box::new(dest.into()), + Box::new(beneficiary.into()), + Box::new(assets.into()), + fee_index as u32, + Unlimited, + ); + assert_eq!( + result, + Err(DispatchError::Module(ModuleError { + index: 4, + error: [22, 0, 0, 0], + message: Some("InvalidAssetUnsupportedReserve") + })) + ); + // Alice native asset untouched + assert_eq!(Balances::free_balance(ALICE), INITIAL_BALANCE); + assert_eq!(Assets::balance(usdt_id_multilocation, ALICE), usdt_initial_local_amount); + assert_eq!(Assets::balance(foreign_asset_id_multilocation, ALICE), foreign_initial_amount); + assert_eq!(Balances::free_balance(usdt_chain_sovereign_account.clone()), 0); + assert_eq!(Assets::balance(usdt_id_multilocation, usdt_chain_sovereign_account), 0); + assert_eq!(Balances::free_balance(reserve_sovereign_account.clone()), 0); + assert_eq!(Assets::balance(foreign_asset_id_multilocation, reserve_sovereign_account), 0); + let expected_usdt_issuance = usdt_initial_local_amount; + assert_eq!(Assets::total_issuance(usdt_id_multilocation), expected_usdt_issuance); + assert_eq!(Assets::active_issuance(usdt_id_multilocation), expected_usdt_issuance); + let expected_bla_issuance = foreign_initial_amount; + assert_eq!(Assets::total_issuance(foreign_asset_id_multilocation), expected_bla_issuance); + assert_eq!(Assets::active_issuance(foreign_asset_id_multilocation), expected_bla_issuance); + }); +} + +/// Test `reserve_transfer_assets` single asset which is teleportable - should fail. +/// +/// Attempting to reserve-transfer teleport-trusted USDT to `USDT_PARA_ID` should fail. 
+#[test] +fn reserve_transfer_assets_with_teleportable_asset_fails() { + let balances = vec![(ALICE, INITIAL_BALANCE)]; + let beneficiary: MultiLocation = + Junction::AccountId32 { network: None, id: ALICE.into() }.into(); + + new_test_ext_with_balances(balances).execute_with(|| { + // create sufficient foreign asset USDT (0 total issuance) + let usdt_initial_local_amount = 42; + let (usdt_chain, usdt_chain_sovereign_account, usdt_id_multilocation) = + set_up_foreign_asset(USDT_PARA_ID, None, usdt_initial_local_amount, true); + + // transfer destination is USDT chain (foreign asset needs to go through its reserve chain) + let dest = usdt_chain; + let assets: MultiAssets = vec![(usdt_id_multilocation, FEE_AMOUNT).into()].into(); + let fee_index = 0; + + // balances checks before + assert_eq!(Assets::balance(usdt_id_multilocation, ALICE), usdt_initial_local_amount); + assert_eq!(Balances::free_balance(ALICE), INITIAL_BALANCE); + + // do the transfer + let res = XcmPallet::limited_reserve_transfer_assets( + RuntimeOrigin::signed(ALICE), + Box::new(dest.into()), + Box::new(beneficiary.into()), + Box::new(assets.into()), + fee_index as u32, + Unlimited, + ); + assert_eq!( + res, + Err(DispatchError::Module(ModuleError { + index: 4, + error: [2, 0, 0, 0], + message: Some("Filtered") + })) + ); + // Alice native asset is still same + assert_eq!(Balances::free_balance(ALICE), INITIAL_BALANCE); + // Alice USDT balance is still same + assert_eq!(Assets::balance(usdt_id_multilocation, ALICE), usdt_initial_local_amount); + // No USDT moved to sovereign account of reserve parachain + assert_eq!(Assets::balance(usdt_id_multilocation, usdt_chain_sovereign_account), 0); + // Verify total and active issuance of USDT are still the same + assert_eq!(Assets::total_issuance(usdt_id_multilocation), usdt_initial_local_amount); + assert_eq!(Assets::active_issuance(usdt_id_multilocation), usdt_initial_local_amount); + }); +} diff --git a/polkadot/xcm/pallet-xcm/src/tests.rs b/polkadot/xcm/pallet-xcm/src/tests/mod.rs similarity index 68% rename from polkadot/xcm/pallet-xcm/src/tests.rs rename to polkadot/xcm/pallet-xcm/src/tests/mod.rs index d267eece2c0..72814e507f2 100644 --- a/polkadot/xcm/pallet-xcm/src/tests.rs +++ b/polkadot/xcm/pallet-xcm/src/tests/mod.rs @@ -14,6 +14,10 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . 
+#![cfg(test)] + +mod assets_transfer; + use crate::{ mock::*, AssetTraps, CurrentMigration, Error, LatestVersionedMultiLocation, Queries, QueryStatus, VersionDiscoveryQueue, VersionMigrationStage, VersionNotifiers, @@ -35,15 +39,15 @@ use xcm_executor::{ const ALICE: AccountId = AccountId::new([0u8; 32]); const BOB: AccountId = AccountId::new([1u8; 32]); -const PARA_ID: u32 = 2000; const INITIAL_BALANCE: u128 = 100; const SEND_AMOUNT: u128 = 10; +const FEE_AMOUNT: u128 = 2; #[test] fn report_outcome_notify_works() { let balances = vec![ (ALICE, INITIAL_BALANCE), - (ParaId::from(PARA_ID).into_account_truncating(), INITIAL_BALANCE), + (ParaId::from(OTHER_PARA_ID).into_account_truncating(), INITIAL_BALANCE), ]; let sender: MultiLocation = AccountId32 { network: None, id: ALICE.into() }.into(); let mut message = @@ -56,7 +60,7 @@ fn report_outcome_notify_works() { new_test_ext_with_balances(balances).execute_with(|| { XcmPallet::report_outcome_notify( &mut message, - Parachain(PARA_ID).into_location(), + Parachain(OTHER_PARA_ID).into_location(), notify, 100, ) @@ -74,8 +78,8 @@ fn report_outcome_notify_works() { ); let querier: MultiLocation = Here.into(); let status = QueryStatus::Pending { - responder: MultiLocation::from(Parachain(PARA_ID)).into(), - maybe_notify: Some((4, 2)), + responder: MultiLocation::from(Parachain(OTHER_PARA_ID)).into(), + maybe_notify: Some((5, 2)), timeout: 100, maybe_match_querier: Some(querier.into()), }; @@ -89,7 +93,7 @@ fn report_outcome_notify_works() { }]); let hash = fake_message_hash(&message); let r = XcmExecutor::::execute_xcm( - Parachain(PARA_ID), + Parachain(OTHER_PARA_ID), message, hash, Weight::from_parts(1_000_000_000, 1_000_000_000), @@ -99,13 +103,13 @@ fn report_outcome_notify_works() { last_events(2), vec![ RuntimeEvent::TestNotifier(pallet_test_notifier::Event::ResponseReceived( - Parachain(PARA_ID).into(), + Parachain(OTHER_PARA_ID).into(), 0, Response::ExecutionResult(None), )), RuntimeEvent::XcmPallet(crate::Event::Notified { query_id: 0, - pallet_index: 4, + pallet_index: 5, call_index: 2 }), ] @@ -118,13 +122,14 @@ fn report_outcome_notify_works() { fn report_outcome_works() { let balances = vec![ (ALICE, INITIAL_BALANCE), - (ParaId::from(PARA_ID).into_account_truncating(), INITIAL_BALANCE), + (ParaId::from(OTHER_PARA_ID).into_account_truncating(), INITIAL_BALANCE), ]; let sender: MultiLocation = AccountId32 { network: None, id: ALICE.into() }.into(); let mut message = Xcm(vec![TransferAsset { assets: (Here, SEND_AMOUNT).into(), beneficiary: sender }]); new_test_ext_with_balances(balances).execute_with(|| { - XcmPallet::report_outcome(&mut message, Parachain(PARA_ID).into_location(), 100).unwrap(); + XcmPallet::report_outcome(&mut message, Parachain(OTHER_PARA_ID).into_location(), 100) + .unwrap(); assert_eq!( message, Xcm(vec![ @@ -138,7 +143,7 @@ fn report_outcome_works() { ); let querier: MultiLocation = Here.into(); let status = QueryStatus::Pending { - responder: MultiLocation::from(Parachain(PARA_ID)).into(), + responder: MultiLocation::from(Parachain(OTHER_PARA_ID)).into(), maybe_notify: None, timeout: 100, maybe_match_querier: Some(querier.into()), @@ -153,7 +158,7 @@ fn report_outcome_works() { }]); let hash = fake_message_hash(&message); let r = XcmExecutor::::execute_xcm( - Parachain(PARA_ID), + Parachain(OTHER_PARA_ID), message, hash, Weight::from_parts(1_000_000_000, 1_000_000_000), @@ -177,7 +182,7 @@ fn report_outcome_works() { fn custom_querier_works() { let balances = vec![ (ALICE, INITIAL_BALANCE), - 
(ParaId::from(PARA_ID).into_account_truncating(), INITIAL_BALANCE), + (ParaId::from(OTHER_PARA_ID).into_account_truncating(), INITIAL_BALANCE), ]; new_test_ext_with_balances(balances).execute_with(|| { let querier: MultiLocation = @@ -281,7 +286,7 @@ fn custom_querier_works() { fn send_works() { let balances = vec![ (ALICE, INITIAL_BALANCE), - (ParaId::from(PARA_ID).into_account_truncating(), INITIAL_BALANCE), + (ParaId::from(OTHER_PARA_ID).into_account_truncating(), INITIAL_BALANCE), ]; new_test_ext_with_balances(balances).execute_with(|| { let sender: MultiLocation = AccountId32 { network: None, id: ALICE.into() }.into(); @@ -325,7 +330,7 @@ fn send_works() { fn send_fails_when_xcm_router_blocks() { let balances = vec![ (ALICE, INITIAL_BALANCE), - (ParaId::from(PARA_ID).into_account_truncating(), INITIAL_BALANCE), + (ParaId::from(OTHER_PARA_ID).into_account_truncating(), INITIAL_BALANCE), ]; new_test_ext_with_balances(balances).execute_with(|| { let sender: MultiLocation = @@ -346,344 +351,6 @@ fn send_fails_when_xcm_router_blocks() { }); } -/// Test `teleport_assets` -/// -/// Asserts that the sender's balance is decreased as a result of execution of -/// local effects. -#[test] -fn teleport_assets_works() { - let balances = vec![ - (ALICE, INITIAL_BALANCE), - (ParaId::from(PARA_ID).into_account_truncating(), INITIAL_BALANCE), - ]; - new_test_ext_with_balances(balances).execute_with(|| { - let weight = BaseXcmWeight::get() * 3; - assert_eq!(Balances::total_balance(&ALICE), INITIAL_BALANCE); - let dest: MultiLocation = AccountId32 { network: None, id: BOB.into() }.into(); - assert_ok!(XcmPallet::teleport_assets( - RuntimeOrigin::signed(ALICE), - Box::new(RelayLocation::get().into()), - Box::new(dest.into()), - Box::new((Here, SEND_AMOUNT).into()), - 0, - )); - assert_eq!(Balances::total_balance(&ALICE), INITIAL_BALANCE - SEND_AMOUNT); - assert_eq!( - sent_xcm(), - vec![( - RelayLocation::get().into(), - Xcm(vec![ - ReceiveTeleportedAsset((Here, SEND_AMOUNT).into()), - ClearOrigin, - buy_execution((Here, SEND_AMOUNT)), - DepositAsset { assets: AllCounted(1).into(), beneficiary: dest }, - ]), - )] - ); - let versioned_sent = VersionedXcm::from(sent_xcm().into_iter().next().unwrap().1); - let _check_v2_ok: xcm::v2::Xcm<()> = versioned_sent.try_into().unwrap(); - assert_eq!( - last_event(), - RuntimeEvent::XcmPallet(crate::Event::Attempted { outcome: Outcome::Complete(weight) }) - ); - }); -} - -/// Test `limited_teleport_assets` -/// -/// Asserts that the sender's balance is decreased as a result of execution of -/// local effects. 
-#[test] -fn limited_teleport_assets_works() { - let balances = vec![ - (ALICE, INITIAL_BALANCE), - (ParaId::from(PARA_ID).into_account_truncating(), INITIAL_BALANCE), - ]; - new_test_ext_with_balances(balances).execute_with(|| { - let weight = BaseXcmWeight::get() * 3; - assert_eq!(Balances::total_balance(&ALICE), INITIAL_BALANCE); - let dest: MultiLocation = AccountId32 { network: None, id: BOB.into() }.into(); - assert_ok!(XcmPallet::limited_teleport_assets( - RuntimeOrigin::signed(ALICE), - Box::new(RelayLocation::get().into()), - Box::new(dest.into()), - Box::new((Here, SEND_AMOUNT).into()), - 0, - WeightLimit::Limited(Weight::from_parts(5000, 5000)), - )); - assert_eq!(Balances::total_balance(&ALICE), INITIAL_BALANCE - SEND_AMOUNT); - assert_eq!( - sent_xcm(), - vec![( - RelayLocation::get().into(), - Xcm(vec![ - ReceiveTeleportedAsset((Here, SEND_AMOUNT).into()), - ClearOrigin, - buy_limited_execution((Here, SEND_AMOUNT), Weight::from_parts(5000, 5000)), - DepositAsset { assets: AllCounted(1).into(), beneficiary: dest }, - ]), - )] - ); - let versioned_sent = VersionedXcm::from(sent_xcm().into_iter().next().unwrap().1); - let _check_v2_ok: xcm::v2::Xcm<()> = versioned_sent.try_into().unwrap(); - assert_eq!( - last_event(), - RuntimeEvent::XcmPallet(crate::Event::Attempted { outcome: Outcome::Complete(weight) }) - ); - }); -} - -/// Test `limited_teleport_assets` with unlimited weight -/// -/// Asserts that the sender's balance is decreased as a result of execution of -/// local effects. -#[test] -fn unlimited_teleport_assets_works() { - let balances = vec![ - (ALICE, INITIAL_BALANCE), - (ParaId::from(PARA_ID).into_account_truncating(), INITIAL_BALANCE), - ]; - new_test_ext_with_balances(balances).execute_with(|| { - let weight = BaseXcmWeight::get() * 3; - assert_eq!(Balances::total_balance(&ALICE), INITIAL_BALANCE); - let dest: MultiLocation = AccountId32 { network: None, id: BOB.into() }.into(); - assert_ok!(XcmPallet::limited_teleport_assets( - RuntimeOrigin::signed(ALICE), - Box::new(RelayLocation::get().into()), - Box::new(dest.into()), - Box::new((Here, SEND_AMOUNT).into()), - 0, - WeightLimit::Unlimited, - )); - assert_eq!(Balances::total_balance(&ALICE), INITIAL_BALANCE - SEND_AMOUNT); - assert_eq!( - sent_xcm(), - vec![( - RelayLocation::get().into(), - Xcm(vec![ - ReceiveTeleportedAsset((Here, SEND_AMOUNT).into()), - ClearOrigin, - buy_execution((Here, SEND_AMOUNT)), - DepositAsset { assets: AllCounted(1).into(), beneficiary: dest }, - ]), - )] - ); - assert_eq!( - last_event(), - RuntimeEvent::XcmPallet(crate::Event::Attempted { outcome: Outcome::Complete(weight) }) - ); - }); -} - -/// Test `reserve_transfer_assets` -/// -/// Asserts that the sender's balance is decreased and the beneficiary's balance -/// is increased. Verifies the correct message is sent and event is emitted. 
-#[test] -fn reserve_transfer_assets_works() { - let balances = vec![ - (ALICE, INITIAL_BALANCE), - (ParaId::from(PARA_ID).into_account_truncating(), INITIAL_BALANCE), - ]; - new_test_ext_with_balances(balances).execute_with(|| { - let weight = BaseXcmWeight::get() * 2; - let dest: MultiLocation = Junction::AccountId32 { network: None, id: ALICE.into() }.into(); - assert_eq!(Balances::total_balance(&ALICE), INITIAL_BALANCE); - assert_ok!(XcmPallet::reserve_transfer_assets( - RuntimeOrigin::signed(ALICE), - Box::new(Parachain(PARA_ID).into()), - Box::new(dest.into()), - Box::new((Here, SEND_AMOUNT).into()), - 0, - )); - // Alice spent amount - assert_eq!(Balances::free_balance(ALICE), INITIAL_BALANCE - SEND_AMOUNT); - // Destination account (parachain account) has amount - let para_acc: AccountId = ParaId::from(PARA_ID).into_account_truncating(); - assert_eq!(Balances::free_balance(para_acc), INITIAL_BALANCE + SEND_AMOUNT); - assert_eq!( - sent_xcm(), - vec![( - Parachain(PARA_ID).into(), - Xcm(vec![ - ReserveAssetDeposited((Parent, SEND_AMOUNT).into()), - ClearOrigin, - buy_execution((Parent, SEND_AMOUNT)), - DepositAsset { assets: AllCounted(1).into(), beneficiary: dest }, - ]), - )] - ); - let versioned_sent = VersionedXcm::from(sent_xcm().into_iter().next().unwrap().1); - let _check_v2_ok: xcm::v2::Xcm<()> = versioned_sent.try_into().unwrap(); - assert_eq!( - last_event(), - RuntimeEvent::XcmPallet(crate::Event::Attempted { outcome: Outcome::Complete(weight) }) - ); - }); -} - -/// Test `reserve_transfer_assets_with_paid_router_works` -/// -/// Asserts that the sender's balance is decreased and the beneficiary's balance -/// is increased. Verifies the correct message is sent and event is emitted. -/// Verifies that XCM router fees (`SendXcm::validate` -> `MultiAssets`) are withdrawn from correct -/// user account and deposited to a correct target account (`XcmFeesTargetAccount`). 
-#[test] -fn reserve_transfer_assets_with_paid_router_works() { - let user_account = AccountId::from(XCM_FEES_NOT_WAIVED_USER_ACCOUNT); - let paid_para_id = Para3000::get(); - let balances = vec![ - (user_account.clone(), INITIAL_BALANCE), - (ParaId::from(paid_para_id).into_account_truncating(), INITIAL_BALANCE), - (XcmFeesTargetAccount::get(), INITIAL_BALANCE), - ]; - new_test_ext_with_balances(balances).execute_with(|| { - let xcm_router_fee_amount = Para3000PaymentAmount::get(); - let weight = BaseXcmWeight::get() * 2; - let dest: MultiLocation = - Junction::AccountId32 { network: None, id: user_account.clone().into() }.into(); - assert_eq!(Balances::total_balance(&user_account), INITIAL_BALANCE); - assert_ok!(XcmPallet::reserve_transfer_assets( - RuntimeOrigin::signed(user_account.clone()), - Box::new(Parachain(paid_para_id).into()), - Box::new(dest.into()), - Box::new((Here, SEND_AMOUNT).into()), - 0, - )); - // check event - assert_eq!( - last_event(), - RuntimeEvent::XcmPallet(crate::Event::Attempted { outcome: Outcome::Complete(weight) }) - ); - - // XCM_FEES_NOT_WAIVED_USER_ACCOUNT spent amount - assert_eq!( - Balances::free_balance(user_account), - INITIAL_BALANCE - SEND_AMOUNT - xcm_router_fee_amount - ); - // Destination account (parachain account) has amount - let para_acc: AccountId = ParaId::from(paid_para_id).into_account_truncating(); - assert_eq!(Balances::free_balance(para_acc), INITIAL_BALANCE + SEND_AMOUNT); - // XcmFeesTargetAccount where should lend xcm_router_fee_amount - assert_eq!( - Balances::free_balance(XcmFeesTargetAccount::get()), - INITIAL_BALANCE + xcm_router_fee_amount - ); - assert_eq!( - sent_xcm(), - vec![( - Parachain(paid_para_id).into(), - Xcm(vec![ - ReserveAssetDeposited((Parent, SEND_AMOUNT).into()), - ClearOrigin, - buy_execution((Parent, SEND_AMOUNT)), - DepositAsset { assets: AllCounted(1).into(), beneficiary: dest }, - ]), - )] - ); - let versioned_sent = VersionedXcm::from(sent_xcm().into_iter().next().unwrap().1); - let _check_v2_ok: xcm::v2::Xcm<()> = versioned_sent.try_into().unwrap(); - assert_eq!( - last_event(), - RuntimeEvent::XcmPallet(crate::Event::Attempted { outcome: Outcome::Complete(weight) }) - ); - }); -} - -/// Test `limited_reserve_transfer_assets` -/// -/// Asserts that the sender's balance is decreased and the beneficiary's balance -/// is increased. Verifies the correct message is sent and event is emitted. 
-#[test] -fn limited_reserve_transfer_assets_works() { - let balances = vec![ - (ALICE, INITIAL_BALANCE), - (ParaId::from(PARA_ID).into_account_truncating(), INITIAL_BALANCE), - ]; - new_test_ext_with_balances(balances).execute_with(|| { - let weight = BaseXcmWeight::get() * 2; - let dest: MultiLocation = Junction::AccountId32 { network: None, id: ALICE.into() }.into(); - assert_eq!(Balances::total_balance(&ALICE), INITIAL_BALANCE); - assert_ok!(XcmPallet::limited_reserve_transfer_assets( - RuntimeOrigin::signed(ALICE), - Box::new(Parachain(PARA_ID).into()), - Box::new(dest.into()), - Box::new((Here, SEND_AMOUNT).into()), - 0, - WeightLimit::Limited(Weight::from_parts(5000, 5000)), - )); - // Alice spent amount - assert_eq!(Balances::free_balance(ALICE), INITIAL_BALANCE - SEND_AMOUNT); - // Destination account (parachain account) has amount - let para_acc: AccountId = ParaId::from(PARA_ID).into_account_truncating(); - assert_eq!(Balances::free_balance(para_acc), INITIAL_BALANCE + SEND_AMOUNT); - assert_eq!( - sent_xcm(), - vec![( - Parachain(PARA_ID).into(), - Xcm(vec![ - ReserveAssetDeposited((Parent, SEND_AMOUNT).into()), - ClearOrigin, - buy_limited_execution((Parent, SEND_AMOUNT), Weight::from_parts(5000, 5000)), - DepositAsset { assets: AllCounted(1).into(), beneficiary: dest }, - ]), - )] - ); - let versioned_sent = VersionedXcm::from(sent_xcm().into_iter().next().unwrap().1); - let _check_v2_ok: xcm::v2::Xcm<()> = versioned_sent.try_into().unwrap(); - assert_eq!( - last_event(), - RuntimeEvent::XcmPallet(crate::Event::Attempted { outcome: Outcome::Complete(weight) }) - ); - }); -} - -/// Test `limited_reserve_transfer_assets` with unlimited weight purchasing -/// -/// Asserts that the sender's balance is decreased and the beneficiary's balance -/// is increased. Verifies the correct message is sent and event is emitted. 
-#[test] -fn unlimited_reserve_transfer_assets_works() { - let balances = vec![ - (ALICE, INITIAL_BALANCE), - (ParaId::from(PARA_ID).into_account_truncating(), INITIAL_BALANCE), - ]; - new_test_ext_with_balances(balances).execute_with(|| { - let weight = BaseXcmWeight::get() * 2; - let dest: MultiLocation = Junction::AccountId32 { network: None, id: ALICE.into() }.into(); - assert_eq!(Balances::total_balance(&ALICE), INITIAL_BALANCE); - assert_ok!(XcmPallet::limited_reserve_transfer_assets( - RuntimeOrigin::signed(ALICE), - Box::new(Parachain(PARA_ID).into()), - Box::new(dest.into()), - Box::new((Here, SEND_AMOUNT).into()), - 0, - WeightLimit::Unlimited, - )); - // Alice spent amount - assert_eq!(Balances::free_balance(ALICE), INITIAL_BALANCE - SEND_AMOUNT); - // Destination account (parachain account) has amount - let para_acc: AccountId = ParaId::from(PARA_ID).into_account_truncating(); - assert_eq!(Balances::free_balance(para_acc), INITIAL_BALANCE + SEND_AMOUNT); - assert_eq!( - sent_xcm(), - vec![( - Parachain(PARA_ID).into(), - Xcm(vec![ - ReserveAssetDeposited((Parent, SEND_AMOUNT).into()), - ClearOrigin, - buy_execution((Parent, SEND_AMOUNT)), - DepositAsset { assets: AllCounted(1).into(), beneficiary: dest }, - ]), - )] - ); - assert_eq!( - last_event(), - RuntimeEvent::XcmPallet(crate::Event::Attempted { outcome: Outcome::Complete(weight) }) - ); - }); -} - /// Test local execution of XCM /// /// Asserts that the sender's balance is decreased and the beneficiary's balance @@ -692,7 +359,7 @@ fn unlimited_reserve_transfer_assets_works() { fn execute_withdraw_to_deposit_works() { let balances = vec![ (ALICE, INITIAL_BALANCE), - (ParaId::from(PARA_ID).into_account_truncating(), INITIAL_BALANCE), + (ParaId::from(OTHER_PARA_ID).into_account_truncating(), INITIAL_BALANCE), ]; new_test_ext_with_balances(balances).execute_with(|| { let weight = BaseXcmWeight::get() * 3; diff --git a/polkadot/xcm/src/v3/multilocation.rs b/polkadot/xcm/src/v3/multilocation.rs index 8a1575d9bc9..89e25984443 100644 --- a/polkadot/xcm/src/v3/multilocation.rs +++ b/polkadot/xcm/src/v3/multilocation.rs @@ -444,6 +444,21 @@ impl MultiLocation { } } } + + /// Return the MultiLocation subsection identifying the chain that `self` points to. 
+ pub fn chain_location(&self) -> MultiLocation { + let mut clone = *self; + // start popping junctions until we reach chain identifier + while let Some(j) = clone.last() { + if matches!(j, Junction::Parachain(_) | Junction::GlobalConsensus(_)) { + // return chain subsection + return clone + } else { + (clone, _) = clone.split_last_interior(); + } + } + MultiLocation::new(clone.parents, Junctions::Here) + } } impl TryFrom for MultiLocation { @@ -674,6 +689,57 @@ mod tests { assert_eq!(iter.next_back(), None); } + #[test] + fn chain_location_works() { + // Relay-chain or parachain context pointing to local resource, + let relay_to_local = MultiLocation::new(0, (PalletInstance(42), GeneralIndex(42))); + assert_eq!(relay_to_local.chain_location(), MultiLocation::here()); + + // Relay-chain context pointing to child parachain, + let relay_to_child = + MultiLocation::new(0, (Parachain(42), PalletInstance(42), GeneralIndex(42))); + let expected = MultiLocation::new(0, Parachain(42)); + assert_eq!(relay_to_child.chain_location(), expected); + + // Relay-chain context pointing to different consensus relay, + let relay_to_remote_relay = + MultiLocation::new(1, (GlobalConsensus(Kusama), PalletInstance(42), GeneralIndex(42))); + let expected = MultiLocation::new(1, GlobalConsensus(Kusama)); + assert_eq!(relay_to_remote_relay.chain_location(), expected); + + // Relay-chain context pointing to different consensus parachain, + let relay_to_remote_para = MultiLocation::new( + 1, + (GlobalConsensus(Kusama), Parachain(42), PalletInstance(42), GeneralIndex(42)), + ); + let expected = MultiLocation::new(1, (GlobalConsensus(Kusama), Parachain(42))); + assert_eq!(relay_to_remote_para.chain_location(), expected); + + // Parachain context pointing to relay chain, + let para_to_relay = MultiLocation::new(1, (PalletInstance(42), GeneralIndex(42))); + assert_eq!(para_to_relay.chain_location(), MultiLocation::parent()); + + // Parachain context pointing to sibling parachain, + let para_to_sibling = + MultiLocation::new(1, (Parachain(42), PalletInstance(42), GeneralIndex(42))); + let expected = MultiLocation::new(1, Parachain(42)); + assert_eq!(para_to_sibling.chain_location(), expected); + + // Parachain context pointing to different consensus relay, + let para_to_remote_relay = + MultiLocation::new(2, (GlobalConsensus(Kusama), PalletInstance(42), GeneralIndex(42))); + let expected = MultiLocation::new(2, GlobalConsensus(Kusama)); + assert_eq!(para_to_remote_relay.chain_location(), expected); + + // Parachain context pointing to different consensus parachain, + let para_to_remote_para = MultiLocation::new( + 2, + (GlobalConsensus(Kusama), Parachain(42), PalletInstance(42), GeneralIndex(42)), + ); + let expected = MultiLocation::new(2, (GlobalConsensus(Kusama), Parachain(42))); + assert_eq!(para_to_remote_para.chain_location(), expected); + } + #[test] fn conversion_from_other_types_works() { use crate::v2; diff --git a/polkadot/xcm/xcm-builder/src/barriers.rs b/polkadot/xcm/xcm-builder/src/barriers.rs index 3b13cab2c1e..c2b62751c68 100644 --- a/polkadot/xcm/xcm-builder/src/barriers.rs +++ b/polkadot/xcm/xcm-builder/src/barriers.rs @@ -81,10 +81,15 @@ impl> ShouldExecute for AllowTopLevelPaidExecutionFro instructions[..end] .matcher() .match_next_inst(|inst| match inst { - ReceiveTeleportedAsset(..) | ReserveAssetDeposited(..) => Ok(()), - WithdrawAsset(ref assets) if assets.len() <= MAX_ASSETS_FOR_BUY_EXECUTION => Ok(()), - ClaimAsset { ref assets, .. 
} if assets.len() <= MAX_ASSETS_FOR_BUY_EXECUTION => - Ok(()), + ReceiveTeleportedAsset(ref assets) | + ReserveAssetDeposited(ref assets) | + WithdrawAsset(ref assets) | + ClaimAsset { ref assets, .. } => + if assets.len() <= MAX_ASSETS_FOR_BUY_EXECUTION { + Ok(()) + } else { + Err(ProcessMessageError::BadFormat) + }, _ => Err(ProcessMessageError::BadFormat), })? .skip_inst_while(|inst| matches!(inst, ClearOrigin))? diff --git a/polkadot/xcm/xcm-builder/src/tests/pay/mock.rs b/polkadot/xcm/xcm-builder/src/tests/pay/mock.rs index e51bd952177..78b9284c689 100644 --- a/polkadot/xcm/xcm-builder/src/tests/pay/mock.rs +++ b/polkadot/xcm/xcm-builder/src/tests/pay/mock.rs @@ -246,11 +246,6 @@ type SovereignAccountOf = ( HashedDescription>, ); -#[cfg(feature = "runtime-benchmarks")] -parameter_types! { - pub ReachableDest: Option = Some(Parachain(1000).into()); -} - impl pallet_xcm::Config for Test { type RuntimeEvent = RuntimeEvent; type SendXcmOrigin = EnsureXcmOrigin; @@ -274,8 +269,6 @@ impl pallet_xcm::Config for Test { type MaxRemoteLockConsumers = frame_support::traits::ConstU32<0>; type RemoteLockConsumerIdentifier = (); type WeightInfo = pallet_xcm::TestWeightInfo; - #[cfg(feature = "runtime-benchmarks")] - type ReachableDest = ReachableDest; type AdminOrigin = EnsureRoot; } diff --git a/polkadot/xcm/xcm-builder/tests/mock/mod.rs b/polkadot/xcm/xcm-builder/tests/mock/mod.rs index 5fcba5e2f54..4f183c7a15b 100644 --- a/polkadot/xcm/xcm-builder/tests/mock/mod.rs +++ b/polkadot/xcm/xcm-builder/tests/mock/mod.rs @@ -210,11 +210,6 @@ impl xcm_executor::Config for XcmConfig { pub type LocalOriginToLocation = SignedToAccountId32; -#[cfg(feature = "runtime-benchmarks")] -parameter_types! { - pub ReachableDest: Option = Some(Here.into()); -} - impl pallet_xcm::Config for Runtime { type RuntimeEvent = RuntimeEvent; type UniversalLocation = UniversalLocation; @@ -239,8 +234,6 @@ impl pallet_xcm::Config for Runtime { type MaxRemoteLockConsumers = frame_support::traits::ConstU32<0>; type RemoteLockConsumerIdentifier = (); type WeightInfo = pallet_xcm::TestWeightInfo; - #[cfg(feature = "runtime-benchmarks")] - type ReachableDest = ReachableDest; type AdminOrigin = EnsureRoot; } diff --git a/polkadot/xcm/xcm-executor/Cargo.toml b/polkadot/xcm/xcm-executor/Cargo.toml index 9f0caa80617..d5edb1ea0f5 100644 --- a/polkadot/xcm/xcm-executor/Cargo.toml +++ b/polkadot/xcm/xcm-executor/Cargo.toml @@ -10,6 +10,7 @@ version = "1.0.0" impl-trait-for-tuples = "0.2.2" environmental = { version = "1.1.4", default-features = false } parity-scale-codec = { version = "3.6.1", default-features = false, features = ["derive"] } +scale-info = { version = "2.5.0", default-features = false, features = ["derive", "serde"] } xcm = { package = "staging-xcm", path = "..", default-features = false } sp-std = { path = "../../../substrate/primitives/std", default-features = false } sp-io = { path = "../../../substrate/primitives/io", default-features = false } @@ -34,6 +35,7 @@ std = [ "frame-support/std", "log/std", "parity-scale-codec/std", + "scale-info/std", "sp-arithmetic/std", "sp-core/std", "sp-io/std", diff --git a/polkadot/xcm/xcm-executor/src/lib.rs b/polkadot/xcm/xcm-executor/src/lib.rs index e43d7a04899..ac256ea1489 100644 --- a/polkadot/xcm/xcm-executor/src/lib.rs +++ b/polkadot/xcm/xcm-executor/src/lib.rs @@ -32,7 +32,7 @@ pub mod traits; use traits::{ validate_export, AssetExchange, AssetLock, CallDispatcher, ClaimAssets, ConvertOrigin, DropAssets, Enact, ExportXcm, FeeManager, FeeReason, OnResponse, Properties, 
ShouldExecute, - TransactAsset, VersionChangeNotifier, WeightBounds, WeightTrader, + TransactAsset, VersionChangeNotifier, WeightBounds, WeightTrader, XcmAssetTransfers, }; mod assets; @@ -254,6 +254,12 @@ impl ExecuteXcm for XcmExecutor XcmAssetTransfers for XcmExecutor { + type IsReserve = Config::IsReserve; + type IsTeleporter = Config::IsTeleporter; + type AssetTransactor = Config::AssetTransactor; +} + #[derive(Debug)] pub struct ExecutorError { pub index: u32, diff --git a/polkadot/xcm/xcm-executor/src/traits/asset_transfer.rs b/polkadot/xcm/xcm-executor/src/traits/asset_transfer.rs new file mode 100644 index 00000000000..5fdc9b15e01 --- /dev/null +++ b/polkadot/xcm/xcm-executor/src/traits/asset_transfer.rs @@ -0,0 +1,90 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +use crate::traits::TransactAsset; +use frame_support::traits::ContainsPair; +use scale_info::TypeInfo; +use sp_runtime::codec::{Decode, Encode}; +use xcm::prelude::*; + +/// Errors related to determining asset transfer support. +#[derive(Copy, Clone, Encode, Decode, Eq, PartialEq, Debug, TypeInfo)] +pub enum Error { + /// Invalid non-concrete asset. + NotConcrete, + /// Reserve chain could not be determined for assets. + UnknownReserve, +} + +/// Specify which type of asset transfer is required for a particular `(asset, dest)` combination. +#[derive(Copy, Clone, PartialEq, Debug)] +pub enum TransferType { + /// should teleport `asset` to `dest` + Teleport, + /// should reserve-transfer `asset` to `dest`, using local chain as reserve + LocalReserve, + /// should reserve-transfer `asset` to `dest`, using `dest` as reserve + DestinationReserve, + /// should reserve-transfer `asset` to `dest`, using remote chain `MultiLocation` as reserve + RemoteReserve(MultiLocation), +} + +/// A trait for identifying asset transfer type based on `IsTeleporter` and `IsReserve` +/// configurations. +pub trait XcmAssetTransfers { + /// Combinations of (Asset, Location) pairs which we trust as reserves. Meaning + /// reserve-based-transfers are to be used for assets matching this filter. + type IsReserve: ContainsPair; + + /// Combinations of (Asset, Location) pairs which we trust as teleporters. Meaning teleports are + /// to be used for assets matching this filter. + type IsTeleporter: ContainsPair; + + /// How to withdraw and deposit an asset. + type AssetTransactor: TransactAsset; + + /// Determine transfer type to be used for transferring `asset` from local chain to `dest`. 
+ fn determine_for(asset: &MultiAsset, dest: &MultiLocation) -> Result { + if Self::IsTeleporter::contains(asset, dest) { + // we trust destination for teleporting asset + return Ok(TransferType::Teleport) + } else if Self::IsReserve::contains(asset, dest) { + // we trust destination as asset reserve location + return Ok(TransferType::DestinationReserve) + } + + // try to determine reserve location based on asset id/location + let asset_location = match asset.id { + Concrete(location) => Ok(location.chain_location()), + _ => Err(Error::NotConcrete), + }?; + if asset_location == MultiLocation::here() || + Self::IsTeleporter::contains(asset, &asset_location) + { + // if the asset is local, then it's a local reserve + // it's also a local reserve if the asset's location is not `here` but it's a location + // where it can be teleported to `here` => local reserve + Ok(TransferType::LocalReserve) + } else if Self::IsReserve::contains(asset, &asset_location) { + // remote location that is recognized as reserve location for asset + Ok(TransferType::RemoteReserve(asset_location)) + } else { + // remote location that is not configured either as teleporter or reserve => cannot + // determine asset reserve + Err(Error::UnknownReserve) + } + } +} diff --git a/polkadot/xcm/xcm-executor/src/traits/mod.rs b/polkadot/xcm/xcm-executor/src/traits/mod.rs index a9439968fa6..71e75c77e93 100644 --- a/polkadot/xcm/xcm-executor/src/traits/mod.rs +++ b/polkadot/xcm/xcm-executor/src/traits/mod.rs @@ -20,10 +20,12 @@ mod conversion; pub use conversion::{CallDispatcher, ConvertLocation, ConvertOrigin, WithOriginFilter}; mod drop_assets; pub use drop_assets::{ClaimAssets, DropAssets}; -mod asset_lock; -pub use asset_lock::{AssetLock, Enact, LockError}; mod asset_exchange; pub use asset_exchange::AssetExchange; +mod asset_lock; +pub use asset_lock::{AssetLock, Enact, LockError}; +mod asset_transfer; +pub use asset_transfer::{Error as AssetTransferError, TransferType, XcmAssetTransfers}; mod export; pub use export::{export_xcm, validate_export, ExportXcm}; mod fee_manager; diff --git a/polkadot/xcm/xcm-simulator/example/src/parachain.rs b/polkadot/xcm/xcm-simulator/example/src/parachain.rs index fa9d3300619..9f0411970ce 100644 --- a/polkadot/xcm/xcm-simulator/example/src/parachain.rs +++ b/polkadot/xcm/xcm-simulator/example/src/parachain.rs @@ -399,11 +399,6 @@ impl mock_msg_queue::Config for Runtime { pub type LocalOriginToLocation = SignedToAccountId32; -#[cfg(feature = "runtime-benchmarks")] -parameter_types! { - pub ReachableDest: Option = Some(Parent.into()); -} - pub struct TrustedLockerCase(PhantomData); impl> ContainsPair for TrustedLockerCase @@ -443,8 +438,6 @@ impl pallet_xcm::Config for Runtime { type MaxRemoteLockConsumers = ConstU32<0>; type RemoteLockConsumerIdentifier = (); type WeightInfo = pallet_xcm::TestWeightInfo; - #[cfg(feature = "runtime-benchmarks")] - type ReachableDest = ReachableDest; type AdminOrigin = EnsureRoot; } diff --git a/polkadot/xcm/xcm-simulator/example/src/relay_chain.rs b/polkadot/xcm/xcm-simulator/example/src/relay_chain.rs index 0fba4cb270d..bdd7ff6d3ea 100644 --- a/polkadot/xcm/xcm-simulator/example/src/relay_chain.rs +++ b/polkadot/xcm/xcm-simulator/example/src/relay_chain.rs @@ -199,11 +199,6 @@ impl Config for XcmConfig { pub type LocalOriginToLocation = SignedToAccountId32; -#[cfg(feature = "runtime-benchmarks")] -parameter_types! 
{ - pub ReachableDest: Option = Some(Parachain(1).into()); -} - impl pallet_xcm::Config for Runtime { type RuntimeEvent = RuntimeEvent; type SendXcmOrigin = xcm_builder::EnsureXcmOrigin; @@ -228,8 +223,6 @@ impl pallet_xcm::Config for Runtime { type MaxRemoteLockConsumers = ConstU32<0>; type RemoteLockConsumerIdentifier = (); type WeightInfo = pallet_xcm::TestWeightInfo; - #[cfg(feature = "runtime-benchmarks")] - type ReachableDest = ReachableDest; type AdminOrigin = EnsureRoot; } diff --git a/polkadot/xcm/xcm-simulator/fuzzer/src/parachain.rs b/polkadot/xcm/xcm-simulator/fuzzer/src/parachain.rs index f9ad0252285..41234837aca 100644 --- a/polkadot/xcm/xcm-simulator/fuzzer/src/parachain.rs +++ b/polkadot/xcm/xcm-simulator/fuzzer/src/parachain.rs @@ -313,11 +313,6 @@ impl mock_msg_queue::Config for Runtime { pub type LocalOriginToLocation = SignedToAccountId32; -#[cfg(feature = "runtime-benchmarks")] -parameter_types! { - pub ReachableDest: Option = Some(Parent.into()); -} - impl pallet_xcm::Config for Runtime { type RuntimeEvent = RuntimeEvent; type SendXcmOrigin = EnsureXcmOrigin; @@ -341,8 +336,6 @@ impl pallet_xcm::Config for Runtime { type MaxRemoteLockConsumers = frame_support::traits::ConstU32<0>; type RemoteLockConsumerIdentifier = (); type WeightInfo = pallet_xcm::TestWeightInfo; - #[cfg(feature = "runtime-benchmarks")] - type ReachableDest = ReachableDest; type AdminOrigin = EnsureRoot; } diff --git a/polkadot/xcm/xcm-simulator/fuzzer/src/relay_chain.rs b/polkadot/xcm/xcm-simulator/fuzzer/src/relay_chain.rs index 756cf4803b1..c9a57db970a 100644 --- a/polkadot/xcm/xcm-simulator/fuzzer/src/relay_chain.rs +++ b/polkadot/xcm/xcm-simulator/fuzzer/src/relay_chain.rs @@ -163,11 +163,6 @@ impl Config for XcmConfig { pub type LocalOriginToLocation = SignedToAccountId32; -#[cfg(feature = "runtime-benchmarks")] -parameter_types! 
{ - pub ReachableDest: Option = Some(Parachain(1).into()); -} - impl pallet_xcm::Config for Runtime { type RuntimeEvent = RuntimeEvent; type SendXcmOrigin = xcm_builder::EnsureXcmOrigin; @@ -192,8 +187,6 @@ impl pallet_xcm::Config for Runtime { type MaxRemoteLockConsumers = ConstU32<0>; type RemoteLockConsumerIdentifier = (); type WeightInfo = pallet_xcm::TestWeightInfo; - #[cfg(feature = "runtime-benchmarks")] - type ReachableDest = ReachableDest; type AdminOrigin = EnsureRoot; } -- GitLab From c7bd8804389e4b9b7f9d0c39b06094aebb303e5d Mon Sep 17 00:00:00 2001 From: Lulu Date: Mon, 13 Nov 2023 19:10:12 +0000 Subject: [PATCH 036/199] Add CI to claim crates (#2299) --- .github/workflows/claim-crates.yml | 25 +++++++++++++++++++++++++ 1 file changed, 25 insertions(+) create mode 100644 .github/workflows/claim-crates.yml diff --git a/.github/workflows/claim-crates.yml b/.github/workflows/claim-crates.yml new file mode 100644 index 00000000000..a1d28d42828 --- /dev/null +++ b/.github/workflows/claim-crates.yml @@ -0,0 +1,25 @@ +name: Claim Crates + +on: + push: + branches: + - master + +jobs: + claim-crates: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@8ade135a41bc03ea155e62e844d188df1ea18608 # v4.1.0 + + - name: Rust Cache + uses: Swatinem/rust-cache@3cf7f8cc28d1b4e7d01e3783be10a97d55d483c8 # v2.7.1 + with: + cache-on-failure: true + + - name: install parity-publish + run: cargo install parity-publish@0.2.0 + + - name: parity-publish claim + env: + PARITY_PUBLISH_CRATESIO_TOKEN: ${{ secrets.CRATESIO_PUBLISH_CLAIM_TOKEN }} + run: parity-publish claim -- GitLab From f332d6881d4275886f5d738ae9eee96d1ced9c94 Mon Sep 17 00:00:00 2001 From: Assem Date: Mon, 13 Nov 2023 20:34:27 +0100 Subject: [PATCH 037/199] =?UTF-8?q?doc(client/cli/src/arg=5Fenums.rs):=20f?= =?UTF-8?q?ix=20typo=20=E2=9C=8D=EF=B8=8F=20(#2298)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- substrate/client/cli/src/arg_enums.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/substrate/client/cli/src/arg_enums.rs b/substrate/client/cli/src/arg_enums.rs index c55b97675da..d4a4b7cfdf6 100644 --- a/substrate/client/cli/src/arg_enums.rs +++ b/substrate/client/cli/src/arg_enums.rs @@ -225,7 +225,7 @@ pub enum OffchainWorkerEnabled { #[derive(Debug, Clone, Copy, ValueEnum, PartialEq)] #[value(rename_all = "kebab-case")] pub enum SyncMode { - /// Full sync. Download end verify all blocks. + /// Full sync. Download and verify all blocks. Full, /// Download blocks without executing them. Download latest state with proofs. Fast, -- GitLab From 8d2637905ba920dd1f0e8f1212ec98e45420f514 Mon Sep 17 00:00:00 2001 From: Javier Bullrich Date: Mon, 13 Nov 2023 21:17:36 +0100 Subject: [PATCH 038/199] review-bot: trigger only on review approvals (#2289) Moved the review event of review-bot to only be triggered in approvals. Because we only update the required reviews when someone approves, this will stop the bot from immediately requesting a new review when someone comments or request changes as they should have been already notified in the first batch. 
--- .github/workflows/review-trigger.yml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/.github/workflows/review-trigger.yml b/.github/workflows/review-trigger.yml index 1ae6b79ffbd..2810ea356e6 100644 --- a/.github/workflows/review-trigger.yml +++ b/.github/workflows/review-trigger.yml @@ -13,7 +13,8 @@ on: jobs: trigger-review-bot: - if: github.event.pull_request.draft != true + # (It is not a draft) && (it is not a review || it is an approving review) + if: ${{ github.event.pull_request.draft != true && (github.event_name != 'pull_request_review' || (github.event.review && github.event.review.state == 'APPROVED')) }} runs-on: ubuntu-latest name: trigger review bot steps: -- GitLab From 689b9d91c7e95660682302510b89a5f296098a10 Mon Sep 17 00:00:00 2001 From: Alin Dima Date: Tue, 14 Nov 2023 10:37:41 +0200 Subject: [PATCH 039/199] cumulus-pov-recovery: check pov_hash instead of reencoding data (#2287) Collators were previously reencoding the available data and checking the erasure root. Replace that with just checking the PoV hash, which consumes much less CPU and takes less time. We also don't need to check the `PersistedValidationData` hash, as collators don't use it. Reason: https://github.com/paritytech/polkadot-sdk/issues/575#issuecomment-1806572230 After systematic chunks recovery is merged, collators will no longer do any reed-solomon encoding/decoding, which has proven to be a great CPU consumer. Signed-off-by: alindima --- .../src/active_candidate_recovery.rs | 15 +- cumulus/client/pov-recovery/src/lib.rs | 38 ++--- .../src/collator_overseer.rs | 2 +- .../network/availability-recovery/src/lib.rs | 39 ++++- .../network/availability-recovery/src/task.rs | 138 +++++++++++------- 5 files changed, 141 insertions(+), 91 deletions(-) diff --git a/cumulus/client/pov-recovery/src/active_candidate_recovery.rs b/cumulus/client/pov-recovery/src/active_candidate_recovery.rs index 322b19c796a..2c635320ff4 100644 --- a/cumulus/client/pov-recovery/src/active_candidate_recovery.rs +++ b/cumulus/client/pov-recovery/src/active_candidate_recovery.rs @@ -16,12 +16,12 @@ use sp_runtime::traits::Block as BlockT; -use polkadot_node_primitives::AvailableData; +use polkadot_node_primitives::PoV; use polkadot_node_subsystem::messages::AvailabilityRecoveryMessage; use futures::{channel::oneshot, stream::FuturesUnordered, Future, FutureExt, StreamExt}; -use std::{collections::HashSet, pin::Pin}; +use std::{collections::HashSet, pin::Pin, sync::Arc}; use crate::RecoveryHandle; @@ -30,9 +30,8 @@ use crate::RecoveryHandle; /// This handles the candidate recovery and tracks the activate recoveries. pub(crate) struct ActiveCandidateRecovery { /// The recoveries that are currently being executed. - recoveries: FuturesUnordered< - Pin)> + Send>>, - >, + recoveries: + FuturesUnordered>)> + Send>>>, /// The block hashes of the candidates currently being recovered. candidates: HashSet, recovery_handle: Box, @@ -68,7 +67,7 @@ impl ActiveCandidateRecovery { self.recoveries.push( async move { match rx.await { - Ok(Ok(res)) => (block_hash, Some(res)), + Ok(Ok(res)) => (block_hash, Some(res.pov)), Ok(Err(error)) => { tracing::debug!( target: crate::LOG_TARGET, @@ -93,8 +92,8 @@ impl ActiveCandidateRecovery { /// Waits for the next recovery. /// - /// If the returned [`AvailableData`] is `None`, it means that the recovery failed. - pub async fn wait_for_recovery(&mut self) -> (Block::Hash, Option) { + /// If the returned [`PoV`] is `None`, it means that the recovery failed. 
+ pub async fn wait_for_recovery(&mut self) -> (Block::Hash, Option>) { loop { if let Some(res) = self.recoveries.next().await { self.candidates.remove(&res.0); diff --git a/cumulus/client/pov-recovery/src/lib.rs b/cumulus/client/pov-recovery/src/lib.rs index b050bc66799..b9a140f55c6 100644 --- a/cumulus/client/pov-recovery/src/lib.rs +++ b/cumulus/client/pov-recovery/src/lib.rs @@ -51,7 +51,7 @@ use sc_consensus::import_queue::{ImportQueueService, IncomingBlock}; use sp_consensus::{BlockOrigin, BlockStatus, SyncOracle}; use sp_runtime::traits::{Block as BlockT, Header as HeaderT, NumberFor}; -use polkadot_node_primitives::{AvailableData, POV_BOMB_LIMIT}; +use polkadot_node_primitives::{PoV, POV_BOMB_LIMIT}; use polkadot_node_subsystem::messages::AvailabilityRecoveryMessage; use polkadot_overseer::Handle as OverseerHandle; use polkadot_primitives::{ @@ -346,15 +346,11 @@ where } /// Handle a recovered candidate. - async fn handle_candidate_recovered( - &mut self, - block_hash: Block::Hash, - available_data: Option, - ) { - let available_data = match available_data { - Some(data) => { + async fn handle_candidate_recovered(&mut self, block_hash: Block::Hash, pov: Option<&PoV>) { + let pov = match pov { + Some(pov) => { self.candidates_in_retry.remove(&block_hash); - data + pov }, None => if self.candidates_in_retry.insert(block_hash) { @@ -373,18 +369,16 @@ where }, }; - let raw_block_data = match sp_maybe_compressed_blob::decompress( - &available_data.pov.block_data.0, - POV_BOMB_LIMIT, - ) { - Ok(r) => r, - Err(error) => { - tracing::debug!(target: LOG_TARGET, ?error, "Failed to decompress PoV"); + let raw_block_data = + match sp_maybe_compressed_blob::decompress(&pov.block_data.0, POV_BOMB_LIMIT) { + Ok(r) => r, + Err(error) => { + tracing::debug!(target: LOG_TARGET, ?error, "Failed to decompress PoV"); - self.reset_candidate(block_hash); - return - }, - }; + self.reset_candidate(block_hash); + return + }, + }; let block_data = match ParachainBlockData::::decode(&mut &raw_block_data[..]) { Ok(d) => d, @@ -595,10 +589,10 @@ where next_to_recover = self.candidate_recovery_queue.next_recovery().fuse() => { self.recover_candidate(next_to_recover).await; }, - (block_hash, available_data) = + (block_hash, pov) = self.active_candidate_recovery.wait_for_recovery().fuse() => { - self.handle_candidate_recovered(block_hash, available_data).await; + self.handle_candidate_recovered(block_hash, pov.as_deref()).await; }, } } diff --git a/cumulus/client/relay-chain-minimal-node/src/collator_overseer.rs b/cumulus/client/relay-chain-minimal-node/src/collator_overseer.rs index 379217e4a63..945344f85e9 100644 --- a/cumulus/client/relay-chain-minimal-node/src/collator_overseer.rs +++ b/cumulus/client/relay-chain-minimal-node/src/collator_overseer.rs @@ -102,7 +102,7 @@ fn build_overseer( let network_bridge_metrics: NetworkBridgeMetrics = Metrics::register(registry)?; let builder = Overseer::builder() .availability_distribution(DummySubsystem) - .availability_recovery(AvailabilityRecoverySubsystem::with_availability_store_skip( + .availability_recovery(AvailabilityRecoverySubsystem::for_collator( available_data_req_receiver, Metrics::register(registry)?, )) diff --git a/polkadot/node/network/availability-recovery/src/lib.rs b/polkadot/node/network/availability-recovery/src/lib.rs index 9acc48ea92e..4a658449f09 100644 --- a/polkadot/node/network/availability-recovery/src/lib.rs +++ b/polkadot/node/network/availability-recovery/src/lib.rs @@ -105,6 +105,17 @@ pub struct AvailabilityRecoverySubsystem { 
req_receiver: IncomingRequestReceiver, /// Metrics for this subsystem. metrics: Metrics, + /// The type of check to perform after available data was recovered. + post_recovery_check: PostRecoveryCheck, +} + +#[derive(Clone, PartialEq, Debug)] +/// The type of check to perform after available data was recovered. +pub enum PostRecoveryCheck { + /// Reencode the data and check erasure root. For validators. + Reencode, + /// Only check the pov hash. For collators only. + PovHash, } /// Expensive erasure coding computations that we want to run on a blocking thread. @@ -344,6 +355,7 @@ async fn launch_recovery_task( metrics: &Metrics, recovery_strategies: VecDeque::Sender>>>, bypass_availability_store: bool, + post_recovery_check: PostRecoveryCheck, ) -> error::Result<()> { let candidate_hash = receipt.hash(); let params = RecoveryParams { @@ -354,6 +366,8 @@ async fn launch_recovery_task( erasure_root: receipt.descriptor.erasure_root, metrics: metrics.clone(), bypass_availability_store, + post_recovery_check, + pov_hash: receipt.descriptor.pov_hash, }; let recovery_task = RecoveryTask::new(ctx.sender().clone(), params, recovery_strategies); @@ -390,6 +404,7 @@ async fn handle_recover( erasure_task_tx: futures::channel::mpsc::Sender, recovery_strategy_kind: RecoveryStrategyKind, bypass_availability_store: bool, + post_recovery_check: PostRecoveryCheck, ) -> error::Result<()> { let candidate_hash = receipt.hash(); @@ -486,6 +501,7 @@ async fn handle_recover( metrics, recovery_strategies, bypass_availability_store, + post_recovery_check, ) .await }, @@ -527,15 +543,17 @@ async fn query_chunk_size( #[overseer::contextbounds(AvailabilityRecovery, prefix = self::overseer)] impl AvailabilityRecoverySubsystem { - /// Create a new instance of `AvailabilityRecoverySubsystem` which never requests the - /// `AvailabilityStoreSubsystem` subsystem. - pub fn with_availability_store_skip( + /// Create a new instance of `AvailabilityRecoverySubsystem` suitable for collator nodes, + /// which never requests the `AvailabilityStoreSubsystem` subsystem and only checks the POV hash + /// instead of reencoding the available data. 
+ pub fn for_collator( req_receiver: IncomingRequestReceiver, metrics: Metrics, ) -> Self { Self { recovery_strategy_kind: RecoveryStrategyKind::BackersFirstIfSizeLower(SMALL_POV_LIMIT), bypass_availability_store: true, + post_recovery_check: PostRecoveryCheck::PovHash, req_receiver, metrics, } @@ -550,6 +568,7 @@ impl AvailabilityRecoverySubsystem { Self { recovery_strategy_kind: RecoveryStrategyKind::BackersFirstAlways, bypass_availability_store: false, + post_recovery_check: PostRecoveryCheck::Reencode, req_receiver, metrics, } @@ -563,6 +582,7 @@ impl AvailabilityRecoverySubsystem { Self { recovery_strategy_kind: RecoveryStrategyKind::ChunksAlways, bypass_availability_store: false, + post_recovery_check: PostRecoveryCheck::Reencode, req_receiver, metrics, } @@ -577,6 +597,7 @@ impl AvailabilityRecoverySubsystem { Self { recovery_strategy_kind: RecoveryStrategyKind::BackersFirstIfSizeLower(SMALL_POV_LIMIT), bypass_availability_store: false, + post_recovery_check: PostRecoveryCheck::Reencode, req_receiver, metrics, } @@ -584,8 +605,13 @@ impl AvailabilityRecoverySubsystem { async fn run(self, mut ctx: Context) -> SubsystemResult<()> { let mut state = State::default(); - let Self { mut req_receiver, metrics, recovery_strategy_kind, bypass_availability_store } = - self; + let Self { + mut req_receiver, + metrics, + recovery_strategy_kind, + bypass_availability_store, + post_recovery_check, + } = self; let (erasure_task_tx, erasure_task_rx) = futures::channel::mpsc::channel(16); let mut erasure_task_rx = erasure_task_rx.fuse(); @@ -675,7 +701,8 @@ impl AvailabilityRecoverySubsystem { &metrics, erasure_task_tx.clone(), recovery_strategy_kind.clone(), - bypass_availability_store + bypass_availability_store, + post_recovery_check.clone() ).await { gum::warn!( target: LOG_TARGET, diff --git a/polkadot/node/network/availability-recovery/src/task.rs b/polkadot/node/network/availability-recovery/src/task.rs index d5bc2da8494..f705d5c0e4c 100644 --- a/polkadot/node/network/availability-recovery/src/task.rs +++ b/polkadot/node/network/availability-recovery/src/task.rs @@ -20,7 +20,7 @@ use crate::{ futures_undead::FuturesUndead, is_chunk_valid, is_unavailable, metrics::Metrics, ErasureTask, - LOG_TARGET, + PostRecoveryCheck, LOG_TARGET, }; use futures::{channel::oneshot, SinkExt}; #[cfg(not(test))] @@ -95,6 +95,12 @@ pub struct RecoveryParams { /// Do not request data from availability-store. Useful for collators. pub bypass_availability_store: bool, + + /// The type of check to perform after available data was recovered. + pub post_recovery_check: PostRecoveryCheck, + + /// The blake2-256 hash of the PoV. 
+ pub pov_hash: Hash, } /// Intermediate/common data that must be passed between `RecoveryStrategy`s belonging to the @@ -501,39 +507,48 @@ impl RecoveryStrategy match response.await { Ok(req_res::v1::AvailableDataFetchingResponse::AvailableData(data)) => { - let (reencode_tx, reencode_rx) = oneshot::channel(); - self.params - .erasure_task_tx - .send(ErasureTask::Reencode( - common_params.n_validators, - common_params.erasure_root, - data, - reencode_tx, - )) - .await - .map_err(|_| RecoveryError::ChannelClosed)?; - - let reencode_response = - reencode_rx.await.map_err(|_| RecoveryError::ChannelClosed)?; - - if let Some(data) = reencode_response { - gum::trace!( - target: LOG_TARGET, - candidate_hash = ?common_params.candidate_hash, - "Received full data", - ); + let maybe_data = match common_params.post_recovery_check { + PostRecoveryCheck::Reencode => { + let (reencode_tx, reencode_rx) = oneshot::channel(); + self.params + .erasure_task_tx + .send(ErasureTask::Reencode( + common_params.n_validators, + common_params.erasure_root, + data, + reencode_tx, + )) + .await + .map_err(|_| RecoveryError::ChannelClosed)?; + + reencode_rx.await.map_err(|_| RecoveryError::ChannelClosed)? + }, + PostRecoveryCheck::PovHash => + (data.pov.hash() == common_params.pov_hash).then_some(data), + }; - return Ok(data) - } else { - gum::debug!( - target: LOG_TARGET, - candidate_hash = ?common_params.candidate_hash, - ?validator_index, - "Invalid data response", - ); + match maybe_data { + Some(data) => { + gum::trace!( + target: LOG_TARGET, + candidate_hash = ?common_params.candidate_hash, + "Received full data", + ); - // it doesn't help to report the peer with req/res. - } + return Ok(data) + }, + None => { + gum::debug!( + target: LOG_TARGET, + candidate_hash = ?common_params.candidate_hash, + ?validator_index, + "Invalid data response", + ); + + // it doesn't help to report the peer with req/res. + // we'll try the next backer. + }, + }; }, Ok(req_res::v1::AvailableDataFetchingResponse::NoSuchData) => {}, Err(e) => gum::debug!( @@ -647,22 +662,43 @@ impl FetchChunks { match available_data_response { Ok(data) => { - // Send request to re-encode the chunks and check merkle root. - let (reencode_tx, reencode_rx) = oneshot::channel(); - self.erasure_task_tx - .send(ErasureTask::Reencode( - common_params.n_validators, - common_params.erasure_root, - data, - reencode_tx, - )) - .await - .map_err(|_| RecoveryError::ChannelClosed)?; - - let reencode_response = - reencode_rx.await.map_err(|_| RecoveryError::ChannelClosed)?; - - if let Some(data) = reencode_response { + let maybe_data = match common_params.post_recovery_check { + PostRecoveryCheck::Reencode => { + // Send request to re-encode the chunks and check merkle root. 
+ let (reencode_tx, reencode_rx) = oneshot::channel();
+ self.erasure_task_tx
+ .send(ErasureTask::Reencode(
+ common_params.n_validators,
+ common_params.erasure_root,
+ data,
+ reencode_tx,
+ ))
+ .await
+ .map_err(|_| RecoveryError::ChannelClosed)?;
+
+ reencode_rx.await.map_err(|_| RecoveryError::ChannelClosed)?.or_else(|| {
+ gum::trace!(
+ target: LOG_TARGET,
+ candidate_hash = ?common_params.candidate_hash,
+ erasure_root = ?common_params.erasure_root,
+ "Data recovery error - root mismatch",
+ );
+ None
+ })
+ },
+ PostRecoveryCheck::PovHash =>
+ (data.pov.hash() == common_params.pov_hash).then_some(data).or_else(|| {
+ gum::trace!(
+ target: LOG_TARGET,
+ candidate_hash = ?common_params.candidate_hash,
+ pov_hash = ?common_params.pov_hash,
+ "Data recovery error - PoV hash mismatch",
+ );
+ None
+ }),
+ };
+
+ if let Some(data) = maybe_data {
gum::trace!(
target: LOG_TARGET,
candidate_hash = ?common_params.candidate_hash,
@@ -673,12 +709,6 @@ impl FetchChunks {
Ok(data)
} else {
recovery_duration.map(|rd| rd.stop_and_discard());
- gum::trace!(
- target: LOG_TARGET,
- candidate_hash = ?common_params.candidate_hash,
- erasure_root = ?common_params.erasure_root,
- "Data recovery error - root mismatch",
- );
Err(RecoveryError::Invalid)
}
-- GitLab

From b371c3574190ace0d8dd89b7970a388ad3fa8a6a Mon Sep 17 00:00:00 2001
From: drskalman <35698397+drskalman@users.noreply.github.com>
Date: Tue, 14 Nov 2023 08:39:44 +0000
Subject: [PATCH 040/199] Fix `ecdsa_bls` verify in BEEFY primitives (#2066)

BEEFY ECDSA signatures are on the keccak hash of the messages. As such we cannot simply call `EcdsaBlsPair::verify(signature.as_inner_ref(), msg, self.as_inner_ref())` because that invokes the default ecdsa verification, which performs a blake2 hash, which we don't want. This brings up a second issue: it makes the `sign` and `verify` functions in `pair_crypto` useless, at least for the BEEFY use case. Moreover, there is no obvious clean way to generate the signature, given that pair_crypto does not expose `sign_prehashed`. You could in theory query the keystore for the pair (could you?), invoke `to_raw` and re-generate each sub-pair and sign using each. But that sounds extremely anticlimactic and would be frowned upon by auditors. So I appreciate any alternative suggestion.

---------

Co-authored-by: Davide Galassi
Co-authored-by: Robert Hambrock
---
.../primitives/consensus/beefy/src/lib.rs | 34 ++++++---
.../primitives/core/src/paired_crypto.rs | 76 ++++++++++++++++++-
2 files changed, 96 insertions(+), 14 deletions(-)

diff --git a/substrate/primitives/consensus/beefy/src/lib.rs b/substrate/primitives/consensus/beefy/src/lib.rs
index 5bdf8ce010a..e31c53237be 100644
--- a/substrate/primitives/consensus/beefy/src/lib.rs
+++ b/substrate/primitives/consensus/beefy/src/lib.rs
@@ -133,7 +133,7 @@ pub mod bls_crypto {
::Output: Into<[u8; 32]>,
{
fn verify(&self, signature: &::Signature, msg: &[u8]) -> bool {
- // `w3f-bls` library uses IETF hashing standard and as such does not exposes
+ // `w3f-bls` library uses IETF hashing standard and as such does not expose
// a choice of hash to field function.
// We are directly calling into the library to avoid introducing new host call.
// and because BeefyAuthorityId::verify is being called in the runtime so we don't have
@@ -157,7 +157,7 @@ pub mod bls_crypto {
pub mod ecdsa_bls_crypto {
use super::{BeefyAuthorityId, Hash, RuntimeAppPublic, KEY_TYPE};
use sp_application_crypto::{app_crypto, ecdsa_bls377};
- use sp_core::{crypto::Wraps, ecdsa_bls377::Pair as EcdsaBlsPair, Pair as _};
+ use sp_core::{crypto::Wraps, ecdsa_bls377::Pair as EcdsaBlsPair};
app_crypto!(ecdsa_bls377, KEY_TYPE);
@@ -167,17 +167,24 @@ pub mod ecdsa_bls_crypto {
/// Signature for a BEEFY authority using (ECDSA,BLS) as its crypto.
pub type AuthoritySignature = Signature;
- impl BeefyAuthorityId for AuthorityId
+ impl BeefyAuthorityId for AuthorityId
where
- ::Output: Into<[u8; 32]>,
+ H: Hash,
+ H::Output: Into<[u8; 32]>,
{
fn verify(&self, signature: &::Signature, msg: &[u8]) -> bool {
- // `w3f-bls` library uses IETF hashing standard and as such does not exposes
- // a choice of hash to field function.
- // We are directly calling into the library to avoid introducing new host call.
- // and because BeefyAuthorityId::verify is being called in the runtime so we don't have
-
- EcdsaBlsPair::verify(signature.as_inner_ref(), msg, self.as_inner_ref())
+ // We can not simply call
+ // `EcdsaBlsPair::verify(signature.as_inner_ref(), msg, self.as_inner_ref())`
+ // because that invokes ECDSA default verification which performs Blake2b hash
+ // which we don't want. This is because ECDSA signatures are meant to be verified
+ // on Ethereum network where Keccak hasher is significantly cheaper than Blake2b.
+ // See Figure 3 of [OnSc21](https://www.scitepress.org/Papers/2021/106066/106066.pdf)
+ // for comparison.
+ EcdsaBlsPair::verify_with_hasher::(
+ signature.as_inner_ref(),
+ msg,
+ self.as_inner_ref(),
+ )
}
}
}
@@ -257,6 +264,7 @@ pub enum ConsensusLog {
///
/// A vote message is a direct vote created by a BEEFY node on every voting round
/// and is gossiped to its peers.
+// TODO: Remove `Signature` generic type, instead get it from `Id::Signature`.
#[derive(Clone, Debug, Decode, Encode, PartialEq, TypeInfo)]
pub struct VoteMessage {
/// Commit to information extracted from a finalized block
@@ -507,11 +515,15 @@ mod tests {
let msg = &b"test-message"[..];
let (pair, _) = ecdsa_bls_crypto::Pair::generate();
- let signature: ecdsa_bls_crypto::Signature = pair.as_inner_ref().sign(&msg).into();
+ let signature: ecdsa_bls_crypto::Signature =
+ pair.as_inner_ref().sign_with_hasher::(&msg).into();
// Verification works if same hashing function is used when signing and verifying.
assert!(BeefyAuthorityId::::verify(&pair.public(), &signature, msg)); + // Verification doesn't work if we verify function provided by pair_crypto implementation + assert!(!ecdsa_bls_crypto::Pair::verify(&signature, msg, &pair.public())); + // Other public key doesn't work let (other_pair, _) = ecdsa_bls_crypto::Pair::generate(); assert!(!BeefyAuthorityId::::verify(&other_pair.public(), &signature, msg,)); diff --git a/substrate/primitives/core/src/paired_crypto.rs b/substrate/primitives/core/src/paired_crypto.rs index a97b657e757..edf6156f268 100644 --- a/substrate/primitives/core/src/paired_crypto.rs +++ b/substrate/primitives/core/src/paired_crypto.rs @@ -39,7 +39,13 @@ use sp_std::convert::TryFrom; /// ECDSA and BLS12-377 paired crypto scheme #[cfg(feature = "bls-experimental")] pub mod ecdsa_bls377 { - use crate::{bls377, crypto::CryptoTypeId, ecdsa}; + #[cfg(feature = "full_crypto")] + use crate::Hasher; + use crate::{ + bls377, + crypto::{CryptoTypeId, Pair as PairT, UncheckedFrom}, + ecdsa, + }; /// An identifier used to match public keys against BLS12-377 keys pub const CRYPTO_ID: CryptoTypeId = CryptoTypeId(*b"ecb7"); @@ -71,6 +77,60 @@ pub mod ecdsa_bls377 { impl super::CryptoType for Pair { type Pair = Pair; } + + #[cfg(feature = "full_crypto")] + impl Pair { + /// Hashes the `message` with the specified [`Hasher`] before signing sith the ECDSA secret + /// component. + /// + /// The hasher does not affect the BLS12-377 component. This generates BLS12-377 Signature + /// according to IETF standard. + pub fn sign_with_hasher(&self, message: &[u8]) -> Signature + where + H: Hasher, + H::Out: Into<[u8; 32]>, + { + let msg_hash = H::hash(message).into(); + + let mut raw: [u8; SIGNATURE_LEN] = [0u8; SIGNATURE_LEN]; + raw[..ecdsa::SIGNATURE_SERIALIZED_SIZE] + .copy_from_slice(self.left.sign_prehashed(&msg_hash).as_ref()); + raw[ecdsa::SIGNATURE_SERIALIZED_SIZE..] + .copy_from_slice(self.right.sign(message).as_ref()); + ::Signature::unchecked_from(raw) + } + + /// Hashes the `message` with the specified [`Hasher`] before verifying with the ECDSA + /// public component. + /// + /// The hasher does not affect the the BLS12-377 component. This verifies whether the + /// BLS12-377 signature was hashed and signed according to IETF standard + pub fn verify_with_hasher(sig: &Signature, message: &[u8], public: &Public) -> bool + where + H: Hasher, + H::Out: Into<[u8; 32]>, + { + let msg_hash = H::hash(message).into(); + + let Ok(left_pub) = public.0[..ecdsa::PUBLIC_KEY_SERIALIZED_SIZE].try_into() else { + return false + }; + let Ok(left_sig) = sig.0[0..ecdsa::SIGNATURE_SERIALIZED_SIZE].try_into() else { + return false + }; + if !ecdsa::Pair::verify_prehashed(&left_sig, &msg_hash, &left_pub) { + return false + } + + let Ok(right_pub) = public.0[ecdsa::PUBLIC_KEY_SERIALIZED_SIZE..].try_into() else { + return false + }; + let Ok(right_sig) = sig.0[ecdsa::SIGNATURE_SERIALIZED_SIZE..].try_into() else { + return false + }; + bls377::Pair::verify(&right_sig, message.as_ref(), &right_pub) + } + } } /// Secure seed length. 
@@ -455,12 +515,12 @@ where #[cfg(all(test, feature = "bls-experimental"))] mod test { use super::*; - use crate::crypto::DEV_PHRASE; + use crate::{crypto::DEV_PHRASE, KeccakHasher}; use ecdsa_bls377::{Pair, Signature}; use crate::{bls377, ecdsa}; - #[test] + #[test] fn test_length_of_paired_ecdsa_and_bls377_public_key_and_signature_is_correct() { assert_eq!( ::Public::LEN, @@ -617,6 +677,16 @@ mod test { assert_eq!(cmp, public); } + #[test] + fn sign_and_verify_with_hasher_works() { + let pair = + Pair::from_seed(&(b"12345678901234567890123456789012".as_slice().try_into().unwrap())); + let message = b"Something important"; + let signature = pair.sign_with_hasher::(&message[..]); + + assert!(Pair::verify_with_hasher::(&signature, &message[..], &pair.public())); + } + #[test] fn signature_serialization_works() { let pair = -- GitLab From ae1bdcfb91a26c5f65c5ca534aa8a04523ca2277 Mon Sep 17 00:00:00 2001 From: Branislav Kontur Date: Tue, 14 Nov 2023 10:43:40 +0100 Subject: [PATCH 041/199] Fix `expect_pallet` benchmarks not relaying on hard-coded `frame_system` dependency version (#2288) ## Problem/Motivation The benchmark for the `ExpectPallet` XCM instruction uses a hard-coded version `4.0.0` for the `frame_system` pallet. Unfortunately, this doesn't work for the `polkadot-fellows/runtimes` repository, where we use dependencies from `crates.io`, e.g., [frame-system::23.0.0.0](https://github.com/polkadot-fellows/runtimes/blob/dd7f86f0d50064481ed0b7c0218494a5cfad997e/relay/kusama/Cargo.toml#L83). Closes: https://github.com/paritytech/polkadot-sdk/issues/2284 ## Solution This PR fixes the benchmarks that require pallet information and enables the runtime to provide the correct/custom pallet information. The default implementation provides `frame_system::Pallet` with index `0`, where the version is not hard-coded but read from the runtime. 
## Local testing Added log for `T::valid_pallet` to the benchmarks like: ``` let valid_pallet = T::valid_pallet(); log::info!( target: "frame::benchmark::pallet", "valid_pallet: {}::{}::{}::{}::{}", valid_pallet.index, valid_pallet.module_name, valid_pallet.crate_version.major, valid_pallet.crate_version.minor, valid_pallet.crate_version.patch, ); ``` Run benchmarks for `westend`: ``` cargo run --bin=polkadot --features=runtime-benchmarks -- benchmark pallet --steps=2 --repeat=1 --extrinsic=* --heap-pages=4096 --json-file=./bench.json --chain=westend-dev --template=./polkadot/xcm/pallet-xcm-benchmarks/template.hbs --pallet=pallet_xcm_benchmarks::generic --output=./polkadot/runtime/westend/src/weights/xcm ``` --- For actual `frame_system` version: ``` [package] name = "frame-system" version = "4.0.0-dev" ``` Log dump: ``` 2023-11-13 12:56:45 Starting benchmark: pallet_xcm_benchmarks::generic::query_pallet 2023-11-13 12:56:45 valid_pallet: 0::frame_system::4::0::0 2023-11-13 12:56:45 valid_pallet: 0::frame_system::4::0::0 2023-11-13 12:56:45 valid_pallet: 0::frame_system::4::0::0 2023-11-13 12:56:45 Starting benchmark: pallet_xcm_benchmarks::generic::expect_pallet 2023-11-13 12:56:45 valid_pallet: 0::frame_system::4::0::0 2023-11-13 12:56:45 valid_pallet: 0::frame_system::4::0::0 2023-11-13 12:56:45 valid_pallet: 0::frame_system::4::0::0 ``` For changed `frame_system` version: ``` [package] name = "frame-system" version = "5.1.3-dev" ``` Log dump: ``` 2023-11-13 12:51:51 Starting benchmark: pallet_xcm_benchmarks::generic::query_pallet 2023-11-13 12:51:51 valid_pallet: 0::frame_system::5::1::3 2023-11-13 12:51:51 valid_pallet: 0::frame_system::5::1::3 2023-11-13 12:51:51 valid_pallet: 0::frame_system::5::1::3 2023-11-13 12:51:51 Starting benchmark: pallet_xcm_benchmarks::generic::expect_pallet 2023-11-13 12:51:51 valid_pallet: 0::frame_system::5::1::3 2023-11-13 12:51:51 valid_pallet: 0::frame_system::5::1::3 2023-11-13 12:51:51 valid_pallet: 0::frame_system::5::1::3 ``` ## References Closes: https://github.com/paritytech/polkadot-sdk/issues/2284 --- .../src/generic/benchmarking.rs | 15 ++++++++------- .../xcm/pallet-xcm-benchmarks/src/generic/mod.rs | 12 ++++++++++++ 2 files changed, 20 insertions(+), 7 deletions(-) diff --git a/polkadot/xcm/pallet-xcm-benchmarks/src/generic/benchmarking.rs b/polkadot/xcm/pallet-xcm-benchmarks/src/generic/benchmarking.rs index 4a997666027..f1c48ba9b83 100644 --- a/polkadot/xcm/pallet-xcm-benchmarks/src/generic/benchmarking.rs +++ b/polkadot/xcm/pallet-xcm-benchmarks/src/generic/benchmarking.rs @@ -413,8 +413,9 @@ benchmarks! { executor.set_holding(expected_assets_in_holding.into()); } + let valid_pallet = T::valid_pallet(); let instruction = Instruction::QueryPallet { - module_name: b"frame_system".to_vec(), + module_name: valid_pallet.module_name.as_bytes().to_vec(), response_info: QueryResponseInfo { destination, query_id, max_weight }, }; let xcm = Xcm(vec![instruction]); @@ -428,13 +429,13 @@ benchmarks! 
{ expect_pallet { let mut executor = new_executor::(Default::default()); - + let valid_pallet = T::valid_pallet(); let instruction = Instruction::ExpectPallet { - index: 0, - name: b"System".to_vec(), - module_name: b"frame_system".to_vec(), - crate_major: 4, - min_crate_minor: 0, + index: valid_pallet.index as u32, + name: valid_pallet.name.as_bytes().to_vec(), + module_name: valid_pallet.module_name.as_bytes().to_vec(), + crate_major: valid_pallet.crate_version.major.into(), + min_crate_minor: valid_pallet.crate_version.minor.into(), }; let xcm = Xcm(vec![instruction]); }: { diff --git a/polkadot/xcm/pallet-xcm-benchmarks/src/generic/mod.rs b/polkadot/xcm/pallet-xcm-benchmarks/src/generic/mod.rs index cbdfa8d0112..11f7bba19a9 100644 --- a/polkadot/xcm/pallet-xcm-benchmarks/src/generic/mod.rs +++ b/polkadot/xcm/pallet-xcm-benchmarks/src/generic/mod.rs @@ -91,6 +91,18 @@ pub mod pallet { /// /// If set to `Err`, benchmarks which rely on a universal alias will be skipped. fn alias_origin() -> Result<(MultiLocation, MultiLocation), BenchmarkError>; + + /// Returns a valid pallet info for `ExpectPallet` or `QueryPallet` benchmark. + /// + /// By default returns `frame_system::Pallet` info with expected pallet index `0`. + fn valid_pallet() -> frame_support::traits::PalletInfoData { + frame_support::traits::PalletInfoData { + index: as frame_support::traits::PalletInfoAccess>::index(), + name: as frame_support::traits::PalletInfoAccess>::name(), + module_name: as frame_support::traits::PalletInfoAccess>::module_name(), + crate_version: as frame_support::traits::PalletInfoAccess>::crate_version(), + } + } } #[pallet::pallet] -- GitLab From 54ca4f131b82f45b0c2d1f316d65d7c97ad9a99b Mon Sep 17 00:00:00 2001 From: Liam Aharon Date: Tue, 14 Nov 2023 15:01:15 +0400 Subject: [PATCH 042/199] Delete undecodable Westend Asset Hub `Balances::Hold` and `Nfts::ItemMetadataOf` (#2309) Closes https://github.com/paritytech/polkadot-sdk/issues/2241 See issue comments for more details about this storage. --- .../assets/asset-hub-westend/src/lib.rs | 78 ++++++++++++++++++- 1 file changed, 75 insertions(+), 3 deletions(-) diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs index d88aa2607e2..cd17b9d86f7 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs @@ -54,7 +54,7 @@ use frame_system::{ EnsureRoot, EnsureSigned, EnsureSignedBy, }; use pallet_asset_conversion_tx_payment::AssetConversionAdapter; -use pallet_nfts::PalletFeatures; +use pallet_nfts::{DestroyWitness, PalletFeatures}; use pallet_xcm::EnsureXcm; pub use parachains_common as common; use parachains_common::{ @@ -69,7 +69,9 @@ use sp_api::impl_runtime_apis; use sp_core::{crypto::KeyTypeId, OpaqueMetadata}; use sp_runtime::{ create_runtime_str, generic, impl_opaque_keys, - traits::{AccountIdConversion, AccountIdLookup, BlakeTwo256, Block as BlockT, Verify}, + traits::{ + AccountIdConversion, AccountIdLookup, BlakeTwo256, Block as BlockT, Saturating, Verify, + }, transaction_validity::{TransactionSource, TransactionValidity}, ApplyExtrinsicResult, Perbill, Permill, RuntimeDebug, }; @@ -944,8 +946,79 @@ pub type Migrations = ( pallet_multisig::migrations::v1::MigrateToV1, // unreleased InitStorageVersions, + // unreleased + DeleteUndecodableStorage, ); +/// Asset Hub Westend has some undecodable storage, delete it. +/// See for more info. 
+/// +/// First we remove the bad Hold, then the bad NFT collection. +pub struct DeleteUndecodableStorage; + +impl frame_support::traits::OnRuntimeUpgrade for DeleteUndecodableStorage { + fn on_runtime_upgrade() -> Weight { + use sp_core::crypto::Ss58Codec; + + let mut writes = 0; + + // Remove Holds for account with undecodable hold + // Westend doesn't have any HoldReasons implemented yet, so it's safe to just blanket remove + // any for this account. + match AccountId::from_ss58check("5GCCJthVSwNXRpbeg44gysJUx9vzjdGdfWhioeM7gCg6VyXf") { + Ok(a) => { + log::info!("Removing holds for account with bad hold"); + pallet_balances::Holds::::remove(a); + writes.saturating_inc(); + }, + Err(_) => { + log::error!("CleanupUndecodableStorage: Somehow failed to convert valid SS58 address into an AccountId!"); + }, + }; + + // Destroy undecodable NFT item 1 + writes.saturating_inc(); + match pallet_nfts::Pallet::::do_burn(3, 1, |_| Ok(())) { + Ok(_) => { + log::info!("Destroyed undecodable NFT item 1"); + }, + Err(e) => { + log::error!("Failed to destroy undecodable NFT item: {:?}", e); + return ::DbWeight::get().reads_writes(0, writes) + }, + } + + // Destroy undecodable NFT item 2 + writes.saturating_inc(); + match pallet_nfts::Pallet::::do_burn(3, 2, |_| Ok(())) { + Ok(_) => { + log::info!("Destroyed undecodable NFT item 2"); + }, + Err(e) => { + log::error!("Failed to destroy undecodable NFT item: {:?}", e); + return ::DbWeight::get().reads_writes(0, writes) + }, + } + + // Finally, we can destroy the collection + writes.saturating_inc(); + match pallet_nfts::Pallet::::do_destroy_collection( + 3, + DestroyWitness { attributes: 0, item_metadatas: 1, item_configs: 0 }, + None, + ) { + Ok(_) => { + log::info!("Destroyed undecodable NFT collection"); + }, + Err(e) => { + log::error!("Failed to destroy undecodable NFT collection: {:?}", e); + }, + }; + + ::DbWeight::get().reads_writes(0, writes) + } +} + /// Migration to initialize storage versions for pallets added after genesis. /// /// Ideally this would be done automatically (see @@ -957,7 +1030,6 @@ pub struct InitStorageVersions; impl frame_support::traits::OnRuntimeUpgrade for InitStorageVersions { fn on_runtime_upgrade() -> Weight { use frame_support::traits::{GetStorageVersion, StorageVersion}; - use sp_runtime::traits::Saturating; let mut writes = 0; -- GitLab From a393cfcb28282cd2147134b8968fba3b41681ea9 Mon Sep 17 00:00:00 2001 From: Hugo Trentesaux Date: Tue, 14 Nov 2023 12:57:39 +0100 Subject: [PATCH 043/199] Add details in `--dev` cli flag documentation (#2305) Add details to the `--dev` flag documentation noting that it also disables local peer discovery. ### Context When adding automated end-to-end tests, we replaced `--dev` by ``` `--chain=dev`, `--force-authoring`, `--rpc-cors=all`, `--alice`, and `--tmp` flags ``` as stated in the command line documentation. But the tests started failing due to the nodes connecting to each other. ### Fix This PR includes additional command line documentation to explain in more detail what the `--dev` flag includes.
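For illustration only: a rough sketch of the explicit flag set an end-to-end test could pass instead of `--dev`, based on the flags listed above plus `--no-mdns` for the "disable local peer discovery" behaviour. The authoritative expansion of `--dev` is defined in the CLI code, so treat this as an approximation rather than the exact implementation.

```bash
# Hypothetical near-equivalent of `--dev`, spelled out flag by flag so the
# discovery-related behaviour is visible; adjust to the node binary in use.
polkadot \
  --chain=dev \
  --force-authoring \
  --rpc-cors=all \
  --alice \
  --tmp \
  --no-mdns
```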
--- substrate/client/cli/src/params/shared_params.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/substrate/client/cli/src/params/shared_params.rs b/substrate/client/cli/src/params/shared_params.rs index 6419e15c62a..465372fba17 100644 --- a/substrate/client/cli/src/params/shared_params.rs +++ b/substrate/client/cli/src/params/shared_params.rs @@ -35,6 +35,7 @@ pub struct SharedParams { /// /// This flag sets `--chain=dev`, `--force-authoring`, `--rpc-cors=all`, /// `--alice`, and `--tmp` flags, unless explicitly overridden. + /// It also disables local peer discovery (see --no-mdns and --discover-local) #[arg(long, conflicts_with_all = &["chain"])] pub dev: bool, -- GitLab From 39cc95740aa03f53a0e8bfb8537f0ddb5fa3e19e Mon Sep 17 00:00:00 2001 From: Adrian Catangiu Date: Tue, 14 Nov 2023 14:47:04 +0200 Subject: [PATCH 044/199] xcm-emulator: add Rococo<>Westend bridge and add tests for assets transfers over the bridge (#2251) - switch from Rococo<>Wococo to Rococo<>Westend bridge - add bidirectional simple tests - remove Wococo chains from xcm-emulator - added tests for assets transfers over Rococo<>Westend bridge fixes https://github.com/paritytech/parity-bridges-common/issues/2405 --- Cargo.lock | 78 +------ Cargo.toml | 5 +- .../assets/asset-hub-rococo/src/lib.rs | 4 +- .../assets/asset-hub-westend/src/lib.rs | 4 +- .../assets/asset-hub-wococo/Cargo.toml | 26 --- .../assets/asset-hub-wococo/src/lib.rs | 53 ----- .../bridges/bridge-hub-rococo/src/lib.rs | 2 +- .../bridges/bridge-hub-westend/src/genesis.rs | 2 +- .../bridges/bridge-hub-westend/src/lib.rs | 2 +- .../bridges/bridge-hub-wococo/Cargo.toml | 25 -- .../bridges/bridge-hub-wococo/src/lib.rs | 47 ---- .../parachains/testing/penpal/src/lib.rs | 2 +- .../emulated/chains/relays/wococo/Cargo.toml | 30 --- .../emulated/chains/relays/wococo/src/lib.rs | 46 ---- .../emulated/common/src/impls.rs | 151 ++++++++++-- .../Cargo.toml | 10 +- .../src/lib.rs | 60 ++--- .../networks/wococo-system/Cargo.toml | 16 -- .../networks/wococo-system/src/lib.rs | 50 ---- .../bridges/bridge-hub-rococo/Cargo.toml | 6 +- .../bridges/bridge-hub-rococo/src/lib.rs | 23 +- .../src/tests/asset_transfers.rs | 219 ++++++++++++++++++ .../bridge-hub-rococo/src/tests/mod.rs | 3 +- .../src/tests/{example.rs => send_xcm.rs} | 28 +-- .../bridges/bridge-hub-westend/Cargo.toml | 6 +- .../bridges/bridge-hub-westend/src/lib.rs | 27 ++- .../src/tests/asset_transfers.rs | 218 +++++++++++++++++ .../bridge-hub-westend/src/tests/mod.rs | 3 +- .../src/tests/{example.rs => send_xcm.rs} | 30 ++- .../bridge-hub-westend/src/tests/teleport.rs | 2 +- 30 files changed, 710 insertions(+), 468 deletions(-) delete mode 100644 cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-wococo/Cargo.toml delete mode 100644 cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-wococo/src/lib.rs delete mode 100644 cumulus/parachains/integration-tests/emulated/chains/parachains/bridges/bridge-hub-wococo/Cargo.toml delete mode 100644 cumulus/parachains/integration-tests/emulated/chains/parachains/bridges/bridge-hub-wococo/src/lib.rs delete mode 100644 cumulus/parachains/integration-tests/emulated/chains/relays/wococo/Cargo.toml delete mode 100644 cumulus/parachains/integration-tests/emulated/chains/relays/wococo/src/lib.rs rename cumulus/parachains/integration-tests/emulated/networks/{rococo-wococo-system => rococo-westend-system}/Cargo.toml (57%) rename cumulus/parachains/integration-tests/emulated/networks/{rococo-wococo-system => 
rococo-westend-system}/src/lib.rs (57%) delete mode 100644 cumulus/parachains/integration-tests/emulated/networks/wococo-system/Cargo.toml delete mode 100644 cumulus/parachains/integration-tests/emulated/networks/wococo-system/src/lib.rs create mode 100644 cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/asset_transfers.rs rename cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/{example.rs => send_xcm.rs} (77%) create mode 100644 cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/asset_transfers.rs rename cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/{example.rs => send_xcm.rs} (71%) diff --git a/Cargo.lock b/Cargo.lock index 4a45d5c602e..32d9099b386 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1119,22 +1119,6 @@ dependencies = [ "westend-runtime-constants", ] -[[package]] -name = "asset-hub-wococo-emulated-chain" -version = "0.0.0" -dependencies = [ - "asset-hub-rococo-emulated-chain", - "asset-hub-rococo-runtime", - "cumulus-primitives-core", - "emulated-integration-tests-common", - "frame-support", - "parachains-common", - "serde_json", - "sp-core", - "sp-runtime", - "wococo-emulated-chain", -] - [[package]] name = "asset-test-utils" version = "1.0.0" @@ -2202,12 +2186,14 @@ dependencies = [ "cumulus-pallet-xcmp-queue", "emulated-integration-tests-common", "frame-support", + "pallet-assets", + "pallet-balances", "pallet-bridge-messages", "pallet-message-queue", "pallet-xcm", "parachains-common", "parity-scale-codec", - "rococo-wococo-system-emulated-network", + "rococo-westend-system-emulated-network", "staging-xcm", "staging-xcm-executor", ] @@ -2370,14 +2356,16 @@ dependencies = [ "cumulus-pallet-xcmp-queue", "emulated-integration-tests-common", "frame-support", + "pallet-assets", + "pallet-balances", "pallet-bridge-messages", "pallet-message-queue", "pallet-xcm", "parachains-common", "parity-scale-codec", + "rococo-westend-system-emulated-network", "staging-xcm", "staging-xcm-executor", - "westend-system-emulated-network", ] [[package]] @@ -2463,21 +2451,6 @@ dependencies = [ "westend-runtime-constants", ] -[[package]] -name = "bridge-hub-wococo-emulated-chain" -version = "0.0.0" -dependencies = [ - "bridge-hub-rococo-emulated-chain", - "bridge-hub-rococo-runtime", - "cumulus-primitives-core", - "emulated-integration-tests-common", - "frame-support", - "parachains-common", - "serde_json", - "sp-core", - "sp-runtime", -] - [[package]] name = "bridge-runtime-common" version = "0.1.0" @@ -14578,16 +14551,16 @@ dependencies = [ ] [[package]] -name = "rococo-wococo-system-emulated-network" +name = "rococo-westend-system-emulated-network" version = "0.0.0" dependencies = [ "asset-hub-rococo-emulated-chain", - "asset-hub-wococo-emulated-chain", + "asset-hub-westend-emulated-chain", "bridge-hub-rococo-emulated-chain", - "bridge-hub-wococo-emulated-chain", + "bridge-hub-westend-emulated-chain", "emulated-integration-tests-common", "rococo-emulated-chain", - "wococo-emulated-chain", + "westend-emulated-chain", ] [[package]] @@ -21236,37 +21209,6 @@ dependencies = [ "windows-sys 0.48.0", ] -[[package]] -name = "wococo-emulated-chain" -version = "0.0.0" -dependencies = [ - "emulated-integration-tests-common", - "pallet-im-online", - "parachains-common", - "polkadot-primitives", - "rococo-emulated-chain", - "rococo-runtime", - "rococo-runtime-constants", - "sc-consensus-grandpa", - "serde_json", - "sp-authority-discovery", - 
"sp-consensus-babe", - "sp-consensus-beefy", - "sp-core", - "sp-runtime", -] - -[[package]] -name = "wococo-system-emulated-network" -version = "0.0.0" -dependencies = [ - "asset-hub-wococo-emulated-chain", - "bridge-hub-wococo-emulated-chain", - "emulated-integration-tests-common", - "penpal-emulated-chain", - "wococo-emulated-chain", -] - [[package]] name = "wyz" version = "0.5.1" diff --git a/Cargo.toml b/Cargo.toml index 30445bd5945..15a7d5c35bf 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -71,16 +71,13 @@ members = [ "cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend", "cumulus/parachains/integration-tests/emulated/common", "cumulus/parachains/integration-tests/emulated/chains/relays/rococo", - "cumulus/parachains/integration-tests/emulated/chains/relays/wococo", "cumulus/parachains/integration-tests/emulated/chains/relays/westend", "cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-rococo", - "cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-wococo", "cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-westend", "cumulus/parachains/integration-tests/emulated/chains/parachains/bridges/bridge-hub-rococo", "cumulus/parachains/integration-tests/emulated/chains/parachains/bridges/bridge-hub-westend", "cumulus/parachains/integration-tests/emulated/networks/rococo-system", - "cumulus/parachains/integration-tests/emulated/networks/wococo-system", - "cumulus/parachains/integration-tests/emulated/networks/rococo-wococo-system", + "cumulus/parachains/integration-tests/emulated/networks/rococo-westend-system", "cumulus/parachains/integration-tests/emulated/networks/westend-system", "cumulus/parachains/pallets/collective-content", "cumulus/parachains/pallets/parachain-info", diff --git a/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-rococo/src/lib.rs b/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-rococo/src/lib.rs index f94c4c3d255..1ed22b3cc4f 100644 --- a/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-rococo/src/lib.rs +++ b/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-rococo/src/lib.rs @@ -21,7 +21,8 @@ use frame_support::traits::OnInitialize; // Cumulus use emulated_integration_tests_common::{ impl_accounts_helpers_for_parachain, impl_assert_events_helpers_for_parachain, - impl_assets_helpers_for_parachain, xcm_emulator::decl_test_parachains, + impl_assets_helpers_for_parachain, impl_foreign_assets_helpers_for_parachain, impls::Parachain, + xcm_emulator::decl_test_parachains, }; use rococo_emulated_chain::Rococo; @@ -53,3 +54,4 @@ decl_test_parachains! 
{ impl_accounts_helpers_for_parachain!(AssetHubRococo); impl_assert_events_helpers_for_parachain!(AssetHubRococo, false); impl_assets_helpers_for_parachain!(AssetHubRococo, Rococo); +impl_foreign_assets_helpers_for_parachain!(AssetHubRococo, Rococo); diff --git a/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-westend/src/lib.rs b/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-westend/src/lib.rs index 73d777247a5..4dcdb613ac2 100644 --- a/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-westend/src/lib.rs +++ b/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-westend/src/lib.rs @@ -21,7 +21,8 @@ use frame_support::traits::OnInitialize; // Cumulus use emulated_integration_tests_common::{ impl_accounts_helpers_for_parachain, impl_assert_events_helpers_for_parachain, - impl_assets_helpers_for_parachain, xcm_emulator::decl_test_parachains, + impl_assets_helpers_for_parachain, impl_foreign_assets_helpers_for_parachain, impls::Parachain, + xcm_emulator::decl_test_parachains, }; use westend_emulated_chain::Westend; @@ -53,3 +54,4 @@ decl_test_parachains! { impl_accounts_helpers_for_parachain!(AssetHubWestend); impl_assert_events_helpers_for_parachain!(AssetHubWestend, false); impl_assets_helpers_for_parachain!(AssetHubWestend, Westend); +impl_foreign_assets_helpers_for_parachain!(AssetHubWestend, Westend); diff --git a/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-wococo/Cargo.toml b/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-wococo/Cargo.toml deleted file mode 100644 index 0f212c15999..00000000000 --- a/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-wococo/Cargo.toml +++ /dev/null @@ -1,26 +0,0 @@ -[package] -name = "asset-hub-wococo-emulated-chain" -version = "0.0.0" -authors.workspace = true -edition.workspace = true -license = "Apache-2.0" -description = "Asset Hub Wococo emulated chain" -publish = false - -[dependencies] -serde_json = "1.0.104" - -# Substrate -sp-core = { path = "../../../../../../../../substrate/primitives/core", default-features = false } -sp-runtime = { path = "../../../../../../../../substrate/primitives/runtime", default-features = false } -frame-support = { path = "../../../../../../../../substrate/frame/support", default-features = false } - -# Polakadot -parachains-common = { path = "../../../../../../../parachains/common" } - -# Cumulus -cumulus-primitives-core = { path = "../../../../../../../primitives/core", default-features = false } -emulated-integration-tests-common = { path = "../../../../common", default-features = false } -asset-hub-rococo-runtime = { path = "../../../../../../runtimes/assets/asset-hub-rococo" } -wococo-emulated-chain = { path = "../../../relays/wococo" } -asset-hub-rococo-emulated-chain = { path = "../asset-hub-rococo" } diff --git a/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-wococo/src/lib.rs b/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-wococo/src/lib.rs deleted file mode 100644 index 38a6ece3472..00000000000 --- a/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-wococo/src/lib.rs +++ /dev/null @@ -1,53 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. 
-// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Substrate -use frame_support::traits::OnInitialize; - -// Cumulus -use emulated_integration_tests_common::{ - impl_accounts_helpers_for_parachain, impl_assert_events_helpers_for_parachain, - impl_assets_helpers_for_parachain, xcm_emulator::decl_test_parachains, -}; -use wococo_emulated_chain::Wococo; - -// AssetHubWococo Parachain declaration -decl_test_parachains! { - pub struct AssetHubWococo { - genesis = asset_hub_rococo_emulated_chain::genesis::genesis(), - on_init = { - asset_hub_rococo_runtime::AuraExt::on_initialize(1); - }, - runtime = asset_hub_rococo_runtime, - core = { - XcmpMessageHandler: asset_hub_rococo_runtime::XcmpQueue, - LocationToAccountId: asset_hub_rococo_runtime::xcm_config::LocationToAccountId, - ParachainInfo: asset_hub_rococo_runtime::ParachainInfo, - }, - pallets = { - PolkadotXcm: asset_hub_rococo_runtime::PolkadotXcm, - Assets: asset_hub_rococo_runtime::Assets, - ForeignAssets: asset_hub_rococo_runtime::ForeignAssets, - PoolAssets: asset_hub_rococo_runtime::PoolAssets, - AssetConversion: asset_hub_rococo_runtime::AssetConversion, - Balances: asset_hub_rococo_runtime::Balances, - } - }, -} - -// AssetHubWococo implementation -impl_accounts_helpers_for_parachain!(AssetHubWococo); -impl_assert_events_helpers_for_parachain!(AssetHubWococo, false); -impl_assets_helpers_for_parachain!(AssetHubWococo, Wococo); diff --git a/cumulus/parachains/integration-tests/emulated/chains/parachains/bridges/bridge-hub-rococo/src/lib.rs b/cumulus/parachains/integration-tests/emulated/chains/parachains/bridges/bridge-hub-rococo/src/lib.rs index f4557021f62..ea0c9513abc 100644 --- a/cumulus/parachains/integration-tests/emulated/chains/parachains/bridges/bridge-hub-rococo/src/lib.rs +++ b/cumulus/parachains/integration-tests/emulated/chains/parachains/bridges/bridge-hub-rococo/src/lib.rs @@ -21,7 +21,7 @@ use frame_support::traits::OnInitialize; // Cumulus use emulated_integration_tests_common::{ impl_accounts_helpers_for_parachain, impl_assert_events_helpers_for_parachain, - xcm_emulator::decl_test_parachains, + impls::Parachain, xcm_emulator::decl_test_parachains, }; // BridgeHubRococo Parachain declaration diff --git a/cumulus/parachains/integration-tests/emulated/chains/parachains/bridges/bridge-hub-westend/src/genesis.rs b/cumulus/parachains/integration-tests/emulated/chains/parachains/bridges/bridge-hub-westend/src/genesis.rs index cd578d6862f..2eb7e0ddbd2 100644 --- a/cumulus/parachains/integration-tests/emulated/chains/parachains/bridges/bridge-hub-westend/src/genesis.rs +++ b/cumulus/parachains/integration-tests/emulated/chains/parachains/bridges/bridge-hub-westend/src/genesis.rs @@ -22,7 +22,7 @@ use emulated_integration_tests_common::{ }; use parachains_common::Balance; -pub const PARA_ID: u32 = 1013; +pub const PARA_ID: u32 = 1002; pub const ED: Balance = parachains_common::westend::currency::EXISTENTIAL_DEPOSIT; pub fn genesis() -> Storage { diff --git 
a/cumulus/parachains/integration-tests/emulated/chains/parachains/bridges/bridge-hub-westend/src/lib.rs b/cumulus/parachains/integration-tests/emulated/chains/parachains/bridges/bridge-hub-westend/src/lib.rs index 1f1126d4565..4a130ac1f27 100644 --- a/cumulus/parachains/integration-tests/emulated/chains/parachains/bridges/bridge-hub-westend/src/lib.rs +++ b/cumulus/parachains/integration-tests/emulated/chains/parachains/bridges/bridge-hub-westend/src/lib.rs @@ -21,7 +21,7 @@ use frame_support::traits::OnInitialize; // Cumulus use emulated_integration_tests_common::{ impl_accounts_helpers_for_parachain, impl_assert_events_helpers_for_parachain, - xcm_emulator::decl_test_parachains, + impls::Parachain, xcm_emulator::decl_test_parachains, }; // BridgeHubWestend Parachain declaration diff --git a/cumulus/parachains/integration-tests/emulated/chains/parachains/bridges/bridge-hub-wococo/Cargo.toml b/cumulus/parachains/integration-tests/emulated/chains/parachains/bridges/bridge-hub-wococo/Cargo.toml deleted file mode 100644 index 0b02730a50c..00000000000 --- a/cumulus/parachains/integration-tests/emulated/chains/parachains/bridges/bridge-hub-wococo/Cargo.toml +++ /dev/null @@ -1,25 +0,0 @@ -[package] -name = "bridge-hub-wococo-emulated-chain" -version = "0.0.0" -authors.workspace = true -edition.workspace = true -license = "Apache-2.0" -description = "Bridge Hub Wococo emulated chain" -publish = false - -[dependencies] -serde_json = "1.0.104" - -# Substrate -sp-core = { path = "../../../../../../../../substrate/primitives/core", default-features = false } -sp-runtime = { path = "../../../../../../../../substrate/primitives/runtime", default-features = false } -frame-support = { path = "../../../../../../../../substrate/frame/support", default-features = false } - -# Polakadot -parachains-common = { path = "../../../../../../../parachains/common" } - -# Cumulus -cumulus-primitives-core = { path = "../../../../../../../primitives/core", default-features = false } -emulated-integration-tests-common = { path = "../../../../common", default-features = false } -bridge-hub-rococo-runtime = { path = "../../../../../../runtimes/bridge-hubs/bridge-hub-rococo" } -bridge-hub-rococo-emulated-chain = { path = "../bridge-hub-rococo" } diff --git a/cumulus/parachains/integration-tests/emulated/chains/parachains/bridges/bridge-hub-wococo/src/lib.rs b/cumulus/parachains/integration-tests/emulated/chains/parachains/bridges/bridge-hub-wococo/src/lib.rs deleted file mode 100644 index e643f104aa3..00000000000 --- a/cumulus/parachains/integration-tests/emulated/chains/parachains/bridges/bridge-hub-wococo/src/lib.rs +++ /dev/null @@ -1,47 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -// Substrate -use frame_support::traits::OnInitialize; - -// Cumulus -use emulated_integration_tests_common::{ - impl_accounts_helpers_for_parachain, impl_assert_events_helpers_for_parachain, - xcm_emulator::decl_test_parachains, -}; - -// BridgeHubWococo Parachain declaration -decl_test_parachains! { - pub struct BridgeHubWococo { - genesis = bridge_hub_rococo_emulated_chain::genesis::genesis(), - on_init = { - bridge_hub_rococo_runtime::AuraExt::on_initialize(1); - }, - runtime = bridge_hub_rococo_runtime, - core = { - XcmpMessageHandler: bridge_hub_rococo_runtime::XcmpQueue, - LocationToAccountId: bridge_hub_rococo_runtime::xcm_config::LocationToAccountId, - ParachainInfo: bridge_hub_rococo_runtime::ParachainInfo, - }, - pallets = { - PolkadotXcm: bridge_hub_rococo_runtime::PolkadotXcm, - Balances: bridge_hub_rococo_runtime::Balances, - } - }, -} - -// BridgeHubWococo implementation -impl_accounts_helpers_for_parachain!(BridgeHubWococo); -impl_assert_events_helpers_for_parachain!(BridgeHubWococo, false); diff --git a/cumulus/parachains/integration-tests/emulated/chains/parachains/testing/penpal/src/lib.rs b/cumulus/parachains/integration-tests/emulated/chains/parachains/testing/penpal/src/lib.rs index 537f96f45b4..f9a422bfcba 100644 --- a/cumulus/parachains/integration-tests/emulated/chains/parachains/testing/penpal/src/lib.rs +++ b/cumulus/parachains/integration-tests/emulated/chains/parachains/testing/penpal/src/lib.rs @@ -22,7 +22,7 @@ use frame_support::traits::OnInitialize; // Cumulus use emulated_integration_tests_common::{ impl_accounts_helpers_for_parachain, impl_assert_events_helpers_for_parachain, - impl_assets_helpers_for_parachain, xcm_emulator::decl_test_parachains, + impl_assets_helpers_for_parachain, impls::Parachain, xcm_emulator::decl_test_parachains, }; use rococo_emulated_chain::Rococo; diff --git a/cumulus/parachains/integration-tests/emulated/chains/relays/wococo/Cargo.toml b/cumulus/parachains/integration-tests/emulated/chains/relays/wococo/Cargo.toml deleted file mode 100644 index 51a87954b8c..00000000000 --- a/cumulus/parachains/integration-tests/emulated/chains/relays/wococo/Cargo.toml +++ /dev/null @@ -1,30 +0,0 @@ -[package] -name = "wococo-emulated-chain" -version = "0.0.0" -authors.workspace = true -edition.workspace = true -license = "Apache-2.0" -description = "Wococo emulated chain" -publish = false - -[dependencies] -serde_json = "1.0.104" - -# Substrate -sp-core = { path = "../../../../../../../substrate/primitives/core", default-features = false } -sp-runtime = { path = "../../../../../../../substrate/primitives/runtime", default-features = false } -sp-authority-discovery = { path = "../../../../../../../substrate/primitives/authority-discovery", default-features = false } -sp-consensus-babe = { path = "../../../../../../../substrate/primitives/consensus/babe", default-features = false } -beefy-primitives = { package = "sp-consensus-beefy", path = "../../../../../../../substrate/primitives/consensus/beefy" } -grandpa = { package = "sc-consensus-grandpa", path = "../../../../../../../substrate/client/consensus/grandpa", default-features = false } -pallet-im-online = { path = "../../../../../../../substrate/frame/im-online", default-features = false } - -# Polkadot -polkadot-primitives = { path = "../../../../../../../polkadot/primitives", default-features = false } -rococo-runtime-constants = { path = "../../../../../../../polkadot/runtime/rococo/constants", default-features = false } -rococo-runtime = { path = 
"../../../../../../../polkadot/runtime/rococo" } - -# Cumulus -parachains-common = { path = "../../../../../../parachains/common" } -emulated-integration-tests-common = { path = "../../../common", default-features = false } -rococo-emulated-chain = { path = "../rococo" } diff --git a/cumulus/parachains/integration-tests/emulated/chains/relays/wococo/src/lib.rs b/cumulus/parachains/integration-tests/emulated/chains/relays/wococo/src/lib.rs deleted file mode 100644 index a04deee330f..00000000000 --- a/cumulus/parachains/integration-tests/emulated/chains/relays/wococo/src/lib.rs +++ /dev/null @@ -1,46 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Cumulus -use emulated_integration_tests_common::{ - impl_accounts_helpers_for_relay_chain, impl_assert_events_helpers_for_relay_chain, - impl_hrmp_channels_helpers_for_relay_chain, impl_send_transact_helpers_for_relay_chain, - xcm_emulator::decl_test_relay_chains, -}; - -// Wococo declaration -decl_test_relay_chains! { - #[api_version(8)] - pub struct Wococo { - genesis = rococo_emulated_chain::genesis::genesis(), - on_init = (), - runtime = rococo_runtime, - core = { - SovereignAccountOf: rococo_runtime::xcm_config::LocationConverter, - }, - pallets = { - XcmPallet: rococo_runtime::XcmPallet, - Sudo: rococo_runtime::Sudo, - Balances: rococo_runtime::Balances, - Hrmp: rococo_runtime::Hrmp, - } - }, -} - -// Wococo implementation -impl_accounts_helpers_for_relay_chain!(Wococo); -impl_assert_events_helpers_for_relay_chain!(Wococo); -impl_hrmp_channels_helpers_for_relay_chain!(Wococo); -impl_send_transact_helpers_for_relay_chain!(Wococo); diff --git a/cumulus/parachains/integration-tests/emulated/common/src/impls.rs b/cumulus/parachains/integration-tests/emulated/common/src/impls.rs index 82f27b93200..8c94df6d888 100644 --- a/cumulus/parachains/integration-tests/emulated/common/src/impls.rs +++ b/cumulus/parachains/integration-tests/emulated/common/src/impls.rs @@ -47,7 +47,8 @@ pub use xcm::{ pub use cumulus_pallet_parachain_system; pub use cumulus_pallet_xcmp_queue; pub use cumulus_primitives_core::{ - relay_chain::HrmpChannelId, DmpMessageHandler, ParaId, XcmpMessageHandler, + relay_chain::HrmpChannelId, DmpMessageHandler, Junction, Junctions, NetworkId, ParaId, + XcmpMessageHandler, }; pub use parachains_common::{AccountId, Balance}; pub use xcm_emulator::{ @@ -62,11 +63,14 @@ use bp_messages::{ LaneId, MessageKey, OutboundLaneData, }; use bridge_runtime_common::messages_xcm_extension::XcmBlobMessageDispatchResult; -pub use pallet_bridge_messages::Instance2 as BridgeMessagesInstance2; -use pallet_bridge_messages::{Config, Instance1, OutboundLanes, Pallet}; +use pallet_bridge_messages::{Config, OutboundLanes, Pallet}; +pub use pallet_bridge_messages::{ + Instance1 as BridgeMessagesInstance1, Instance2 as BridgeMessagesInstance2, + Instance3 as BridgeMessagesInstance3, +}; -pub struct BridgeHubMessageHandler { - _marker: 
std::marker::PhantomData<(S, T, I)>, +pub struct BridgeHubMessageHandler { + _marker: std::marker::PhantomData<(S, SI, T, TI)>, } struct LaneIdWrapper(LaneId); @@ -83,13 +87,14 @@ impl From for LaneIdWrapper { } } -impl BridgeMessageHandler for BridgeHubMessageHandler +impl BridgeMessageHandler for BridgeHubMessageHandler where - S: Config, - T: Config, - I: 'static, - >::InboundPayload: From>, - >::MessageDispatch: + S: Config, + SI: 'static, + T: Config, + TI: 'static, + >::InboundPayload: From>, + >::MessageDispatch: MessageDispatch, { fn get_source_outbound_messages() -> Vec { @@ -100,16 +105,13 @@ where // collect messages from `OutboundMessages` for each active outbound lane in the source for lane in active_lanes { - let latest_generated_nonce = - OutboundLanes::::get(lane).latest_generated_nonce; - let latest_received_nonce = - OutboundLanes::::get(lane).latest_received_nonce; + let latest_generated_nonce = OutboundLanes::::get(lane).latest_generated_nonce; + let latest_received_nonce = OutboundLanes::::get(lane).latest_received_nonce; (latest_received_nonce + 1..=latest_generated_nonce).for_each(|nonce| { - let encoded_payload: Vec = - Pallet::::outbound_message_data(*lane, nonce) - .expect("Bridge message does not exist") - .into(); + let encoded_payload: Vec = Pallet::::outbound_message_data(*lane, nonce) + .expect("Bridge message does not exist") + .into(); let payload = Vec::::decode(&mut &encoded_payload[..]) .expect("Decodign XCM message failed"); let id: u32 = LaneIdWrapper(*lane).into(); @@ -133,9 +135,9 @@ where // Directly dispatch outbound messages assuming everything is correct // and bypassing the `Relayers` and `InboundLane` logic - let dispatch_result = TargetMessageDispatch::::dispatch(DispatchMessage { + let dispatch_result = TargetMessageDispatch::::dispatch(DispatchMessage { key: MessageKey { lane_id, nonce }, - data: DispatchMessageData::> { payload }, + data: DispatchMessageData::> { payload }, }); let result = match dispatch_result.dispatch_level_result { @@ -151,14 +153,14 @@ where } fn notify_source_message_delivery(lane_id: u32) { - let data = OutboundLanes::::get(LaneIdWrapper::from(lane_id).0); + let data = OutboundLanes::::get(LaneIdWrapper::from(lane_id).0); let new_data = OutboundLaneData { oldest_unpruned_nonce: data.oldest_unpruned_nonce + 1, latest_received_nonce: data.latest_received_nonce + 1, ..data }; - OutboundLanes::::insert(LaneIdWrapper::from(lane_id).0, new_data); + OutboundLanes::::insert(LaneIdWrapper::from(lane_id).0, new_data); } } @@ -392,6 +394,23 @@ macro_rules! impl_accounts_helpers_for_parachain { } }); } + + /// Return local sovereign account of `para_id` on other `network_id` + pub fn sovereign_account_of_parachain_on_other_global_consensus( + network_id: $crate::impls::NetworkId, + para_id: $crate::impls::ParaId, + ) -> $crate::impls::AccountId { + let remote_location = $crate::impls::MultiLocation { + parents: 2, + interior: $crate::impls::Junctions::X2( + $crate::impls::Junction::GlobalConsensus(network_id), + $crate::impls::Junction::Parachain(para_id.into()), + ), + }; + ::execute_with(|| { + Self::sovereign_account_id_of(remote_location) + }) + } } } }; @@ -614,7 +633,9 @@ macro_rules! 
impl_assets_helpers_for_parachain { $crate::impls::assert_expected_events!( Self, vec![ - RuntimeEvent::::Assets($crate::impls::pallet_assets::Event::Issued { asset_id, owner, amount }) => { + RuntimeEvent::::Assets( + $crate::impls::pallet_assets::Event::Issued { asset_id, owner, amount } + ) => { asset_id: *asset_id == id, owner: *owner == beneficiary.clone().into(), amount: *amount == amount_to_mint, @@ -687,3 +708,85 @@ macro_rules! impl_assets_helpers_for_parachain { } }; } + +#[macro_export] +macro_rules! impl_foreign_assets_helpers_for_parachain { + ( $chain:ident, $relay_chain:ident ) => { + $crate::impls::paste::paste! { + impl $chain { + /// Create foreign assets using sudo `ForeignAssets::force_create()` + pub fn force_create_foreign_asset( + id: $crate::impls::MultiLocation, + owner: $crate::impls::AccountId, + is_sufficient: bool, + min_balance: u128, + prefund_accounts: Vec<($crate::impls::AccountId, u128)>, + ) { + use $crate::impls::Inspect; + let sudo_origin = <$chain as $crate::impls::Chain>::RuntimeOrigin::root(); + ::execute_with(|| { + $crate::impls::assert_ok!( + ]>::ForeignAssets::force_create( + sudo_origin, + id, + owner.clone().into(), + is_sufficient, + min_balance, + ) + ); + assert!(]>::ForeignAssets::asset_exists(id)); + type RuntimeEvent = <$chain as $crate::impls::Chain>::RuntimeEvent; + $crate::impls::assert_expected_events!( + Self, + vec![ + RuntimeEvent::::ForeignAssets( + $crate::impls::pallet_assets::Event::ForceCreated { + asset_id, + .. + } + ) => { asset_id: *asset_id == id, }, + ] + ); + }); + for (beneficiary, amount) in prefund_accounts.into_iter() { + let signed_origin = + <$chain as $crate::impls::Chain>::RuntimeOrigin::signed(owner.clone()); + Self::mint_foreign_asset(signed_origin, id, beneficiary, amount); + } + } + + /// Mint assets making use of the ForeignAssets pallet-assets instance + pub fn mint_foreign_asset( + signed_origin: ::RuntimeOrigin, + id: $crate::impls::MultiLocation, + beneficiary: $crate::impls::AccountId, + amount_to_mint: u128, + ) { + ::execute_with(|| { + $crate::impls::assert_ok!(]>::ForeignAssets::mint( + signed_origin, + id.into(), + beneficiary.clone().into(), + amount_to_mint + )); + + type RuntimeEvent = <$chain as $crate::impls::Chain>::RuntimeEvent; + + $crate::impls::assert_expected_events!( + Self, + vec![ + RuntimeEvent::::ForeignAssets( + $crate::impls::pallet_assets::Event::Issued { asset_id, owner, amount } + ) => { + asset_id: *asset_id == id, + owner: *owner == beneficiary.clone().into(), + amount: *amount == amount_to_mint, + }, + ] + ); + }); + } + } + } + }; +} diff --git a/cumulus/parachains/integration-tests/emulated/networks/rococo-wococo-system/Cargo.toml b/cumulus/parachains/integration-tests/emulated/networks/rococo-westend-system/Cargo.toml similarity index 57% rename from cumulus/parachains/integration-tests/emulated/networks/rococo-wococo-system/Cargo.toml rename to cumulus/parachains/integration-tests/emulated/networks/rococo-westend-system/Cargo.toml index 53a6f0840a5..34713f5b48e 100644 --- a/cumulus/parachains/integration-tests/emulated/networks/rococo-wococo-system/Cargo.toml +++ b/cumulus/parachains/integration-tests/emulated/networks/rococo-westend-system/Cargo.toml @@ -1,18 +1,18 @@ [package] -name = "rococo-wococo-system-emulated-network" +name = "rococo-westend-system-emulated-network" version = "0.0.0" authors.workspace = true edition.workspace = true license = "Apache-2.0" -description = "Rococo<>Wococo emulated bridged network" +description = "Rococo<>Westend emulated bridged 
network" publish = false [dependencies] # Cumulus emulated-integration-tests-common = { path = "../../common", default-features = false } rococo-emulated-chain = { path = "../../chains/relays/rococo" } -wococo-emulated-chain = { path = "../../chains/relays/wococo" } +westend-emulated-chain = { path = "../../chains/relays/westend" } asset-hub-rococo-emulated-chain = { path = "../../chains/parachains/assets/asset-hub-rococo" } -asset-hub-wococo-emulated-chain = { path = "../../chains/parachains/assets/asset-hub-wococo" } +asset-hub-westend-emulated-chain = { path = "../../chains/parachains/assets/asset-hub-westend" } bridge-hub-rococo-emulated-chain = { path = "../../chains/parachains/bridges/bridge-hub-rococo" } -bridge-hub-wococo-emulated-chain = { path = "../../chains/parachains/bridges/bridge-hub-wococo" } +bridge-hub-westend-emulated-chain = { path = "../../chains/parachains/bridges/bridge-hub-westend" } diff --git a/cumulus/parachains/integration-tests/emulated/networks/rococo-wococo-system/src/lib.rs b/cumulus/parachains/integration-tests/emulated/networks/rococo-westend-system/src/lib.rs similarity index 57% rename from cumulus/parachains/integration-tests/emulated/networks/rococo-wococo-system/src/lib.rs rename to cumulus/parachains/integration-tests/emulated/networks/rococo-westend-system/src/lib.rs index e20dcfa6b32..b03ff692b95 100644 --- a/cumulus/parachains/integration-tests/emulated/networks/rococo-wococo-system/src/lib.rs +++ b/cumulus/parachains/integration-tests/emulated/networks/rococo-westend-system/src/lib.rs @@ -14,23 +14,23 @@ // limitations under the License. pub use asset_hub_rococo_emulated_chain; -pub use asset_hub_wococo_emulated_chain; +pub use asset_hub_westend_emulated_chain; pub use bridge_hub_rococo_emulated_chain; -pub use bridge_hub_wococo_emulated_chain; +pub use bridge_hub_westend_emulated_chain; pub use rococo_emulated_chain; -pub use wococo_emulated_chain; +pub use westend_emulated_chain; use asset_hub_rococo_emulated_chain::AssetHubRococo; -use asset_hub_wococo_emulated_chain::AssetHubWococo; +use asset_hub_westend_emulated_chain::AssetHubWestend; use bridge_hub_rococo_emulated_chain::BridgeHubRococo; -use bridge_hub_wococo_emulated_chain::BridgeHubWococo; +use bridge_hub_westend_emulated_chain::BridgeHubWestend; use rococo_emulated_chain::Rococo; -use wococo_emulated_chain::Wococo; +use westend_emulated_chain::Westend; // Cumulus use emulated_integration_tests_common::{ accounts::{ALICE, BOB}, - impls::{BridgeHubMessageHandler, BridgeMessagesInstance2}, + impls::{BridgeHubMessageHandler, BridgeMessagesInstance1, BridgeMessagesInstance3}, xcm_emulator::{ decl_test_bridges, decl_test_networks, decl_test_sender_receiver_accounts_parameter_types, Chain, @@ -44,51 +44,53 @@ decl_test_networks! { AssetHubRococo, BridgeHubRococo, ], - bridge = RococoWococoMockBridge + bridge = RococoWestendMockBridge }, - pub struct WococoMockNet { - relay_chain = Wococo, + pub struct WestendMockNet { + relay_chain = Westend, parachains = vec![ - AssetHubWococo, - BridgeHubWococo, + AssetHubWestend, + BridgeHubWestend, ], - bridge = WococoRococoMockBridge + bridge = WestendRococoMockBridge }, } decl_test_bridges! 
{ - pub struct RococoWococoMockBridge { + pub struct RococoWestendMockBridge { source = BridgeHubRococoPara, - target = BridgeHubWococoPara, - handler = RococoWococoMessageHandler + target = BridgeHubWestendPara, + handler = RococoWestendMessageHandler }, - pub struct WococoRococoMockBridge { - source = BridgeHubWococoPara, + pub struct WestendRococoMockBridge { + source = BridgeHubWestendPara, target = BridgeHubRococoPara, - handler = WococoRococoMessageHandler + handler = WestendRococoMessageHandler } } type BridgeHubRococoRuntime = ::Runtime; -type BridgeHubWococoRuntime = ::Runtime; +type BridgeHubWestendRuntime = ::Runtime; -pub type RococoWococoMessageHandler = BridgeHubMessageHandler< +pub type RococoWestendMessageHandler = BridgeHubMessageHandler< BridgeHubRococoRuntime, - BridgeHubWococoRuntime, - BridgeMessagesInstance2, + BridgeMessagesInstance3, + BridgeHubWestendRuntime, + BridgeMessagesInstance1, >; -pub type WococoRococoMessageHandler = BridgeHubMessageHandler< - BridgeHubWococoRuntime, +pub type WestendRococoMessageHandler = BridgeHubMessageHandler< + BridgeHubWestendRuntime, + BridgeMessagesInstance1, BridgeHubRococoRuntime, - BridgeMessagesInstance2, + BridgeMessagesInstance3, >; decl_test_sender_receiver_accounts_parameter_types! { RococoRelay { sender: ALICE, receiver: BOB }, AssetHubRococoPara { sender: ALICE, receiver: BOB }, BridgeHubRococoPara { sender: ALICE, receiver: BOB }, - WococoRelay { sender: ALICE, receiver: BOB }, - AssetHubWococoPara { sender: ALICE, receiver: BOB }, - BridgeHubWococoPara { sender: ALICE, receiver: BOB } + WestendRelay { sender: ALICE, receiver: BOB }, + AssetHubWestendPara { sender: ALICE, receiver: BOB }, + BridgeHubWestendPara { sender: ALICE, receiver: BOB } } diff --git a/cumulus/parachains/integration-tests/emulated/networks/wococo-system/Cargo.toml b/cumulus/parachains/integration-tests/emulated/networks/wococo-system/Cargo.toml deleted file mode 100644 index a596617e82b..00000000000 --- a/cumulus/parachains/integration-tests/emulated/networks/wococo-system/Cargo.toml +++ /dev/null @@ -1,16 +0,0 @@ -[package] -name = "wococo-system-emulated-network" -version = "0.0.0" -authors.workspace = true -edition.workspace = true -license = "Apache-2.0" -description = "Wococo System emulated network" -publish = false - -[dependencies] -# Cumulus -emulated-integration-tests-common = { path = "../../common", default-features = false } -wococo-emulated-chain = { path = "../../chains/relays/wococo" } -asset-hub-wococo-emulated-chain = { path = "../../chains/parachains/assets/asset-hub-wococo" } -bridge-hub-wococo-emulated-chain = { path = "../../chains/parachains/bridges/bridge-hub-wococo" } -penpal-emulated-chain = { path = "../../chains/parachains/testing/penpal" } diff --git a/cumulus/parachains/integration-tests/emulated/networks/wococo-system/src/lib.rs b/cumulus/parachains/integration-tests/emulated/networks/wococo-system/src/lib.rs deleted file mode 100644 index 5369afe7dff..00000000000 --- a/cumulus/parachains/integration-tests/emulated/networks/wococo-system/src/lib.rs +++ /dev/null @@ -1,50 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -pub use asset_hub_wococo_emulated_chain; -pub use bridge_hub_wococo_emulated_chain; -pub use wococo_emulated_chain; - -use asset_hub_wococo_emulated_chain::AssetHubWococo; -use bridge_hub_wococo_emulated_chain::BridgeHubWococo; -use penpal_emulated_chain::{PenpalA, PenpalB}; -use wococo_emulated_chain::Wococo; - -// Cumulus -use emulated_integration_tests_common::{ - accounts::{ALICE, BOB}, - xcm_emulator::{decl_test_networks, decl_test_sender_receiver_accounts_parameter_types}, -}; - -decl_test_networks! { - pub struct WococoMockNet { - relay_chain = Wococo, - parachains = vec![ - AssetHubWococo, - BridgeHubWococo, - PenpalA, - PenpalB, - ], - bridge = () - }, -} - -decl_test_sender_receiver_accounts_parameter_types! { - WococoRelay { sender: ALICE, receiver: BOB }, - AssetHubWococoPara { sender: ALICE, receiver: BOB }, - BridgeHubWococoPara { sender: ALICE, receiver: BOB }, - PenpalAPara { sender: ALICE, receiver: BOB }, - PenpalBPara { sender: ALICE, receiver: BOB } -} diff --git a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/Cargo.toml b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/Cargo.toml index 035d9c10793..00e3af2e4ff 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/Cargo.toml +++ b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/Cargo.toml @@ -11,7 +11,9 @@ publish = false codec = { package = "parity-scale-codec", version = "3.4.0", default-features = false } # Substrate -frame-support = { path = "../../../../../../../substrate/frame/support", default-features = false} +frame-support = { path = "../../../../../../../substrate/frame/support", default-features = false } +pallet-assets = { path = "../../../../../../../substrate/frame/assets", default-features = false } +pallet-balances = { path = "../../../../../../../substrate/frame/balances", default-features = false } pallet-message-queue = { path = "../../../../../../../substrate/frame/message-queue" } # Polkadot @@ -30,4 +32,4 @@ cumulus-pallet-xcmp-queue = { path = "../../../../../../pallets/xcmp-queue", def cumulus-pallet-dmp-queue = { path = "../../../../../../pallets/dmp-queue", default-features = false} bridge-hub-rococo-runtime = { path = "../../../../../../parachains/runtimes/bridge-hubs/bridge-hub-rococo", default-features = false } emulated-integration-tests-common = { path = "../../../common", default-features = false} -rococo-wococo-system-emulated-network ={ path = "../../../networks/rococo-wococo-system" } +rococo-westend-system-emulated-network = { path = "../../../networks/rococo-westend-system" } diff --git a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/lib.rs b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/lib.rs index 19e10d23bbb..53665437887 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/lib.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/lib.rs @@ -21,7 +21,7 @@ pub use xcm::{ 
prelude::{AccountId32 as AccountId32Junction, *}, v3::{ Error, - NetworkId::{Rococo as RococoId, Wococo as WococoId}, + NetworkId::{Rococo as RococoId, Westend as WestendId}, }, }; @@ -30,6 +30,8 @@ pub use bp_messages::LaneId; // Cumulus pub use emulated_integration_tests_common::{ + accounts::ALICE, + impls::Inspect, test_parachain_is_trusted_teleporter, xcm_emulator::{ assert_expected_events, bx, helpers::weight_within_threshold, Chain, Parachain as Para, @@ -39,17 +41,22 @@ pub use emulated_integration_tests_common::{ PROOF_SIZE_THRESHOLD, REF_TIME_THRESHOLD, XCM_V3, }; pub use parachains_common::{AccountId, Balance}; -pub use rococo_wococo_system_emulated_network::{ +pub use rococo_westend_system_emulated_network::{ + asset_hub_rococo_emulated_chain::{ + genesis::ED as ASSET_HUB_ROCOCO_ED, AssetHubRococoParaPallet as AssetHubRococoPallet, + }, + asset_hub_westend_emulated_chain::{ + genesis::ED as ASSET_HUB_WESTEND_ED, AssetHubWestendParaPallet as AssetHubWestendPallet, + }, bridge_hub_rococo_emulated_chain::{ genesis::ED as BRIDGE_HUB_ROCOCO_ED, BridgeHubRococoParaPallet as BridgeHubRococoPallet, }, - rococo_emulated_chain::{genesis::ED as ROCOCO_ED, RococoRelayPallet as RococoPallet}, + rococo_emulated_chain::RococoRelayPallet as RococoPallet, AssetHubRococoPara as AssetHubRococo, AssetHubRococoParaReceiver as AssetHubRococoReceiver, - AssetHubRococoParaSender as AssetHubRococoSender, AssetHubWococoPara as AssetHubWococo, - BridgeHubRococoPara as BridgeHubRococo, BridgeHubRococoParaReceiver as BridgeHubRococoReceiver, - BridgeHubRococoParaSender as BridgeHubRococoSender, BridgeHubWococoPara as BridgeHubWococo, - RococoRelay as Rococo, RococoRelayReceiver as RococoReceiver, - RococoRelaySender as RococoSender, + AssetHubRococoParaSender as AssetHubRococoSender, AssetHubWestendPara as AssetHubWestend, + AssetHubWestendParaReceiver as AssetHubWestendReceiver, BridgeHubRococoPara as BridgeHubRococo, + BridgeHubRococoParaSender as BridgeHubRococoSender, BridgeHubWestendPara as BridgeHubWestend, + RococoRelay as Rococo, }; pub const ASSET_ID: u32 = 1; diff --git a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/asset_transfers.rs b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/asset_transfers.rs new file mode 100644 index 00000000000..c55613f2826 --- /dev/null +++ b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/asset_transfers.rs @@ -0,0 +1,219 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +use crate::*; + +fn send_asset_from_asset_hub_rococo_to_asset_hub_westend(id: MultiLocation, amount: u128) { + let signed_origin = + ::RuntimeOrigin::signed(AssetHubRococoSender::get().into()); + let asset_hub_westend_para_id = AssetHubWestend::para_id().into(); + let destination = MultiLocation { + parents: 2, + interior: X2(GlobalConsensus(NetworkId::Westend), Parachain(asset_hub_westend_para_id)), + }; + let beneficiary_id = AssetHubWestendReceiver::get(); + let beneficiary: MultiLocation = + AccountId32Junction { network: None, id: beneficiary_id.into() }.into(); + let assets: MultiAssets = (id, amount).into(); + let fee_asset_item = 0; + + // fund the AHR's SA on BHR for paying bridge transport fees + let ahr_as_seen_by_bhr = BridgeHubRococo::sibling_location_of(AssetHubRococo::para_id()); + let sov_ahr_on_bhr = BridgeHubRococo::sovereign_account_id_of(ahr_as_seen_by_bhr); + BridgeHubRococo::fund_accounts(vec![(sov_ahr_on_bhr.into(), 10_000_000_000_000u128)]); + + AssetHubRococo::execute_with(|| { + assert_ok!( + ::PolkadotXcm::limited_reserve_transfer_assets( + signed_origin, + bx!(destination.into()), + bx!(beneficiary.into()), + bx!(assets.into()), + fee_asset_item, + WeightLimit::Unlimited, + ) + ); + }); + + BridgeHubRococo::execute_with(|| { + type RuntimeEvent = ::RuntimeEvent; + assert_expected_events!( + BridgeHubRococo, + vec![ + // pay for bridge fees + RuntimeEvent::Balances(pallet_balances::Event::Withdraw { .. }) => {}, + // message exported + RuntimeEvent::BridgeWestendMessages( + pallet_bridge_messages::Event::MessageAccepted { .. } + ) => {}, + // message processed successfully + RuntimeEvent::MessageQueue( + pallet_message_queue::Event::Processed { success: true, .. } + ) => {}, + ] + ); + }); + BridgeHubWestend::execute_with(|| { + type RuntimeEvent = ::RuntimeEvent; + assert_expected_events!( + BridgeHubWestend, + vec![ + // message dispatched successfully + RuntimeEvent::XcmpQueue( + cumulus_pallet_xcmp_queue::Event::XcmpMessageSent { .. } + ) => {}, + ] + ); + }); +} + +#[test] +fn send_rocs_from_asset_hub_rococo_to_asset_hub_westend() { + let roc_at_asset_hub_rococo: MultiLocation = Parent.into(); + let roc_at_asset_hub_westend = + MultiLocation { parents: 2, interior: X1(GlobalConsensus(NetworkId::Rococo)) }; + let owner: AccountId = AssetHubWestend::account_id_of(ALICE); + AssetHubWestend::force_create_foreign_asset( + roc_at_asset_hub_westend, + owner, + true, + ASSET_MIN_BALANCE, + vec![], + ); + let sov_ahw_on_ahr = AssetHubRococo::sovereign_account_of_parachain_on_other_global_consensus( + NetworkId::Westend, + AssetHubWestend::para_id(), + ); + + let rocs_in_reserve_on_ahr_before = + ::account_data_of(sov_ahw_on_ahr.clone()).free; + let sender_rocs_before = + ::account_data_of(AssetHubRococoSender::get()).free; + let receiver_rocs_before = AssetHubWestend::execute_with(|| { + type Assets = ::ForeignAssets; + >::balance(roc_at_asset_hub_westend, &AssetHubWestendReceiver::get()) + }); + + let amount = ASSET_HUB_ROCOCO_ED * 1_000; + send_asset_from_asset_hub_rococo_to_asset_hub_westend(roc_at_asset_hub_rococo, amount); + AssetHubWestend::execute_with(|| { + type RuntimeEvent = ::RuntimeEvent; + assert_expected_events!( + AssetHubWestend, + vec![ + // issue ROCs on AHW + RuntimeEvent::ForeignAssets(pallet_assets::Event::Issued { asset_id, owner, .. 
}) => { + asset_id: *asset_id == roc_at_asset_hub_rococo, + owner: *owner == AssetHubWestendReceiver::get(), + }, + // message processed successfully + RuntimeEvent::MessageQueue( + pallet_message_queue::Event::Processed { success: true, .. } + ) => {}, + ] + ); + }); + + let sender_rocs_after = + ::account_data_of(AssetHubRococoSender::get()).free; + let receiver_rocs_after = AssetHubWestend::execute_with(|| { + type Assets = ::ForeignAssets; + >::balance(roc_at_asset_hub_westend, &AssetHubWestendReceiver::get()) + }); + let rocs_in_reserve_on_ahr_after = + ::account_data_of(sov_ahw_on_ahr.clone()).free; + + // Sender's balance is reduced + assert!(sender_rocs_before > sender_rocs_after); + // Receiver's balance is increased + assert!(receiver_rocs_after > receiver_rocs_before); + // Reserve balance is reduced by sent amount + assert_eq!(rocs_in_reserve_on_ahr_after, rocs_in_reserve_on_ahr_before + amount); +} + +#[test] +fn send_wnds_from_asset_hub_rococo_to_asset_hub_westend() { + let prefund_amount = 10_000_000_000_000u128; + let wnd_at_asset_hub_rococo = + MultiLocation { parents: 2, interior: X1(GlobalConsensus(NetworkId::Westend)) }; + let owner: AccountId = AssetHubWestend::account_id_of(ALICE); + AssetHubRococo::force_create_foreign_asset( + wnd_at_asset_hub_rococo, + owner, + true, + ASSET_MIN_BALANCE, + vec![(AssetHubRococoSender::get(), prefund_amount)], + ); + + // fund the AHR's SA on AHW with the WND tokens held in reserve + let sov_ahr_on_ahw = AssetHubWestend::sovereign_account_of_parachain_on_other_global_consensus( + NetworkId::Rococo, + AssetHubRococo::para_id(), + ); + AssetHubWestend::fund_accounts(vec![(sov_ahr_on_ahw.clone(), prefund_amount)]); + + let wnds_in_reserve_on_ahw_before = + ::account_data_of(sov_ahr_on_ahw.clone()).free; + assert_eq!(wnds_in_reserve_on_ahw_before, prefund_amount); + let sender_wnds_before = AssetHubRococo::execute_with(|| { + type Assets = ::ForeignAssets; + >::balance(wnd_at_asset_hub_rococo, &AssetHubRococoSender::get()) + }); + assert_eq!(sender_wnds_before, prefund_amount); + let receiver_wnds_before = + ::account_data_of(AssetHubWestendReceiver::get()).free; + + let amount_to_send = ASSET_HUB_WESTEND_ED * 1_000; + send_asset_from_asset_hub_rococo_to_asset_hub_westend(wnd_at_asset_hub_rococo, amount_to_send); + AssetHubWestend::execute_with(|| { + type RuntimeEvent = ::RuntimeEvent; + assert_expected_events!( + AssetHubWestend, + vec![ + // WND is withdrawn from AHR's SA on AHW + RuntimeEvent::Balances( + pallet_balances::Event::Withdraw { who, amount } + ) => { + who: *who == sov_ahr_on_ahw, + amount: *amount == amount_to_send, + }, + // WNDs deposited to beneficiary + RuntimeEvent::Balances(pallet_balances::Event::Deposit { who, .. }) => { + who: *who == AssetHubWestendReceiver::get(), + }, + // message processed successfully + RuntimeEvent::MessageQueue( + pallet_message_queue::Event::Processed { success: true, .. 
} + ) => {}, + ] + ); + }); + + let sender_wnds_after = AssetHubRococo::execute_with(|| { + type Assets = ::ForeignAssets; + >::balance(wnd_at_asset_hub_rococo, &AssetHubRococoSender::get()) + }); + let receiver_wnds_after = + ::account_data_of(AssetHubWestendReceiver::get()).free; + let wnds_in_reserve_on_ahw_after = + ::account_data_of(sov_ahr_on_ahw).free; + + // Sender's balance is reduced + assert!(sender_wnds_before > sender_wnds_after); + // Receiver's balance is increased + assert!(receiver_wnds_after > receiver_wnds_before); + // Reserve balance is reduced by sent amount + assert_eq!(wnds_in_reserve_on_ahw_after, wnds_in_reserve_on_ahw_before - amount_to_send); +} diff --git a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/mod.rs b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/mod.rs index 1eef05c6b92..4e2ef1434fd 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/mod.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/mod.rs @@ -13,5 +13,6 @@ // See the License for the specific language governing permissions and // limitations under the License. -mod example; +mod asset_transfers; +mod send_xcm; mod teleport; diff --git a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/example.rs b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/send_xcm.rs similarity index 77% rename from cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/example.rs rename to cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/send_xcm.rs index 35cfa394174..4e61f7ce0dd 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/example.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/send_xcm.rs @@ -16,7 +16,7 @@ use crate::*; #[test] -fn example() { +fn send_xcm_from_rococo_relay_to_westend_asset_hub() { // Init tests variables // XcmPallet send arguments let sudo_origin = ::RuntimeOrigin::root(); @@ -29,13 +29,13 @@ fn example() { let xcm = VersionedXcm::from(Xcm(vec![ UnpaidExecution { weight_limit, check_origin }, ExportMessage { - network: WococoId, - destination: X1(Parachain(AssetHubWococo::para_id().into())), + network: WestendId, + destination: X1(Parachain(AssetHubWestend::para_id().into())), xcm: remote_xcm, }, ])); - //Rococo Global Consensus + // Rococo Global Consensus // Send XCM message from Relay Chain to Bridge Hub source Parachain Rococo::execute_with(|| { assert_ok!(::XcmPallet::send( @@ -64,32 +64,32 @@ fn example() { success: true, .. }) => {}, - RuntimeEvent::BridgeWococoMessages(pallet_bridge_messages::Event::MessageAccepted { - lane_id: LaneId([0, 0, 0, 1]), + RuntimeEvent::BridgeWestendMessages(pallet_bridge_messages::Event::MessageAccepted { + lane_id: LaneId([0, 0, 0, 2]), nonce: 1, }) => {}, ] ); }); - // Wococo GLobal Consensus + // Westend Global Consensus // Receive XCM message in Bridge Hub target Parachain - BridgeHubWococo::execute_with(|| { - type RuntimeEvent = ::RuntimeEvent; + BridgeHubWestend::execute_with(|| { + type RuntimeEvent = ::RuntimeEvent; assert_expected_events!( - BridgeHubWococo, + BridgeHubWestend, vec![ RuntimeEvent::XcmpQueue(cumulus_pallet_xcmp_queue::Event::XcmpMessageSent { .. 
}) => {}, ] ); }); - // Receive embeded XCM message within `ExportMessage` in Parachain destination - AssetHubWococo::execute_with(|| { - type RuntimeEvent = ::RuntimeEvent; + // Receive embedded XCM message within `ExportMessage` in Parachain destination + AssetHubWestend::execute_with(|| { + type RuntimeEvent = ::RuntimeEvent; assert_expected_events!( - AssetHubWococo, + AssetHubWestend, vec![ RuntimeEvent::MessageQueue(pallet_message_queue::Event::ProcessingFailed { .. diff --git a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/Cargo.toml b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/Cargo.toml index 62b969b682f..e5b1fce5f2b 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/Cargo.toml +++ b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/Cargo.toml @@ -11,7 +11,9 @@ publish = false codec = { package = "parity-scale-codec", version = "3.4.0", default-features = false } # Substrate -frame-support = { path = "../../../../../../../substrate/frame/support", default-features = false} +frame-support = { path = "../../../../../../../substrate/frame/support", default-features = false } +pallet-assets = { path = "../../../../../../../substrate/frame/assets", default-features = false } +pallet-balances = { path = "../../../../../../../substrate/frame/balances", default-features = false } pallet-message-queue = { path = "../../../../../../../substrate/frame/message-queue" } # Polkadot @@ -30,4 +32,4 @@ cumulus-pallet-xcmp-queue = { path = "../../../../../../pallets/xcmp-queue", def cumulus-pallet-dmp-queue = { path = "../../../../../../pallets/dmp-queue", default-features = false} bridge-hub-westend-runtime = { path = "../../../../../../parachains/runtimes/bridge-hubs/bridge-hub-westend", default-features = false } emulated-integration-tests-common = { path = "../../../common", default-features = false} -westend-system-emulated-network ={ path = "../../../networks/westend-system" } +rococo-westend-system-emulated-network = { path = "../../../networks/rococo-westend-system" } diff --git a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/lib.rs b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/lib.rs index f406a73d18d..04746aa8670 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/lib.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/lib.rs @@ -19,7 +19,10 @@ pub use frame_support::assert_ok; // Polkadot pub use xcm::{ prelude::{AccountId32 as AccountId32Junction, *}, - v3::{Error, NetworkId::Rococo as RococoId}, + v3::{ + Error, + NetworkId::{Rococo as RococoId, Westend as WestendId}, + }, }; // Bridges @@ -27,6 +30,8 @@ pub use bp_messages::LaneId; // Cumulus pub use emulated_integration_tests_common::{ + accounts::ALICE, + impls::Inspect, test_parachain_is_trusted_teleporter, xcm_emulator::{ assert_expected_events, bx, helpers::weight_within_threshold, Chain, Parachain as Para, @@ -36,16 +41,22 @@ pub use emulated_integration_tests_common::{ PROOF_SIZE_THRESHOLD, REF_TIME_THRESHOLD, XCM_V3, }; pub use parachains_common::{AccountId, Balance}; -pub use westend_system_emulated_network::{ +pub use rococo_westend_system_emulated_network::{ + asset_hub_rococo_emulated_chain::{ + genesis::ED as ASSET_HUB_ROCOCO_ED, AssetHubRococoParaPallet as AssetHubRococoPallet, + }, + asset_hub_westend_emulated_chain::{ + 
genesis::ED as ASSET_HUB_WESTEND_ED, AssetHubWestendParaPallet as AssetHubWestendPallet, + }, bridge_hub_westend_emulated_chain::{ - genesis::ED as BRIDGE_HUB_ROCOCO_ED, BridgeHubWestendParaPallet as BridgeHubWestendPallet, + genesis::ED as BRIDGE_HUB_WESTEND_ED, BridgeHubWestendParaPallet as BridgeHubWestendPallet, }, - westend_emulated_chain::{genesis::ED as ROCOCO_ED, WestendRelayPallet as WestendPallet}, + westend_emulated_chain::WestendRelayPallet as WestendPallet, + AssetHubRococoPara as AssetHubRococo, AssetHubRococoParaReceiver as AssetHubRococoReceiver, AssetHubWestendPara as AssetHubWestend, AssetHubWestendParaReceiver as AssetHubWestendReceiver, - AssetHubWestendParaSender as AssetHubWestendSender, BridgeHubWestendPara as BridgeHubWestend, - BridgeHubWestendParaReceiver as BridgeHubWestendReceiver, - BridgeHubWestendParaSender as BridgeHubWestendSender, WestendRelay as Westend, - WestendRelayReceiver as WestendReceiver, WestendRelaySender as WestendSender, + AssetHubWestendParaSender as AssetHubWestendSender, BridgeHubRococoPara as BridgeHubRococo, + BridgeHubWestendPara as BridgeHubWestend, BridgeHubWestendParaSender as BridgeHubWestendSender, + WestendRelay as Westend, }; pub const ASSET_ID: u32 = 1; diff --git a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/asset_transfers.rs b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/asset_transfers.rs new file mode 100644 index 00000000000..f90514f80c3 --- /dev/null +++ b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/asset_transfers.rs @@ -0,0 +1,218 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+use crate::*; + +fn send_asset_from_asset_hub_westend_to_asset_hub_rococo(id: MultiLocation, amount: u128) { + let signed_origin = + ::RuntimeOrigin::signed(AssetHubWestendSender::get().into()); + let asset_hub_rococo_para_id = AssetHubRococo::para_id().into(); + let destination = MultiLocation { + parents: 2, + interior: X2(GlobalConsensus(NetworkId::Rococo), Parachain(asset_hub_rococo_para_id)), + }; + let beneficiary_id = AssetHubRococoReceiver::get(); + let beneficiary: MultiLocation = + AccountId32Junction { network: None, id: beneficiary_id.into() }.into(); + let assets: MultiAssets = (id, amount).into(); + let fee_asset_item = 0; + + // fund the AHW's SA on BHW for paying bridge transport fees + let ahw_as_seen_by_bhw = BridgeHubWestend::sibling_location_of(AssetHubWestend::para_id()); + let sov_ahw_on_bhw = BridgeHubWestend::sovereign_account_id_of(ahw_as_seen_by_bhw); + BridgeHubWestend::fund_accounts(vec![(sov_ahw_on_bhw.into(), 10_000_000_000_000u128)]); + + AssetHubWestend::execute_with(|| { + assert_ok!( + ::PolkadotXcm::limited_reserve_transfer_assets( + signed_origin, + bx!(destination.into()), + bx!(beneficiary.into()), + bx!(assets.into()), + fee_asset_item, + WeightLimit::Unlimited, + ) + ); + }); + + BridgeHubWestend::execute_with(|| { + type RuntimeEvent = ::RuntimeEvent; + assert_expected_events!( + BridgeHubWestend, + vec![ + // pay for bridge fees + RuntimeEvent::Balances(pallet_balances::Event::Withdraw { .. }) => {}, + // message exported + RuntimeEvent::BridgeRococoMessages( + pallet_bridge_messages::Event::MessageAccepted { .. } + ) => {}, + // message processed successfully + RuntimeEvent::MessageQueue( + pallet_message_queue::Event::Processed { success: true, .. } + ) => {}, + ] + ); + }); + BridgeHubRococo::execute_with(|| { + type RuntimeEvent = ::RuntimeEvent; + assert_expected_events!( + BridgeHubRococo, + vec![ + // message dispatched successfully + RuntimeEvent::XcmpQueue( + cumulus_pallet_xcmp_queue::Event::XcmpMessageSent { .. } + ) => {}, + ] + ); + }); +} + +#[test] +fn send_wnds_from_asset_hub_westend_to_asset_hub_rococo() { + let wnd_at_asset_hub_westend: MultiLocation = Parent.into(); + let wnd_at_asset_hub_rococo = + MultiLocation { parents: 2, interior: X1(GlobalConsensus(NetworkId::Westend)) }; + let owner: AccountId = AssetHubRococo::account_id_of(ALICE); + AssetHubRococo::force_create_foreign_asset( + wnd_at_asset_hub_rococo, + owner, + true, + ASSET_MIN_BALANCE, + vec![], + ); + let sov_ahr_on_ahw = AssetHubWestend::sovereign_account_of_parachain_on_other_global_consensus( + NetworkId::Rococo, + AssetHubRococo::para_id(), + ); + + let wnds_in_reserve_on_ahw_before = + ::account_data_of(sov_ahr_on_ahw.clone()).free; + let sender_wnds_before = + ::account_data_of(AssetHubWestendSender::get()).free; + let receiver_wnds_before = AssetHubRococo::execute_with(|| { + type Assets = ::ForeignAssets; + >::balance(wnd_at_asset_hub_rococo, &AssetHubRococoReceiver::get()) + }); + + let amount = ASSET_HUB_WESTEND_ED * 1_000; + send_asset_from_asset_hub_westend_to_asset_hub_rococo(wnd_at_asset_hub_westend, amount); + AssetHubRococo::execute_with(|| { + type RuntimeEvent = ::RuntimeEvent; + assert_expected_events!( + AssetHubRococo, + vec![ + // issue WNDs on AHR + RuntimeEvent::ForeignAssets(pallet_assets::Event::Issued { asset_id, owner, .. 
}) => { + asset_id: *asset_id == wnd_at_asset_hub_rococo, + owner: *owner == AssetHubRococoReceiver::get(), + }, + // message processed successfully + RuntimeEvent::MessageQueue( + pallet_message_queue::Event::Processed { success: true, .. } + ) => {}, + ] + ); + }); + + let sender_wnds_after = + ::account_data_of(AssetHubWestendSender::get()).free; + let receiver_wnds_after = AssetHubRococo::execute_with(|| { + type Assets = ::ForeignAssets; + >::balance(wnd_at_asset_hub_rococo, &AssetHubRococoReceiver::get()) + }); + let wnds_in_reserve_on_ahw_after = + ::account_data_of(sov_ahr_on_ahw).free; + + // Sender's balance is reduced + assert!(sender_wnds_before > sender_wnds_after); + // Receiver's balance is increased + assert!(receiver_wnds_after > receiver_wnds_before); + // Reserve balance is increased by sent amount + assert_eq!(wnds_in_reserve_on_ahw_after, wnds_in_reserve_on_ahw_before + amount); +} + +#[test] +fn send_rocs_from_asset_hub_westend_to_asset_hub_rococo() { + let prefund_amount = 10_000_000_000_000u128; + let roc_at_asset_hub_westend = + MultiLocation { parents: 2, interior: X1(GlobalConsensus(NetworkId::Rococo)) }; + let owner: AccountId = AssetHubWestend::account_id_of(ALICE); + AssetHubWestend::force_create_foreign_asset( + roc_at_asset_hub_westend, + owner, + true, + ASSET_MIN_BALANCE, + vec![(AssetHubWestendSender::get(), prefund_amount)], + ); + + // fund the AHW's SA on AHR with the ROC tokens held in reserve + let sov_ahw_on_ahr = AssetHubRococo::sovereign_account_of_parachain_on_other_global_consensus( + NetworkId::Westend, + AssetHubWestend::para_id(), + ); + AssetHubRococo::fund_accounts(vec![(sov_ahw_on_ahr.clone(), prefund_amount)]); + + let rocs_in_reserve_on_ahr_before = + ::account_data_of(sov_ahw_on_ahr.clone()).free; + assert_eq!(rocs_in_reserve_on_ahr_before, prefund_amount); + let sender_rocs_before = AssetHubWestend::execute_with(|| { + type Assets = ::ForeignAssets; + >::balance(roc_at_asset_hub_westend, &AssetHubWestendSender::get()) + }); + assert_eq!(sender_rocs_before, prefund_amount); + let receiver_rocs_before = + ::account_data_of(AssetHubRococoReceiver::get()).free; + + let amount_to_send = ASSET_HUB_ROCOCO_ED * 1_000; + send_asset_from_asset_hub_westend_to_asset_hub_rococo(roc_at_asset_hub_westend, amount_to_send); + AssetHubRococo::execute_with(|| { + type RuntimeEvent = ::RuntimeEvent; + assert_expected_events!( + AssetHubRococo, + vec![ + // ROC is withdrawn from AHW's SA on AHR + RuntimeEvent::Balances( + pallet_balances::Event::Withdraw { who, amount } + ) => { + who: *who == sov_ahw_on_ahr, + amount: *amount == amount_to_send, + }, + // ROCs deposited to beneficiary + RuntimeEvent::Balances(pallet_balances::Event::Deposit { who, .. }) => { + who: *who == AssetHubRococoReceiver::get(), + }, + // message processed successfully + RuntimeEvent::MessageQueue( + pallet_message_queue::Event::Processed { success: true, .. 
} + ) => {}, + ] + ); + }); + + let sender_rocs_after = AssetHubWestend::execute_with(|| { + type Assets = ::ForeignAssets; + >::balance(roc_at_asset_hub_westend, &AssetHubWestendSender::get()) + }); + let receiver_rocs_after = + ::account_data_of(AssetHubRococoReceiver::get()).free; + let rocs_in_reserve_on_ahr_after = + ::account_data_of(sov_ahw_on_ahr.clone()).free; + + // Sender's balance is reduced + assert!(sender_rocs_before > sender_rocs_after); + // Receiver's balance is increased + assert!(receiver_rocs_after > receiver_rocs_before); + // Reserve balance is reduced by sent amount + assert_eq!(rocs_in_reserve_on_ahr_after, rocs_in_reserve_on_ahr_before - amount_to_send); +} diff --git a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/mod.rs b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/mod.rs index 1eef05c6b92..4e2ef1434fd 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/mod.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/mod.rs @@ -13,5 +13,6 @@ // See the License for the specific language governing permissions and // limitations under the License. -mod example; +mod asset_transfers; +mod send_xcm; mod teleport; diff --git a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/example.rs b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/send_xcm.rs similarity index 71% rename from cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/example.rs rename to cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/send_xcm.rs index 1fdd9441e48..4b21d758cd9 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/example.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/send_xcm.rs @@ -16,7 +16,7 @@ use crate::*; #[test] -fn example() { +fn send_xcm_from_westend_relay_to_rococo_asset_hub() { // Init tests variables // XcmPallet send arguments let sudo_origin = ::RuntimeOrigin::root(); @@ -30,7 +30,7 @@ fn example() { UnpaidExecution { weight_limit, check_origin }, ExportMessage { network: RococoId, - destination: X1(Parachain(AssetHubWestend::para_id().into())), + destination: X1(Parachain(AssetHubRococo::para_id().into())), xcm: remote_xcm, }, ])); @@ -71,4 +71,30 @@ fn example() { ] ); }); + + // Rococo Global Consensus + // Receive XCM message in Bridge Hub target Parachain + BridgeHubRococo::execute_with(|| { + type RuntimeEvent = ::RuntimeEvent; + + assert_expected_events!( + BridgeHubRococo, + vec![ + RuntimeEvent::XcmpQueue(cumulus_pallet_xcmp_queue::Event::XcmpMessageSent { .. }) => {}, + ] + ); + }); + // Receive embedded XCM message within `ExportMessage` in Parachain destination + AssetHubRococo::execute_with(|| { + type RuntimeEvent = ::RuntimeEvent; + + assert_expected_events!( + AssetHubRococo, + vec![ + RuntimeEvent::MessageQueue(pallet_message_queue::Event::ProcessingFailed { + .. 
+ }) => {}, + ] + ); + }); } diff --git a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/teleport.rs b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/teleport.rs index 32639b8614b..8dff6c29295 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/teleport.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/teleport.rs @@ -18,7 +18,7 @@ use bridge_hub_westend_runtime::xcm_config::XcmConfig; #[test] fn teleport_to_other_system_parachains_works() { - let amount = BRIDGE_HUB_ROCOCO_ED * 100; + let amount = BRIDGE_HUB_WESTEND_ED * 100; let native_asset: MultiAssets = (Parent, amount).into(); test_parachain_is_trusted_teleporter!( -- GitLab From 7cfc233cdc6e6c709594ff26640a350c2e6fb6a8 Mon Sep 17 00:00:00 2001 From: Marcin S Date: Tue, 14 Nov 2023 15:03:19 +0100 Subject: [PATCH 045/199] PVF: fix detection of unshare-and-change-root security capability (#2304) --- Cargo.lock | 1 + .../node/core/candidate-validation/src/lib.rs | 2 +- polkadot/node/core/pvf/Cargo.toml | 1 + .../benches/host_prepare_rococo_runtime.rs | 2 +- .../node/core/pvf/common/src/worker/mod.rs | 6 +++--- polkadot/node/core/pvf/src/host.rs | 8 ++++++-- polkadot/node/core/pvf/src/security.rs | 20 +++++++++++++++++-- polkadot/node/core/pvf/tests/it/main.rs | 2 +- 8 files changed, 32 insertions(+), 10 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 32d9099b386..a9b6c68b50f 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -12453,6 +12453,7 @@ dependencies = [ "polkadot-node-core-pvf-prepare-worker", "polkadot-node-metrics", "polkadot-node-primitives", + "polkadot-node-subsystem", "polkadot-parachain-primitives", "polkadot-primitives", "procfs", diff --git a/polkadot/node/core/candidate-validation/src/lib.rs b/polkadot/node/core/candidate-validation/src/lib.rs index 89ea0272884..4232e5f1cdd 100644 --- a/polkadot/node/core/candidate-validation/src/lib.rs +++ b/polkadot/node/core/candidate-validation/src/lib.rs @@ -150,7 +150,7 @@ async fn run( ), pvf_metrics, ) - .await; + .await?; ctx.spawn_blocking("pvf-validation-host", task.boxed())?; loop { diff --git a/polkadot/node/core/pvf/Cargo.toml b/polkadot/node/core/pvf/Cargo.toml index 430f7cd5e8e..3e72ca9e532 100644 --- a/polkadot/node/core/pvf/Cargo.toml +++ b/polkadot/node/core/pvf/Cargo.toml @@ -27,6 +27,7 @@ polkadot-core-primitives = { path = "../../../core-primitives" } polkadot-node-core-pvf-common = { path = "common" } polkadot-node-metrics = { path = "../../metrics" } polkadot-node-primitives = { path = "../../primitives" } +polkadot-node-subsystem = { path = "../../subsystem" } polkadot-primitives = { path = "../../../primitives" } sp-core = { path = "../../../../substrate/primitives/core" } diff --git a/polkadot/node/core/pvf/benches/host_prepare_rococo_runtime.rs b/polkadot/node/core/pvf/benches/host_prepare_rococo_runtime.rs index acd80526262..d0cefae6cdb 100644 --- a/polkadot/node/core/pvf/benches/host_prepare_rococo_runtime.rs +++ b/polkadot/node/core/pvf/benches/host_prepare_rococo_runtime.rs @@ -47,7 +47,7 @@ impl TestHost { execute_worker_path, ); f(&mut config); - let (host, task) = start(config, Metrics::default()).await; + let (host, task) = start(config, Metrics::default()).await.unwrap(); let _ = handle.spawn(task); Self { host: Mutex::new(host) } } diff --git a/polkadot/node/core/pvf/common/src/worker/mod.rs b/polkadot/node/core/pvf/common/src/worker/mod.rs index 
274a2fc8039..f6a67b98321 100644 --- a/polkadot/node/core/pvf/common/src/worker/mod.rs +++ b/polkadot/node/core/pvf/common/src/worker/mod.rs @@ -92,13 +92,13 @@ macro_rules! decl_worker_main { std::process::exit(status) }, "--check-can-unshare-user-namespace-and-change-root" => { + #[cfg(target_os = "linux")] + let cache_path_tempdir = std::path::Path::new(&args[2]); #[cfg(target_os = "linux")] let status = if let Err(err) = security::unshare_user_namespace_and_change_root( $crate::worker::WorkerKind::CheckPivotRoot, worker_pid, - // We're not accessing any files, so we can try to pivot_root in the temp - // dir without conflicts with other processes. - &std::env::temp_dir(), + &cache_path_tempdir, ) { // Write the error to stderr, log it on the host-side. eprintln!("{}", err); diff --git a/polkadot/node/core/pvf/src/host.rs b/polkadot/node/core/pvf/src/host.rs index 7b383e8034a..5919b9ba32c 100644 --- a/polkadot/node/core/pvf/src/host.rs +++ b/polkadot/node/core/pvf/src/host.rs @@ -35,6 +35,7 @@ use polkadot_node_core_pvf_common::{ error::{PrepareError, PrepareResult}, pvf::PvfPrepData, }; +use polkadot_node_subsystem::SubsystemResult; use polkadot_parachain_primitives::primitives::ValidationResult; use std::{ collections::HashMap, @@ -203,7 +204,10 @@ impl Config { /// The future should not return normally but if it does then that indicates an unrecoverable error. /// In that case all pending requests will be canceled, dropping the result senders and new ones /// will be rejected. -pub async fn start(config: Config, metrics: Metrics) -> (ValidationHost, impl Future) { +pub async fn start( + config: Config, + metrics: Metrics, +) -> SubsystemResult<(ValidationHost, impl Future)> { gum::debug!(target: LOG_TARGET, ?config, "starting PVF validation host"); // Run checks for supported security features once per host startup. Warn here if not enabled. @@ -273,7 +277,7 @@ pub async fn start(config: Config, metrics: Metrics) -> (ValidationHost, impl Fu }; }; - (validation_host, task) + Ok((validation_host, task)) } /// A mapping from an artifact ID which is in preparation state to the list of pending execution diff --git a/polkadot/node/core/pvf/src/security.rs b/polkadot/node/core/pvf/src/security.rs index 295dd7df94d..0c0c5f40166 100644 --- a/polkadot/node/core/pvf/src/security.rs +++ b/polkadot/node/core/pvf/src/security.rs @@ -27,14 +27,19 @@ const SECURE_MODE_ANNOUNCEMENT: &'static str = \nMore information: https://wiki.polkadot.network/docs/maintain-guides-secure-validator#secure-validator-mode"; /// Run checks for supported security features. +/// +/// # Return +/// +/// Returns the set of security features that we were able to enable. If an error occurs while +/// enabling a security feature we set the corresponding status to `false`. pub async fn check_security_status(config: &Config) -> SecurityStatus { - let Config { prepare_worker_program_path, .. } = config; + let Config { prepare_worker_program_path, cache_path, .. } = config; // TODO: add check that syslog is available and that seccomp violations are logged? 
	let (landlock, seccomp, change_root) = join!(
		check_landlock(prepare_worker_program_path),
		check_seccomp(prepare_worker_program_path),
-		check_can_unshare_user_namespace_and_change_root(prepare_worker_program_path)
+		check_can_unshare_user_namespace_and_change_root(prepare_worker_program_path, cache_path)
	);

	let security_status = SecurityStatus {
@@ -149,11 +154,22 @@ fn print_secure_mode_message(errs: Vec) -> bool {
 async fn check_can_unshare_user_namespace_and_change_root(
	#[cfg_attr(not(target_os = "linux"), allow(unused_variables))]
	prepare_worker_program_path: &Path,
+	#[cfg_attr(not(target_os = "linux"), allow(unused_variables))] cache_path: &Path,
 ) -> SecureModeResult {
	cfg_if::cfg_if! {
		if #[cfg(target_os = "linux")] {
+			let cache_dir_tempdir =
+				crate::worker_intf::tmppath_in("check-can-unshare", cache_path)
+				.await
+				.map_err(
+					|err|
+					SecureModeError::CannotUnshareUserNamespaceAndChangeRoot(
+						format!("could not create a temporary directory in {:?}: {}", cache_path, err)
+					)
+				)?;
			match tokio::process::Command::new(prepare_worker_program_path)
				.arg("--check-can-unshare-user-namespace-and-change-root")
+				.arg(cache_dir_tempdir)
				.output()
				.await
			{
diff --git a/polkadot/node/core/pvf/tests/it/main.rs b/polkadot/node/core/pvf/tests/it/main.rs
index f4fd7f802f5..801b60884fa 100644
--- a/polkadot/node/core/pvf/tests/it/main.rs
+++ b/polkadot/node/core/pvf/tests/it/main.rs
@@ -61,7 +61,7 @@ impl TestHost {
			execute_worker_path,
		);
		f(&mut config);
-		let (host, task) = start(config, Metrics::default()).await;
+		let (host, task) = start(config, Metrics::default()).await.unwrap();
		let _ = tokio::task::spawn(task);
		Self { cache_dir, host: Mutex::new(host) }
	}
--
GitLab

From b70d418f89a722b0543ba2fb1c014d5ba6e65186 Mon Sep 17 00:00:00 2001
From: Lulu
Date: Tue, 14 Nov 2023 14:23:08 +0000
Subject: [PATCH 046/199] Add environment to claim workflow (#2318)

Turns out that to access environment secrets, the workflow must explicitly
opt in to the environment.
---
 .github/workflows/claim-crates.yml | 1 +
 1 file changed, 1 insertion(+)

diff --git a/.github/workflows/claim-crates.yml b/.github/workflows/claim-crates.yml
index a1d28d42828..345d24c7566 100644
--- a/.github/workflows/claim-crates.yml
+++ b/.github/workflows/claim-crates.yml
@@ -8,6 +8,7 @@ on:
 jobs:
   claim-crates:
     runs-on: ubuntu-latest
+    environment: master
     steps:
       - uses: actions/checkout@8ade135a41bc03ea155e62e844d188df1ea18608 # v4.1.0
--
GitLab

From cfe5e62626ac61e452a77ea0545aaeaa2cc1176e Mon Sep 17 00:00:00 2001
From: Alexandru Vasile <60601340+lexnv@users.noreply.github.com>
Date: Tue, 14 Nov 2023 16:54:36 +0200
Subject: [PATCH 047/199] chainHead: Support multiple hashes for `chainHead_unpin` method (#2295)

This PR adds support for multiple hashes being passed to the `chainHead_unpin`
parameters. The `hash` parameter is renamed to `hash_or_hashes` per
https://github.com/paritytech/json-rpc-interface-spec/pull/111.

While at it, a new integration test is added to check the unpinning of multiple
hashes. The API is checked against a hash or a vector of hashes.
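For illustration only (not part of the original PR text): a minimal sketch of the two parameter
shapes now accepted by `chainHead_unstable_unpin`, written in the style of the jsonrpsee-based
integration tests touched below. The `api` handle, `sub_id` and the block-hash variables are
placeholder names.

```rust
// Hypothetical setup: `api` is a jsonrpsee handle exposing the chainHead API, `sub_id` is an
// active chainHead_follow subscription ID, and the hashes are hex-encoded pinned block hashes.

// Unpin a single block (the previously supported shape still works).
let _res: () = api
	.call("chainHead_unstable_unpin", rpc_params![&sub_id, &block_hash])
	.await
	.unwrap();

// Unpin several blocks at once (the new shape). If any hash is invalid, the call errors and
// no blocks are unpinned.
let _res: () = api
	.call("chainHead_unstable_unpin", rpc_params![&sub_id, vec![&block_2_hash, &block_3_hash]])
	.await
	.unwrap();
```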
cc @paritytech/subxt-team --------- Signed-off-by: Alexandru Vasile --- Cargo.lock | 1 + substrate/client/rpc-spec-v2/Cargo.toml | 1 + .../client/rpc-spec-v2/src/chain_head/api.rs | 11 +- .../rpc-spec-v2/src/chain_head/chain_head.rs | 12 +- .../src/chain_head/subscription/inner.rs | 38 ++-- .../src/chain_head/subscription/mod.rs | 17 +- .../rpc-spec-v2/src/chain_head/tests.rs | 173 +++++++++++++++++- 7 files changed, 224 insertions(+), 29 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index a9b6c68b50f..c57a5ce1393 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -15970,6 +15970,7 @@ dependencies = [ "sp-consensus", "sp-core", "sp-maybe-compressed-blob", + "sp-rpc", "sp-runtime", "sp-version", "substrate-test-runtime", diff --git a/substrate/client/rpc-spec-v2/Cargo.toml b/substrate/client/rpc-spec-v2/Cargo.toml index cfe7f8a117d..ca61286ddfa 100644 --- a/substrate/client/rpc-spec-v2/Cargo.toml +++ b/substrate/client/rpc-spec-v2/Cargo.toml @@ -21,6 +21,7 @@ sc-transaction-pool-api = { path = "../transaction-pool/api" } sp-core = { path = "../../primitives/core" } sp-runtime = { path = "../../primitives/runtime" } sp-api = { path = "../../primitives/api" } +sp-rpc = { path = "../../primitives/rpc" } sp-blockchain = { path = "../../primitives/blockchain" } sp-version = { path = "../../primitives/version" } sc-client-api = { path = "../api" } diff --git a/substrate/client/rpc-spec-v2/src/chain_head/api.rs b/substrate/client/rpc-spec-v2/src/chain_head/api.rs index d93c4018b60..427b5499bc1 100644 --- a/substrate/client/rpc-spec-v2/src/chain_head/api.rs +++ b/substrate/client/rpc-spec-v2/src/chain_head/api.rs @@ -21,6 +21,7 @@ //! API trait of the chain head. use crate::chain_head::event::{FollowEvent, MethodResponse, StorageQuery}; use jsonrpsee::{core::RpcResult, proc_macros::rpc}; +use sp_rpc::list::ListOrValue; #[rpc(client, server)] pub trait ChainHeadApi { @@ -109,16 +110,22 @@ pub trait ChainHeadApi { call_parameters: String, ) -> RpcResult; - /// Unpin a block reported by the `follow` method. + /// Unpin a block or multiple blocks reported by the `follow` method. /// /// Ongoing operations that require the provided block /// will continue normally. /// + /// When this method returns an error, it is guaranteed that no blocks have been unpinned. + /// /// # Unstable /// /// This method is unstable and subject to change in the future. #[method(name = "chainHead_unstable_unpin", blocking)] - fn chain_head_unstable_unpin(&self, follow_subscription: String, hash: Hash) -> RpcResult<()>; + fn chain_head_unstable_unpin( + &self, + follow_subscription: String, + hash_or_hashes: ListOrValue, + ) -> RpcResult<()>; /// Resumes a storage fetch started with `chainHead_storage` after it has generated an /// `operationWaitingForContinue` event. 
diff --git a/substrate/client/rpc-spec-v2/src/chain_head/chain_head.rs b/substrate/client/rpc-spec-v2/src/chain_head/chain_head.rs index a8c1c4f7e08..2d01c302037 100644 --- a/substrate/client/rpc-spec-v2/src/chain_head/chain_head.rs +++ b/substrate/client/rpc-spec-v2/src/chain_head/chain_head.rs @@ -48,6 +48,7 @@ use sc_client_api::{ use sp_api::CallApiAt; use sp_blockchain::{Error as BlockChainError, HeaderBackend, HeaderMetadata}; use sp_core::{traits::CallContext, Bytes}; +use sp_rpc::list::ListOrValue; use sp_runtime::traits::Block as BlockT; use std::{marker::PhantomData, sync::Arc, time::Duration}; @@ -432,9 +433,16 @@ where fn chain_head_unstable_unpin( &self, follow_subscription: String, - hash: Block::Hash, + hash_or_hashes: ListOrValue, ) -> RpcResult<()> { - match self.subscriptions.unpin_block(&follow_subscription, hash) { + let result = match hash_or_hashes { + ListOrValue::Value(hash) => + self.subscriptions.unpin_blocks(&follow_subscription, [hash]), + ListOrValue::List(hashes) => + self.subscriptions.unpin_blocks(&follow_subscription, hashes), + }; + + match result { Ok(()) => Ok(()), Err(SubscriptionManagementError::SubscriptionAbsent) => { // Invalid invalid subscription ID. diff --git a/substrate/client/rpc-spec-v2/src/chain_head/subscription/inner.rs b/substrate/client/rpc-spec-v2/src/chain_head/subscription/inner.rs index 8a75029a994..abd42ad9678 100644 --- a/substrate/client/rpc-spec-v2/src/chain_head/subscription/inner.rs +++ b/substrate/client/rpc-spec-v2/src/chain_head/subscription/inner.rs @@ -750,22 +750,36 @@ impl> SubscriptionsInner { } } - pub fn unpin_block( + pub fn unpin_blocks( &mut self, sub_id: &str, - hash: Block::Hash, + hashes: impl IntoIterator + Clone, ) -> Result<(), SubscriptionManagementError> { let Some(sub) = self.subs.get_mut(sub_id) else { return Err(SubscriptionManagementError::SubscriptionAbsent) }; - // Check that unpin was not called before and the block was pinned - // for this subscription. - if !sub.unregister_block(hash) { - return Err(SubscriptionManagementError::BlockHashAbsent) + // Ensure that all blocks are part of the subscription before removing individual + // blocks. + for hash in hashes.clone() { + if !sub.contains_block(hash) { + return Err(SubscriptionManagementError::BlockHashAbsent); + } + } + + // Note: this needs to be separate from the global mappings to avoid barrow checker + // thinking we borrow `&mut self` twice: once from `self.subs.get_mut` and once from + // `self.global_unregister_block`. Although the borrowing is correct, since different + // fields of the structure are borrowed, one at a time. + for hash in hashes.clone() { + sub.unregister_block(hash); + } + + // Block have been removed from the subscription. Remove them from the global tracking. + for hash in hashes { + self.global_unregister_block(hash); } - self.global_unregister_block(hash); Ok(()) } @@ -1029,11 +1043,11 @@ mod tests { assert_eq!(block.has_runtime(), true); let invalid_id = "abc-invalid".to_string(); - let err = subs.unpin_block(&invalid_id, hash).unwrap_err(); + let err = subs.unpin_blocks(&invalid_id, vec![hash]).unwrap_err(); assert_eq!(err, SubscriptionManagementError::SubscriptionAbsent); // Unpin the block. - subs.unpin_block(&id, hash).unwrap(); + subs.unpin_blocks(&id, vec![hash]).unwrap(); let err = subs.lock_block(&id, hash, 1).unwrap_err(); assert_eq!(err, SubscriptionManagementError::BlockHashAbsent); } @@ -1077,13 +1091,13 @@ mod tests { // Ensure the block propagated to the subscription. 
subs.subs.get(&id_second).unwrap().blocks.get(&hash).unwrap(); - subs.unpin_block(&id, hash).unwrap(); + subs.unpin_blocks(&id, vec![hash]).unwrap(); assert_eq!(*subs.global_blocks.get(&hash).unwrap(), 1); // Cannot unpin a block twice for the same subscription. - let err = subs.unpin_block(&id, hash).unwrap_err(); + let err = subs.unpin_blocks(&id, vec![hash]).unwrap_err(); assert_eq!(err, SubscriptionManagementError::BlockHashAbsent); - subs.unpin_block(&id_second, hash).unwrap(); + subs.unpin_blocks(&id_second, vec![hash]).unwrap(); // Block unregistered from the memory. assert!(subs.global_blocks.get(&hash).is_none()); } diff --git a/substrate/client/rpc-spec-v2/src/chain_head/subscription/mod.rs b/substrate/client/rpc-spec-v2/src/chain_head/subscription/mod.rs index b25b1a4913b..c830e662da2 100644 --- a/substrate/client/rpc-spec-v2/src/chain_head/subscription/mod.rs +++ b/substrate/client/rpc-spec-v2/src/chain_head/subscription/mod.rs @@ -94,22 +94,23 @@ impl> SubscriptionManagement { inner.pin_block(sub_id, hash) } - /// Unpin the block from the subscription. + /// Unpin the blocks from the subscription. /// - /// The last subscription that unpins the block is also unpinning the block - /// from the backend. + /// Blocks are reference counted and when the last subscription unpins a given block, the block + /// is also unpinned from the backend. /// /// This method is called only once per subscription. /// - /// Returns an error if the block is not pinned for the subscription or - /// the subscription ID is invalid. - pub fn unpin_block( + /// Returns an error if the subscription ID is invalid, or any of the blocks are not pinned + /// for the subscriptions. When an error is returned, it is guaranteed that no blocks have + /// been unpinned. + pub fn unpin_blocks( &self, sub_id: &str, - hash: Block::Hash, + hashes: impl IntoIterator + Clone, ) -> Result<(), SubscriptionManagementError> { let mut inner = self.inner.write(); - inner.unpin_block(sub_id, hash) + inner.unpin_blocks(sub_id, hashes) } /// Ensure the block remains pinned until the return object is dropped. diff --git a/substrate/client/rpc-spec-v2/src/chain_head/tests.rs b/substrate/client/rpc-spec-v2/src/chain_head/tests.rs index c3f5564ebc4..15b258c4756 100644 --- a/substrate/client/rpc-spec-v2/src/chain_head/tests.rs +++ b/substrate/client/rpc-spec-v2/src/chain_head/tests.rs @@ -1591,14 +1591,17 @@ async fn follow_with_unpin() { // Unpin an invalid subscription ID must return Ok(()). let invalid_hash = hex_string(&INVALID_HASH); let _res: () = api - .call("chainHead_unstable_unpin", ["invalid_sub_id", &invalid_hash]) + .call("chainHead_unstable_unpin", rpc_params!["invalid_sub_id", &invalid_hash]) .await .unwrap(); // Valid subscription with invalid block hash. let invalid_hash = hex_string(&INVALID_HASH); let err = api - .call::<_, serde_json::Value>("chainHead_unstable_unpin", [&sub_id, &invalid_hash]) + .call::<_, serde_json::Value>( + "chainHead_unstable_unpin", + rpc_params![&sub_id, &invalid_hash], + ) .await .unwrap_err(); assert_matches!(err, @@ -1606,7 +1609,10 @@ async fn follow_with_unpin() { ); // To not exceed the number of pinned blocks, we need to unpin before the next import. 
- let _res: () = api.call("chainHead_unstable_unpin", [&sub_id, &block_hash]).await.unwrap(); + let _res: () = api + .call("chainHead_unstable_unpin", rpc_params![&sub_id, &block_hash]) + .await + .unwrap(); // Block tree: // finalized_block -> block -> block2 @@ -1645,6 +1651,160 @@ async fn follow_with_unpin() { assert!(sub.next::>().await.is_none()); } +#[tokio::test] +async fn follow_with_multiple_unpin_hashes() { + let builder = TestClientBuilder::new(); + let backend = builder.backend(); + let mut client = Arc::new(builder.build()); + + let api = ChainHead::new( + client.clone(), + backend, + Arc::new(TaskExecutor::default()), + CHAIN_GENESIS, + ChainHeadConfig { + global_max_pinned_blocks: MAX_PINNED_BLOCKS, + subscription_max_pinned_duration: Duration::from_secs(MAX_PINNED_SECS), + subscription_max_ongoing_operations: MAX_OPERATIONS, + operation_max_storage_items: MAX_PAGINATION_LIMIT, + }, + ) + .into_rpc(); + + let mut sub = api.subscribe("chainHead_unstable_follow", [false]).await.unwrap(); + let sub_id = sub.subscription_id(); + let sub_id = serde_json::to_string(&sub_id).unwrap(); + + // Import 3 blocks. + let block_1 = BlockBuilderBuilder::new(&*client) + .on_parent_block(client.chain_info().genesis_hash) + .with_parent_block_number(0) + .build() + .unwrap() + .build() + .unwrap() + .block; + let block_1_hash = block_1.header.hash(); + client.import(BlockOrigin::Own, block_1.clone()).await.unwrap(); + + let block_2 = BlockBuilderBuilder::new(&*client) + .on_parent_block(block_1.hash()) + .with_parent_block_number(1) + .build() + .unwrap() + .build() + .unwrap() + .block; + let block_2_hash = block_2.header.hash(); + client.import(BlockOrigin::Own, block_2.clone()).await.unwrap(); + + let block_3 = BlockBuilderBuilder::new(&*client) + .on_parent_block(block_2.hash()) + .with_parent_block_number(2) + .build() + .unwrap() + .build() + .unwrap() + .block; + let block_3_hash = block_3.header.hash(); + client.import(BlockOrigin::Own, block_3.clone()).await.unwrap(); + + // Ensure the imported block is propagated and pinned for this subscription. + assert_matches!( + get_next_event::>(&mut sub).await, + FollowEvent::Initialized(_) + ); + assert_matches!( + get_next_event::>(&mut sub).await, + FollowEvent::NewBlock(_) + ); + assert_matches!( + get_next_event::>(&mut sub).await, + FollowEvent::BestBlockChanged(_) + ); + assert_matches!( + get_next_event::>(&mut sub).await, + FollowEvent::NewBlock(_) + ); + assert_matches!( + get_next_event::>(&mut sub).await, + FollowEvent::BestBlockChanged(_) + ); + assert_matches!( + get_next_event::>(&mut sub).await, + FollowEvent::NewBlock(_) + ); + assert_matches!( + get_next_event::>(&mut sub).await, + FollowEvent::BestBlockChanged(_) + ); + + // Unpin an invalid subscription ID must return Ok(()). + let invalid_hash = hex_string(&INVALID_HASH); + let _res: () = api + .call("chainHead_unstable_unpin", rpc_params!["invalid_sub_id", &invalid_hash]) + .await + .unwrap(); + + // Valid subscription with invalid block hash. + let err = api + .call::<_, serde_json::Value>( + "chainHead_unstable_unpin", + rpc_params![&sub_id, &invalid_hash], + ) + .await + .unwrap_err(); + assert_matches!(err, + Error::Call(CallError::Custom(ref err)) if err.code() == 2001 && err.message() == "Invalid block hash" + ); + + let _res: () = api + .call("chainHead_unstable_unpin", rpc_params![&sub_id, &block_1_hash]) + .await + .unwrap(); + + // One block hash is invalid. Block 1 is already unpinned. 
+ let err = api + .call::<_, serde_json::Value>( + "chainHead_unstable_unpin", + rpc_params![&sub_id, vec![&block_1_hash, &block_2_hash, &block_3_hash]], + ) + .await + .unwrap_err(); + assert_matches!(err, + Error::Call(CallError::Custom(ref err)) if err.code() == 2001 && err.message() == "Invalid block hash" + ); + + // Unpin multiple blocks. + let _res: () = api + .call("chainHead_unstable_unpin", rpc_params![&sub_id, vec![&block_2_hash, &block_3_hash]]) + .await + .unwrap(); + + // Check block 2 and 3 are unpinned. + let err = api + .call::<_, serde_json::Value>( + "chainHead_unstable_unpin", + rpc_params![&sub_id, &block_2_hash], + ) + .await + .unwrap_err(); + assert_matches!(err, + Error::Call(CallError::Custom(ref err)) if err.code() == 2001 && err.message() == "Invalid block hash" + ); + + let err = api + .call::<_, serde_json::Value>( + "chainHead_unstable_unpin", + rpc_params![&sub_id, &block_3_hash], + ) + .await + .unwrap_err(); + assert_matches!(err, + Error::Call(CallError::Custom(ref err)) if err.code() == 2001 && err.message() == "Invalid block hash" + ); +} + #[tokio::test] async fn follow_prune_best_block() { let builder = TestClientBuilder::new(); @@ -1828,7 +1988,7 @@ async fn follow_prune_best_block() { let sub_id = sub.subscription_id(); let sub_id = serde_json::to_string(&sub_id).unwrap(); let hash = format!("{:?}", block_2_hash); - let _res: () = api.call("chainHead_unstable_unpin", [&sub_id, &hash]).await.unwrap(); + let _res: () = api.call("chainHead_unstable_unpin", rpc_params![&sub_id, &hash]).await.unwrap(); } #[tokio::test] @@ -2305,7 +2465,10 @@ async fn pin_block_references() { wait_pinned_references(&backend, &hash, 1).await; // To not exceed the number of pinned blocks, we need to unpin before the next import. - let _res: () = api.call("chainHead_unstable_unpin", [&sub_id, &block_hash]).await.unwrap(); + let _res: () = api + .call("chainHead_unstable_unpin", rpc_params![&sub_id, &block_hash]) + .await + .unwrap(); // Make sure unpin clears out the reference. let refs = backend.pin_refs(&hash).unwrap(); -- GitLab From cd38ccff7ff49b8f00bbaf799b2056c97ee6e0e9 Mon Sep 17 00:00:00 2001 From: Alexandru Vasile <60601340+lexnv@users.noreply.github.com> Date: Tue, 14 Nov 2023 16:54:54 +0200 Subject: [PATCH 048/199] chainHead: Remove `chainHead_genesis` method (#2296) The method has been removed from the spec (https://github.com/paritytech/json-rpc-interface-spec/tree/main/src), this PR keeps the `chainHead` in sync with that change. @paritytech/subxt-team --------- Signed-off-by: Alexandru Vasile --- .../client/rpc-spec-v2/src/chain_head/api.rs | 8 ---- .../rpc-spec-v2/src/chain_head/chain_head.rs | 11 +---- .../rpc-spec-v2/src/chain_head/tests.rs | 44 +------------------ substrate/client/service/src/builder.rs | 1 - 4 files changed, 2 insertions(+), 62 deletions(-) diff --git a/substrate/client/rpc-spec-v2/src/chain_head/api.rs b/substrate/client/rpc-spec-v2/src/chain_head/api.rs index 427b5499bc1..9ae80137955 100644 --- a/substrate/client/rpc-spec-v2/src/chain_head/api.rs +++ b/substrate/client/rpc-spec-v2/src/chain_head/api.rs @@ -74,14 +74,6 @@ pub trait ChainHeadApi { hash: Hash, ) -> RpcResult>; - /// Get the chain's genesis hash. - /// - /// # Unstable - /// - /// This method is unstable and subject to change in the future. - #[method(name = "chainHead_unstable_genesisHash", blocking)] - fn chain_head_unstable_genesis_hash(&self) -> RpcResult; - /// Returns storage entries at a specific block's state. 
/// /// # Unstable diff --git a/substrate/client/rpc-spec-v2/src/chain_head/chain_head.rs b/substrate/client/rpc-spec-v2/src/chain_head/chain_head.rs index 2d01c302037..866701a7dbf 100644 --- a/substrate/client/rpc-spec-v2/src/chain_head/chain_head.rs +++ b/substrate/client/rpc-spec-v2/src/chain_head/chain_head.rs @@ -107,8 +107,6 @@ pub struct ChainHead, Block: BlockT, Client> { executor: SubscriptionTaskExecutor, /// Keep track of the pinned blocks for each subscription. subscriptions: Arc>, - /// The hexadecimal encoded hash of the genesis block. - genesis_hash: String, /// The maximum number of items reported by the `chainHead_storage` before /// pagination is required. operation_max_storage_items: usize, @@ -118,14 +116,12 @@ pub struct ChainHead, Block: BlockT, Client> { impl, Block: BlockT, Client> ChainHead { /// Create a new [`ChainHead`]. - pub fn new>( + pub fn new( client: Arc, backend: Arc, executor: SubscriptionTaskExecutor, - genesis_hash: GenesisHash, config: ChainHeadConfig, ) -> Self { - let genesis_hash = hex_string(&genesis_hash.as_ref()); Self { client, backend: backend.clone(), @@ -137,7 +133,6 @@ impl, Block: BlockT, Client> ChainHead { backend, )), operation_max_storage_items: config.operation_max_storage_items, - genesis_hash, _phantom: PhantomData, } } @@ -315,10 +310,6 @@ where .map_err(Into::into) } - fn chain_head_unstable_genesis_hash(&self) -> RpcResult { - Ok(self.genesis_hash.clone()) - } - fn chain_head_unstable_storage( &self, follow_subscription: String, diff --git a/substrate/client/rpc-spec-v2/src/chain_head/tests.rs b/substrate/client/rpc-spec-v2/src/chain_head/tests.rs index 15b258c4756..11c6798bf0a 100644 --- a/substrate/client/rpc-spec-v2/src/chain_head/tests.rs +++ b/substrate/client/rpc-spec-v2/src/chain_head/tests.rs @@ -28,7 +28,7 @@ use futures::Future; use jsonrpsee::{ core::{error::Error, server::rpc_module::Subscription as RpcSubscription}, rpc_params, - types::{error::CallError, EmptyServerParams as EmptyParams}, + types::error::CallError, RpcModule, }; use sc_block_builder::BlockBuilderBuilder; @@ -61,7 +61,6 @@ const MAX_PINNED_BLOCKS: usize = 32; const MAX_PINNED_SECS: u64 = 60; const MAX_OPERATIONS: usize = 16; const MAX_PAGINATION_LIMIT: usize = 5; -const CHAIN_GENESIS: [u8; 32] = [0; 32]; const INVALID_HASH: [u8; 32] = [1; 32]; const KEY: &[u8] = b":mock"; const VALUE: &[u8] = b"hello world"; @@ -111,7 +110,6 @@ async fn setup_api() -> ( client.clone(), backend, Arc::new(TaskExecutor::default()), - CHAIN_GENESIS, ChainHeadConfig { global_max_pinned_blocks: MAX_PINNED_BLOCKS, subscription_max_pinned_duration: Duration::from_secs(MAX_PINNED_SECS), @@ -162,7 +160,6 @@ async fn follow_subscription_produces_blocks() { client.clone(), backend, Arc::new(TaskExecutor::default()), - CHAIN_GENESIS, ChainHeadConfig { global_max_pinned_blocks: MAX_PINNED_BLOCKS, subscription_max_pinned_duration: Duration::from_secs(MAX_PINNED_SECS), @@ -231,7 +228,6 @@ async fn follow_with_runtime() { client.clone(), backend, Arc::new(TaskExecutor::default()), - CHAIN_GENESIS, ChainHeadConfig { global_max_pinned_blocks: MAX_PINNED_BLOCKS, subscription_max_pinned_duration: Duration::from_secs(MAX_PINNED_SECS), @@ -345,31 +341,6 @@ async fn follow_with_runtime() { assert_eq!(event, expected); } -#[tokio::test] -async fn get_genesis() { - let builder = TestClientBuilder::new(); - let backend = builder.backend(); - let client = Arc::new(builder.build()); - - let api = ChainHead::new( - client.clone(), - backend, - Arc::new(TaskExecutor::default()), - CHAIN_GENESIS, 
- ChainHeadConfig { - global_max_pinned_blocks: MAX_PINNED_BLOCKS, - subscription_max_pinned_duration: Duration::from_secs(MAX_PINNED_SECS), - subscription_max_ongoing_operations: MAX_OPERATIONS, - operation_max_storage_items: MAX_PAGINATION_LIMIT, - }, - ) - .into_rpc(); - - let genesis: String = - api.call("chainHead_unstable_genesisHash", EmptyParams::new()).await.unwrap(); - assert_eq!(genesis, hex_string(&CHAIN_GENESIS)); -} - #[tokio::test] async fn get_header() { let (_client, api, _sub, sub_id, block) = setup_api().await; @@ -569,7 +540,6 @@ async fn call_runtime_without_flag() { client.clone(), backend, Arc::new(TaskExecutor::default()), - CHAIN_GENESIS, ChainHeadConfig { global_max_pinned_blocks: MAX_PINNED_BLOCKS, subscription_max_pinned_duration: Duration::from_secs(MAX_PINNED_SECS), @@ -1228,7 +1198,6 @@ async fn separate_operation_ids_for_subscriptions() { client.clone(), backend, Arc::new(TaskExecutor::default()), - CHAIN_GENESIS, ChainHeadConfig { global_max_pinned_blocks: MAX_PINNED_BLOCKS, subscription_max_pinned_duration: Duration::from_secs(MAX_PINNED_SECS), @@ -1316,7 +1285,6 @@ async fn follow_generates_initial_blocks() { client.clone(), backend, Arc::new(TaskExecutor::default()), - CHAIN_GENESIS, ChainHeadConfig { global_max_pinned_blocks: MAX_PINNED_BLOCKS, subscription_max_pinned_duration: Duration::from_secs(MAX_PINNED_SECS), @@ -1472,7 +1440,6 @@ async fn follow_exceeding_pinned_blocks() { client.clone(), backend, Arc::new(TaskExecutor::default()), - CHAIN_GENESIS, ChainHeadConfig { global_max_pinned_blocks: 2, subscription_max_pinned_duration: Duration::from_secs(MAX_PINNED_SECS), @@ -1549,7 +1516,6 @@ async fn follow_with_unpin() { client.clone(), backend, Arc::new(TaskExecutor::default()), - CHAIN_GENESIS, ChainHeadConfig { global_max_pinned_blocks: 2, subscription_max_pinned_duration: Duration::from_secs(MAX_PINNED_SECS), @@ -1815,7 +1781,6 @@ async fn follow_prune_best_block() { client.clone(), backend, Arc::new(TaskExecutor::default()), - CHAIN_GENESIS, ChainHeadConfig { global_max_pinned_blocks: MAX_PINNED_BLOCKS, subscription_max_pinned_duration: Duration::from_secs(MAX_PINNED_SECS), @@ -2001,7 +1966,6 @@ async fn follow_forks_pruned_block() { client.clone(), backend, Arc::new(TaskExecutor::default()), - CHAIN_GENESIS, ChainHeadConfig { global_max_pinned_blocks: MAX_PINNED_BLOCKS, subscription_max_pinned_duration: Duration::from_secs(MAX_PINNED_SECS), @@ -2153,7 +2117,6 @@ async fn follow_report_multiple_pruned_block() { client.clone(), backend, Arc::new(TaskExecutor::default()), - CHAIN_GENESIS, ChainHeadConfig { global_max_pinned_blocks: MAX_PINNED_BLOCKS, subscription_max_pinned_duration: Duration::from_secs(MAX_PINNED_SECS), @@ -2399,7 +2362,6 @@ async fn pin_block_references() { client.clone(), backend.clone(), Arc::new(TaskExecutor::default()), - CHAIN_GENESIS, ChainHeadConfig { global_max_pinned_blocks: 3, subscription_max_pinned_duration: Duration::from_secs(MAX_PINNED_SECS), @@ -2537,7 +2499,6 @@ async fn follow_finalized_before_new_block() { client_mock.clone(), backend, Arc::new(TaskExecutor::default()), - CHAIN_GENESIS, ChainHeadConfig { global_max_pinned_blocks: MAX_PINNED_BLOCKS, subscription_max_pinned_duration: Duration::from_secs(MAX_PINNED_SECS), @@ -2652,7 +2613,6 @@ async fn ensure_operation_limits_works() { client.clone(), backend, Arc::new(TaskExecutor::default()), - CHAIN_GENESIS, ChainHeadConfig { global_max_pinned_blocks: MAX_PINNED_BLOCKS, subscription_max_pinned_duration: Duration::from_secs(MAX_PINNED_SECS), @@ -2757,7 +2717,6 
@@ async fn check_continue_operation() { client.clone(), backend, Arc::new(TaskExecutor::default()), - CHAIN_GENESIS, ChainHeadConfig { global_max_pinned_blocks: MAX_PINNED_BLOCKS, subscription_max_pinned_duration: Duration::from_secs(MAX_PINNED_SECS), @@ -2940,7 +2899,6 @@ async fn stop_storage_operation() { client.clone(), backend, Arc::new(TaskExecutor::default()), - CHAIN_GENESIS, ChainHeadConfig { global_max_pinned_blocks: MAX_PINNED_BLOCKS, subscription_max_pinned_duration: Duration::from_secs(MAX_PINNED_SECS), diff --git a/substrate/client/service/src/builder.rs b/substrate/client/service/src/builder.rs index 3838accde02..25f998385ba 100644 --- a/substrate/client/service/src/builder.rs +++ b/substrate/client/service/src/builder.rs @@ -637,7 +637,6 @@ where client.clone(), backend.clone(), task_executor.clone(), - client.info().genesis_hash, // Defaults to sensible limits for the `ChainHead`. sc_rpc_spec_v2::chain_head::ChainHeadConfig::default(), ) -- GitLab From 7d735fc8ae8c9dde906457e27c9d2694892adc8b Mon Sep 17 00:00:00 2001 From: georgepisaltu <52418509+georgepisaltu@users.noreply.github.com> Date: Tue, 14 Nov 2023 18:22:30 +0200 Subject: [PATCH 049/199] Add simple collator election mechanism (#1340) Fixes https://github.com/paritytech/polkadot-sdk/issues/106 Port of cumulus PR https://github.com/paritytech/cumulus/pull/2960 This PR adds the ability to bid for collator slots even after the max number of collators have already registered. This eliminates the first come, first served mechanism that was in place before. Key changes: - added `update_bond` extrinsic to allow registered candidates to adjust their bonds in order to dynamically control their bids - added `take_candidate_slot` extrinsic to try to replace an already existing candidate by bidding more than them - candidates are now kept in a sorted list in the pallet storage, where the top `DesiredCandidates` out of `MaxCandidates` candidates in the list will be selected by the session pallet as collators - if the candidacy bond is increased through a `set_candidacy_bond` call, candidates which don't meet the new bond requirements are kicked # Checklist - [ ] My PR includes a detailed description as outlined in the "Description" section above - [ ] My PR follows the [labeling requirements](https://github.com/paritytech/polkadot-sdk/blob/master/docs/CONTRIBUTING.md#process) of this project (at minimum one label for `T` required) - [ ] I have made corresponding changes to the documentation (if applicable) - [ ] I have added tests that prove my fix is effective or that my feature works (if applicable) - [ ] If this PR alters any external APIs or interfaces used by Polkadot, the corresponding Polkadot PR is ready as well as the corresponding Cumulus PR (optional) --------- Signed-off-by: georgepisaltu --- .../collator-selection/src/benchmarking.rs | 153 ++- cumulus/pallets/collator-selection/src/lib.rs | 437 ++++-- .../pallets/collator-selection/src/tests.rs | 1210 +++++++++++++++-- .../pallets/collator-selection/src/weights.rs | 36 +- .../src/weights/pallet_collator_selection.rs | 26 +- .../src/weights/pallet_collator_selection.rs | 26 +- .../src/weights/pallet_collator_selection.rs | 26 +- .../src/weights/pallet_collator_selection.rs | 26 +- .../src/weights/pallet_collator_selection.rs | 26 +- .../src/weights/pallet_collator_selection.rs | 26 +- .../src/weights/pallet_collator_selection.rs | 26 +- .../src/weights/pallet_collator_selection.rs | 26 +- .../src/weights/pallet_collator_selection.rs | 26 +- 13 files changed, 1819 
insertions(+), 251 deletions(-) diff --git a/cumulus/pallets/collator-selection/src/benchmarking.rs b/cumulus/pallets/collator-selection/src/benchmarking.rs index 49999dc114d..fa95303495d 100644 --- a/cumulus/pallets/collator-selection/src/benchmarking.rs +++ b/cumulus/pallets/collator-selection/src/benchmarking.rs @@ -25,14 +25,11 @@ use codec::Decode; use frame_benchmarking::{ account, impl_benchmark_test_suite, v2::*, whitelisted_caller, BenchmarkError, }; -use frame_support::{ - dispatch::DispatchResult, - traits::{Currency, EnsureOrigin, Get, ReservableCurrency}, -}; +use frame_support::traits::{Currency, EnsureOrigin, Get, ReservableCurrency}; use frame_system::{pallet_prelude::BlockNumberFor, EventRecord, RawOrigin}; use pallet_authorship::EventHandler; use pallet_session::{self as session, SessionManager}; -use sp_std::prelude::*; +use sp_std::{cmp, prelude::*}; pub type BalanceOf = <::Currency as Currency<::AccountId>>::Balance; @@ -94,7 +91,7 @@ fn register_candidates(count: u32) { assert!(>::get() > 0u32.into(), "Bond cannot be zero!"); for who in candidates { - T::Currency::make_free_balance_be(&who, >::get() * 2u32.into()); + T::Currency::make_free_balance_be(&who, >::get() * 3u32.into()); >::register_as_candidate(RawOrigin::Signed(who).into()).unwrap(); } } @@ -107,8 +104,11 @@ fn min_candidates() -> u32 { fn min_invulnerables() -> u32 { let min_collators = T::MinEligibleCollators::get(); - let candidates_length = >::get().len(); - min_collators.saturating_sub(candidates_length.try_into().unwrap()) + let candidates_length = >::decode_len() + .unwrap_or_default() + .try_into() + .unwrap_or_default(); + min_collators.saturating_sub(candidates_length) } #[benchmarks(where T: pallet_authorship::Config + session::Config)] @@ -160,22 +160,19 @@ mod benchmarks { .unwrap(); } // ... and register them. 
- for (who, _) in candidates { + for (who, _) in candidates.iter() { let deposit = >::get(); - T::Currency::make_free_balance_be(&who, deposit * 1000_u32.into()); - let incoming = CandidateInfo { who: who.clone(), deposit }; - >::try_mutate(|candidates| -> DispatchResult { - if !candidates.iter().any(|candidate| candidate.who == who) { - T::Currency::reserve(&who, deposit)?; - candidates.try_push(incoming).expect("we've respected the bounded vec limit"); - >::insert( - who.clone(), - frame_system::Pallet::::block_number() + T::KickThreshold::get(), - ); - } - Ok(()) + T::Currency::make_free_balance_be(who, deposit * 1000_u32.into()); + >::try_mutate(|list| { + list.try_push(CandidateInfo { who: who.clone(), deposit }).unwrap(); + Ok::<(), BenchmarkError>(()) }) - .expect("only returns ok"); + .unwrap(); + T::Currency::reserve(who, deposit)?; + >::insert( + who.clone(), + frame_system::Pallet::::block_number() + T::KickThreshold::get(), + ); } // now we need to fill up invulnerables @@ -226,10 +223,27 @@ mod benchmarks { } #[benchmark] - fn set_candidacy_bond() -> Result<(), BenchmarkError> { - let bond_amount: BalanceOf = T::Currency::minimum_balance() * 10u32.into(); + fn set_candidacy_bond( + c: Linear<0, { T::MaxCandidates::get() }>, + k: Linear<0, { T::MaxCandidates::get() }>, + ) -> Result<(), BenchmarkError> { + let initial_bond_amount: BalanceOf = T::Currency::minimum_balance() * 2u32.into(); + >::put(initial_bond_amount); + register_validators::(c); + register_candidates::(c); + let kicked = cmp::min(k, c); let origin = T::UpdateOrigin::try_successful_origin().map_err(|_| BenchmarkError::Weightless)?; + let bond_amount = if k > 0 { + >::mutate(|candidates| { + for info in candidates.iter_mut().skip(kicked as usize) { + info.deposit = T::Currency::minimum_balance() * 3u32.into(); + } + }); + T::Currency::minimum_balance() * 3u32.into() + } else { + T::Currency::minimum_balance() + }; #[extrinsic_call] _(origin as T::RuntimeOrigin, bond_amount); @@ -238,6 +252,35 @@ mod benchmarks { Ok(()) } + #[benchmark] + fn update_bond( + c: Linear<{ min_candidates::() + 1 }, { T::MaxCandidates::get() }>, + ) -> Result<(), BenchmarkError> { + >::put(T::Currency::minimum_balance()); + >::put(c); + + register_validators::(c); + register_candidates::(c); + + let caller = >::get()[0].who.clone(); + v2::whitelist!(caller); + + let bond_amount: BalanceOf = + T::Currency::minimum_balance() + T::Currency::minimum_balance(); + + #[extrinsic_call] + _(RawOrigin::Signed(caller.clone()), bond_amount); + + assert_last_event::( + Event::CandidateBondUpdated { account_id: caller, deposit: bond_amount }.into(), + ); + assert!( + >::get().iter().last().unwrap().deposit == + T::Currency::minimum_balance() * 2u32.into() + ); + Ok(()) + } + // worse case is when we have all the max-candidate slots filled except one, and we fill that // one. 
#[benchmark] @@ -267,6 +310,36 @@ mod benchmarks { ); } + #[benchmark] + fn take_candidate_slot(c: Linear<{ min_candidates::() + 1 }, { T::MaxCandidates::get() }>) { + >::put(T::Currency::minimum_balance()); + >::put(1); + + register_validators::(c); + register_candidates::(c); + + let caller: T::AccountId = whitelisted_caller(); + let bond: BalanceOf = T::Currency::minimum_balance() * 10u32.into(); + T::Currency::make_free_balance_be(&caller, bond); + + >::set_keys( + RawOrigin::Signed(caller.clone()).into(), + keys::(c + 1), + Vec::new(), + ) + .unwrap(); + + let target = >::get().iter().last().unwrap().who.clone(); + + #[extrinsic_call] + _(RawOrigin::Signed(caller.clone()), bond / 2u32.into(), target.clone()); + + assert_last_event::( + Event::CandidateReplaced { old: target, new: caller, deposit: bond / 2u32.into() } + .into(), + ); + } + // worse case is the last candidate leaving. #[benchmark] fn leave_intent(c: Linear<{ min_candidates::() + 1 }, { T::MaxCandidates::get() }>) { @@ -276,7 +349,7 @@ mod benchmarks { register_validators::(c); register_candidates::(c); - let leaving = >::get().last().unwrap().who.clone(); + let leaving = >::get().iter().last().unwrap().who.clone(); v2::whitelist!(leaving); #[extrinsic_call] @@ -323,31 +396,37 @@ mod benchmarks { let new_block: BlockNumberFor = 1800u32.into(); let zero_block: BlockNumberFor = 0u32.into(); - let candidates = >::get(); + let candidates: Vec = >::get() + .iter() + .map(|candidate_info| candidate_info.who.clone()) + .collect(); let non_removals = c.saturating_sub(r); for i in 0..c { - >::insert(candidates[i as usize].who.clone(), zero_block); + >::insert(candidates[i as usize].clone(), zero_block); } if non_removals > 0 { for i in 0..non_removals { - >::insert(candidates[i as usize].who.clone(), new_block); + >::insert(candidates[i as usize].clone(), new_block); } } else { for i in 0..c { - >::insert(candidates[i as usize].who.clone(), new_block); + >::insert(candidates[i as usize].clone(), new_block); } } let min_candidates = min_candidates::(); - let pre_length = >::get().len(); + let pre_length = >::decode_len().unwrap_or_default(); frame_system::Pallet::::set_block_number(new_block); - assert!(>::get().len() == c as usize); - + let current_length: u32 = >::decode_len() + .unwrap_or_default() + .try_into() + .unwrap_or_default(); + assert!(c == current_length); #[block] { as SessionManager<_>>::new_session(0); @@ -357,16 +436,20 @@ mod benchmarks { // candidates > removals and remaining candidates > min candidates // => remaining candidates should be shorter than before removal, i.e. some were // actually removed. - assert!(>::get().len() < pre_length); + assert!(>::decode_len().unwrap_or_default() < pre_length); } else if c > r && non_removals < min_candidates { // candidates > removals and remaining candidates would be less than min candidates // => remaining candidates should equal min candidates, i.e. some were removed up to // the minimum, but then any more were "forced" to stay in candidates. 
- assert!(>::get().len() == min_candidates as usize); + let current_length: u32 = >::decode_len() + .unwrap_or_default() + .try_into() + .unwrap_or_default(); + assert!(min_candidates == current_length); } else { // removals >= candidates, non removals must == 0 // can't remove more than exist - assert!(>::get().len() == pre_length); + assert!(>::decode_len().unwrap_or_default() == pre_length); } } diff --git a/cumulus/pallets/collator-selection/src/lib.rs b/cumulus/pallets/collator-selection/src/lib.rs index 24493ce9d9c..7449f4d68c7 100644 --- a/cumulus/pallets/collator-selection/src/lib.rs +++ b/cumulus/pallets/collator-selection/src/lib.rs @@ -35,16 +35,36 @@ //! //! 1. [`Invulnerables`]: a set of collators appointed by governance. These accounts will always be //! collators. -//! 2. [`Candidates`]: these are *candidates to the collation task* and may or may not be elected as -//! a final collator. +//! 2. [`CandidateList`]: these are *candidates to the collation task* and may or may not be elected +//! as a final collator. //! -//! The current implementation resolves congestion of [`Candidates`] in a first-come-first-serve -//! manner. +//! The current implementation resolves congestion of [`CandidateList`] through a simple auction +//! mechanism. Candidates bid for the collator slots and at the end of the session, the auction ends +//! and the top candidates are selected to become collators. The number of selected candidates is +//! determined by the value of `DesiredCandidates`. +//! +//! Before the list reaches full capacity, candidates can register by placing the minimum bond +//! through `register_as_candidate`. Then, if an account wants to participate in the collator slot +//! auction, they have to replace an existing candidate by placing a greater deposit through +//! `take_candidate_slot`. Existing candidates can increase their bids through `update_bond`. +//! +//! At any point, an account can take the place of another account in the candidate list if they put +//! up a greater deposit than the target. While new joiners would like to deposit as little as +//! possible to participate in the auction, the replacement threat incentivizes candidates to bid as +//! close to their budget as possible in order to avoid being replaced. +//! +//! Candidates which are not on "winning" slots in the list can also decrease their deposits through +//! `update_bond`, but candidates who are on top slots and try to decrease their deposits will fail +//! in order to enforce auction mechanics and have meaningful bids. //! //! Candidates will not be allowed to get kicked or `leave_intent` if the total number of collators //! would fall below `MinEligibleCollators`. This is to ensure that some collators will always //! exist, i.e. someone is eligible to produce a block. //! +//! When a new session starts, candidates with the highest deposits will be selected in order until +//! the desired number of collators is reached. Candidates can increase or decrease their deposits +//! between sessions in order to ensure they receive a slot in the collator list. +//! //! ### Rewards //! //! The Collator Selection pallet maintains an on-chain account (the "Pot"). In each block, the @@ -56,8 +76,8 @@ //! //! To initiate rewards, an ED needs to be transferred to the pot address. //! -//! Note: Eventually the Pot distribution may be modified as discussed in -//! [this issue](https://github.com/paritytech/statemint/issues/21#issuecomment-810481073). +//! 
Note: Eventually the Pot distribution may be modified as discussed in [this +//! issue](https://github.com/paritytech/statemint/issues/21#issuecomment-810481073). #![cfg_attr(not(feature = "std"), no_std)] @@ -182,9 +202,12 @@ pub mod pallet { /// The (community, limited) collation candidates. `Candidates` and `Invulnerables` should be /// mutually exclusive. + /// + /// This list is sorted in ascending order by deposit and when the deposits are equal, the least + /// recently updated is considered greater. #[pallet::storage] - #[pallet::getter(fn candidates)] - pub type Candidates = StorageValue< + #[pallet::getter(fn candidate_list)] + pub type CandidateList = StorageValue< _, BoundedVec>, T::MaxCandidates>, ValueQuery, @@ -261,8 +284,12 @@ pub mod pallet { NewCandidacyBond { bond_amount: BalanceOf }, /// A new candidate joined. CandidateAdded { account_id: T::AccountId, deposit: BalanceOf }, + /// Bond of a candidate updated. + CandidateBondUpdated { account_id: T::AccountId, deposit: BalanceOf }, /// A candidate was removed. CandidateRemoved { account_id: T::AccountId }, + /// An account was replaced in the candidate list by another one. + CandidateReplaced { old: T::AccountId, new: T::AccountId, deposit: BalanceOf }, /// An account was unable to be added to the Invulnerables because they did not have keys /// registered. Other Invulnerables may have been set. InvalidInvulnerableSkipped { account_id: T::AccountId }, @@ -288,12 +315,38 @@ pub mod pallet { NoAssociatedValidatorId, /// Validator ID is not yet registered. ValidatorNotRegistered, + /// Could not insert in the candidate list. + InsertToCandidateListFailed, + /// Could not remove from the candidate list. + RemoveFromCandidateListFailed, + /// New deposit amount would be below the minimum candidacy bond. + DepositTooLow, + /// Could not update the candidate list. + UpdateCandidateListFailed, + /// Deposit amount is too low to take the target's slot in the candidate list. + InsufficientBond, + /// The target account to be replaced in the candidate list is not a candidate. + TargetIsNotCandidate, + /// The updated deposit amount is equal to the amount already reserved. + IdenticalDeposit, + /// Cannot lower candidacy bond while occupying a future collator slot in the list. + InvalidUnreserve, } #[pallet::hooks] impl Hooks> for Pallet { fn integrity_test() { assert!(T::MinEligibleCollators::get() > 0, "chain must require at least one collator"); + assert!( + T::MaxInvulnerables::get().saturating_add(T::MaxCandidates::get()) >= + T::MinEligibleCollators::get(), + "invulnerables and candidates must be able to satisfy collator demand" + ); + } + + #[cfg(feature = "try-runtime")] + fn try_state(_: BlockNumberFor) -> Result<(), sp_runtime::TryRuntimeError> { + Self::do_try_state() } } @@ -307,8 +360,8 @@ pub mod pallet { /// acceptable Invulnerables, and is not proposing a _set_ of new Invulnerables. /// /// This call does not maintain mutual exclusivity of `Invulnerables` and `Candidates`. It - /// is recommended to use a batch of `add_invulnerable` and `remove_invulnerable` instead. - /// A `batch_all` can also be used to enforce atomicity. If any candidates are included in + /// is recommended to use a batch of `add_invulnerable` and `remove_invulnerable` instead. A + /// `batch_all` can also be used to enforce atomicity. If any candidates are included in /// `new`, they should be removed with `remove_invulnerable_candidate` after execution. /// /// Must be called by the `UpdateOrigin`. 
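[Editor's note — illustrative sketch, not part of the patch.] The doc comments and the `CandidateList` storage item above describe an ordering invariant (ascending by deposit, with ties resolved so the least recently updated entry counts as greater) and a selection rule (winners are read from the back of the list, up to `DesiredCandidates`). The following self-contained sketch models that behaviour with a plain `Vec`; the helper names (`insert_sorted`, `select_collators`, `Candidate`) are hypothetical and do not exist in the pallet, but the insertion rule mirrors the `new_pos` computation in `update_bond` and the selection mirrors `assemble_collators` later in this diff.

```rust
// Hypothetical, stand-alone model of the `CandidateList` ordering; not pallet code.
struct Candidate {
    who: u64,
    deposit: u64,
}

// Insert `new` at the first position whose deposit is >= `new.deposit`.
// This is the same rule `update_bond` uses when re-inserting an entry, and it
// also reproduces `register_as_candidate`, which pushes new minimum-bond
// entries to the front (position 0).
fn insert_sorted(list: &mut Vec<Candidate>, new: Candidate) {
    let pos = list
        .iter()
        .position(|c| c.deposit >= new.deposit)
        .unwrap_or(list.len());
    list.insert(pos, new);
}

// Mirror of `assemble_collators`: winners are taken from the back of the list
// (highest deposits first), capped at the desired number of collators.
fn select_collators(list: &[Candidate], desired: usize) -> Vec<u64> {
    list.iter().rev().take(desired).map(|c| c.who).collect()
}

fn main() {
    let mut list = Vec::new();
    // Accounts 3 and 4 register with the minimum bond; the newer registrant
    // lands closer to the front, so the older one is "greater" on a tie.
    insert_sorted(&mut list, Candidate { who: 3, deposit: 10 });
    insert_sorted(&mut list, Candidate { who: 4, deposit: 10 });
    // A higher bid moves an account towards the winning (back) end.
    insert_sorted(&mut list, Candidate { who: 5, deposit: 30 });

    // Front-to-back order is now [4, 3, 5]. With two desired slots, account 5
    // wins outright and account 3 wins the tie against 4 because it registered
    // earlier (least recently updated is considered greater).
    assert_eq!(select_collators(&list, 2), vec![5, 3]);
}
```

The same ordering is what the tests below exercise: after registering accounts 3, 4 and 5 with equal deposits, the stored list reads `[candidate_5, candidate_4, candidate_3]`, and raising a bid via `update_bond` (or outbidding via `take_candidate_slot`) moves the account towards the back of the list.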
@@ -319,8 +372,9 @@ pub mod pallet { // don't wipe out the collator set if new.is_empty() { + // Casting `u32` to `usize` should be safe on all machines running this. ensure!( - Candidates::::decode_len().unwrap_or_default() >= + CandidateList::::decode_len().unwrap_or_default() >= T::MinEligibleCollators::get() as usize, Error::::TooFewEligibleCollators ); @@ -401,17 +455,47 @@ pub mod pallet { /// Set the candidacy bond amount. /// + /// If the candidacy bond is increased by this call, all current candidates which have a + /// deposit lower than the new bond will be kicked from the list and get their deposits + /// back. + /// /// The origin for this call must be the `UpdateOrigin`. #[pallet::call_index(2)] - #[pallet::weight(T::WeightInfo::set_candidacy_bond())] + #[pallet::weight(T::WeightInfo::set_candidacy_bond( + T::MaxCandidates::get(), + T::MaxCandidates::get() + ))] pub fn set_candidacy_bond( origin: OriginFor, bond: BalanceOf, ) -> DispatchResultWithPostInfo { T::UpdateOrigin::ensure_origin(origin)?; - >::put(bond); + let bond_increased = >::mutate(|old_bond| -> bool { + let bond_increased = *old_bond < bond; + *old_bond = bond; + bond_increased + }); + let initial_len = >::decode_len().unwrap_or_default(); + let kicked = (bond_increased && initial_len > 0) + .then(|| { + // Closure below returns the number of candidates which were kicked because + // their deposits were lower than the new candidacy bond. + >::mutate(|candidates| -> usize { + let first_safe_candidate = candidates + .iter() + .position(|candidate| candidate.deposit >= bond) + .unwrap_or(initial_len); + let kicked_candidates = candidates.drain(..first_safe_candidate); + for candidate in kicked_candidates { + T::Currency::unreserve(&candidate.who, candidate.deposit); + >::remove(candidate.who); + } + first_safe_candidate + }) + }) + .unwrap_or_default(); Self::deposit_event(Event::NewCandidacyBond { bond_amount: bond }); - Ok(().into()) + Ok(Some(T::WeightInfo::set_candidacy_bond(initial_len as u32, kicked as u32)).into()) } /// Register this account as a collator candidate. The account must (a) already have @@ -424,8 +508,11 @@ pub mod pallet { let who = ensure_signed(origin)?; // ensure we are below limit. - let length = >::decode_len().unwrap_or_default(); - ensure!((length as u32) < Self::desired_candidates(), Error::::TooManyCandidates); + let length: u32 = >::decode_len() + .unwrap_or_default() + .try_into() + .unwrap_or_default(); + ensure!(length < T::MaxCandidates::get(), Error::::TooManyCandidates); ensure!(!Self::invulnerables().contains(&who), Error::::AlreadyInvulnerable); let validator_key = T::ValidatorIdOf::convert(who.clone()) @@ -437,25 +524,27 @@ pub mod pallet { let deposit = Self::candidacy_bond(); // First authored block is current block plus kick threshold to handle session delay - let incoming = CandidateInfo { who: who.clone(), deposit }; - - let current_count = - >::try_mutate(|candidates| -> Result { - if candidates.iter().any(|candidate| candidate.who == who) { - Err(Error::::AlreadyCandidate)? 
- } else { - T::Currency::reserve(&who, deposit)?; - candidates.try_push(incoming).map_err(|_| Error::::TooManyCandidates)?; - >::insert( - who.clone(), - frame_system::Pallet::::block_number() + T::KickThreshold::get(), - ); - Ok(candidates.len()) - } - })?; + >::try_mutate(|candidates| -> Result<(), DispatchError> { + ensure!( + !candidates.iter().any(|candidate_info| candidate_info.who == who), + Error::::AlreadyCandidate + ); + T::Currency::reserve(&who, deposit)?; + >::insert( + who.clone(), + frame_system::Pallet::::block_number() + T::KickThreshold::get(), + ); + candidates + .try_insert(0, CandidateInfo { who: who.clone(), deposit }) + .map_err(|_| Error::::InsertToCandidateListFailed)?; + Ok(()) + })?; Self::deposit_event(Event::CandidateAdded { account_id: who, deposit }); - Ok(Some(T::WeightInfo::register_as_candidate(current_count as u32)).into()) + // Safe to do unchecked add here because we ensure above that `length < + // T::MaxCandidates::get()`, and since `T::MaxCandidates` is `u32` it can be at most + // `u32::MAX`, therefore `length + 1` cannot overflow. + Ok(Some(T::WeightInfo::register_as_candidate(length + 1)).into()) } /// Deregister `origin` as a collator candidate. Note that the collator can only leave on @@ -468,13 +557,14 @@ pub mod pallet { pub fn leave_intent(origin: OriginFor) -> DispatchResultWithPostInfo { let who = ensure_signed(origin)?; ensure!( - Self::eligible_collators() > T::MinEligibleCollators::get() as usize, + Self::eligible_collators() > T::MinEligibleCollators::get(), Error::::TooFewEligibleCollators ); + let length = >::decode_len().unwrap_or_default(); // Do remove their last authored block. - let current_count = Self::try_remove_candidate(&who, true)?; + Self::try_remove_candidate(&who, true)?; - Ok(Some(T::WeightInfo::leave_intent(current_count as u32)).into()) + Ok(Some(T::WeightInfo::leave_intent(length.saturating_sub(1) as u32)).into()) } /// Add a new account `who` to the list of `Invulnerables` collators. `who` must have @@ -521,7 +611,7 @@ pub mod pallet { .unwrap_or_default() .try_into() .unwrap_or(T::MaxInvulnerables::get().saturating_sub(1)), - Candidates::::decode_len() + >::decode_len() .unwrap_or_default() .try_into() .unwrap_or(T::MaxCandidates::get()), @@ -540,7 +630,7 @@ pub mod pallet { T::UpdateOrigin::ensure_origin(origin)?; ensure!( - Self::eligible_collators() > T::MinEligibleCollators::get() as usize, + Self::eligible_collators() > T::MinEligibleCollators::get(), Error::::TooFewEligibleCollators ); @@ -554,6 +644,154 @@ pub mod pallet { Self::deposit_event(Event::InvulnerableRemoved { account_id: who }); Ok(()) } + + /// Update the candidacy bond of collator candidate `origin` to a new amount `new_deposit`. + /// + /// Setting a `new_deposit` that is lower than the current deposit while `origin` is + /// occupying a top-`DesiredCandidates` slot is not allowed. + /// + /// This call will fail if `origin` is not a collator candidate, the updated bond is lower + /// than the minimum candidacy bond, and/or the amount cannot be reserved. + #[pallet::call_index(7)] + #[pallet::weight(T::WeightInfo::update_bond(T::MaxCandidates::get()))] + pub fn update_bond( + origin: OriginFor, + new_deposit: BalanceOf, + ) -> DispatchResultWithPostInfo { + let who = ensure_signed(origin)?; + ensure!(new_deposit >= >::get(), Error::::DepositTooLow); + // The function below will try to mutate the `CandidateList` entry for the caller to + // update their deposit to the new value of `new_deposit`. 
The return value is the + // position of the entry in the list, used for weight calculation. + let length = + >::try_mutate(|candidates| -> Result { + let idx = candidates + .iter() + .position(|candidate_info| candidate_info.who == who) + .ok_or_else(|| Error::::NotCandidate)?; + let candidate_count = candidates.len(); + // Remove the candidate from the list. + let mut info = candidates.remove(idx); + let old_deposit = info.deposit; + if new_deposit > old_deposit { + T::Currency::reserve(&who, new_deposit - old_deposit)?; + } else if new_deposit < old_deposit { + // Casting `u32` to `usize` should be safe on all machines running this. + ensure!( + idx.saturating_add(>::get() as usize) < + candidate_count, + Error::::InvalidUnreserve + ); + T::Currency::unreserve(&who, old_deposit - new_deposit); + } else { + return Err(Error::::IdenticalDeposit.into()) + } + + // Update the deposit and insert the candidate in the correct spot in the list. + info.deposit = new_deposit; + let new_pos = candidates + .iter() + .position(|candidate| candidate.deposit >= new_deposit) + .unwrap_or_else(|| candidates.len()); + candidates + .try_insert(new_pos, info) + .map_err(|_| Error::::InsertToCandidateListFailed)?; + + Ok(candidate_count) + })?; + + Self::deposit_event(Event::CandidateBondUpdated { + account_id: who, + deposit: new_deposit, + }); + Ok(Some(T::WeightInfo::update_bond(length as u32)).into()) + } + + /// The caller `origin` replaces a candidate `target` in the collator candidate list by + /// reserving `deposit`. The amount `deposit` reserved by the caller must be greater than + /// the existing bond of the target it is trying to replace. + /// + /// This call will fail if the caller is already a collator candidate or invulnerable, the + /// caller does not have registered session keys, the target is not a collator candidate, + /// and/or the `deposit` amount cannot be reserved. + #[pallet::call_index(8)] + #[pallet::weight(T::WeightInfo::take_candidate_slot(T::MaxCandidates::get()))] + pub fn take_candidate_slot( + origin: OriginFor, + deposit: BalanceOf, + target: T::AccountId, + ) -> DispatchResultWithPostInfo { + let who = ensure_signed(origin)?; + + ensure!(!Self::invulnerables().contains(&who), Error::::AlreadyInvulnerable); + ensure!(deposit >= Self::candidacy_bond(), Error::::InsufficientBond); + + let validator_key = T::ValidatorIdOf::convert(who.clone()) + .ok_or(Error::::NoAssociatedValidatorId)?; + ensure!( + T::ValidatorRegistration::is_registered(&validator_key), + Error::::ValidatorNotRegistered + ); + + let length = >::decode_len().unwrap_or_default(); + // The closure below iterates through all elements of the candidate list to ensure that + // the caller isn't already a candidate and to find the target it's trying to replace in + // the list. The return value is a tuple of the position of the candidate to be replaced + // in the list along with its candidate information. + let target_info = >::try_mutate( + |candidates| -> Result>, DispatchError> { + // Find the position in the list of the candidate that is being replaced. + let mut target_info_idx = None; + let mut new_info_idx = None; + for (idx, candidate_info) in candidates.iter().enumerate() { + // While iterating through the candidates trying to find the target, + // also ensure on the same pass that our caller isn't already a + // candidate. 
+ ensure!(candidate_info.who != who, Error::::AlreadyCandidate); + // If we find our target, update the position but do not stop the + // iteration since we're also checking that the caller isn't already a + // candidate. + if candidate_info.who == target { + target_info_idx = Some(idx); + } + // Find the spot where the new candidate would be inserted in the current + // version of the list. + if new_info_idx.is_none() && candidate_info.deposit >= deposit { + new_info_idx = Some(idx); + } + } + let target_info_idx = + target_info_idx.ok_or(Error::::TargetIsNotCandidate)?; + + // Remove the old candidate from the list. + let target_info = candidates.remove(target_info_idx); + ensure!(deposit > target_info.deposit, Error::::InsufficientBond); + + // We have removed one element before `new_info_idx`, so the position we have to + // insert to is reduced by 1. + let new_pos = new_info_idx + .map(|i| i.saturating_sub(1)) + .unwrap_or_else(|| candidates.len()); + let new_info = CandidateInfo { who: who.clone(), deposit }; + // Insert the new candidate in the correct spot in the list. + candidates + .try_insert(new_pos, new_info) + .expect("candidate count previously decremented; qed"); + + Ok(target_info) + }, + )?; + T::Currency::reserve(&who, deposit)?; + T::Currency::unreserve(&target_info.who, target_info.deposit); + >::remove(target_info.who.clone()); + >::insert( + who.clone(), + frame_system::Pallet::::block_number() + T::KickThreshold::get(), + ); + + Self::deposit_event(Event::CandidateReplaced { old: target, new: who, deposit }); + Ok(Some(T::WeightInfo::take_candidate_slot(length as u32)).into()) + } } impl Pallet { @@ -564,84 +802,122 @@ pub mod pallet { /// Return the total number of accounts that are eligible collators (candidates and /// invulnerables). - fn eligible_collators() -> usize { - Candidates::::decode_len() + fn eligible_collators() -> u32 { + >::decode_len() .unwrap_or_default() .saturating_add(Invulnerables::::decode_len().unwrap_or_default()) + .try_into() + .unwrap_or(u32::MAX) } /// Removes a candidate if they exist and sends them back their deposit. fn try_remove_candidate( who: &T::AccountId, remove_last_authored: bool, - ) -> Result { - let current_count = - >::try_mutate(|candidates| -> Result { - let index = candidates - .iter() - .position(|candidate| candidate.who == *who) - .ok_or(Error::::NotCandidate)?; - let candidate = candidates.remove(index); - T::Currency::unreserve(who, candidate.deposit); - if remove_last_authored { - >::remove(who.clone()) - }; - Ok(candidates.len()) - })?; + ) -> Result<(), DispatchError> { + >::try_mutate(|candidates| -> Result<(), DispatchError> { + let idx = candidates + .iter() + .position(|candidate_info| candidate_info.who == *who) + .ok_or(Error::::NotCandidate)?; + let deposit = candidates[idx].deposit; + T::Currency::unreserve(who, deposit); + candidates.remove(idx); + if remove_last_authored { + >::remove(who.clone()) + }; + Ok(()) + })?; Self::deposit_event(Event::CandidateRemoved { account_id: who.clone() }); - Ok(current_count) + Ok(()) } /// Assemble the current set of candidates and invulnerables into the next collator set. /// /// This is done on the fly, as frequent as we are told to do so, as the session manager. - pub fn assemble_collators( - candidates: BoundedVec, - ) -> Vec { + pub fn assemble_collators() -> Vec { + // Casting `u32` to `usize` should be safe on all machines running this. 
+ let desired_candidates = >::get() as usize; let mut collators = Self::invulnerables().to_vec(); - collators.extend(candidates); + collators.extend( + >::get() + .iter() + .rev() + .cloned() + .take(desired_candidates) + .map(|candidate_info| candidate_info.who), + ); collators } /// Kicks out candidates that did not produce a block in the kick threshold and refunds /// their deposits. - pub fn kick_stale_candidates( - candidates: BoundedVec>, T::MaxCandidates>, - ) -> BoundedVec { + /// + /// Return value is the number of candidates left in the list. + pub fn kick_stale_candidates(candidates: impl IntoIterator) -> u32 { let now = frame_system::Pallet::::block_number(); let kick_threshold = T::KickThreshold::get(); let min_collators = T::MinEligibleCollators::get(); candidates .into_iter() .filter_map(|c| { - let last_block = >::get(c.who.clone()); + let last_block = >::get(c.clone()); let since_last = now.saturating_sub(last_block); - let is_invulnerable = Self::invulnerables().contains(&c.who); + let is_invulnerable = Self::invulnerables().contains(&c); let is_lazy = since_last >= kick_threshold; if is_invulnerable { - // They are invulnerable. No reason for them to be in Candidates also. + // They are invulnerable. No reason for them to be in `CandidateList` also. // We don't even care about the min collators here, because an Account // should not be a collator twice. - let _ = Self::try_remove_candidate(&c.who, false); + let _ = Self::try_remove_candidate(&c, false); None } else { - if Self::eligible_collators() <= min_collators as usize || !is_lazy { + if Self::eligible_collators() <= min_collators || !is_lazy { // Either this is a good collator (not lazy) or we are at the minimum // that the system needs. They get to stay. - Some(c.who) + Some(c) } else { // This collator has not produced a block recently enough. Bye bye. - let _ = Self::try_remove_candidate(&c.who, true); + let _ = Self::try_remove_candidate(&c, true); None } } }) - .collect::>() + .count() .try_into() .expect("filter_map operation can't result in a bounded vec larger than its original; qed") } + + /// Ensure the correctness of the state of this pallet. + /// + /// This should be valid before or after each state transition of this pallet. + /// + /// # Invariants + /// + /// ## `DesiredCandidates` + /// + /// * The current desired candidate count should not exceed the candidate list capacity. + /// * The number of selected candidates together with the invulnerables must be greater than + /// or equal to the minimum number of eligible collators. + #[cfg(any(test, feature = "try-runtime"))] + pub fn do_try_state() -> Result<(), sp_runtime::TryRuntimeError> { + let desired_candidates = >::get(); + + frame_support::ensure!( + desired_candidates <= T::MaxCandidates::get(), + "Shouldn't demand more candidates than the pallet config allows." + ); + + frame_support::ensure!( + desired_candidates.saturating_add(T::MaxInvulnerables::get()) >= + T::MinEligibleCollators::get(), + "Invulnerable set together with desired candidates should be able to meet the collator quota." 
+ ); + + Ok(()) + } } /// Keep track of number of authored blocks per authority, uncles are counted as well since @@ -677,14 +953,23 @@ pub mod pallet { >::block_number(), ); - let candidates = Self::candidates(); - let candidates_len_before = candidates.len(); - let active_candidates = Self::kick_stale_candidates(candidates); - let removed = candidates_len_before - active_candidates.len(); - let result = Self::assemble_collators(active_candidates); + // The `expect` below is safe because the list is a `BoundedVec` with a max size of + // `T::MaxCandidates`, which is a `u32`. When `decode_len` returns `Some(len)`, `len` + // must be valid and at most `u32::MAX`, which must always be able to convert to `u32`. + let candidates_len_before: u32 = >::decode_len() + .unwrap_or_default() + .try_into() + .expect("length is at most `T::MaxCandidates`, so it must fit in `u32`; qed"); + let active_candidates_count = Self::kick_stale_candidates( + >::get() + .iter() + .map(|candidate_info| candidate_info.who.clone()), + ); + let removed = candidates_len_before.saturating_sub(active_candidates_count); + let result = Self::assemble_collators(); frame_system::Pallet::::register_extra_weight_unchecked( - T::WeightInfo::new_session(candidates_len_before as u32, removed as u32), + T::WeightInfo::new_session(candidates_len_before, removed), DispatchClass::Mandatory, ); Some(result) diff --git a/cumulus/pallets/collator-selection/src/tests.rs b/cumulus/pallets/collator-selection/src/tests.rs index d4dae513df3..ed2044ccdfa 100644 --- a/cumulus/pallets/collator-selection/src/tests.rs +++ b/cumulus/pallets/collator-selection/src/tests.rs @@ -28,7 +28,7 @@ fn basic_setup_works() { assert_eq!(CollatorSelection::desired_candidates(), 2); assert_eq!(CollatorSelection::candidacy_bond(), 10); - assert!(CollatorSelection::candidates().is_empty()); + assert_eq!(>::get().iter().count(), 0); // genesis should sort input assert_eq!(CollatorSelection::invulnerables(), vec![1, 2]); }); @@ -202,7 +202,8 @@ fn candidate_to_invulnerable_works() { initialize_to_block(1); assert_eq!(CollatorSelection::desired_candidates(), 2); assert_eq!(CollatorSelection::candidacy_bond(), 10); - assert_eq!(CollatorSelection::candidates(), Vec::new()); + + assert_eq!(>::get().iter().count(), 0); assert_eq!(CollatorSelection::invulnerables(), vec![1, 2]); assert_eq!(Balances::free_balance(3), 100); @@ -226,7 +227,7 @@ fn candidate_to_invulnerable_works() { )); assert!(CollatorSelection::invulnerables().to_vec().contains(&3)); assert_eq!(Balances::free_balance(3), 100); - assert_eq!(CollatorSelection::candidates().len(), 1); + assert_eq!(>::get().iter().count(), 1); assert_ok!(CollatorSelection::add_invulnerable( RuntimeOrigin::signed(RootAccount::get()), @@ -240,7 +241,8 @@ fn candidate_to_invulnerable_works() { )); assert!(CollatorSelection::invulnerables().to_vec().contains(&4)); assert_eq!(Balances::free_balance(4), 100); - assert_eq!(CollatorSelection::candidates().len(), 0); + + assert_eq!(>::get().iter().count(), 0); }); } @@ -266,42 +268,230 @@ fn set_desired_candidates_works() { } #[test] -fn set_candidacy_bond() { +fn set_candidacy_bond_empty_candidate_list() { new_test_ext().execute_with(|| { // given assert_eq!(CollatorSelection::candidacy_bond(), 10); + assert!(>::get().is_empty()); - // can set + // can decrease without candidates assert_ok!(CollatorSelection::set_candidacy_bond( RuntimeOrigin::signed(RootAccount::get()), 7 )); assert_eq!(CollatorSelection::candidacy_bond(), 7); + assert!(>::get().is_empty()); // rejects bad origin. 
assert_noop!(CollatorSelection::set_candidacy_bond(RuntimeOrigin::signed(1), 8), BadOrigin); + + // can increase without candidates + assert_ok!(CollatorSelection::set_candidacy_bond( + RuntimeOrigin::signed(RootAccount::get()), + 20 + )); + assert!(>::get().is_empty()); + assert_eq!(CollatorSelection::candidacy_bond(), 20); }); } #[test] -fn cannot_register_candidate_if_too_many() { +fn set_candidacy_bond_with_one_candidate() { new_test_ext().execute_with(|| { - // reset desired candidates: - >::put(0); + // given + assert_eq!(CollatorSelection::candidacy_bond(), 10); + assert!(>::get().is_empty()); - // can't accept anyone anymore. - assert_noop!( - CollatorSelection::register_as_candidate(RuntimeOrigin::signed(3)), - Error::::TooManyCandidates, + let candidate_3 = CandidateInfo { who: 3, deposit: 10 }; + + assert_ok!(CollatorSelection::register_as_candidate(RuntimeOrigin::signed(3))); + assert_eq!(>::get(), vec![candidate_3.clone()]); + + // can decrease with one candidate + assert_ok!(CollatorSelection::set_candidacy_bond( + RuntimeOrigin::signed(RootAccount::get()), + 7 + )); + assert_eq!(CollatorSelection::candidacy_bond(), 7); + assert_eq!(>::get(), vec![candidate_3.clone()]); + + // can increase up to initial deposit + assert_ok!(CollatorSelection::set_candidacy_bond( + RuntimeOrigin::signed(RootAccount::get()), + 10 + )); + assert_eq!(CollatorSelection::candidacy_bond(), 10); + assert_eq!(>::get(), vec![candidate_3.clone()]); + + // can increase past initial deposit, should kick existing candidate + assert_ok!(CollatorSelection::set_candidacy_bond( + RuntimeOrigin::signed(RootAccount::get()), + 20 + )); + assert!(>::get().is_empty()); + }); +} + +#[test] +fn set_candidacy_bond_with_many_candidates_same_deposit() { + new_test_ext().execute_with(|| { + // given + assert_eq!(CollatorSelection::candidacy_bond(), 10); + assert!(>::get().is_empty()); + + let candidate_3 = CandidateInfo { who: 3, deposit: 10 }; + let candidate_4 = CandidateInfo { who: 4, deposit: 10 }; + let candidate_5 = CandidateInfo { who: 5, deposit: 10 }; + + assert_ok!(CollatorSelection::register_as_candidate(RuntimeOrigin::signed(3))); + assert_ok!(CollatorSelection::register_as_candidate(RuntimeOrigin::signed(4))); + assert_ok!(CollatorSelection::register_as_candidate(RuntimeOrigin::signed(5))); + assert_eq!( + >::get(), + vec![candidate_5.clone(), candidate_4.clone(), candidate_3.clone()] ); - // reset desired candidates: - >::put(1); + // can decrease with multiple candidates + assert_ok!(CollatorSelection::set_candidacy_bond( + RuntimeOrigin::signed(RootAccount::get()), + 7 + )); + assert_eq!(CollatorSelection::candidacy_bond(), 7); + assert_eq!( + >::get(), + vec![candidate_5.clone(), candidate_4.clone(), candidate_3.clone()] + ); + + // can increase up to initial deposit + assert_ok!(CollatorSelection::set_candidacy_bond( + RuntimeOrigin::signed(RootAccount::get()), + 10 + )); + assert_eq!(CollatorSelection::candidacy_bond(), 10); + assert_eq!( + >::get(), + vec![candidate_5.clone(), candidate_4.clone(), candidate_3.clone()] + ); + + // can increase past initial deposit, should kick existing candidates + assert_ok!(CollatorSelection::set_candidacy_bond( + RuntimeOrigin::signed(RootAccount::get()), + 20 + )); + assert!(>::get().is_empty()); + }); +} + +#[test] +fn set_candidacy_bond_with_many_candidates_different_deposits() { + new_test_ext().execute_with(|| { + // given + assert_eq!(CollatorSelection::candidacy_bond(), 10); + assert!(>::get().is_empty()); + + let candidate_3 = CandidateInfo { who: 3, 
deposit: 10 }; + let candidate_4 = CandidateInfo { who: 4, deposit: 20 }; + let candidate_5 = CandidateInfo { who: 5, deposit: 30 }; + + assert_ok!(CollatorSelection::register_as_candidate(RuntimeOrigin::signed(3))); assert_ok!(CollatorSelection::register_as_candidate(RuntimeOrigin::signed(4))); + assert_ok!(CollatorSelection::register_as_candidate(RuntimeOrigin::signed(5))); + assert_ok!(CollatorSelection::update_bond(RuntimeOrigin::signed(5), 30)); + assert_ok!(CollatorSelection::update_bond(RuntimeOrigin::signed(4), 20)); + assert_eq!( + >::get(), + vec![candidate_3.clone(), candidate_4.clone(), candidate_5.clone()] + ); + + // can decrease with multiple candidates + assert_ok!(CollatorSelection::set_candidacy_bond( + RuntimeOrigin::signed(RootAccount::get()), + 7 + )); + assert_eq!(CollatorSelection::candidacy_bond(), 7); + assert_eq!( + >::get(), + vec![candidate_3.clone(), candidate_4.clone(), candidate_5.clone()] + ); + // can increase up to initial deposit + assert_ok!(CollatorSelection::set_candidacy_bond( + RuntimeOrigin::signed(RootAccount::get()), + 10 + )); + assert_eq!(CollatorSelection::candidacy_bond(), 10); + assert_eq!( + >::get(), + vec![candidate_3.clone(), candidate_4.clone(), candidate_5.clone()] + ); + + // can increase to 4's deposit, should kick 3 + assert_ok!(CollatorSelection::set_candidacy_bond( + RuntimeOrigin::signed(RootAccount::get()), + 20 + )); + assert_eq!(CollatorSelection::candidacy_bond(), 20); + assert_eq!( + >::get(), + vec![candidate_4.clone(), candidate_5.clone()] + ); + + // can increase past 4's deposit, should kick 4 + assert_ok!(CollatorSelection::set_candidacy_bond( + RuntimeOrigin::signed(RootAccount::get()), + 25 + )); + assert_eq!(CollatorSelection::candidacy_bond(), 25); + assert_eq!(>::get(), vec![candidate_5.clone()]); + + // lowering the minimum deposit should have no effect + assert_ok!(CollatorSelection::set_candidacy_bond( + RuntimeOrigin::signed(RootAccount::get()), + 5 + )); + assert_eq!(CollatorSelection::candidacy_bond(), 5); + assert_eq!(>::get(), vec![candidate_5.clone()]); + + // add 3 and 4 back but with higher deposits than minimum + assert_ok!(CollatorSelection::register_as_candidate(RuntimeOrigin::signed(3))); + assert_ok!(CollatorSelection::register_as_candidate(RuntimeOrigin::signed(4))); + assert_ok!(CollatorSelection::update_bond(RuntimeOrigin::signed(3), 10)); + assert_ok!(CollatorSelection::update_bond(RuntimeOrigin::signed(4), 20)); + assert_eq!( + >::get(), + vec![candidate_3.clone(), candidate_4.clone(), candidate_5.clone()] + ); + + // can increase the deposit above the current max in the list, all candidates should be + // kicked + assert_ok!(CollatorSelection::set_candidacy_bond( + RuntimeOrigin::signed(RootAccount::get()), + 40 + )); + assert_eq!(CollatorSelection::candidacy_bond(), 40); + assert!(>::get().is_empty()); + }); +} + +#[test] +fn cannot_register_candidate_if_too_many() { + new_test_ext().execute_with(|| { + >::put(1); + + // MaxCandidates: u32 = 20 + // Aside from 3, 4, and 5, create enough accounts to have 21 potential + // candidates. 
+ for i in 6..=23 { + Balances::make_free_balance_be(&i, 100); + let key = MockSessionKeys { aura: UintAuthorityId(i) }; + Session::set_keys(RuntimeOrigin::signed(i).into(), key, Vec::new()).unwrap(); + } + + for c in 3..=22 { + assert_ok!(CollatorSelection::register_as_candidate(RuntimeOrigin::signed(c))); + } - // but no more assert_noop!( - CollatorSelection::register_as_candidate(RuntimeOrigin::signed(5)), + CollatorSelection::register_as_candidate(RuntimeOrigin::signed(23)), Error::::TooManyCandidates, ); }) @@ -310,7 +500,7 @@ fn cannot_register_candidate_if_too_many() { #[test] fn cannot_unregister_candidate_if_too_few() { new_test_ext().execute_with(|| { - assert_eq!(CollatorSelection::candidates(), Vec::new()); + assert_eq!(>::get().iter().count(), 0); assert_eq!(CollatorSelection::invulnerables(), vec![1, 2]); assert_ok!(CollatorSelection::remove_invulnerable( RuntimeOrigin::signed(RootAccount::get()), @@ -368,8 +558,12 @@ fn cannot_register_dupe_candidate() { new_test_ext().execute_with(|| { // can add 3 as candidate assert_ok!(CollatorSelection::register_as_candidate(RuntimeOrigin::signed(3))); + // tuple of (id, deposit). let addition = CandidateInfo { who: 3, deposit: 10 }; - assert_eq!(CollatorSelection::candidates(), vec![addition]); + assert_eq!( + >::get().iter().cloned().collect::>(), + vec![addition] + ); assert_eq!(CollatorSelection::last_authored_block(3), 10); assert_eq!(Balances::free_balance(3), 90); @@ -404,7 +598,8 @@ fn register_as_candidate_works() { // given assert_eq!(CollatorSelection::desired_candidates(), 2); assert_eq!(CollatorSelection::candidacy_bond(), 10); - assert_eq!(CollatorSelection::candidates(), Vec::new()); + + assert_eq!(>::get().iter().count(), 0); assert_eq!(CollatorSelection::invulnerables(), vec![1, 2]); // take two endowed, non-invulnerables accounts. @@ -417,140 +612,888 @@ fn register_as_candidate_works() { assert_eq!(Balances::free_balance(3), 90); assert_eq!(Balances::free_balance(4), 90); - assert_eq!(CollatorSelection::candidates().len(), 2); + assert_eq!(>::get().iter().count(), 2); }); } #[test] -fn leave_intent() { +fn cannot_take_candidate_slot_if_invulnerable() { new_test_ext().execute_with(|| { - // register a candidate. - assert_ok!(CollatorSelection::register_as_candidate(RuntimeOrigin::signed(3))); - assert_eq!(Balances::free_balance(3), 90); - - // register too so can leave above min candidates - assert_ok!(CollatorSelection::register_as_candidate(RuntimeOrigin::signed(5))); - assert_eq!(Balances::free_balance(5), 90); + assert_eq!(CollatorSelection::invulnerables(), vec![1, 2]); - // cannot leave if not candidate. + // can't 1 because it is invulnerable. 
assert_noop!( - CollatorSelection::leave_intent(RuntimeOrigin::signed(4)), - Error::::NotCandidate + CollatorSelection::take_candidate_slot(RuntimeOrigin::signed(1), 50u64.into(), 2), + Error::::AlreadyInvulnerable, ); - - // bond is returned - assert_ok!(CollatorSelection::leave_intent(RuntimeOrigin::signed(3))); - assert_eq!(Balances::free_balance(3), 100); - assert_eq!(CollatorSelection::last_authored_block(3), 0); - }); + }) } #[test] -fn authorship_event_handler() { +fn cannot_take_candidate_slot_if_keys_not_registered() { new_test_ext().execute_with(|| { - // put 100 in the pot + 5 for ED - Balances::make_free_balance_be(&CollatorSelection::account_id(), 105); + assert_ok!(CollatorSelection::register_as_candidate(RuntimeOrigin::signed(3))); + assert_noop!( + CollatorSelection::take_candidate_slot(RuntimeOrigin::signed(42), 50u64.into(), 3), + Error::::ValidatorNotRegistered + ); + }) +} - // 4 is the default author. - assert_eq!(Balances::free_balance(4), 100); +#[test] +fn cannot_take_candidate_slot_if_duplicate() { + new_test_ext().execute_with(|| { + // can add 3 as candidate + assert_ok!(CollatorSelection::register_as_candidate(RuntimeOrigin::signed(3))); assert_ok!(CollatorSelection::register_as_candidate(RuntimeOrigin::signed(4))); - // triggers `note_author` - Authorship::on_initialize(1); + // tuple of (id, deposit). + let candidate_3 = CandidateInfo { who: 3, deposit: 10 }; + let candidate_4 = CandidateInfo { who: 4, deposit: 10 }; + let actual_candidates = + >::get().iter().cloned().collect::>(); + assert_eq!(actual_candidates, vec![candidate_4, candidate_3]); + assert_eq!(CollatorSelection::last_authored_block(3), 10); + assert_eq!(CollatorSelection::last_authored_block(4), 10); + assert_eq!(Balances::free_balance(3), 90); - let collator = CandidateInfo { who: 4, deposit: 10 }; + // but no more + assert_noop!( + CollatorSelection::take_candidate_slot(RuntimeOrigin::signed(3), 50u64.into(), 4), + Error::::AlreadyCandidate, + ); + }) +} - assert_eq!(CollatorSelection::candidates(), vec![collator]); - assert_eq!(CollatorSelection::last_authored_block(4), 0); +#[test] +fn cannot_take_candidate_slot_if_target_invalid() { + new_test_ext().execute_with(|| { + // can add 3 as candidate + assert_ok!(CollatorSelection::register_as_candidate(RuntimeOrigin::signed(3))); + // tuple of (id, deposit). + let candidate_3 = CandidateInfo { who: 3, deposit: 10 }; + assert_eq!( + >::get().iter().cloned().collect::>(), + vec![candidate_3] + ); + assert_eq!(CollatorSelection::last_authored_block(3), 10); + assert_eq!(Balances::free_balance(3), 90); + assert_eq!(Balances::free_balance(4), 100); - // half of the pot goes to the collator who's the author (4 in tests). - assert_eq!(Balances::free_balance(4), 140); - // half + ED stays. - assert_eq!(Balances::free_balance(CollatorSelection::account_id()), 55); - }); + assert_noop!( + CollatorSelection::take_candidate_slot(RuntimeOrigin::signed(4), 50u64.into(), 5), + Error::::TargetIsNotCandidate, + ); + }) } #[test] -fn fees_edgecases() { +fn cannot_take_candidate_slot_if_poor() { new_test_ext().execute_with(|| { - // Nothing panics, no reward when no ED in balance - Authorship::on_initialize(1); - // put some money into the pot at ED - Balances::make_free_balance_be(&CollatorSelection::account_id(), 5); - // 4 is the default author. 
- assert_eq!(Balances::free_balance(4), 100); assert_ok!(CollatorSelection::register_as_candidate(RuntimeOrigin::signed(4))); - // triggers `note_author` - Authorship::on_initialize(1); + assert_eq!(Balances::free_balance(3), 100); + assert_eq!(Balances::free_balance(33), 0); - let collator = CandidateInfo { who: 4, deposit: 10 }; + // works + assert_ok!(CollatorSelection::take_candidate_slot( + RuntimeOrigin::signed(3), + 20u64.into(), + 4 + )); - assert_eq!(CollatorSelection::candidates(), vec![collator]); - assert_eq!(CollatorSelection::last_authored_block(4), 0); - // Nothing received - assert_eq!(Balances::free_balance(4), 90); - // all fee stays - assert_eq!(Balances::free_balance(CollatorSelection::account_id()), 5); + // poor + assert_noop!( + CollatorSelection::take_candidate_slot(RuntimeOrigin::signed(33), 30u64.into(), 3), + BalancesError::::InsufficientBalance, + ); }); } #[test] -fn session_management_works() { +fn cannot_take_candidate_slot_if_insufficient_deposit() { new_test_ext().execute_with(|| { - initialize_to_block(1); - - assert_eq!(SessionChangeBlock::get(), 0); - assert_eq!(SessionHandlerCollators::get(), vec![1, 2]); - - initialize_to_block(4); - - assert_eq!(SessionChangeBlock::get(), 0); - assert_eq!(SessionHandlerCollators::get(), vec![1, 2]); - - // add a new collator assert_ok!(CollatorSelection::register_as_candidate(RuntimeOrigin::signed(3))); - - // session won't see this. - assert_eq!(SessionHandlerCollators::get(), vec![1, 2]); - // but we have a new candidate. - assert_eq!(CollatorSelection::candidates().len(), 1); - - initialize_to_block(10); - assert_eq!(SessionChangeBlock::get(), 10); - // pallet-session has 1 session delay; current validators are the same. - assert_eq!(Session::validators(), vec![1, 2]); - // queued ones are changed, and now we have 3. - assert_eq!(Session::queued_keys().len(), 3); - // session handlers (aura, et. al.) cannot see this yet. - assert_eq!(SessionHandlerCollators::get(), vec![1, 2]); - - initialize_to_block(20); - assert_eq!(SessionChangeBlock::get(), 20); - // changed are now reflected to session handlers. 
- assert_eq!(SessionHandlerCollators::get(), vec![1, 2, 3]); + assert_ok!(CollatorSelection::update_bond(RuntimeOrigin::signed(3), 60u64.into())); + assert_eq!(Balances::free_balance(3), 40); + assert_eq!(Balances::free_balance(4), 100); + assert_noop!( + CollatorSelection::take_candidate_slot(RuntimeOrigin::signed(4), 5u64.into(), 3), + Error::::InsufficientBond, + ); + assert_eq!(Balances::free_balance(3), 40); + assert_eq!(Balances::free_balance(4), 100); }); } #[test] -fn kick_mechanism() { +fn cannot_take_candidate_slot_if_deposit_less_than_target() { new_test_ext().execute_with(|| { - // add a new collator assert_ok!(CollatorSelection::register_as_candidate(RuntimeOrigin::signed(3))); - assert_ok!(CollatorSelection::register_as_candidate(RuntimeOrigin::signed(4))); - initialize_to_block(10); - assert_eq!(CollatorSelection::candidates().len(), 2); - initialize_to_block(20); - assert_eq!(SessionChangeBlock::get(), 20); - // 4 authored this block, gets to stay 3 was kicked - assert_eq!(CollatorSelection::candidates().len(), 1); - // 3 will be kicked after 1 session delay - assert_eq!(SessionHandlerCollators::get(), vec![1, 2, 3, 4]); - let collator = CandidateInfo { who: 4, deposit: 10 }; - assert_eq!(CollatorSelection::candidates(), vec![collator]); - assert_eq!(CollatorSelection::last_authored_block(4), 20); - initialize_to_block(30); - // 3 gets kicked after 1 session delay - assert_eq!(SessionHandlerCollators::get(), vec![1, 2, 4]); - // kicked collator gets funds back + assert_ok!(CollatorSelection::update_bond(RuntimeOrigin::signed(3), 60u64.into())); + assert_eq!(Balances::free_balance(3), 40); + assert_eq!(Balances::free_balance(4), 100); + assert_noop!( + CollatorSelection::take_candidate_slot(RuntimeOrigin::signed(4), 20u64.into(), 3), + Error::::InsufficientBond, + ); + assert_eq!(Balances::free_balance(3), 40); + assert_eq!(Balances::free_balance(4), 100); + }); +} + +#[test] +fn take_candidate_slot_works() { + new_test_ext().execute_with(|| { + // given + assert_eq!(CollatorSelection::desired_candidates(), 2); + assert_eq!(CollatorSelection::candidacy_bond(), 10); + + assert_eq!(>::get().iter().count(), 0); + assert_eq!(CollatorSelection::invulnerables(), vec![1, 2]); + + // take two endowed, non-invulnerables accounts. + assert_eq!(Balances::free_balance(3), 100); + assert_eq!(Balances::free_balance(4), 100); + assert_eq!(Balances::free_balance(5), 100); + + assert_ok!(CollatorSelection::register_as_candidate(RuntimeOrigin::signed(3))); + assert_ok!(CollatorSelection::register_as_candidate(RuntimeOrigin::signed(4))); + assert_ok!(CollatorSelection::register_as_candidate(RuntimeOrigin::signed(5))); + + assert_eq!(Balances::free_balance(3), 90); + assert_eq!(Balances::free_balance(4), 90); + assert_eq!(Balances::free_balance(5), 90); + + assert_eq!(>::get().iter().count(), 3); + + Balances::make_free_balance_be(&6, 100); + let key = MockSessionKeys { aura: UintAuthorityId(6) }; + Session::set_keys(RuntimeOrigin::signed(6).into(), key, Vec::new()).unwrap(); + + assert_ok!(CollatorSelection::take_candidate_slot( + RuntimeOrigin::signed(6), + 50u64.into(), + 4 + )); + + assert_eq!(Balances::free_balance(3), 90); + assert_eq!(Balances::free_balance(4), 100); + assert_eq!(Balances::free_balance(5), 90); + assert_eq!(Balances::free_balance(6), 50); + + // tuple of (id, deposit). 
+ let candidate_3 = CandidateInfo { who: 3, deposit: 10 }; + let candidate_6 = CandidateInfo { who: 6, deposit: 50 }; + let candidate_5 = CandidateInfo { who: 5, deposit: 10 }; + let mut actual_candidates = + >::get().iter().cloned().collect::>(); + actual_candidates.sort_by(|info_1, info_2| info_1.deposit.cmp(&info_2.deposit)); + assert_eq!( + >::get().iter().cloned().collect::>(), + vec![candidate_5, candidate_3, candidate_6] + ); + }); +} + +#[test] +fn increase_candidacy_bond_non_candidate_account() { + new_test_ext().execute_with(|| { + // given + assert_eq!(CollatorSelection::desired_candidates(), 2); + assert_eq!(CollatorSelection::candidacy_bond(), 10); + + assert_eq!(>::get().iter().count(), 0); + assert_eq!(CollatorSelection::invulnerables(), vec![1, 2]); + + assert_ok!(CollatorSelection::register_as_candidate(RuntimeOrigin::signed(3))); + assert_ok!(CollatorSelection::register_as_candidate(RuntimeOrigin::signed(4))); + + assert_noop!( + CollatorSelection::update_bond(RuntimeOrigin::signed(5), 20), + Error::::NotCandidate + ); + }); +} + +#[test] +fn increase_candidacy_bond_insufficient_balance() { + new_test_ext().execute_with(|| { + // given + assert_eq!(CollatorSelection::desired_candidates(), 2); + assert_eq!(CollatorSelection::candidacy_bond(), 10); + + assert_eq!(>::get().iter().count(), 0); + assert_eq!(CollatorSelection::invulnerables(), vec![1, 2]); + + // take two endowed, non-invulnerables accounts. + assert_eq!(Balances::free_balance(3), 100); + assert_eq!(Balances::free_balance(4), 100); + assert_eq!(Balances::free_balance(5), 100); + + assert_ok!(CollatorSelection::register_as_candidate(RuntimeOrigin::signed(3))); + assert_ok!(CollatorSelection::register_as_candidate(RuntimeOrigin::signed(4))); + assert_ok!(CollatorSelection::register_as_candidate(RuntimeOrigin::signed(5))); + + assert_eq!(Balances::free_balance(3), 90); + assert_eq!(Balances::free_balance(4), 90); + assert_eq!(Balances::free_balance(5), 90); + + assert_noop!( + CollatorSelection::update_bond(RuntimeOrigin::signed(3), 110), + BalancesError::::InsufficientBalance + ); + + assert_eq!(Balances::free_balance(3), 90); + }); +} + +#[test] +fn increase_candidacy_bond_works() { + new_test_ext().execute_with(|| { + // given + assert_eq!(CollatorSelection::desired_candidates(), 2); + assert_eq!(CollatorSelection::candidacy_bond(), 10); + + assert_eq!(>::get().iter().count(), 0); + assert_eq!(CollatorSelection::invulnerables(), vec![1, 2]); + + // take three endowed, non-invulnerables accounts. 
+ assert_eq!(Balances::free_balance(3), 100); + assert_eq!(Balances::free_balance(4), 100); + assert_eq!(Balances::free_balance(5), 100); + + assert_ok!(CollatorSelection::register_as_candidate(RuntimeOrigin::signed(3))); + assert_ok!(CollatorSelection::register_as_candidate(RuntimeOrigin::signed(4))); + assert_ok!(CollatorSelection::register_as_candidate(RuntimeOrigin::signed(5))); + + assert_eq!(Balances::free_balance(3), 90); + assert_eq!(Balances::free_balance(4), 90); + assert_eq!(Balances::free_balance(5), 90); + + assert_ok!(CollatorSelection::update_bond(RuntimeOrigin::signed(3), 20)); + assert_ok!(CollatorSelection::update_bond(RuntimeOrigin::signed(4), 30)); + assert_ok!(CollatorSelection::update_bond(RuntimeOrigin::signed(5), 40)); + + assert_eq!(>::get().iter().count(), 3); + assert_eq!(Balances::free_balance(3), 80); + assert_eq!(Balances::free_balance(4), 70); + assert_eq!(Balances::free_balance(5), 60); + + assert_ok!(CollatorSelection::update_bond(RuntimeOrigin::signed(3), 40)); + assert_ok!(CollatorSelection::update_bond(RuntimeOrigin::signed(4), 60)); + + assert_eq!(>::get().iter().count(), 3); + assert_eq!(Balances::free_balance(3), 60); + assert_eq!(Balances::free_balance(4), 40); + assert_eq!(Balances::free_balance(5), 60); + }); +} + +#[test] +fn decrease_candidacy_bond_non_candidate_account() { + new_test_ext().execute_with(|| { + // given + assert_eq!(CollatorSelection::desired_candidates(), 2); + assert_eq!(CollatorSelection::candidacy_bond(), 10); + + assert_eq!(>::get().iter().count(), 0); + assert_eq!(CollatorSelection::invulnerables(), vec![1, 2]); + + assert_ok!(CollatorSelection::register_as_candidate(RuntimeOrigin::signed(3))); + assert_ok!(CollatorSelection::register_as_candidate(RuntimeOrigin::signed(4))); + + assert_eq!(Balances::free_balance(5), 100); + assert_noop!( + CollatorSelection::update_bond(RuntimeOrigin::signed(5), 10), + Error::::NotCandidate + ); + assert_eq!(Balances::free_balance(5), 100); + }); +} + +#[test] +fn decrease_candidacy_bond_insufficient_funds() { + new_test_ext().execute_with(|| { + // given + assert_eq!(CollatorSelection::desired_candidates(), 2); + assert_eq!(CollatorSelection::candidacy_bond(), 10); + + assert_eq!(>::get().iter().count(), 0); + assert_eq!(CollatorSelection::invulnerables(), vec![1, 2]); + + // take two endowed, non-invulnerables accounts. 
+ assert_eq!(Balances::free_balance(3), 100); + assert_eq!(Balances::free_balance(4), 100); + assert_eq!(Balances::free_balance(5), 100); + + assert_ok!(CollatorSelection::register_as_candidate(RuntimeOrigin::signed(3))); + assert_ok!(CollatorSelection::register_as_candidate(RuntimeOrigin::signed(4))); + assert_ok!(CollatorSelection::register_as_candidate(RuntimeOrigin::signed(5))); + + assert_ok!(CollatorSelection::update_bond(RuntimeOrigin::signed(3), 60)); + assert_ok!(CollatorSelection::update_bond(RuntimeOrigin::signed(4), 60)); + assert_ok!(CollatorSelection::update_bond(RuntimeOrigin::signed(5), 60)); + + assert_eq!(Balances::free_balance(3), 40); + assert_eq!(Balances::free_balance(4), 40); + assert_eq!(Balances::free_balance(5), 40); + + assert_noop!( + CollatorSelection::update_bond(RuntimeOrigin::signed(3), 0), + Error::::DepositTooLow + ); + + assert_noop!( + CollatorSelection::update_bond(RuntimeOrigin::signed(4), 5), + Error::::DepositTooLow + ); + + assert_noop!( + CollatorSelection::update_bond(RuntimeOrigin::signed(5), 9), + Error::::DepositTooLow + ); + + assert_eq!(Balances::free_balance(3), 40); + assert_eq!(Balances::free_balance(4), 40); + assert_eq!(Balances::free_balance(5), 40); + }); +} + +#[test] +fn decrease_candidacy_bond_occupying_top_slot() { + new_test_ext().execute_with(|| { + assert_eq!(CollatorSelection::desired_candidates(), 2); + // Register 3 candidates. + assert_ok!(CollatorSelection::register_as_candidate(RuntimeOrigin::signed(3))); + assert_ok!(CollatorSelection::register_as_candidate(RuntimeOrigin::signed(4))); + assert_ok!(CollatorSelection::register_as_candidate(RuntimeOrigin::signed(5))); + // And update their bids. + assert_ok!(CollatorSelection::update_bond(RuntimeOrigin::signed(3), 30u64.into())); + assert_ok!(CollatorSelection::update_bond(RuntimeOrigin::signed(4), 30u64.into())); + assert_ok!(CollatorSelection::update_bond(RuntimeOrigin::signed(5), 60u64.into())); + + // tuple of (id, deposit). + let candidate_3 = CandidateInfo { who: 3, deposit: 30 }; + let candidate_4 = CandidateInfo { who: 4, deposit: 30 }; + let candidate_5 = CandidateInfo { who: 5, deposit: 60 }; + assert_eq!( + >::get().iter().cloned().collect::>(), + vec![candidate_4, candidate_3, candidate_5] + ); + + // Candidates 5 and 3 can't decrease their deposits because they are the 2 top candidates. + assert_noop!( + CollatorSelection::update_bond(RuntimeOrigin::signed(5), 29), + Error::::InvalidUnreserve, + ); + assert_noop!( + CollatorSelection::update_bond(RuntimeOrigin::signed(3), 29), + Error::::InvalidUnreserve, + ); + // But candidate 4 should have be able to decrease the deposit up to the minimum. + assert_ok!(CollatorSelection::update_bond(RuntimeOrigin::signed(4), 29u64.into())); + + // Make candidate 4 outbid candidate 3, taking their spot as the second highest bid. + assert_ok!(CollatorSelection::update_bond(RuntimeOrigin::signed(4), 35u64.into())); + + // tuple of (id, deposit). + let candidate_3 = CandidateInfo { who: 3, deposit: 30 }; + let candidate_4 = CandidateInfo { who: 4, deposit: 35 }; + let candidate_5 = CandidateInfo { who: 5, deposit: 60 }; + assert_eq!( + >::get().iter().cloned().collect::>(), + vec![candidate_3, candidate_4, candidate_5] + ); + + // Now candidates 5 and 4 are the 2 top candidates, so they can't decrease their deposits. 
+ assert_noop!( + CollatorSelection::update_bond(RuntimeOrigin::signed(5), 34), + Error::::InvalidUnreserve, + ); + assert_noop!( + CollatorSelection::update_bond(RuntimeOrigin::signed(4), 34), + Error::::InvalidUnreserve, + ); + // Candidate 3 should have be able to decrease the deposit up to the minimum now that + // they've fallen out of the top spots. + assert_ok!(CollatorSelection::update_bond(RuntimeOrigin::signed(3), 10u64.into())); + }); +} + +#[test] +fn decrease_candidacy_bond_works() { + new_test_ext().execute_with(|| { + // given + assert_eq!(CollatorSelection::desired_candidates(), 2); + assert_eq!(CollatorSelection::candidacy_bond(), 10); + + assert_eq!(>::get().iter().count(), 0); + assert_eq!(CollatorSelection::invulnerables(), vec![1, 2]); + + // take three endowed, non-invulnerables accounts. + assert_eq!(Balances::free_balance(3), 100); + assert_eq!(Balances::free_balance(4), 100); + assert_eq!(Balances::free_balance(5), 100); + + assert_ok!(CollatorSelection::register_as_candidate(RuntimeOrigin::signed(3))); + assert_ok!(CollatorSelection::register_as_candidate(RuntimeOrigin::signed(4))); + assert_ok!(CollatorSelection::register_as_candidate(RuntimeOrigin::signed(5))); + + assert_eq!(Balances::free_balance(3), 90); + assert_eq!(Balances::free_balance(4), 90); + assert_eq!(Balances::free_balance(5), 90); + + assert_ok!(CollatorSelection::update_bond(RuntimeOrigin::signed(3), 20)); + assert_ok!(CollatorSelection::update_bond(RuntimeOrigin::signed(4), 30)); + assert_ok!(CollatorSelection::update_bond(RuntimeOrigin::signed(5), 40)); + + assert_eq!(>::get().iter().count(), 3); + assert_eq!(Balances::free_balance(3), 80); + assert_eq!(Balances::free_balance(4), 70); + assert_eq!(Balances::free_balance(5), 60); + + assert_ok!(CollatorSelection::update_bond(RuntimeOrigin::signed(3), 10)); + + assert_eq!(>::get().iter().count(), 3); + assert_eq!(Balances::free_balance(3), 90); + assert_eq!(Balances::free_balance(4), 70); + assert_eq!(Balances::free_balance(5), 60); + }); +} + +#[test] +fn update_candidacy_bond_with_identical_amount() { + new_test_ext().execute_with(|| { + // given + assert_eq!(CollatorSelection::desired_candidates(), 2); + assert_eq!(CollatorSelection::candidacy_bond(), 10); + + assert_eq!(>::get().iter().count(), 0); + assert_eq!(CollatorSelection::invulnerables(), vec![1, 2]); + + // take three endowed, non-invulnerables accounts. 
+ assert_eq!(Balances::free_balance(3), 100); + assert_eq!(Balances::free_balance(4), 100); + assert_eq!(Balances::free_balance(5), 100); + + assert_ok!(CollatorSelection::register_as_candidate(RuntimeOrigin::signed(3))); + assert_ok!(CollatorSelection::register_as_candidate(RuntimeOrigin::signed(4))); + assert_ok!(CollatorSelection::register_as_candidate(RuntimeOrigin::signed(5))); + + assert_eq!(Balances::free_balance(3), 90); + assert_eq!(Balances::free_balance(4), 90); + assert_eq!(Balances::free_balance(5), 90); + + assert_ok!(CollatorSelection::update_bond(RuntimeOrigin::signed(3), 20)); + assert_ok!(CollatorSelection::update_bond(RuntimeOrigin::signed(4), 30)); + assert_ok!(CollatorSelection::update_bond(RuntimeOrigin::signed(5), 40)); + + assert_eq!(>::get().iter().count(), 3); + assert_eq!(Balances::free_balance(3), 80); + assert_eq!(Balances::free_balance(4), 70); + assert_eq!(Balances::free_balance(5), 60); + + assert_noop!( + CollatorSelection::update_bond(RuntimeOrigin::signed(3), 20), + Error::::IdenticalDeposit + ); + assert_eq!(Balances::free_balance(3), 80); + }); +} + +#[test] +fn candidate_list_works() { + new_test_ext().execute_with(|| { + // given + assert_eq!(CollatorSelection::desired_candidates(), 2); + assert_eq!(CollatorSelection::candidacy_bond(), 10); + + assert_eq!(>::get().iter().count(), 0); + assert_eq!(CollatorSelection::invulnerables(), vec![1, 2]); + + // take three endowed, non-invulnerables accounts. + assert_eq!(Balances::free_balance(3), 100); + assert_eq!(Balances::free_balance(4), 100); + assert_eq!(Balances::free_balance(5), 100); + + assert_ok!(CollatorSelection::register_as_candidate(RuntimeOrigin::signed(3))); + assert_ok!(CollatorSelection::register_as_candidate(RuntimeOrigin::signed(4))); + assert_ok!(CollatorSelection::register_as_candidate(RuntimeOrigin::signed(5))); + + assert_ok!(CollatorSelection::update_bond(RuntimeOrigin::signed(5), 20)); + + assert_ok!(CollatorSelection::update_bond(RuntimeOrigin::signed(3), 30)); + + assert_ok!(CollatorSelection::update_bond(RuntimeOrigin::signed(4), 25)); + + assert_ok!(CollatorSelection::update_bond(RuntimeOrigin::signed(5), 10)); + + let candidate_3 = CandidateInfo { who: 3, deposit: 30 }; + let candidate_4 = CandidateInfo { who: 4, deposit: 25 }; + let candidate_5 = CandidateInfo { who: 5, deposit: 10 }; + assert_eq!( + >::get().iter().cloned().collect::>(), + vec![candidate_5, candidate_4, candidate_3] + ); + }); +} + +#[test] +fn leave_intent() { + new_test_ext().execute_with(|| { + // register a candidate. + assert_ok!(CollatorSelection::register_as_candidate(RuntimeOrigin::signed(3))); + assert_eq!(Balances::free_balance(3), 90); + + // register too so can leave above min candidates + assert_ok!(CollatorSelection::register_as_candidate(RuntimeOrigin::signed(5))); + assert_eq!(Balances::free_balance(5), 90); + + // cannot leave if not candidate. + assert_noop!( + CollatorSelection::leave_intent(RuntimeOrigin::signed(4)), + Error::::NotCandidate + ); + + // bond is returned + assert_ok!(CollatorSelection::leave_intent(RuntimeOrigin::signed(3))); + assert_eq!(Balances::free_balance(3), 100); + assert_eq!(CollatorSelection::last_authored_block(3), 0); + }); +} + +#[test] +fn authorship_event_handler() { + new_test_ext().execute_with(|| { + // put 100 in the pot + 5 for ED + Balances::make_free_balance_be(&CollatorSelection::account_id(), 105); + + // 4 is the default author. 
+ assert_eq!(Balances::free_balance(4), 100); + assert_ok!(CollatorSelection::register_as_candidate(RuntimeOrigin::signed(4))); + // triggers `note_author` + Authorship::on_initialize(1); + + // tuple of (id, deposit). + let collator = CandidateInfo { who: 4, deposit: 10 }; + + assert_eq!( + >::get().iter().cloned().collect::>(), + vec![collator] + ); + assert_eq!(CollatorSelection::last_authored_block(4), 0); + + // half of the pot goes to the collator who's the author (4 in tests). + assert_eq!(Balances::free_balance(4), 140); + // half + ED stays. + assert_eq!(Balances::free_balance(CollatorSelection::account_id()), 55); + }); +} + +#[test] +fn fees_edgecases() { + new_test_ext().execute_with(|| { + // Nothing panics, no reward when no ED in balance + Authorship::on_initialize(1); + // put some money into the pot at ED + Balances::make_free_balance_be(&CollatorSelection::account_id(), 5); + // 4 is the default author. + assert_eq!(Balances::free_balance(4), 100); + assert_ok!(CollatorSelection::register_as_candidate(RuntimeOrigin::signed(4))); + // triggers `note_author` + Authorship::on_initialize(1); + + // tuple of (id, deposit). + let collator = CandidateInfo { who: 4, deposit: 10 }; + + assert_eq!( + >::get().iter().cloned().collect::>(), + vec![collator] + ); + assert_eq!(CollatorSelection::last_authored_block(4), 0); + // Nothing received + assert_eq!(Balances::free_balance(4), 90); + // all fee stays + assert_eq!(Balances::free_balance(CollatorSelection::account_id()), 5); + }); +} + +#[test] +fn session_management_single_candidate() { + new_test_ext().execute_with(|| { + initialize_to_block(1); + + assert_eq!(SessionChangeBlock::get(), 0); + assert_eq!(SessionHandlerCollators::get(), vec![1, 2]); + + initialize_to_block(4); + + assert_eq!(SessionChangeBlock::get(), 0); + assert_eq!(SessionHandlerCollators::get(), vec![1, 2]); + + // add a new collator + assert_ok!(CollatorSelection::register_as_candidate(RuntimeOrigin::signed(3))); + + // session won't see this. + assert_eq!(SessionHandlerCollators::get(), vec![1, 2]); + // but we have a new candidate. + assert_eq!(>::get().iter().count(), 1); + + initialize_to_block(10); + assert_eq!(SessionChangeBlock::get(), 10); + // pallet-session has 1 session delay; current validators are the same. + assert_eq!(Session::validators(), vec![1, 2]); + // queued ones are changed, and now we have 3. + assert_eq!(Session::queued_keys().len(), 3); + // session handlers (aura, et. al.) cannot see this yet. + assert_eq!(SessionHandlerCollators::get(), vec![1, 2]); + + initialize_to_block(20); + assert_eq!(SessionChangeBlock::get(), 20); + // changed are now reflected to session handlers. + assert_eq!(SessionHandlerCollators::get(), vec![1, 2, 3]); + }); +} + +#[test] +fn session_management_max_candidates() { + new_test_ext().execute_with(|| { + initialize_to_block(1); + + assert_eq!(SessionChangeBlock::get(), 0); + assert_eq!(SessionHandlerCollators::get(), vec![1, 2]); + + initialize_to_block(4); + + assert_eq!(SessionChangeBlock::get(), 0); + assert_eq!(SessionHandlerCollators::get(), vec![1, 2]); + + assert_ok!(CollatorSelection::register_as_candidate(RuntimeOrigin::signed(3))); + assert_ok!(CollatorSelection::register_as_candidate(RuntimeOrigin::signed(4))); + assert_ok!(CollatorSelection::register_as_candidate(RuntimeOrigin::signed(5))); + + // session won't see this. + assert_eq!(SessionHandlerCollators::get(), vec![1, 2]); + // but we have a new candidate. 
+ assert_eq!(>::get().iter().count(), 3); + + initialize_to_block(10); + assert_eq!(SessionChangeBlock::get(), 10); + // pallet-session has 1 session delay; current validators are the same. + assert_eq!(Session::validators(), vec![1, 2]); + // queued ones are changed, and now we have 4. + assert_eq!(Session::queued_keys().len(), 4); + // session handlers (aura, et. al.) cannot see this yet. + assert_eq!(SessionHandlerCollators::get(), vec![1, 2]); + + initialize_to_block(20); + assert_eq!(SessionChangeBlock::get(), 20); + // changed are now reflected to session handlers. + assert_eq!(SessionHandlerCollators::get(), vec![1, 2, 3, 4]); + }); +} + +#[test] +fn session_management_increase_bid_with_list_update() { + new_test_ext().execute_with(|| { + initialize_to_block(1); + + assert_eq!(SessionChangeBlock::get(), 0); + assert_eq!(SessionHandlerCollators::get(), vec![1, 2]); + + initialize_to_block(4); + + assert_eq!(SessionChangeBlock::get(), 0); + assert_eq!(SessionHandlerCollators::get(), vec![1, 2]); + + assert_ok!(CollatorSelection::register_as_candidate(RuntimeOrigin::signed(3))); + assert_ok!(CollatorSelection::register_as_candidate(RuntimeOrigin::signed(4))); + assert_ok!(CollatorSelection::register_as_candidate(RuntimeOrigin::signed(5))); + assert_ok!(CollatorSelection::update_bond(RuntimeOrigin::signed(5), 60)); + + // session won't see this. + assert_eq!(SessionHandlerCollators::get(), vec![1, 2]); + // but we have a new candidate. + assert_eq!(>::get().iter().count(), 3); + + initialize_to_block(10); + assert_eq!(SessionChangeBlock::get(), 10); + // pallet-session has 1 session delay; current validators are the same. + assert_eq!(Session::validators(), vec![1, 2]); + // queued ones are changed, and now we have 4. + assert_eq!(Session::queued_keys().len(), 4); + // session handlers (aura, et. al.) cannot see this yet. + assert_eq!(SessionHandlerCollators::get(), vec![1, 2]); + + initialize_to_block(20); + assert_eq!(SessionChangeBlock::get(), 20); + // changed are now reflected to session handlers. + assert_eq!(SessionHandlerCollators::get(), vec![1, 2, 5, 3]); + }); +} + +#[test] +fn session_management_candidate_list_eager_sort() { + new_test_ext().execute_with(|| { + initialize_to_block(1); + + assert_eq!(SessionChangeBlock::get(), 0); + assert_eq!(SessionHandlerCollators::get(), vec![1, 2]); + + initialize_to_block(4); + + assert_eq!(SessionChangeBlock::get(), 0); + assert_eq!(SessionHandlerCollators::get(), vec![1, 2]); + + assert_ok!(CollatorSelection::register_as_candidate(RuntimeOrigin::signed(3))); + assert_ok!(CollatorSelection::register_as_candidate(RuntimeOrigin::signed(4))); + assert_ok!(CollatorSelection::register_as_candidate(RuntimeOrigin::signed(5))); + assert_ok!(CollatorSelection::update_bond(RuntimeOrigin::signed(5), 60)); + + // session won't see this. + assert_eq!(SessionHandlerCollators::get(), vec![1, 2]); + // but we have a new candidate. + assert_eq!(>::get().iter().count(), 3); + + initialize_to_block(10); + assert_eq!(SessionChangeBlock::get(), 10); + // pallet-session has 1 session delay; current validators are the same. + assert_eq!(Session::validators(), vec![1, 2]); + // queued ones are changed, and now we have 4. + assert_eq!(Session::queued_keys().len(), 4); + // session handlers (aura, et. al.) cannot see this yet. + assert_eq!(SessionHandlerCollators::get(), vec![1, 2]); + + initialize_to_block(20); + assert_eq!(SessionChangeBlock::get(), 20); + // changed are now reflected to session handlers. 
+ assert_eq!(SessionHandlerCollators::get(), vec![1, 2, 5, 3]); + }); +} + +#[test] +fn session_management_reciprocal_outbidding() { + new_test_ext().execute_with(|| { + initialize_to_block(1); + + assert_eq!(SessionChangeBlock::get(), 0); + assert_eq!(SessionHandlerCollators::get(), vec![1, 2]); + + initialize_to_block(4); + + assert_eq!(SessionChangeBlock::get(), 0); + assert_eq!(SessionHandlerCollators::get(), vec![1, 2]); + + assert_ok!(CollatorSelection::register_as_candidate(RuntimeOrigin::signed(3))); + assert_ok!(CollatorSelection::register_as_candidate(RuntimeOrigin::signed(4))); + assert_ok!(CollatorSelection::register_as_candidate(RuntimeOrigin::signed(5))); + + assert_ok!(CollatorSelection::update_bond(RuntimeOrigin::signed(5), 60)); + + initialize_to_block(5); + + // candidates 3 and 4 saw they were outbid and preemptively bid more + // than 5 in the next block. + assert_ok!(CollatorSelection::update_bond(RuntimeOrigin::signed(4), 70)); + assert_ok!(CollatorSelection::update_bond(RuntimeOrigin::signed(3), 70)); + + // session won't see this. + assert_eq!(SessionHandlerCollators::get(), vec![1, 2]); + // but we have a new candidate. + assert_eq!(>::get().iter().count(), 3); + + initialize_to_block(10); + assert_eq!(SessionChangeBlock::get(), 10); + // pallet-session has 1 session delay; current validators are the same. + assert_eq!(Session::validators(), vec![1, 2]); + // queued ones are changed, and now we have 4. + assert_eq!(Session::queued_keys().len(), 4); + // session handlers (aura, et. al.) cannot see this yet. + assert_eq!(SessionHandlerCollators::get(), vec![1, 2]); + + initialize_to_block(20); + assert_eq!(SessionChangeBlock::get(), 20); + // changed are now reflected to session handlers. + assert_eq!(SessionHandlerCollators::get(), vec![1, 2, 4, 3]); + }); +} + +#[test] +fn session_management_decrease_bid_after_auction() { + new_test_ext().execute_with(|| { + initialize_to_block(1); + + assert_eq!(SessionChangeBlock::get(), 0); + assert_eq!(SessionHandlerCollators::get(), vec![1, 2]); + + initialize_to_block(4); + + assert_eq!(SessionChangeBlock::get(), 0); + assert_eq!(SessionHandlerCollators::get(), vec![1, 2]); + + assert_ok!(CollatorSelection::register_as_candidate(RuntimeOrigin::signed(3))); + assert_ok!(CollatorSelection::register_as_candidate(RuntimeOrigin::signed(4))); + assert_ok!(CollatorSelection::register_as_candidate(RuntimeOrigin::signed(5))); + + assert_ok!(CollatorSelection::update_bond(RuntimeOrigin::signed(5), 60)); + + initialize_to_block(5); + + assert_ok!(CollatorSelection::update_bond(RuntimeOrigin::signed(4), 70)); + assert_ok!(CollatorSelection::update_bond(RuntimeOrigin::signed(3), 70)); + + initialize_to_block(5); + + // candidate 5 saw it was outbid and wants to take back its bid, but + // not entirely so they still keep their place in the candidate list + // in case there is an opportunity in the future. + assert_ok!(CollatorSelection::update_bond(RuntimeOrigin::signed(5), 10)); + + // session won't see this. + assert_eq!(SessionHandlerCollators::get(), vec![1, 2]); + // but we have a new candidate. + assert_eq!(>::get().iter().count(), 3); + + initialize_to_block(10); + assert_eq!(SessionChangeBlock::get(), 10); + // pallet-session has 1 session delay; current validators are the same. + assert_eq!(Session::validators(), vec![1, 2]); + // queued ones are changed, and now we have 4. + assert_eq!(Session::queued_keys().len(), 4); + // session handlers (aura, et. al.) cannot see this yet. 
+ assert_eq!(SessionHandlerCollators::get(), vec![1, 2]); + + initialize_to_block(20); + assert_eq!(SessionChangeBlock::get(), 20); + // changed are now reflected to session handlers. + assert_eq!(SessionHandlerCollators::get(), vec![1, 2, 4, 3]); + }); +} + +#[test] +fn kick_mechanism() { + new_test_ext().execute_with(|| { + // add a new collator + assert_ok!(CollatorSelection::register_as_candidate(RuntimeOrigin::signed(3))); + assert_ok!(CollatorSelection::register_as_candidate(RuntimeOrigin::signed(4))); + initialize_to_block(10); + assert_eq!(>::get().iter().count(), 2); + initialize_to_block(20); + assert_eq!(SessionChangeBlock::get(), 20); + // 4 authored this block, gets to stay 3 was kicked + assert_eq!(>::get().iter().count(), 1); + // 3 will be kicked after 1 session delay + assert_eq!(SessionHandlerCollators::get(), vec![1, 2, 3, 4]); + // tuple of (id, deposit). + let collator = CandidateInfo { who: 4, deposit: 10 }; + assert_eq!( + >::get().iter().cloned().collect::>(), + vec![collator] + ); + assert_eq!(CollatorSelection::last_authored_block(4), 20); + initialize_to_block(30); + // 3 gets kicked after 1 session delay + assert_eq!(SessionHandlerCollators::get(), vec![1, 2, 4]); + // kicked collator gets funds back assert_eq!(Balances::free_balance(3), 100); }); } @@ -559,7 +1502,8 @@ fn kick_mechanism() { fn should_not_kick_mechanism_too_few() { new_test_ext().execute_with(|| { // remove the invulnerables and add new collators 3 and 5 - assert_eq!(CollatorSelection::candidates(), Vec::new()); + + assert_eq!(>::get().iter().count(), 0); assert_eq!(CollatorSelection::invulnerables(), vec![1, 2]); assert_ok!(CollatorSelection::remove_invulnerable( RuntimeOrigin::signed(RootAccount::get()), @@ -573,30 +1517,34 @@ fn should_not_kick_mechanism_too_few() { )); initialize_to_block(10); - assert_eq!(CollatorSelection::candidates().len(), 2); + assert_eq!(>::get().iter().count(), 2); initialize_to_block(20); assert_eq!(SessionChangeBlock::get(), 20); // 4 authored this block, 3 is kicked, 5 stays because of too few collators - assert_eq!(CollatorSelection::candidates().len(), 1); + assert_eq!(>::get().iter().count(), 1); // 3 will be kicked after 1 session delay assert_eq!(SessionHandlerCollators::get(), vec![3, 5]); - let collator = CandidateInfo { who: 5, deposit: 10 }; - assert_eq!(CollatorSelection::candidates(), vec![collator]); + // tuple of (id, deposit). + let collator = CandidateInfo { who: 3, deposit: 10 }; + assert_eq!( + >::get().iter().cloned().collect::>(), + vec![collator] + ); assert_eq!(CollatorSelection::last_authored_block(4), 20); initialize_to_block(30); // 3 gets kicked after 1 session delay - assert_eq!(SessionHandlerCollators::get(), vec![5]); + assert_eq!(SessionHandlerCollators::get(), vec![3]); // kicked collator gets funds back - assert_eq!(Balances::free_balance(3), 100); + assert_eq!(Balances::free_balance(5), 100); }); } #[test] fn should_kick_invulnerables_from_candidates_on_session_change() { new_test_ext().execute_with(|| { - assert_eq!(CollatorSelection::candidates(), Vec::new()); + assert_eq!(>::get().iter().count(), 0); assert_ok!(CollatorSelection::register_as_candidate(RuntimeOrigin::signed(3))); assert_ok!(CollatorSelection::register_as_candidate(RuntimeOrigin::signed(4))); assert_eq!(Balances::free_balance(3), 90); @@ -606,16 +1554,22 @@ fn should_kick_invulnerables_from_candidates_on_session_change() { vec![1, 2, 3] )); + // tuple of (id, deposit). 
let collator_3 = CandidateInfo { who: 3, deposit: 10 }; let collator_4 = CandidateInfo { who: 4, deposit: 10 }; - assert_eq!(CollatorSelection::candidates(), vec![collator_3, collator_4.clone()]); + let actual_candidates = + >::get().iter().cloned().collect::>(); + assert_eq!(actual_candidates, vec![collator_4.clone(), collator_3]); assert_eq!(CollatorSelection::invulnerables(), vec![1, 2, 3]); // session change initialize_to_block(10); // 3 is removed from candidates - assert_eq!(CollatorSelection::candidates(), vec![collator_4]); + assert_eq!( + >::get().iter().cloned().collect::>(), + vec![collator_4] + ); // but not from invulnerables assert_eq!(CollatorSelection::invulnerables(), vec![1, 2, 3]); // and it got its deposit back diff --git a/cumulus/pallets/collator-selection/src/weights.rs b/cumulus/pallets/collator-selection/src/weights.rs index f8f86fb7dec..1c01ad6cd6f 100644 --- a/cumulus/pallets/collator-selection/src/weights.rs +++ b/cumulus/pallets/collator-selection/src/weights.rs @@ -30,9 +30,11 @@ pub trait WeightInfo { fn add_invulnerable(_b: u32, _c: u32) -> Weight; fn remove_invulnerable(_b: u32) -> Weight; fn set_desired_candidates() -> Weight; - fn set_candidacy_bond() -> Weight; + fn set_candidacy_bond(_c: u32, _k: u32) -> Weight; fn register_as_candidate(_c: u32) -> Weight; fn leave_intent(_c: u32) -> Weight; + fn update_bond(_c: u32) -> Weight; + fn take_candidate_slot(_c: u32) -> Weight; fn note_author() -> Weight; fn new_session(_c: u32, _r: u32) -> Weight; } @@ -49,7 +51,7 @@ impl WeightInfo for SubstrateWeight { fn set_desired_candidates() -> Weight { Weight::from_parts(16_363_000_u64, 0).saturating_add(T::DbWeight::get().writes(1_u64)) } - fn set_candidacy_bond() -> Weight { + fn set_candidacy_bond(_c: u32, _k: u32) -> Weight { Weight::from_parts(16_840_000_u64, 0).saturating_add(T::DbWeight::get().writes(1_u64)) } fn register_as_candidate(c: u32) -> Weight { @@ -66,6 +68,20 @@ impl WeightInfo for SubstrateWeight { .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } + fn update_bond(c: u32) -> Weight { + Weight::from_parts(55_336_000_u64, 0) + // Standard Error: 0 + .saturating_add(Weight::from_parts(151_000_u64, 0).saturating_mul(c as u64)) + .saturating_add(T::DbWeight::get().reads(1_u64)) + .saturating_add(T::DbWeight::get().writes(2_u64)) + } + fn take_candidate_slot(c: u32) -> Weight { + Weight::from_parts(71_196_000_u64, 0) + // Standard Error: 0 + .saturating_add(Weight::from_parts(198_000_u64, 0).saturating_mul(c as u64)) + .saturating_add(T::DbWeight::get().reads(4_u64)) + .saturating_add(T::DbWeight::get().writes(2_u64)) + } fn note_author() -> Weight { Weight::from_parts(71_461_000_u64, 0) .saturating_add(T::DbWeight::get().reads(3_u64)) @@ -136,7 +152,7 @@ impl WeightInfo for () { fn set_desired_candidates() -> Weight { Weight::from_parts(16_363_000_u64, 0).saturating_add(RocksDbWeight::get().writes(1_u64)) } - fn set_candidacy_bond() -> Weight { + fn set_candidacy_bond(_c: u32, _k: u32) -> Weight { Weight::from_parts(16_840_000_u64, 0).saturating_add(RocksDbWeight::get().writes(1_u64)) } fn register_as_candidate(c: u32) -> Weight { @@ -158,6 +174,20 @@ impl WeightInfo for () { .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) } + fn update_bond(c: u32) -> Weight { + Weight::from_parts(55_336_000_u64, 0) + // Standard Error: 0 + .saturating_add(Weight::from_parts(151_000_u64, 0).saturating_mul(c as u64)) + 
.saturating_add(RocksDbWeight::get().reads(3_u64)) + .saturating_add(RocksDbWeight::get().writes(4_u64)) + } + fn take_candidate_slot(c: u32) -> Weight { + Weight::from_parts(71_196_000_u64, 0) + // Standard Error: 0 + .saturating_add(Weight::from_parts(198_000_u64, 0).saturating_mul(c as u64)) + .saturating_add(RocksDbWeight::get().reads(3_u64)) + .saturating_add(RocksDbWeight::get().writes(4_u64)) + } fn new_session(r: u32, c: u32) -> Weight { Weight::from_parts(0_u64, 0) // Standard Error: 1_010_000 diff --git a/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/pallet_collator_selection.rs b/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/pallet_collator_selection.rs index 5c5a31eb348..c686bd6134a 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/pallet_collator_selection.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/pallet_collator_selection.rs @@ -123,7 +123,7 @@ impl pallet_collator_selection::WeightInfo for WeightIn } /// Storage: `CollatorSelection::CandidacyBond` (r:0 w:1) /// Proof: `CollatorSelection::CandidacyBond` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) - fn set_candidacy_bond() -> Weight { + fn set_candidacy_bond(_c: u32, _k: u32) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` @@ -177,6 +177,30 @@ impl pallet_collator_selection::WeightInfo for WeightIn .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(2)) } + fn update_bond(c: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `306 + c * (50 ±0)` + // Estimated: `6287` + // Minimum execution time: 34_814_000 picoseconds. + Weight::from_parts(36_371_520, 0) + .saturating_add(Weight::from_parts(0, 6287)) + // Standard Error: 2_391 + .saturating_add(Weight::from_parts(201_700, 0).saturating_mul(c.into())) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } + fn take_candidate_slot(c: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `306 + c * (50 ±0)` + // Estimated: `6287` + // Minimum execution time: 34_814_000 picoseconds. 
+ Weight::from_parts(36_371_520, 0) + .saturating_add(Weight::from_parts(0, 6287)) + // Standard Error: 2_391 + .saturating_add(Weight::from_parts(201_700, 0).saturating_mul(c.into())) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } /// Storage: `System::Account` (r:2 w:2) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) /// Storage: `System::BlockWeight` (r:1 w:1) diff --git a/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/weights/pallet_collator_selection.rs b/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/weights/pallet_collator_selection.rs index c33e79970ff..b3062984baf 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/weights/pallet_collator_selection.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/weights/pallet_collator_selection.rs @@ -121,7 +121,7 @@ impl pallet_collator_selection::WeightInfo for WeightIn } /// Storage: `CollatorSelection::CandidacyBond` (r:0 w:1) /// Proof: `CollatorSelection::CandidacyBond` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) - fn set_candidacy_bond() -> Weight { + fn set_candidacy_bond(_c: u32, _k: u32) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` @@ -175,6 +175,30 @@ impl pallet_collator_selection::WeightInfo for WeightIn .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(2)) } + fn update_bond(c: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `306 + c * (50 ±0)` + // Estimated: `6287` + // Minimum execution time: 34_814_000 picoseconds. + Weight::from_parts(36_371_520, 0) + .saturating_add(Weight::from_parts(0, 6287)) + // Standard Error: 2_391 + .saturating_add(Weight::from_parts(201_700, 0).saturating_mul(c.into())) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } + fn take_candidate_slot(c: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `306 + c * (50 ±0)` + // Estimated: `6287` + // Minimum execution time: 34_814_000 picoseconds. 
+ Weight::from_parts(36_371_520, 0) + .saturating_add(Weight::from_parts(0, 6287)) + // Standard Error: 2_391 + .saturating_add(Weight::from_parts(201_700, 0).saturating_mul(c.into())) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } /// Storage: `System::Account` (r:2 w:2) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) /// Storage: `System::BlockWeight` (r:1 w:1) diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/pallet_collator_selection.rs b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/pallet_collator_selection.rs index d98abbbc2d3..aeda7bbbb6a 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/pallet_collator_selection.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/pallet_collator_selection.rs @@ -124,7 +124,7 @@ impl pallet_collator_selection::WeightInfo for WeightIn } /// Storage: `CollatorSelection::CandidacyBond` (r:0 w:1) /// Proof: `CollatorSelection::CandidacyBond` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) - fn set_candidacy_bond() -> Weight { + fn set_candidacy_bond(_c: u32, _k: u32) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` @@ -178,6 +178,30 @@ impl pallet_collator_selection::WeightInfo for WeightIn .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(2)) } + fn update_bond(c: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `306 + c * (50 ±0)` + // Estimated: `6287` + // Minimum execution time: 34_814_000 picoseconds. + Weight::from_parts(36_371_520, 0) + .saturating_add(Weight::from_parts(0, 6287)) + // Standard Error: 2_391 + .saturating_add(Weight::from_parts(201_700, 0).saturating_mul(c.into())) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } + fn take_candidate_slot(c: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `306 + c * (50 ±0)` + // Estimated: `6287` + // Minimum execution time: 34_814_000 picoseconds. 
+ Weight::from_parts(36_371_520, 0) + .saturating_add(Weight::from_parts(0, 6287)) + // Standard Error: 2_391 + .saturating_add(Weight::from_parts(201_700, 0).saturating_mul(c.into())) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } /// Storage: `System::Account` (r:2 w:2) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) /// Storage: `System::BlockWeight` (r:1 w:1) diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/pallet_collator_selection.rs b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/pallet_collator_selection.rs index 095e784cf66..1fac2d59ab9 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/pallet_collator_selection.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/pallet_collator_selection.rs @@ -123,7 +123,7 @@ impl pallet_collator_selection::WeightInfo for WeightIn } /// Storage: `CollatorSelection::CandidacyBond` (r:0 w:1) /// Proof: `CollatorSelection::CandidacyBond` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) - fn set_candidacy_bond() -> Weight { + fn set_candidacy_bond(_c: u32, _k: u32) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` @@ -177,6 +177,30 @@ impl pallet_collator_selection::WeightInfo for WeightIn .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(2)) } + fn update_bond(c: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `306 + c * (50 ±0)` + // Estimated: `6287` + // Minimum execution time: 34_814_000 picoseconds. + Weight::from_parts(36_371_520, 0) + .saturating_add(Weight::from_parts(0, 6287)) + // Standard Error: 2_391 + .saturating_add(Weight::from_parts(201_700, 0).saturating_mul(c.into())) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } + fn take_candidate_slot(c: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `306 + c * (50 ±0)` + // Estimated: `6287` + // Minimum execution time: 34_814_000 picoseconds. 
+ Weight::from_parts(36_371_520, 0) + .saturating_add(Weight::from_parts(0, 6287)) + // Standard Error: 2_391 + .saturating_add(Weight::from_parts(201_700, 0).saturating_mul(c.into())) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } /// Storage: `System::Account` (r:2 w:2) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) /// Storage: `System::BlockWeight` (r:1 w:1) diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/weights/pallet_collator_selection.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/weights/pallet_collator_selection.rs index cccb7c60924..72d8ba4045a 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/weights/pallet_collator_selection.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/weights/pallet_collator_selection.rs @@ -123,7 +123,7 @@ impl pallet_collator_selection::WeightInfo for WeightIn } /// Storage: `CollatorSelection::CandidacyBond` (r:0 w:1) /// Proof: `CollatorSelection::CandidacyBond` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) - fn set_candidacy_bond() -> Weight { + fn set_candidacy_bond(_c: u32, _k: u32) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` @@ -177,6 +177,30 @@ impl pallet_collator_selection::WeightInfo for WeightIn .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(2)) } + fn update_bond(c: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `306 + c * (50 ±0)` + // Estimated: `6287` + // Minimum execution time: 34_814_000 picoseconds. + Weight::from_parts(36_371_520, 0) + .saturating_add(Weight::from_parts(0, 6287)) + // Standard Error: 2_391 + .saturating_add(Weight::from_parts(201_700, 0).saturating_mul(c.into())) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } + fn take_candidate_slot(c: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `306 + c * (50 ±0)` + // Estimated: `6287` + // Minimum execution time: 34_814_000 picoseconds. 
+ Weight::from_parts(36_371_520, 0) + .saturating_add(Weight::from_parts(0, 6287)) + // Standard Error: 2_391 + .saturating_add(Weight::from_parts(201_700, 0).saturating_mul(c.into())) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } /// Storage: `System::Account` (r:2 w:2) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) /// Storage: `System::BlockWeight` (r:1 w:1) diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/weights/pallet_collator_selection.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/weights/pallet_collator_selection.rs index 6ed2c429186..f7c78f7db82 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/weights/pallet_collator_selection.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/weights/pallet_collator_selection.rs @@ -123,7 +123,7 @@ impl pallet_collator_selection::WeightInfo for WeightIn } /// Storage: `CollatorSelection::CandidacyBond` (r:0 w:1) /// Proof: `CollatorSelection::CandidacyBond` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) - fn set_candidacy_bond() -> Weight { + fn set_candidacy_bond(_c: u32, _k: u32) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` @@ -177,6 +177,30 @@ impl pallet_collator_selection::WeightInfo for WeightIn .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(2)) } + fn update_bond(c: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `306 + c * (50 ±0)` + // Estimated: `6287` + // Minimum execution time: 34_814_000 picoseconds. + Weight::from_parts(36_371_520, 0) + .saturating_add(Weight::from_parts(0, 6287)) + // Standard Error: 2_391 + .saturating_add(Weight::from_parts(201_700, 0).saturating_mul(c.into())) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } + fn take_candidate_slot(c: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `306 + c * (50 ±0)` + // Estimated: `6287` + // Minimum execution time: 34_814_000 picoseconds. 
+ Weight::from_parts(36_371_520, 0) + .saturating_add(Weight::from_parts(0, 6287)) + // Standard Error: 2_391 + .saturating_add(Weight::from_parts(201_700, 0).saturating_mul(c.into())) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } /// Storage: `System::Account` (r:2 w:2) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) /// Storage: `System::BlockWeight` (r:1 w:1) diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_collator_selection.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_collator_selection.rs index 1fb0b765c06..f7e233189ab 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_collator_selection.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_collator_selection.rs @@ -123,7 +123,7 @@ impl pallet_collator_selection::WeightInfo for WeightIn } /// Storage: `CollatorSelection::CandidacyBond` (r:0 w:1) /// Proof: `CollatorSelection::CandidacyBond` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) - fn set_candidacy_bond() -> Weight { + fn set_candidacy_bond(_c: u32, _k: u32) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` @@ -177,6 +177,30 @@ impl pallet_collator_selection::WeightInfo for WeightIn .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(2)) } + fn update_bond(c: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `306 + c * (50 ±0)` + // Estimated: `6287` + // Minimum execution time: 34_814_000 picoseconds. + Weight::from_parts(36_371_520, 0) + .saturating_add(Weight::from_parts(0, 6287)) + // Standard Error: 2_391 + .saturating_add(Weight::from_parts(201_700, 0).saturating_mul(c.into())) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } + fn take_candidate_slot(c: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `306 + c * (50 ±0)` + // Estimated: `6287` + // Minimum execution time: 34_814_000 picoseconds. 
+ Weight::from_parts(36_371_520, 0) + .saturating_add(Weight::from_parts(0, 6287)) + // Standard Error: 2_391 + .saturating_add(Weight::from_parts(201_700, 0).saturating_mul(c.into())) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } /// Storage: `System::Account` (r:2 w:2) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) /// Storage: `System::BlockWeight` (r:1 w:1) diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/pallet_collator_selection.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/pallet_collator_selection.rs index 9cbfa6ce80e..9dcee77082b 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/pallet_collator_selection.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/pallet_collator_selection.rs @@ -124,7 +124,7 @@ impl pallet_collator_selection::WeightInfo for WeightIn } /// Storage: `CollatorSelection::CandidacyBond` (r:0 w:1) /// Proof: `CollatorSelection::CandidacyBond` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) - fn set_candidacy_bond() -> Weight { + fn set_candidacy_bond(_c: u32, _k: u32) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` @@ -178,6 +178,30 @@ impl pallet_collator_selection::WeightInfo for WeightIn .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(2)) } + fn update_bond(c: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `306 + c * (50 ±0)` + // Estimated: `6287` + // Minimum execution time: 34_814_000 picoseconds. + Weight::from_parts(36_371_520, 0) + .saturating_add(Weight::from_parts(0, 6287)) + // Standard Error: 2_391 + .saturating_add(Weight::from_parts(201_700, 0).saturating_mul(c.into())) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } + fn take_candidate_slot(c: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `306 + c * (50 ±0)` + // Estimated: `6287` + // Minimum execution time: 34_814_000 picoseconds. 
+ Weight::from_parts(36_371_520, 0) + .saturating_add(Weight::from_parts(0, 6287)) + // Standard Error: 2_391 + .saturating_add(Weight::from_parts(201_700, 0).saturating_mul(c.into())) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } /// Storage: `System::Account` (r:2 w:2) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) /// Storage: `System::BlockWeight` (r:1 w:1) diff --git a/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/pallet_collator_selection.rs b/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/pallet_collator_selection.rs index 2c729e8dc10..03f3ff602a5 100644 --- a/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/pallet_collator_selection.rs +++ b/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/pallet_collator_selection.rs @@ -121,7 +121,7 @@ impl pallet_collator_selection::WeightInfo for WeightIn } /// Storage: `CollatorSelection::CandidacyBond` (r:0 w:1) /// Proof: `CollatorSelection::CandidacyBond` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) - fn set_candidacy_bond() -> Weight { + fn set_candidacy_bond(_c: u32, _k: u32) -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` @@ -175,6 +175,30 @@ impl pallet_collator_selection::WeightInfo for WeightIn .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(2)) } + fn update_bond(c: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `306 + c * (50 ±0)` + // Estimated: `6287` + // Minimum execution time: 34_814_000 picoseconds. + Weight::from_parts(36_371_520, 0) + .saturating_add(Weight::from_parts(0, 6287)) + // Standard Error: 2_391 + .saturating_add(Weight::from_parts(201_700, 0).saturating_mul(c.into())) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } + fn take_candidate_slot(c: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `306 + c * (50 ±0)` + // Estimated: `6287` + // Minimum execution time: 34_814_000 picoseconds. 
+ Weight::from_parts(36_371_520, 0) + .saturating_add(Weight::from_parts(0, 6287)) + // Standard Error: 2_391 + .saturating_add(Weight::from_parts(201_700, 0).saturating_mul(c.into())) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } /// Storage: `System::Account` (r:2 w:2) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) /// Storage: `System::BlockWeight` (r:1 w:1) -- GitLab From 1cd38c2cd12fda156c755bcb9375e01a1e077bd8 Mon Sep 17 00:00:00 2001 From: PG Herveou Date: Tue, 14 Nov 2023 18:24:45 +0100 Subject: [PATCH 050/199] Contracts: Bump contracts rococo (#2286) --- .../contracts/contracts-rococo/src/contracts.rs | 11 ++--------- .../runtimes/contracts/contracts-rococo/src/lib.rs | 2 +- 2 files changed, 3 insertions(+), 10 deletions(-) diff --git a/cumulus/parachains/runtimes/contracts/contracts-rococo/src/contracts.rs b/cumulus/parachains/runtimes/contracts/contracts-rococo/src/contracts.rs index 1c99393d5e5..b86f797ee03 100644 --- a/cumulus/parachains/runtimes/contracts/contracts-rococo/src/contracts.rs +++ b/cumulus/parachains/runtimes/contracts/contracts-rococo/src/contracts.rs @@ -22,9 +22,7 @@ use frame_support::{ traits::{ConstBool, ConstU32, Nothing}, }; use pallet_contracts::{ - migration::{v12, v13, v14, v15}, - weights::SubstrateWeight, - Config, DebugInfo, DefaultAddressGenerator, Frame, Schedule, + weights::SubstrateWeight, Config, DebugInfo, DefaultAddressGenerator, Frame, Schedule, }; use sp_runtime::Perbill; @@ -70,12 +68,7 @@ impl Config for Runtime { type MaxDebugBufferLen = ConstU32<{ 2 * 1024 * 1024 }>; type MaxDelegateDependencies = ConstU32<32>; type CodeHashLockupDepositPercent = CodeHashLockupDepositPercent; - type Migrations = ( - v12::Migration, - v13::Migration, - v14::Migration, - v15::Migration, - ); + type Migrations = (); type RuntimeHoldReason = RuntimeHoldReason; type Debug = (); type Environment = (); diff --git a/cumulus/parachains/runtimes/contracts/contracts-rococo/src/lib.rs b/cumulus/parachains/runtimes/contracts/contracts-rococo/src/lib.rs index 2a2f4141033..4c66e780ba9 100644 --- a/cumulus/parachains/runtimes/contracts/contracts-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/contracts/contracts-rococo/src/lib.rs @@ -131,7 +131,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { spec_name: create_runtime_str!("contracts-rococo"), impl_name: create_runtime_str!("contracts-rococo"), authoring_version: 1, - spec_version: 10000, + spec_version: 10001, impl_version: 0, apis: RUNTIME_API_VERSIONS, transaction_version: 6, -- GitLab From 3a87390b30929dffdf015c31cadea4dfce9f34eb Mon Sep 17 00:00:00 2001 From: Alexandru Vasile <60601340+lexnv@users.noreply.github.com> Date: Tue, 14 Nov 2023 19:35:02 +0200 Subject: [PATCH 051/199] chainHead/tests: Fix clippy (#2325) Remove the genesis hash from tests: - Clippy was passing on the PR: https://github.com/paritytech/polkadot-sdk/pull/2296 - Clippy fails on master: https://gitlab.parity.io/parity/mirrors/polkadot-sdk/-/jobs/4328487 This was a race with merging: https://github.com/paritytech/polkadot-sdk/pull/2295, which introduced another test that used the `CHAIN_GENESIS` Signed-off-by: Alexandru Vasile --- substrate/client/rpc-spec-v2/src/chain_head/tests.rs | 1 - 1 file changed, 1 deletion(-) diff --git a/substrate/client/rpc-spec-v2/src/chain_head/tests.rs b/substrate/client/rpc-spec-v2/src/chain_head/tests.rs index 11c6798bf0a..4e6775fe280 100644 --- 
a/substrate/client/rpc-spec-v2/src/chain_head/tests.rs +++ b/substrate/client/rpc-spec-v2/src/chain_head/tests.rs @@ -1627,7 +1627,6 @@ async fn follow_with_multiple_unpin_hashes() { client.clone(), backend, Arc::new(TaskExecutor::default()), - CHAIN_GENESIS, ChainHeadConfig { global_max_pinned_blocks: MAX_PINNED_BLOCKS, subscription_max_pinned_duration: Duration::from_secs(MAX_PINNED_SECS), -- GitLab From 54f84285bf863c75d9383f7d9d3e7612410f23a4 Mon Sep 17 00:00:00 2001 From: jserrat <35823283+Jpserrat@users.noreply.github.com> Date: Tue, 14 Nov 2023 14:50:18 -0300 Subject: [PATCH 052/199] change prepare worker to use fork instead of threads (#1685) Co-authored-by: Marcin S --- Cargo.lock | 26 + .../node/core/candidate-validation/src/lib.rs | 24 +- .../core/candidate-validation/src/tests.rs | 12 +- .../benches/host_prepare_rococo_runtime.rs | 2 +- polkadot/node/core/pvf/common/Cargo.toml | 2 +- polkadot/node/core/pvf/common/src/error.rs | 55 +- polkadot/node/core/pvf/common/src/execute.rs | 51 +- .../node/core/pvf/common/src/worker/mod.rs | 12 +- .../node/core/pvf/execute-worker/Cargo.toml | 3 + .../node/core/pvf/execute-worker/src/lib.rs | 399 ++++++++--- .../node/core/pvf/prepare-worker/Cargo.toml | 2 + .../node/core/pvf/prepare-worker/src/lib.rs | 634 ++++++++++++------ polkadot/node/core/pvf/src/error.rs | 53 +- polkadot/node/core/pvf/src/execute/queue.rs | 15 +- .../node/core/pvf/src/execute/worker_intf.rs | 49 +- polkadot/node/core/pvf/src/prepare/pool.rs | 21 +- .../node/core/pvf/src/prepare/worker_intf.rs | 24 +- polkadot/node/core/pvf/src/testing.rs | 34 +- polkadot/node/core/pvf/tests/it/main.rs | 135 +--- polkadot/node/core/pvf/tests/it/process.rs | 383 +++++++++++ .../node/core/pvf/tests/it/worker_common.rs | 8 +- .../src/node/utility/pvf-host-and-workers.md | 47 +- .../list-syscalls/execute-worker-syscalls | 6 + .../list-syscalls/prepare-worker-syscalls | 5 + 24 files changed, 1468 insertions(+), 534 deletions(-) create mode 100644 polkadot/node/core/pvf/tests/it/process.rs diff --git a/Cargo.lock b/Cargo.lock index c57a5ce1393..d8308086822 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -8627,6 +8627,17 @@ dependencies = [ "static_assertions", ] +[[package]] +name = "nix" +version = "0.27.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2eb04e9c688eff1c89d72b407f168cf79bb9e867a9d3323ed6c01519eb9cc053" +dependencies = [ + "bitflags 2.4.0", + "cfg-if", + "libc", +] + [[package]] name = "no-std-net" version = "0.6.0" @@ -9103,6 +9114,16 @@ dependencies = [ "num-traits", ] +[[package]] +name = "os_pipe" +version = "1.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0ae859aa07428ca9a929b936690f8b12dc5f11dd8c6992a18ca93919f28bc177" +dependencies = [ + "libc", + "windows-sys 0.48.0", +] + [[package]] name = "os_str_bytes" version = "6.5.1" @@ -12525,6 +12546,9 @@ name = "polkadot-node-core-pvf-execute-worker" version = "1.0.0" dependencies = [ "cpu-time", + "libc", + "nix 0.27.1", + "os_pipe", "parity-scale-codec", "polkadot-node-core-pvf-common", "polkadot-parachain-primitives", @@ -12539,6 +12563,8 @@ dependencies = [ "cfg-if", "criterion 0.4.0", "libc", + "nix 0.27.1", + "os_pipe", "parity-scale-codec", "polkadot-node-core-pvf-common", "polkadot-primitives", diff --git a/polkadot/node/core/candidate-validation/src/lib.rs b/polkadot/node/core/candidate-validation/src/lib.rs index 4232e5f1cdd..a3d6f047313 100644 --- a/polkadot/node/core/candidate-validation/src/lib.rs +++ 
b/polkadot/node/core/candidate-validation/src/lib.rs @@ -642,14 +642,19 @@ async fn validate_candidate_exhaustive( }, Err(ValidationError::InvalidCandidate(WasmInvalidCandidate::HardTimeout)) => Ok(ValidationResult::Invalid(InvalidCandidate::Timeout)), - Err(ValidationError::InvalidCandidate(WasmInvalidCandidate::WorkerReportedError(e))) => + Err(ValidationError::InvalidCandidate(WasmInvalidCandidate::WorkerReportedInvalid(e))) => Ok(ValidationResult::Invalid(InvalidCandidate::ExecutionError(e))), Err(ValidationError::InvalidCandidate(WasmInvalidCandidate::AmbiguousWorkerDeath)) => Ok(ValidationResult::Invalid(InvalidCandidate::ExecutionError( "ambiguous worker death".to_string(), ))), - Err(ValidationError::InvalidCandidate(WasmInvalidCandidate::Panic(err))) => + Err(ValidationError::InvalidCandidate(WasmInvalidCandidate::JobError(err))) => Ok(ValidationResult::Invalid(InvalidCandidate::ExecutionError(err))), + + Err(ValidationError::InvalidCandidate(WasmInvalidCandidate::AmbiguousJobDeath(err))) => + Ok(ValidationResult::Invalid(InvalidCandidate::ExecutionError(format!( + "ambiguous job death: {err}" + )))), Err(ValidationError::InvalidCandidate(WasmInvalidCandidate::PrepareError(e))) => { // In principle if preparation of the `WASM` fails, the current candidate can not be the // reason for that. So we can't say whether it is invalid or not. In addition, with @@ -741,9 +746,9 @@ trait ValidationBackend { }; // Allow limited retries for each kind of error. + let mut num_death_retries_left = 1; + let mut num_job_error_retries_left = 1; let mut num_internal_retries_left = 1; - let mut num_awd_retries_left = 1; - let mut num_panic_retries_left = 1; loop { // Stop retrying if we exceeded the timeout. if total_time_start.elapsed() + retry_delay > exec_timeout { @@ -752,11 +757,12 @@ trait ValidationBackend { match validation_result { Err(ValidationError::InvalidCandidate( - WasmInvalidCandidate::AmbiguousWorkerDeath, - )) if num_awd_retries_left > 0 => num_awd_retries_left -= 1, - Err(ValidationError::InvalidCandidate(WasmInvalidCandidate::Panic(_))) - if num_panic_retries_left > 0 => - num_panic_retries_left -= 1, + WasmInvalidCandidate::AmbiguousWorkerDeath | + WasmInvalidCandidate::AmbiguousJobDeath(_), + )) if num_death_retries_left > 0 => num_death_retries_left -= 1, + Err(ValidationError::InvalidCandidate(WasmInvalidCandidate::JobError(_))) + if num_job_error_retries_left > 0 => + num_job_error_retries_left -= 1, Err(ValidationError::InternalError(_)) if num_internal_retries_left > 0 => num_internal_retries_left -= 1, _ => break, diff --git a/polkadot/node/core/candidate-validation/src/tests.rs b/polkadot/node/core/candidate-validation/src/tests.rs index af530a20c4e..cab823e1e63 100644 --- a/polkadot/node/core/candidate-validation/src/tests.rs +++ b/polkadot/node/core/candidate-validation/src/tests.rs @@ -695,11 +695,13 @@ fn candidate_validation_retry_panic_errors() { let v = executor::block_on(validate_candidate_exhaustive( MockValidateCandidateBackend::with_hardcoded_result_list(vec![ - Err(ValidationError::InvalidCandidate(WasmInvalidCandidate::Panic("foo".into()))), - // Throw an AWD error, we should still retry again. - Err(ValidationError::InvalidCandidate(WasmInvalidCandidate::AmbiguousWorkerDeath)), + Err(ValidationError::InvalidCandidate(WasmInvalidCandidate::JobError("foo".into()))), + // Throw an AJD error, we should still retry again. + Err(ValidationError::InvalidCandidate(WasmInvalidCandidate::AmbiguousJobDeath( + "baz".into(), + ))), // Throw another panic error. 
- Err(ValidationError::InvalidCandidate(WasmInvalidCandidate::Panic("bar".into()))), + Err(ValidationError::InvalidCandidate(WasmInvalidCandidate::JobError("bar".into()))), ]), validation_data, validation_code, @@ -1216,7 +1218,7 @@ fn precheck_properly_classifies_outcomes() { inner(Err(PrepareError::Prevalidation("foo".to_owned())), PreCheckOutcome::Invalid); inner(Err(PrepareError::Preparation("bar".to_owned())), PreCheckOutcome::Invalid); - inner(Err(PrepareError::Panic("baz".to_owned())), PreCheckOutcome::Invalid); + inner(Err(PrepareError::JobError("baz".to_owned())), PreCheckOutcome::Invalid); inner(Err(PrepareError::TimedOut), PreCheckOutcome::Failed); inner(Err(PrepareError::IoErr("fizz".to_owned())), PreCheckOutcome::Failed); diff --git a/polkadot/node/core/pvf/benches/host_prepare_rococo_runtime.rs b/polkadot/node/core/pvf/benches/host_prepare_rococo_runtime.rs index d0cefae6cdb..378374a10b3 100644 --- a/polkadot/node/core/pvf/benches/host_prepare_rococo_runtime.rs +++ b/polkadot/node/core/pvf/benches/host_prepare_rococo_runtime.rs @@ -37,7 +37,7 @@ impl TestHost { where F: FnOnce(&mut Config), { - let (prepare_worker_path, execute_worker_path) = testing::get_and_check_worker_paths(); + let (prepare_worker_path, execute_worker_path) = testing::build_workers_and_get_paths(true); let cache_dir = tempfile::tempdir().unwrap(); let mut config = Config::new( diff --git a/polkadot/node/core/pvf/common/Cargo.toml b/polkadot/node/core/pvf/common/Cargo.toml index 7dc8d307026..e3fda06963e 100644 --- a/polkadot/node/core/pvf/common/Cargo.toml +++ b/polkadot/node/core/pvf/common/Cargo.toml @@ -12,6 +12,7 @@ cpu-time = "1.0.0" futures = "0.3.21" gum = { package = "tracing-gum", path = "../../../gum" } libc = "0.2.139" +thiserror = "1.0.31" parity-scale-codec = { version = "3.6.1", default-features = false, features = ["derive"] } @@ -30,7 +31,6 @@ sp-tracing = { path = "../../../../../substrate/primitives/tracing" } [target.'cfg(target_os = "linux")'.dependencies] landlock = "0.3.0" seccompiler = "0.4.0" -thiserror = "1.0.31" [dev-dependencies] assert_matches = "1.4.0" diff --git a/polkadot/node/core/pvf/common/src/error.rs b/polkadot/node/core/pvf/common/src/error.rs index 82b56562d8c..34475c481f7 100644 --- a/polkadot/node/core/pvf/common/src/error.rs +++ b/polkadot/node/core/pvf/common/src/error.rs @@ -35,9 +35,9 @@ pub enum PrepareError { /// Instantiation of the WASM module instance failed. #[codec(index = 2)] RuntimeConstruction(String), - /// An unexpected panic has occurred in the preparation worker. + /// An unexpected error has occurred in the preparation job. #[codec(index = 3)] - Panic(String), + JobError(String), /// Failed to prepare the PVF due to the time limit. #[codec(index = 4)] TimedOut, @@ -48,12 +48,12 @@ pub enum PrepareError { /// The temporary file for the artifact could not be created at the given cache path. This /// state is reported by the validation host (not by the worker). #[codec(index = 6)] - CreateTmpFileErr(String), + CreateTmpFile(String), /// The response from the worker is received, but the file cannot be renamed (moved) to the /// final destination location. This state is reported by the validation host (not by the /// worker). #[codec(index = 7)] - RenameTmpFileErr { + RenameTmpFile { err: String, // Unfortunately `PathBuf` doesn't implement `Encode`/`Decode`, so we do a fallible // conversion to `Option`. @@ -68,11 +68,14 @@ pub enum PrepareError { /// reported by the validation host (not by the worker). 
#[codec(index = 9)] ClearWorkerDir(String), + /// The preparation job process died, due to OOM, a seccomp violation, or some other factor. + #[codec(index = 10)] + JobDied(String), + /// Some error occurred when interfacing with the kernel. + #[codec(index = 11)] + Kernel(String), } -/// Pre-encoded length-prefixed `PrepareResult::Err(PrepareError::OutOfMemory)` -pub const OOM_PAYLOAD: &[u8] = b"\x02\x00\x00\x00\x00\x00\x00\x00\x01\x08"; - impl PrepareError { /// Returns whether this is a deterministic error, i.e. one that should trigger reliably. Those /// errors depend on the PVF itself and the sc-executor/wasmtime logic. @@ -83,12 +86,15 @@ impl PrepareError { pub fn is_deterministic(&self) -> bool { use PrepareError::*; match self { - Prevalidation(_) | Preparation(_) | Panic(_) | OutOfMemory => true, - TimedOut | + Prevalidation(_) | Preparation(_) | JobError(_) | OutOfMemory => true, IoErr(_) | - CreateTmpFileErr(_) | - RenameTmpFileErr { .. } | - ClearWorkerDir(_) => false, + JobDied(_) | + CreateTmpFile(_) | + RenameTmpFile { .. } | + ClearWorkerDir(_) | + Kernel(_) => false, + // Can occur due to issues with the PVF, but also due to factors like local load. + TimedOut => false, // Can occur due to issues with the PVF, but also due to local errors. RuntimeConstruction(_) => false, } } @@ -102,14 +108,16 @@ impl fmt::Display for PrepareError { Prevalidation(err) => write!(f, "prevalidation: {}", err), Preparation(err) => write!(f, "preparation: {}", err), RuntimeConstruction(err) => write!(f, "runtime construction: {}", err), - Panic(err) => write!(f, "panic: {}", err), + JobError(err) => write!(f, "panic: {}", err), TimedOut => write!(f, "prepare: timeout"), IoErr(err) => write!(f, "prepare: io error while receiving response: {}", err), - CreateTmpFileErr(err) => write!(f, "prepare: error creating tmp file: {}", err), - RenameTmpFileErr { err, src, dest } => + JobDied(err) => write!(f, "prepare: prepare job died: {}", err), + CreateTmpFile(err) => write!(f, "prepare: error creating tmp file: {}", err), + RenameTmpFile { err, src, dest } => write!(f, "prepare: error renaming tmp file ({:?} -> {:?}): {}", src, dest, err), OutOfMemory => write!(f, "prepare: out of memory"), ClearWorkerDir(err) => write!(f, "prepare: error clearing worker cache: {}", err), + Kernel(err) => write!(f, "prepare: error interfacing with the kernel: {}", err), } } } @@ -133,9 +141,9 @@ pub enum InternalValidationError { // conversion to `Option<String>`. path: Option<String>, }, - /// An error occurred in the CPU time monitor thread. Should be totally unrelated to - /// validation. - CpuTimeMonitorThread(String), + /// Some error occurred when interfacing with the kernel. + Kernel(String), + /// Some non-deterministic preparation error occurred. 
NonDeterministicPrepareError(PrepareError), } @@ -158,17 +166,8 @@ impl fmt::Display for InternalValidationError { "validation: host could not clear the worker cache ({:?}) after a job: {}", path, err ), - CpuTimeMonitorThread(err) => - write!(f, "validation: an error occurred in the CPU time monitor thread: {}", err), + Kernel(err) => write!(f, "validation: error interfacing with the kernel: {}", err), NonDeterministicPrepareError(err) => write!(f, "validation: prepare: {}", err), } } } - -#[test] -fn pre_encoded_payloads() { - let oom_enc = PrepareResult::Err(PrepareError::OutOfMemory).encode(); - let mut oom_payload = oom_enc.len().to_le_bytes().to_vec(); - oom_payload.extend(oom_enc); - assert_eq!(oom_payload, OOM_PAYLOAD); -} diff --git a/polkadot/node/core/pvf/common/src/execute.rs b/polkadot/node/core/pvf/common/src/execute.rs index b89ab089af1..89e7c8e471a 100644 --- a/polkadot/node/core/pvf/common/src/execute.rs +++ b/polkadot/node/core/pvf/common/src/execute.rs @@ -28,9 +28,9 @@ pub struct Handshake { pub executor_params: ExecutorParams, } -/// The response from an execution job on the worker. +/// The response from the execution worker. #[derive(Debug, Encode, Decode)] -pub enum Response { +pub enum WorkerResponse { /// The job completed successfully. Ok { /// The result of parachain validation. @@ -41,14 +41,38 @@ pub enum Response { /// The candidate is invalid. InvalidCandidate(String), /// The job timed out. - TimedOut, - /// An unexpected panic has occurred in the execution worker. - Panic(String), + JobTimedOut, + /// The job process has died. We must kill the worker just in case. + /// + /// We cannot treat this as an internal error because malicious code may have killed the job. + /// We still retry it, because in the non-malicious case it is likely spurious. + JobDied(String), + /// An unexpected error occurred in the job process, e.g. failing to spawn a thread, panic, + /// etc. + /// + /// Because malicious code can cause a job error, we must not treat it as an internal error. We + /// still retry it, because in the non-malicious case it is likely spurious. + JobError(String), + /// Some internal error occurred. InternalError(InternalValidationError), } -impl Response { +/// The result of a job on the execution worker. +pub type JobResult = Result<JobResponse, JobError>; + +/// The successful response from a job on the execution worker. +#[derive(Debug, Encode, Decode)] +pub enum JobResponse { + Ok { + /// The result of parachain validation. + result_descriptor: ValidationResult, + }, + /// The candidate is invalid. + InvalidCandidate(String), +} + +impl JobResponse { /// Creates an invalid response from a context `ctx` and a message `msg` (which can be empty). pub fn format_invalid(ctx: &'static str, msg: &str) -> Self { if msg.is_empty() { @@ -58,3 +82,18 @@ impl Response { } } } + +/// An unexpected error occurred in the execution job process. Because this comes from the job, +/// which executes untrusted code, this error must likewise be treated as untrusted. That is, we +/// cannot raise an internal error based on this. 
+#[derive(thiserror::Error, Debug, Encode, Decode)] +pub enum JobError { + #[error("The job timed out")] + TimedOut, + #[error("An unexpected panic has occurred in the execution job: {0}")] + Panic(String), + #[error("Could not spawn the requested thread: {0}")] + CouldNotSpawnThread(String), + #[error("An error occurred in the CPU time monitor thread: {0}")] + CpuTimeMonitorThread(String), +} diff --git a/polkadot/node/core/pvf/common/src/worker/mod.rs b/polkadot/node/core/pvf/common/src/worker/mod.rs index f6a67b98321..86f47acccac 100644 --- a/polkadot/node/core/pvf/common/src/worker/mod.rs +++ b/polkadot/node/core/pvf/common/src/worker/mod.rs @@ -205,9 +205,15 @@ impl fmt::Display for WorkerKind { } } -// The worker version must be passed in so that we accurately get the version of the worker, and not -// the version that this crate was compiled with. -pub fn worker_event_loop( +// NOTE: The worker version must be passed in so that we accurately get the version of the worker, +// and not the version that this crate was compiled with. +// +// NOTE: This must not spawn any threads due to safety requirements in `event_loop` and to avoid +// errors in [`security::unshare_user_namespace_and_change_root`]. +// +/// Initializes the worker process, then runs the given event loop, which spawns a new job process +/// to securely handle each incoming request. +pub fn run_worker( worker_kind: WorkerKind, socket_path: PathBuf, #[cfg_attr(not(target_os = "linux"), allow(unused_mut))] mut worker_dir_path: PathBuf, diff --git a/polkadot/node/core/pvf/execute-worker/Cargo.toml b/polkadot/node/core/pvf/execute-worker/Cargo.toml index 77a9420961c..40e0ff4f0a1 100644 --- a/polkadot/node/core/pvf/execute-worker/Cargo.toml +++ b/polkadot/node/core/pvf/execute-worker/Cargo.toml @@ -9,6 +9,9 @@ license.workspace = true [dependencies] cpu-time = "1.0.0" gum = { package = "tracing-gum", path = "../../../gum" } +os_pipe = "1.1.4" +nix = { version = "0.27.1", features = ["resource", "process"]} +libc = "0.2.139" parity-scale-codec = { version = "3.6.1", default-features = false, features = ["derive"] } diff --git a/polkadot/node/core/pvf/execute-worker/src/lib.rs b/polkadot/node/core/pvf/execute-worker/src/lib.rs index 8872f9bc8dd..9ec811686b8 100644 --- a/polkadot/node/core/pvf/execute-worker/src/lib.rs +++ b/polkadot/node/core/pvf/execute-worker/src/lib.rs @@ -25,23 +25,33 @@ pub use polkadot_node_core_pvf_common::{ const LOG_TARGET: &str = "parachain::pvf-execute-worker"; use cpu_time::ProcessTime; +use nix::{ + errno::Errno, + sys::{ + resource::{Usage, UsageWho}, + wait::WaitStatus, + }, + unistd::{ForkResult, Pid}, +}; +use os_pipe::{self, PipeReader, PipeWriter}; use parity_scale_codec::{Decode, Encode}; use polkadot_node_core_pvf_common::{ error::InternalValidationError, - execute::{Handshake, Response}, + execute::{Handshake, JobError, JobResponse, JobResult, WorkerResponse}, framed_recv_blocking, framed_send_blocking, worker::{ - cpu_time_monitor_loop, stringify_panic_payload, + cpu_time_monitor_loop, run_worker, stringify_panic_payload, thread::{self, WaitOutcome}, - worker_event_loop, WorkerKind, + WorkerKind, }, }; use polkadot_parachain_primitives::primitives::ValidationResult; use polkadot_primitives::{executor_params::DEFAULT_NATIVE_STACK_MAX, ExecutorParams}; use std::{ - io, + io::{self, Read}, os::unix::net::UnixStream, path::PathBuf, + process, sync::{mpsc::channel, Arc}, time::Duration, }; @@ -105,7 +115,7 @@ fn recv_request(stream: &mut UnixStream) -> io::Result<(Vec, Duration)> { Ok((params, 
execution_timeout)) } -fn send_response(stream: &mut UnixStream, response: Response) -> io::Result<()> { +fn send_response(stream: &mut UnixStream, response: WorkerResponse) -> io::Result<()> { framed_send_blocking(stream, &response.encode()) } @@ -131,7 +141,7 @@ pub fn worker_entrypoint( worker_version: Option<&str>, security_status: SecurityStatus, ) { - worker_event_loop( + run_worker( WorkerKind::Execute, socket_path, worker_dir_path, @@ -139,7 +149,7 @@ pub fn worker_entrypoint( worker_version, &security_status, |mut stream, worker_dir_path| { - let worker_pid = std::process::id(); + let worker_pid = process::id(); let artifact_path = worker_dir::execute_artifact(&worker_dir_path); let Handshake { executor_params } = recv_handshake(&mut stream)?; @@ -157,7 +167,7 @@ pub fn worker_entrypoint( let compiled_artifact_blob = match std::fs::read(&artifact_path) { Ok(bytes) => bytes, Err(err) => { - let response = Response::InternalError( + let response = WorkerResponse::InternalError( InternalValidationError::CouldNotOpenFile(err.to_string()), ); send_response(&mut stream, response)?; @@ -165,82 +175,51 @@ pub fn worker_entrypoint( }, }; - // Conditional variable to notify us when a thread is done. - let condvar = thread::get_condvar(); + let (pipe_reader, pipe_writer) = os_pipe::pipe()?; - let cpu_time_start = ProcessTime::now(); + let usage_before = match nix::sys::resource::getrusage(UsageWho::RUSAGE_CHILDREN) { + Ok(usage) => usage, + Err(errno) => { + let response = internal_error_from_errno("getrusage before", errno); + send_response(&mut stream, response)?; + continue + }, + }; + + // SAFETY: new process is spawned within a single threaded process. This invariant + // is enforced by tests. + let response = match unsafe { nix::unistd::fork() } { + Err(errno) => internal_error_from_errno("fork", errno), + Ok(ForkResult::Child) => { + // Dropping the stream closes the underlying socket. We want to make sure + // that the sandboxed child can't get any kind of information from the + // outside world. The only IPC it should be able to do is sending its + // response over the pipe. + drop(stream); + // Drop the read end so we don't have too many FDs open. + drop(pipe_reader); - // Spawn a new thread that runs the CPU time monitor. - let (cpu_time_monitor_tx, cpu_time_monitor_rx) = channel::<()>(); - let cpu_time_monitor_thread = thread::spawn_worker_thread( - "cpu time monitor thread", - move || { - cpu_time_monitor_loop( - cpu_time_start, + handle_child_process( + pipe_writer, + compiled_artifact_blob, + executor_params, + params, execution_timeout, - cpu_time_monitor_rx, - ) - }, - Arc::clone(&condvar), - WaitOutcome::TimedOut, - )?; - - let executor_params_2 = executor_params.clone(); - let execute_thread = thread::spawn_worker_thread_with_stack_size( - "execute thread", - move || { - validate_using_artifact( - &compiled_artifact_blob, - &executor_params_2, - ¶ms, - cpu_time_start, ) }, - Arc::clone(&condvar), - WaitOutcome::Finished, - EXECUTE_THREAD_STACK_SIZE, - )?; - - let outcome = thread::wait_for_threads(condvar); - - let response = match outcome { - WaitOutcome::Finished => { - let _ = cpu_time_monitor_tx.send(()); - execute_thread - .join() - .unwrap_or_else(|e| Response::Panic(stringify_panic_payload(e))) - }, - // If the CPU thread is not selected, we signal it to end, the join handle is - // dropped and the thread will finish in the background. 
- WaitOutcome::TimedOut => { - match cpu_time_monitor_thread.join() { - Ok(Some(cpu_time_elapsed)) => { - // Log if we exceed the timeout and the other thread hasn't - // finished. - gum::warn!( - target: LOG_TARGET, - %worker_pid, - "execute job took {}ms cpu time, exceeded execute timeout {}ms", - cpu_time_elapsed.as_millis(), - execution_timeout.as_millis(), - ); - Response::TimedOut - }, - Ok(None) => Response::InternalError( - InternalValidationError::CpuTimeMonitorThread( - "error communicating over finished channel".into(), - ), - ), - Err(e) => Response::InternalError( - InternalValidationError::CpuTimeMonitorThread( - stringify_panic_payload(e), - ), - ), - } + Ok(ForkResult::Parent { child }) => { + // the read end will wait until all write ends have been closed, + // this drop is necessary to avoid deadlock + drop(pipe_writer); + + handle_parent_process( + pipe_reader, + child, + worker_pid, + usage_before, + execution_timeout, + )? }, - WaitOutcome::Pending => unreachable!( - "we run wait_while until the outcome is no longer pending; qed" - ), }; gum::trace!( @@ -259,27 +238,275 @@ fn validate_using_artifact( compiled_artifact_blob: &[u8], executor_params: &ExecutorParams, params: &[u8], - cpu_time_start: ProcessTime, -) -> Response { +) -> JobResponse { let descriptor_bytes = match unsafe { // SAFETY: this should be safe since the compiled artifact passed here comes from the // file created by the prepare workers. These files are obtained by calling // [`executor_intf::prepare`]. execute_artifact(compiled_artifact_blob, executor_params, params) } { - Err(err) => return Response::format_invalid("execute", &err), + Err(err) => return JobResponse::format_invalid("execute", &err), Ok(d) => d, }; let result_descriptor = match ValidationResult::decode(&mut &descriptor_bytes[..]) { Err(err) => - return Response::format_invalid("validation result decoding failed", &err.to_string()), + return JobResponse::format_invalid( + "validation result decoding failed", + &err.to_string(), + ), Ok(r) => r, }; - // Include the decoding in the measured time, to prevent any potential attacks exploiting some - // bug in decoding. - let duration = cpu_time_start.elapsed(); + JobResponse::Ok { result_descriptor } +} + +/// This is used to handle child process during pvf execute worker. +/// It execute the artifact and pipes back the response to the parent process +/// +/// # Arguments +/// +/// - `pipe_write`: A `PipeWriter` structure, the writing end of a pipe. +/// +/// - `compiled_artifact_blob`: The artifact bytes from compiled by the prepare worker`. +/// +/// - `executor_params`: Deterministically serialized execution environment semantics. +/// +/// - `params`: Validation parameters. +/// +/// - `execution_timeout`: The timeout in `Duration`. +/// +/// # Returns +/// +/// - pipe back `JobResponse` to the parent process. +fn handle_child_process( + mut pipe_write: PipeWriter, + compiled_artifact_blob: Vec, + executor_params: ExecutorParams, + params: Vec, + execution_timeout: Duration, +) -> ! { + gum::debug!( + target: LOG_TARGET, + worker_job_pid = %process::id(), + "worker job: executing artifact", + ); + + // Conditional variable to notify us when a thread is done. + let condvar = thread::get_condvar(); + let cpu_time_start = ProcessTime::now(); + + // Spawn a new thread that runs the CPU time monitor. 
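
The monitor and execute threads spawned next (continued below) are coordinated through the crate's condition-variable helpers: whichever thread finishes first records its `WaitOutcome` and wakes the waiting code. A rough, std-only sketch of that pattern; `Outcome` is a simplified stand-in and the real helpers (`thread::spawn_worker_thread`, `wait_for_threads`) do more bookkeeping than shown here.

```rust
use std::sync::{Arc, Condvar, Mutex};
use std::thread;
use std::time::Duration;

// Simplified stand-in for the crate's `WaitOutcome`.
#[derive(Clone, Copy, Debug, PartialEq)]
enum Outcome {
    Pending,
    Finished,
    TimedOut,
}

fn flag(pair: &(Mutex<Outcome>, Condvar), outcome: Outcome) {
    let (lock, cvar) = pair;
    let mut current = lock.lock().unwrap();
    // Only the first thread to finish gets to set the outcome.
    if *current == Outcome::Pending {
        *current = outcome;
    }
    cvar.notify_all();
}

fn main() {
    let pair = Arc::new((Mutex::new(Outcome::Pending), Condvar::new()));

    // Stand-in for the execute/prepare thread: finishes well within the budget.
    let job_pair = Arc::clone(&pair);
    thread::spawn(move || {
        thread::sleep(Duration::from_millis(10)); // pretend to do the work
        flag(&job_pair, Outcome::Finished);
    });

    // Stand-in for the CPU time monitor: would flag a timeout once the budget is spent.
    let monitor_pair = Arc::clone(&pair);
    thread::spawn(move || {
        thread::sleep(Duration::from_secs(5)); // stand-in for the CPU time budget
        flag(&monitor_pair, Outcome::TimedOut);
    });

    // Like `wait_for_threads`: block until the outcome is no longer pending.
    let (lock, cvar) = &*pair;
    let outcome = cvar
        .wait_while(lock.lock().unwrap(), |o| *o == Outcome::Pending)
        .unwrap();
    assert_eq!(*outcome, Outcome::Finished);
    // In the real worker the monitor thread is then told to stop via a channel;
    // here the process simply exits.
}
```
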
+ let (cpu_time_monitor_tx, cpu_time_monitor_rx) = channel::<()>(); + let cpu_time_monitor_thread = thread::spawn_worker_thread( + "cpu time monitor thread", + move || cpu_time_monitor_loop(cpu_time_start, execution_timeout, cpu_time_monitor_rx), + Arc::clone(&condvar), + WaitOutcome::TimedOut, + ) + .unwrap_or_else(|err| { + send_child_response(&mut pipe_write, Err(JobError::CouldNotSpawnThread(err.to_string()))) + }); + + let executor_params_2 = executor_params.clone(); + let execute_thread = thread::spawn_worker_thread_with_stack_size( + "execute thread", + move || validate_using_artifact(&compiled_artifact_blob, &executor_params_2, ¶ms), + Arc::clone(&condvar), + WaitOutcome::Finished, + EXECUTE_THREAD_STACK_SIZE, + ) + .unwrap_or_else(|err| { + send_child_response(&mut pipe_write, Err(JobError::CouldNotSpawnThread(err.to_string()))) + }); + + let outcome = thread::wait_for_threads(condvar); + + let response = match outcome { + WaitOutcome::Finished => { + let _ = cpu_time_monitor_tx.send(()); + execute_thread.join().map_err(|e| JobError::Panic(stringify_panic_payload(e))) + }, + // If the CPU thread is not selected, we signal it to end, the join handle is + // dropped and the thread will finish in the background. + WaitOutcome::TimedOut => match cpu_time_monitor_thread.join() { + Ok(Some(_cpu_time_elapsed)) => Err(JobError::TimedOut), + Ok(None) => Err(JobError::CpuTimeMonitorThread( + "error communicating over finished channel".into(), + )), + Err(e) => Err(JobError::CpuTimeMonitorThread(stringify_panic_payload(e))), + }, + WaitOutcome::Pending => + unreachable!("we run wait_while until the outcome is no longer pending; qed"), + }; + + send_child_response(&mut pipe_write, response); +} + +/// Waits for child process to finish and handle child response from pipe. +/// +/// # Arguments +/// +/// - `pipe_read`: A `PipeReader` used to read data from the child process. +/// +/// - `child`: The child pid. +/// +/// - `usage_before`: Resource usage statistics before executing the child process. +/// +/// - `timeout`: The maximum allowed time for the child process to finish, in `Duration`. +/// +/// # Returns +/// +/// - The response, either `Ok` or some error state. +fn handle_parent_process( + mut pipe_read: PipeReader, + child: Pid, + worker_pid: u32, + usage_before: Usage, + timeout: Duration, +) -> io::Result { + // Read from the child. Don't decode unless the process exited normally, which we check later. + let mut received_data = Vec::new(); + pipe_read + .read_to_end(&mut received_data) + // Could not decode job response. There is either a bug or the job was hijacked. + // Should retry at any rate. + .map_err(|err| io::Error::new(io::ErrorKind::Other, err.to_string()))?; + + let status = nix::sys::wait::waitpid(child, None); + gum::trace!( + target: LOG_TARGET, + %worker_pid, + "execute worker received wait status from job: {:?}", + status, + ); + + let usage_after = match nix::sys::resource::getrusage(UsageWho::RUSAGE_CHILDREN) { + Ok(usage) => usage, + Err(errno) => return Ok(internal_error_from_errno("getrusage after", errno)), + }; + + // Using `getrusage` is needed to check whether child has timedout since we cannot rely on + // child to report its own time. 
+ // As `getrusage` returns resource usage from all terminated child processes, + // it is necessary to subtract the usage before the current child process to isolate its cpu + // time + let cpu_tv = get_total_cpu_usage(usage_after) - get_total_cpu_usage(usage_before); + if cpu_tv >= timeout { + gum::warn!( + target: LOG_TARGET, + %worker_pid, + "execute job took {}ms cpu time, exceeded execute timeout {}ms", + cpu_tv.as_millis(), + timeout.as_millis(), + ); + return Ok(WorkerResponse::JobTimedOut) + } + + match status { + Ok(WaitStatus::Exited(_, exit_status)) => { + let mut reader = io::BufReader::new(received_data.as_slice()); + let result = match recv_child_response(&mut reader) { + Ok(result) => result, + Err(err) => return Ok(WorkerResponse::JobError(err.to_string())), + }; + + match result { + Ok(JobResponse::Ok { result_descriptor }) => { + // The exit status should have been zero if no error occurred. + if exit_status != 0 { + return Ok(WorkerResponse::JobError(format!( + "unexpected exit status: {}", + exit_status + ))) + } + + Ok(WorkerResponse::Ok { result_descriptor, duration: cpu_tv }) + }, + Ok(JobResponse::InvalidCandidate(err)) => Ok(WorkerResponse::InvalidCandidate(err)), + Err(job_error) => { + gum::warn!( + target: LOG_TARGET, + %worker_pid, + "execute job error: {}", + job_error, + ); + if matches!(job_error, JobError::TimedOut) { + Ok(WorkerResponse::JobTimedOut) + } else { + Ok(WorkerResponse::JobError(job_error.to_string())) + } + }, + } + }, + // The job was killed by the given signal. + // + // The job gets SIGSYS on seccomp violations, but this signal may have been sent for some + // other reason, so we still need to check for seccomp violations elsewhere. + Ok(WaitStatus::Signaled(_pid, signal, _core_dump)) => + Ok(WorkerResponse::JobDied(format!("received signal: {signal:?}"))), + Err(errno) => Ok(internal_error_from_errno("waitpid", errno)), + + // It is within an attacker's power to send an unexpected exit status. So we cannot treat + // this as an internal error (which would make us abstain), but must vote against. + Ok(unexpected_wait_status) => Ok(WorkerResponse::JobDied(format!( + "unexpected status from wait: {unexpected_wait_status:?}" + ))), + } +} + +/// Calculate the total CPU time from the given `usage` structure, returned from +/// [`nix::sys::resource::getrusage`], and calculates the total CPU time spent, including both user +/// and system time. +/// +/// # Arguments +/// +/// - `rusage`: Contains resource usage information. +/// +/// # Returns +/// +/// Returns a `Duration` representing the total CPU time. +fn get_total_cpu_usage(rusage: Usage) -> Duration { + let micros = (((rusage.user_time().tv_sec() + rusage.system_time().tv_sec()) * 1_000_000) + + (rusage.system_time().tv_usec() + rusage.user_time().tv_usec()) as i64) as u64; + + return Duration::from_micros(micros) +} + +/// Get a job response. +fn recv_child_response(received_data: &mut io::BufReader<&[u8]>) -> io::Result { + let response_bytes = framed_recv_blocking(received_data)?; + JobResult::decode(&mut response_bytes.as_slice()).map_err(|e| { + io::Error::new( + io::ErrorKind::Other, + format!("execute pvf recv_child_response: decode error: {:?}", e), + ) + }) +} + +/// Write response to the pipe and exit process after. +/// +/// # Arguments +/// +/// - `pipe_write`: A `PipeWriter` structure, the writing end of a pipe. +/// +/// - `response`: Child process response, or error. +fn send_child_response(pipe_write: &mut PipeWriter, response: JobResult) -> ! 
{ + framed_send_blocking(pipe_write, response.encode().as_slice()) + .unwrap_or_else(|_| process::exit(libc::EXIT_FAILURE)); + + if response.is_ok() { + process::exit(libc::EXIT_SUCCESS) + } else { + process::exit(libc::EXIT_FAILURE) + } +} - Response::Ok { result_descriptor, duration } +fn internal_error_from_errno(context: &'static str, errno: Errno) -> WorkerResponse { + WorkerResponse::InternalError(InternalValidationError::Kernel(format!( + "{}: {}: {}", + context, + errno, + io::Error::last_os_error() + ))) } diff --git a/polkadot/node/core/pvf/prepare-worker/Cargo.toml b/polkadot/node/core/pvf/prepare-worker/Cargo.toml index e21583ecc8b..1cd221533f4 100644 --- a/polkadot/node/core/pvf/prepare-worker/Cargo.toml +++ b/polkadot/node/core/pvf/prepare-worker/Cargo.toml @@ -14,6 +14,8 @@ rayon = "1.5.1" tracking-allocator = { package = "staging-tracking-allocator", path = "../../../tracking-allocator" } tikv-jemalloc-ctl = { version = "0.5.0", optional = true } tikv-jemallocator = { version = "0.5.0", optional = true } +os_pipe = "1.1.4" +nix = { version = "0.27.1", features = ["resource", "process"]} parity-scale-codec = { version = "3.6.1", default-features = false, features = ["derive"] } diff --git a/polkadot/node/core/pvf/prepare-worker/src/lib.rs b/polkadot/node/core/pvf/prepare-worker/src/lib.rs index 37a4dd06075..151b54efc2d 100644 --- a/polkadot/node/core/pvf/prepare-worker/src/lib.rs +++ b/polkadot/node/core/pvf/prepare-worker/src/lib.rs @@ -28,28 +28,40 @@ const LOG_TARGET: &str = "parachain::pvf-prepare-worker"; use crate::memory_stats::max_rss_stat::{extract_max_rss_stat, get_max_rss_thread}; #[cfg(any(target_os = "linux", feature = "jemalloc-allocator"))] use crate::memory_stats::memory_tracker::{get_memory_tracker_loop_stats, memory_tracker_loop}; +use libc; +use nix::{ + errno::Errno, + sys::{ + resource::{Usage, UsageWho}, + wait::WaitStatus, + }, + unistd::{ForkResult, Pid}, +}; +use os_pipe::{self, PipeReader, PipeWriter}; use parity_scale_codec::{Decode, Encode}; use polkadot_node_core_pvf_common::{ - error::{PrepareError, PrepareResult, OOM_PAYLOAD}, + error::{PrepareError, PrepareResult}, executor_intf::create_runtime_from_artifact_bytes, framed_recv_blocking, framed_send_blocking, prepare::{MemoryStats, PrepareJobKind, PrepareStats}, pvf::PvfPrepData, worker::{ - cpu_time_monitor_loop, stringify_panic_payload, - thread::{self, WaitOutcome}, - worker_event_loop, WorkerKind, + cpu_time_monitor_loop, run_worker, stringify_panic_payload, + thread::{self, spawn_worker_thread, WaitOutcome}, + WorkerKind, }, worker_dir, ProcessTime, SecurityStatus, }; use polkadot_primitives::ExecutorParams; use std::{ - fs, io, + fs, + io::{self, Read}, os::{ fd::{AsRawFd, RawFd}, unix::net::UnixStream, }, path::PathBuf, + process, sync::{mpsc::channel, Arc}, time::Duration, }; @@ -65,6 +77,7 @@ static ALLOC: TrackingAllocator = static ALLOC: TrackingAllocator = TrackingAllocator(std::alloc::System); /// Contains the bytes for a successfully compiled artifact. +#[derive(Encode, Decode)] pub struct CompiledArtifact(Vec); impl CompiledArtifact { @@ -80,6 +93,7 @@ impl AsRef<[u8]> for CompiledArtifact { } } +/// Get a worker request. fn recv_request(stream: &mut UnixStream) -> io::Result { let pvf = framed_recv_blocking(stream)?; let pvf = PvfPrepData::decode(&mut &pvf[..]).map_err(|e| { @@ -91,6 +105,7 @@ fn recv_request(stream: &mut UnixStream) -> io::Result { Ok(pvf) } +/// Send a worker response. 
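
Responses cross both the worker socket and the job pipe as framed messages via `framed_send_blocking` / `framed_recv_blocking` (the prepare worker's `send_response` follows below). The framing itself is defined elsewhere in the crate; judging from the pre-encoded `OOM_PAYLOAD` test added further down, it appears to be a little-endian `usize` length prefix followed by the SCALE bytes. A hedged, std-only sketch of that shape, not the crate's actual implementation:

```rust
use std::io::{self, Read, Write};

// Illustrative framing: a little-endian `usize` length prefix followed by the payload.
fn write_framed(writer: &mut impl Write, payload: &[u8]) -> io::Result<()> {
    writer.write_all(&payload.len().to_le_bytes())?;
    writer.write_all(payload)
}

fn read_framed(reader: &mut impl Read) -> io::Result<Vec<u8>> {
    let mut len_bytes = [0u8; std::mem::size_of::<usize>()];
    reader.read_exact(&mut len_bytes)?;
    let mut payload = vec![0u8; usize::from_le_bytes(len_bytes)];
    reader.read_exact(&mut payload)?;
    Ok(payload)
}

fn main() -> io::Result<()> {
    // `\x01\x08` is the SCALE encoding of `Err(PrepareError::OutOfMemory)`; on a
    // 64-bit target the resulting frame matches the pre-encoded `OOM_PAYLOAD` constant.
    let mut buf = Vec::new();
    write_framed(&mut buf, b"\x01\x08")?;
    assert_eq!(buf, b"\x02\x00\x00\x00\x00\x00\x00\x00\x01\x08");

    let payload = read_framed(&mut buf.as_slice())?;
    assert_eq!(payload, b"\x01\x08");
    Ok(())
}
```
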
fn send_response(stream: &mut UnixStream, result: PrepareResult) -> io::Result<()> { framed_send_blocking(stream, &result.encode()) } @@ -111,18 +126,22 @@ fn start_memory_tracking(fd: RawFd, limit: Option) { // Syscalls never allocate or deallocate, so this is safe. libc::syscall(libc::SYS_write, fd, OOM_PAYLOAD.as_ptr(), OOM_PAYLOAD.len()); libc::syscall(libc::SYS_close, fd); - libc::syscall(libc::SYS_exit, 1); + // Make sure we exit from all threads. Copied from glibc. + libc::syscall(libc::SYS_exit_group, 1); + loop { + libc::syscall(libc::SYS_exit, 1); + } } #[cfg(not(target_os = "linux"))] { // Syscalls are not available on MacOS, so we have to use `libc` wrappers. - // Technicaly, there may be allocations inside, although they shouldn't be + // Technically, there may be allocations inside, although they shouldn't be // there. In that case, we'll see deadlocks on MacOS after the OOM condition // triggered. As we consider running a validator on MacOS unsafe, and this // code is only run by a validator, it's a lesser evil. libc::write(fd, OOM_PAYLOAD.as_ptr().cast(), OOM_PAYLOAD.len()); libc::close(fd); - std::process::exit(1); + libc::_exit(1); } })), ); @@ -155,17 +174,19 @@ fn end_memory_tracking() -> isize { /// /// 1. Get the code and parameters for preparation from the host. /// -/// 2. Start a memory tracker in a separate thread. +/// 2. Start a new child process /// -/// 3. Start the CPU time monitor loop and the actual preparation in two separate threads. +/// 3. Start the memory tracker and the actual preparation in two separate threads. /// /// 4. Wait on the two threads created in step 3. /// /// 5. Stop the memory tracker and get the stats. /// -/// 6. If compilation succeeded, write the compiled artifact into a temporary file. +/// 6. Pipe the result back to the parent process and exit from child process. /// -/// 7. Send the result of preparation back to the host. If any error occurred in the above steps, we +/// 7. If compilation succeeded, write the compiled artifact into a temporary file. +/// +/// 8. Send the result of preparation back to the host. If any error occurred in the above steps, we /// send that in the `PrepareResult`. pub fn worker_entrypoint( socket_path: PathBuf, @@ -174,7 +195,7 @@ pub fn worker_entrypoint( worker_version: Option<&str>, security_status: SecurityStatus, ) { - worker_event_loop( + run_worker( WorkerKind::Prepare, socket_path, worker_dir_path, @@ -182,7 +203,7 @@ pub fn worker_entrypoint( worker_version, &security_status, |mut stream, worker_dir_path| { - let worker_pid = std::process::id(); + let worker_pid = process::id(); let temp_artifact_dest = worker_dir::prepare_tmp_artifact(&worker_dir_path); loop { @@ -197,186 +218,58 @@ pub fn worker_entrypoint( let prepare_job_kind = pvf.prep_kind(); let executor_params = pvf.executor_params(); - // Conditional variable to notify us when a thread is done. - let condvar = thread::get_condvar(); - - // Run the memory tracker in a regular, non-worker thread. - #[cfg(any(target_os = "linux", feature = "jemalloc-allocator"))] - let condvar_memory = Arc::clone(&condvar); - #[cfg(any(target_os = "linux", feature = "jemalloc-allocator"))] - let memory_tracker_thread = std::thread::spawn(|| memory_tracker_loop(condvar_memory)); + let (pipe_reader, pipe_writer) = os_pipe::pipe()?; - let cpu_time_start = ProcessTime::now(); - - // Spawn a new thread that runs the CPU time monitor. 
- let (cpu_time_monitor_tx, cpu_time_monitor_rx) = channel::<()>(); - let cpu_time_monitor_thread = thread::spawn_worker_thread( - "cpu time monitor thread", - move || { - cpu_time_monitor_loop( - cpu_time_start, - preparation_timeout, - cpu_time_monitor_rx, - ) + let usage_before = match nix::sys::resource::getrusage(UsageWho::RUSAGE_CHILDREN) { + Ok(usage) => usage, + Err(errno) => { + let result = Err(error_from_errno("getrusage before", errno)); + send_response(&mut stream, result)?; + continue }, - Arc::clone(&condvar), - WaitOutcome::TimedOut, - )?; - - start_memory_tracking( - stream.as_raw_fd(), - executor_params.prechecking_max_memory().map(|v| { - v.try_into().unwrap_or_else(|_| { - gum::warn!( - LOG_TARGET, - %worker_pid, - "Illegal pre-checking max memory value {} discarded", - v, - ); - 0 - }) - }), - ); - - // Spawn another thread for preparation. - let prepare_thread = thread::spawn_worker_thread( - "prepare thread", - move || { - #[allow(unused_mut)] - let mut result = prepare_artifact(pvf, cpu_time_start); - - // Get the `ru_maxrss` stat. If supported, call getrusage for the thread. - #[cfg(target_os = "linux")] - let mut result = result - .map(|(artifact, elapsed)| (artifact, elapsed, get_max_rss_thread())); - - // If we are pre-checking, check for runtime construction errors. - // - // As pre-checking is more strict than just preparation in terms of memory - // and time, it is okay to do extra checks here. This takes negligible time - // anyway. - if let PrepareJobKind::Prechecking = prepare_job_kind { - result = result.and_then(|output| { - runtime_construction_check( - output.0.as_ref(), - executor_params.as_ref(), - )?; - Ok(output) - }); - } - - result - }, - Arc::clone(&condvar), - WaitOutcome::Finished, - )?; - - let outcome = thread::wait_for_threads(condvar); - - let peak_alloc = { - let peak = end_memory_tracking(); - gum::debug!( - target: LOG_TARGET, - %worker_pid, - "prepare job peak allocation is {} bytes", - peak, - ); - peak }; - let result = match outcome { - WaitOutcome::Finished => { - let _ = cpu_time_monitor_tx.send(()); - - match prepare_thread.join().unwrap_or_else(|err| { - Err(PrepareError::Panic(stringify_panic_payload(err))) - }) { - Err(err) => { - // Serialized error will be written into the socket. - Err(err) - }, - Ok(ok) => { - cfg_if::cfg_if! { - if #[cfg(target_os = "linux")] { - let (artifact, cpu_time_elapsed, max_rss) = ok; - } else { - let (artifact, cpu_time_elapsed) = ok; - } - } - - // Stop the memory stats worker and get its observed memory stats. - #[cfg(any(target_os = "linux", feature = "jemalloc-allocator"))] - let memory_tracker_stats = get_memory_tracker_loop_stats(memory_tracker_thread, worker_pid); - let memory_stats = MemoryStats { - #[cfg(any( - target_os = "linux", - feature = "jemalloc-allocator" - ))] - memory_tracker_stats, - #[cfg(target_os = "linux")] - max_rss: extract_max_rss_stat(max_rss, worker_pid), - // Negative peak allocation values are legit; they are narrow - // corner cases and shouldn't affect overall statistics - // significantly - peak_tracked_alloc: if peak_alloc > 0 { - peak_alloc as u64 - } else { - 0u64 - }, - }; - - // Write the serialized artifact into a temp file. - // - // PVF host only keeps artifacts statuses in its memory, - // successfully compiled code gets stored on the disk (and - // consequently deserialized by execute-workers). The prepare worker - // is only required to send `Ok` to the pool to indicate the - // success. 
- - gum::debug!( - target: LOG_TARGET, - %worker_pid, - "worker: writing artifact to {}", - temp_artifact_dest.display(), - ); - fs::write(&temp_artifact_dest, &artifact)?; - - Ok(PrepareStats { cpu_time_elapsed, memory_stats }) - }, - } + // SAFETY: new process is spawned within a single threaded process. This invariant + // is enforced by tests. + let result = match unsafe { nix::unistd::fork() } { + Err(errno) => Err(error_from_errno("fork", errno)), + Ok(ForkResult::Child) => { + // Dropping the stream closes the underlying socket. We want to make sure + // that the sandboxed child can't get any kind of information from the + // outside world. The only IPC it should be able to do is sending its + // response over the pipe. + drop(stream); + // Drop the read end so we don't have too many FDs open. + drop(pipe_reader); + + handle_child_process( + pvf, + pipe_writer, + preparation_timeout, + prepare_job_kind, + executor_params, + ) }, - // If the CPU thread is not selected, we signal it to end, the join handle is - // dropped and the thread will finish in the background. - WaitOutcome::TimedOut => { - match cpu_time_monitor_thread.join() { - Ok(Some(cpu_time_elapsed)) => { - // Log if we exceed the timeout and the other thread hasn't - // finished. - gum::warn!( - target: LOG_TARGET, - %worker_pid, - "prepare job took {}ms cpu time, exceeded prepare timeout {}ms", - cpu_time_elapsed.as_millis(), - preparation_timeout.as_millis(), - ); - Err(PrepareError::TimedOut) - }, - Ok(None) => Err(PrepareError::IoErr( - "error communicating over closed channel".into(), - )), - // Errors in this thread are independent of the PVF. - Err(err) => Err(PrepareError::IoErr(stringify_panic_payload(err))), - } + Ok(ForkResult::Parent { child }) => { + // the read end will wait until all write ends have been closed, + // this drop is necessary to avoid deadlock + drop(pipe_writer); + + handle_parent_process( + pipe_reader, + child, + temp_artifact_dest.clone(), + worker_pid, + usage_before, + preparation_timeout, + ) }, - WaitOutcome::Pending => unreachable!( - "we run wait_while until the outcome is no longer pending; qed" - ), }; gum::trace!( target: LOG_TARGET, %worker_pid, - "worker: sending response to host: {:?}", + "worker: sending result to host: {:?}", result ); send_response(&mut stream, result)?; @@ -385,10 +278,7 @@ pub fn worker_entrypoint( ); } -fn prepare_artifact( - pvf: PvfPrepData, - cpu_time_start: ProcessTime, -) -> Result<(CompiledArtifact, Duration), PrepareError> { +fn prepare_artifact(pvf: PvfPrepData) -> Result { let blob = match prevalidate(&pvf.code()) { Err(err) => return Err(PrepareError::Prevalidation(format!("{:?}", err))), Ok(b) => b, @@ -398,7 +288,6 @@ fn prepare_artifact( Ok(compiled_artifact) => Ok(CompiledArtifact::new(compiled_artifact)), Err(err) => Err(PrepareError::Preparation(format!("{:?}", err))), } - .map(|artifact| (artifact, cpu_time_start.elapsed())) } /// Try constructing the runtime to catch any instantiation errors during pre-checking. @@ -412,3 +301,372 @@ fn runtime_construction_check( .map(|_runtime| ()) .map_err(|err| PrepareError::RuntimeConstruction(format!("{:?}", err))) } + +#[derive(Encode, Decode)] +struct JobResponse { + artifact: CompiledArtifact, + memory_stats: MemoryStats, +} + +/// This is used to handle child process during pvf prepare worker. 
+/// It prepares the artifact and tracks memory stats during preparation +/// and pipes back the response to the parent process +/// +/// # Arguments +/// +/// - `pvf`: `PvfPrepData` structure, containing data to prepare the artifact +/// +/// - `pipe_write`: A `PipeWriter` structure, the writing end of a pipe. +/// +/// - `preparation_timeout`: The timeout in `Duration`. +/// +/// - `prepare_job_kind`: The kind of prepare job. +/// +/// - `executor_params`: Deterministically serialized execution environment semantics. +/// +/// # Returns +/// +/// - If any error occur, pipe response back with `PrepareError`. +/// +/// - If success, pipe back `JobResponse`. +fn handle_child_process( + pvf: PvfPrepData, + mut pipe_write: PipeWriter, + preparation_timeout: Duration, + prepare_job_kind: PrepareJobKind, + executor_params: Arc, +) -> ! { + let worker_job_pid = process::id(); + gum::debug!( + target: LOG_TARGET, + %worker_job_pid, + ?prepare_job_kind, + ?preparation_timeout, + "worker job: preparing artifact", + ); + + // Conditional variable to notify us when a thread is done. + let condvar = thread::get_condvar(); + + // Run the memory tracker in a regular, non-worker thread. + #[cfg(any(target_os = "linux", feature = "jemalloc-allocator"))] + let condvar_memory = Arc::clone(&condvar); + #[cfg(any(target_os = "linux", feature = "jemalloc-allocator"))] + let memory_tracker_thread = std::thread::spawn(|| memory_tracker_loop(condvar_memory)); + + start_memory_tracking( + pipe_write.as_raw_fd(), + executor_params.prechecking_max_memory().map(|v| { + v.try_into().unwrap_or_else(|_| { + gum::warn!( + LOG_TARGET, + %worker_job_pid, + "Illegal pre-checking max memory value {} discarded", + v, + ); + 0 + }) + }), + ); + + let cpu_time_start = ProcessTime::now(); + + // Spawn a new thread that runs the CPU time monitor. + let (cpu_time_monitor_tx, cpu_time_monitor_rx) = channel::<()>(); + let cpu_time_monitor_thread = thread::spawn_worker_thread( + "cpu time monitor thread", + move || cpu_time_monitor_loop(cpu_time_start, preparation_timeout, cpu_time_monitor_rx), + Arc::clone(&condvar), + WaitOutcome::TimedOut, + ) + .unwrap_or_else(|err| { + send_child_response(&mut pipe_write, Err(PrepareError::IoErr(err.to_string()))) + }); + + let prepare_thread = spawn_worker_thread( + "prepare worker", + move || { + #[allow(unused_mut)] + let mut result = prepare_artifact(pvf); + + // Get the `ru_maxrss` stat. If supported, call getrusage for the thread. + #[cfg(target_os = "linux")] + let mut result = result.map(|artifact| (artifact, get_max_rss_thread())); + + // If we are pre-checking, check for runtime construction errors. + // + // As pre-checking is more strict than just preparation in terms of memory + // and time, it is okay to do extra checks here. This takes negligible time + // anyway. 
+ if let PrepareJobKind::Prechecking = prepare_job_kind { + result = result.and_then(|output| { + runtime_construction_check(output.0.as_ref(), &executor_params)?; + Ok(output) + }); + } + result + }, + Arc::clone(&condvar), + WaitOutcome::Finished, + ) + .unwrap_or_else(|err| { + send_child_response(&mut pipe_write, Err(PrepareError::IoErr(err.to_string()))) + }); + + let outcome = thread::wait_for_threads(condvar); + + let peak_alloc = { + let peak = end_memory_tracking(); + gum::debug!( + target: LOG_TARGET, + %worker_job_pid, + "prepare job peak allocation is {} bytes", + peak, + ); + peak + }; + + let result = match outcome { + WaitOutcome::Finished => { + let _ = cpu_time_monitor_tx.send(()); + + match prepare_thread.join().unwrap_or_else(|err| { + send_child_response( + &mut pipe_write, + Err(PrepareError::JobError(stringify_panic_payload(err))), + ) + }) { + Err(err) => Err(err), + Ok(ok) => { + cfg_if::cfg_if! { + if #[cfg(target_os = "linux")] { + let (artifact, max_rss) = ok; + } else { + let artifact = ok; + } + } + + // Stop the memory stats worker and get its observed memory stats. + #[cfg(any(target_os = "linux", feature = "jemalloc-allocator"))] + let memory_tracker_stats = get_memory_tracker_loop_stats(memory_tracker_thread, process::id()); + + let memory_stats = MemoryStats { + #[cfg(any(target_os = "linux", feature = "jemalloc-allocator"))] + memory_tracker_stats, + #[cfg(target_os = "linux")] + max_rss: extract_max_rss_stat(max_rss, process::id()), + // Negative peak allocation values are legit; they are narrow + // corner cases and shouldn't affect overall statistics + // significantly + peak_tracked_alloc: if peak_alloc > 0 { peak_alloc as u64 } else { 0u64 }, + }; + + Ok(JobResponse { artifact, memory_stats }) + }, + } + }, + + // If the CPU thread is not selected, we signal it to end, the join handle is + // dropped and the thread will finish in the background. + WaitOutcome::TimedOut => match cpu_time_monitor_thread.join() { + Ok(Some(_cpu_time_elapsed)) => Err(PrepareError::TimedOut), + Ok(None) => Err(PrepareError::IoErr("error communicating over closed channel".into())), + Err(err) => Err(PrepareError::IoErr(stringify_panic_payload(err))), + }, + WaitOutcome::Pending => + unreachable!("we run wait_while until the outcome is no longer pending; qed"), + }; + + send_child_response(&mut pipe_write, result); +} + +/// Waits for child process to finish and handle child response from pipe. +/// +/// # Arguments +/// +/// - `pipe_read`: A `PipeReader` used to read data from the child process. +/// +/// - `child`: The child pid. +/// +/// - `temp_artifact_dest`: The destination `PathBuf` to write the temporary artifact file. +/// +/// - `worker_pid`: The PID of the child process. +/// +/// - `usage_before`: Resource usage statistics before executing the child process. +/// +/// - `timeout`: The maximum allowed time for the child process to finish, in `Duration`. +/// +/// # Returns +/// +/// - If the child send response without an error, this function returns `Ok(PrepareStats)` +/// containing memory and CPU usage statistics. +/// +/// - If the child send response with an error, it returns a `PrepareError` with that error. +/// +/// - If the child process timeout, it returns `PrepareError::TimedOut`. +fn handle_parent_process( + mut pipe_read: PipeReader, + child: Pid, + temp_artifact_dest: PathBuf, + worker_pid: u32, + usage_before: Usage, + timeout: Duration, +) -> Result { + // Read from the child. Don't decode unless the process exited normally, which we check later. 
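
The read that follows relies on pipe EOF semantics: `read_to_end` only returns once every write end of the pipe has been closed, which is why both parents drop their `pipe_writer` before reading (the "avoid deadlock" comments above). A tiny demonstration of that behaviour, using `os_pipe` as these crates now do; the thread stands in for the job process and is purely illustrative:

```rust
use std::io::{Read, Write};
use std::thread;

fn main() -> std::io::Result<()> {
    let (mut reader, writer) = os_pipe::pipe()?;

    // Stand-in for the job process: write the response, then close the write end
    // by dropping it.
    let producer = thread::spawn(move || {
        let mut writer = writer;
        writer.write_all(b"job response bytes").unwrap();
        // `writer` dropped here, closing the write end.
    });

    // Stand-in for the parent: `read_to_end` returns only once *all* write ends are
    // closed. Had the parent kept its own write end alive, this would block forever.
    let mut received = Vec::new();
    reader.read_to_end(&mut received)?;
    assert_eq!(received, b"job response bytes");

    producer.join().unwrap();
    Ok(())
}
```
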
+ let mut received_data = Vec::new(); + pipe_read + .read_to_end(&mut received_data) + .map_err(|err| PrepareError::IoErr(err.to_string()))?; + + let status = nix::sys::wait::waitpid(child, None); + gum::trace!( + target: LOG_TARGET, + %worker_pid, + "prepare worker received wait status from job: {:?}", + status, + ); + + let usage_after = nix::sys::resource::getrusage(UsageWho::RUSAGE_CHILDREN) + .map_err(|errno| error_from_errno("getrusage after", errno))?; + + // Using `getrusage` is needed to check whether child has timedout since we cannot rely on + // child to report its own time. + // As `getrusage` returns resource usage from all terminated child processes, + // it is necessary to subtract the usage before the current child process to isolate its cpu + // time + let cpu_tv = get_total_cpu_usage(usage_after) - get_total_cpu_usage(usage_before); + if cpu_tv >= timeout { + gum::warn!( + target: LOG_TARGET, + %worker_pid, + "prepare job took {}ms cpu time, exceeded prepare timeout {}ms", + cpu_tv.as_millis(), + timeout.as_millis(), + ); + return Err(PrepareError::TimedOut) + } + + match status { + Ok(WaitStatus::Exited(_pid, exit_status)) => { + let mut reader = io::BufReader::new(received_data.as_slice()); + let result = recv_child_response(&mut reader) + .map_err(|err| PrepareError::JobError(err.to_string()))?; + + match result { + Err(err) => Err(err), + Ok(response) => { + // The exit status should have been zero if no error occurred. + if exit_status != 0 { + return Err(PrepareError::JobError(format!( + "unexpected exit status: {}", + exit_status + ))) + } + + // Write the serialized artifact into a temp file. + // + // PVF host only keeps artifacts statuses in its memory, + // successfully compiled code gets stored on the disk (and + // consequently deserialized by execute-workers). The prepare worker + // is only required to send `Ok` to the pool to indicate the + // success. + gum::debug!( + target: LOG_TARGET, + %worker_pid, + "worker: writing artifact to {}", + temp_artifact_dest.display(), + ); + // Write to the temp file created by the host. + if let Err(err) = fs::write(&temp_artifact_dest, &response.artifact) { + return Err(PrepareError::IoErr(err.to_string())) + }; + + Ok(PrepareStats { + memory_stats: response.memory_stats, + cpu_time_elapsed: cpu_tv, + }) + }, + } + }, + // The job was killed by the given signal. + // + // The job gets SIGSYS on seccomp violations, but this signal may have been sent for some + // other reason, so we still need to check for seccomp violations elsewhere. + Ok(WaitStatus::Signaled(_pid, signal, _core_dump)) => + Err(PrepareError::JobDied(format!("received signal: {signal:?}"))), + Err(errno) => Err(error_from_errno("waitpid", errno)), + + // An attacker can make the child process return any exit status it wants. So we can treat + // all unexpected cases the same way. + Ok(unexpected_wait_status) => Err(PrepareError::JobDied(format!( + "unexpected status from wait: {unexpected_wait_status:?}" + ))), + } +} + +/// Calculate the total CPU time from the given `usage` structure, returned from +/// [`nix::sys::resource::getrusage`], and calculates the total CPU time spent, including both user +/// and system time. +/// +/// # Arguments +/// +/// - `rusage`: Contains resource usage information. +/// +/// # Returns +/// +/// Returns a `Duration` representing the total CPU time. 
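
Stripped of the artifact writing and CPU accounting, the wait-status handling above reduces to the usual fork/waitpid skeleton. A minimal, illustrative version follows; it assumes the `nix` and `libc` crates added by this patch and, like the workers, that the process is single-threaded at the point of the fork:

```rust
use nix::sys::wait::{waitpid, WaitStatus};
use nix::unistd::{fork, ForkResult};

fn main() {
    // SAFETY: illustration only. As with the workers, this is only sound because the
    // process is single-threaded when it forks.
    match unsafe { fork() } {
        Ok(ForkResult::Child) => {
            // The job would do its work and stream its result over the pipe here,
            // then exit with a status the parent can interpret.
            std::process::exit(libc::EXIT_SUCCESS);
        },
        Ok(ForkResult::Parent { child }) => match waitpid(child, None) {
            Ok(WaitStatus::Exited(_pid, code)) => println!("job exited with status {code}"),
            // e.g. SIGSYS on a seccomp violation, or a kill by the OOM killer.
            Ok(WaitStatus::Signaled(_pid, signal, _core_dumped)) =>
                println!("job killed by signal {signal:?}"),
            other => println!("unexpected wait status: {other:?}"),
        },
        Err(errno) => eprintln!("fork failed: {errno}"),
    }
}
```
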
+fn get_total_cpu_usage(rusage: Usage) -> Duration { + let micros = (((rusage.user_time().tv_sec() + rusage.system_time().tv_sec()) * 1_000_000) + + (rusage.system_time().tv_usec() + rusage.user_time().tv_usec()) as i64) as u64; + + return Duration::from_micros(micros) +} + +/// Get a job response. +fn recv_child_response(received_data: &mut io::BufReader<&[u8]>) -> io::Result { + let response_bytes = framed_recv_blocking(received_data)?; + JobResult::decode(&mut response_bytes.as_slice()).map_err(|e| { + io::Error::new( + io::ErrorKind::Other, + format!("prepare pvf recv_child_response: decode error: {:?}", e), + ) + }) +} + +/// Write a job response to the pipe and exit process after. +/// +/// # Arguments +/// +/// - `pipe_write`: A `PipeWriter` structure, the writing end of a pipe. +/// +/// - `response`: Child process response +fn send_child_response(pipe_write: &mut PipeWriter, response: JobResult) -> ! { + framed_send_blocking(pipe_write, response.encode().as_slice()) + .unwrap_or_else(|_| process::exit(libc::EXIT_FAILURE)); + + if response.is_ok() { + process::exit(libc::EXIT_SUCCESS) + } else { + process::exit(libc::EXIT_FAILURE) + } +} + +fn error_from_errno(context: &'static str, errno: Errno) -> PrepareError { + PrepareError::Kernel(format!("{}: {}: {}", context, errno, io::Error::last_os_error())) +} + +type JobResult = Result; + +/// Pre-encoded length-prefixed `Result::Err(PrepareError::OutOfMemory)` +const OOM_PAYLOAD: &[u8] = b"\x02\x00\x00\x00\x00\x00\x00\x00\x01\x08"; + +#[test] +fn pre_encoded_payloads() { + // NOTE: This must match the type of `response` in `send_child_response`. + let oom_unencoded: JobResult = Result::Err(PrepareError::OutOfMemory); + let oom_encoded = oom_unencoded.encode(); + // The payload is prefixed with its length in `framed_send`. + let mut oom_payload = oom_encoded.len().to_le_bytes().to_vec(); + oom_payload.extend(oom_encoded); + assert_eq!(oom_payload, OOM_PAYLOAD); +} diff --git a/polkadot/node/core/pvf/src/error.rs b/polkadot/node/core/pvf/src/error.rs index 87ef0b54a04..7fdb8c56ec9 100644 --- a/polkadot/node/core/pvf/src/error.rs +++ b/polkadot/node/core/pvf/src/error.rs @@ -33,36 +33,37 @@ pub enum ValidationError { pub enum InvalidCandidate { /// PVF preparation ended up with a deterministic error. PrepareError(String), - /// The failure is reported by the execution worker. The string contains the error message. - WorkerReportedError(String), - /// The worker has died during validation of a candidate. That may fall in one of the following - /// categories, which we cannot distinguish programmatically: + /// The candidate is reported to be invalid by the execution worker. The string contains the + /// error message. + WorkerReportedInvalid(String), + /// The worker process (not the job) has died during validation of a candidate. /// - /// (a) Some sort of transient glitch caused the worker process to abort. An example would be - /// that the host machine ran out of free memory and the OOM killer started killing the - /// processes, and in order to save the parent it will "sacrifice child" first. - /// - /// (b) The candidate triggered a code path that has lead to the process death. For example, - /// the PVF found a way to consume unbounded amount of resources and then it either - /// exceeded an `rlimit` (if set) or, again, invited OOM killer. Another possibility is a - /// bug in wasmtime allowed the PVF to gain control over the execution worker. - /// - /// We attribute such an event to an *invalid candidate* in either case. 
- /// - /// The rationale for this is that a glitch may lead to unfair rejecting candidate by a single - /// validator. If the glitch is somewhat more persistent the validator will reject all - /// candidate thrown at it and hopefully the operator notices it by decreased reward - /// performance of the validator. On the other hand, if the worker died because of (b) we would - /// have better chances to stop the attack. + /// It's unlikely that this is caused by malicious code since workers spawn separate job + /// processes, and those job processes are sandboxed. But, it is possible. We retry in this + /// case, and if the error persists, we assume it's caused by the candidate and vote against. AmbiguousWorkerDeath, /// PVF execution (compilation is not included) took more time than was allotted. HardTimeout, - /// A panic occurred and we can't be sure whether the candidate is really invalid or some - /// internal glitch occurred. Whenever we are unsure, we can never treat an error as internal - /// as we would abstain from voting. This is bad because if the issue was due to the candidate, - /// then all validators would abstain, stalling finality on the chain. So we will first retry - /// the candidate, and if the issue persists we are forced to vote invalid. - Panic(String), + /// The job process (not the worker) has died for one of the following reasons: + /// + /// (a) A seccomp violation occurred, most likely due to an attempt by malicious code to + /// execute arbitrary code. Note that there is no foolproof way to detect this if the operator + /// has seccomp auditing disabled. + /// + /// (b) The host machine ran out of free memory and the OOM killer started killing the + /// processes, and in order to save the parent it will "sacrifice child" first. + /// + /// (c) Some other reason, perhaps transient or perhaps caused by malicious code. + /// + /// We cannot treat this as an internal error because malicious code may have caused this. + AmbiguousJobDeath(String), + /// An unexpected error occurred in the job process and we can't be sure whether the candidate + /// is really invalid or some internal glitch occurred. Whenever we are unsure, we can never + /// treat an error as internal as we would abstain from voting. This is bad because if the + /// issue was due to the candidate, then all validators would abstain, stalling finality on the + /// chain. So we will first retry the candidate, and if the issue persists we are forced to + /// vote invalid. + JobError(String), } impl From for ValidationError { diff --git a/polkadot/node/core/pvf/src/execute/queue.rs b/polkadot/node/core/pvf/src/execute/queue.rs index aca604f0de2..257377df3f4 100644 --- a/polkadot/node/core/pvf/src/execute/queue.rs +++ b/polkadot/node/core/pvf/src/execute/queue.rs @@ -342,20 +342,27 @@ fn handle_job_finish( }, Outcome::InvalidCandidate { err, idle_worker } => ( Some(idle_worker), - Err(ValidationError::InvalidCandidate(InvalidCandidate::WorkerReportedError(err))), + Err(ValidationError::InvalidCandidate(InvalidCandidate::WorkerReportedInvalid(err))), None, ), Outcome::InternalError { err } => (None, Err(ValidationError::InternalError(err)), None), + // Either the worker or the job timed out. Kill the worker in either case. Treated as + // definitely-invalid, because if we timed out, there's no time left for a retry. Outcome::HardTimeout => (None, Err(ValidationError::InvalidCandidate(InvalidCandidate::HardTimeout)), None), // "Maybe invalid" errors (will retry). 
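
The arms that follow implement the "maybe invalid" classification documented above: ambiguous worker or job deaths and job errors are surfaced as candidate-invalidity errors that the caller is expected to retry once before voting against. A hedged sketch of that policy with stand-in types; the actual retry handling lives in the code that calls into this crate, not in this file:

```rust
// Stand-ins for the error taxonomy described above. `HardTimeout` is included to show
// a case that is never retried.
#[derive(Debug, Clone, PartialEq)]
#[allow(dead_code)]
enum InvalidCandidate {
    AmbiguousWorkerDeath,
    AmbiguousJobDeath(String),
    JobError(String),
    HardTimeout,
}

#[derive(Debug, Clone, PartialEq)]
enum Verdict {
    Valid,
    Invalid(InvalidCandidate),
}

fn is_retriable(err: &InvalidCandidate) -> bool {
    matches!(
        err,
        InvalidCandidate::AmbiguousWorkerDeath |
            InvalidCandidate::AmbiguousJobDeath(_) |
            InvalidCandidate::JobError(_)
    )
}

/// Retry once on a transient-looking failure; a repeat failure counts against the candidate.
fn validate_with_retry(mut attempt: impl FnMut() -> Verdict) -> Verdict {
    match attempt() {
        Verdict::Invalid(err) if is_retriable(&err) => attempt(),
        other => other,
    }
}

fn main() {
    // First try: the job process was killed (e.g. by the OOM killer); the retry succeeds.
    let mut tries = 0;
    let verdict = validate_with_retry(|| {
        tries += 1;
        if tries == 1 {
            Verdict::Invalid(InvalidCandidate::AmbiguousJobDeath("received signal: SIGKILL".into()))
        } else {
            Verdict::Valid
        }
    });
    assert_eq!(verdict, Verdict::Valid);
}
```
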
- Outcome::IoErr => ( + Outcome::WorkerIntfErr => ( None, Err(ValidationError::InvalidCandidate(InvalidCandidate::AmbiguousWorkerDeath)), None, ), - Outcome::Panic { err } => - (None, Err(ValidationError::InvalidCandidate(InvalidCandidate::Panic(err))), None), + Outcome::JobDied { err } => ( + None, + Err(ValidationError::InvalidCandidate(InvalidCandidate::AmbiguousJobDeath(err))), + None, + ), + Outcome::JobError { err } => + (None, Err(ValidationError::InvalidCandidate(InvalidCandidate::JobError(err))), None), }; queue.metrics.execute_finished(); diff --git a/polkadot/node/core/pvf/src/execute/worker_intf.rs b/polkadot/node/core/pvf/src/execute/worker_intf.rs index 61264f7d517..bf44ba01725 100644 --- a/polkadot/node/core/pvf/src/execute/worker_intf.rs +++ b/polkadot/node/core/pvf/src/execute/worker_intf.rs @@ -30,7 +30,7 @@ use futures_timer::Delay; use parity_scale_codec::{Decode, Encode}; use polkadot_node_core_pvf_common::{ error::InternalValidationError, - execute::{Handshake, Response}, + execute::{Handshake, WorkerResponse}, worker_dir, SecurityStatus, }; use polkadot_parachain_primitives::primitives::ValidationResult; @@ -88,19 +88,26 @@ pub enum Outcome { /// a trap. Errors related to the preparation process are not expected to be encountered by the /// execution workers. InvalidCandidate { err: String, idle_worker: IdleWorker }, + /// The execution time exceeded the hard limit. The worker is terminated. + HardTimeout, + /// An I/O error happened during communication with the worker. This may mean that the worker + /// process already died. The token is not returned in any case. + WorkerIntfErr, + /// The job process has died. We must kill the worker just in case. + /// + /// We cannot treat this as an internal error because malicious code may have caused this. + JobDied { err: String }, + /// An unexpected error occurred in the job process. + /// + /// Because malicious code can cause a job error, we must not treat it as an internal error. + JobError { err: String }, + /// An internal error happened during the validation. Such an error is most likely related to /// some transient glitch. /// /// Should only ever be used for errors independent of the candidate and PVF. Therefore it may /// be a problem with the worker, so we terminate it. InternalError { err: InternalValidationError }, - /// The execution time exceeded the hard limit. The worker is terminated. - HardTimeout, - /// An I/O error happened during communication with the worker. This may mean that the worker - /// process already died. The token is not returned in any case. - IoErr, - /// An unexpected panic has occurred in the execution worker. - Panic { err: String }, } /// Given the idle token of a worker and parameters of work, communicates with the worker and @@ -137,7 +144,7 @@ pub async fn start_work( ?error, "failed to send an execute request", ); - return Outcome::IoErr + return Outcome::WorkerIntfErr } // We use a generous timeout here. This is in addition to the one in the child process, in @@ -173,7 +180,7 @@ pub async fn start_work( ); } - return Outcome::IoErr + return Outcome::WorkerIntfErr }, Ok(response) => { // Check if any syscall violations occurred during the job. For now this is @@ -189,7 +196,7 @@ pub async fn start_work( ); } - if let Response::Ok{duration, ..} = response { + if let WorkerResponse::Ok{duration, ..} = response { if duration > execution_timeout { // The job didn't complete within the timeout. gum::warn!( @@ -201,7 +208,7 @@ pub async fn start_work( ); // Return a timeout error. 
- return Outcome::HardTimeout; + return Outcome::HardTimeout } } @@ -216,23 +223,25 @@ pub async fn start_work( validation_code_hash = ?artifact.id.code_hash, "execution worker exceeded lenient timeout for execution, child worker likely stalled", ); - Response::TimedOut + WorkerResponse::JobTimedOut }, }; match response { - Response::Ok { result_descriptor, duration } => Outcome::Ok { + WorkerResponse::Ok { result_descriptor, duration } => Outcome::Ok { result_descriptor, duration, idle_worker: IdleWorker { stream, pid, worker_dir }, }, - Response::InvalidCandidate(err) => Outcome::InvalidCandidate { + WorkerResponse::InvalidCandidate(err) => Outcome::InvalidCandidate { err, idle_worker: IdleWorker { stream, pid, worker_dir }, }, - Response::TimedOut => Outcome::HardTimeout, - Response::Panic(err) => Outcome::Panic { err }, - Response::InternalError(err) => Outcome::InternalError { err }, + WorkerResponse::JobTimedOut => Outcome::HardTimeout, + WorkerResponse::JobDied(err) => Outcome::JobDied { err }, + WorkerResponse::JobError(err) => Outcome::JobError { err }, + + WorkerResponse::InternalError(err) => Outcome::InternalError { err }, } }) .await @@ -306,9 +315,9 @@ async fn send_request( framed_send(stream, &execution_timeout.encode()).await } -async fn recv_response(stream: &mut UnixStream) -> io::Result { +async fn recv_response(stream: &mut UnixStream) -> io::Result { let response_bytes = framed_recv(stream).await?; - Response::decode(&mut &response_bytes[..]).map_err(|e| { + WorkerResponse::decode(&mut response_bytes.as_slice()).map_err(|e| { io::Error::new( io::ErrorKind::Other, format!("execute pvf recv_response: decode error: {:?}", e), diff --git a/polkadot/node/core/pvf/src/prepare/pool.rs b/polkadot/node/core/pvf/src/prepare/pool.rs index 6bb6ca5b644..8e02f540d32 100644 --- a/polkadot/node/core/pvf/src/prepare/pool.rs +++ b/polkadot/node/core/pvf/src/prepare/pool.rs @@ -339,17 +339,17 @@ fn handle_mux( spawned, worker, idle, - Err(PrepareError::CreateTmpFileErr(err)), + Err(PrepareError::CreateTmpFile(err)), ), // Return `Concluded`, but do not kill the worker since the error was on the host // side. - Outcome::RenameTmpFileErr { worker: idle, result: _, err, src, dest } => + Outcome::RenameTmpFile { worker: idle, result: _, err, src, dest } => handle_concluded_no_rip( from_pool, spawned, worker, idle, - Err(PrepareError::RenameTmpFileErr { err, src, dest }), + Err(PrepareError::RenameTmpFile { err, src, dest }), ), // Could not clear worker cache. Kill the worker so other jobs can't see the data. Outcome::ClearWorkerDir { err } => { @@ -387,6 +387,21 @@ fn handle_mux( Ok(()) }, + // The worker might still be usable, but we kill it just in case. + Outcome::JobDied(err) => { + if attempt_retire(metrics, spawned, worker) { + reply( + from_pool, + FromPool::Concluded { + worker, + rip: true, + result: Err(PrepareError::JobDied(err)), + }, + )?; + } + + Ok(()) + }, Outcome::TimedOut => { if attempt_retire(metrics, spawned, worker) { reply( diff --git a/polkadot/node/core/pvf/src/prepare/worker_intf.rs b/polkadot/node/core/pvf/src/prepare/worker_intf.rs index 0e50caf1feb..a22fa74b2fe 100644 --- a/polkadot/node/core/pvf/src/prepare/worker_intf.rs +++ b/polkadot/node/core/pvf/src/prepare/worker_intf.rs @@ -79,7 +79,7 @@ pub enum Outcome { CreateTmpFileErr { worker: IdleWorker, err: String }, /// The response from the worker is received, but the tmp file cannot be renamed (moved) to the /// final destination location. 
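
`RenameTmpFile` (the renamed variant just below) covers the last step of publishing a prepared artifact: as the comments in this patch describe, the worker writes the compiled artifact to a host-created temp file and the host then renames it into its final location. A small, generic sketch of that write-then-rename pattern; the paths here are invented for the example, while the hosts' real paths come from `worker_dir` and the artifacts cache:

```rust
use std::fs;
use std::io::Write;
use std::path::Path;

// Illustrative write-then-rename publication of an artifact. The rename is atomic on a
// single filesystem, so a reader never observes a half-written file; a failure in the
// rename step is what the `RenameTmpFile` outcome reports.
fn publish_artifact(tmp: &Path, dest: &Path, bytes: &[u8]) -> std::io::Result<()> {
    let mut file = fs::File::create(tmp)?;
    file.write_all(bytes)?;
    file.sync_all()?;
    fs::rename(tmp, dest)
}

fn main() -> std::io::Result<()> {
    let dir = std::env::temp_dir();
    let tmp = dir.join("wasmtime_artifact.tmp");
    let dest = dir.join("wasmtime_artifact.bin");

    publish_artifact(&tmp, &dest, b"compiled artifact bytes")?;
    assert_eq!(fs::read(&dest)?, b"compiled artifact bytes");

    fs::remove_file(&dest)?;
    Ok(())
}
```
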
- RenameTmpFileErr { + RenameTmpFile { worker: IdleWorker, result: PrepareResult, err: String, @@ -100,6 +100,10 @@ pub enum Outcome { IoErr(String), /// The worker ran out of memory and is aborting. The worker should be ripped. OutOfMemory, + /// The preparation job process died, due to OOM, a seccomp violation, or some other factor. + /// + /// The worker might still be usable, but we kill it just in case. + JobDied(String), } /// Given the idle token of a worker and parameters of work, communicates with the worker and @@ -187,21 +191,6 @@ pub async fn start_work( "failed to recv a prepare response: {:?}", err, ); - - // The worker died. Check if it was due to a seccomp violation. - // - // NOTE: Log, but don't change the outcome. Not all validators may have auditing - // enabled, so we don't want attackers to abuse a non-deterministic outcome. - for syscall in security::check_seccomp_violations_for_worker(audit_log_file, pid).await { - gum::error!( - target: LOG_TARGET, - worker_pid = %pid, - %syscall, - ?pvf, - "A forbidden syscall was attempted! This is a violation of our seccomp security policy. Report an issue ASAP!" - ); - } - Outcome::IoErr(err.to_string()) }, Err(_) => { @@ -236,6 +225,7 @@ async fn handle_response( Ok(result) => result, // Timed out on the child. This should already be logged by the child. Err(PrepareError::TimedOut) => return Outcome::TimedOut, + Err(PrepareError::JobDied(err)) => return Outcome::JobDied(err), Err(PrepareError::OutOfMemory) => return Outcome::OutOfMemory, Err(_) => return Outcome::Concluded { worker, result }, }; @@ -272,7 +262,7 @@ async fn handle_response( artifact_path.display(), err, ); - Outcome::RenameTmpFileErr { + Outcome::RenameTmpFile { worker, result, err: format!("{:?}", err), diff --git a/polkadot/node/core/pvf/src/testing.rs b/polkadot/node/core/pvf/src/testing.rs index 4c038896f7f..400b65bfe7d 100644 --- a/polkadot/node/core/pvf/src/testing.rs +++ b/polkadot/node/core/pvf/src/testing.rs @@ -14,7 +14,7 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . -//! Various things for testing other crates. +//! Various utilities for testing. pub use crate::{ host::{EXECUTE_BINARY_NAME, PREPARE_BINARY_NAME}, @@ -59,27 +59,33 @@ pub fn validate_candidate( /// /// NOTE: This should only be called in dev code (tests, benchmarks) as it relies on the relative /// paths of the built workers. -pub fn get_and_check_worker_paths() -> (PathBuf, PathBuf) { +pub fn build_workers_and_get_paths(is_bench: bool) -> (PathBuf, PathBuf) { // Only needs to be called once for the current process. static WORKER_PATHS: OnceLock> = OnceLock::new(); - fn build_workers() { - let build_args = vec![ + fn build_workers(is_bench: bool) { + let mut build_args = vec![ "build", "--package=polkadot", "--bin=polkadot-prepare-worker", "--bin=polkadot-execute-worker", ]; - let exit_status = std::process::Command::new("cargo") + if is_bench { + // Benches require --release. Regular tests are debug (no flag needed). 
+ build_args.push("--release"); + } + let mut cargo = std::process::Command::new("cargo"); + let cmd = cargo // wasm runtime not needed .env("SKIP_WASM_BUILD", "1") .args(build_args) - .stdout(std::process::Stdio::piped()) - .status() - .expect("Failed to run the build program"); + .stdout(std::process::Stdio::piped()); + + println!("INFO: calling `{cmd:?}`"); + let exit_status = cmd.status().expect("Failed to run the build program"); if !exit_status.success() { - eprintln!("Failed to build workers: {}", exit_status.code().unwrap()); + eprintln!("ERROR: Failed to build workers: {}", exit_status.code().unwrap()); std::process::exit(1); } } @@ -95,23 +101,23 @@ pub fn get_and_check_worker_paths() -> (PathBuf, PathBuf) { // explain why a build happens if !prepare_worker_path.is_executable() { - eprintln!("Prepare worker does not exist or is not executable. Workers directory: {:?}", workers_path); + println!("WARN: Prepare worker does not exist or is not executable. Workers directory: {:?}", workers_path); } if !execute_worker_path.is_executable() { - eprintln!("Execute worker does not exist or is not executable. Workers directory: {:?}", workers_path); + println!("WARN: Execute worker does not exist or is not executable. Workers directory: {:?}", workers_path); } if let Ok(ver) = get_worker_version(&prepare_worker_path) { if ver != NODE_VERSION { - eprintln!("Prepare worker version {ver} does not match node version {NODE_VERSION}; worker path: {prepare_worker_path:?}"); + println!("WARN: Prepare worker version {ver} does not match node version {NODE_VERSION}; worker path: {prepare_worker_path:?}"); } } if let Ok(ver) = get_worker_version(&execute_worker_path) { if ver != NODE_VERSION { - eprintln!("Execute worker version {ver} does not match node version {NODE_VERSION}; worker path: {execute_worker_path:?}"); + println!("WARN: Execute worker version {ver} does not match node version {NODE_VERSION}; worker path: {execute_worker_path:?}"); } } - build_workers(); + build_workers(is_bench); Mutex::new((prepare_worker_path, execute_worker_path)) }); diff --git a/polkadot/node/core/pvf/tests/it/main.rs b/polkadot/node/core/pvf/tests/it/main.rs index 801b60884fa..d2d842cf84a 100644 --- a/polkadot/node/core/pvf/tests/it/main.rs +++ b/polkadot/node/core/pvf/tests/it/main.rs @@ -19,23 +19,23 @@ use assert_matches::assert_matches; use parity_scale_codec::Encode as _; use polkadot_node_core_pvf::{ - start, testing::get_and_check_worker_paths, Config, InvalidCandidate, Metrics, PrepareError, + start, testing::build_workers_and_get_paths, Config, InvalidCandidate, Metrics, PrepareError, PrepareJobKind, PrepareStats, PvfPrepData, ValidationError, ValidationHost, JOB_TIMEOUT_WALL_CLOCK_FACTOR, }; use polkadot_parachain_primitives::primitives::{BlockData, ValidationParams, ValidationResult}; use polkadot_primitives::{ExecutorParam, ExecutorParams}; -#[cfg(target_os = "linux")] -use rusty_fork::rusty_fork_test; use std::time::Duration; use tokio::sync::Mutex; mod adder; +#[cfg(target_os = "linux")] +mod process; mod worker_common; -const TEST_EXECUTION_TIMEOUT: Duration = Duration::from_secs(3); -const TEST_PREPARATION_TIMEOUT: Duration = Duration::from_secs(3); +const TEST_EXECUTION_TIMEOUT: Duration = Duration::from_secs(6); +const TEST_PREPARATION_TIMEOUT: Duration = Duration::from_secs(6); struct TestHost { cache_dir: tempfile::TempDir, @@ -51,7 +51,7 @@ impl TestHost { where F: FnOnce(&mut Config), { - let (prepare_worker_path, execute_worker_path) = get_and_check_worker_paths(); + let 
(prepare_worker_path, execute_worker_path) = build_workers_and_get_paths(false); let cache_dir = tempfile::tempdir().unwrap(); let mut config = Config::new( @@ -126,7 +126,26 @@ impl TestHost { } #[tokio::test] -async fn terminates_on_timeout() { +async fn prepare_job_terminates_on_timeout() { + let host = TestHost::new().await; + + let start = std::time::Instant::now(); + let result = host + .precheck_pvf(rococo_runtime::WASM_BINARY.unwrap(), Default::default()) + .await; + + match result { + Err(PrepareError::TimedOut) => {}, + r => panic!("{:?}", r), + } + + let duration = std::time::Instant::now().duration_since(start); + assert!(duration >= TEST_PREPARATION_TIMEOUT); + assert!(duration < TEST_PREPARATION_TIMEOUT * JOB_TIMEOUT_WALL_CLOCK_FACTOR); +} + +#[tokio::test] +async fn execute_job_terminates_on_timeout() { let host = TestHost::new().await; let start = std::time::Instant::now(); @@ -153,108 +172,6 @@ async fn terminates_on_timeout() { assert!(duration < TEST_EXECUTION_TIMEOUT * JOB_TIMEOUT_WALL_CLOCK_FACTOR); } -#[cfg(target_os = "linux")] -fn kill_by_sid_and_name(sid: i32, exe_name: &'static str) { - use procfs::process; - - let all_processes: Vec = process::all_processes() - .expect("Can't read /proc") - .filter_map(|p| match p { - Ok(p) => Some(p), // happy path - Err(e) => match e { - // process vanished during iteration, ignore it - procfs::ProcError::NotFound(_) => None, - x => { - panic!("some unknown error: {}", x); - }, - }, - }) - .collect(); - - for process in all_processes { - if process.stat().unwrap().session == sid && - process.exe().unwrap().to_str().unwrap().contains(exe_name) - { - assert_eq!(unsafe { libc::kill(process.pid(), 9) }, 0); - } - } -} - -// Run these tests in their own processes with rusty-fork. They work by each creating a new session, -// then killing the worker process that matches the session ID and expected worker name. -#[cfg(target_os = "linux")] -rusty_fork_test! { - // What happens when the prepare worker dies in the middle of a job? - #[test] - fn prepare_worker_killed_during_job() { - const PROCESS_NAME: &'static str = "polkadot-prepare-worker"; - - let rt = tokio::runtime::Runtime::new().unwrap(); - rt.block_on(async { - let host = TestHost::new().await; - - // Create a new session and get the session ID. - let sid = unsafe { libc::setsid() }; - assert!(sid > 0); - - let (result, _) = futures::join!( - // Choose a job that would normally take the entire timeout. - host.precheck_pvf(rococo_runtime::WASM_BINARY.unwrap(), Default::default()), - // Run a future that kills the job in the middle of the timeout. - async { - tokio::time::sleep(TEST_PREPARATION_TIMEOUT / 2).await; - kill_by_sid_and_name(sid, PROCESS_NAME); - } - ); - - assert_matches!(result, Err(PrepareError::IoErr(_))); - }) - } - - // What happens when the execute worker dies in the middle of a job? - #[test] - fn execute_worker_killed_during_job() { - const PROCESS_NAME: &'static str = "polkadot-execute-worker"; - - let rt = tokio::runtime::Runtime::new().unwrap(); - rt.block_on(async { - let host = TestHost::new().await; - - // Create a new session and get the session ID. - let sid = unsafe { libc::setsid() }; - assert!(sid > 0); - - // Prepare the artifact ahead of time. - let binary = halt::wasm_binary_unwrap(); - host.precheck_pvf(binary, Default::default()).await.unwrap(); - - let (result, _) = futures::join!( - // Choose an job that would normally take the entire timeout. 
- host.validate_candidate( - binary, - ValidationParams { - block_data: BlockData(Vec::new()), - parent_head: Default::default(), - relay_parent_number: 1, - relay_parent_storage_root: Default::default(), - }, - Default::default(), - ), - // Run a future that kills the job in the middle of the timeout. - async { - tokio::time::sleep(TEST_EXECUTION_TIMEOUT / 2).await; - kill_by_sid_and_name(sid, PROCESS_NAME); - } - ); - - assert_matches!( - result, - Err(ValidationError::InvalidCandidate(InvalidCandidate::AmbiguousWorkerDeath)) - ); - }) - } -} - #[cfg(feature = "ci-only-tests")] #[tokio::test] async fn ensure_parallel_execution() { diff --git a/polkadot/node/core/pvf/tests/it/process.rs b/polkadot/node/core/pvf/tests/it/process.rs new file mode 100644 index 00000000000..725d060ab91 --- /dev/null +++ b/polkadot/node/core/pvf/tests/it/process.rs @@ -0,0 +1,383 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! Test unexpected behaviors of the spawned processes. We test both worker processes (directly +//! spawned by the host) and job processes (spawned by the workers to securely perform PVF jobs). + +use super::TestHost; +use assert_matches::assert_matches; +use polkadot_node_core_pvf::{InvalidCandidate, PrepareError, ValidationError}; +use polkadot_parachain_primitives::primitives::{BlockData, ValidationParams}; +use procfs::process; +use rusty_fork::rusty_fork_test; +use std::time::Duration; + +const PREPARE_PROCESS_NAME: &'static str = "polkadot-prepare-worker"; +const EXECUTE_PROCESS_NAME: &'static str = "polkadot-execute-worker"; + +const SIGNAL_KILL: i32 = 9; +const SIGNAL_STOP: i32 = 19; + +fn send_signal_by_sid_and_name( + sid: i32, + exe_name: &'static str, + is_direct_child: bool, + signal: i32, +) { + let process = find_process_by_sid_and_name(sid, exe_name, is_direct_child); + assert_eq!(unsafe { libc::kill(process.pid(), signal) }, 0); +} +fn get_num_threads_by_sid_and_name(sid: i32, exe_name: &'static str, is_direct_child: bool) -> i64 { + let process = find_process_by_sid_and_name(sid, exe_name, is_direct_child); + process.stat().unwrap().num_threads +} + +fn find_process_by_sid_and_name( + sid: i32, + exe_name: &'static str, + is_direct_child: bool, +) -> process::Process { + let all_processes: Vec = process::all_processes() + .expect("Can't read /proc") + .filter_map(|p| match p { + Ok(p) => Some(p), // happy path + Err(e) => match e { + // process vanished during iteration, ignore it + procfs::ProcError::NotFound(_) => None, + x => { + panic!("some unknown error: {}", x); + }, + }, + }) + .collect(); + + let mut found = None; + for process in all_processes { + let stat = process.stat().unwrap(); + + if stat.session != sid || !process.exe().unwrap().to_str().unwrap().contains(exe_name) { + continue + } + // The workers are direct children of the current process, the worker job processes are not + // (they are children of the 
workers). + let process_is_direct_child = stat.ppid as u32 == std::process::id(); + if is_direct_child != process_is_direct_child { + continue + } + + if found.is_some() { + panic!("Found more than one process") + } + found = Some(process); + } + found.expect("Should have found the expected process") +} + +// Run these tests in their own processes with rusty-fork. They work by each creating a new session, +// then doing something with the child process that matches the session ID and expected process +// name. +rusty_fork_test! { + // What happens when the prepare worker (not the job) times out? + #[test] + fn prepare_worker_timeout() { + let rt = tokio::runtime::Runtime::new().unwrap(); + rt.block_on(async { + let host = TestHost::new().await; + + // Create a new session and get the session ID. + let sid = unsafe { libc::setsid() }; + assert!(sid > 0); + + let (result, _) = futures::join!( + // Choose a job that would normally take the entire timeout. + host.precheck_pvf(rococo_runtime::WASM_BINARY.unwrap(), Default::default()), + // Send a stop signal to pause the worker. + async { + tokio::time::sleep(Duration::from_secs(1)).await; + send_signal_by_sid_and_name(sid, PREPARE_PROCESS_NAME, true, SIGNAL_STOP); + } + ); + + assert_matches!(result, Err(PrepareError::TimedOut)); + }) + } + + // What happens when the execute worker (not the job) times out? + #[test] + fn execute_worker_timeout() { + let rt = tokio::runtime::Runtime::new().unwrap(); + rt.block_on(async { + let host = TestHost::new().await; + + // Create a new session and get the session ID. + let sid = unsafe { libc::setsid() }; + assert!(sid > 0); + + // Prepare the artifact ahead of time. + let binary = halt::wasm_binary_unwrap(); + host.precheck_pvf(binary, Default::default()).await.unwrap(); + + let (result, _) = futures::join!( + // Choose an job that would normally take the entire timeout. + host.validate_candidate( + binary, + ValidationParams { + block_data: BlockData(Vec::new()), + parent_head: Default::default(), + relay_parent_number: 1, + relay_parent_storage_root: Default::default(), + }, + Default::default(), + ), + // Send a stop signal to pause the worker. + async { + tokio::time::sleep(Duration::from_secs(1)).await; + send_signal_by_sid_and_name(sid, EXECUTE_PROCESS_NAME, true, SIGNAL_STOP); + } + ); + + assert_matches!( + result, + Err(ValidationError::InvalidCandidate(InvalidCandidate::HardTimeout)) + ); + }) + } + + // What happens when the prepare worker dies in the middle of a job? + #[test] + fn prepare_worker_killed_during_job() { + let rt = tokio::runtime::Runtime::new().unwrap(); + rt.block_on(async { + let host = TestHost::new().await; + + // Create a new session and get the session ID. + let sid = unsafe { libc::setsid() }; + assert!(sid > 0); + + let (result, _) = futures::join!( + // Choose a job that would normally take the entire timeout. + host.precheck_pvf(rococo_runtime::WASM_BINARY.unwrap(), Default::default()), + // Run a future that kills the job while it's running. + async { + tokio::time::sleep(Duration::from_secs(1)).await; + send_signal_by_sid_and_name(sid, PREPARE_PROCESS_NAME, true, SIGNAL_KILL); + } + ); + + assert_matches!(result, Err(PrepareError::IoErr(_))); + }) + } + + // What happens when the execute worker dies in the middle of a job? + #[test] + fn execute_worker_killed_during_job() { + let rt = tokio::runtime::Runtime::new().unwrap(); + rt.block_on(async { + let host = TestHost::new().await; + + // Create a new session and get the session ID. 
+ let sid = unsafe { libc::setsid() }; + assert!(sid > 0); + + // Prepare the artifact ahead of time. + let binary = halt::wasm_binary_unwrap(); + host.precheck_pvf(binary, Default::default()).await.unwrap(); + + let (result, _) = futures::join!( + // Choose an job that would normally take the entire timeout. + host.validate_candidate( + binary, + ValidationParams { + block_data: BlockData(Vec::new()), + parent_head: Default::default(), + relay_parent_number: 1, + relay_parent_storage_root: Default::default(), + }, + Default::default(), + ), + // Run a future that kills the job while it's running. + async { + tokio::time::sleep(Duration::from_secs(1)).await; + send_signal_by_sid_and_name(sid, EXECUTE_PROCESS_NAME, true, SIGNAL_KILL); + } + ); + + assert_matches!( + result, + Err(ValidationError::InvalidCandidate(InvalidCandidate::AmbiguousWorkerDeath)) + ); + }) + } + + // What happens when the forked prepare job dies in the middle of its job? + #[test] + fn forked_prepare_job_killed_during_job() { + let rt = tokio::runtime::Runtime::new().unwrap(); + rt.block_on(async { + let host = TestHost::new().await; + + // Create a new session and get the session ID. + let sid = unsafe { libc::setsid() }; + assert!(sid > 0); + + let (result, _) = futures::join!( + // Choose a job that would normally take the entire timeout. + host.precheck_pvf(rococo_runtime::WASM_BINARY.unwrap(), Default::default()), + // Run a future that kills the job while it's running. + async { + tokio::time::sleep(Duration::from_secs(1)).await; + send_signal_by_sid_and_name(sid, PREPARE_PROCESS_NAME, false, SIGNAL_KILL); + } + ); + + // Note that we get a more specific error if the job died than if the whole worker died. + assert_matches!( + result, + Err(PrepareError::JobDied(err)) if err == "received signal: SIGKILL" + ); + }) + } + + // What happens when the forked execute job dies in the middle of its job? + #[test] + fn forked_execute_job_killed_during_job() { + let rt = tokio::runtime::Runtime::new().unwrap(); + rt.block_on(async { + let host = TestHost::new().await; + + // Create a new session and get the session ID. + let sid = unsafe { libc::setsid() }; + assert!(sid > 0); + + // Prepare the artifact ahead of time. + let binary = halt::wasm_binary_unwrap(); + host.precheck_pvf(binary, Default::default()).await.unwrap(); + + let (result, _) = futures::join!( + // Choose a job that would normally take the entire timeout. + host.validate_candidate( + binary, + ValidationParams { + block_data: BlockData(Vec::new()), + parent_head: Default::default(), + relay_parent_number: 1, + relay_parent_storage_root: Default::default(), + }, + Default::default(), + ), + // Run a future that kills the job while it's running. + async { + tokio::time::sleep(Duration::from_secs(1)).await; + send_signal_by_sid_and_name(sid, EXECUTE_PROCESS_NAME, false, SIGNAL_KILL); + } + ); + + // Note that we get a more specific error if the job died than if the whole worker died. + assert_matches!( + result, + Err(ValidationError::InvalidCandidate(InvalidCandidate::AmbiguousJobDeath(err))) + if err == "received signal: SIGKILL" + ); + }) + } + + // Ensure that the spawned prepare worker is single-threaded. + // + // See `run_worker` for why we need this invariant. + #[test] + fn ensure_prepare_processes_have_correct_num_threads() { + let rt = tokio::runtime::Runtime::new().unwrap(); + rt.block_on(async { + let host = TestHost::new().await; + + // Create a new session and get the session ID. 
+ let sid = unsafe { libc::setsid() }; + assert!(sid > 0); + + let _ = futures::join!( + // Choose a job that would normally take the entire timeout. + host.precheck_pvf(rococo_runtime::WASM_BINARY.unwrap(), Default::default()), + // Run a future that kills the job while it's running. + async { + tokio::time::sleep(Duration::from_secs(1)).await; + assert_eq!( + get_num_threads_by_sid_and_name(sid, PREPARE_PROCESS_NAME, true), + 1 + ); + // Child job should have three threads: main thread, execute thread, CPU time + // monitor, and memory tracking. + assert_eq!( + get_num_threads_by_sid_and_name(sid, PREPARE_PROCESS_NAME, false), + 4 + ); + + // End the test. + send_signal_by_sid_and_name(sid, PREPARE_PROCESS_NAME, true, SIGNAL_KILL); + } + ); + }) + } + + // Ensure that the spawned execute worker is single-threaded. + // + // See `run_worker` for why we need this invariant. + #[test] + fn ensure_execute_processes_have_correct_num_threads() { + let rt = tokio::runtime::Runtime::new().unwrap(); + rt.block_on(async { + let host = TestHost::new().await; + + // Create a new session and get the session ID. + let sid = unsafe { libc::setsid() }; + assert!(sid > 0); + + // Prepare the artifact ahead of time. + let binary = halt::wasm_binary_unwrap(); + host.precheck_pvf(binary, Default::default()).await.unwrap(); + + let _ = futures::join!( + // Choose a job that would normally take the entire timeout. + host.validate_candidate( + binary, + ValidationParams { + block_data: BlockData(Vec::new()), + parent_head: Default::default(), + relay_parent_number: 1, + relay_parent_storage_root: Default::default(), + }, + Default::default(), + ), + // Run a future that tests the thread count while the worker is running. + async { + tokio::time::sleep(Duration::from_secs(1)).await; + assert_eq!( + get_num_threads_by_sid_and_name(sid, EXECUTE_PROCESS_NAME, true), + 1 + ); + // Child job should have three threads: main thread, execute thread, and CPU + // time monitor. + assert_eq!( + get_num_threads_by_sid_and_name(sid, EXECUTE_PROCESS_NAME, false), + 3 + ); + + // End the test. + send_signal_by_sid_and_name(sid, EXECUTE_PROCESS_NAME, true, SIGNAL_KILL); + } + ); + }) + } +} diff --git a/polkadot/node/core/pvf/tests/it/worker_common.rs b/polkadot/node/core/pvf/tests/it/worker_common.rs index df64980dc80..0d33af7e096 100644 --- a/polkadot/node/core/pvf/tests/it/worker_common.rs +++ b/polkadot/node/core/pvf/tests/it/worker_common.rs @@ -15,7 +15,7 @@ // along with Polkadot. If not, see . use polkadot_node_core_pvf::{ - testing::{get_and_check_worker_paths, spawn_with_program_path, SpawnErr}, + testing::{build_workers_and_get_paths, spawn_with_program_path, SpawnErr}, SecurityStatus, }; use std::{env, time::Duration}; @@ -23,7 +23,7 @@ use std::{env, time::Duration}; // Test spawning a program that immediately exits with a failure code. 
#[tokio::test] async fn spawn_immediate_exit() { - let (prepare_worker_path, _) = get_and_check_worker_paths(); + let (prepare_worker_path, _) = build_workers_and_get_paths(false); // There's no explicit `exit` subcommand in the worker; it will panic on an unknown // subcommand anyway @@ -41,7 +41,7 @@ async fn spawn_immediate_exit() { #[tokio::test] async fn spawn_timeout() { - let (_, execute_worker_path) = get_and_check_worker_paths(); + let (_, execute_worker_path) = build_workers_and_get_paths(false); let result = spawn_with_program_path( "integration-test", @@ -57,7 +57,7 @@ async fn spawn_timeout() { #[tokio::test] async fn should_connect() { - let (prepare_worker_path, _) = get_and_check_worker_paths(); + let (prepare_worker_path, _) = build_workers_and_get_paths(false); let _ = spawn_with_program_path( "integration-test", diff --git a/polkadot/roadmap/implementers-guide/src/node/utility/pvf-host-and-workers.md b/polkadot/roadmap/implementers-guide/src/node/utility/pvf-host-and-workers.md index 52129f9eb80..4dbb7980c1b 100644 --- a/polkadot/roadmap/implementers-guide/src/node/utility/pvf-host-and-workers.md +++ b/polkadot/roadmap/implementers-guide/src/node/utility/pvf-host-and-workers.md @@ -1,7 +1,11 @@ # PVF Host and Workers The PVF host is responsible for handling requests to prepare and execute PVF -code blobs, which it sends to PVF workers running in their own child processes. +code blobs, which it sends to PVF **workers** running in their own child +processes. + +While the workers are generally long-living, they also spawn one-off secure +**job processes** that perform the jobs. See "Job Processes" section below. This system has two high-levels goals that we will touch on here: *determinism* and *security*. @@ -36,8 +40,11 @@ execution request: not successful. 2. **Artifact missing:** The prepared artifact might have been deleted due to operator error or some bug in the system. -3. **Panic:** The worker thread panicked for some indeterminate reason, which - may or may not be independent of the candidate or PVF. +3. **Job errors:** For example, the worker thread panicked for some + indeterminate reason, which may or may not be independent of the candidate or + PVF. +4. **Internal errors:** See "Internal Errors" section. In this case, after the + retry we abstain from voting. ### Preparation timeouts @@ -62,10 +69,16 @@ more than the CPU time. ### Internal errors +An internal, or local, error is one that we treat as independent of the PVF +and/or candidate, i.e. local to the running machine. If this happens, then we +will first retry the job and if the errors persists, then we simply do not vote. +This prevents slashes, since otherwise our vote may not agree with that of the +other validators. + In general, for errors not raising a dispute we have to be very careful. This is -only sound, if we either: +only sound, if either: -1. Ruled out that error in pre-checking. If something is not checked in +1. We ruled out that error in pre-checking. If something is not checked in pre-checking, even if independent of the candidate and PVF, we must raise a dispute. 2. We are 100% confident that it is a hardware/local issue: Like corrupted file, @@ -75,11 +88,11 @@ Reasoning: Otherwise it would be possible to register a PVF where candidates can not be checked, but we don't get a dispute - so nobody gets punished. Second, we end up with a finality stall that is not going to resolve! 
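To make the retry policy described in this section concrete, here is a minimal sketch of the decision it implies. All names below (`ExecutionFailure`, `Decision`, `after_failed_execution`) are hypothetical and purely illustrative; the actual candidate-validation code is structured differently and does not expose these types.

```rust
/// Where a failed execution attempt was observed (illustrative only).
enum ExecutionFailure {
    /// Failure raised by the host or the long-lived worker itself, e.g. a
    /// corrupted artifact file or an I/O error. Treated as internal/local,
    /// i.e. independent of the candidate and the PVF.
    Internal,
    /// Failure reported by (or attributable to) the untrusted job process.
    /// Never treated as internal, since an attacker controls that code path.
    FromJob,
}

/// What the validator does next (illustrative only).
enum Decision {
    /// Run the job once more before concluding anything.
    Retry,
    /// Do not vote at all; avoids disagreeing with honest validators over a
    /// purely local problem.
    Abstain,
    /// Treat the candidate as invalid; keeps a malicious job from forcing
    /// abstentions and stalling finality.
    VoteInvalid,
}

fn after_failed_execution(failure: ExecutionFailure, already_retried: bool) -> Decision {
    match (failure, already_retried) {
        // First failure of either kind: retry once.
        (_, false) => Decision::Retry,
        // Persistent internal error: abstain rather than risk a wrong vote.
        (ExecutionFailure::Internal, true) => Decision::Abstain,
        // Persistent error surfaced by the untrusted job: vote invalid,
        // as explained in the paragraph below on job-process errors.
        (ExecutionFailure::FromJob, true) => Decision::VoteInvalid,
    }
}
```

For example, `after_failed_execution(ExecutionFailure::Internal, true)` yields `Decision::Abstain`, whereas the same persistent failure reported by the job yields `Decision::VoteInvalid`.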
-There are some error conditions where we can't be sure whether the candidate is -really invalid or some internal glitch occurred, e.g. panics. Whenever we are -unsure, we can never treat an error as internal as we would abstain from voting. -So we will first retry the candidate, and if the issue persists we are forced to -vote invalid. +Note that any error from the job process we cannot treat as internal. The job +runs untrusted code and an attacker can therefore return arbitrary errors. If +they were to return errors that we treat as internal, they could make us abstain +from voting. Since we are unsure if such errors are legitimate, we will first +retry the candidate, and if the issue persists we are forced to vote invalid. ## Security @@ -119,6 +132,20 @@ So what are we actually worried about? Things that come to mind: 6. **Intercepting and manipulating packages** - Effect very similar to the above, hard to do without also being able to do 4 or 5. +### Job Processes + +As mentioned above, our architecture includes long-living **worker processes** +and one-off **job processes*. This separation is important so that the handling +of untrusted code can be limited to the job processes. A hijacked job process +can therefore not interfere with other jobs running in separate processes. + +Furthermore, if an unexpected execution error occurred in the worker and not the +job, we generally can be confident that it has nothing to do with the candidate, +so we can abstain from voting. On the other hand, a hijacked job can send back +erroneous responses for candidates, so we know that we should not abstain from +voting on such errors from jobs. Otherwise, an attacker could trigger a finality +stall. (See "Internal Errors" section above.) + ### Restricting file-system access A basic security mechanism is to make sure that any process directly interfacing diff --git a/polkadot/scripts/list-syscalls/execute-worker-syscalls b/polkadot/scripts/list-syscalls/execute-worker-syscalls index 4a7a6618129..349af783cf1 100644 --- a/polkadot/scripts/list-syscalls/execute-worker-syscalls +++ b/polkadot/scripts/list-syscalls/execute-worker-syscalls @@ -16,6 +16,7 @@ 16 (ioctl) 19 (readv) 20 (writev) +22 (pipe) 24 (sched_yield) 25 (mremap) 28 (madvise) @@ -25,7 +26,9 @@ 45 (recvfrom) 46 (sendmsg) 56 (clone) +57 (fork) 60 (exit) +61 (wait4) 62 (kill) 72 (fcntl) 79 (getcwd) @@ -36,6 +39,7 @@ 89 (readlink) 96 (gettimeofday) 97 (getrlimit) +98 (getrusage) 99 (sysinfo) 102 (getuid) 110 (getppid) @@ -47,6 +51,7 @@ 158 (arch_prctl) 165 (mount) 166 (umount2) +186 (gettid) 200 (tkill) 202 (futex) 204 (sched_getaffinity) @@ -60,6 +65,7 @@ 263 (unlinkat) 272 (unshare) 273 (set_robust_list) +293 (pipe2) 302 (prlimit64) 318 (getrandom) 319 (memfd_create) diff --git a/polkadot/scripts/list-syscalls/prepare-worker-syscalls b/polkadot/scripts/list-syscalls/prepare-worker-syscalls index cab58e06692..05281b61591 100644 --- a/polkadot/scripts/list-syscalls/prepare-worker-syscalls +++ b/polkadot/scripts/list-syscalls/prepare-worker-syscalls @@ -16,6 +16,7 @@ 16 (ioctl) 19 (readv) 20 (writev) +22 (pipe) 24 (sched_yield) 25 (mremap) 28 (madvise) @@ -25,7 +26,9 @@ 45 (recvfrom) 46 (sendmsg) 56 (clone) +57 (fork) 60 (exit) +61 (wait4) 62 (kill) 72 (fcntl) 79 (getcwd) @@ -48,6 +51,7 @@ 158 (arch_prctl) 165 (mount) 166 (umount2) +186 (gettid) 200 (tkill) 202 (futex) 203 (sched_setaffinity) @@ -62,6 +66,7 @@ 263 (unlinkat) 272 (unshare) 273 (set_robust_list) +293 (pipe2) 302 (prlimit64) 309 (getcpu) 318 (getrandom) -- GitLab From 
31c38cea3def97044c328f3a9717eeb471c261fe Mon Sep 17 00:00:00 2001 From: Kristian Sosnin <48099298+slumber@users.noreply.github.com> Date: Tue, 14 Nov 2023 22:14:59 +0400 Subject: [PATCH 053/199] statement-distribution: support inactive local validator in grid (#1571) Fixes #1437 Co-authored-by: Sophia Gold --- .../statement-distribution/src/v2/mod.rs | 335 +++++++++++------- .../src/v2/tests/cluster.rs | 70 ++-- .../src/v2/tests/grid.rs | 241 ++++++++++--- .../src/v2/tests/mod.rs | 51 ++- .../src/v2/tests/requests.rs | 76 ++-- 5 files changed, 514 insertions(+), 259 deletions(-) diff --git a/polkadot/node/network/statement-distribution/src/v2/mod.rs b/polkadot/node/network/statement-distribution/src/v2/mod.rs index 6f39a5c504d..406f1130590 100644 --- a/polkadot/node/network/statement-distribution/src/v2/mod.rs +++ b/polkadot/node/network/statement-distribution/src/v2/mod.rs @@ -142,8 +142,27 @@ struct PerRelayParentState { session: SessionIndex, } +impl PerRelayParentState { + fn active_validator_state(&self) -> Option<&ActiveValidatorState> { + self.local_validator.as_ref().and_then(|local| local.active.as_ref()) + } + + fn active_validator_state_mut(&mut self) -> Option<&mut ActiveValidatorState> { + self.local_validator.as_mut().and_then(|local| local.active.as_mut()) + } +} + // per-relay-parent local validator state. struct LocalValidatorState { + // the grid-level communication at this relay-parent. + grid_tracker: GridTracker, + // additional fields in case local node is an active validator. + active: Option, + // local index actually exists in case node is inactive validator, however, + // it's not needed outside of `build_session_topology`, where it's known. +} + +struct ActiveValidatorState { // The index of the validator. index: ValidatorIndex, // our validator group @@ -152,8 +171,14 @@ struct LocalValidatorState { assignment: Option, // the 'direct-in-group' communication at this relay-parent. cluster_tracker: ClusterTracker, - // the grid-level communication at this relay-parent. - grid_tracker: GridTracker, +} + +#[derive(Debug, Copy, Clone)] +enum LocalValidatorIndex { + // Local node is an active validator. + Active(ValidatorIndex), + // Local node is not in active validator set. + Inactive, } #[derive(Debug)] @@ -164,7 +189,7 @@ struct PerSessionState { // is only `None` in the time between seeing a session and // getting the topology from the gossip-support subsystem grid_view: Option, - local_validator: Option, + local_validator: Option, } impl PerSessionState { @@ -178,15 +203,10 @@ impl PerSessionState { let local_validator = polkadot_node_subsystem_util::signing_key_and_index( session_info.validators.iter(), keystore, - ); + ) + .map(|(_, index)| LocalValidatorIndex::Active(index)); - PerSessionState { - session_info, - groups, - authority_lookup, - grid_view: None, - local_validator: local_validator.map(|(_key, index)| index), - } + PerSessionState { session_info, groups, authority_lookup, grid_view: None, local_validator } } fn supply_topology( @@ -204,6 +224,16 @@ impl PerSessionState { ); self.grid_view = Some(grid_view); + if local_index.is_some() { + self.local_validator.get_or_insert(LocalValidatorIndex::Inactive); + } + } + + /// Returns `true` if local is neither active or inactive validator node. + /// + /// `false` is also returned if session topology is not known yet. 
+ fn is_not_validator(&self) -> bool { + self.grid_view.is_some() && self.local_validator.is_none() } } @@ -554,13 +584,17 @@ pub(crate) async fn handle_active_leaves_update( .expect("either existed or just inserted; qed"); let local_validator = per_session.local_validator.and_then(|v| { - find_local_validator_state( - v, - &per_session.groups, - &availability_cores, - &group_rotation_info, - seconding_limit, - ) + if let LocalValidatorIndex::Active(idx) = v { + find_active_validator_state( + idx, + &per_session.groups, + &availability_cores, + &group_rotation_info, + seconding_limit, + ) + } else { + Some(LocalValidatorState { grid_tracker: GridTracker::default(), active: None }) + } }); state.per_relay_parent.insert( @@ -607,7 +641,7 @@ pub(crate) async fn handle_active_leaves_update( Ok(()) } -fn find_local_validator_state( +fn find_active_validator_state( validator_index: ValidatorIndex, groups: &Groups, availability_cores: &[CoreState], @@ -628,11 +662,13 @@ fn find_local_validator_state( let group_validators = groups.get(our_group)?.to_owned(); Some(LocalValidatorState { - index: validator_index, - group: our_group, - assignment: para, - cluster_tracker: ClusterTracker::new(group_validators, seconding_limit) - .expect("group is non-empty because we are in it; qed"), + active: Some(ActiveValidatorState { + index: validator_index, + group: our_group, + assignment: para, + cluster_tracker: ClusterTracker::new(group_validators, seconding_limit) + .expect("group is non-empty because we are in it; qed"), + }), grid_tracker: GridTracker::default(), }) } @@ -725,13 +761,17 @@ async fn send_peer_messages_for_relay_parent( for validator_id in find_validator_ids(peer_data.iter_known_discovery_ids(), |a| { per_session_state.authority_lookup.get(a) }) { - if let Some(local_validator_state) = relay_parent_state.local_validator.as_mut() { + if let Some(active) = relay_parent_state + .local_validator + .as_mut() + .and_then(|local| local.active.as_mut()) + { send_pending_cluster_statements( ctx, relay_parent, &(peer, peer_data.protocol_version), validator_id, - &mut local_validator_state.cluster_tracker, + &mut active.cluster_tracker, &state.candidates, &relay_parent_state.statement_store, ) @@ -1009,7 +1049,7 @@ pub(crate) async fn share_local_statement( }; let (local_index, local_assignment, local_group) = - match per_relay_parent.local_validator.as_ref() { + match per_relay_parent.active_validator_state() { None => return Err(JfyiError::InvalidShare), Some(l) => (l.index, l.assignment, l.group), }; @@ -1086,7 +1126,7 @@ pub(crate) async fn share_local_statement( } { - let l = per_relay_parent.local_validator.as_mut().expect("checked above; qed"); + let l = per_relay_parent.active_validator_state_mut().expect("checked above; qed"); l.cluster_tracker.note_issued(local_index, compact_statement.payload().clone()); } @@ -1173,31 +1213,41 @@ async fn circulate_statement( // We're not meant to circulate statements in the cluster until we have the confirmed // candidate. - let cluster_relevant = Some(local_validator.group) == statement_group; - let cluster_targets = if is_confirmed && cluster_relevant { - Some( - local_validator - .cluster_tracker - .targets() - .iter() - .filter(|&&v| { - local_validator + // + // Cluster is only relevant if local node is an active validator. 
+ let (cluster_relevant, cluster_targets, all_cluster_targets) = local_validator + .active + .as_mut() + .map(|active| { + let cluster_relevant = Some(active.group) == statement_group; + let cluster_targets = if is_confirmed && cluster_relevant { + Some( + active .cluster_tracker - .can_send(v, originator, compact_statement.clone()) - .is_ok() - }) - .filter(|&v| v != &local_validator.index) - .map(|v| (*v, DirectTargetKind::Cluster)), - ) - } else { - None - }; + .targets() + .iter() + .filter(|&&v| { + active + .cluster_tracker + .can_send(v, originator, compact_statement.clone()) + .is_ok() + }) + .filter(|&v| v != &active.index) + .map(|v| (*v, DirectTargetKind::Cluster)), + ) + } else { + None + }; + let all_cluster_targets = active.cluster_tracker.targets(); + (cluster_relevant, cluster_targets, all_cluster_targets) + }) + .unwrap_or((false, None, &[])); let grid_targets = local_validator .grid_tracker .direct_statement_targets(&per_session.groups, originator, &compact_statement) .into_iter() - .filter(|v| !cluster_relevant || !local_validator.cluster_tracker.targets().contains(v)) + .filter(|v| !cluster_relevant || !all_cluster_targets.contains(v)) .map(|v| (v, DirectTargetKind::Grid)); let targets = cluster_targets @@ -1229,18 +1279,17 @@ async fn circulate_statement( match kind { DirectTargetKind::Cluster => { + let active = local_validator + .active + .as_mut() + .expect("cluster target means local is active validator; qed"); + // At this point, all peers in the cluster should 'know' // the candidate, so we don't expect for this to fail. - if let Ok(()) = local_validator.cluster_tracker.can_send( - target, - originator, - compact_statement.clone(), - ) { - local_validator.cluster_tracker.note_sent( - target, - originator, - compact_statement.clone(), - ); + if let Ok(()) = + active.cluster_tracker.can_send(target, originator, compact_statement.clone()) + { + active.cluster_tracker.note_sent(target, originator, compact_statement.clone()); statement_to_peers.push(peer_id); } }, @@ -1387,7 +1436,9 @@ async fn handle_incoming_statement( None => { // we shouldn't be receiving statements unless we're a validator // this session. - modify_reputation(reputation, ctx.sender(), peer, COST_UNEXPECTED_STATEMENT).await; + if per_session.is_not_validator() { + modify_reputation(reputation, ctx.sender(), peer, COST_UNEXPECTED_STATEMENT).await; + } return }, Some(l) => l, @@ -1402,73 +1453,81 @@ async fn handle_incoming_statement( }, }; - let cluster_sender_index = { + let (active, cluster_sender_index) = { // This block of code only returns `Some` when both the originator and // the sending peer are in the cluster. 
+ let active = local_validator.active.as_mut(); - let allowed_senders = local_validator - .cluster_tracker - .senders_for_originator(statement.unchecked_validator_index()); + let allowed_senders = active + .as_ref() + .map(|active| { + active + .cluster_tracker + .senders_for_originator(statement.unchecked_validator_index()) + }) + .unwrap_or_default(); - allowed_senders + let idx = allowed_senders .iter() .filter_map(|i| session_info.discovery_keys.get(i.0 as usize).map(|ad| (*i, ad))) .filter(|(_, ad)| peer_state.is_authority(ad)) .map(|(i, _)| i) - .next() - }; - - let checked_statement = if let Some(cluster_sender_index) = cluster_sender_index { - match handle_cluster_statement( - relay_parent, - &mut local_validator.cluster_tracker, - per_relay_parent.session, - &per_session.session_info, - statement, - cluster_sender_index, - ) { - Ok(Some(s)) => s, - Ok(None) => return, - Err(rep) => { - modify_reputation(reputation, ctx.sender(), peer, rep).await; - return - }, - } - } else { - let grid_sender_index = local_validator - .grid_tracker - .direct_statement_providers( - &per_session.groups, - statement.unchecked_validator_index(), - statement.unchecked_payload(), - ) - .into_iter() - .filter_map(|i| session_info.discovery_keys.get(i.0 as usize).map(|ad| (i, ad))) - .filter(|(_, ad)| peer_state.is_authority(ad)) - .map(|(i, _)| i) .next(); + (active, idx) + }; - if let Some(grid_sender_index) = grid_sender_index { - match handle_grid_statement( + let checked_statement = + if let Some((active, cluster_sender_index)) = active.zip(cluster_sender_index) { + match handle_cluster_statement( relay_parent, - &mut local_validator.grid_tracker, + &mut active.cluster_tracker, per_relay_parent.session, - &per_session, + &per_session.session_info, statement, - grid_sender_index, + cluster_sender_index, ) { - Ok(s) => s, + Ok(Some(s)) => s, + Ok(None) => return, Err(rep) => { modify_reputation(reputation, ctx.sender(), peer, rep).await; return }, } } else { - // Not a cluster or grid peer. - modify_reputation(reputation, ctx.sender(), peer, COST_UNEXPECTED_STATEMENT).await; - return - } - }; + let grid_sender_index = local_validator + .grid_tracker + .direct_statement_providers( + &per_session.groups, + statement.unchecked_validator_index(), + statement.unchecked_payload(), + ) + .into_iter() + .filter_map(|i| session_info.discovery_keys.get(i.0 as usize).map(|ad| (i, ad))) + .filter(|(_, ad)| peer_state.is_authority(ad)) + .map(|(i, _)| i) + .next(); + + if let Some(grid_sender_index) = grid_sender_index { + match handle_grid_statement( + relay_parent, + &mut local_validator.grid_tracker, + per_relay_parent.session, + &per_session, + statement, + grid_sender_index, + ) { + Ok(s) => s, + Err(rep) => { + modify_reputation(reputation, ctx.sender(), peer, rep).await; + return + }, + } + } else { + // Not a cluster or grid peer. 
+ modify_reputation(reputation, ctx.sender(), peer, COST_UNEXPECTED_STATEMENT).await; + return + } + }; let statement = checked_statement.payload().clone(); let originator_index = checked_statement.validator_index(); @@ -1536,7 +1595,7 @@ async fn handle_incoming_statement( local_validator.grid_tracker.learned_fresh_statement( &per_session.groups, session_topology, - local_validator.index, + originator_index, &statement, ); } @@ -1834,7 +1893,7 @@ async fn provide_candidate_to_grid( gum::debug!( target: LOG_TARGET, ?candidate_hash, - local_validator = ?local_validator.index, + local_validator = ?per_session.local_validator, n_peers = manifest_peers_v2.len(), "Sending manifest to v2 peers" ); @@ -1853,7 +1912,7 @@ async fn provide_candidate_to_grid( gum::debug!( target: LOG_TARGET, ?candidate_hash, - local_validator = ?local_validator.index, + local_validator = ?per_session.local_validator, n_peers = manifest_peers_vstaging.len(), "Sending manifest to vstaging peers" ); @@ -1874,7 +1933,7 @@ async fn provide_candidate_to_grid( gum::debug!( target: LOG_TARGET, ?candidate_hash, - local_validator = ?local_validator.index, + local_validator = ?per_session.local_validator, n_peers = ack_peers_v2.len(), "Sending acknowledgement to v2 peers" ); @@ -1893,7 +1952,7 @@ async fn provide_candidate_to_grid( gum::debug!( target: LOG_TARGET, ?candidate_hash, - local_validator = ?local_validator.index, + local_validator = ?per_session.local_validator, n_peers = ack_peers_vstaging.len(), "Sending acknowledgement to vstaging peers" ); @@ -2086,13 +2145,15 @@ async fn handle_incoming_manifest_common<'a, Context>( let local_validator = match relay_parent_state.local_validator.as_mut() { None => { - modify_reputation( - reputation, - ctx.sender(), - peer, - COST_UNEXPECTED_MANIFEST_MISSING_KNOWLEDGE, - ) - .await; + if per_session.is_not_validator() { + modify_reputation( + reputation, + ctx.sender(), + peer, + COST_UNEXPECTED_MANIFEST_MISSING_KNOWLEDGE, + ) + .await; + } return None }, Some(x) => x, @@ -2188,7 +2249,7 @@ async fn handle_incoming_manifest_common<'a, Context>( target: LOG_TARGET, ?candidate_hash, from = ?sender_index, - local_index = ?local_validator.index, + local_index = ?per_session.local_validator, ?manifest_kind, "immediate ack, known candidate" ); @@ -2593,7 +2654,7 @@ async fn send_cluster_candidate_statements( Some(s) => s, }; - let local_group = match relay_parent_state.local_validator.as_mut() { + let local_group = match relay_parent_state.active_validator_state_mut() { None => return, Some(v) => v.group, }; @@ -2680,11 +2741,10 @@ pub(crate) async fn dispatch_requests(ctx: &mut Context, state: &mut St }) { // For cluster members, they haven't advertised any statements in particular, // but have surely sent us some. - if local_validator - .cluster_tracker - .knows_candidate(validator_id, identifier.candidate_hash) - { - return Some(StatementFilter::blank(local_validator.cluster_tracker.targets().len())) + if let Some(active) = local_validator.active.as_ref() { + if active.cluster_tracker.knows_candidate(validator_id, identifier.candidate_hash) { + return Some(StatementFilter::blank(active.cluster_tracker.targets().len())) + } } let filter = local_validator @@ -2715,7 +2775,11 @@ pub(crate) async fn dispatch_requests(ctx: &mut Context, state: &mut St } // don't require a backing threshold for cluster candidates. 
- let require_backing = relay_parent_state.local_validator.as_ref()?.group != group_index; + let local_validator = relay_parent_state.local_validator.as_ref()?; + let require_backing = local_validator + .active + .as_ref() + .map_or(true, |active| active.group != group_index); Some(RequestProperties { unwanted_mask, @@ -2973,7 +3037,11 @@ pub(crate) fn answer_request(state: &mut State, message: ResponderMessage) { for v in find_validator_ids(peer_data.iter_known_discovery_ids(), |a| { per_session.authority_lookup.get(a) }) { - if local_validator.cluster_tracker.can_request(v, *candidate_hash) { + if local_validator + .active + .as_ref() + .map_or(false, |active| active.cluster_tracker.can_request(v, *candidate_hash)) + { validator_id = Some(v); is_cluster = true; break @@ -3015,11 +3083,16 @@ pub(crate) fn answer_request(state: &mut State, message: ResponderMessage) { // Update bookkeeping about which statements peers have received. for statement in &statements { if is_cluster { - local_validator.cluster_tracker.note_sent( - validator_id, - statement.unchecked_validator_index(), - statement.unchecked_payload().clone(), - ); + local_validator + .active + .as_mut() + .expect("cluster peer means local is active validator; qed") + .cluster_tracker + .note_sent( + validator_id, + statement.unchecked_validator_index(), + statement.unchecked_payload().clone(), + ); } else { local_validator.grid_tracker.sent_or_received_direct_statement( &per_session.groups, diff --git a/polkadot/node/network/statement-distribution/src/v2/tests/cluster.rs b/polkadot/node/network/statement-distribution/src/v2/tests/cluster.rs index 80dec1d75ab..a9f5b537b32 100644 --- a/polkadot/node/network/statement-distribution/src/v2/tests/cluster.rs +++ b/polkadot/node/network/statement-distribution/src/v2/tests/cluster.rs @@ -23,7 +23,7 @@ fn share_seconded_circulated_to_cluster() { let config = TestConfig { validator_count: 20, group_size: 3, - local_validator: true, + local_validator: LocalRole::Validator, async_backing_params: None, }; @@ -34,7 +34,8 @@ fn share_seconded_circulated_to_cluster() { test_harness(config, |state, mut overseer| async move { let local_validator = state.local.clone().unwrap(); - let local_para = ParaId::from(local_validator.group_index.0); + let local_group_index = local_validator.group_index.unwrap(); + let local_para = ParaId::from(local_group_index.0); let test_leaf = state.make_dummy_leaf(relay_parent); @@ -52,7 +53,7 @@ fn share_seconded_circulated_to_cluster() { // peer B is in group, has no relay parent in view. // peer C is not in group, has relay parent in view. { - let other_group_validators = state.group_validators(local_validator.group_index, true); + let other_group_validators = state.group_validators(local_group_index, true); connect_peer( &mut overseer, @@ -130,7 +131,7 @@ fn cluster_valid_statement_before_seconded_ignored() { let config = TestConfig { validator_count: 20, group_size: 3, - local_validator: true, + local_validator: LocalRole::Validator, async_backing_params: None, }; @@ -139,12 +140,13 @@ fn cluster_valid_statement_before_seconded_ignored() { test_harness(config, |state, mut overseer| async move { let local_validator = state.local.clone().unwrap(); + let local_group_index = local_validator.group_index.unwrap(); let candidate_hash = CandidateHash(Hash::repeat_byte(42)); let test_leaf = state.make_dummy_leaf(relay_parent); // peer A is in group, has relay parent in view. 
- let other_group_validators = state.group_validators(local_validator.group_index, true); + let other_group_validators = state.group_validators(local_group_index, true); let v_a = other_group_validators[0]; connect_peer( &mut overseer, @@ -197,7 +199,7 @@ fn cluster_statement_bad_signature() { let config = TestConfig { validator_count: 20, group_size: 3, - local_validator: true, + local_validator: LocalRole::Validator, async_backing_params: None, }; @@ -206,12 +208,13 @@ fn cluster_statement_bad_signature() { test_harness(config, |state, mut overseer| async move { let local_validator = state.local.clone().unwrap(); + let local_group_index = local_validator.group_index.unwrap(); let candidate_hash = CandidateHash(Hash::repeat_byte(42)); let test_leaf = state.make_dummy_leaf(relay_parent); // peer A is in group, has relay parent in view. - let other_group_validators = state.group_validators(local_validator.group_index, true); + let other_group_validators = state.group_validators(local_group_index, true); let v_a = other_group_validators[0]; let v_b = other_group_validators[1]; @@ -277,7 +280,7 @@ fn useful_cluster_statement_from_non_cluster_peer_rejected() { let config = TestConfig { validator_count: 20, group_size: 3, - local_validator: true, + local_validator: LocalRole::Validator, async_backing_params: None, }; @@ -286,13 +289,13 @@ fn useful_cluster_statement_from_non_cluster_peer_rejected() { test_harness(config, |state, mut overseer| async move { let local_validator = state.local.clone().unwrap(); + let local_group_index = local_validator.group_index.unwrap(); let candidate_hash = CandidateHash(Hash::repeat_byte(42)); let test_leaf = state.make_dummy_leaf(relay_parent); // peer A is not in group, has relay parent in view. - let not_our_group = - if local_validator.group_index.0 == 0 { GroupIndex(1) } else { GroupIndex(0) }; + let not_our_group = if local_group_index.0 == 0 { GroupIndex(1) } else { GroupIndex(0) }; let that_group_validators = state.group_validators(not_our_group, false); let v_non = that_group_validators[0]; @@ -346,7 +349,7 @@ fn statement_from_non_cluster_originator_unexpected() { let config = TestConfig { validator_count: 20, group_size: 3, - local_validator: true, + local_validator: LocalRole::Validator, async_backing_params: None, }; @@ -355,12 +358,13 @@ fn statement_from_non_cluster_originator_unexpected() { test_harness(config, |state, mut overseer| async move { let local_validator = state.local.clone().unwrap(); + let local_group_index = local_validator.group_index.unwrap(); let candidate_hash = CandidateHash(Hash::repeat_byte(42)); let test_leaf = state.make_dummy_leaf(relay_parent); // peer A is not in group, has relay parent in view. 
- let other_group_validators = state.group_validators(local_validator.group_index, true); + let other_group_validators = state.group_validators(local_group_index, true); let v_a = other_group_validators[0]; connect_peer(&mut overseer, peer_a.clone(), None).await; @@ -408,7 +412,7 @@ fn seconded_statement_leads_to_request() { let config = TestConfig { validator_count: 20, group_size, - local_validator: true, + local_validator: LocalRole::Validator, async_backing_params: None, }; @@ -417,7 +421,8 @@ fn seconded_statement_leads_to_request() { test_harness(config, |state, mut overseer| async move { let local_validator = state.local.clone().unwrap(); - let local_para = ParaId::from(local_validator.group_index.0); + let local_group_index = local_validator.group_index.unwrap(); + let local_para = ParaId::from(local_group_index.0); let test_leaf = state.make_dummy_leaf(relay_parent); @@ -432,7 +437,7 @@ fn seconded_statement_leads_to_request() { let candidate_hash = candidate.hash(); // peer A is in group, has relay parent in view. - let other_group_validators = state.group_validators(local_validator.group_index, true); + let other_group_validators = state.group_validators(local_group_index, true); let v_a = other_group_validators[0]; connect_peer( @@ -503,7 +508,7 @@ fn cluster_statements_shared_seconded_first() { let config = TestConfig { validator_count: 20, group_size: 3, - local_validator: true, + local_validator: LocalRole::Validator, async_backing_params: None, }; @@ -512,7 +517,8 @@ fn cluster_statements_shared_seconded_first() { test_harness(config, |state, mut overseer| async move { let local_validator = state.local.clone().unwrap(); - let local_para = ParaId::from(local_validator.group_index.0); + let local_group_index = local_validator.group_index.unwrap(); + let local_para = ParaId::from(local_group_index.0); let test_leaf = state.make_dummy_leaf(relay_parent); @@ -528,7 +534,7 @@ fn cluster_statements_shared_seconded_first() { // peer A is in group, no relay parent in view. { - let other_group_validators = state.group_validators(local_validator.group_index, true); + let other_group_validators = state.group_validators(local_group_index, true); connect_peer( &mut overseer, @@ -624,7 +630,7 @@ fn cluster_accounts_for_implicit_view() { let config = TestConfig { validator_count: 20, group_size: 3, - local_validator: true, + local_validator: LocalRole::Validator, async_backing_params: None, }; @@ -634,7 +640,8 @@ fn cluster_accounts_for_implicit_view() { test_harness(config, |state, mut overseer| async move { let local_validator = state.local.clone().unwrap(); - let local_para = ParaId::from(local_validator.group_index.0); + let local_group_index = local_validator.group_index.unwrap(); + let local_para = ParaId::from(local_group_index.0); let test_leaf = state.make_dummy_leaf(relay_parent); @@ -651,7 +658,7 @@ fn cluster_accounts_for_implicit_view() { // peer A is in group, has relay parent in view. // peer B is in group, has no relay parent in view. 
{ - let other_group_validators = state.group_validators(local_validator.group_index, true); + let other_group_validators = state.group_validators(local_group_index, true); connect_peer( &mut overseer, @@ -775,7 +782,7 @@ fn cluster_messages_imported_after_confirmed_candidate_importable_check() { let config = TestConfig { validator_count: 20, group_size, - local_validator: true, + local_validator: LocalRole::Validator, async_backing_params: None, }; @@ -784,7 +791,8 @@ fn cluster_messages_imported_after_confirmed_candidate_importable_check() { test_harness(config, |state, mut overseer| async move { let local_validator = state.local.clone().unwrap(); - let local_para = ParaId::from(local_validator.group_index.0); + let local_group_index = local_validator.group_index.unwrap(); + let local_para = ParaId::from(local_group_index.0); let test_leaf = state.make_dummy_leaf(relay_parent); @@ -799,7 +807,7 @@ fn cluster_messages_imported_after_confirmed_candidate_importable_check() { let candidate_hash = candidate.hash(); // peer A is in group, has relay parent in view. - let other_group_validators = state.group_validators(local_validator.group_index, true); + let other_group_validators = state.group_validators(local_group_index, true); let v_a = other_group_validators[0]; { connect_peer( @@ -907,7 +915,7 @@ fn cluster_messages_imported_after_new_leaf_importable_check() { let config = TestConfig { validator_count: 20, group_size, - local_validator: true, + local_validator: LocalRole::Validator, async_backing_params: None, }; @@ -916,7 +924,8 @@ fn cluster_messages_imported_after_new_leaf_importable_check() { test_harness(config, |state, mut overseer| async move { let local_validator = state.local.clone().unwrap(); - let local_para = ParaId::from(local_validator.group_index.0); + let local_group_index = local_validator.group_index.unwrap(); + let local_para = ParaId::from(local_group_index.0); let test_leaf = state.make_dummy_leaf(relay_parent); @@ -931,7 +940,7 @@ fn cluster_messages_imported_after_new_leaf_importable_check() { let candidate_hash = candidate.hash(); // peer A is in group, has relay parent in view. - let other_group_validators = state.group_validators(local_validator.group_index, true); + let other_group_validators = state.group_validators(local_group_index, true); let v_a = other_group_validators[0]; { connect_peer( @@ -1048,7 +1057,7 @@ fn ensure_seconding_limit_is_respected() { let config = TestConfig { validator_count: 20, group_size: 4, - local_validator: true, + local_validator: LocalRole::Validator, async_backing_params: Some(AsyncBackingParams { max_candidate_depth: 1, allowed_ancestry_len: 3, @@ -1060,7 +1069,8 @@ fn ensure_seconding_limit_is_respected() { test_harness(config, |state, mut overseer| async move { let local_validator = state.local.clone().unwrap(); - let local_para = ParaId::from(local_validator.group_index.0); + let local_group_index = local_validator.group_index.unwrap(); + let local_para = ParaId::from(local_group_index.0); let test_leaf = state.make_dummy_leaf(relay_parent); @@ -1092,7 +1102,7 @@ fn ensure_seconding_limit_is_respected() { let candidate_hash_2 = candidate_2.hash(); let candidate_hash_3 = candidate_3.hash(); - let other_group_validators = state.group_validators(local_validator.group_index, true); + let other_group_validators = state.group_validators(local_group_index, true); let v_a = other_group_validators[0]; // peers A,B,C are in group, have relay parent in view. 
diff --git a/polkadot/node/network/statement-distribution/src/v2/tests/grid.rs b/polkadot/node/network/statement-distribution/src/v2/tests/grid.rs index 5b1dabfc8a0..9802db06082 100644 --- a/polkadot/node/network/statement-distribution/src/v2/tests/grid.rs +++ b/polkadot/node/network/statement-distribution/src/v2/tests/grid.rs @@ -29,7 +29,7 @@ fn backed_candidate_leads_to_advertisement() { let config = TestConfig { validator_count, group_size, - local_validator: true, + local_validator: LocalRole::Validator, async_backing_params: None, }; @@ -41,7 +41,8 @@ fn backed_candidate_leads_to_advertisement() { test_harness(config, |state, mut overseer| async move { let local_validator = state.local.clone().unwrap(); - let local_para = ParaId::from(local_validator.group_index.0); + let local_group_index = local_validator.group_index.unwrap(); + let local_para = ParaId::from(local_group_index.0); let test_leaf = state.make_dummy_leaf(relay_parent); @@ -55,9 +56,9 @@ fn backed_candidate_leads_to_advertisement() { ); let candidate_hash = candidate.hash(); - let other_group_validators = state.group_validators(local_validator.group_index, true); + let other_group_validators = state.group_validators(local_group_index, true); let target_group_validators = - state.group_validators((local_validator.group_index.0 + 1).into(), true); + state.group_validators((local_group_index.0 + 1).into(), true); let v_a = other_group_validators[0]; let v_b = other_group_validators[1]; let v_c = target_group_validators[0]; @@ -219,7 +220,7 @@ fn backed_candidate_leads_to_advertisement() { assert_eq!(manifest, BackedCandidateManifest { relay_parent, candidate_hash, - group_index: local_validator.group_index, + group_index: local_group_index, para_id: local_para, parent_head_data_hash: pvd.parent_head.hash(), statement_knowledge: StatementFilter { @@ -244,7 +245,7 @@ fn received_advertisement_before_confirmation_leads_to_request() { let config = TestConfig { validator_count, group_size, - local_validator: true, + local_validator: LocalRole::Validator, async_backing_params: None, }; @@ -256,9 +257,9 @@ fn received_advertisement_before_confirmation_leads_to_request() { test_harness(config, |state, mut overseer| async move { let local_validator = state.local.clone().unwrap(); + let local_group_index = local_validator.group_index.unwrap(); - let other_group = - next_group_index(local_validator.group_index, validator_count, group_size); + let other_group = next_group_index(local_group_index, validator_count, group_size); let other_para = ParaId::from(other_group.0); let test_leaf = state.make_dummy_leaf(relay_parent); @@ -273,7 +274,7 @@ fn received_advertisement_before_confirmation_leads_to_request() { ); let candidate_hash = candidate.hash(); - let other_group_validators = state.group_validators(local_validator.group_index, true); + let other_group_validators = state.group_validators(local_group_index, true); let target_group_validators = state.group_validators(other_group, true); let v_a = other_group_validators[0]; let v_b = other_group_validators[1]; @@ -424,7 +425,7 @@ fn received_advertisement_after_backing_leads_to_acknowledgement() { let config = TestConfig { validator_count, group_size, - local_validator: true, + local_validator: LocalRole::Validator, async_backing_params: None, }; @@ -435,9 +436,9 @@ fn received_advertisement_after_backing_leads_to_acknowledgement() { test_harness(config, |state, mut overseer| async move { let local_validator = state.local.clone().unwrap(); + let local_group_index = 
local_validator.group_index.unwrap(); - let other_group = - next_group_index(local_validator.group_index, validator_count, group_size); + let other_group = next_group_index(local_group_index, validator_count, group_size); let other_para = ParaId::from(other_group.0); let test_leaf = state.make_dummy_leaf(relay_parent); @@ -672,7 +673,7 @@ fn received_advertisement_after_confirmation_before_backing() { let config = TestConfig { validator_count, group_size, - local_validator: true, + local_validator: LocalRole::Validator, async_backing_params: None, }; @@ -683,9 +684,9 @@ fn received_advertisement_after_confirmation_before_backing() { test_harness(config, |state, mut overseer| async move { let local_validator = state.local.clone().unwrap(); + let local_group_index = local_validator.group_index.unwrap(); - let other_group = - next_group_index(local_validator.group_index, validator_count, group_size); + let other_group = next_group_index(local_group_index, validator_count, group_size); let other_para = ParaId::from(other_group.0); let test_leaf = state.make_dummy_leaf(relay_parent); @@ -858,7 +859,7 @@ fn additional_statements_are_shared_after_manifest_exchange() { let config = TestConfig { validator_count, group_size, - local_validator: true, + local_validator: LocalRole::Validator, async_backing_params: None, }; @@ -869,9 +870,9 @@ fn additional_statements_are_shared_after_manifest_exchange() { test_harness(config, |state, mut overseer| async move { let local_validator = state.local.clone().unwrap(); + let local_group_index = local_validator.group_index.unwrap(); - let other_group = - next_group_index(local_validator.group_index, validator_count, group_size); + let other_group = next_group_index(local_group_index, validator_count, group_size); let other_para = ParaId::from(other_group.0); let test_leaf = state.make_dummy_leaf(relay_parent); @@ -1155,7 +1156,7 @@ fn advertisement_sent_when_peer_enters_relay_parent_view() { let config = TestConfig { validator_count, group_size, - local_validator: true, + local_validator: LocalRole::Validator, async_backing_params: None, }; @@ -1167,7 +1168,8 @@ fn advertisement_sent_when_peer_enters_relay_parent_view() { test_harness(config, |state, mut overseer| async move { let local_validator = state.local.clone().unwrap(); - let local_para = ParaId::from(local_validator.group_index.0); + let local_group_index = local_validator.group_index.unwrap(); + let local_para = ParaId::from(local_group_index.0); let test_leaf = state.make_dummy_leaf(relay_parent); @@ -1181,9 +1183,9 @@ fn advertisement_sent_when_peer_enters_relay_parent_view() { ); let candidate_hash = candidate.hash(); - let other_group_validators = state.group_validators(local_validator.group_index, true); + let other_group_validators = state.group_validators(local_group_index, true); let target_group_validators = - state.group_validators((local_validator.group_index.0 + 1).into(), true); + state.group_validators((local_group_index.0 + 1).into(), true); let v_a = other_group_validators[0]; let v_b = other_group_validators[1]; let v_c = target_group_validators[0]; @@ -1336,7 +1338,7 @@ fn advertisement_sent_when_peer_enters_relay_parent_view() { let expected_manifest = BackedCandidateManifest { relay_parent, candidate_hash, - group_index: local_validator.group_index, + group_index: local_group_index, para_id: local_para, parent_head_data_hash: pvd.parent_head.hash(), statement_knowledge: StatementFilter { @@ -1377,7 +1379,7 @@ fn advertisement_not_re_sent_when_peer_re_enters_view() { let config = 
TestConfig { validator_count, group_size, - local_validator: true, + local_validator: LocalRole::Validator, async_backing_params: None, }; @@ -1389,7 +1391,8 @@ fn advertisement_not_re_sent_when_peer_re_enters_view() { test_harness(config, |state, mut overseer| async move { let local_validator = state.local.clone().unwrap(); - let local_para = ParaId::from(local_validator.group_index.0); + let local_group_index = local_validator.group_index.unwrap(); + let local_para = ParaId::from(local_group_index.0); let test_leaf = state.make_dummy_leaf(relay_parent); @@ -1403,9 +1406,9 @@ fn advertisement_not_re_sent_when_peer_re_enters_view() { ); let candidate_hash = candidate.hash(); - let other_group_validators = state.group_validators(local_validator.group_index, true); + let other_group_validators = state.group_validators(local_group_index, true); let target_group_validators = - state.group_validators((local_validator.group_index.0 + 1).into(), true); + state.group_validators((local_group_index.0 + 1).into(), true); let v_a = other_group_validators[0]; let v_b = other_group_validators[1]; let v_c = target_group_validators[0]; @@ -1567,7 +1570,7 @@ fn advertisement_not_re_sent_when_peer_re_enters_view() { assert_eq!(manifest, BackedCandidateManifest { relay_parent, candidate_hash, - group_index: local_validator.group_index, + group_index: local_group_index, para_id: local_para, parent_head_data_hash: pvd.parent_head.hash(), statement_knowledge: StatementFilter { @@ -1599,7 +1602,7 @@ fn grid_statements_imported_to_backing() { let config = TestConfig { validator_count, group_size, - local_validator: true, + local_validator: LocalRole::Validator, async_backing_params: None, }; @@ -1610,9 +1613,9 @@ fn grid_statements_imported_to_backing() { test_harness(config, |state, mut overseer| async move { let local_validator = state.local.clone().unwrap(); + let local_group_index = local_validator.group_index.unwrap(); - let other_group = - next_group_index(local_validator.group_index, validator_count, group_size); + let other_group = next_group_index(local_group_index, validator_count, group_size); let other_para = ParaId::from(other_group.0); let test_leaf = state.make_dummy_leaf(relay_parent); @@ -1803,7 +1806,7 @@ fn advertisements_rejected_from_incorrect_peers() { let config = TestConfig { validator_count, group_size, - local_validator: true, + local_validator: LocalRole::Validator, async_backing_params: None, }; @@ -1815,9 +1818,9 @@ fn advertisements_rejected_from_incorrect_peers() { test_harness(config, |state, mut overseer| async move { let local_validator = state.local.clone().unwrap(); + let local_group_index = local_validator.group_index.unwrap(); - let other_group = - next_group_index(local_validator.group_index, validator_count, group_size); + let other_group = next_group_index(local_group_index, validator_count, group_size); let other_para = ParaId::from(other_group.0); let test_leaf = state.make_dummy_leaf(relay_parent); @@ -1832,12 +1835,12 @@ fn advertisements_rejected_from_incorrect_peers() { ); let candidate_hash = candidate.hash(); - let target_group_validators = state.group_validators(local_validator.group_index, true); - let other_group_validators = state.group_validators(other_group, true); - let v_a = target_group_validators[0]; - let v_b = target_group_validators[1]; - let v_c = other_group_validators[0]; - let v_d = other_group_validators[1]; + let other_group_validators = state.group_validators(local_group_index, true); + let target_group_validators = 
state.group_validators(other_group, true); + let v_a = other_group_validators[0]; + let v_b = other_group_validators[1]; + let v_c = target_group_validators[0]; + let v_d = target_group_validators[1]; // peer A is in group, has relay parent in view. // peer B is in group, has no relay parent in view. @@ -1948,7 +1951,7 @@ fn manifest_rejected_with_unknown_relay_parent() { let config = TestConfig { validator_count, group_size, - local_validator: true, + local_validator: LocalRole::Validator, async_backing_params: None, }; @@ -1959,9 +1962,9 @@ fn manifest_rejected_with_unknown_relay_parent() { test_harness(config, |state, mut overseer| async move { let local_validator = state.local.clone().unwrap(); + let local_group_index = local_validator.group_index.unwrap(); - let other_group = - next_group_index(local_validator.group_index, validator_count, group_size); + let other_group = next_group_index(local_group_index, validator_count, group_size); let other_para = ParaId::from(other_group.0); let test_leaf = state.make_dummy_leaf(relay_parent); @@ -2054,7 +2057,7 @@ fn manifest_rejected_when_not_a_validator() { let config = TestConfig { validator_count, group_size, - local_validator: false, + local_validator: LocalRole::None, async_backing_params: None, }; @@ -2156,7 +2159,7 @@ fn manifest_rejected_when_group_does_not_match_para() { let config = TestConfig { validator_count, group_size, - local_validator: true, + local_validator: LocalRole::Validator, async_backing_params: None, }; @@ -2166,9 +2169,9 @@ fn manifest_rejected_when_group_does_not_match_para() { test_harness(config, |state, mut overseer| async move { let local_validator = state.local.clone().unwrap(); + let local_group_index = local_validator.group_index.unwrap(); - let other_group = - next_group_index(local_validator.group_index, validator_count, group_size); + let other_group = next_group_index(local_group_index, validator_count, group_size); // Create a mismatch between group and para. 
let other_para = next_group_index(other_group, validator_count, group_size); let other_para = ParaId::from(other_para.0); @@ -2263,7 +2266,7 @@ fn peer_reported_for_advertisement_conflicting_with_confirmed_candidate() { let config = TestConfig { validator_count, group_size, - local_validator: true, + local_validator: LocalRole::Validator, async_backing_params: None, }; @@ -2274,9 +2277,9 @@ fn peer_reported_for_advertisement_conflicting_with_confirmed_candidate() { test_harness(config, |state, mut overseer| async move { let local_validator = state.local.clone().unwrap(); + let local_group_index = local_validator.group_index.unwrap(); - let other_group = - next_group_index(local_validator.group_index, validator_count, group_size); + let other_group = next_group_index(local_group_index, validator_count, group_size); let other_para = ParaId::from(other_group.0); let test_leaf = state.make_dummy_leaf(relay_parent); @@ -2454,3 +2457,141 @@ fn peer_reported_for_advertisement_conflicting_with_confirmed_candidate() { overseer }); } + +#[test] +fn inactive_local_participates_in_grid() { + let validator_count = 11; + let group_size = 3; + let config = TestConfig { + validator_count, + group_size, + local_validator: LocalRole::InactiveValidator, + async_backing_params: None, + }; + + let dummy_relay_parent = Hash::repeat_byte(2); + let relay_parent = Hash::repeat_byte(1); + let peer_a = PeerId::random(); + + test_harness(config, |state, mut overseer| async move { + let local_validator = state.local.clone().unwrap(); + assert_eq!(local_validator.validator_index.0, validator_count as u32); + + let group_idx = GroupIndex::from(0); + let para = ParaId::from(0); + + // Dummy leaf is needed to update topology. + let dummy_leaf = state.make_dummy_leaf(Hash::repeat_byte(2)); + let test_leaf = state.make_dummy_leaf(relay_parent); + + let (candidate, pvd) = make_candidate( + relay_parent, + 1, + para, + test_leaf.para_data(para).head_data.clone(), + vec![4, 5, 6].into(), + Hash::repeat_byte(42).into(), + ); + let candidate_hash = candidate.hash(); + + let first_group = state.group_validators(group_idx, true); + let v_a = first_group.last().unwrap().clone(); + let v_b = first_group.first().unwrap().clone(); + + { + connect_peer( + &mut overseer, + peer_a.clone(), + Some(vec![state.discovery_id(v_a)].into_iter().collect()), + ) + .await; + + send_peer_view_change(&mut overseer, peer_a.clone(), view![relay_parent]).await; + } + + activate_leaf(&mut overseer, &dummy_leaf, &state, true).await; + answer_expected_hypothetical_depth_request( + &mut overseer, + vec![], + Some(dummy_relay_parent), + false, + ) + .await; + + // Send gossip topology. + send_new_topology(&mut overseer, state.make_dummy_topology()).await; + activate_leaf(&mut overseer, &test_leaf, &state, false).await; + answer_expected_hypothetical_depth_request( + &mut overseer, + vec![], + Some(relay_parent), + false, + ) + .await; + + // Receive an advertisement from A. 
+ let manifest = BackedCandidateManifest { + relay_parent, + candidate_hash, + group_index: group_idx, + para_id: para, + parent_head_data_hash: pvd.parent_head.hash(), + statement_knowledge: StatementFilter { + seconded_in_group: bitvec::bitvec![u8, Lsb0; 1, 0, 1], + validated_in_group: bitvec::bitvec![u8, Lsb0; 1, 0, 1], + }, + }; + send_peer_message( + &mut overseer, + peer_a.clone(), + protocol_vstaging::StatementDistributionMessage::BackedCandidateManifest(manifest), + ) + .await; + + let statements = vec![ + state + .sign_statement( + v_a, + CompactStatement::Seconded(candidate_hash), + &SigningContext { parent_hash: relay_parent, session_index: 1 }, + ) + .as_unchecked() + .clone(), + state + .sign_statement( + v_b, + CompactStatement::Seconded(candidate_hash), + &SigningContext { parent_hash: relay_parent, session_index: 1 }, + ) + .as_unchecked() + .clone(), + ]; + // Inactive node requests this candidate. + handle_sent_request( + &mut overseer, + peer_a, + candidate_hash, + StatementFilter::blank(group_size), + candidate.clone(), + pvd.clone(), + statements, + ) + .await; + + for _ in 0..2 { + assert_matches!( + overseer.recv().await, + AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::ReportPeer(ReportPeerMessage::Single(p, r))) + if p == peer_a && r == BENEFIT_VALID_STATEMENT.into() => { } + ); + } + assert_matches!( + overseer.recv().await, + AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::ReportPeer(ReportPeerMessage::Single(p, r))) + if p == peer_a && r == BENEFIT_VALID_RESPONSE.into() => { } + ); + answer_expected_hypothetical_depth_request(&mut overseer, vec![], None, false).await; + + overseer + }); +} diff --git a/polkadot/node/network/statement-distribution/src/v2/tests/mod.rs b/polkadot/node/network/statement-distribution/src/v2/tests/mod.rs index 4150377a0c6..4e626977524 100644 --- a/polkadot/node/network/statement-distribution/src/v2/tests/mod.rs +++ b/polkadot/node/network/statement-distribution/src/v2/tests/mod.rs @@ -61,19 +61,30 @@ const DEFAULT_ASYNC_BACKING_PARAMETERS: AsyncBackingParams = // Some deterministic genesis hash for req/res protocol names const GENESIS_HASH: Hash = Hash::repeat_byte(0xff); +#[derive(Debug, Copy, Clone)] +enum LocalRole { + /// Active validator. + Validator, + /// Authority, not in active validator set. + InactiveValidator, + /// Not a validator. + None, +} + struct TestConfig { + // number of active validators. validator_count: usize, // how many validators to place in each group. group_size: usize, // whether the local node should be a validator - local_validator: bool, + local_validator: LocalRole, async_backing_params: Option, } #[derive(Debug, Clone)] struct TestLocalValidator { validator_index: ValidatorIndex, - group_index: GroupIndex, + group_index: Option, } struct TestState { @@ -99,7 +110,7 @@ impl TestState { let mut assignment_keys = Vec::new(); let mut validator_groups = Vec::new(); - let local_validator_pos = if config.local_validator { + let local_validator_pos = if let LocalRole::Validator = config.local_validator { // ensure local validator is always in a full group. 
Some(rng.gen_range(0..config.validator_count).saturating_sub(config.group_size - 1)) } else { @@ -128,13 +139,19 @@ impl TestState { } } - let local = if let Some(local_pos) = local_validator_pos { - Some(TestLocalValidator { + let local = match (config.local_validator, local_validator_pos) { + (LocalRole::Validator, Some(local_pos)) => Some(TestLocalValidator { validator_index: ValidatorIndex(local_pos as _), - group_index: GroupIndex((local_pos / config.group_size) as _), - }) - } else { - None + group_index: Some(GroupIndex((local_pos / config.group_size) as _)), + }), + (LocalRole::InactiveValidator, None) => { + discovery_keys.push(AuthorityDiscoveryPair::generate().0.public()); + Some(TestLocalValidator { + validator_index: ValidatorIndex(config.validator_count as u32), + group_index: None, + }) + }, + _ => None, }; let validator_public = validator_pubkeys(&validators); @@ -181,15 +198,23 @@ impl TestState { fn make_dummy_topology(&self) -> NewGossipTopology { let validator_count = self.config.validator_count; + let is_local_inactive = matches!(self.config.local_validator, LocalRole::InactiveValidator); + + let mut indices: Vec = (0..validator_count).collect(); + if is_local_inactive { + indices.push(validator_count); + } + NewGossipTopology { session: 1, topology: SessionGridTopology::new( - (0..validator_count).collect(), - (0..validator_count) + indices.clone(), + indices + .into_iter() .map(|i| TopologyPeerInfo { peer_ids: Vec::new(), validator_index: ValidatorIndex(i as u32), - discovery_id: AuthorityDiscoveryPair::generate().0.public(), + discovery_id: self.session_info.discovery_keys[i].clone(), }) .collect(), ), @@ -276,7 +301,7 @@ fn test_harness>( test: impl FnOnce(TestState, VirtualOverseer) -> T, ) { let pool = sp_core::testing::TaskExecutor::new(); - let keystore = if config.local_validator { + let keystore = if let LocalRole::Validator = config.local_validator { test_helpers::mock::make_ferdie_keystore() } else { Arc::new(LocalKeystore::in_memory()) as KeystorePtr diff --git a/polkadot/node/network/statement-distribution/src/v2/tests/requests.rs b/polkadot/node/network/statement-distribution/src/v2/tests/requests.rs index 4734d7a0f96..1eec8290fab 100644 --- a/polkadot/node/network/statement-distribution/src/v2/tests/requests.rs +++ b/polkadot/node/network/statement-distribution/src/v2/tests/requests.rs @@ -32,7 +32,7 @@ fn cluster_peer_allowed_to_send_incomplete_statements() { let config = TestConfig { validator_count: 20, group_size, - local_validator: true, + local_validator: LocalRole::Validator, async_backing_params: None, }; @@ -43,7 +43,8 @@ fn cluster_peer_allowed_to_send_incomplete_statements() { test_harness(config, |state, mut overseer| async move { let local_validator = state.local.clone().unwrap(); - let local_para = ParaId::from(local_validator.group_index.0); + let local_group_index = local_validator.group_index.unwrap(); + let local_para = ParaId::from(local_group_index.0); let test_leaf = state.make_dummy_leaf(relay_parent); @@ -57,7 +58,7 @@ fn cluster_peer_allowed_to_send_incomplete_statements() { ); let candidate_hash = candidate.hash(); - let other_group_validators = state.group_validators(local_validator.group_index, true); + let other_group_validators = state.group_validators(local_group_index, true); let v_a = other_group_validators[0]; let v_b = other_group_validators[1]; @@ -188,7 +189,7 @@ fn peer_reported_for_providing_statements_meant_to_be_masked_out() { let config = TestConfig { validator_count, group_size, - local_validator: true, + 
local_validator: LocalRole::Validator, async_backing_params: Some(AsyncBackingParams { // Makes `seconding_limit: 2` (easier to hit the limit). max_candidate_depth: 1, @@ -203,9 +204,9 @@ fn peer_reported_for_providing_statements_meant_to_be_masked_out() { test_harness(config, |state, mut overseer| async move { let local_validator = state.local.clone().unwrap(); + let local_group_index = local_validator.group_index.unwrap(); - let other_group = - next_group_index(local_validator.group_index, validator_count, group_size); + let other_group = next_group_index(local_group_index, validator_count, group_size); let other_para = ParaId::from(other_group.0); let test_leaf = state.make_dummy_leaf(relay_parent); @@ -475,7 +476,7 @@ fn peer_reported_for_not_enough_statements() { let config = TestConfig { validator_count, group_size, - local_validator: true, + local_validator: LocalRole::Validator, async_backing_params: None, }; @@ -486,9 +487,9 @@ fn peer_reported_for_not_enough_statements() { test_harness(config, |state, mut overseer| async move { let local_validator = state.local.clone().unwrap(); + let local_group_index = local_validator.group_index.unwrap(); - let other_group = - next_group_index(local_validator.group_index, validator_count, group_size); + let other_group = next_group_index(local_group_index, validator_count, group_size); let other_para = ParaId::from(other_group.0); let test_leaf = state.make_dummy_leaf(relay_parent); @@ -670,7 +671,7 @@ fn peer_reported_for_duplicate_statements() { let config = TestConfig { validator_count: 20, group_size, - local_validator: true, + local_validator: LocalRole::Validator, async_backing_params: None, }; @@ -681,7 +682,8 @@ fn peer_reported_for_duplicate_statements() { test_harness(config, |state, mut overseer| async move { let local_validator = state.local.clone().unwrap(); - let local_para = ParaId::from(local_validator.group_index.0); + let local_group_index = local_validator.group_index.unwrap(); + let local_para = ParaId::from(local_group_index.0); let test_leaf = state.make_dummy_leaf(relay_parent); @@ -695,7 +697,7 @@ fn peer_reported_for_duplicate_statements() { ); let candidate_hash = candidate.hash(); - let other_group_validators = state.group_validators(local_validator.group_index, true); + let other_group_validators = state.group_validators(local_group_index, true); let v_a = other_group_validators[0]; let v_b = other_group_validators[1]; @@ -830,7 +832,7 @@ fn peer_reported_for_providing_statements_with_invalid_signatures() { let config = TestConfig { validator_count: 20, group_size, - local_validator: true, + local_validator: LocalRole::Validator, async_backing_params: None, }; @@ -841,7 +843,8 @@ fn peer_reported_for_providing_statements_with_invalid_signatures() { test_harness(config, |state, mut overseer| async move { let local_validator = state.local.clone().unwrap(); - let local_para = ParaId::from(local_validator.group_index.0); + let local_group_index = local_validator.group_index.unwrap(); + let local_para = ParaId::from(local_group_index.0); let test_leaf = state.make_dummy_leaf(relay_parent); @@ -855,8 +858,8 @@ fn peer_reported_for_providing_statements_with_invalid_signatures() { ); let candidate_hash = candidate.hash(); - let other_group_validators = state.group_validators(local_validator.group_index, true); - state.group_validators((local_validator.group_index.0 + 1).into(), true); + let other_group_validators = state.group_validators(local_group_index, true); + state.group_validators((local_group_index.0 + 1).into(), 
true); let v_a = other_group_validators[0]; let v_b = other_group_validators[1]; @@ -968,7 +971,7 @@ fn peer_reported_for_providing_statements_with_wrong_validator_id() { let config = TestConfig { validator_count: 20, group_size, - local_validator: true, + local_validator: LocalRole::Validator, async_backing_params: None, }; @@ -979,7 +982,8 @@ fn peer_reported_for_providing_statements_with_wrong_validator_id() { test_harness(config, |state, mut overseer| async move { let local_validator = state.local.clone().unwrap(); - let local_para = ParaId::from(local_validator.group_index.0); + let local_group_index = local_validator.group_index.unwrap(); + let local_para = ParaId::from(local_group_index.0); let test_leaf = state.make_dummy_leaf(relay_parent); @@ -993,9 +997,8 @@ fn peer_reported_for_providing_statements_with_wrong_validator_id() { ); let candidate_hash = candidate.hash(); - let other_group_validators = state.group_validators(local_validator.group_index, true); - let next_group_validators = - state.group_validators((local_validator.group_index.0 + 1).into(), true); + let other_group_validators = state.group_validators(local_group_index, true); + let next_group_validators = state.group_validators((local_group_index.0 + 1).into(), true); let v_a = other_group_validators[0]; let v_c = next_group_validators[0]; @@ -1105,7 +1108,7 @@ fn local_node_sanity_checks_incoming_requests() { let config = TestConfig { validator_count: 20, group_size: 3, - local_validator: true, + local_validator: LocalRole::Validator, async_backing_params: None, }; @@ -1117,7 +1120,8 @@ fn local_node_sanity_checks_incoming_requests() { test_harness(config, |mut state, mut overseer| async move { let local_validator = state.local.clone().unwrap(); - let local_para = ParaId::from(local_validator.group_index.0); + let local_group_index = local_validator.group_index.unwrap(); + let local_para = ParaId::from(local_group_index.0); let test_leaf = state.make_dummy_leaf(relay_parent); @@ -1135,7 +1139,7 @@ fn local_node_sanity_checks_incoming_requests() { // peer B is in group, has no relay parent in view. // peer C is not in group, has relay parent in view. { - let other_group_validators = state.group_validators(local_validator.group_index, true); + let other_group_validators = state.group_validators(local_group_index, true); connect_peer( &mut overseer, @@ -1311,7 +1315,7 @@ fn local_node_checks_that_peer_can_request_before_responding() { let config = TestConfig { validator_count: 20, group_size: 3, - local_validator: true, + local_validator: LocalRole::Validator, async_backing_params: None, }; @@ -1321,7 +1325,8 @@ fn local_node_checks_that_peer_can_request_before_responding() { test_harness(config, |mut state, mut overseer| async move { let local_validator = state.local.clone().unwrap(); - let local_para = ParaId::from(local_validator.group_index.0); + let local_group_index = local_validator.group_index.unwrap(); + let local_para = ParaId::from(local_group_index.0); let test_leaf = state.make_dummy_leaf(relay_parent); @@ -1336,7 +1341,7 @@ fn local_node_checks_that_peer_can_request_before_responding() { let candidate_hash = candidate.hash(); // Peers A and B are in group and have relay parent in view. 
- let other_group_validators = state.group_validators(local_validator.group_index, true); + let other_group_validators = state.group_validators(local_group_index, true); connect_peer( &mut overseer, @@ -1515,7 +1520,7 @@ fn local_node_respects_statement_mask() { let config = TestConfig { validator_count, group_size, - local_validator: true, + local_validator: LocalRole::Validator, async_backing_params: None, }; @@ -1527,7 +1532,8 @@ fn local_node_respects_statement_mask() { test_harness(config, |mut state, mut overseer| async move { let local_validator = state.local.clone().unwrap(); - let local_para = ParaId::from(local_validator.group_index.0); + let local_group_index = local_validator.group_index.unwrap(); + let local_para = ParaId::from(local_group_index.0); let test_leaf = state.make_dummy_leaf(relay_parent); @@ -1541,9 +1547,9 @@ fn local_node_respects_statement_mask() { ); let candidate_hash = candidate.hash(); - let other_group_validators = state.group_validators(local_validator.group_index, true); + let other_group_validators = state.group_validators(local_group_index, true); let target_group_validators = - state.group_validators((local_validator.group_index.0 + 1).into(), true); + state.group_validators((local_group_index.0 + 1).into(), true); let v_a = other_group_validators[0]; let v_b = other_group_validators[1]; let v_c = target_group_validators[0]; @@ -1707,7 +1713,7 @@ fn local_node_respects_statement_mask() { assert_eq!(manifest, BackedCandidateManifest { relay_parent, candidate_hash, - group_index: local_validator.group_index, + group_index: local_group_index, para_id: local_para, parent_head_data_hash: pvd.parent_head.hash(), statement_knowledge: StatementFilter { @@ -1761,7 +1767,7 @@ fn should_delay_before_retrying_dropped_requests() { let config = TestConfig { validator_count, group_size, - local_validator: true, + local_validator: LocalRole::Validator, async_backing_params: None, }; @@ -1772,9 +1778,9 @@ fn should_delay_before_retrying_dropped_requests() { test_harness(config, |state, mut overseer| async move { let local_validator = state.local.clone().unwrap(); + let local_group_index = local_validator.group_index.unwrap(); - let other_group = - next_group_index(local_validator.group_index, validator_count, group_size); + let other_group = next_group_index(local_group_index, validator_count, group_size); let other_para = ParaId::from(other_group.0); let test_leaf = state.make_dummy_leaf(relay_parent); -- GitLab From fc12f435e3796d33018aab93a9e8cd851d4431d5 Mon Sep 17 00:00:00 2001 From: Alin Dima Date: Tue, 14 Nov 2023 20:48:32 +0200 Subject: [PATCH 054/199] add NodeFeatures field to HostConfiguration and runtime API (#2177) Adds a `NodeFeatures` bitfield value to the runtime `HostConfiguration`, with the purpose of coordinating the enabling of node-side features, such as: https://github.com/paritytech/polkadot-sdk/issues/628 and https://github.com/paritytech/polkadot-sdk/issues/598. These are features that require all validators enable them at the same time, assuming all/most nodes have upgraded their node versions. This PR doesn't add any feature yet. These are coming in future PRs. Also adds a runtime API for querying the state of the client features and an extrinsic for setting/unsetting a feature by its index in the bitfield. 
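To make the described behaviour concrete, here is a minimal, hedged sketch (not part of the patch) of the grow-and-set logic the new extrinsic applies to the `NodeFeatures` bitfield, together with how a consumer might read a bit back. The `Lsb0` ordering, the helper names, and the feature index used below are assumptions for illustration only; this PR assigns no feature bits yet.

```rust
// Illustrative sketch only: mirrors the set/unset behaviour described above.
// Assumes `NodeFeatures` is a `bitvec` bitfield; helper names are hypothetical.
use bitvec::prelude::*;

type NodeFeatures = BitVec<u8, Lsb0>;

/// Grow the bitfield if needed, then set/unset the bit at `index`
/// (the same shape as the `set_node_feature` extrinsic described above).
fn set_feature(features: &mut NodeFeatures, index: usize, value: bool) {
    if features.len() <= index {
        features.resize(index + 1, false);
    }
    features.set(index, value);
}

/// Read a feature bit; bits beyond the stored length count as disabled.
fn is_enabled(features: &NodeFeatures, index: usize) -> bool {
    features.get(index).map(|bit| *bit).unwrap_or(false)
}

fn main() {
    let mut features = NodeFeatures::new();
    set_feature(&mut features, 2, true); // hypothetical index; nothing is assigned yet
    assert!(is_enabled(&features, 2));
    assert!(!is_enabled(&features, 7));
}
```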
Note: originally part of: https://github.com/paritytech/polkadot-sdk/pull/1644, but posted as standalone to be reused by other PRs until the initial PR is merged --- Cargo.lock | 1 + .../src/blockchain_rpc_client.rs | 5 + .../src/rpc_client.rs | 12 +- .../emulated/chains/relays/rococo/src/lib.rs | 2 +- .../emulated/chains/relays/westend/src/lib.rs | 2 +- polkadot/node/core/runtime-api/src/cache.rs | 20 +- polkadot/node/core/runtime-api/src/lib.rs | 23 +- polkadot/node/core/runtime-api/src/tests.rs | 16 +- polkadot/node/subsystem-types/src/messages.rs | 17 +- .../subsystem-types/src/runtime_client.rs | 22 +- .../node/subsystem-util/src/runtime/mod.rs | 33 ++- polkadot/primitives/Cargo.toml | 2 +- polkadot/primitives/src/runtime_api.rs | 15 +- polkadot/primitives/src/vstaging/mod.rs | 5 + .../runtime/parachains/src/configuration.rs | 53 +++- .../src/configuration/benchmarking.rs | 2 + .../parachains/src/configuration/migration.rs | 1 + .../src/configuration/migration/v10.rs | 277 ++++++++++++++++++ .../src/configuration/migration/v9.rs | 107 ++++++- .../parachains/src/configuration/tests.rs | 8 + .../src/runtime_api_impl/vstaging.rs | 9 +- polkadot/runtime/rococo/src/lib.rs | 17 +- .../runtime_parachains_configuration.rs | 86 +++--- polkadot/runtime/westend/src/lib.rs | 19 +- .../runtime_parachains_configuration.rs | 86 +++--- 25 files changed, 709 insertions(+), 131 deletions(-) create mode 100644 polkadot/runtime/parachains/src/configuration/migration/v10.rs diff --git a/Cargo.lock b/Cargo.lock index d8308086822..196f580e0d5 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1527,6 +1527,7 @@ checksum = "1bc2832c24239b0141d5674bb9174f9d68a8b5b3f2753311927c172ca46f7e9c" dependencies = [ "funty", "radium", + "serde", "tap", "wyz", ] diff --git a/cumulus/client/relay-chain-minimal-node/src/blockchain_rpc_client.rs b/cumulus/client/relay-chain-minimal-node/src/blockchain_rpc_client.rs index 1e78df71154..a473b3bced0 100644 --- a/cumulus/client/relay-chain-minimal-node/src/blockchain_rpc_client.rs +++ b/cumulus/client/relay-chain-minimal-node/src/blockchain_rpc_client.rs @@ -24,6 +24,7 @@ use polkadot_overseer::RuntimeApiSubsystemClient; use polkadot_primitives::{ async_backing::{AsyncBackingParams, BackingState}, slashing, + vstaging::NodeFeatures, }; use sc_authority_discovery::{AuthorityDiscovery, Error as AuthorityDiscoveryError}; use sp_api::{ApiError, RuntimeApiInfo}; @@ -364,6 +365,10 @@ impl RuntimeApiSubsystemClient for BlockChainRpcClient { ) -> Result, ApiError> { Ok(self.rpc_client.parachain_host_para_backing_state(at, para_id).await?) } + + async fn node_features(&self, at: Hash) -> Result { + Ok(self.rpc_client.parachain_host_node_features(at).await?) 
+ } } #[async_trait::async_trait] diff --git a/cumulus/client/relay-chain-rpc-interface/src/rpc_client.rs b/cumulus/client/relay-chain-rpc-interface/src/rpc_client.rs index 90af334e133..cc993c6ff9f 100644 --- a/cumulus/client/relay-chain-rpc-interface/src/rpc_client.rs +++ b/cumulus/client/relay-chain-rpc-interface/src/rpc_client.rs @@ -31,7 +31,9 @@ use parity_scale_codec::{Decode, Encode}; use cumulus_primitives_core::{ relay_chain::{ async_backing::{AsyncBackingParams, BackingState}, - slashing, BlockNumber, CandidateCommitments, CandidateEvent, CandidateHash, + slashing, + vstaging::NodeFeatures, + BlockNumber, CandidateCommitments, CandidateEvent, CandidateHash, CommittedCandidateReceipt, CoreState, DisputeState, ExecutorParams, GroupRotationInfo, Hash as RelayHash, Header as RelayHeader, InboundHrmpMessage, OccupiedCoreAssumption, PvfCheckStatement, ScrapedOnChainVotes, SessionIndex, SessionInfo, ValidationCode, @@ -597,6 +599,14 @@ impl RelayChainRpcClient { .await } + pub async fn parachain_host_node_features( + &self, + at: RelayHash, + ) -> Result { + self.call_remote_runtime_function("ParachainHost_node_features", at, None::<()>) + .await + } + pub async fn parachain_host_disabled_validators( &self, at: RelayHash, diff --git a/cumulus/parachains/integration-tests/emulated/chains/relays/rococo/src/lib.rs b/cumulus/parachains/integration-tests/emulated/chains/relays/rococo/src/lib.rs index f806f4a5d9e..7ace9614710 100644 --- a/cumulus/parachains/integration-tests/emulated/chains/relays/rococo/src/lib.rs +++ b/cumulus/parachains/integration-tests/emulated/chains/relays/rococo/src/lib.rs @@ -24,7 +24,7 @@ use emulated_integration_tests_common::{ // Rococo declaration decl_test_relay_chains! { - #[api_version(8)] + #[api_version(9)] pub struct Rococo { genesis = genesis::genesis(), on_init = (), diff --git a/cumulus/parachains/integration-tests/emulated/chains/relays/westend/src/lib.rs b/cumulus/parachains/integration-tests/emulated/chains/relays/westend/src/lib.rs index d4ba1b6cfe7..2ba47250d56 100644 --- a/cumulus/parachains/integration-tests/emulated/chains/relays/westend/src/lib.rs +++ b/cumulus/parachains/integration-tests/emulated/chains/relays/westend/src/lib.rs @@ -24,7 +24,7 @@ use emulated_integration_tests_common::{ // Westend declaration decl_test_relay_chains! 
{ - #[api_version(8)] + #[api_version(9)] pub struct Westend { genesis = genesis::genesis(), on_init = (), diff --git a/polkadot/node/core/runtime-api/src/cache.rs b/polkadot/node/core/runtime-api/src/cache.rs index 69eea22b23b..8a7a3dc08b8 100644 --- a/polkadot/node/core/runtime-api/src/cache.rs +++ b/polkadot/node/core/runtime-api/src/cache.rs @@ -20,7 +20,7 @@ use schnellru::{ByLength, LruMap}; use sp_consensus_babe::Epoch; use polkadot_primitives::{ - async_backing, slashing, AuthorityDiscoveryId, BlockNumber, CandidateCommitments, + async_backing, slashing, vstaging, AuthorityDiscoveryId, BlockNumber, CandidateCommitments, CandidateEvent, CandidateHash, CommittedCandidateReceipt, CoreState, DisputeState, ExecutorParams, GroupRotationInfo, Hash, Id as ParaId, InboundDownwardMessage, InboundHrmpMessage, OccupiedCoreAssumption, PersistedValidationData, PvfCheckStatement, @@ -67,6 +67,7 @@ pub(crate) struct RequestResultCache { disabled_validators: LruMap>, para_backing_state: LruMap<(Hash, ParaId), Option>, async_backing_params: LruMap, + node_features: LruMap, } impl Default for RequestResultCache { @@ -100,6 +101,7 @@ impl Default for RequestResultCache { disabled_validators: LruMap::new(ByLength::new(DEFAULT_CACHE_CAP)), para_backing_state: LruMap::new(ByLength::new(DEFAULT_CACHE_CAP)), async_backing_params: LruMap::new(ByLength::new(DEFAULT_CACHE_CAP)), + node_features: LruMap::new(ByLength::new(DEFAULT_CACHE_CAP)), } } } @@ -446,6 +448,21 @@ impl RequestResultCache { self.minimum_backing_votes.insert(session_index, minimum_backing_votes); } + pub(crate) fn node_features( + &mut self, + session_index: SessionIndex, + ) -> Option<&vstaging::NodeFeatures> { + self.node_features.get(&session_index).map(|f| &*f) + } + + pub(crate) fn cache_node_features( + &mut self, + session_index: SessionIndex, + features: vstaging::NodeFeatures, + ) { + self.node_features.insert(session_index, features); + } + pub(crate) fn disabled_validators( &mut self, relay_parent: &Hash, @@ -540,4 +557,5 @@ pub(crate) enum RequestResult { DisabledValidators(Hash, Vec), ParaBackingState(Hash, ParaId, Option), AsyncBackingParams(Hash, async_backing::AsyncBackingParams), + NodeFeatures(SessionIndex, vstaging::NodeFeatures), } diff --git a/polkadot/node/core/runtime-api/src/lib.rs b/polkadot/node/core/runtime-api/src/lib.rs index bdcca08b10d..8689355c413 100644 --- a/polkadot/node/core/runtime-api/src/lib.rs +++ b/polkadot/node/core/runtime-api/src/lib.rs @@ -173,6 +173,8 @@ where .cache_para_backing_state((relay_parent, para_id), constraints), AsyncBackingParams(relay_parent, params) => self.requests_cache.cache_async_backing_params(relay_parent, params), + NodeFeatures(session_index, params) => + self.requests_cache.cache_node_features(session_index, params), } } @@ -313,6 +315,15 @@ where Some(Request::MinimumBackingVotes(index, sender)) } }, + Request::NodeFeatures(index, sender) => { + if let Some(value) = self.requests_cache.node_features(index) { + self.metrics.on_cached_request(); + let _ = sender.send(Ok(value.clone())); + None + } else { + Some(Request::NodeFeatures(index, sender)) + } + }, } } @@ -408,6 +419,9 @@ where macro_rules! 
query { ($req_variant:ident, $api_name:ident ($($param:expr),*), ver = $version:expr, $sender:expr) => {{ + query!($req_variant, $api_name($($param),*), ver = $version, $sender, result = ( relay_parent $(, $param )* ) ) + }}; + ($req_variant:ident, $api_name:ident ($($param:expr),*), ver = $version:expr, $sender:expr, result = ( $($results:expr),* ) ) => {{ let sender = $sender; let version: u32 = $version; // enforce type for the version expression let runtime_version = client.api_version_parachain_host(relay_parent).await @@ -441,7 +455,7 @@ where metrics.on_request(res.is_ok()); let _ = sender.send(res.clone()); - res.ok().map(|res| RequestResult::$req_variant(relay_parent, $( $param, )* res)) + res.ok().map(|res| RequestResult::$req_variant($( $results, )* res)) }} } @@ -591,5 +605,12 @@ where sender ) }, + Request::NodeFeatures(index, sender) => query!( + NodeFeatures, + node_features(), + ver = Request::NODE_FEATURES_RUNTIME_REQUIREMENT, + sender, + result = (index) + ), } } diff --git a/polkadot/node/core/runtime-api/src/tests.rs b/polkadot/node/core/runtime-api/src/tests.rs index 979b3587d26..b939bffb0e7 100644 --- a/polkadot/node/core/runtime-api/src/tests.rs +++ b/polkadot/node/core/runtime-api/src/tests.rs @@ -20,12 +20,12 @@ use polkadot_node_primitives::{BabeAllowedSlots, BabeEpoch, BabeEpochConfigurati use polkadot_node_subsystem::SpawnGlue; use polkadot_node_subsystem_test_helpers::make_subsystem_context; use polkadot_primitives::{ - async_backing, slashing, AuthorityDiscoveryId, BlockNumber, CandidateCommitments, - CandidateEvent, CandidateHash, CommittedCandidateReceipt, CoreState, DisputeState, - ExecutorParams, GroupRotationInfo, Id as ParaId, InboundDownwardMessage, InboundHrmpMessage, - OccupiedCoreAssumption, PersistedValidationData, PvfCheckStatement, ScrapedOnChainVotes, - SessionIndex, SessionInfo, Slot, ValidationCode, ValidationCodeHash, ValidatorId, - ValidatorIndex, ValidatorSignature, + async_backing, slashing, vstaging::NodeFeatures, AuthorityDiscoveryId, BlockNumber, + CandidateCommitments, CandidateEvent, CandidateHash, CommittedCandidateReceipt, CoreState, + DisputeState, ExecutorParams, GroupRotationInfo, Id as ParaId, InboundDownwardMessage, + InboundHrmpMessage, OccupiedCoreAssumption, PersistedValidationData, PvfCheckStatement, + ScrapedOnChainVotes, SessionIndex, SessionInfo, Slot, ValidationCode, ValidationCodeHash, + ValidatorId, ValidatorIndex, ValidatorSignature, }; use sp_api::ApiError; use sp_core::testing::TaskExecutor; @@ -269,6 +269,10 @@ impl RuntimeApiSubsystemClient for MockSubsystemClient { todo!("Not required for tests") } + async fn node_features(&self, _: Hash) -> Result { + todo!("Not required for tests") + } + async fn disabled_validators(&self, _: Hash) -> Result, ApiError> { todo!("Not required for tests") } diff --git a/polkadot/node/subsystem-types/src/messages.rs b/polkadot/node/subsystem-types/src/messages.rs index 4ddffc6dc5e..43456daec30 100644 --- a/polkadot/node/subsystem-types/src/messages.rs +++ b/polkadot/node/subsystem-types/src/messages.rs @@ -42,12 +42,12 @@ use polkadot_node_primitives::{ ValidationResult, }; use polkadot_primitives::{ - async_backing, slashing, AuthorityDiscoveryId, BackedCandidate, BlockNumber, CandidateEvent, - CandidateHash, CandidateIndex, CandidateReceipt, CollatorId, CommittedCandidateReceipt, - CoreState, DisputeState, ExecutorParams, GroupIndex, GroupRotationInfo, Hash, - Header as BlockHeader, Id as ParaId, InboundDownwardMessage, InboundHrmpMessage, - MultiDisputeStatementSet, 
OccupiedCoreAssumption, PersistedValidationData, PvfCheckStatement, - PvfExecTimeoutKind, SessionIndex, SessionInfo, SignedAvailabilityBitfield, + async_backing, slashing, vstaging::NodeFeatures, AuthorityDiscoveryId, BackedCandidate, + BlockNumber, CandidateEvent, CandidateHash, CandidateIndex, CandidateReceipt, CollatorId, + CommittedCandidateReceipt, CoreState, DisputeState, ExecutorParams, GroupIndex, + GroupRotationInfo, Hash, Header as BlockHeader, Id as ParaId, InboundDownwardMessage, + InboundHrmpMessage, MultiDisputeStatementSet, OccupiedCoreAssumption, PersistedValidationData, + PvfCheckStatement, PvfExecTimeoutKind, SessionIndex, SessionInfo, SignedAvailabilityBitfield, SignedAvailabilityBitfields, ValidationCode, ValidationCodeHash, ValidatorId, ValidatorIndex, ValidatorSignature, }; @@ -718,6 +718,8 @@ pub enum RuntimeApiRequest { /// /// If it's not supported by the Runtime, the async backing is said to be disabled. AsyncBackingParams(RuntimeApiSender), + /// Get the node features. + NodeFeatures(SessionIndex, RuntimeApiSender), } impl RuntimeApiRequest { @@ -746,6 +748,9 @@ impl RuntimeApiRequest { /// `DisabledValidators` pub const DISABLED_VALIDATORS_RUNTIME_REQUIREMENT: u32 = 8; + + /// `Node features` + pub const NODE_FEATURES_RUNTIME_REQUIREMENT: u32 = 9; } /// A message to the Runtime API subsystem. diff --git a/polkadot/node/subsystem-types/src/runtime_client.rs b/polkadot/node/subsystem-types/src/runtime_client.rs index f7adcf9862b..8369fd215f4 100644 --- a/polkadot/node/subsystem-types/src/runtime_client.rs +++ b/polkadot/node/subsystem-types/src/runtime_client.rs @@ -16,12 +16,12 @@ use async_trait::async_trait; use polkadot_primitives::{ - async_backing, runtime_api::ParachainHost, slashing, Block, BlockNumber, CandidateCommitments, - CandidateEvent, CandidateHash, CommittedCandidateReceipt, CoreState, DisputeState, - ExecutorParams, GroupRotationInfo, Hash, Id, InboundDownwardMessage, InboundHrmpMessage, - OccupiedCoreAssumption, PersistedValidationData, PvfCheckStatement, ScrapedOnChainVotes, - SessionIndex, SessionInfo, ValidationCode, ValidationCodeHash, ValidatorId, ValidatorIndex, - ValidatorSignature, + async_backing, runtime_api::ParachainHost, slashing, vstaging, Block, BlockNumber, + CandidateCommitments, CandidateEvent, CandidateHash, CommittedCandidateReceipt, CoreState, + DisputeState, ExecutorParams, GroupRotationInfo, Hash, Id, InboundDownwardMessage, + InboundHrmpMessage, OccupiedCoreAssumption, PersistedValidationData, PvfCheckStatement, + ScrapedOnChainVotes, SessionIndex, SessionInfo, ValidationCode, ValidationCodeHash, + ValidatorId, ValidatorIndex, ValidatorSignature, }; use sc_transaction_pool_api::OffchainTransactionPoolFactory; use sp_api::{ApiError, ApiExt, ProvideRuntimeApi}; @@ -257,8 +257,14 @@ pub trait RuntimeApiSubsystemClient { ) -> Result, ApiError>; // === v8 === + /// Gets the disabled validators at a specific block height async fn disabled_validators(&self, at: Hash) -> Result, ApiError>; + + // === v9 === + + /// Get the node features. + async fn node_features(&self, at: Hash) -> Result; } /// Default implementation of [`RuntimeApiSubsystemClient`] using the client. 
@@ -508,6 +514,10 @@ where self.client.runtime_api().async_backing_params(at) } + async fn node_features(&self, at: Hash) -> Result { + self.client.runtime_api().node_features(at) + } + async fn disabled_validators(&self, at: Hash) -> Result, ApiError> { self.client.runtime_api().disabled_validators(at) } diff --git a/polkadot/node/subsystem-util/src/runtime/mod.rs b/polkadot/node/subsystem-util/src/runtime/mod.rs index 8d7cef88a70..aada7a5d77a 100644 --- a/polkadot/node/subsystem-util/src/runtime/mod.rs +++ b/polkadot/node/subsystem-util/src/runtime/mod.rs @@ -30,8 +30,8 @@ use polkadot_node_subsystem::{ }; use polkadot_node_subsystem_types::UnpinHandle; use polkadot_primitives::{ - slashing, AsyncBackingParams, CandidateEvent, CandidateHash, CoreState, EncodeAs, - ExecutorParams, GroupIndex, GroupRotationInfo, Hash, IndexedVec, OccupiedCore, + slashing, vstaging::NodeFeatures, AsyncBackingParams, CandidateEvent, CandidateHash, CoreState, + EncodeAs, ExecutorParams, GroupIndex, GroupRotationInfo, Hash, IndexedVec, OccupiedCore, ScrapedOnChainVotes, SessionIndex, SessionInfo, Signed, SigningContext, UncheckedSigned, ValidationCode, ValidationCodeHash, ValidatorId, ValidatorIndex, LEGACY_MIN_BACKING_VOTES, }; @@ -507,3 +507,32 @@ pub async fn request_min_backing_votes( min_backing_votes_res } } + +/// Request the node features enabled in the runtime. +/// Pass in the session index for caching purposes, as it should only change on session boundaries. +/// Prior to runtime API version 9, just return `None`. +pub async fn request_node_features( + parent: Hash, + session_index: SessionIndex, + sender: &mut impl overseer::SubsystemSender, +) -> Result> { + let res = recv_runtime( + request_from_runtime(parent, sender, |tx| { + RuntimeApiRequest::NodeFeatures(session_index, tx) + }) + .await, + ) + .await; + + if let Err(Error::RuntimeRequest(RuntimeApiError::NotSupported { .. })) = res { + gum::trace!( + target: LOG_TARGET, + ?parent, + "Querying the node features from the runtime is not supported by the current Runtime API", + ); + + Ok(None) + } else { + res.map(Some) + } +} diff --git a/polkadot/primitives/Cargo.toml b/polkadot/primitives/Cargo.toml index b318c2d4be7..316644a372d 100644 --- a/polkadot/primitives/Cargo.toml +++ b/polkadot/primitives/Cargo.toml @@ -7,7 +7,7 @@ license.workspace = true description = "Shared primitives used by Polkadot runtime" [dependencies] -bitvec = { version = "1.0.0", default-features = false, features = ["alloc"] } +bitvec = { version = "1.0.0", default-features = false, features = ["alloc", "serde"] } hex-literal = "0.4.1" parity-scale-codec = { version = "3.6.1", default-features = false, features = ["bit-vec", "derive"] } scale-info = { version = "2.10.0", default-features = false, features = ["bit-vec", "derive", "serde"] } diff --git a/polkadot/primitives/src/runtime_api.rs b/polkadot/primitives/src/runtime_api.rs index 5ec897c8cbb..e4c1d590f45 100644 --- a/polkadot/primitives/src/runtime_api.rs +++ b/polkadot/primitives/src/runtime_api.rs @@ -114,10 +114,10 @@ //! separated from the stable primitives. 
use crate::{ - async_backing, slashing, AsyncBackingParams, BlockNumber, CandidateCommitments, CandidateEvent, - CandidateHash, CommittedCandidateReceipt, CoreState, DisputeState, ExecutorParams, - GroupRotationInfo, OccupiedCoreAssumption, PersistedValidationData, PvfCheckStatement, - ScrapedOnChainVotes, SessionIndex, SessionInfo, ValidatorId, ValidatorIndex, + async_backing, slashing, vstaging, AsyncBackingParams, BlockNumber, CandidateCommitments, + CandidateEvent, CandidateHash, CommittedCandidateReceipt, CoreState, DisputeState, + ExecutorParams, GroupRotationInfo, OccupiedCoreAssumption, PersistedValidationData, + PvfCheckStatement, ScrapedOnChainVotes, SessionIndex, SessionInfo, ValidatorId, ValidatorIndex, ValidatorSignature, }; use parity_scale_codec::{Decode, Encode}; @@ -264,5 +264,12 @@ sp_api::decl_runtime_apis! { /// Returns a list of all disabled validators at the given block. #[api_version(8)] fn disabled_validators() -> Vec; + + /***** Added in v9 *****/ + + /// Get node features. + /// This is a staging method! Do not use on production runtimes! + #[api_version(9)] + fn node_features() -> vstaging::NodeFeatures; } } diff --git a/polkadot/primitives/src/vstaging/mod.rs b/polkadot/primitives/src/vstaging/mod.rs index 1429b0c326a..083e0f42d56 100644 --- a/polkadot/primitives/src/vstaging/mod.rs +++ b/polkadot/primitives/src/vstaging/mod.rs @@ -17,3 +17,8 @@ //! Staging Primitives. // Put any primitives used by staging APIs functions here + +use bitvec::vec::BitVec; + +/// Bit indices in the `HostConfiguration.node_features` that correspond to different node features. +pub type NodeFeatures = BitVec; diff --git a/polkadot/runtime/parachains/src/configuration.rs b/polkadot/runtime/parachains/src/configuration.rs index d85c267496f..bff9cc34b4f 100644 --- a/polkadot/runtime/parachains/src/configuration.rs +++ b/polkadot/runtime/parachains/src/configuration.rs @@ -26,8 +26,8 @@ use polkadot_parachain_primitives::primitives::{ MAX_HORIZONTAL_MESSAGE_NUM, MAX_UPWARD_MESSAGE_NUM, }; use primitives::{ - AsyncBackingParams, Balance, ExecutorParamError, ExecutorParams, SessionIndex, - LEGACY_MIN_BACKING_VOTES, MAX_CODE_SIZE, MAX_HEAD_DATA_SIZE, MAX_POV_SIZE, + vstaging::NodeFeatures, AsyncBackingParams, Balance, ExecutorParamError, ExecutorParams, + SessionIndex, LEGACY_MIN_BACKING_VOTES, MAX_CODE_SIZE, MAX_HEAD_DATA_SIZE, MAX_POV_SIZE, ON_DEMAND_DEFAULT_QUEUE_MAX_SIZE, }; use sp_runtime::{traits::Zero, Perbill}; @@ -261,6 +261,8 @@ pub struct HostConfiguration { /// The minimum number of valid backing statements required to consider a parachain candidate /// backable. pub minimum_backing_votes: u32, + /// Node features enablement. + pub node_features: NodeFeatures, } impl> Default for HostConfiguration { @@ -312,6 +314,7 @@ impl> Default for HostConfiguration Weight; fn set_config_with_executor_params() -> Weight; fn set_config_with_perbill() -> Weight; + fn set_node_feature() -> Weight; } pub struct TestWeightInfo; @@ -488,6 +492,9 @@ impl WeightInfo for TestWeightInfo { fn set_config_with_perbill() -> Weight { Weight::MAX } + fn set_node_feature() -> Weight { + Weight::MAX + } } #[frame_support::pallet] @@ -496,18 +503,19 @@ pub mod pallet { /// The current storage version. 
/// - /// v0-v1: - /// v1-v2: - /// v2-v3: - /// v3-v4: - /// v4-v5: - /// + - /// + - /// v5-v6: (remove UMP dispatch queue) - /// v6-v7: - /// v7-v8: - /// v8-v9: - const STORAGE_VERSION: StorageVersion = StorageVersion::new(9); + /// v0-v1: + /// v1-v2: + /// v2-v3: + /// v3-v4: + /// v4-v5: + /// + + /// + + /// v5-v6: (remove UMP dispatch queue) + /// v6-v7: + /// v7-v8: + /// v8-v9: + /// v9-v10: + const STORAGE_VERSION: StorageVersion = StorageVersion::new(10); #[pallet::pallet] #[pallet::storage_version(STORAGE_VERSION)] @@ -1195,6 +1203,23 @@ pub mod pallet { config.minimum_backing_votes = new; }) } + /// Set/Unset a node feature. + #[pallet::call_index(53)] + #[pallet::weight(( + T::WeightInfo::set_node_feature(), + DispatchClass::Operational + ))] + pub fn set_node_feature(origin: OriginFor, index: u8, value: bool) -> DispatchResult { + ensure_root(origin)?; + + Self::schedule_config_update(|config| { + let index = usize::from(index); + if config.node_features.len() <= index { + config.node_features.resize(index + 1, false); + } + config.node_features.set(index, value); + }) + } } #[pallet::hooks] diff --git a/polkadot/runtime/parachains/src/configuration/benchmarking.rs b/polkadot/runtime/parachains/src/configuration/benchmarking.rs index d9d11ab56e4..508e0579a09 100644 --- a/polkadot/runtime/parachains/src/configuration/benchmarking.rs +++ b/polkadot/runtime/parachains/src/configuration/benchmarking.rs @@ -49,6 +49,8 @@ benchmarks! { set_config_with_perbill {}: set_on_demand_fee_variability(RawOrigin::Root, Perbill::from_percent(100)) + set_node_feature{}: set_node_feature(RawOrigin::Root, 255, true) + impl_benchmark_test_suite!( Pallet, crate::mock::new_test_ext(Default::default()), diff --git a/polkadot/runtime/parachains/src/configuration/migration.rs b/polkadot/runtime/parachains/src/configuration/migration.rs index 26f8a85b496..db323d3aad9 100644 --- a/polkadot/runtime/parachains/src/configuration/migration.rs +++ b/polkadot/runtime/parachains/src/configuration/migration.rs @@ -16,6 +16,7 @@ //! A module that is responsible for migration of storage. +pub mod v10; pub mod v6; pub mod v7; pub mod v8; diff --git a/polkadot/runtime/parachains/src/configuration/migration/v10.rs b/polkadot/runtime/parachains/src/configuration/migration/v10.rs new file mode 100644 index 00000000000..3c934082dc1 --- /dev/null +++ b/polkadot/runtime/parachains/src/configuration/migration/v10.rs @@ -0,0 +1,277 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! A module that is responsible for migration of storage. 
+ +use crate::configuration::{self, Config, Pallet}; +use frame_support::{pallet_prelude::*, traits::Defensive, weights::Weight}; +use frame_system::pallet_prelude::BlockNumberFor; +use primitives::{vstaging::NodeFeatures, SessionIndex}; +use sp_std::vec::Vec; + +use frame_support::traits::OnRuntimeUpgrade; + +use super::v9::V9HostConfiguration; + +type V10HostConfiguration = configuration::HostConfiguration; + +mod v9 { + use super::*; + + #[frame_support::storage_alias] + pub(crate) type ActiveConfig = + StorageValue, V9HostConfiguration>, OptionQuery>; + + #[frame_support::storage_alias] + pub(crate) type PendingConfigs = StorageValue< + Pallet, + Vec<(SessionIndex, V9HostConfiguration>)>, + OptionQuery, + >; +} + +mod v10 { + use super::*; + + #[frame_support::storage_alias] + pub(crate) type ActiveConfig = + StorageValue, V10HostConfiguration>, OptionQuery>; + + #[frame_support::storage_alias] + pub(crate) type PendingConfigs = StorageValue< + Pallet, + Vec<(SessionIndex, V10HostConfiguration>)>, + OptionQuery, + >; +} + +pub struct VersionUncheckedMigrateToV10(sp_std::marker::PhantomData); +impl OnRuntimeUpgrade for VersionUncheckedMigrateToV10 { + #[cfg(feature = "try-runtime")] + fn pre_upgrade() -> Result, sp_runtime::TryRuntimeError> { + log::trace!(target: crate::configuration::LOG_TARGET, "Running pre_upgrade() for HostConfiguration MigrateToV10"); + Ok(Vec::new()) + } + + fn on_runtime_upgrade() -> Weight { + migrate_to_v10::() + } + + #[cfg(feature = "try-runtime")] + fn post_upgrade(_state: Vec) -> Result<(), sp_runtime::TryRuntimeError> { + log::trace!(target: crate::configuration::LOG_TARGET, "Running post_upgrade() for HostConfiguration MigrateToV10"); + ensure!( + Pallet::::on_chain_storage_version() >= StorageVersion::new(10), + "Storage version should be >= 10 after the migration" + ); + + Ok(()) + } +} + +pub type MigrateToV10 = frame_support::migrations::VersionedMigration< + 9, + 10, + VersionUncheckedMigrateToV10, + Pallet, + ::DbWeight, +>; + +// Unusual formatting is justified: +// - make it easier to verify that fields assign what they supposed to assign. +// - this code is transient and will be removed after all migrations are done. +// - this code is important enough to optimize for legibility sacrificing consistency. 
+#[rustfmt::skip] +fn translate(pre: V9HostConfiguration>) -> V10HostConfiguration> { + V10HostConfiguration { + max_code_size : pre.max_code_size, + max_head_data_size : pre.max_head_data_size, + max_upward_queue_count : pre.max_upward_queue_count, + max_upward_queue_size : pre.max_upward_queue_size, + max_upward_message_size : pre.max_upward_message_size, + max_upward_message_num_per_candidate : pre.max_upward_message_num_per_candidate, + hrmp_max_message_num_per_candidate : pre.hrmp_max_message_num_per_candidate, + validation_upgrade_cooldown : pre.validation_upgrade_cooldown, + validation_upgrade_delay : pre.validation_upgrade_delay, + max_pov_size : pre.max_pov_size, + max_downward_message_size : pre.max_downward_message_size, + hrmp_sender_deposit : pre.hrmp_sender_deposit, + hrmp_recipient_deposit : pre.hrmp_recipient_deposit, + hrmp_channel_max_capacity : pre.hrmp_channel_max_capacity, + hrmp_channel_max_total_size : pre.hrmp_channel_max_total_size, + hrmp_max_parachain_inbound_channels : pre.hrmp_max_parachain_inbound_channels, + hrmp_max_parachain_outbound_channels : pre.hrmp_max_parachain_outbound_channels, + hrmp_channel_max_message_size : pre.hrmp_channel_max_message_size, + code_retention_period : pre.code_retention_period, + on_demand_cores : pre.on_demand_cores, + on_demand_retries : pre.on_demand_retries, + group_rotation_frequency : pre.group_rotation_frequency, + paras_availability_period : pre.paras_availability_period, + scheduling_lookahead : pre.scheduling_lookahead, + max_validators_per_core : pre.max_validators_per_core, + max_validators : pre.max_validators, + dispute_period : pre.dispute_period, + dispute_post_conclusion_acceptance_period: pre.dispute_post_conclusion_acceptance_period, + no_show_slots : pre.no_show_slots, + n_delay_tranches : pre.n_delay_tranches, + zeroth_delay_tranche_width : pre.zeroth_delay_tranche_width, + needed_approvals : pre.needed_approvals, + relay_vrf_modulo_samples : pre.relay_vrf_modulo_samples, + pvf_voting_ttl : pre.pvf_voting_ttl, + minimum_validation_upgrade_delay : pre.minimum_validation_upgrade_delay, + async_backing_params : pre.async_backing_params, + executor_params : pre.executor_params, + on_demand_queue_max_size : pre.on_demand_queue_max_size, + on_demand_base_fee : pre.on_demand_base_fee, + on_demand_fee_variability : pre.on_demand_fee_variability, + on_demand_target_queue_utilization : pre.on_demand_target_queue_utilization, + on_demand_ttl : pre.on_demand_ttl, + minimum_backing_votes : pre.minimum_backing_votes, + node_features : NodeFeatures::EMPTY + } +} + +fn migrate_to_v10() -> Weight { + let v9 = v9::ActiveConfig::::get() + .defensive_proof("Could not decode old config") + .unwrap_or_default(); + let v10 = translate::(v9); + v10::ActiveConfig::::set(Some(v10)); + + // Allowed to be empty. + let pending_v9 = v9::PendingConfigs::::get().unwrap_or_default(); + let mut pending_v10 = Vec::new(); + + for (session, v9) in pending_v9.into_iter() { + let v10 = translate::(v9); + pending_v10.push((session, v10)); + } + v10::PendingConfigs::::set(Some(pending_v10.clone())); + + let num_configs = (pending_v10.len() + 1) as u64; + T::DbWeight::get().reads_writes(num_configs, num_configs) +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::mock::{new_test_ext, Test}; + use primitives::LEGACY_MIN_BACKING_VOTES; + + #[test] + fn v10_deserialized_from_actual_data() { + // Example how to get new `raw_config`: + // We'll obtain the raw_config at a specified a block + // Steps: + // 1. 
Go to Polkadot.js -> Developer -> Chain state -> Storage: https://polkadot.js.org/apps/#/chainstate + // 2. Set these parameters: + // 2.1. selected state query: configuration; activeConfig(): + // PolkadotRuntimeParachainsConfigurationHostConfiguration + // 2.2. blockhash to query at: + // 0xf89d3ab5312c5f70d396dc59612f0aa65806c798346f9db4b35278baed2e0e53 (the hash of + // the block) + // 2.3. Note the value of encoded storage key -> + // 0x06de3d8a54d27e44a9d5ce189618f22db4b49d95320d9021994c850f25b8e385 for the + // referenced block. + // 2.4. You'll also need the decoded values to update the test. + // 3. Go to Polkadot.js -> Developer -> Chain state -> Raw storage + // 3.1 Enter the encoded storage key and you get the raw config. + + // This exceeds the maximal line width length, but that's fine, since this is not code and + // doesn't need to be read and also leaving it as one line allows to easily copy it. + let raw_config = + hex_literal::hex![" + 0000300000800000080000000000100000c8000005000000050000000200000002000000000000000000000000005000000010000400000000000000000000000000000000000000000000000000000000000000000000000800000000200000040000000000100000b004000000000000000000001027000080b2e60e80c3c90180969800000000000000000000000000050000001400000004000000010000000101000000000600000064000000020000001900000000000000020000000200000002000000050000000200000000" + ]; + + let v10 = + V10HostConfiguration::::decode(&mut &raw_config[..]).unwrap(); + + // We check only a sample of the values here. If we missed any fields or messed up data + // types that would skew all the fields coming after. + assert_eq!(v10.max_code_size, 3_145_728); + assert_eq!(v10.validation_upgrade_cooldown, 2); + assert_eq!(v10.max_pov_size, 5_242_880); + assert_eq!(v10.hrmp_channel_max_message_size, 1_048_576); + assert_eq!(v10.n_delay_tranches, 25); + assert_eq!(v10.minimum_validation_upgrade_delay, 5); + assert_eq!(v10.group_rotation_frequency, 20); + assert_eq!(v10.on_demand_cores, 0); + assert_eq!(v10.on_demand_base_fee, 10_000_000); + assert_eq!(v10.minimum_backing_votes, LEGACY_MIN_BACKING_VOTES); + assert_eq!(v10.node_features, NodeFeatures::EMPTY); + } + + // Test that `migrate_to_v10`` correctly applies the `translate` function to current and pending + // configs. + #[test] + fn test_migrate_to_v10() { + // Host configuration has lots of fields. However, in this migration we only add one + // field. The most important part to check are a couple of the last fields. We also pick + // extra fields to check arbitrarily, e.g. depending on their position (i.e. the middle) and + // also their type. + // + // We specify only the picked fields and the rest should be provided by the `Default` + // implementation. That implementation is copied over between the two types and should work + // fine. + let v9 = V9HostConfiguration:: { + needed_approvals: 69, + paras_availability_period: 55, + hrmp_recipient_deposit: 1337, + max_pov_size: 1111, + minimum_validation_upgrade_delay: 20, + ..Default::default() + }; + + let mut pending_configs = Vec::new(); + pending_configs.push((100, v9.clone())); + pending_configs.push((300, v9.clone())); + + new_test_ext(Default::default()).execute_with(|| { + // Implant the v9 version in the state. 
+ v9::ActiveConfig::::set(Some(v9.clone())); + v9::PendingConfigs::::set(Some(pending_configs)); + + migrate_to_v10::(); + + let v10 = translate::(v9); + let mut configs_to_check = v10::PendingConfigs::::get().unwrap(); + configs_to_check.push((0, v10::ActiveConfig::::get().unwrap())); + + for (_, config) in configs_to_check { + assert_eq!(config, v10); + assert_eq!(config.node_features, NodeFeatures::EMPTY); + } + }); + } + + // Test that migration doesn't panic in case there are no pending configuration upgrades in + // the pallet's storage. + #[test] + fn test_migrate_to_v10_no_pending() { + let v9 = V9HostConfiguration::::default(); + + new_test_ext(Default::default()).execute_with(|| { + // Implant the v9 version in the state. + v9::ActiveConfig::::set(Some(v9)); + // Ensure there are no pending configs. + v9::PendingConfigs::::set(None); + + // Shouldn't fail. + migrate_to_v10::(); + }); + } +} diff --git a/polkadot/runtime/parachains/src/configuration/migration/v9.rs b/polkadot/runtime/parachains/src/configuration/migration/v9.rs index e37f0b9b0e3..ca4bbd9dace 100644 --- a/polkadot/runtime/parachains/src/configuration/migration/v9.rs +++ b/polkadot/runtime/parachains/src/configuration/migration/v9.rs @@ -23,13 +23,116 @@ use frame_support::{ weights::Weight, }; use frame_system::pallet_prelude::BlockNumberFor; -use primitives::{SessionIndex, LEGACY_MIN_BACKING_VOTES}; +use primitives::{ + AsyncBackingParams, Balance, ExecutorParams, SessionIndex, LEGACY_MIN_BACKING_VOTES, + ON_DEMAND_DEFAULT_QUEUE_MAX_SIZE, +}; +use sp_runtime::Perbill; use sp_std::vec::Vec; use frame_support::traits::OnRuntimeUpgrade; use super::v8::V8HostConfiguration; -type V9HostConfiguration = configuration::HostConfiguration; +/// All configuration of the runtime with respect to paras.
+#[derive(Clone, Encode, Decode, Debug)] +pub struct V9HostConfiguration { + pub max_code_size: u32, + pub max_head_data_size: u32, + pub max_upward_queue_count: u32, + pub max_upward_queue_size: u32, + pub max_upward_message_size: u32, + pub max_upward_message_num_per_candidate: u32, + pub hrmp_max_message_num_per_candidate: u32, + pub validation_upgrade_cooldown: BlockNumber, + pub validation_upgrade_delay: BlockNumber, + pub async_backing_params: AsyncBackingParams, + pub max_pov_size: u32, + pub max_downward_message_size: u32, + pub hrmp_max_parachain_outbound_channels: u32, + pub hrmp_sender_deposit: Balance, + pub hrmp_recipient_deposit: Balance, + pub hrmp_channel_max_capacity: u32, + pub hrmp_channel_max_total_size: u32, + pub hrmp_max_parachain_inbound_channels: u32, + pub hrmp_channel_max_message_size: u32, + pub executor_params: ExecutorParams, + pub code_retention_period: BlockNumber, + pub on_demand_cores: u32, + pub on_demand_retries: u32, + pub on_demand_queue_max_size: u32, + pub on_demand_target_queue_utilization: Perbill, + pub on_demand_fee_variability: Perbill, + pub on_demand_base_fee: Balance, + pub on_demand_ttl: BlockNumber, + pub group_rotation_frequency: BlockNumber, + pub paras_availability_period: BlockNumber, + pub scheduling_lookahead: u32, + pub max_validators_per_core: Option, + pub max_validators: Option, + pub dispute_period: SessionIndex, + pub dispute_post_conclusion_acceptance_period: BlockNumber, + pub no_show_slots: u32, + pub n_delay_tranches: u32, + pub zeroth_delay_tranche_width: u32, + pub needed_approvals: u32, + pub relay_vrf_modulo_samples: u32, + pub pvf_voting_ttl: SessionIndex, + pub minimum_validation_upgrade_delay: BlockNumber, + pub minimum_backing_votes: u32, +} + +impl> Default for V9HostConfiguration { + fn default() -> Self { + Self { + async_backing_params: AsyncBackingParams { + max_candidate_depth: 0, + allowed_ancestry_len: 0, + }, + group_rotation_frequency: 1u32.into(), + paras_availability_period: 1u32.into(), + no_show_slots: 1u32.into(), + validation_upgrade_cooldown: Default::default(), + validation_upgrade_delay: 2u32.into(), + code_retention_period: Default::default(), + max_code_size: Default::default(), + max_pov_size: Default::default(), + max_head_data_size: Default::default(), + on_demand_cores: Default::default(), + on_demand_retries: Default::default(), + scheduling_lookahead: 1, + max_validators_per_core: Default::default(), + max_validators: None, + dispute_period: 6, + dispute_post_conclusion_acceptance_period: 100.into(), + n_delay_tranches: Default::default(), + zeroth_delay_tranche_width: Default::default(), + needed_approvals: Default::default(), + relay_vrf_modulo_samples: Default::default(), + max_upward_queue_count: Default::default(), + max_upward_queue_size: Default::default(), + max_downward_message_size: Default::default(), + max_upward_message_size: Default::default(), + max_upward_message_num_per_candidate: Default::default(), + hrmp_sender_deposit: Default::default(), + hrmp_recipient_deposit: Default::default(), + hrmp_channel_max_capacity: Default::default(), + hrmp_channel_max_total_size: Default::default(), + hrmp_max_parachain_inbound_channels: Default::default(), + hrmp_channel_max_message_size: Default::default(), + hrmp_max_parachain_outbound_channels: Default::default(), + hrmp_max_message_num_per_candidate: Default::default(), + pvf_voting_ttl: 2u32.into(), + minimum_validation_upgrade_delay: 2.into(), + executor_params: Default::default(), + on_demand_queue_max_size: 
ON_DEMAND_DEFAULT_QUEUE_MAX_SIZE, + on_demand_base_fee: 10_000_000u128, + on_demand_fee_variability: Perbill::from_percent(3), + on_demand_target_queue_utilization: Perbill::from_percent(25), + on_demand_ttl: 5u32.into(), + minimum_backing_votes: LEGACY_MIN_BACKING_VOTES, + } + } +} mod v8 { use super::*; diff --git a/polkadot/runtime/parachains/src/configuration/tests.rs b/polkadot/runtime/parachains/src/configuration/tests.rs index ea39628c958..b62a45355e1 100644 --- a/polkadot/runtime/parachains/src/configuration/tests.rs +++ b/polkadot/runtime/parachains/src/configuration/tests.rs @@ -16,6 +16,7 @@ use super::*; use crate::mock::{new_test_ext, Configuration, ParasShared, RuntimeOrigin, Test}; +use bitvec::{bitvec, prelude::Lsb0}; use frame_support::{assert_err, assert_noop, assert_ok}; fn on_new_session(session_index: SessionIndex) -> (HostConfiguration, HostConfiguration) { @@ -318,6 +319,7 @@ fn setting_pending_config_members() { on_demand_target_queue_utilization: Perbill::from_percent(25), on_demand_ttl: 5u32, minimum_backing_votes: 5, + node_features: bitvec![u8, Lsb0; 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1], }; Configuration::set_validation_upgrade_cooldown( @@ -473,6 +475,12 @@ fn setting_pending_config_members() { new_config.minimum_backing_votes, ) .unwrap(); + Configuration::set_node_feature(RuntimeOrigin::root(), 1, true).unwrap(); + Configuration::set_node_feature(RuntimeOrigin::root(), 1, true).unwrap(); + Configuration::set_node_feature(RuntimeOrigin::root(), 3, true).unwrap(); + Configuration::set_node_feature(RuntimeOrigin::root(), 10, true).unwrap(); + Configuration::set_node_feature(RuntimeOrigin::root(), 10, false).unwrap(); + Configuration::set_node_feature(RuntimeOrigin::root(), 11, true).unwrap(); assert_eq!(PendingConfigs::::get(), vec![(shared::SESSION_DELAY, new_config)],); }) diff --git a/polkadot/runtime/parachains/src/runtime_api_impl/vstaging.rs b/polkadot/runtime/parachains/src/runtime_api_impl/vstaging.rs index 24a076f3a44..200fd57915f 100644 --- a/polkadot/runtime/parachains/src/runtime_api_impl/vstaging.rs +++ b/polkadot/runtime/parachains/src/runtime_api_impl/vstaging.rs @@ -16,8 +16,8 @@ //! Put implementations of functions from staging APIs here. -use crate::shared; -use primitives::ValidatorIndex; +use crate::{configuration, initializer, shared}; +use primitives::{vstaging::NodeFeatures, ValidatorIndex}; use sp_std::{collections::btree_map::BTreeMap, prelude::Vec}; /// Implementation for `DisabledValidators` @@ -42,3 +42,8 @@ where .filter_map(|v| reverse_index.get(v).cloned()) .collect() } + +/// Returns the current state of the node features. 
+pub fn node_features() -> NodeFeatures { + >::config().node_features +} diff --git a/polkadot/runtime/rococo/src/lib.rs b/polkadot/runtime/rococo/src/lib.rs index 40ef22107a7..5a1e170862e 100644 --- a/polkadot/runtime/rococo/src/lib.rs +++ b/polkadot/runtime/rococo/src/lib.rs @@ -23,11 +23,12 @@ use pallet_nis::WithMaximumOf; use parity_scale_codec::{Decode, Encode, MaxEncodedLen}; use primitives::{ - slashing, AccountId, AccountIndex, Balance, BlockNumber, CandidateEvent, CandidateHash, - CommittedCandidateReceipt, CoreState, DisputeState, ExecutorParams, GroupRotationInfo, Hash, - Id as ParaId, InboundDownwardMessage, InboundHrmpMessage, Moment, Nonce, - OccupiedCoreAssumption, PersistedValidationData, ScrapedOnChainVotes, SessionInfo, Signature, - ValidationCode, ValidationCodeHash, ValidatorId, ValidatorIndex, PARACHAIN_KEY_TYPE_ID, + slashing, vstaging::NodeFeatures, AccountId, AccountIndex, Balance, BlockNumber, + CandidateEvent, CandidateHash, CommittedCandidateReceipt, CoreState, DisputeState, + ExecutorParams, GroupRotationInfo, Hash, Id as ParaId, InboundDownwardMessage, + InboundHrmpMessage, Moment, Nonce, OccupiedCoreAssumption, PersistedValidationData, + ScrapedOnChainVotes, SessionInfo, Signature, ValidationCode, ValidationCodeHash, ValidatorId, + ValidatorIndex, PARACHAIN_KEY_TYPE_ID, }; use runtime_common::{ assigned_slots, auctions, claims, crowdloan, impl_runtime_weights, @@ -1499,6 +1500,7 @@ pub mod migrations { frame_support::migrations::RemovePallet::DbWeight>, pallet_grandpa::migrations::MigrateV4ToV5, + parachains_configuration::migration::v10::MigrateToV10, ); } @@ -1660,7 +1662,7 @@ sp_api::impl_runtime_apis! { } } - #[api_version(8)] + #[api_version(9)] impl primitives::runtime_api::ParachainHost for Runtime { fn validators() -> Vec { parachains_runtime_api_impl::validators::() @@ -1808,6 +1810,9 @@ sp_api::impl_runtime_apis! { parachains_staging_runtime_api_impl::disabled_validators::() } + fn node_features() -> NodeFeatures { + parachains_staging_runtime_api_impl::node_features::() + } } #[api_version(3)] diff --git a/polkadot/runtime/rococo/src/weights/runtime_parachains_configuration.rs b/polkadot/runtime/rococo/src/weights/runtime_parachains_configuration.rs index 29f38765778..34541b83597 100644 --- a/polkadot/runtime/rococo/src/weights/runtime_parachains_configuration.rs +++ b/polkadot/runtime/rococo/src/weights/runtime_parachains_configuration.rs @@ -17,9 +17,9 @@ //! Autogenerated weights for `runtime_parachains::configuration` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-08-11, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-11-10, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-fljshgub-project-163-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-yprdrvc7-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! 
WASM-EXECUTION: `Compiled`, CHAIN: `Some("rococo-dev")`, DB CACHE: 1024 // Executed Command: @@ -31,11 +31,11 @@ // --extrinsic=* // --wasm-execution=compiled // --heap-pages=4096 -// --json-file=/builds/parity/mirrors/polkadot/.git/.artifacts/bench.json +// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json // --pallet=runtime_parachains::configuration // --chain=rococo-dev -// --header=./file_header.txt -// --output=./runtime/rococo/src/weights/ +// --header=./polkadot/file_header.txt +// --output=./polkadot/runtime/rococo/src/weights/ #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -56,11 +56,11 @@ impl runtime_parachains::configuration::WeightInfo for /// Proof: `ParasShared::CurrentSessionIndex` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn set_config_with_block_number() -> Weight { // Proof Size summary in bytes: - // Measured: `127` - // Estimated: `1612` - // Minimum execution time: 9_051_000 picoseconds. - Weight::from_parts(9_496_000, 0) - .saturating_add(Weight::from_parts(0, 1612)) + // Measured: `151` + // Estimated: `1636` + // Minimum execution time: 7_793_000 picoseconds. + Weight::from_parts(8_192_000, 0) + .saturating_add(Weight::from_parts(0, 1636)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -72,11 +72,11 @@ impl runtime_parachains::configuration::WeightInfo for /// Proof: `ParasShared::CurrentSessionIndex` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn set_config_with_u32() -> Weight { // Proof Size summary in bytes: - // Measured: `127` - // Estimated: `1612` - // Minimum execution time: 9_104_000 picoseconds. - Weight::from_parts(9_403_000, 0) - .saturating_add(Weight::from_parts(0, 1612)) + // Measured: `151` + // Estimated: `1636` + // Minimum execution time: 7_819_000 picoseconds. + Weight::from_parts(8_004_000, 0) + .saturating_add(Weight::from_parts(0, 1636)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -88,11 +88,11 @@ impl runtime_parachains::configuration::WeightInfo for /// Proof: `ParasShared::CurrentSessionIndex` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn set_config_with_option_u32() -> Weight { // Proof Size summary in bytes: - // Measured: `127` - // Estimated: `1612` - // Minimum execution time: 9_112_000 picoseconds. - Weight::from_parts(9_495_000, 0) - .saturating_add(Weight::from_parts(0, 1612)) + // Measured: `151` + // Estimated: `1636` + // Minimum execution time: 7_760_000 picoseconds. + Weight::from_parts(8_174_000, 0) + .saturating_add(Weight::from_parts(0, 1636)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -114,11 +114,11 @@ impl runtime_parachains::configuration::WeightInfo for /// Proof: `ParasShared::CurrentSessionIndex` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn set_config_with_balance() -> Weight { // Proof Size summary in bytes: - // Measured: `127` - // Estimated: `1612` - // Minimum execution time: 9_011_000 picoseconds. - Weight::from_parts(9_460_000, 0) - .saturating_add(Weight::from_parts(0, 1612)) + // Measured: `151` + // Estimated: `1636` + // Minimum execution time: 7_814_000 picoseconds. 
+ Weight::from_parts(8_098_000, 0) + .saturating_add(Weight::from_parts(0, 1636)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -130,11 +130,11 @@ impl runtime_parachains::configuration::WeightInfo for /// Proof: `ParasShared::CurrentSessionIndex` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn set_config_with_executor_params() -> Weight { // Proof Size summary in bytes: - // Measured: `127` - // Estimated: `1612` - // Minimum execution time: 9_940_000 picoseconds. - Weight::from_parts(10_288_000, 0) - .saturating_add(Weight::from_parts(0, 1612)) + // Measured: `151` + // Estimated: `1636` + // Minimum execution time: 10_028_000 picoseconds. + Weight::from_parts(10_386_000, 0) + .saturating_add(Weight::from_parts(0, 1636)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -146,11 +146,27 @@ impl runtime_parachains::configuration::WeightInfo for /// Proof: `ParasShared::CurrentSessionIndex` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn set_config_with_perbill() -> Weight { // Proof Size summary in bytes: - // Measured: `127` - // Estimated: `1612` - // Minimum execution time: 9_192_000 picoseconds. - Weight::from_parts(9_595_000, 0) - .saturating_add(Weight::from_parts(0, 1612)) + // Measured: `151` + // Estimated: `1636` + // Minimum execution time: 7_867_000 picoseconds. + Weight::from_parts(8_191_000, 0) + .saturating_add(Weight::from_parts(0, 1636)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Configuration::PendingConfigs` (r:1 w:1) + /// Proof: `Configuration::PendingConfigs` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Configuration::BypassConsistencyCheck` (r:1 w:0) + /// Proof: `Configuration::BypassConsistencyCheck` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParasShared::CurrentSessionIndex` (r:1 w:0) + /// Proof: `ParasShared::CurrentSessionIndex` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + fn set_node_feature() -> Weight { + // Proof Size summary in bytes: + // Measured: `151` + // Estimated: `1636` + // Minimum execution time: 10_158_000 picoseconds. 
+ Weight::from_parts(10_430_000, 0) + .saturating_add(Weight::from_parts(0, 1636)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(1)) } diff --git a/polkadot/runtime/westend/src/lib.rs b/polkadot/runtime/westend/src/lib.rs index d80640c016f..d3862aff257 100644 --- a/polkadot/runtime/westend/src/lib.rs +++ b/polkadot/runtime/westend/src/lib.rs @@ -46,12 +46,12 @@ use pallet_session::historical as session_historical; use pallet_transaction_payment::{CurrencyAdapter, FeeDetails, RuntimeDispatchInfo}; use parity_scale_codec::{Decode, Encode, MaxEncodedLen}; use primitives::{ - slashing, AccountId, AccountIndex, Balance, BlockNumber, CandidateEvent, CandidateHash, - CommittedCandidateReceipt, CoreState, DisputeState, ExecutorParams, GroupRotationInfo, Hash, - Id as ParaId, InboundDownwardMessage, InboundHrmpMessage, Moment, Nonce, - OccupiedCoreAssumption, PersistedValidationData, PvfCheckStatement, ScrapedOnChainVotes, - SessionInfo, Signature, ValidationCode, ValidationCodeHash, ValidatorId, ValidatorIndex, - ValidatorSignature, PARACHAIN_KEY_TYPE_ID, + slashing, vstaging::NodeFeatures, AccountId, AccountIndex, Balance, BlockNumber, + CandidateEvent, CandidateHash, CommittedCandidateReceipt, CoreState, DisputeState, + ExecutorParams, GroupRotationInfo, Hash, Id as ParaId, InboundDownwardMessage, + InboundHrmpMessage, Moment, Nonce, OccupiedCoreAssumption, PersistedValidationData, + PvfCheckStatement, ScrapedOnChainVotes, SessionInfo, Signature, ValidationCode, + ValidationCodeHash, ValidatorId, ValidatorIndex, ValidatorSignature, PARACHAIN_KEY_TYPE_ID, }; use runtime_common::{ assigned_slots, auctions, crowdloan, @@ -1559,6 +1559,7 @@ pub mod migrations { pallet_referenda::migration::v1::MigrateV0ToV1, pallet_nomination_pools::migration::versioned_migrations::V6ToV7, pallet_grandpa::migrations::MigrateV4ToV5, + parachains_configuration::migration::v10::MigrateToV10, ); } @@ -1699,7 +1700,7 @@ sp_api::impl_runtime_apis! { } } - #[api_version(8)] + #[api_version(9)] impl primitives::runtime_api::ParachainHost for Runtime { fn validators() -> Vec { parachains_runtime_api_impl::validators::() @@ -1846,6 +1847,10 @@ sp_api::impl_runtime_apis! { fn disabled_validators() -> Vec { parachains_staging_runtime_api_impl::disabled_validators::() } + + fn node_features() -> NodeFeatures { + parachains_staging_runtime_api_impl::node_features::() + } } impl beefy_primitives::BeefyApi for Runtime { diff --git a/polkadot/runtime/westend/src/weights/runtime_parachains_configuration.rs b/polkadot/runtime/westend/src/weights/runtime_parachains_configuration.rs index 585dc9058f2..3a4813b667c 100644 --- a/polkadot/runtime/westend/src/weights/runtime_parachains_configuration.rs +++ b/polkadot/runtime/westend/src/weights/runtime_parachains_configuration.rs @@ -17,9 +17,9 @@ //! Autogenerated weights for `runtime_parachains::configuration` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-08-11, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-11-10, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-fljshgub-project-163-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-yprdrvc7-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! 
WASM-EXECUTION: `Compiled`, CHAIN: `Some("westend-dev")`, DB CACHE: 1024 // Executed Command: @@ -31,11 +31,11 @@ // --extrinsic=* // --wasm-execution=compiled // --heap-pages=4096 -// --json-file=/builds/parity/mirrors/polkadot/.git/.artifacts/bench.json +// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json // --pallet=runtime_parachains::configuration // --chain=westend-dev -// --header=./file_header.txt -// --output=./runtime/westend/src/weights/ +// --header=./polkadot/file_header.txt +// --output=./polkadot/runtime/westend/src/weights/ #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -56,11 +56,11 @@ impl runtime_parachains::configuration::WeightInfo for /// Proof: `ParasShared::CurrentSessionIndex` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn set_config_with_block_number() -> Weight { // Proof Size summary in bytes: - // Measured: `127` - // Estimated: `1612` - // Minimum execution time: 9_616_000 picoseconds. - Weight::from_parts(9_961_000, 0) - .saturating_add(Weight::from_parts(0, 1612)) + // Measured: `151` + // Estimated: `1636` + // Minimum execution time: 8_065_000 picoseconds. + Weight::from_parts(8_389_000, 0) + .saturating_add(Weight::from_parts(0, 1636)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -72,11 +72,11 @@ impl runtime_parachains::configuration::WeightInfo for /// Proof: `ParasShared::CurrentSessionIndex` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn set_config_with_u32() -> Weight { // Proof Size summary in bytes: - // Measured: `127` - // Estimated: `1612` - // Minimum execution time: 9_587_000 picoseconds. - Weight::from_parts(9_964_000, 0) - .saturating_add(Weight::from_parts(0, 1612)) + // Measured: `151` + // Estimated: `1636` + // Minimum execution time: 8_038_000 picoseconds. + Weight::from_parts(8_463_000, 0) + .saturating_add(Weight::from_parts(0, 1636)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -88,11 +88,11 @@ impl runtime_parachains::configuration::WeightInfo for /// Proof: `ParasShared::CurrentSessionIndex` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn set_config_with_option_u32() -> Weight { // Proof Size summary in bytes: - // Measured: `127` - // Estimated: `1612` - // Minimum execution time: 9_650_000 picoseconds. - Weight::from_parts(9_960_000, 0) - .saturating_add(Weight::from_parts(0, 1612)) + // Measured: `151` + // Estimated: `1636` + // Minimum execution time: 7_843_000 picoseconds. + Weight::from_parts(8_216_000, 0) + .saturating_add(Weight::from_parts(0, 1636)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -114,11 +114,11 @@ impl runtime_parachains::configuration::WeightInfo for /// Proof: `ParasShared::CurrentSessionIndex` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn set_config_with_balance() -> Weight { // Proof Size summary in bytes: - // Measured: `127` - // Estimated: `1612` - // Minimum execution time: 9_545_000 picoseconds. - Weight::from_parts(9_845_000, 0) - .saturating_add(Weight::from_parts(0, 1612)) + // Measured: `151` + // Estimated: `1636` + // Minimum execution time: 7_969_000 picoseconds. 
+ Weight::from_parts(8_362_000, 0) + .saturating_add(Weight::from_parts(0, 1636)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -130,11 +130,11 @@ impl runtime_parachains::configuration::WeightInfo for /// Proof: `ParasShared::CurrentSessionIndex` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn set_config_with_executor_params() -> Weight { // Proof Size summary in bytes: - // Measured: `127` - // Estimated: `1612` - // Minimum execution time: 10_258_000 picoseconds. - Weight::from_parts(10_607_000, 0) - .saturating_add(Weight::from_parts(0, 1612)) + // Measured: `151` + // Estimated: `1636` + // Minimum execution time: 10_084_000 picoseconds. + Weight::from_parts(10_451_000, 0) + .saturating_add(Weight::from_parts(0, 1636)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -146,11 +146,27 @@ impl runtime_parachains::configuration::WeightInfo for /// Proof: `ParasShared::CurrentSessionIndex` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn set_config_with_perbill() -> Weight { // Proof Size summary in bytes: - // Measured: `127` - // Estimated: `1612` - // Minimum execution time: 9_502_000 picoseconds. - Weight::from_parts(9_902_000, 0) - .saturating_add(Weight::from_parts(0, 1612)) + // Measured: `151` + // Estimated: `1636` + // Minimum execution time: 7_948_000 picoseconds. + Weight::from_parts(8_268_000, 0) + .saturating_add(Weight::from_parts(0, 1636)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Configuration::PendingConfigs` (r:1 w:1) + /// Proof: `Configuration::PendingConfigs` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Configuration::BypassConsistencyCheck` (r:1 w:0) + /// Proof: `Configuration::BypassConsistencyCheck` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParasShared::CurrentSessionIndex` (r:1 w:0) + /// Proof: `ParasShared::CurrentSessionIndex` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + fn set_node_feature() -> Weight { + // Proof Size summary in bytes: + // Measured: `151` + // Estimated: `1636` + // Minimum execution time: 10_257_000 picoseconds. + Weight::from_parts(10_584_000, 0) + .saturating_add(Weight::from_parts(0, 1636)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(1)) } -- GitLab From f517900a48ada81764e9d75d1e9d598ea4b84719 Mon Sep 17 00:00:00 2001 From: PG Herveou Date: Tue, 14 Nov 2023 21:32:14 +0100 Subject: [PATCH 055/199] Contracts expose pallet-xcm (#1248) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This PR introduces: - XCM host functions `xcm_send`, `xcm_execute` - An `Xcm` trait in the config that proxies these functions to `pallet_xcm`, or disables their usage by using `()`. - mock_network and xcm_test files to test the newly added xcm-related functions.
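For illustration, wiring the new `Xcm` associated type in a runtime looks roughly like this. This is a sketch only: `Runtime` stands for the concrete runtime, and every other `pallet_contracts::Config` item is assumed to stay as it already is.

```rust
// Hypothetical fragment of a runtime's pallet-contracts configuration.
impl pallet_contracts::Config for Runtime {
    // ... all other associated types unchanged ...

    // Proxy the `xcm_send`/`xcm_execute` host functions to pallet-xcm:
    type Xcm = pallet_xcm::Pallet<Self>;
    // Or disable both host functions by plugging in the unit type instead:
    // type Xcm = ();
}
```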
--------- Co-authored-by: Keith Yeung Co-authored-by: Sasha Gryaznov Co-authored-by: command-bot <> Co-authored-by: Francisco Aguirre Co-authored-by: Alexander Theißen --- Cargo.lock | 43 +++ Cargo.toml | 1 + .../contracts-rococo/src/contracts.rs | 1 + polkadot/xcm/xcm-builder/src/controller.rs | 2 +- polkadot/xcm/xcm-builder/src/lib.rs | 2 +- substrate/bin/node/runtime/src/lib.rs | 1 + substrate/frame/contracts/Cargo.toml | 16 + .../contracts/fixtures/data/xcm_execute.wat | 52 +++ .../contracts/fixtures/data/xcm_send.wat | 59 +++ substrate/frame/contracts/fixtures/src/lib.rs | 2 + .../frame/contracts/mock-network/Cargo.toml | 91 +++++ .../frame/contracts/mock-network/src/lib.rs | 151 ++++++++ .../frame/contracts/mock-network/src/mocks.rs | 18 + .../mock-network/src/mocks/msg_queue.rs | 168 +++++++++ .../src/mocks/relay_message_queue.rs | 52 +++ .../contracts/mock-network/src/parachain.rs | 353 ++++++++++++++++++ .../src/parachain/contracts_config.rs | 98 +++++ .../contracts/mock-network/src/primitives.rs | 23 ++ .../contracts/mock-network/src/relay_chain.rs | 236 ++++++++++++ .../frame/contracts/mock-network/src/tests.rs | 239 ++++++++++++ substrate/frame/contracts/src/lib.rs | 10 + substrate/frame/contracts/src/tests.rs | 1 + substrate/frame/contracts/src/wasm/runtime.rs | 193 +++++++++- 23 files changed, 1797 insertions(+), 15 deletions(-) create mode 100644 substrate/frame/contracts/fixtures/data/xcm_execute.wat create mode 100644 substrate/frame/contracts/fixtures/data/xcm_send.wat create mode 100644 substrate/frame/contracts/mock-network/Cargo.toml create mode 100644 substrate/frame/contracts/mock-network/src/lib.rs create mode 100644 substrate/frame/contracts/mock-network/src/mocks.rs create mode 100644 substrate/frame/contracts/mock-network/src/mocks/msg_queue.rs create mode 100644 substrate/frame/contracts/mock-network/src/mocks/relay_message_queue.rs create mode 100644 substrate/frame/contracts/mock-network/src/parachain.rs create mode 100644 substrate/frame/contracts/mock-network/src/parachain/contracts_config.rs create mode 100644 substrate/frame/contracts/mock-network/src/primitives.rs create mode 100644 substrate/frame/contracts/mock-network/src/relay_chain.rs create mode 100644 substrate/frame/contracts/mock-network/src/tests.rs diff --git a/Cargo.lock b/Cargo.lock index 196f580e0d5..9ae1bb905b2 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -9699,11 +9699,13 @@ dependencies = [ "frame-system", "impl-trait-for-tuples", "log", + "pallet-assets", "pallet-balances", "pallet-contracts-fixtures", "pallet-contracts-primitives", "pallet-contracts-proc-macro", "pallet-insecure-randomness-collective-flip", + "pallet-message-queue", "pallet-proxy", "pallet-timestamp", "pallet-utility", @@ -9720,6 +9722,9 @@ dependencies = [ "sp-keystore", "sp-runtime", "sp-std 8.0.0", + "sp-tracing 10.0.0", + "staging-xcm", + "staging-xcm-builder", "wasm-instrument 0.4.0", "wasmi", "wat", @@ -9734,6 +9739,44 @@ dependencies = [ "wat", ] +[[package]] +name = "pallet-contracts-mock-network" +version = "1.0.0" +dependencies = [ + "assert_matches", + "frame-support", + "frame-system", + "pallet-assets", + "pallet-balances", + "pallet-contracts", + "pallet-contracts-fixtures", + "pallet-contracts-primitives", + "pallet-contracts-proc-macro", + "pallet-insecure-randomness-collective-flip", + "pallet-message-queue", + "pallet-proxy", + "pallet-timestamp", + "pallet-utility", + "pallet-xcm", + "parity-scale-codec", + "polkadot-parachain-primitives", + "polkadot-primitives", + "polkadot-runtime-parachains", + 
"pretty_assertions", + "scale-info", + "sp-api", + "sp-core", + "sp-io", + "sp-keystore", + "sp-runtime", + "sp-std 8.0.0", + "sp-tracing 10.0.0", + "staging-xcm", + "staging-xcm-builder", + "staging-xcm-executor", + "xcm-simulator", +] + [[package]] name = "pallet-contracts-primitives" version = "24.0.0" diff --git a/Cargo.toml b/Cargo.toml index 15a7d5c35bf..f9779681cae 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -293,6 +293,7 @@ members = [ "substrate/frame/contracts/fixtures", "substrate/frame/contracts/primitives", "substrate/frame/contracts/proc-macro", + "substrate/frame/contracts/mock-network", "substrate/frame/conviction-voting", "substrate/frame/core-fellowship", "substrate/frame/democracy", diff --git a/cumulus/parachains/runtimes/contracts/contracts-rococo/src/contracts.rs b/cumulus/parachains/runtimes/contracts/contracts-rococo/src/contracts.rs index b86f797ee03..6c100deaa9e 100644 --- a/cumulus/parachains/runtimes/contracts/contracts-rococo/src/contracts.rs +++ b/cumulus/parachains/runtimes/contracts/contracts-rococo/src/contracts.rs @@ -72,4 +72,5 @@ impl Config for Runtime { type RuntimeHoldReason = RuntimeHoldReason; type Debug = (); type Environment = (); + type Xcm = pallet_xcm::Pallet; } diff --git a/polkadot/xcm/xcm-builder/src/controller.rs b/polkadot/xcm/xcm-builder/src/controller.rs index 0ee638b73e1..931d812eaaf 100644 --- a/polkadot/xcm/xcm-builder/src/controller.rs +++ b/polkadot/xcm/xcm-builder/src/controller.rs @@ -21,7 +21,7 @@ use frame_support::pallet_prelude::DispatchError; use sp_std::boxed::Box; use xcm::prelude::*; -use xcm_executor::traits::QueryHandler; +pub use xcm_executor::traits::QueryHandler; /// Umbrella trait for all Controller traits. pub trait Controller: diff --git a/polkadot/xcm/xcm-builder/src/lib.rs b/polkadot/xcm/xcm-builder/src/lib.rs index 35f95b85c89..455f17a5348 100644 --- a/polkadot/xcm/xcm-builder/src/lib.rs +++ b/polkadot/xcm/xcm-builder/src/lib.rs @@ -119,5 +119,5 @@ pub use pay::{FixedLocation, LocatableAssetId, PayAccountId32OnChainOverXcm, Pay mod controller; pub use controller::{ Controller, ExecuteController, ExecuteControllerWeightInfo, QueryController, - QueryControllerWeightInfo, SendController, SendControllerWeightInfo, + QueryControllerWeightInfo, QueryHandler, SendController, SendControllerWeightInfo, }; diff --git a/substrate/bin/node/runtime/src/lib.rs b/substrate/bin/node/runtime/src/lib.rs index e9adc48ff9c..90946b71311 100644 --- a/substrate/bin/node/runtime/src/lib.rs +++ b/substrate/bin/node/runtime/src/lib.rs @@ -1353,6 +1353,7 @@ impl pallet_contracts::Config for Runtime { type CodeHashLockupDepositPercent = CodeHashLockupDepositPercent; type Debug = (); type Environment = (); + type Xcm = (); } impl pallet_sudo::Config for Runtime { diff --git a/substrate/frame/contracts/Cargo.toml b/substrate/frame/contracts/Cargo.toml index 0eb50c2b0ba..239b0865e0f 100644 --- a/substrate/frame/contracts/Cargo.toml +++ b/substrate/frame/contracts/Cargo.toml @@ -48,6 +48,9 @@ sp-io = { path = "../../primitives/io", default-features = false} sp-runtime = { path = "../../primitives/runtime", default-features = false} sp-std = { path = "../../primitives/std", default-features = false} +xcm = { package = "staging-xcm", path = "../../../polkadot/xcm", default-features = false} +xcm-builder = { package = "staging-xcm-builder", path = "../../../polkadot/xcm/xcm-builder", default-features = false} + [dev-dependencies] array-bytes = "6.1" assert_matches = "1" @@ -56,13 +59,19 @@ pretty_assertions = "1" wat = "1" 
pallet-contracts-fixtures = { path = "./fixtures" } +# Polkadot Dependencies +xcm-builder = {package = "staging-xcm-builder", path = "../../../polkadot/xcm/xcm-builder"} + # Substrate Dependencies pallet-balances = { path = "../balances" } pallet-timestamp = { path = "../timestamp" } +pallet-message-queue = { path = "../message-queue" } pallet-insecure-randomness-collective-flip = { path = "../insecure-randomness-collective-flip" } pallet-utility = { path = "../utility" } +pallet-assets = { path = "../assets" } pallet-proxy = { path = "../proxy" } sp-keystore = { path = "../../primitives/keystore" } +sp-tracing = { path = "../../primitives/tracing" } [features] default = [ "std" ] @@ -92,12 +101,16 @@ std = [ "sp-std/std", "wasm-instrument/std", "wasmi/std", + "xcm-builder/std", + "xcm/std", ] runtime-benchmarks = [ "frame-benchmarking/runtime-benchmarks", "frame-support/runtime-benchmarks", "frame-system/runtime-benchmarks", + "pallet-assets/runtime-benchmarks", "pallet-balances/runtime-benchmarks", + "pallet-message-queue/runtime-benchmarks", "pallet-proxy/runtime-benchmarks", "pallet-timestamp/runtime-benchmarks", "pallet-utility/runtime-benchmarks", @@ -105,12 +118,15 @@ runtime-benchmarks = [ "rand_pcg", "sp-runtime/runtime-benchmarks", "wasm-instrument", + "xcm-builder/runtime-benchmarks", ] try-runtime = [ "frame-support/try-runtime", "frame-system/try-runtime", + "pallet-assets/try-runtime", "pallet-balances/try-runtime", "pallet-insecure-randomness-collective-flip/try-runtime", + "pallet-message-queue/try-runtime", "pallet-proxy/try-runtime", "pallet-timestamp/try-runtime", "pallet-utility/try-runtime", diff --git a/substrate/frame/contracts/fixtures/data/xcm_execute.wat b/substrate/frame/contracts/fixtures/data/xcm_execute.wat new file mode 100644 index 00000000000..b3459996a2e --- /dev/null +++ b/substrate/frame/contracts/fixtures/data/xcm_execute.wat @@ -0,0 +1,52 @@ +;; This passes its input to `seal_xcm_execute` and returns the return value to its caller. +(module + (import "seal0" "xcm_execute" (func $xcm_execute (param i32 i32 i32) (result i32))) + (import "seal0" "seal_input" (func $seal_input (param i32 i32))) + (import "seal0" "seal_return" (func $seal_return (param i32 i32 i32))) + (import "env" "memory" (memory 1 1)) + + ;; 0x1000 = 4k in little endian + ;; Size of input buffer + (data (i32.const 0) "\00\10") + + (func $assert (param i32) + (block $ok + (br_if $ok + (get_local 0) + ) + (unreachable) + ) + ) + + (func (export "call") + ;; Receive the encoded call + (call $seal_input + (i32.const 4) ;; Pointer to the input buffer + (i32.const 0) ;; Pointer to the buffer length (before call) and to the copied data length (after call) + ) + ;; Input data layout. + ;; [0..4) - size of the call + ;; [4..) - message + + ;; Call xcm_execute with provided input. 
+ (call $assert + (i32.eq + (call $xcm_execute + (i32.const 4) ;; Pointer where the message is stored + (i32.load (i32.const 0)) ;; Size of the message + (i32.const 100) ;; Pointer to the where the outcome is stored + ) + (i32.const 0) + ) + ) + + (call $seal_return + (i32.const 0) ;; flags + (i32.const 100) ;; Pointer to returned value + (i32.const 10) ;; length of returned value + ) + ) + + (func (export "deploy")) +) + diff --git a/substrate/frame/contracts/fixtures/data/xcm_send.wat b/substrate/frame/contracts/fixtures/data/xcm_send.wat new file mode 100644 index 00000000000..9eec6388de9 --- /dev/null +++ b/substrate/frame/contracts/fixtures/data/xcm_send.wat @@ -0,0 +1,59 @@ +;; This passes its input to `seal_xcm_send` and returns the return value to its caller. +(module + (import "seal0" "xcm_send" (func $xcm_send (param i32 i32 i32 i32) (result i32))) + (import "seal0" "seal_input" (func $seal_input (param i32 i32))) + (import "seal0" "seal_return" (func $seal_return (param i32 i32 i32))) + (import "env" "memory" (memory 1 1)) + + ;; 0x1000 = 4k in little endian + ;; size of input buffer + (data (i32.const 0) "\00\10") + + (func $assert (param i32) + (block $ok + (br_if $ok + (get_local 0) + ) + (unreachable) + ) + ) + + (func (export "call") + + ;; Receive the encoded call + (call $seal_input + (i32.const 4) ;; Pointer to the input buffer + (i32.const 0) ;; Size of the length buffer + ) + + ;; Input data layout. + ;; [0..4) - size of the call + ;; [4..7) - dest + ;; [7..) - message + + ;; Call xcm_send with provided input. + (call $assert + (i32.eq + (call $xcm_send + (i32.const 4) ;; Pointer where the dest is stored + (i32.const 7) ;; Pointer where the message is stored + (i32.sub + (i32.load (i32.const 0)) ;; length of the input buffer + (i32.const 3) ;; Size of the XCM dest + ) + (i32.const 100) ;; Pointer to the where the message_id is stored + ) + (i32.const 0) + ) + ) + + ;; Return the the message_id + (call $seal_return + (i32.const 0) ;; flags + (i32.const 100) ;; Pointer to returned value + (i32.const 32) ;; length of returned value + ) + ) + + (func (export "deploy")) +) diff --git a/substrate/frame/contracts/fixtures/src/lib.rs b/substrate/frame/contracts/fixtures/src/lib.rs index 32f4023e644..48117f7ca94 100644 --- a/substrate/frame/contracts/fixtures/src/lib.rs +++ b/substrate/frame/contracts/fixtures/src/lib.rs @@ -23,6 +23,8 @@ fn fixtures_root_dir() -> PathBuf { // When `CARGO_MANIFEST_DIR` is not set, Rust resolves relative paths from the root folder (Err(_), _) => "substrate/frame/contracts/fixtures/data".into(), (Ok(path), Ok(s)) if s == "pallet-contracts" => PathBuf::from(path).join("fixtures/data"), + (Ok(path), Ok(s)) if s == "pallet-contracts-mock-network" => + PathBuf::from(path).parent().unwrap().join("fixtures/data"), (Ok(_), pkg_name) => panic!("Failed to resolve fixture dir for tests from {pkg_name:?}."), } } diff --git a/substrate/frame/contracts/mock-network/Cargo.toml b/substrate/frame/contracts/mock-network/Cargo.toml new file mode 100644 index 00000000000..9d5fe1aaf4e --- /dev/null +++ b/substrate/frame/contracts/mock-network/Cargo.toml @@ -0,0 +1,91 @@ +[package] +name = "pallet-contracts-mock-network" +version = "1.0.0" +authors.workspace = true +edition.workspace = true +license.workspace = true +homepage = "https://substrate.io" +repository.workspace = true +description = "A mock network for testing pallet-contracts" + +[dependencies] +codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = [ "derive", 
"max-encoded-len"] } + +frame-support = { path = "../../support", default-features = false} +frame-system = { path = "../../system", default-features = false} +pallet-assets = { path = "../../assets" } +pallet-balances = { path = "../../balances" } +pallet-contracts = { path = ".." } +pallet-contracts-primitives = { path = "../primitives", default-features = false} +pallet-contracts-proc-macro = { path = "../proc-macro" } +pallet-insecure-randomness-collective-flip = { path = "../../insecure-randomness-collective-flip" } +pallet-message-queue = { path = "../../message-queue" } +pallet-proxy = { path = "../../proxy" } +pallet-timestamp = { path = "../../timestamp" } +pallet-utility = { path = "../../utility" } +pallet-xcm = { path = "../../../../polkadot/xcm/pallet-xcm", default-features = false} +polkadot-parachain-primitives = { path = "../../../../polkadot/parachain" } +polkadot-primitives = { path = "../../../../polkadot/primitives" } +polkadot-runtime-parachains = {path = "../../../../polkadot/runtime/parachains"} +scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +sp-api = { path = "../../../primitives/api", default-features = false} +sp-core = { path = "../../../primitives/core", default-features = false} +sp-io = { path = "../../../primitives/io", default-features = false} +sp-keystore = { path = "../../../primitives/keystore" } +sp-runtime = { path = "../../../primitives/runtime", default-features = false} +sp-std = { path = "../../../primitives/std", default-features = false} +sp-tracing = { path = "../../../primitives/tracing" } +xcm = { package = "staging-xcm", path = "../../../../polkadot/xcm", default-features = false} +xcm-builder = {package = "staging-xcm-builder", path = "../../../../polkadot/xcm/xcm-builder"} +xcm-executor = { package = "staging-xcm-executor", path = "../../../../polkadot/xcm/xcm-executor", default-features = false} +xcm-simulator = {path = "../../../../polkadot/xcm/xcm-simulator"} + +[dev-dependencies] +assert_matches = "1" +pretty_assertions = "1" +pallet-contracts-fixtures = { path = "../fixtures" } + +[features] +default = [ "std" ] +std = [ + "codec/std", + "frame-support/std", + "frame-system/std", + "pallet-balances/std", + "pallet-contracts-primitives/std", + "pallet-contracts-proc-macro/full", + "pallet-contracts/std", + "pallet-insecure-randomness-collective-flip/std", + "pallet-proxy/std", + "pallet-timestamp/std", + "pallet-utility/std", + "pallet-xcm/std", + "scale-info/std", + "sp-api/std", + "sp-core/std", + "sp-io/std", + "sp-keystore/std", + "sp-runtime/std", + "sp-std/std", + "xcm-executor/std", + "xcm/std", +] + +runtime-benchmarks = [ + "frame-support/runtime-benchmarks", + "frame-system/runtime-benchmarks", + "pallet-assets/runtime-benchmarks", + "pallet-balances/runtime-benchmarks", + "pallet-contracts/runtime-benchmarks", + "pallet-message-queue/runtime-benchmarks", + "pallet-proxy/runtime-benchmarks", + "pallet-timestamp/runtime-benchmarks", + "pallet-utility/runtime-benchmarks", + "pallet-xcm/runtime-benchmarks", + "polkadot-parachain-primitives/runtime-benchmarks", + "polkadot-primitives/runtime-benchmarks", + "polkadot-runtime-parachains/runtime-benchmarks", + "sp-runtime/runtime-benchmarks", + "xcm-builder/runtime-benchmarks", + "xcm-executor/runtime-benchmarks", +] diff --git a/substrate/frame/contracts/mock-network/src/lib.rs b/substrate/frame/contracts/mock-network/src/lib.rs new file mode 100644 index 00000000000..345c69541b6 --- /dev/null +++ 
b/substrate/frame/contracts/mock-network/src/lib.rs @@ -0,0 +1,151 @@ +// Copyright Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +pub mod mocks; +pub mod parachain; +pub mod primitives; +pub mod relay_chain; + +#[cfg(test)] +mod tests; + +use crate::primitives::{AccountId, UNITS}; +use sp_runtime::BuildStorage; +use xcm::latest::{prelude::*, MultiLocation}; +use xcm_executor::traits::ConvertLocation; +use xcm_simulator::{decl_test_network, decl_test_parachain, decl_test_relay_chain, TestExt}; + +// Accounts +pub const ADMIN: sp_runtime::AccountId32 = sp_runtime::AccountId32::new([0u8; 32]); +pub const ALICE: sp_runtime::AccountId32 = sp_runtime::AccountId32::new([1u8; 32]); +pub const BOB: sp_runtime::AccountId32 = sp_runtime::AccountId32::new([2u8; 32]); + +// Balances +pub const INITIAL_BALANCE: u128 = 1_000_000_000 * UNITS; + +decl_test_parachain! { + pub struct ParaA { + Runtime = parachain::Runtime, + XcmpMessageHandler = parachain::MsgQueue, + DmpMessageHandler = parachain::MsgQueue, + new_ext = para_ext(1), + } +} + +decl_test_relay_chain! { + pub struct Relay { + Runtime = relay_chain::Runtime, + RuntimeCall = relay_chain::RuntimeCall, + RuntimeEvent = relay_chain::RuntimeEvent, + XcmConfig = relay_chain::XcmConfig, + MessageQueue = relay_chain::MessageQueue, + System = relay_chain::System, + new_ext = relay_ext(), + } +} + +decl_test_network! 
{ + pub struct MockNet { + relay_chain = Relay, + parachains = vec![ + (1, ParaA), + ], + } +} + +pub fn relay_sovereign_account_id() -> AccountId { + let location: MultiLocation = (Parent,).into(); + parachain::SovereignAccountOf::convert_location(&location).unwrap() +} + +pub fn parachain_sovereign_account_id(para: u32) -> AccountId { + let location: MultiLocation = (Parachain(para),).into(); + relay_chain::SovereignAccountOf::convert_location(&location).unwrap() +} + +pub fn parachain_account_sovereign_account_id( + para: u32, + who: sp_runtime::AccountId32, +) -> AccountId { + let location: MultiLocation = ( + Parachain(para), + AccountId32 { network: Some(relay_chain::RelayNetwork::get()), id: who.into() }, + ) + .into(); + relay_chain::SovereignAccountOf::convert_location(&location).unwrap() +} + +pub fn para_ext(para_id: u32) -> sp_io::TestExternalities { + use parachain::{MsgQueue, Runtime, System}; + + let mut t = frame_system::GenesisConfig::::default().build_storage().unwrap(); + + pallet_balances::GenesisConfig:: { + balances: vec![ + (ALICE, INITIAL_BALANCE), + (relay_sovereign_account_id(), INITIAL_BALANCE), + (BOB, INITIAL_BALANCE), + ], + } + .assimilate_storage(&mut t) + .unwrap(); + + pallet_assets::GenesisConfig:: { + assets: vec![ + (0u128, ADMIN, false, 1u128), // Create derivative asset for relay's native token + ], + metadata: Default::default(), + accounts: vec![ + (0u128, ALICE, INITIAL_BALANCE), + (0u128, relay_sovereign_account_id(), INITIAL_BALANCE), + ], + } + .assimilate_storage(&mut t) + .unwrap(); + + let mut ext = sp_io::TestExternalities::new(t); + ext.execute_with(|| { + sp_tracing::try_init_simple(); + System::set_block_number(1); + MsgQueue::set_para_id(para_id.into()); + }); + ext +} + +pub fn relay_ext() -> sp_io::TestExternalities { + use relay_chain::{Runtime, System}; + + let mut t = frame_system::GenesisConfig::::default().build_storage().unwrap(); + + pallet_balances::GenesisConfig:: { + balances: vec![ + (ALICE, INITIAL_BALANCE), + (parachain_sovereign_account_id(1), INITIAL_BALANCE), + (parachain_account_sovereign_account_id(1, ALICE), INITIAL_BALANCE), + ], + } + .assimilate_storage(&mut t) + .unwrap(); + + let mut ext = sp_io::TestExternalities::new(t); + ext.execute_with(|| { + System::set_block_number(1); + }); + ext +} + +pub type ParachainPalletXcm = pallet_xcm::Pallet; +pub type ParachainBalances = pallet_balances::Pallet; diff --git a/substrate/frame/contracts/mock-network/src/mocks.rs b/substrate/frame/contracts/mock-network/src/mocks.rs new file mode 100644 index 00000000000..bf3baec7a52 --- /dev/null +++ b/substrate/frame/contracts/mock-network/src/mocks.rs @@ -0,0 +1,18 @@ +// Copyright Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . 
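+//! Mock message-queue implementations used by the simulated network: an XCMP/DMP handler pallet for the parachain and a `ProcessMessage` implementation for the relay chain.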
+ +pub mod msg_queue; +pub mod relay_message_queue; diff --git a/substrate/frame/contracts/mock-network/src/mocks/msg_queue.rs b/substrate/frame/contracts/mock-network/src/mocks/msg_queue.rs new file mode 100644 index 00000000000..82fb8590e26 --- /dev/null +++ b/substrate/frame/contracts/mock-network/src/mocks/msg_queue.rs @@ -0,0 +1,168 @@ +// Copyright Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! Parachain runtime mock. + +use codec::{Decode, Encode}; + +use frame_support::weights::Weight; +use polkadot_parachain_primitives::primitives::{ + DmpMessageHandler, Id as ParaId, XcmpMessageFormat, XcmpMessageHandler, +}; +use polkadot_primitives::BlockNumber as RelayBlockNumber; +use sp_runtime::traits::{Get, Hash}; + +use sp_std::prelude::*; +use xcm::{latest::prelude::*, VersionedXcm}; + +#[frame_support::pallet] +pub mod pallet { + use super::*; + use frame_support::pallet_prelude::*; + + #[pallet::config] + pub trait Config: frame_system::Config { + type RuntimeEvent: From> + IsType<::RuntimeEvent>; + type XcmExecutor: ExecuteXcm; + } + + #[pallet::call] + impl Pallet {} + + #[pallet::pallet] + #[pallet::without_storage_info] + pub struct Pallet(_); + + #[pallet::storage] + #[pallet::getter(fn parachain_id)] + pub(super) type ParachainId = StorageValue<_, ParaId, ValueQuery>; + + #[pallet::storage] + #[pallet::getter(fn received_dmp)] + /// A queue of received DMP messages + pub(super) type ReceivedDmp = StorageValue<_, Vec>, ValueQuery>; + + impl Get for Pallet { + fn get() -> ParaId { + Self::parachain_id() + } + } + + pub type MessageId = [u8; 32]; + + #[pallet::event] + #[pallet::generate_deposit(pub(super) fn deposit_event)] + pub enum Event { + /// Some XCM was executed OK. + Success(Option), + /// Some XCM failed. + Fail(Option, XcmError), + /// Bad XCM version used. + BadVersion(Option), + /// Bad XCM format used. + BadFormat(Option), + + // DMP + /// Downward message is invalid XCM. + InvalidFormat(MessageId), + /// Downward message is unsupported version of XCM. + UnsupportedVersion(MessageId), + /// Downward message executed with the given outcome. 
+ ExecutedDownward(MessageId, Outcome), + } + + impl Pallet { + pub fn set_para_id(para_id: ParaId) { + ParachainId::::put(para_id); + } + + fn handle_xcmp_message( + sender: ParaId, + _sent_at: RelayBlockNumber, + xcm: VersionedXcm, + max_weight: Weight, + ) -> Result { + let hash = Encode::using_encoded(&xcm, T::Hashing::hash); + let message_hash = Encode::using_encoded(&xcm, sp_io::hashing::blake2_256); + let (result, event) = match Xcm::::try_from(xcm) { + Ok(xcm) => { + let location = (Parent, Parachain(sender.into())); + match T::XcmExecutor::execute_xcm(location, xcm, message_hash, max_weight) { + Outcome::Error(e) => (Err(e), Event::Fail(Some(hash), e)), + Outcome::Complete(w) => (Ok(w), Event::Success(Some(hash))), + // As far as the caller is concerned, this was dispatched without error, so + // we just report the weight used. + Outcome::Incomplete(w, e) => (Ok(w), Event::Fail(Some(hash), e)), + } + }, + Err(()) => (Err(XcmError::UnhandledXcmVersion), Event::BadVersion(Some(hash))), + }; + Self::deposit_event(event); + result + } + } + + impl XcmpMessageHandler for Pallet { + fn handle_xcmp_messages<'a, I: Iterator>( + iter: I, + max_weight: Weight, + ) -> Weight { + for (sender, sent_at, data) in iter { + let mut data_ref = data; + let _ = XcmpMessageFormat::decode(&mut data_ref) + .expect("Simulator encodes with versioned xcm format; qed"); + + let mut remaining_fragments = data_ref; + while !remaining_fragments.is_empty() { + if let Ok(xcm) = + VersionedXcm::::decode(&mut remaining_fragments) + { + let _ = Self::handle_xcmp_message(sender, sent_at, xcm, max_weight); + } else { + debug_assert!(false, "Invalid incoming XCMP message data"); + } + } + } + max_weight + } + } + + impl DmpMessageHandler for Pallet { + fn handle_dmp_messages( + iter: impl Iterator)>, + limit: Weight, + ) -> Weight { + for (_i, (_sent_at, data)) in iter.enumerate() { + let id = sp_io::hashing::blake2_256(&data[..]); + let maybe_versioned = VersionedXcm::::decode(&mut &data[..]); + match maybe_versioned { + Err(_) => { + Self::deposit_event(Event::InvalidFormat(id)); + }, + Ok(versioned) => match Xcm::try_from(versioned) { + Err(()) => Self::deposit_event(Event::UnsupportedVersion(id)), + Ok(x) => { + let outcome = T::XcmExecutor::execute_xcm(Parent, x.clone(), id, limit); + >::append(x); + Self::deposit_event(Event::ExecutedDownward(id, outcome)); + }, + }, + } + } + limit + } + } +} diff --git a/substrate/frame/contracts/mock-network/src/mocks/relay_message_queue.rs b/substrate/frame/contracts/mock-network/src/mocks/relay_message_queue.rs new file mode 100644 index 00000000000..14099965e3f --- /dev/null +++ b/substrate/frame/contracts/mock-network/src/mocks/relay_message_queue.rs @@ -0,0 +1,52 @@ +// Copyright Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . 
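+//! `ProcessMessage` implementation for the mock relay chain: routes UMP messages coming from parachains into the XCM executor via `ProcessXcmMessage`.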
+ +use frame_support::{parameter_types, weights::Weight}; +use xcm::latest::prelude::*; +use xcm_simulator::{ + AggregateMessageOrigin, ProcessMessage, ProcessMessageError, UmpQueueId, WeightMeter, +}; + +use crate::relay_chain::{RuntimeCall, XcmConfig}; + +parameter_types! { + /// Amount of weight that can be spent per block to service messages. + pub MessageQueueServiceWeight: Weight = Weight::from_parts(1_000_000_000, 1_000_000); + pub const MessageQueueHeapSize: u32 = 65_536; + pub const MessageQueueMaxStale: u32 = 16; +} + +/// Message processor to handle any messages that were enqueued into the `MessageQueue` pallet. +pub struct MessageProcessor; +impl ProcessMessage for MessageProcessor { + type Origin = AggregateMessageOrigin; + + fn process_message( + message: &[u8], + origin: Self::Origin, + meter: &mut WeightMeter, + id: &mut [u8; 32], + ) -> Result { + let para = match origin { + AggregateMessageOrigin::Ump(UmpQueueId::Para(para)) => para, + }; + xcm_builder::ProcessXcmMessage::< + Junction, + xcm_executor::XcmExecutor, + RuntimeCall, + >::process_message(message, Junction::Parachain(para.into()), meter, id) + } +} diff --git a/substrate/frame/contracts/mock-network/src/parachain.rs b/substrate/frame/contracts/mock-network/src/parachain.rs new file mode 100644 index 00000000000..1465b02f903 --- /dev/null +++ b/substrate/frame/contracts/mock-network/src/parachain.rs @@ -0,0 +1,353 @@ +// Copyright Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! Parachain runtime mock. + +mod contracts_config; +use crate::{ + mocks::msg_queue::pallet as mock_msg_queue, + primitives::{AccountId, AssetIdForAssets, Balance}, +}; +use core::marker::PhantomData; +use frame_support::{ + construct_runtime, parameter_types, + traits::{AsEnsureOriginWithArg, Contains, ContainsPair, Everything, EverythingBut, Nothing}, + weights::{ + constants::{WEIGHT_PROOF_SIZE_PER_MB, WEIGHT_REF_TIME_PER_SECOND}, + Weight, + }, +}; +use frame_system::{EnsureRoot, EnsureSigned}; +use pallet_xcm::XcmPassthrough; +use sp_core::{ConstU32, ConstU64, H256}; +use sp_runtime::traits::{Get, IdentityLookup, MaybeEquivalence}; + +use sp_std::prelude::*; +use xcm::latest::prelude::*; +use xcm_builder::{ + AccountId32Aliases, AllowExplicitUnpaidExecutionFrom, AllowTopLevelPaidExecutionFrom, + ConvertedConcreteId, CurrencyAdapter as XcmCurrencyAdapter, EnsureXcmOrigin, + FixedRateOfFungible, FixedWeightBounds, FungiblesAdapter, IsConcrete, NativeAsset, NoChecking, + ParentAsSuperuser, ParentIsPreset, SignedAccountId32AsNative, SignedToAccountId32, + SovereignSignedViaLocation, WithComputedOrigin, +}; +use xcm_executor::{traits::JustTry, Config, XcmExecutor}; + +pub type SovereignAccountOf = + (AccountId32Aliases, ParentIsPreset); + +parameter_types! 
{ + pub const BlockHashCount: u64 = 250; +} + +impl frame_system::Config for Runtime { + type RuntimeOrigin = RuntimeOrigin; + type RuntimeCall = RuntimeCall; + type Nonce = u64; + type Block = Block; + type Hash = H256; + type Hashing = ::sp_runtime::traits::BlakeTwo256; + type AccountId = AccountId; + type Lookup = IdentityLookup; + type RuntimeEvent = RuntimeEvent; + type BlockHashCount = BlockHashCount; + type BlockWeights = (); + type BlockLength = (); + type Version = (); + type PalletInfo = PalletInfo; + type AccountData = pallet_balances::AccountData; + type OnNewAccount = (); + type OnKilledAccount = (); + type DbWeight = (); + type BaseCallFilter = Everything; + type SystemWeightInfo = (); + type SS58Prefix = (); + type OnSetCode = (); + type MaxConsumers = ConstU32<16>; +} + +parameter_types! { + pub ExistentialDeposit: Balance = 1; + pub const MaxLocks: u32 = 50; + pub const MaxReserves: u32 = 50; +} + +impl pallet_balances::Config for Runtime { + type AccountStore = System; + type Balance = Balance; + type DustRemoval = (); + type ExistentialDeposit = ExistentialDeposit; + type FreezeIdentifier = (); + type MaxFreezes = ConstU32<0>; + type MaxHolds = ConstU32<1>; + type MaxLocks = MaxLocks; + type MaxReserves = MaxReserves; + type ReserveIdentifier = [u8; 8]; + type RuntimeEvent = RuntimeEvent; + type RuntimeHoldReason = RuntimeHoldReason; + type RuntimeFreezeReason = RuntimeFreezeReason; + type WeightInfo = (); +} + +parameter_types! { + pub const AssetDeposit: u128 = 1_000_000; + pub const MetadataDepositBase: u128 = 1_000_000; + pub const MetadataDepositPerByte: u128 = 100_000; + pub const AssetAccountDeposit: u128 = 1_000_000; + pub const ApprovalDeposit: u128 = 1_000_000; + pub const AssetsStringLimit: u32 = 50; + pub const RemoveItemsLimit: u32 = 50; +} + +impl pallet_assets::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + type Balance = Balance; + type AssetId = AssetIdForAssets; + type Currency = Balances; + type CreateOrigin = AsEnsureOriginWithArg>; + type ForceOrigin = EnsureRoot; + type AssetDeposit = AssetDeposit; + type MetadataDepositBase = MetadataDepositBase; + type MetadataDepositPerByte = MetadataDepositPerByte; + type AssetAccountDeposit = AssetAccountDeposit; + type ApprovalDeposit = ApprovalDeposit; + type StringLimit = AssetsStringLimit; + type Freezer = (); + type Extra = (); + type WeightInfo = (); + type RemoveItemsLimit = RemoveItemsLimit; + type AssetIdParameter = AssetIdForAssets; + type CallbackHandle = (); + #[cfg(feature = "runtime-benchmarks")] + type BenchmarkHelper = (); +} + +parameter_types! { + pub const ReservedXcmpWeight: Weight = Weight::from_parts(WEIGHT_REF_TIME_PER_SECOND.saturating_div(4), 0); + pub const ReservedDmpWeight: Weight = Weight::from_parts(WEIGHT_REF_TIME_PER_SECOND.saturating_div(4), 0); +} + +parameter_types! { + pub const KsmLocation: MultiLocation = MultiLocation::parent(); + pub const TokenLocation: MultiLocation = Here.into_location(); + pub const RelayNetwork: NetworkId = ByGenesis([0; 32]); + pub UniversalLocation: InteriorMultiLocation = Parachain(MsgQueue::parachain_id().into()).into(); +} + +pub type XcmOriginToCallOrigin = ( + SovereignSignedViaLocation, + ParentAsSuperuser, + SignedAccountId32AsNative, + XcmPassthrough, +); + +parameter_types! 
{ + pub const XcmInstructionWeight: Weight = Weight::from_parts(1_000, 1_000); + pub TokensPerSecondPerMegabyte: (AssetId, u128, u128) = (Concrete(Parent.into()), 1_000_000_000_000, 1024 * 1024); + pub const MaxInstructions: u32 = 100; + pub const MaxAssetsIntoHolding: u32 = 64; + pub ForeignPrefix: MultiLocation = (Parent,).into(); + pub CheckingAccount: AccountId = PolkadotXcm::check_account(); + pub TrustedLockPairs: (MultiLocation, MultiAssetFilter) = + (Parent.into(), Wild(AllOf { id: Concrete(Parent.into()), fun: WildFungible })); +} + +pub fn estimate_message_fee(number_of_instructions: u64) -> u128 { + let weight = estimate_weight(number_of_instructions); + + estimate_fee_for_weight(weight) +} + +pub fn estimate_weight(number_of_instructions: u64) -> Weight { + XcmInstructionWeight::get().saturating_mul(number_of_instructions) +} + +pub fn estimate_fee_for_weight(weight: Weight) -> u128 { + let (_, units_per_second, units_per_mb) = TokensPerSecondPerMegabyte::get(); + + units_per_second * (weight.ref_time() as u128) / (WEIGHT_REF_TIME_PER_SECOND as u128) + + units_per_mb * (weight.proof_size() as u128) / (WEIGHT_PROOF_SIZE_PER_MB as u128) +} + +pub type LocalBalancesTransactor = + XcmCurrencyAdapter, SovereignAccountOf, AccountId, ()>; + +pub struct FromMultiLocationToAsset(PhantomData<(MultiLocation, AssetId)>); +impl MaybeEquivalence + for FromMultiLocationToAsset +{ + fn convert(value: &MultiLocation) -> Option { + match *value { + MultiLocation { parents: 1, interior: Here } => Some(0 as AssetIdForAssets), + MultiLocation { parents: 1, interior: X1(Parachain(para_id)) } => + Some(para_id as AssetIdForAssets), + _ => None, + } + } + + fn convert_back(_id: &AssetIdForAssets) -> Option { + None + } +} + +pub type ForeignAssetsTransactor = FungiblesAdapter< + Assets, + ConvertedConcreteId< + AssetIdForAssets, + Balance, + FromMultiLocationToAsset, + JustTry, + >, + SovereignAccountOf, + AccountId, + NoChecking, + CheckingAccount, +>; + +/// Means for transacting assets on this chain +pub type AssetTransactors = (LocalBalancesTransactor, ForeignAssetsTransactor); + +pub struct ParentRelay; +impl Contains for ParentRelay { + fn contains(location: &MultiLocation) -> bool { + location.contains_parents_only(1) + } +} +pub struct ThisParachain; +impl Contains for ThisParachain { + fn contains(location: &MultiLocation) -> bool { + matches!( + location, + MultiLocation { parents: 0, interior: Junctions::X1(Junction::AccountId32 { .. }) } + ) + } +} + +pub type XcmRouter = crate::ParachainXcmRouter; + +pub type Barrier = ( + xcm_builder::AllowUnpaidExecutionFrom, + WithComputedOrigin< + (AllowExplicitUnpaidExecutionFrom, AllowTopLevelPaidExecutionFrom), + UniversalLocation, + ConstU32<1>, + >, +); + +parameter_types! 
{ + pub NftCollectionOne: MultiAssetFilter + = Wild(AllOf { fun: WildNonFungible, id: Concrete((Parent, GeneralIndex(1)).into()) }); + pub NftCollectionOneForRelay: (MultiAssetFilter, MultiLocation) + = (NftCollectionOne::get(), Parent.into()); + pub RelayNativeAsset: MultiAssetFilter = Wild(AllOf { fun: WildFungible, id: Concrete((Parent, Here).into()) }); + pub RelayNativeAssetForRelay: (MultiAssetFilter, MultiLocation) = (RelayNativeAsset::get(), Parent.into()); +} +pub type TrustedTeleporters = + (xcm_builder::Case, xcm_builder::Case); +pub type TrustedReserves = EverythingBut>; + +pub struct XcmConfig; +impl Config for XcmConfig { + type RuntimeCall = RuntimeCall; + type XcmSender = XcmRouter; + type AssetTransactor = AssetTransactors; + type OriginConverter = XcmOriginToCallOrigin; + type IsReserve = (NativeAsset, TrustedReserves); + type IsTeleporter = TrustedTeleporters; + type UniversalLocation = UniversalLocation; + type Barrier = Barrier; + type Weigher = FixedWeightBounds; + type Trader = FixedRateOfFungible; + type ResponseHandler = PolkadotXcm; + type AssetTrap = PolkadotXcm; + type AssetLocker = PolkadotXcm; + type AssetExchanger = (); + type AssetClaims = PolkadotXcm; + type SubscriptionService = PolkadotXcm; + type PalletInstancesInfo = AllPalletsWithSystem; + type FeeManager = (); + type MaxAssetsIntoHolding = MaxAssetsIntoHolding; + type MessageExporter = (); + type UniversalAliases = Nothing; + type CallDispatcher = RuntimeCall; + type SafeCallFilter = Everything; + type Aliasers = Nothing; +} + +impl mock_msg_queue::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + type XcmExecutor = XcmExecutor; +} + +pub type LocalOriginToLocation = SignedToAccountId32; + +pub struct TrustedLockerCase(PhantomData); +impl> ContainsPair + for TrustedLockerCase +{ + fn contains(origin: &MultiLocation, asset: &MultiAsset) -> bool { + let (o, a) = T::get(); + a.matches(asset) && &o == origin + } +} + +impl pallet_xcm::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + type SendXcmOrigin = EnsureXcmOrigin; + type XcmRouter = XcmRouter; + type ExecuteXcmOrigin = EnsureXcmOrigin; + type XcmExecuteFilter = Everything; + type XcmExecutor = XcmExecutor; + type XcmTeleportFilter = Nothing; + type XcmReserveTransferFilter = Everything; + type Weigher = FixedWeightBounds; + type UniversalLocation = UniversalLocation; + type RuntimeOrigin = RuntimeOrigin; + type RuntimeCall = RuntimeCall; + const VERSION_DISCOVERY_QUEUE_SIZE: u32 = 100; + type AdvertisedXcmVersion = pallet_xcm::CurrentXcmVersion; + type Currency = Balances; + type CurrencyMatcher = IsConcrete; + type TrustedLockers = TrustedLockerCase; + type SovereignAccountOf = SovereignAccountOf; + type MaxLockers = ConstU32<8>; + type MaxRemoteLockConsumers = ConstU32<0>; + type RemoteLockConsumerIdentifier = (); + type WeightInfo = pallet_xcm::TestWeightInfo; + type AdminOrigin = EnsureRoot; +} + +type Block = frame_system::mocking::MockBlock; + +impl pallet_timestamp::Config for Runtime { + type Moment = u64; + type OnTimestampSet = (); + type MinimumPeriod = ConstU64<1>; + type WeightInfo = (); +} + +construct_runtime!( + pub enum Runtime + { + System: frame_system, + Balances: pallet_balances, + Timestamp: pallet_timestamp, + MsgQueue: mock_msg_queue, + PolkadotXcm: pallet_xcm, + Contracts: pallet_contracts, + Assets: pallet_assets, + } +); diff --git a/substrate/frame/contracts/mock-network/src/parachain/contracts_config.rs b/substrate/frame/contracts/mock-network/src/parachain/contracts_config.rs new file mode 100644 
index 00000000000..dadba394e26 --- /dev/null +++ b/substrate/frame/contracts/mock-network/src/parachain/contracts_config.rs @@ -0,0 +1,98 @@ +// Copyright Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +use super::{Balances, Runtime, RuntimeCall, RuntimeEvent}; +use crate::{ + parachain, + parachain::RuntimeHoldReason, + primitives::{Balance, CENTS}, +}; +use frame_support::{ + parameter_types, + traits::{ConstBool, ConstU32, Contains, Randomness}, + weights::Weight, +}; +use frame_system::pallet_prelude::BlockNumberFor; +use pallet_xcm::BalanceOf; +use sp_runtime::{traits::Convert, Perbill}; + +pub const fn deposit(items: u32, bytes: u32) -> Balance { + items as Balance * 1 * CENTS + (bytes as Balance) * 1 * CENTS +} + +parameter_types! { + pub const DepositPerItem: Balance = deposit(1, 0); + pub const DepositPerByte: Balance = deposit(0, 1); + pub const DefaultDepositLimit: Balance = deposit(1024, 1024 * 1024); + pub Schedule: pallet_contracts::Schedule = Default::default(); + pub const CodeHashLockupDepositPercent: Perbill = Perbill::from_percent(0); + pub const MaxDelegateDependencies: u32 = 32; +} + +pub struct DummyRandomness(sp_std::marker::PhantomData); + +impl Randomness> for DummyRandomness { + fn random(_subject: &[u8]) -> (T::Hash, BlockNumberFor) { + (Default::default(), Default::default()) + } +} + +impl Convert> for Runtime { + fn convert(w: Weight) -> BalanceOf { + w.ref_time().into() + } +} + +#[derive(Clone, Default)] +pub struct Filters; + +impl Contains for Filters { + fn contains(call: &RuntimeCall) -> bool { + match call { + parachain::RuntimeCall::Contracts(_) => true, + _ => false, + } + } +} + +impl pallet_contracts::Config for Runtime { + type AddressGenerator = pallet_contracts::DefaultAddressGenerator; + type CallFilter = Filters; + type CallStack = [pallet_contracts::Frame; 5]; + type ChainExtension = (); + type CodeHashLockupDepositPercent = CodeHashLockupDepositPercent; + type Currency = Balances; + type DefaultDepositLimit = DefaultDepositLimit; + type DepositPerByte = DepositPerByte; + type DepositPerItem = DepositPerItem; + type MaxCodeLen = ConstU32<{ 123 * 1024 }>; + type MaxDebugBufferLen = ConstU32<{ 2 * 1024 * 1024 }>; + type MaxDelegateDependencies = MaxDelegateDependencies; + type MaxStorageKeyLen = ConstU32<128>; + type Migrations = (); + type Randomness = DummyRandomness; + type RuntimeCall = RuntimeCall; + type RuntimeEvent = RuntimeEvent; + type RuntimeHoldReason = RuntimeHoldReason; + type Schedule = Schedule; + type Time = super::Timestamp; + type UnsafeUnstableInterface = ConstBool; + type WeightInfo = (); + type WeightPrice = Self; + type Debug = (); + type Environment = (); + type Xcm = pallet_xcm::Pallet; +} diff --git a/substrate/frame/contracts/mock-network/src/primitives.rs b/substrate/frame/contracts/mock-network/src/primitives.rs new file mode 100644 index 00000000000..efc42772f88 --- /dev/null +++ 
b/substrate/frame/contracts/mock-network/src/primitives.rs @@ -0,0 +1,23 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +pub type Balance = u128; + +pub const UNITS: Balance = 10_000_000_000; +pub const CENTS: Balance = UNITS / 100; // 100_000_000 + +pub type AccountId = sp_runtime::AccountId32; +pub type AssetIdForAssets = u128; diff --git a/substrate/frame/contracts/mock-network/src/relay_chain.rs b/substrate/frame/contracts/mock-network/src/relay_chain.rs new file mode 100644 index 00000000000..c59c8e4bfa8 --- /dev/null +++ b/substrate/frame/contracts/mock-network/src/relay_chain.rs @@ -0,0 +1,236 @@ +// Copyright Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! Relay chain runtime mock. + +use frame_support::{ + construct_runtime, parameter_types, + traits::{Contains, Everything, Nothing}, + weights::Weight, +}; + +use frame_system::EnsureRoot; +use sp_core::{ConstU32, H256}; +use sp_runtime::traits::IdentityLookup; + +use polkadot_parachain_primitives::primitives::Id as ParaId; +use polkadot_runtime_parachains::{configuration, origin, shared}; +use xcm::latest::prelude::*; +use xcm_builder::{ + AccountId32Aliases, AllowExplicitUnpaidExecutionFrom, AllowSubscriptionsFrom, + AllowTopLevelPaidExecutionFrom, ChildParachainAsNative, ChildParachainConvertsVia, + ChildSystemParachainAsSuperuser, CurrencyAdapter as XcmCurrencyAdapter, DescribeAllTerminal, + DescribeFamily, FixedRateOfFungible, FixedWeightBounds, HashedDescription, IsConcrete, + SignedAccountId32AsNative, SignedToAccountId32, SovereignSignedViaLocation, WithComputedOrigin, +}; +use xcm_executor::{Config, XcmExecutor}; + +use super::{ + mocks::relay_message_queue::*, + primitives::{AccountId, Balance}, +}; + +parameter_types! 
{ + pub const BlockHashCount: u64 = 250; +} + +impl frame_system::Config for Runtime { + type RuntimeOrigin = RuntimeOrigin; + type RuntimeCall = RuntimeCall; + type Block = Block; + type Nonce = u64; + type Hash = H256; + type Hashing = ::sp_runtime::traits::BlakeTwo256; + type AccountId = AccountId; + type Lookup = IdentityLookup; + type RuntimeEvent = RuntimeEvent; + type BlockHashCount = BlockHashCount; + type BlockWeights = (); + type BlockLength = (); + type Version = (); + type PalletInfo = PalletInfo; + type AccountData = pallet_balances::AccountData; + type OnNewAccount = (); + type OnKilledAccount = (); + type DbWeight = (); + type BaseCallFilter = Everything; + type SystemWeightInfo = (); + type SS58Prefix = (); + type OnSetCode = (); + type MaxConsumers = ConstU32<16>; +} + +parameter_types! { + pub ExistentialDeposit: Balance = 1; + pub const MaxLocks: u32 = 50; + pub const MaxReserves: u32 = 50; +} + +impl pallet_balances::Config for Runtime { + type MaxLocks = MaxLocks; + type Balance = Balance; + type RuntimeEvent = RuntimeEvent; + type DustRemoval = (); + type ExistentialDeposit = ExistentialDeposit; + type AccountStore = System; + type WeightInfo = (); + type MaxReserves = MaxReserves; + type ReserveIdentifier = [u8; 8]; + type FreezeIdentifier = (); + type MaxHolds = ConstU32<0>; + type MaxFreezes = ConstU32<0>; + type RuntimeHoldReason = RuntimeHoldReason; + type RuntimeFreezeReason = RuntimeFreezeReason; +} + +impl shared::Config for Runtime {} + +impl configuration::Config for Runtime { + type WeightInfo = configuration::TestWeightInfo; +} + +parameter_types! { + pub RelayNetwork: NetworkId = ByGenesis([0; 32]); + pub const TokenLocation: MultiLocation = Here.into_location(); + pub UniversalLocation: InteriorMultiLocation = Here; + pub UnitWeightCost: u64 = 1_000; +} + +pub type SovereignAccountOf = ( + HashedDescription>, + AccountId32Aliases, + ChildParachainConvertsVia, +); + +pub type LocalBalancesTransactor = + XcmCurrencyAdapter, SovereignAccountOf, AccountId, ()>; + +pub type AssetTransactors = LocalBalancesTransactor; + +type LocalOriginConverter = ( + SovereignSignedViaLocation, + ChildParachainAsNative, + SignedAccountId32AsNative, + ChildSystemParachainAsSuperuser, +); + +parameter_types! 
{ + pub const XcmInstructionWeight: Weight = Weight::from_parts(1_000, 1_000); + pub TokensPerSecondPerMegabyte: (AssetId, u128, u128) = + (Concrete(TokenLocation::get()), 1_000_000_000_000, 1024 * 1024); + pub const MaxInstructions: u32 = 100; + pub const MaxAssetsIntoHolding: u32 = 64; +} + +pub struct ChildrenParachains; +impl Contains for ChildrenParachains { + fn contains(location: &MultiLocation) -> bool { + matches!(location, MultiLocation { parents: 0, interior: X1(Parachain(_)) }) + } +} + +pub type XcmRouter = crate::RelayChainXcmRouter; +pub type Barrier = WithComputedOrigin< + ( + AllowExplicitUnpaidExecutionFrom, + AllowTopLevelPaidExecutionFrom, + AllowSubscriptionsFrom, + ), + UniversalLocation, + ConstU32<1>, +>; + +pub struct XcmConfig; +impl Config for XcmConfig { + type RuntimeCall = RuntimeCall; + type XcmSender = XcmRouter; + type AssetTransactor = AssetTransactors; + type OriginConverter = LocalOriginConverter; + type IsReserve = (); + type IsTeleporter = (); + type UniversalLocation = UniversalLocation; + type Barrier = Barrier; + type Weigher = FixedWeightBounds; + type Trader = FixedRateOfFungible; + type ResponseHandler = XcmPallet; + type AssetTrap = XcmPallet; + type AssetLocker = XcmPallet; + type AssetExchanger = (); + type AssetClaims = XcmPallet; + type SubscriptionService = XcmPallet; + type PalletInstancesInfo = AllPalletsWithSystem; + type FeeManager = (); + type MaxAssetsIntoHolding = MaxAssetsIntoHolding; + type MessageExporter = (); + type UniversalAliases = Nothing; + type CallDispatcher = RuntimeCall; + type SafeCallFilter = Everything; + type Aliasers = Nothing; +} + +pub type LocalOriginToLocation = SignedToAccountId32; + +impl pallet_xcm::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + type SendXcmOrigin = xcm_builder::EnsureXcmOrigin; + type XcmRouter = XcmRouter; + type ExecuteXcmOrigin = xcm_builder::EnsureXcmOrigin; + type XcmExecuteFilter = Everything; + type XcmExecutor = XcmExecutor; + type XcmTeleportFilter = Everything; + type XcmReserveTransferFilter = Everything; + type Weigher = FixedWeightBounds; + type UniversalLocation = UniversalLocation; + type RuntimeOrigin = RuntimeOrigin; + type RuntimeCall = RuntimeCall; + const VERSION_DISCOVERY_QUEUE_SIZE: u32 = 100; + type AdvertisedXcmVersion = pallet_xcm::CurrentXcmVersion; + type Currency = Balances; + type CurrencyMatcher = IsConcrete; + type TrustedLockers = (); + type SovereignAccountOf = SovereignAccountOf; + type MaxLockers = ConstU32<8>; + type MaxRemoteLockConsumers = ConstU32<0>; + type RemoteLockConsumerIdentifier = (); + type WeightInfo = pallet_xcm::TestWeightInfo; + type AdminOrigin = EnsureRoot; +} + +impl origin::Config for Runtime {} + +type Block = frame_system::mocking::MockBlock; + +impl pallet_message_queue::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + type Size = u32; + type HeapSize = MessageQueueHeapSize; + type MaxStale = MessageQueueMaxStale; + type ServiceWeight = MessageQueueServiceWeight; + type MessageProcessor = MessageProcessor; + type QueueChangeHandler = (); + type WeightInfo = (); + type QueuePausedQuery = (); +} + +construct_runtime!( + pub enum Runtime { + System: frame_system::{Pallet, Call, Storage, Config, Event}, + Balances: pallet_balances::{Pallet, Call, Storage, Config, Event}, + ParasOrigin: origin::{Pallet, Origin}, + XcmPallet: pallet_xcm::{Pallet, Call, Storage, Event, Origin}, + MessageQueue: pallet_message_queue::{Pallet, Event}, + } +); diff --git a/substrate/frame/contracts/mock-network/src/tests.rs 
b/substrate/frame/contracts/mock-network/src/tests.rs new file mode 100644 index 00000000000..5193f657055 --- /dev/null +++ b/substrate/frame/contracts/mock-network/src/tests.rs @@ -0,0 +1,239 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use crate::{ + parachain::{self, Runtime}, + parachain_account_sovereign_account_id, + primitives::{AccountId, CENTS}, + relay_chain, MockNet, ParaA, ParachainBalances, Relay, ALICE, BOB, INITIAL_BALANCE, +}; +use assert_matches::assert_matches; +use codec::{Decode, Encode}; +use frame_support::{ + assert_err, + pallet_prelude::Weight, + traits::{fungibles::Mutate, Currency}, +}; +use pallet_balances::{BalanceLock, Reasons}; +use pallet_contracts::{CollectEvents, DebugInfo, Determinism}; +use pallet_contracts_fixtures::compile_module; +use pallet_contracts_primitives::Code; +use xcm::{v3::prelude::*, VersionedMultiLocation, VersionedXcm}; +use xcm_simulator::TestExt; + +type ParachainContracts = pallet_contracts::Pallet; + +/// Instantiate the tests contract, and fund it with some balance and assets. +fn instantiate_test_contract(name: &str) -> AccountId { + let (wasm, _) = compile_module::(name).unwrap(); + + // Instantiate contract. + let contract_addr = ParaA::execute_with(|| { + ParachainContracts::bare_instantiate( + ALICE, + 0, + Weight::MAX, + None, + Code::Upload(wasm), + vec![], + vec![], + DebugInfo::UnsafeDebug, + CollectEvents::Skip, + ) + .result + .unwrap() + .account_id + }); + + // Funds contract account with some balance and assets. + ParaA::execute_with(|| { + parachain::Balances::make_free_balance_be(&contract_addr, INITIAL_BALANCE); + parachain::Assets::mint_into(0u32.into(), &contract_addr, INITIAL_BALANCE).unwrap(); + }); + Relay::execute_with(|| { + let sovereign_account = parachain_account_sovereign_account_id(1u32, contract_addr.clone()); + relay_chain::Balances::make_free_balance_be(&sovereign_account, INITIAL_BALANCE); + }); + + contract_addr +} + +#[test] +fn test_xcm_execute() { + MockNet::reset(); + + let contract_addr = instantiate_test_contract("xcm_execute"); + + // Execute XCM instructions through the contract. + ParaA::execute_with(|| { + let amount: u128 = 10 * CENTS; + + // The XCM used to transfer funds to Bob. 
+ let message: xcm_simulator::Xcm<()> = Xcm(vec![ + WithdrawAsset(vec![(Here, amount).into()].into()), + DepositAsset { + assets: All.into(), + beneficiary: AccountId32 { network: None, id: BOB.clone().into() }.into(), + }, + ]); + + let result = ParachainContracts::bare_call( + ALICE, + contract_addr.clone(), + 0, + Weight::MAX, + None, + VersionedXcm::V3(message).encode(), + DebugInfo::UnsafeDebug, + CollectEvents::UnsafeCollect, + Determinism::Enforced, + ) + .result + .unwrap(); + + let mut data = &result.data[..]; + let outcome = Outcome::decode(&mut data).expect("Failed to decode xcm_execute Outcome"); + assert_matches!(outcome, Outcome::Complete(_)); + + // Check if the funds are subtracted from the account of Alice and added to the account of + // Bob. + let initial = INITIAL_BALANCE; + assert_eq!(parachain::Assets::balance(0, contract_addr), initial); + assert_eq!(ParachainBalances::free_balance(BOB), initial + amount); + }); +} + +#[test] +fn test_xcm_execute_filtered_call() { + MockNet::reset(); + + let contract_addr = instantiate_test_contract("xcm_execute"); + + ParaA::execute_with(|| { + // `remark` should be rejected, as it is not allowed by our CallFilter. + let call = parachain::RuntimeCall::System(frame_system::Call::remark { remark: vec![] }); + let message: Xcm = Xcm(vec![Transact { + origin_kind: OriginKind::Native, + require_weight_at_most: Weight::MAX, + call: call.encode().into(), + }]); + + let result = ParachainContracts::bare_call( + ALICE, + contract_addr.clone(), + 0, + Weight::MAX, + None, + VersionedXcm::V3(message).encode(), + DebugInfo::UnsafeDebug, + CollectEvents::UnsafeCollect, + Determinism::Enforced, + ); + + assert_err!(result.result, frame_system::Error::::CallFiltered); + }); +} + +#[test] +fn test_xcm_execute_reentrant_call() { + MockNet::reset(); + + let contract_addr = instantiate_test_contract("xcm_execute"); + + ParaA::execute_with(|| { + let transact_call = parachain::RuntimeCall::Contracts(pallet_contracts::Call::call { + dest: contract_addr.clone(), + gas_limit: 1_000_000.into(), + storage_deposit_limit: None, + data: vec![], + value: 0u128, + }); + + // The XCM used to transfer funds to Bob. + let message: Xcm = Xcm(vec![ + Transact { + origin_kind: OriginKind::Native, + require_weight_at_most: 1_000_000_000.into(), + call: transact_call.encode().into(), + }, + ExpectTransactStatus(MaybeErrorCode::Success), + ]); + + let result = ParachainContracts::bare_call( + ALICE, + contract_addr.clone(), + 0, + Weight::MAX, + None, + VersionedXcm::V3(message).encode(), + DebugInfo::UnsafeDebug, + CollectEvents::UnsafeCollect, + Determinism::Enforced, + ) + .result + .unwrap(); + + let mut data = &result.data[..]; + let outcome = Outcome::decode(&mut data).expect("Failed to decode xcm_execute Outcome"); + assert_matches!(outcome, Outcome::Incomplete(_, XcmError::ExpectationFalse)); + + // Funds should not change hands as the XCM transact failed. + assert_eq!(ParachainBalances::free_balance(BOB), INITIAL_BALANCE); + }); +} + +#[test] +fn test_xcm_send() { + MockNet::reset(); + let contract_addr = instantiate_test_contract("xcm_send"); + let fee = parachain::estimate_message_fee(4); // Accounts for the `DescendOrigin` instruction added by `send_xcm` + + // Send XCM instructions through the contract, to lock some funds on the relay chain. 
+ ParaA::execute_with(|| { + let dest = MultiLocation::from(Parent); + let dest = VersionedMultiLocation::V3(dest); + + let message: xcm_simulator::Xcm<()> = Xcm(vec![ + WithdrawAsset((Here, fee).into()), + BuyExecution { fees: (Here, fee).into(), weight_limit: WeightLimit::Unlimited }, + LockAsset { asset: (Here, 5 * CENTS).into(), unlocker: (Parachain(1)).into() }, + ]); + let message = VersionedXcm::V3(message); + let exec = ParachainContracts::bare_call( + ALICE, + contract_addr.clone(), + 0, + Weight::MAX, + None, + (dest, message).encode(), + DebugInfo::UnsafeDebug, + CollectEvents::UnsafeCollect, + Determinism::Enforced, + ); + + let mut data = &exec.result.unwrap().data[..]; + XcmHash::decode(&mut data).expect("Failed to decode xcm_send message_id"); + }); + + Relay::execute_with(|| { + // Check if the funds are locked on the relay chain. + assert_eq!( + relay_chain::Balances::locks(¶chain_account_sovereign_account_id(1, contract_addr)), + vec![BalanceLock { id: *b"py/xcmlk", amount: 5 * CENTS, reasons: Reasons::All }] + ); + }); +} diff --git a/substrate/frame/contracts/src/lib.rs b/substrate/frame/contracts/src/lib.rs index 7d516fbe249..188679dbf49 100644 --- a/substrate/frame/contracts/src/lib.rs +++ b/substrate/frame/contracts/src/lib.rs @@ -403,6 +403,14 @@ pub mod pallet { /// its type appears in the metadata. Only valid value is `()`. #[pallet::constant] type Environment: Get>; + + /// A type that exposes XCM APIs, allowing contracts to interact with other parachains, and + /// execute XCM programs. + type Xcm: xcm_builder::Controller< + OriginFor, + ::RuntimeCall, + BlockNumberFor, + >; } #[pallet::hooks] @@ -1004,6 +1012,8 @@ pub mod pallet { /// in this error. Note that this usually shouldn't happen as deploying such contracts /// is rejected. NoChainExtension, + /// Failed to decode the XCM program. + XCMDecodeFailed, /// A contract with the same AccountId already exists. DuplicateContract, /// A contract self destructed in its constructor. 
diff --git a/substrate/frame/contracts/src/tests.rs b/substrate/frame/contracts/src/tests.rs index e7784b02b74..76fd012852a 100644 --- a/substrate/frame/contracts/src/tests.rs +++ b/substrate/frame/contracts/src/tests.rs @@ -486,6 +486,7 @@ impl Config for Test { type MaxDelegateDependencies = MaxDelegateDependencies; type Debug = TestDebug; type Environment = (); + type Xcm = (); } pub const ALICE: AccountId32 = AccountId32::new([1u8; 32]); diff --git a/substrate/frame/contracts/src/wasm/runtime.rs b/substrate/frame/contracts/src/wasm/runtime.rs index 4fd52b471a0..b3013adb790 100644 --- a/substrate/frame/contracts/src/wasm/runtime.rs +++ b/substrate/frame/contracts/src/wasm/runtime.rs @@ -23,10 +23,16 @@ use crate::{ schedule::HostFnWeights, BalanceOf, CodeHash, Config, DebugBufferVec, Error, SENTINEL, }; - use bitflags::bitflags; use codec::{Decode, DecodeLimit, Encode, MaxEncodedLen}; -use frame_support::{ensure, traits::Get, weights::Weight}; +use frame_support::{ + dispatch::DispatchInfo, + ensure, + pallet_prelude::{DispatchResult, DispatchResultWithPostInfo}, + parameter_types, + traits::Get, + weights::Weight, +}; use pallet_contracts_primitives::{ExecReturnValue, ReturnFlags}; use pallet_contracts_proc_macro::define_env; use sp_io::hashing::{blake2_128, blake2_256, keccak_256, sha2_256}; @@ -36,6 +42,9 @@ use sp_runtime::{ }; use sp_std::{fmt, prelude::*}; use wasmi::{core::HostError, errors::LinkerError, Linker, Memory, Store}; +use xcm::VersionedXcm; + +type CallOf = ::RuntimeCall; /// The maximum nesting depth a contract can use when encoding types. const MAX_DECODE_NESTING: u32 = 256; @@ -113,6 +122,17 @@ pub enum ReturnCode { EcdsaRecoverFailed = 11, /// sr25519 signature verification failed. Sr25519VerifyFailed = 12, + /// The `xcm_execute` call failed. + XcmExecutionFailed = 13, + /// The `xcm_send` call failed. + XcmSendFailed = 14, +} + +parameter_types! { + /// Getter types used by [`crate::api_doc::Current::call_runtime`] + const CallRuntimeFailed: ReturnCode = ReturnCode::CallRuntimeFailed; + /// Getter types used by [`crate::api_doc::Current::xcm_execute`] + const XcmExecutionFailed: ReturnCode = ReturnCode::XcmExecutionFailed; } impl From for ReturnCode { @@ -461,6 +481,29 @@ fn already_charged(_: u32) -> Option { None } +/// Ensure that the XCM program is executable, by checking that it does not contain any [`Transact`] +/// instruction with a call that is not allowed by the CallFilter. +fn ensure_executable(message: &VersionedXcm>) -> DispatchResult { + use frame_support::traits::Contains; + use xcm::prelude::{Transact, Xcm}; + + let mut message: Xcm> = + message.clone().try_into().map_err(|_| Error::::XCMDecodeFailed)?; + + message.iter_mut().try_for_each(|inst| -> DispatchResult { + let Transact { ref mut call, .. } = inst else { return Ok(()) }; + let call = call.ensure_decoded().map_err(|_| Error::::XCMDecodeFailed)?; + + if !::CallFilter::contains(call) { + return Err(frame_system::Error::::CallFiltered.into()) + } + + Ok(()) + })?; + + Ok(()) +} + /// Can only be used for one call. pub struct Runtime<'a, E: Ext + 'a> { ext: &'a mut E, @@ -558,6 +601,32 @@ impl<'a, E: Ext + 'a> Runtime<'a, E> { self.ext.gas_meter_mut().adjust_gas(charged, token); } + /// Charge, Run and adjust gas, for executing the given dispatchable. 
+ fn call_dispatchable< + ErrorReturnCode: Get, + F: FnOnce(&mut Self) -> DispatchResultWithPostInfo, + >( + &mut self, + dispatch_info: DispatchInfo, + run: F, + ) -> Result { + use frame_support::dispatch::extract_actual_weight; + let charged = self.charge_gas(RuntimeCosts::CallRuntime(dispatch_info.weight))?; + let result = run(self); + let actual_weight = extract_actual_weight(&result, &dispatch_info); + self.adjust_gas(charged, RuntimeCosts::CallRuntime(actual_weight)); + match result { + Ok(_) => Ok(ReturnCode::Success), + Err(e) => { + if self.ext.append_debug_buffer("") { + self.ext.append_debug_buffer("call failed with: "); + self.ext.append_debug_buffer(e.into()); + }; + Ok(ErrorReturnCode::get()) + }, + } + } + /// Read designated chunk from the sandbox memory. /// /// Returns `Err` if one of the following conditions occurs: @@ -633,8 +702,10 @@ impl<'a, E: Ext + 'a> Runtime<'a, E> { let ptr = ptr as usize; let mut bound_checked = memory.get(ptr..ptr + len as usize).ok_or_else(|| Error::::OutOfBounds)?; + let decoded = D::decode_all_with_depth_limit(MAX_DECODE_NESTING, &mut bound_checked) .map_err(|_| DispatchError::from(Error::::DecodingFailed))?; + Ok(decoded) } @@ -1023,6 +1094,7 @@ impl<'a, E: Ext + 'a> Runtime<'a, E> { // for every function. #[define_env(doc)] pub mod env { + /// Set the value at the given key in the contract storage. /// /// Equivalent to the newer [`seal1`][`super::api_doc::Version1::set_storage`] version with the @@ -2584,7 +2656,7 @@ pub mod env { /// # Return Value /// /// Returns `ReturnCode::Success` when the dispatchable was successfully executed and - /// returned `Ok`. When the dispatchable was exeuted but returned an error + /// returned `Ok`. When the dispatchable was executed but returned an error /// `ReturnCode::CallRuntimeFailed` is returned. The full error is not /// provided because it is not guaranteed to be stable. /// @@ -2605,23 +2677,118 @@ pub mod env { call_ptr: u32, call_len: u32, ) -> Result { - use frame_support::dispatch::{extract_actual_weight, GetDispatchInfo}; + use frame_support::dispatch::GetDispatchInfo; ctx.charge_gas(RuntimeCosts::CopyFromContract(call_len))?; let call: ::RuntimeCall = ctx.read_sandbox_memory_as_unbounded(memory, call_ptr, call_len)?; - let dispatch_info = call.get_dispatch_info(); - let charged = ctx.charge_gas(RuntimeCosts::CallRuntime(dispatch_info.weight))?; - let result = ctx.ext.call_runtime(call); - let actual_weight = extract_actual_weight(&result, &dispatch_info); - ctx.adjust_gas(charged, RuntimeCosts::CallRuntime(actual_weight)); - match result { - Ok(_) => Ok(ReturnCode::Success), + ctx.call_dispatchable::(call.get_dispatch_info(), |ctx| { + ctx.ext.call_runtime(call) + }) + } + + /// Execute an XCM program locally, using the contract's address as the origin. + /// This is equivalent to dispatching `pallet_xcm::execute` through call_runtime, except that + /// the function is called directly instead of being dispatched. + /// + /// # Parameters + /// + /// - `msg_ptr`: the pointer into the linear memory where the [`xcm::prelude::VersionedXcm`] is + /// placed. + /// - `msg_len`: the length of the message in bytes. + /// - `output_ptr`: the pointer into the linear memory where the [`xcm::prelude::Outcome`] + /// message id is placed. + /// + /// # Return Value + /// + /// Returns `ReturnCode::Success` when the XCM was successfully executed. When the XCM + /// execution fails, `ReturnCode::XcmExecutionFailed` is returned. 
+ #[unstable] + fn xcm_execute( + ctx: _, + memory: _, + msg_ptr: u32, + msg_len: u32, + output_ptr: u32, + ) -> Result { + use frame_support::dispatch::DispatchInfo; + use xcm::VersionedXcm; + use xcm_builder::{ExecuteController, ExecuteControllerWeightInfo}; + + ctx.charge_gas(RuntimeCosts::CopyFromContract(msg_len))?; + let message: VersionedXcm> = + ctx.read_sandbox_memory_as_unbounded(memory, msg_ptr, msg_len)?; + + let execute_weight = + <::Xcm as ExecuteController<_, _>>::WeightInfo::execute(); + let weight = ctx.ext.gas_meter().gas_left().max(execute_weight); + let dispatch_info = DispatchInfo { weight, ..Default::default() }; + + ensure_executable::(&message)?; + ctx.call_dispatchable::(dispatch_info, |ctx| { + let origin = crate::RawOrigin::Signed(ctx.ext.address().clone()).into(); + let outcome = <::Xcm>::execute( + origin, + Box::new(message), + weight.saturating_sub(execute_weight), + )?; + + ctx.write_sandbox_memory(memory, output_ptr, &outcome.encode())?; + let pre_dispatch_weight = + <::Xcm as ExecuteController<_, _>>::WeightInfo::execute(); + Ok(Some(outcome.weight_used().saturating_add(pre_dispatch_weight)).into()) + }) + } + + /// Send an XCM program from the contract to the specified destination. + /// This is equivalent to dispatching `pallet_xcm::send` through `call_runtime`, except that + /// the function is called directly instead of being dispatched. + /// + /// # Parameters + /// + /// - `dest_ptr`: the pointer into the linear memory where the + /// [`xcm::prelude::VersionedMultiLocation`] is placed. + /// - `msg_ptr`: the pointer into the linear memory where the [`xcm::prelude::VersionedXcm`] is + /// placed. + /// - `msg_len`: the length of the message in bytes. + /// - `output_ptr`: the pointer into the linear memory where the [`xcm::v3::XcmHash`] message id + /// is placed. + /// + /// # Return Value + /// + /// Returns `ReturnCode::Success` when the message was successfully sent. When the XCM + /// execution fails, `ReturnCode::CallRuntimeFailed` is returned. + #[unstable] + fn xcm_send( + ctx: _, + memory: _, + dest_ptr: u32, + msg_ptr: u32, + msg_len: u32, + output_ptr: u32, + ) -> Result { + use xcm::{VersionedMultiLocation, VersionedXcm}; + use xcm_builder::{SendController, SendControllerWeightInfo}; + + ctx.charge_gas(RuntimeCosts::CopyFromContract(msg_len))?; + let dest: VersionedMultiLocation = ctx.read_sandbox_memory_as(memory, dest_ptr)?; + + let message: VersionedXcm<()> = + ctx.read_sandbox_memory_as_unbounded(memory, msg_ptr, msg_len)?; + let weight = <::Xcm as SendController<_>>::WeightInfo::send(); + ctx.charge_gas(RuntimeCosts::CallRuntime(weight))?; + let origin = crate::RawOrigin::Signed(ctx.ext.address().clone()).into(); + + match <::Xcm>::send(origin, dest.into(), message.into()) { + Ok(message_id) => { + ctx.write_sandbox_memory(memory, output_ptr, &message_id.encode())?; + Ok(ReturnCode::Success) + }, Err(e) => { if ctx.ext.append_debug_buffer("") { - ctx.ext.append_debug_buffer("seal0::call_runtime failed with: "); + ctx.ext.append_debug_buffer("seal0::xcm_send failed with: "); ctx.ext.append_debug_buffer(e.into()); }; - Ok(ReturnCode::CallRuntimeFailed) + Ok(ReturnCode::XcmSendFailed) }, } } -- GitLab From f53604362c98d596d829f37dbdf87fecbd05b7a5 Mon Sep 17 00:00:00 2001 From: Adrian Catangiu Date: Wed, 15 Nov 2023 10:23:59 +0200 Subject: [PATCH 056/199] pallet-xcm: use XcmTeleportFilter for teleported fees in reserve transfers (#2322) Disallow reserve transfers that use teleportable fees if `(origin, fees)` matches `XcmTeleportFilter`. 
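For illustration, the guard this patch adds to `teleport_fees_instructions` boils down to the sketch below (names as in the diff further down; teleported fees are now rejected with `Filtered` unless the `(origin, fees)` pair passes the filter):

```rust
// Sketch of the added guard: fees that are to be teleported must pass the same
// `XcmTeleportFilter` that already guards the teleport extrinsics themselves.
let value = (origin, vec![fees.clone()]);
ensure!(T::XcmTeleportFilter::contains(&value), Error::<T>::Filtered);
```

A runtime can then opt specific assets out of teleportation via its `XcmTeleportFilter` configuration, as the updated mock runtime below does with an `EverythingBut`-style filter.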
Add regression tests for filtering based on `XcmTeleportFilter` for both `(limited_)reserve_transfer_assets()` and `(limited_)teleport_assets` extrinsics. --- polkadot/xcm/pallet-xcm/src/lib.rs | 6 ++- polkadot/xcm/pallet-xcm/src/mock.rs | 29 +++++++++- .../pallet-xcm/src/tests/assets_transfer.rs | 54 +++++++++++++++++++ 3 files changed, 86 insertions(+), 3 deletions(-) diff --git a/polkadot/xcm/pallet-xcm/src/lib.rs b/polkadot/xcm/pallet-xcm/src/lib.rs index 8157620465f..38ea7555fc3 100644 --- a/polkadot/xcm/pallet-xcm/src/lib.rs +++ b/polkadot/xcm/pallet-xcm/src/lib.rs @@ -1379,7 +1379,7 @@ impl Pallet { TransferType::DestinationReserve => Self::destination_reserve_fees_instructions(dest, fees, weight_limit)?, TransferType::Teleport => - Self::teleport_fees_instructions(dest, fees, weight_limit)?, + Self::teleport_fees_instructions(origin_location, dest, fees, weight_limit)?, TransferType::RemoteReserve(_) => return Err(Error::::InvalidAssetUnsupportedReserve.into()), }); @@ -1715,10 +1715,14 @@ impl Pallet { } fn teleport_fees_instructions( + origin: MultiLocation, dest: MultiLocation, fees: MultiAsset, weight_limit: WeightLimit, ) -> Result<(Xcm<::RuntimeCall>, Xcm<()>), Error> { + let value = (origin, vec![fees.clone()]); + ensure!(T::XcmTeleportFilter::contains(&value), Error::::Filtered); + let context = T::UniversalLocation::get(); let reanchored_fees = fees .clone() diff --git a/polkadot/xcm/pallet-xcm/src/mock.rs b/polkadot/xcm/pallet-xcm/src/mock.rs index 026838993f1..e744cefb162 100644 --- a/polkadot/xcm/pallet-xcm/src/mock.rs +++ b/polkadot/xcm/pallet-xcm/src/mock.rs @@ -18,7 +18,8 @@ use codec::Encode; use frame_support::{ construct_runtime, match_types, parameter_types, traits::{ - AsEnsureOriginWithArg, ConstU128, ConstU32, Equals, Everything, EverythingBut, Nothing, + AsEnsureOriginWithArg, ConstU128, ConstU32, Contains, Equals, Everything, EverythingBut, + Nothing, }, weights::Weight, }; @@ -341,6 +342,9 @@ pub const USDT_PARA_ID: u32 = 2003; // This child parachain is not configured as trusted reserve or teleport location for any assets. pub const OTHER_PARA_ID: u32 = 2009; +// This child parachain is used for filtered/disallowed assets. +pub const FILTERED_PARA_ID: u32 = 2010; + parameter_types! { pub const RelayLocation: MultiLocation = Here.into_location(); pub const NativeAsset: MultiAsset = MultiAsset { @@ -384,6 +388,17 @@ parameter_types! { interior: X1(Parachain(USDT_PARA_ID)), }), }; + pub const FilteredTeleportLocation: MultiLocation = MultiLocation { + parents: 0, + interior: X1(Parachain(FILTERED_PARA_ID)) + }; + pub const FilteredTeleportAsset: MultiAsset = MultiAsset { + fun: Fungible(10), + id: Concrete(MultiLocation { + parents: 0, + interior: X1(Parachain(FILTERED_PARA_ID)), + }), + }; pub const AnyNetwork: Option = None; pub UniversalLocation: InteriorMultiLocation = Here; pub UnitWeightCost: u64 = 1_000; @@ -430,6 +445,7 @@ parameter_types! 
{ pub TrustedLocal: (MultiAssetFilter, MultiLocation) = (All.into(), Here.into()); pub TrustedSystemPara: (MultiAssetFilter, MultiLocation) = (NativeAsset::get().into(), SystemParachainLocation::get()); pub TrustedUsdt: (MultiAssetFilter, MultiLocation) = (Usdt::get().into(), UsdtTeleportLocation::get()); + pub TrustedFilteredTeleport: (MultiAssetFilter, MultiLocation) = (FilteredTeleportAsset::get().into(), FilteredTeleportLocation::get()); pub TeleportUsdtToForeign: (MultiAssetFilter, MultiLocation) = (Usdt::get().into(), ForeignReserveLocation::get()); pub TrustedForeign: (MultiAssetFilter, MultiLocation) = (ForeignAsset::get().into(), ForeignReserveLocation::get()); pub TrustedUsdc: (MultiAssetFilter, MultiLocation) = (Usdc::get().into(), UsdcReserveLocation::get()); @@ -466,6 +482,7 @@ impl xcm_executor::Config for XcmConfig { Case, Case, Case, + Case, ); type UniversalLocation = UniversalLocation; type Barrier = Barrier; @@ -496,6 +513,14 @@ parameter_types! { pub static AdvertisedXcmVersion: pallet_xcm::XcmVersion = 3; } +pub struct XcmTeleportFiltered; +impl Contains<(MultiLocation, Vec)> for XcmTeleportFiltered { + fn contains(t: &(MultiLocation, Vec)) -> bool { + let filtered = FilteredTeleportAsset::get(); + t.1.iter().any(|asset| asset == &filtered) + } +} + impl pallet_xcm::Config for Test { type RuntimeEvent = RuntimeEvent; type SendXcmOrigin = xcm_builder::EnsureXcmOrigin; @@ -503,7 +528,7 @@ impl pallet_xcm::Config for Test { type ExecuteXcmOrigin = xcm_builder::EnsureXcmOrigin; type XcmExecuteFilter = Everything; type XcmExecutor = XcmExecutor; - type XcmTeleportFilter = Everything; + type XcmTeleportFilter = EverythingBut; type XcmReserveTransferFilter = Everything; type Weigher = FixedWeightBounds; type UniversalLocation = UniversalLocation; diff --git a/polkadot/xcm/pallet-xcm/src/tests/assets_transfer.rs b/polkadot/xcm/pallet-xcm/src/tests/assets_transfer.rs index b02b0fd33c3..bf39e1ca288 100644 --- a/polkadot/xcm/pallet-xcm/src/tests/assets_transfer.rs +++ b/polkadot/xcm/pallet-xcm/src/tests/assets_transfer.rs @@ -117,6 +117,30 @@ fn limited_teleport_assets_works() { ); } +/// `limited_teleport_assets` should fail for filtered assets +#[test] +fn limited_teleport_filtered_assets_disallowed() { + let beneficiary: MultiLocation = AccountId32 { network: None, id: BOB.into() }.into(); + new_test_ext_with_balances(vec![(ALICE, INITIAL_BALANCE)]).execute_with(|| { + let result = XcmPallet::limited_teleport_assets( + RuntimeOrigin::signed(ALICE), + Box::new(FilteredTeleportLocation::get().into()), + Box::new(beneficiary.into()), + Box::new(FilteredTeleportAsset::get().into()), + 0, + Unlimited, + ); + assert_eq!( + result, + Err(DispatchError::Module(ModuleError { + index: 4, + error: [2, 0, 0, 0], + message: Some("Filtered") + })) + ); + }); +} + /// Test `reserve_transfer_assets_with_paid_router_works` /// /// Asserts that the sender's balance is decreased and the beneficiary's balance @@ -1403,3 +1427,33 @@ fn reserve_transfer_assets_with_teleportable_asset_fails() { assert_eq!(Assets::active_issuance(usdt_id_multilocation), usdt_initial_local_amount); }); } + +/// Test `reserve_transfer_assets` with teleportable fee that is filtered - should fail. 
+#[test] +fn reserve_transfer_assets_with_filtered_teleported_fee_disallowed() { + let beneficiary: MultiLocation = AccountId32 { network: None, id: BOB.into() }.into(); + new_test_ext_with_balances(vec![(ALICE, INITIAL_BALANCE)]).execute_with(|| { + let (assets, fee_index, _, _) = into_multiassets_checked( + // FilteredTeleportAsset for fees - teleportable but filtered + FilteredTeleportAsset::get().into(), + // native asset to transfer (not used for fees) - local reserve + (MultiLocation::here(), SEND_AMOUNT).into(), + ); + let result = XcmPallet::limited_reserve_transfer_assets( + RuntimeOrigin::signed(ALICE), + Box::new(FilteredTeleportLocation::get().into()), + Box::new(beneficiary.into()), + Box::new(assets.into()), + fee_index as u32, + Unlimited, + ); + assert_eq!( + result, + Err(DispatchError::Module(ModuleError { + index: 4, + error: [2, 0, 0, 0], + message: Some("Filtered") + })) + ); + }); +} -- GitLab From 18165ebba88d41b4ba314685e5468ec0e809d799 Mon Sep 17 00:00:00 2001 From: Dmitry Markin Date: Wed, 15 Nov 2023 10:33:58 +0200 Subject: [PATCH 057/199] Unify `ChainSync` actions under one enum (follow-up) (#2317) Get rid of public `ChainSync::..._requests()` functions and return all requests as actions. --------- Co-authored-by: Sebastian Kunert --- .../client/network/sync/src/chain_sync.rs | 50 +++++++++++++---- substrate/client/network/sync/src/engine.rs | 55 +++++++++---------- substrate/client/network/sync/src/lib.rs | 2 +- .../client/network/sync/src/service/mod.rs | 4 +- .../{chain_sync.rs => syncing_service.rs} | 6 +- substrate/client/network/sync/src/warp.rs | 2 +- substrate/client/network/test/src/lib.rs | 2 +- 7 files changed, 74 insertions(+), 47 deletions(-) rename substrate/client/network/sync/src/service/{chain_sync.rs => syncing_service.rs} (98%) diff --git a/substrate/client/network/sync/src/chain_sync.rs b/substrate/client/network/sync/src/chain_sync.rs index 2adc6d42341..3825cfa33f7 100644 --- a/substrate/client/network/sync/src/chain_sync.rs +++ b/substrate/client/network/sync/src/chain_sync.rs @@ -191,6 +191,10 @@ pub enum ChainSyncAction { SendBlockRequest { peer_id: PeerId, request: BlockRequest }, /// Drop stale block request. CancelBlockRequest { peer_id: PeerId }, + /// Send state request to peer. + SendStateRequest { peer_id: PeerId, request: OpaqueStateRequest }, + /// Send warp proof request to peer. + SendWarpProofRequest { peer_id: PeerId, request: WarpProofRequest }, /// Peer misbehaved. Disconnect, report it and cancel the block request to it. DropPeer(BadPeer), /// Import blocks. @@ -1420,11 +1424,6 @@ where .any(|(_, p)| p.state == PeerSyncState::DownloadingStale(*hash)) } - /// Check if the peer is known to the sync state machine. Used for sanity checks. - pub fn is_peer_known(&self, peer_id: &PeerId) -> bool { - self.peers.contains_key(peer_id) - } - /// Get the set of downloaded blocks that are ready to be queued for import. fn ready_blocks(&mut self) -> Vec> { self.blocks @@ -1537,7 +1536,7 @@ where } /// Get justification requests scheduled by sync to be sent out. - pub fn justification_requests(&mut self) -> Vec<(PeerId, BlockRequest)> { + fn justification_requests(&mut self) -> Vec<(PeerId, BlockRequest)> { let peers = &mut self.peers; let mut matcher = self.extra_justifications.matcher(); std::iter::from_fn(move || { @@ -1564,7 +1563,7 @@ where } /// Get block requests scheduled by sync to be sent out. 
- pub fn block_requests(&mut self) -> Vec<(PeerId, BlockRequest)> { + fn block_requests(&mut self) -> Vec<(PeerId, BlockRequest)> { if self.mode == SyncMode::Warp { return self .warp_target_block_request() @@ -1691,7 +1690,7 @@ where } /// Get a state request scheduled by sync to be sent out (if any). - pub fn state_request(&mut self) -> Option<(PeerId, OpaqueStateRequest)> { + fn state_request(&mut self) -> Option<(PeerId, OpaqueStateRequest)> { if self.allowed_requests.is_empty() { return None } @@ -1737,7 +1736,7 @@ where } /// Get a warp proof request scheduled by sync to be sent out (if any). - pub fn warp_sync_request(&mut self) -> Option<(PeerId, WarpProofRequest)> { + fn warp_sync_request(&mut self) -> Option<(PeerId, WarpProofRequest)> { if let Some(sync) = &self.warp_sync { if self.allowed_requests.is_empty() || sync.is_complete() || @@ -2025,7 +2024,38 @@ where /// Get pending actions to perform. #[must_use] - pub fn take_actions(&mut self) -> impl Iterator> { + pub fn actions(&mut self) -> impl Iterator> { + let block_requests = self + .block_requests() + .into_iter() + .map(|(peer_id, request)| ChainSyncAction::SendBlockRequest { peer_id, request }); + self.actions.extend(block_requests); + + let justification_requests = self + .justification_requests() + .into_iter() + .map(|(peer_id, request)| ChainSyncAction::SendBlockRequest { peer_id, request }); + self.actions.extend(justification_requests); + + let state_request = self + .state_request() + .into_iter() + .map(|(peer_id, request)| ChainSyncAction::SendStateRequest { peer_id, request }); + self.actions.extend(state_request); + + let warp_proof_request = self + .warp_sync_request() + .into_iter() + .map(|(peer_id, request)| ChainSyncAction::SendWarpProofRequest { peer_id, request }); + self.actions.extend(warp_proof_request); + + std::mem::take(&mut self.actions).into_iter() + } + + /// A version of `actions()` that doesn't schedule extra requests. For testing only. + #[cfg(test)] + #[must_use] + fn take_actions(&mut self) -> impl Iterator> { std::mem::take(&mut self.actions).into_iter() } } diff --git a/substrate/client/network/sync/src/engine.rs b/substrate/client/network/sync/src/engine.rs index 58a9fdc49f2..2cb8eab22f7 100644 --- a/substrate/client/network/sync/src/engine.rs +++ b/substrate/client/network/sync/src/engine.rs @@ -30,7 +30,7 @@ use crate::{ schema::v1::{StateRequest, StateResponse}, service::{ self, - chain_sync::{SyncingService, ToServiceCommand}, + syncing_service::{SyncingService, ToServiceCommand}, }, types::{ BadPeer, ExtendedPeerInfo, OpaqueStateRequest, OpaqueStateResponse, PeerRequest, SyncEvent, @@ -713,16 +713,13 @@ where self.is_major_syncing .store(self.chain_sync.status().state.is_major_syncing(), Ordering::Relaxed); - // Process actions requested by `ChainSync` during `select!`. + // Process actions requested by `ChainSync`. self.process_chain_sync_actions(); - - // Send outbound requests on `ChanSync`'s behalf. - self.send_chain_sync_requests(); } } fn process_chain_sync_actions(&mut self) { - self.chain_sync.take_actions().for_each(|action| match action { + self.chain_sync.actions().for_each(|action| match action { ChainSyncAction::SendBlockRequest { peer_id, request } => { // Sending block request implies dropping obsolete pending response as we are not // interested in it anymore (see [`ChainSyncAction::SendBlockRequest`]). 
@@ -741,7 +738,25 @@ where ChainSyncAction::CancelBlockRequest { peer_id } => { let removed = self.pending_responses.remove(&peer_id); - trace!(target: LOG_TARGET, "Processed {action:?}., response removed: {removed}."); + trace!(target: LOG_TARGET, "Processed {action:?}, response removed: {removed}."); + }, + ChainSyncAction::SendStateRequest { peer_id, request } => { + self.send_state_request(peer_id, request); + + trace!( + target: LOG_TARGET, + "Processed `ChainSyncAction::SendBlockRequest` to {peer_id}.", + ); + }, + ChainSyncAction::SendWarpProofRequest { peer_id, request } => { + self.send_warp_proof_request(peer_id, request.clone()); + + trace!( + target: LOG_TARGET, + "Processed `ChainSyncAction::SendWarpProofRequest` to {}, request: {:?}.", + peer_id, + request, + ); }, ChainSyncAction::DropPeer(BadPeer(peer_id, rep)) => { self.pending_responses.remove(&peer_id); @@ -1104,26 +1119,8 @@ where Ok(()) } - fn send_chain_sync_requests(&mut self) { - for (peer_id, request) in self.chain_sync.block_requests() { - self.send_block_request(peer_id, request); - } - - if let Some((peer_id, request)) = self.chain_sync.state_request() { - self.send_state_request(peer_id, request); - } - - for (peer_id, request) in self.chain_sync.justification_requests() { - self.send_block_request(peer_id, request); - } - - if let Some((peer_id, request)) = self.chain_sync.warp_sync_request() { - self.send_warp_sync_request(peer_id, request); - } - } - fn send_block_request(&mut self, peer_id: PeerId, request: BlockRequest) { - if !self.chain_sync.is_peer_known(&peer_id) { + if !self.peers.contains_key(&peer_id) { trace!(target: LOG_TARGET, "Cannot send block request to unknown peer {peer_id}"); debug_assert!(false); return @@ -1139,7 +1136,7 @@ where } fn send_state_request(&mut self, peer_id: PeerId, request: OpaqueStateRequest) { - if !self.chain_sync.is_peer_known(&peer_id) { + if !self.peers.contains_key(&peer_id) { trace!(target: LOG_TARGET, "Cannot send state request to unknown peer {peer_id}"); debug_assert!(false); return @@ -1168,8 +1165,8 @@ where } } - fn send_warp_sync_request(&mut self, peer_id: PeerId, request: WarpProofRequest) { - if !self.chain_sync.is_peer_known(&peer_id) { + fn send_warp_proof_request(&mut self, peer_id: PeerId, request: WarpProofRequest) { + if !self.peers.contains_key(&peer_id) { trace!(target: LOG_TARGET, "Cannot send warp proof request to unknown peer {peer_id}"); debug_assert!(false); return diff --git a/substrate/client/network/sync/src/lib.rs b/substrate/client/network/sync/src/lib.rs index c42b0601e65..1a7e773c95f 100644 --- a/substrate/client/network/sync/src/lib.rs +++ b/substrate/client/network/sync/src/lib.rs @@ -18,7 +18,7 @@ //! Blockchain syncing implementation in Substrate. -pub use service::chain_sync::SyncingService; +pub use service::syncing_service::SyncingService; pub use types::{SyncEvent, SyncEventStream, SyncState, SyncStatus, SyncStatusProvider}; mod block_announce_validator; diff --git a/substrate/client/network/sync/src/service/mod.rs b/substrate/client/network/sync/src/service/mod.rs index 18331d63ed2..d045af26e70 100644 --- a/substrate/client/network/sync/src/service/mod.rs +++ b/substrate/client/network/sync/src/service/mod.rs @@ -16,8 +16,8 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . -//! `ChainSync`-related service code +//! 
`SyncingEngine`-related service code -pub mod chain_sync; pub mod mock; pub mod network; +pub mod syncing_service; diff --git a/substrate/client/network/sync/src/service/chain_sync.rs b/substrate/client/network/sync/src/service/syncing_service.rs similarity index 98% rename from substrate/client/network/sync/src/service/chain_sync.rs rename to substrate/client/network/sync/src/service/syncing_service.rs index 3d11880c511..92d649d65dc 100644 --- a/substrate/client/network/sync/src/service/chain_sync.rs +++ b/substrate/client/network/sync/src/service/syncing_service.rs @@ -34,7 +34,7 @@ use std::{ }, }; -/// Commands send to `ChainSync` +/// Commands send to `SyncingEngine` pub enum ToServiceCommand { SetSyncForkRequest(Vec, B::Hash, NumberFor), RequestJustification(B::Hash, NumberFor), @@ -63,7 +63,7 @@ pub enum ToServiceCommand { // }, } -/// Handle for communicating with `ChainSync` asynchronously +/// Handle for communicating with `SyncingEngine` asynchronously #[derive(Clone)] pub struct SyncingService { tx: TracingUnboundedSender>, @@ -148,7 +148,7 @@ impl SyncingService { /// Get sync status /// - /// Returns an error if `ChainSync` has terminated. + /// Returns an error if `SyncingEngine` has terminated. pub async fn status(&self) -> Result, ()> { let (tx, rx) = oneshot::channel(); let _ = self.tx.unbounded_send(ToServiceCommand::Status(tx)); diff --git a/substrate/client/network/sync/src/warp.rs b/substrate/client/network/sync/src/warp.rs index 2c0adc856c1..169b3de35aa 100644 --- a/substrate/client/network/sync/src/warp.rs +++ b/substrate/client/network/sync/src/warp.rs @@ -42,7 +42,7 @@ const LOG_TARGET: &'static str = "sync"; pub struct EncodedProof(pub Vec); /// Warp sync request -#[derive(Encode, Decode, Debug)] +#[derive(Encode, Decode, Debug, Clone)] pub struct WarpProofRequest { /// Start collecting proofs from this block. pub begin: B::Hash, diff --git a/substrate/client/network/test/src/lib.rs b/substrate/client/network/test/src/lib.rs index f869e3a171a..cfc3cb7af3f 100644 --- a/substrate/client/network/test/src/lib.rs +++ b/substrate/client/network/test/src/lib.rs @@ -64,7 +64,7 @@ use sc_network_common::role::Roles; use sc_network_light::light_client_requests::handler::LightClientRequestHandler; use sc_network_sync::{ block_request_handler::BlockRequestHandler, - service::{chain_sync::SyncingService, network::NetworkServiceProvider}, + service::{network::NetworkServiceProvider, syncing_service::SyncingService}, state_request_handler::StateRequestHandler, warp::{ AuthorityList, EncodedProof, SetId, VerificationResult, WarpSyncParams, WarpSyncProvider, -- GitLab From 5b0622bc4d0cf5f0f57904874ea90eb08de79c52 Mon Sep 17 00:00:00 2001 From: Alexander Samusev <41779041+alvicsam@users.noreply.github.com> Date: Wed, 15 Nov 2023 14:28:32 +0100 Subject: [PATCH 058/199] [CI] Prepare CI for Merge Queues (#2308) PR prepares CI to the GitHub Merge Queues. All github actions that were running in PR adjusted so they can run in the merge queues. Zombienet jobs will do nothing during PRs but they will run during merge queues. 
Jobs that will be skipped during PR: - all zombienet jobs - all publish docker jobs Jobs that will be skipped during merge queue: - check-labels - check-prdoc - pr-custom-review - review trigger cc https://github.com/paritytech/ci_cd/issues/862 --- .github/workflows/check-labels.yml | 4 ++++ .github/workflows/check-licenses.yml | 1 + .github/workflows/check-links.yml | 1 + .github/workflows/check-markdown.yml | 5 +++-- .github/workflows/check-prdoc.yml | 4 ++++ .github/workflows/check-publish.yml | 1 + .github/workflows/fmt-check.yml | 1 + .github/workflows/gitspiegel-trigger.yml | 1 + .github/workflows/pr-custom-review.yml | 4 ++++ .github/workflows/review-trigger.yml | 6 +++++- .gitlab-ci.yml | 8 +++++++- .gitlab/pipeline/publish.yml | 4 ++-- .gitlab/pipeline/zombienet/cumulus.yml | 2 ++ .gitlab/pipeline/zombienet/polkadot.yml | 18 +++++++++++++----- .gitlab/pipeline/zombienet/substrate.yml | 2 ++ 15 files changed, 51 insertions(+), 11 deletions(-) diff --git a/.github/workflows/check-labels.yml b/.github/workflows/check-labels.yml index 83b52e82313..97562f0da09 100644 --- a/.github/workflows/check-labels.yml +++ b/.github/workflows/check-labels.yml @@ -3,11 +3,15 @@ name: Check labels on: pull_request: types: [labeled, opened, synchronize, unlabeled] + merge_group: jobs: check-labels: runs-on: ubuntu-latest steps: + - name: Skip merge queue + if: ${{ contains(github.ref, 'gh-readonly-queue') }} + run: exit 0 - name: Pull image env: IMAGE: paritytech/ruled_labels:0.4.0 diff --git a/.github/workflows/check-licenses.yml b/.github/workflows/check-licenses.yml index 50dd10a6d3c..a66e3a53998 100644 --- a/.github/workflows/check-licenses.yml +++ b/.github/workflows/check-licenses.yml @@ -2,6 +2,7 @@ name: Check licenses on: pull_request: + merge_group: permissions: packages: read diff --git a/.github/workflows/check-links.yml b/.github/workflows/check-links.yml index 3ed6ba84b82..0932d38c9ad 100644 --- a/.github/workflows/check-links.yml +++ b/.github/workflows/check-links.yml @@ -8,6 +8,7 @@ on: - ".github/workflows/check-links.yml" - ".config/lychee.toml" types: [opened, synchronize, reopened, ready_for_review] + merge_group: permissions: packages: read diff --git a/.github/workflows/check-markdown.yml b/.github/workflows/check-markdown.yml index 05b5d898d67..2108f942090 100644 --- a/.github/workflows/check-markdown.yml +++ b/.github/workflows/check-markdown.yml @@ -3,6 +3,7 @@ name: Check Markdown on: pull_request: types: [opened, synchronize, reopened, ready_for_review] + merge_group: permissions: packages: read @@ -23,8 +24,8 @@ jobs: - name: Install tooling run: | - npm install -g markdownlint-cli - markdownlint --version + npm install -g markdownlint-cli + markdownlint --version - name: Check Markdown env: diff --git a/.github/workflows/check-prdoc.yml b/.github/workflows/check-prdoc.yml index 690f7a3f133..54e4d2b9680 100644 --- a/.github/workflows/check-prdoc.yml +++ b/.github/workflows/check-prdoc.yml @@ -3,6 +3,7 @@ name: Check PRdoc on: pull_request: types: [labeled, opened, synchronize, unlabeled] + merge_group: env: IMAGE: paritytech/prdoc:v0.0.5 @@ -17,6 +18,9 @@ jobs: check-prdoc: runs-on: ubuntu-latest steps: + - name: Skip merge queue + if: ${{ contains(github.ref, 'gh-readonly-queue') }} + run: exit 0 - name: Pull image run: | echo "Pulling $IMAGE" diff --git a/.github/workflows/check-publish.yml b/.github/workflows/check-publish.yml index 9ab47dba51b..c0d2b889381 100644 --- a/.github/workflows/check-publish.yml +++ b/.github/workflows/check-publish.yml @@ -6,6 +6,7 @@ on: - 
master pull_request: types: [opened, synchronize, reopened, ready_for_review] + merge_group: jobs: check-publish: diff --git a/.github/workflows/fmt-check.yml b/.github/workflows/fmt-check.yml index 7ca4413bb05..e4d39acabfd 100644 --- a/.github/workflows/fmt-check.yml +++ b/.github/workflows/fmt-check.yml @@ -6,6 +6,7 @@ on: - master pull_request: types: [opened, synchronize, reopened, ready_for_review] + merge_group: jobs: quick_check: diff --git a/.github/workflows/gitspiegel-trigger.yml b/.github/workflows/gitspiegel-trigger.yml index dce3aaf2fec..59347fad6d6 100644 --- a/.github/workflows/gitspiegel-trigger.yml +++ b/.github/workflows/gitspiegel-trigger.yml @@ -13,6 +13,7 @@ on: - unlocked - ready_for_review - reopened + merge_group: jobs: sync: diff --git a/.github/workflows/pr-custom-review.yml b/.github/workflows/pr-custom-review.yml index b15d20c696f..4e0809cbfdc 100644 --- a/.github/workflows/pr-custom-review.yml +++ b/.github/workflows/pr-custom-review.yml @@ -14,11 +14,15 @@ on: - ready_for_review - converted_to_draft pull_request_review: + merge_group: jobs: pr-custom-review: runs-on: ubuntu-latest steps: + - name: Skip merge queue + if: ${{ contains(github.ref, 'gh-readonly-queue') }} + run: exit 0 - name: Skip if pull request is in Draft # `if: github.event.pull_request.draft == true` should be kept here, at # the step level, rather than at the job level. The latter is not diff --git a/.github/workflows/review-trigger.yml b/.github/workflows/review-trigger.yml index 2810ea356e6..e5fcb434fd3 100644 --- a/.github/workflows/review-trigger.yml +++ b/.github/workflows/review-trigger.yml @@ -1,6 +1,6 @@ name: Review-Trigger -on: +on: pull_request_target: types: - opened @@ -10,6 +10,7 @@ on: - review_request_removed - ready_for_review pull_request_review: + merge_group: jobs: trigger-review-bot: @@ -18,6 +19,9 @@ jobs: runs-on: ubuntu-latest name: trigger review bot steps: + - name: Skip merge queue + if: ${{ contains(github.ref, 'gh-readonly-queue') }} + run: exit 0 - name: Get PR number env: PR_NUMBER: ${{ github.event.pull_request.number }} diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index f507afda23e..1dc483004f2 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -31,7 +31,7 @@ variables: NEXTEST_FAILURE_OUTPUT: immediate-final NEXTEST_SUCCESS_OUTPUT: final ZOMBIENET_IMAGE: "docker.io/paritytech/zombienet:v1.3.79" - DOCKER_IMAGES_VERSION: "${CI_COMMIT_REF_NAME}-${CI_COMMIT_SHORT_SHA}" + DOCKER_IMAGES_VERSION: "${CI_COMMIT_SHA}" default: retry: @@ -136,11 +136,13 @@ default: - if: $CI_PIPELINE_SOURCE == "schedule" - if: $CI_COMMIT_REF_NAME == "master" - if: $CI_COMMIT_REF_NAME =~ /^[0-9]+$/ # PRs + - if: $CI_COMMIT_REF_NAME =~ /^gh-readonly-queue.*$/ # merge queues - if: $CI_COMMIT_REF_NAME =~ /^v[0-9]+\.[0-9]+.*$/ # i.e. v1.0, v2.1rc1 .test-pr-refs: rules: - if: $CI_COMMIT_REF_NAME =~ /^[0-9]+$/ # PRs + - if: $CI_COMMIT_REF_NAME =~ /^gh-readonly-queue.*$/ # merge queues # handle the specific case where benches could store incorrect bench data because of the downstream staging runs # exclude cargo-check-benches from such runs @@ -152,6 +154,7 @@ default: - if: $CI_PIPELINE_SOURCE == "schedule" - if: $CI_COMMIT_REF_NAME == "master" - if: $CI_COMMIT_REF_NAME =~ /^[0-9]+$/ # PRs + - if: $CI_COMMIT_REF_NAME =~ /^gh-readonly-queue.*$/ # merge queues - if: $CI_COMMIT_REF_NAME =~ /^v[0-9]+\.[0-9]+.*$/ # i.e. 
v1.0, v2.1rc1 .test-refs-no-trigger: @@ -162,6 +165,7 @@ default: - if: $CI_PIPELINE_SOURCE == "schedule" - if: $CI_COMMIT_REF_NAME == "master" - if: $CI_COMMIT_REF_NAME =~ /^[0-9]+$/ # PRs + - if: $CI_COMMIT_REF_NAME =~ /^gh-readonly-queue.*$/ # merge queues - if: $CI_COMMIT_REF_NAME =~ /^v[0-9]+\.[0-9]+.*$/ # i.e. v1.0, v2.1rc1 - if: $CI_COMMIT_REF_NAME =~ /^ci-release-.*$/ @@ -172,6 +176,7 @@ default: - if: $CI_PIPELINE_SOURCE == "web" - if: $CI_PIPELINE_SOURCE == "schedule" - if: $CI_COMMIT_REF_NAME =~ /^[0-9]+$/ # PRs + - if: $CI_COMMIT_REF_NAME =~ /^gh-readonly-queue.*$/ # merge queues .publish-refs: rules: @@ -192,6 +197,7 @@ default: - if: $CI_COMMIT_REF_NAME == "master" - if: $CI_COMMIT_REF_NAME =~ /^v[0-9]+\.[0-9]+.*$/ # i.e. v1.0, v2.1rc1 - if: $CI_COMMIT_REF_NAME =~ /^[0-9]+$/ # PRs + - if: $CI_COMMIT_REF_NAME =~ /^gh-readonly-queue.*$/ # merge queues .zombienet-refs: extends: .build-refs diff --git a/.gitlab/pipeline/publish.yml b/.gitlab/pipeline/publish.yml index a03d407c040..f2308c334e0 100644 --- a/.gitlab/pipeline/publish.yml +++ b/.gitlab/pipeline/publish.yml @@ -71,8 +71,8 @@ publish-rustdoc: DOCKERFILE: "" # docker/path-to.Dockerfile IMAGE_NAME: "" # docker.io/paritypr/image_name script: - # - test "$PARITYPR_USER" -a "$PARITYPR_PASS" || - # ( echo "no docker credentials provided"; exit 1 ) + # Exit if the job is not running in a merge queue + - if [[ $CI_COMMIT_REF_NAME != *"gh-readonly-queue"* ]]; then echo "I will run only in a merge queue"; exit 0; fi - $BUILDAH_COMMAND build --format=docker --build-arg VCS_REF="${CI_COMMIT_SHA}" diff --git a/.gitlab/pipeline/zombienet/cumulus.yml b/.gitlab/pipeline/zombienet/cumulus.yml index 3f2c6f64fbf..c8a1df004e3 100644 --- a/.gitlab/pipeline/zombienet/cumulus.yml +++ b/.gitlab/pipeline/zombienet/cumulus.yml @@ -3,6 +3,8 @@ .zombienet-before-script: before_script: + # Exit if the job is not merge queue + - if [[ $CI_COMMIT_REF_NAME != *"gh-readonly-queue"* ]]; then echo "I will run only in a merge queue"; exit 0; fi - echo "Zombie-net Tests Config" - echo "${ZOMBIENET_IMAGE}" - echo "${POLKADOT_IMAGE}" diff --git a/.gitlab/pipeline/zombienet/polkadot.yml b/.gitlab/pipeline/zombienet/polkadot.yml index 8fc8b280bba..cc960557298 100644 --- a/.gitlab/pipeline/zombienet/polkadot.yml +++ b/.gitlab/pipeline/zombienet/polkadot.yml @@ -4,6 +4,8 @@ # common settings for all zombienet jobs .zombienet-polkadot-common: before_script: + # Exit if the job is not merge queue + - if [[ $CI_COMMIT_REF_NAME != *"gh-readonly-queue"* ]]; then echo "I will run only in a merge queue"; exit 0; fi - export BUILD_RELEASE_VERSION="$(cat ./artifacts/BUILD_RELEASE_VERSION)" # from build-linux-stable job - export DEBUG=zombie,zombie::network-node - export ZOMBIENET_INTEGRATION_TEST_IMAGE="${POLKADOT_IMAGE}":${PIPELINE_IMAGE_TAG} @@ -12,12 +14,12 @@ - export MALUS_IMAGE="${MALUS_IMAGE}":${PIPELINE_IMAGE_TAG} - IMAGE_AVAILABLE=$(curl -o /dev/null -w "%{http_code}" -I -L -s https://registry.hub.docker.com/v2/repositories/parity/polkadot/tags/${BUILD_RELEASE_VERSION}) - if [ $IMAGE_AVAILABLE -eq 200 ]; then - export ZOMBIENET_INTEGRATION_TEST_SECONDARY_IMAGE="docker.io/parity/polkadot:${BUILD_RELEASE_VERSION}"; + export ZOMBIENET_INTEGRATION_TEST_SECONDARY_IMAGE="docker.io/parity/polkadot:${BUILD_RELEASE_VERSION}"; else - echo "Getting the image to use as SECONDARY, using ${BUILD_RELEASE_VERSION} as base"; - VERSIONS=$(curl -L -s 'https://registry.hub.docker.com/v2/repositories/parity/polkadot/tags/' | jq -r '.results[].name'| grep -E "v[0-9]" |grep -vE 
"[0-9]-"); - VERSION_TO_USE=$(echo "${BUILD_RELEASE_VERSION}\n$VERSIONS"|sort -r|grep -A1 "${BUILD_RELEASE_VERSION}"|tail -1); - export ZOMBIENET_INTEGRATION_TEST_SECONDARY_IMAGE="docker.io/parity/polkadot:${VERSION_TO_USE}"; + echo "Getting the image to use as SECONDARY, using ${BUILD_RELEASE_VERSION} as base"; + VERSIONS=$(curl -L -s 'https://registry.hub.docker.com/v2/repositories/parity/polkadot/tags/' | jq -r '.results[].name'| grep -E "v[0-9]" |grep -vE "[0-9]-"); + VERSION_TO_USE=$(echo "${BUILD_RELEASE_VERSION}\n$VERSIONS"|sort -r|grep -A1 "${BUILD_RELEASE_VERSION}"|tail -1); + export ZOMBIENET_INTEGRATION_TEST_SECONDARY_IMAGE="docker.io/parity/polkadot:${VERSION_TO_USE}"; fi - echo "Zombienet Tests Config" - echo "gh-dir ${GH_DIR}" @@ -117,6 +119,8 @@ zombienet-polkadot-smoke-0001-parachains-smoke-test: extends: - .zombienet-polkadot-common before_script: + # Exit if the job is not merge queue + - if [[ $CI_COMMIT_REF_NAME != *"gh-readonly-queue"* ]]; then echo "I will run only in a merge queue"; exit 0; fi - export ZOMBIENET_INTEGRATION_TEST_IMAGE="${POLKADOT_IMAGE}":${PIPELINE_IMAGE_TAG} - export COL_IMAGE="${COLANDER_IMAGE}":${PIPELINE_IMAGE_TAG} - echo "Zombienet Tests Config" @@ -134,6 +138,8 @@ zombienet-polkadot-smoke-0002-parachains-parachains-upgrade-smoke: extends: - .zombienet-polkadot-common before_script: + # Exit if the job is not merge queue + - if [[ $CI_COMMIT_REF_NAME != *"gh-readonly-queue"* ]]; then echo "I will run only in a merge queue"; exit 0; fi - export ZOMBIENET_INTEGRATION_TEST_IMAGE="${POLKADOT_IMAGE}":${PIPELINE_IMAGE_TAG} - export CUMULUS_IMAGE="docker.io/paritypr/polkadot-parachain-debug:${DOCKER_IMAGES_VERSION}" - echo "Zombienet Tests Config" @@ -176,6 +182,8 @@ zombienet-polkadot-misc-0002-upgrade-node: - job: build-linux-stable artifacts: true before_script: + # Exit if the job is not merge queue + - if [[ $CI_COMMIT_REF_NAME != *"gh-readonly-queue"* ]]; then echo "I will run only in a merge queue"; exit 0; fi - export ZOMBIENET_INTEGRATION_TEST_IMAGE="docker.io/parity/polkadot:latest" - echo "Overrided poladot image ${ZOMBIENET_INTEGRATION_TEST_IMAGE}" - export COL_IMAGE="${COLANDER_IMAGE}":${PIPELINE_IMAGE_TAG} diff --git a/.gitlab/pipeline/zombienet/substrate.yml b/.gitlab/pipeline/zombienet/substrate.yml index 9e14ebe0852..e627575a31a 100644 --- a/.gitlab/pipeline/zombienet/substrate.yml +++ b/.gitlab/pipeline/zombienet/substrate.yml @@ -4,6 +4,8 @@ # common settings for all zombienet jobs .zombienet-substrate-common: before_script: + # Exit if the job is not merge queue + - if [[ $CI_COMMIT_REF_NAME != *"gh-readonly-queue"* ]]; then echo "I will run only in a merge queue"; exit 0; fi - echo "Zombienet Tests Config" - echo "${ZOMBIENET_IMAGE}" - echo "${GH_DIR}" -- GitLab From c79b234b3bb9d02ee5145c8d65a497104b5d3ea7 Mon Sep 17 00:00:00 2001 From: joe petrowski <25483142+joepetrowski@users.noreply.github.com> Date: Wed, 15 Nov 2023 15:22:28 +0100 Subject: [PATCH 059/199] Identity Deposits Relay to Parachain Migration (#1814) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The goal of this PR is to migrate Identity deposits from the Relay Chain to a system parachain. The problem I want to solve is that `IdentityOf` and `SubsOf` both store an amount that's held in reserve as a storage deposit. 
When migrating to a parachain, we can take a snapshot of the actual `IdentityInfo` and sub-account mappings, but should migrate (off chain) the `deposit`s to zero, since the chain (and by extension, accounts) won't have any funds at genesis. The good news is that we expect parachain deposits to be significantly lower (possibly 100x) on the parachain. That is, a deposit of 21 DOT on the Relay Chain would need 0.21 DOT on a parachain. This PR proposes to migrate the deposits in the following way: 1. Introduces a new pallet with two extrinsics: - `reap_identity`: Has a configurable `ReapOrigin`, which would be set to `EnsureSigned` on the Relay Chain (i.e. callable by anyone) and `EnsureRoot` on the parachain (we don't want identities reaped from there). - `poke_deposit`: Checks what deposit the pallet holds (at genesis, zero) and attempts to update the amount based on the calculated deposit for storage data. 2. `reap_identity` clears all storage data for a `target` account and unreserves their deposit. 3. A `ReapIdentityHandler` teleports the necessary DOT to the parachain and calls `poke_deposit`. Since the parachain deposit is much lower, and was just unreserved, we know we have enough. One awkwardness I ran into was that the XCMv3 instruction set does not provide a way for the system to teleport assets without a fee being deducted on reception. Users shouldn't have to pay a fee for the system to migrate their info to a more efficient location. So I wrote my own program and did the `InitiateTeleport` accounting on my own to send a program with `UnpaidExecution`. Have discussed an `InitiateUnpaidTeleport` instruction with @franciscoaguirre . Obviously any chain executing this would have to pass a `Barrier` for free execution. TODO: - [x] Confirm People Chain ParaId - [x] Confirm People Chain deposit rates (determined in https://github.com/paritytech/polkadot-sdk/pull/2281) - [x] Add pallet to Westend --------- Co-authored-by: Bastian Köcher --- Cargo.lock | 1 + polkadot/runtime/common/Cargo.toml | 4 + .../runtime/common/src/identity_migrator.rs | 305 ++++++++++++++++++ .../runtime/common/src/integration_tests.rs | 29 +- polkadot/runtime/common/src/lib.rs | 1 + polkadot/runtime/rococo/src/impls.rs | 158 +++++++++ polkadot/runtime/rococo/src/lib.rs | 39 ++- polkadot/runtime/rococo/src/weights/mod.rs | 1 + .../runtime_common_identity_migrator.rs | 97 ++++++ polkadot/runtime/westend/src/impls.rs | 158 +++++++++ polkadot/runtime/westend/src/lib.rs | 33 +- polkadot/runtime/westend/src/weights/mod.rs | 1 + .../runtime_common_identity_migrator.rs | 97 ++++++ substrate/frame/identity/src/lib.rs | 133 +++++++- substrate/frame/identity/src/tests.rs | 67 ++++ 15 files changed, 1111 insertions(+), 13 deletions(-) create mode 100644 polkadot/runtime/common/src/identity_migrator.rs create mode 100644 polkadot/runtime/rococo/src/impls.rs create mode 100644 polkadot/runtime/rococo/src/weights/runtime_common_identity_migrator.rs create mode 100644 polkadot/runtime/westend/src/impls.rs create mode 100644 polkadot/runtime/westend/src/weights/runtime_common_identity_migrator.rs diff --git a/Cargo.lock b/Cargo.lock index 9ae1bb905b2..b1a46a88f38 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -13047,6 +13047,7 @@ dependencies = [ "pallet-balances", "pallet-election-provider-multi-phase", "pallet-fast-unstake", + "pallet-identity", "pallet-session", "pallet-staking", "pallet-staking-reward-fn", diff --git a/polkadot/runtime/common/Cargo.toml b/polkadot/runtime/common/Cargo.toml index 0882e555aaf..4391b6d81eb 100644 
--- a/polkadot/runtime/common/Cargo.toml +++ b/polkadot/runtime/common/Cargo.toml @@ -30,6 +30,7 @@ sp-npos-elections = { path = "../../../substrate/primitives/npos-elections", def pallet-authorship = { path = "../../../substrate/frame/authorship", default-features = false } pallet-balances = { path = "../../../substrate/frame/balances", default-features = false } pallet-fast-unstake = { path = "../../../substrate/frame/fast-unstake", default-features = false } +pallet-identity = { path = "../../../substrate/frame/identity", default-features = false } pallet-session = { path = "../../../substrate/frame/session", default-features = false } frame-support = { path = "../../../substrate/frame/support", default-features = false } pallet-staking = { path = "../../../substrate/frame/staking", default-features = false } @@ -85,6 +86,7 @@ std = [ "pallet-balances/std", "pallet-election-provider-multi-phase/std", "pallet-fast-unstake/std", + "pallet-identity/std", "pallet-session/std", "pallet-staking-reward-fn/std", "pallet-staking/std", @@ -124,6 +126,7 @@ runtime-benchmarks = [ "pallet-balances/runtime-benchmarks", "pallet-election-provider-multi-phase/runtime-benchmarks", "pallet-fast-unstake/runtime-benchmarks", + "pallet-identity/runtime-benchmarks", "pallet-staking/runtime-benchmarks", "pallet-timestamp/runtime-benchmarks", "pallet-treasury/runtime-benchmarks", @@ -147,6 +150,7 @@ try-runtime = [ "pallet-balances/try-runtime", "pallet-election-provider-multi-phase/try-runtime", "pallet-fast-unstake/try-runtime", + "pallet-identity/try-runtime", "pallet-session/try-runtime", "pallet-staking/try-runtime", "pallet-timestamp/try-runtime", diff --git a/polkadot/runtime/common/src/identity_migrator.rs b/polkadot/runtime/common/src/identity_migrator.rs new file mode 100644 index 00000000000..cc2c3ce7773 --- /dev/null +++ b/polkadot/runtime/common/src/identity_migrator.rs @@ -0,0 +1,305 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! This pallet is designed to go into a source chain and destination chain to migrate data. The +//! design motivations are: +//! +//! - Call some function on the source chain that executes some migration (clearing state, +//! forwarding an XCM program). +//! - Call some function (probably from an XCM program) on the destination chain. +//! - Avoid cluttering the source pallet with new dispatchables that are unrelated to its +//! functionality and only used for migration. +//! +//! After the migration is complete, the pallet may be removed from both chains' runtimes as well as +//! the `polkadot-runtime-common` crate. 
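+//!
+//! The intended end-to-end flow:
+//! 1. An origin permitted by `Reaper` (planned to be `EnsureSigned`, i.e. anyone, on the source
+//!    chain) calls `reap_identity(who)`: identity and sub-identity storage for `who` is removed
+//!    and the deposits are unreserved.
+//! 2. The configured `ReapIdentityHandler` (e.g. `ToParachainIdentityReaper` in the runtimes)
+//!    teleports enough funds to cover the much smaller parachain deposit and sends
+//!    `Transact { poke_deposit(who) }` to the parachain.
+//! 3. On the parachain, `poke_deposit` recalculates the deposit for the migrated data and
+//!    reserves it from the teleported funds.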
+ +use frame_support::{dispatch::DispatchResult, traits::Currency, weights::Weight}; +pub use pallet::*; +use pallet_identity; +use sp_core::Get; + +#[cfg(feature = "runtime-benchmarks")] +use frame_benchmarking::{account, impl_benchmark_test_suite, v2::*, BenchmarkError}; + +pub trait WeightInfo { + fn reap_identity(r: u32, s: u32) -> Weight; + fn poke_deposit() -> Weight; +} + +impl WeightInfo for () { + fn reap_identity(_r: u32, _s: u32) -> Weight { + Weight::MAX + } + fn poke_deposit() -> Weight { + Weight::MAX + } +} + +pub struct TestWeightInfo; +impl WeightInfo for TestWeightInfo { + fn reap_identity(_r: u32, _s: u32) -> Weight { + Weight::zero() + } + fn poke_deposit() -> Weight { + Weight::zero() + } +} + +// Must use the same `Balance` as `T`'s Identity pallet to handle deposits. +type BalanceOf = <::Currency as Currency< + ::AccountId, +>>::Balance; + +#[frame_support::pallet] +pub mod pallet { + use super::*; + use frame_support::{ + dispatch::{DispatchResultWithPostInfo, PostDispatchInfo}, + pallet_prelude::*, + traits::EnsureOrigin, + }; + use frame_system::pallet_prelude::*; + + #[pallet::pallet] + pub struct Pallet(_); + + #[pallet::config] + pub trait Config: frame_system::Config + pallet_identity::Config { + /// Overarching event type. + type RuntimeEvent: From> + IsType<::RuntimeEvent>; + + /// The origin that can reap identities. Expected to be `EnsureSigned` on the + /// source chain such that anyone can all this function. + type Reaper: EnsureOrigin; + + /// A handler for what to do when an identity is reaped. + type ReapIdentityHandler: OnReapIdentity; + + /// Weight information for the extrinsics in the pallet. + type WeightInfo: WeightInfo; + } + + #[pallet::event] + #[pallet::generate_deposit(pub(super) fn deposit_event)] + pub enum Event { + /// The identity and all sub accounts were reaped for `who`. + IdentityReaped { who: T::AccountId }, + /// The deposits held for `who` were updated. `identity` is the new deposit held for + /// identity info, and `subs` is the new deposit held for the sub-accounts. + DepositUpdated { who: T::AccountId, identity: BalanceOf, subs: BalanceOf }, + } + + #[pallet::call] + impl Pallet { + /// Reap the `IdentityInfo` of `who` from the Identity pallet of `T`, unreserving any + /// deposits held and removing storage items associated with `who`. + #[pallet::call_index(0)] + #[pallet::weight(::WeightInfo::reap_identity( + T::MaxRegistrars::get(), + T::MaxSubAccounts::get() + ))] + pub fn reap_identity( + origin: OriginFor, + who: T::AccountId, + ) -> DispatchResultWithPostInfo { + T::Reaper::ensure_origin(origin)?; + // - number of registrars (required to calculate weight) + // - byte size of `IdentityInfo` (required to calculate remote deposit) + // - number of sub accounts (required to calculate both weight and remote deposit) + let (registrars, bytes, subs) = pallet_identity::Pallet::::reap_identity(&who)?; + T::ReapIdentityHandler::on_reap_identity(&who, bytes, subs)?; + Self::deposit_event(Event::IdentityReaped { who }); + let post = PostDispatchInfo { + actual_weight: Some(::WeightInfo::reap_identity( + registrars, subs, + )), + pays_fee: Pays::No, + }; + Ok(post) + } + + /// Update the deposit of `who`. Meant to be called by the system with an XCM `Transact` + /// Instruction. 
+ #[pallet::call_index(1)] + #[pallet::weight(::WeightInfo::poke_deposit())] + pub fn poke_deposit(origin: OriginFor, who: T::AccountId) -> DispatchResultWithPostInfo { + ensure_root(origin)?; + let (id_deposit, subs_deposit) = pallet_identity::Pallet::::poke_deposit(&who)?; + Self::deposit_event(Event::DepositUpdated { + who, + identity: id_deposit, + subs: subs_deposit, + }); + Ok(Pays::No.into()) + } + } +} + +/// Trait to handle reaping identity from state. +pub trait OnReapIdentity { + /// What to do when an identity is reaped. For example, the implementation could send an XCM + /// program to another chain. Concretely, a type implementing this trait in the Polkadot + /// runtime would teleport enough DOT to the People Chain to cover the Identity deposit there. + /// + /// This could also directly include `Transact { poke_deposit(..), ..}`. + /// + /// Inputs + /// - `who`: Whose identity was reaped. + /// - `bytes`: The byte size of `IdentityInfo`. + /// - `subs`: The number of sub-accounts they had. + fn on_reap_identity(who: &AccountId, bytes: u32, subs: u32) -> DispatchResult; +} + +impl OnReapIdentity for () { + fn on_reap_identity(_who: &AccountId, _bytes: u32, _subs: u32) -> DispatchResult { + Ok(()) + } +} + +#[cfg(feature = "runtime-benchmarks")] +#[benchmarks] +mod benchmarks { + use super::*; + use frame_support::traits::EnsureOrigin; + use frame_system::RawOrigin; + use pallet_identity::{Data, IdentityInformationProvider, Judgement, Pallet as Identity}; + use parity_scale_codec::Encode; + use sp_runtime::{ + traits::{Bounded, Hash, StaticLookup}, + Saturating, + }; + use sp_std::{boxed::Box, vec::Vec, *}; + + const SEED: u32 = 0; + + fn assert_last_event(generic_event: ::RuntimeEvent) { + let events = frame_system::Pallet::::events(); + let system_event: ::RuntimeEvent = generic_event.into(); + let frame_system::EventRecord { event, .. 
} = &events[events.len() - 1]; + assert_eq!(event, &system_event); + } + + #[benchmark] + fn reap_identity( + r: Linear<0, { T::MaxRegistrars::get() }>, + s: Linear<0, { T::MaxSubAccounts::get() }>, + ) -> Result<(), BenchmarkError> { + // set up target + let target: T::AccountId = account("target", 0, SEED); + let target_origin = + ::RuntimeOrigin::from(RawOrigin::Signed(target.clone())); + let target_lookup = T::Lookup::unlookup(target.clone()); + let _ = T::Currency::make_free_balance_be(&target, BalanceOf::::max_value()); + + // set identity + let info = ::IdentityInformation::create_identity_info(); + Identity::::set_identity( + RawOrigin::Signed(target.clone()).into(), + Box::new(info.clone()), + )?; + + // create and set subs + let mut subs = Vec::new(); + let data = Data::Raw(vec![0; 32].try_into().unwrap()); + for ii in 0..s { + let sub_account = account("sub", ii, SEED); + subs.push((sub_account, data.clone())); + } + Identity::::set_subs(target_origin.clone(), subs.clone())?; + + // add registrars and provide judgements + let registrar_origin = T::RegistrarOrigin::try_successful_origin() + .expect("RegistrarOrigin has no successful origin required for the benchmark"); + for ii in 0..r { + // registrar account + let registrar: T::AccountId = account("registrar", ii, SEED); + let registrar_lookup = T::Lookup::unlookup(registrar.clone()); + let _ = ::Currency::make_free_balance_be( + ®istrar, + ::Currency::minimum_balance(), + ); + + // add registrar + Identity::::add_registrar(registrar_origin.clone(), registrar_lookup)?; + Identity::::set_fee(RawOrigin::Signed(registrar.clone()).into(), ii, 10u32.into())?; + let fields = ::IdentityInformation::all_fields(); + Identity::::set_fields(RawOrigin::Signed(registrar.clone()).into(), ii, fields)?; + + // request and provide judgement + Identity::::request_judgement(target_origin.clone(), ii, 10u32.into())?; + Identity::::provide_judgement( + RawOrigin::Signed(registrar).into(), + ii, + target_lookup.clone(), + Judgement::Reasonable, + ::Hashing::hash_of(&info), + )?; + } + + let origin = T::Reaper::try_successful_origin().map_err(|_| BenchmarkError::Weightless)?; + + #[extrinsic_call] + _(origin as T::RuntimeOrigin, target.clone()); + + assert_last_event::(Event::::IdentityReaped { who: target.clone() }.into()); + + let fields = ::IdentityInformation::all_fields(); + assert!(!Identity::::has_identity(&target, fields)); + assert_eq!(Identity::::subs(&target).len(), 0); + + Ok(()) + } + + #[benchmark] + fn poke_deposit() -> Result<(), BenchmarkError> { + let target: T::AccountId = account("target", 0, SEED); + let _ = T::Currency::make_free_balance_be(&target, BalanceOf::::max_value()); + let info = ::IdentityInformation::create_identity_info(); + + let _ = Identity::::set_identity_no_deposit(&target, info.clone()); + + let sub_account: T::AccountId = account("sub", 0, SEED); + let _ = Identity::::set_sub_no_deposit(&target, sub_account.clone()); + + // expected deposits + let expected_id_deposit = ::BasicDeposit::get() + .saturating_add( + ::ByteDeposit::get() + .saturating_mul(>::from(info.encoded_size() as u32)), + ); + // only 1 sub + let expected_sub_deposit = ::SubAccountDeposit::get(); + + #[extrinsic_call] + _(RawOrigin::Root, target.clone()); + + assert_last_event::( + Event::::DepositUpdated { + who: target, + identity: expected_id_deposit, + subs: expected_sub_deposit, + } + .into(), + ); + + Ok(()) + } + + impl_benchmark_test_suite!( + Pallet, + crate::integration_tests::new_test_ext(), + crate::integration_tests::Test, + ); +} 
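Before the runtime wiring below, a free-standing sketch of the hand-off the new pallet relies on may help: `reap_identity` clears state and unreserves the deposit, then delegates funding of the destination chain to an `OnReapIdentity` handler. The trait is restated here without the runtime generics, and every constant is a made-up placeholder, so treat this as an illustration of the shape rather than the actual implementation (the real handler, `ToParachainIdentityReaper`, follows in `impls.rs`):

```rust
type Balance = u128;

/// Simplified mirror of the pallet's hook (the real trait is generic over the runtime's
/// account type and returns `DispatchResult`).
trait OnReapIdentity<AccountId> {
    fn on_reap_identity(who: &AccountId, bytes: u32, subs: u32) -> Result<(), &'static str>;
}

/// Example handler: work out what the destination chain needs and "send" it.
struct ToParachainReaperSketch;

impl OnReapIdentity<u64> for ToParachainReaperSketch {
    fn on_reap_identity(who: &u64, bytes: u32, subs: u32) -> Result<(), &'static str> {
        // Placeholder per-item prices on the destination chain, not real constants.
        const BASIC_DEPOSIT: Balance = 1_000;
        const BYTE_DEPOSIT: Balance = 10;
        const SUB_ACCOUNT_DEPOSIT: Balance = 500;
        const EXISTENTIAL_DEPOSIT: Balance = 100;

        // Same structure as `calculate_remote_deposit`: identity deposit, plus sub-account
        // deposits, plus two existential deposits (one for existence, one to transact).
        let total = BASIC_DEPOSIT
            + BYTE_DEPOSIT * bytes as Balance
            + SUB_ACCOUNT_DEPOSIT * subs as Balance
            + 2 * EXISTENTIAL_DEPOSIT;

        // The real handler teleports `total` to the parachain and appends a
        // `Transact { poke_deposit(who) }` so the deposit is taken there.
        println!("would teleport {total} for account {who} and poke their deposit");
        Ok(())
    }
}

fn main() {
    // 300 bytes of identity info and 2 sub-accounts, purely as an example.
    ToParachainReaperSketch::on_reap_identity(&42, 300, 2).unwrap();
}
```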
diff --git a/polkadot/runtime/common/src/integration_tests.rs b/polkadot/runtime/common/src/integration_tests.rs index d5a32775fd4..793f75e79cd 100644 --- a/polkadot/runtime/common/src/integration_tests.rs +++ b/polkadot/runtime/common/src/integration_tests.rs @@ -17,7 +17,7 @@ //! Mocking utilities for testing with real pallets. use crate::{ - auctions, crowdloan, + auctions, crowdloan, identity_migrator, mock::{conclude_pvf_checking, validators_public_keys}, paras_registrar, slot_range::SlotRange, @@ -32,6 +32,7 @@ use frame_support::{ }; use frame_support_test::TestRandomness; use frame_system::EnsureRoot; +use pallet_identity::{self, legacy::IdentityInfo}; use parity_scale_codec::Encode; use primitives::{ BlockNumber, HeadData, Id as ParaId, SessionIndex, ValidationCode, LOWEST_PUBLIC_ID, @@ -88,6 +89,10 @@ frame_support::construct_runtime!( Auctions: auctions::{Pallet, Call, Storage, Event}, Crowdloan: crowdloan::{Pallet, Call, Storage, Event}, Slots: slots::{Pallet, Call, Storage, Event}, + + // Migrators + Identity: pallet_identity::{Pallet, Call, Storage, Event}, + IdentityMigrator: identity_migrator::{Pallet, Call, Event}, } ); @@ -274,6 +279,28 @@ impl crowdloan::Config for Test { type WeightInfo = crate::crowdloan::TestWeightInfo; } +impl pallet_identity::Config for Test { + type RuntimeEvent = RuntimeEvent; + type Currency = Balances; + type Slashed = (); + type BasicDeposit = ConstU32<100>; + type ByteDeposit = ConstU32<10>; + type SubAccountDeposit = ConstU32<100>; + type MaxSubAccounts = ConstU32<2>; + type IdentityInformation = IdentityInfo>; + type MaxRegistrars = ConstU32<20>; + type RegistrarOrigin = EnsureRoot; + type ForceOrigin = EnsureRoot; + type WeightInfo = (); +} + +impl identity_migrator::Config for Test { + type RuntimeEvent = RuntimeEvent; + type Reaper = EnsureRoot; + type ReapIdentityHandler = (); + type WeightInfo = crate::identity_migrator::TestWeightInfo; +} + /// Create a new set of test externalities. pub fn new_test_ext() -> TestExternalities { let mut t = frame_system::GenesisConfig::::default().build_storage().unwrap(); diff --git a/polkadot/runtime/common/src/lib.rs b/polkadot/runtime/common/src/lib.rs index 70722d50988..bd49d3cccc9 100644 --- a/polkadot/runtime/common/src/lib.rs +++ b/polkadot/runtime/common/src/lib.rs @@ -23,6 +23,7 @@ pub mod auctions; pub mod claims; pub mod crowdloan; pub mod elections; +pub mod identity_migrator; pub mod impls; pub mod paras_registrar; pub mod paras_sudo_wrapper; diff --git a/polkadot/runtime/rococo/src/impls.rs b/polkadot/runtime/rococo/src/impls.rs new file mode 100644 index 00000000000..71b1091eeb6 --- /dev/null +++ b/polkadot/runtime/rococo/src/impls.rs @@ -0,0 +1,158 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . 
+ +use crate::xcm_config; +use frame_support::pallet_prelude::DispatchResult; +use frame_system::RawOrigin; +use parity_scale_codec::{Decode, Encode}; +use primitives::Balance; +use rococo_runtime_constants::currency::*; +use runtime_common::identity_migrator::{OnReapIdentity, WeightInfo}; +use sp_std::{marker::PhantomData, prelude::*}; +use xcm::{latest::prelude::*, VersionedMultiLocation, VersionedXcm}; +use xcm_executor::traits::TransactAsset; + +/// A type containing the encoding of the People Chain pallets in its runtime. Used to construct any +/// remote calls. The codec index must correspond to the index of `IdentityMigrator` in the +/// `construct_runtime` of the remote chain. +#[derive(Encode, Decode)] +enum PeopleRuntimePallets { + #[codec(index = 248)] + IdentityMigrator(IdentityMigratorCalls), +} + +/// Call encoding for the calls needed from the Identity Migrator pallet. +#[derive(Encode, Decode)] +enum IdentityMigratorCalls { + #[codec(index = 1)] + PokeDeposit(AccountId), +} + +/// Type that implements `OnReapIdentity` that will send the deposit needed to store the same +/// information on a parachain, sends the deposit there, and then updates it. +pub struct ToParachainIdentityReaper(PhantomData<(Runtime, AccountId)>); +impl ToParachainIdentityReaper { + /// Calculate the balance needed on the remote chain based on the `IdentityInfo` and `Subs` on + /// this chain. The total includes: + /// + /// - Identity basic deposit + /// - `IdentityInfo` byte deposit + /// - Sub accounts deposit + /// - 2x existential deposit (1 for account existence, 1 such that the user can transact) + fn calculate_remote_deposit(bytes: u32, subs: u32) -> Balance { + // Remote deposit constants. Parachain uses `deposit / 100` + // Source: + // https://github.com/paritytech/polkadot-sdk/blob/a146918/cumulus/parachains/common/src/rococo.rs#L29 + // + // Parachain Deposit Configuration: + // + // pub const BasicDeposit: Balance = deposit(1, 17); + // pub const ByteDeposit: Balance = deposit(0, 1); + // pub const SubAccountDeposit: Balance = deposit(1, 53); + // pub const EXISTENTIAL_DEPOSIT: Balance = constants::currency::EXISTENTIAL_DEPOSIT / 10; + let para_basic_deposit = deposit(1, 17) / 100; + let para_byte_deposit = deposit(0, 1) / 100; + let para_sub_account_deposit = deposit(1, 53) / 100; + let para_existential_deposit = EXISTENTIAL_DEPOSIT / 10; + + // pallet deposits + let id_deposit = + para_basic_deposit.saturating_add(para_byte_deposit.saturating_mul(bytes as Balance)); + let subs_deposit = para_sub_account_deposit.saturating_mul(subs as Balance); + + id_deposit + .saturating_add(subs_deposit) + .saturating_add(para_existential_deposit.saturating_mul(2)) + } +} + +impl OnReapIdentity for ToParachainIdentityReaper +where + Runtime: frame_system::Config + pallet_xcm::Config, + AccountId: Into<[u8; 32]> + Clone + Encode, +{ + fn on_reap_identity(who: &AccountId, fields: u32, subs: u32) -> DispatchResult { + use crate::{ + impls::IdentityMigratorCalls::PokeDeposit, + weights::runtime_common_identity_migrator::WeightInfo as MigratorWeights, + }; + + let total_to_send = Self::calculate_remote_deposit(fields, subs); + + // define asset / destination from relay perspective + let roc = MultiAsset { id: Concrete(Here.into_location()), fun: Fungible(total_to_send) }; + // People Chain: ParaId 1004 + let destination: MultiLocation = MultiLocation::new(0, Parachain(1004)); + + // Do `check_out` accounting since the XCM Executor's `InitiateTeleport` doesn't support + // unpaid teleports. 
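+ // `InitiateTeleport` offers no way to have the destination execute the transfer without
+ // paying fees out of the teleported amount, so the teleport bookkeeping is done by hand:
+ // `can_check_out`/`check_out` here, then an explicit program (`UnpaidExecution`,
+ // `ReceiveTeleportedAsset`, `DepositAsset`, `Transact`) on the People Chain, so the full
+ // amount arrives without a fee deduction.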
+ + // check out + xcm_config::LocalAssetTransactor::can_check_out( + &destination, + &roc, + // not used in AssetTransactor + &XcmContext { origin: None, message_id: [0; 32], topic: None }, + ) + .map_err(|_| pallet_xcm::Error::::CannotCheckOutTeleport)?; + xcm_config::LocalAssetTransactor::check_out( + &destination, + &roc, + // not used in AssetTransactor + &XcmContext { origin: None, message_id: [0; 32], topic: None }, + ); + + // reanchor + let roc_reanchored: MultiAssets = vec![MultiAsset { + id: Concrete(MultiLocation::new(1, Here)), + fun: Fungible(total_to_send), + }] + .into(); + + let poke = PeopleRuntimePallets::::IdentityMigrator(PokeDeposit(who.clone())); + let remote_weight_limit = MigratorWeights::::poke_deposit().saturating_mul(2); + + // Actual program to execute on People Chain. + let program: Xcm<()> = Xcm(vec![ + // Unpaid as this is constructed by the system, once per user. The user shouldn't have + // their balance reduced by teleport fees for the favor of migrating. + UnpaidExecution { weight_limit: Unlimited, check_origin: None }, + // Receive the asset into holding. + ReceiveTeleportedAsset(roc_reanchored), + // Deposit into the user's account. + DepositAsset { + assets: Wild(AllCounted(1)), + beneficiary: Junction::AccountId32 { network: None, id: who.clone().into() } + .into_location() + .into(), + }, + // Poke the deposit to reserve the appropriate amount on the parachain. + Transact { + origin_kind: OriginKind::Superuser, + require_weight_at_most: remote_weight_limit, + call: poke.encode().into(), + }, + ]); + + // send + let _ = >::send( + RawOrigin::Root.into(), + Box::new(VersionedMultiLocation::V3(destination)), + Box::new(VersionedXcm::V3(program)), + )?; + Ok(()) + } +} diff --git a/polkadot/runtime/rococo/src/lib.rs b/polkadot/runtime/rococo/src/lib.rs index 5a1e170862e..277c9981dab 100644 --- a/polkadot/runtime/rococo/src/lib.rs +++ b/polkadot/runtime/rococo/src/lib.rs @@ -31,7 +31,7 @@ use primitives::{ ValidatorIndex, PARACHAIN_KEY_TYPE_ID, }; use runtime_common::{ - assigned_slots, auctions, claims, crowdloan, impl_runtime_weights, + assigned_slots, auctions, claims, crowdloan, identity_migrator, impl_runtime_weights, impls::{ LocatableAssetConverter, ToAuthor, VersionedLocatableAsset, VersionedMultiLocationConverter, }, @@ -68,9 +68,9 @@ use frame_support::{ genesis_builder_helper::{build_config, create_default_config}, parameter_types, traits::{ - fungible::HoldConsideration, EitherOf, EitherOfDiverse, Everything, InstanceFilter, - KeyOwnerProofSystem, LinearStoragePrice, PrivilegeCmp, ProcessMessage, ProcessMessageError, - StorageMapShim, WithdrawReasons, + fungible::HoldConsideration, Contains, EitherOf, EitherOfDiverse, EverythingBut, + InstanceFilter, KeyOwnerProofSystem, LinearStoragePrice, PrivilegeCmp, ProcessMessage, + ProcessMessageError, StorageMapShim, WithdrawReasons, }, weights::{ConstantMultiplier, WeightMeter}, PalletId, @@ -114,6 +114,10 @@ mod weights; // XCM configurations. pub mod xcm_config; +// Implemented types. +mod impls; +use impls::ToParachainIdentityReaper; + // Governance and configurations. pub mod governance; use governance::{ @@ -166,13 +170,24 @@ pub fn native_version() -> NativeVersion { NativeVersion { runtime_version: VERSION, can_author_with: Default::default() } } +/// A type to identify calls to the Identity pallet. These will be filtered to prevent invocation, +/// locking the state of the pallet and preventing further updates to identities and sub-identities. 
+/// The locked state will be the genesis state of a new system chain and then removed from the Relay +/// Chain. +pub struct IsIdentityCall; +impl Contains for IsIdentityCall { + fn contains(c: &RuntimeCall) -> bool { + matches!(c, RuntimeCall::Identity(_)) + } +} + parameter_types! { pub const Version: RuntimeVersion = VERSION; pub const SS58Prefix: u8 = 42; } impl frame_system::Config for Runtime { - type BaseCallFilter = Everything; + type BaseCallFilter = EverythingBut; type BlockWeights = BlockWeights; type BlockLength = BlockLength; type DbWeight = RocksDbWeight; @@ -1079,6 +1094,14 @@ impl auctions::Config for Runtime { type WeightInfo = weights::runtime_common_auctions::WeightInfo; } +impl identity_migrator::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + // To be changed to `EnsureSigned` once there is a People Chain to migrate to. + type Reaper = EnsureRoot; + type ReapIdentityHandler = ToParachainIdentityReaper; + type WeightInfo = weights::runtime_common_identity_migrator::WeightInfo; +} + type NisCounterpartInstance = pallet_balances::Instance2; impl pallet_balances::Config for Runtime { type Balance = Balance; @@ -1340,7 +1363,7 @@ construct_runtime! { // NIS pallet. Nis: pallet_nis::{Pallet, Call, Storage, Event, HoldReason} = 38, -// pub type NisCounterpartInstance = pallet_balances::Instance2; + // pub type NisCounterpartInstance = pallet_balances::Instance2; NisCounterpartBalances: pallet_balances:: = 45, // Parachains pallets. Start indices at 50 to leave room. @@ -1371,6 +1394,9 @@ construct_runtime! { // Pallet for sending XCM. XcmPallet: pallet_xcm::{Pallet, Call, Storage, Event, Origin, Config} = 99, + // Pallet for migrating Identity to a parachain. To be removed post-migration. + IdentityMigrator: identity_migrator::{Pallet, Call, Event} = 248, + ParasSudoWrapper: paras_sudo_wrapper::{Pallet, Call} = 250, AssignedSlots: assigned_slots::{Pallet, Call, Storage, Event, Config} = 251, @@ -1551,6 +1577,7 @@ mod benches { [runtime_common::auctions, Auctions] [runtime_common::crowdloan, Crowdloan] [runtime_common::claims, Claims] + [runtime_common::identity_migrator, IdentityMigrator] [runtime_common::slots, Slots] [runtime_common::paras_registrar, Registrar] [runtime_parachains::configuration, Configuration] diff --git a/polkadot/runtime/rococo/src/weights/mod.rs b/polkadot/runtime/rococo/src/weights/mod.rs index 9c563a67d98..bd2079ce827 100644 --- a/polkadot/runtime/rococo/src/weights/mod.rs +++ b/polkadot/runtime/rococo/src/weights/mod.rs @@ -46,6 +46,7 @@ pub mod runtime_common_assigned_slots; pub mod runtime_common_auctions; pub mod runtime_common_claims; pub mod runtime_common_crowdloan; +pub mod runtime_common_identity_migrator; pub mod runtime_common_paras_registrar; pub mod runtime_common_slots; pub mod runtime_parachains_assigner_on_demand; diff --git a/polkadot/runtime/rococo/src/weights/runtime_common_identity_migrator.rs b/polkadot/runtime/rococo/src/weights/runtime_common_identity_migrator.rs new file mode 100644 index 00000000000..cec357453b6 --- /dev/null +++ b/polkadot/runtime/rococo/src/weights/runtime_common_identity_migrator.rs @@ -0,0 +1,97 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Autogenerated weights for `runtime_common::identity_migrator` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-11-07, STEPS: `2`, REPEAT: `1`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `sbtb`, CPU: `13th Gen Intel(R) Core(TM) i7-1365U` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("rococo-dev")`, DB CACHE: 1024 + +// Executed Command: +// ./target/release/polkadot +// benchmark +// pallet +// --chain=rococo-dev +// --steps=2 +// --repeat=1 +// --pallet=runtime_common::identity_migrator +// --extrinsic=* +// --output=./migrator-release.rs + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `runtime_common::identity_migrator`. +pub struct WeightInfo(PhantomData); +impl runtime_common::identity_migrator::WeightInfo for WeightInfo { + /// Storage: `Identity::IdentityOf` (r:1 w:1) + /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7538), added: 10013, mode: `MaxEncodedLen`) + /// Storage: `Identity::SubsOf` (r:1 w:1) + /// Proof: `Identity::SubsOf` (`max_values`: None, `max_size`: Some(3258), added: 5733, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:2 w:2) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Dmp::DeliveryFeeFactor` (r:1 w:0) + /// Proof: `Dmp::DeliveryFeeFactor` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `XcmPallet::SupportedVersion` (r:1 w:0) + /// Proof: `XcmPallet::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueues` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueues` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueueHeads` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueueHeads` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Identity::SuperOf` (r:0 w:100) + /// Proof: `Identity::SuperOf` (`max_values`: None, `max_size`: Some(114), added: 2589, mode: `MaxEncodedLen`) + /// The range of component `r` is `[0, 20]`. + /// The range of component `s` is `[0, 100]`. + fn reap_identity(r: u32, s: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `7292 + r * (8 ±0) + s * (32 ±0)` + // Estimated: `11003 + r * (8 ±0) + s * (33 ±0)` + // Minimum execution time: 163_756_000 picoseconds. 
+ Weight::from_parts(158_982_500, 0) + .saturating_add(Weight::from_parts(0, 11003)) + // Standard Error: 1_143_629 + .saturating_add(Weight::from_parts(238_675, 0).saturating_mul(r.into())) + // Standard Error: 228_725 + .saturating_add(Weight::from_parts(1_529_645, 0).saturating_mul(s.into())) + .saturating_add(T::DbWeight::get().reads(8)) + .saturating_add(T::DbWeight::get().writes(5)) + .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(s.into()))) + .saturating_add(Weight::from_parts(0, 8).saturating_mul(r.into())) + .saturating_add(Weight::from_parts(0, 33).saturating_mul(s.into())) + } + /// Storage: `Identity::IdentityOf` (r:1 w:1) + /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7538), added: 10013, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Identity::SubsOf` (r:1 w:1) + /// Proof: `Identity::SubsOf` (`max_values`: None, `max_size`: Some(3258), added: 5733, mode: `MaxEncodedLen`) + fn poke_deposit() -> Weight { + // Proof Size summary in bytes: + // Measured: `7229` + // Estimated: `11003` + // Minimum execution time: 137_570_000 picoseconds. + Weight::from_parts(137_570_000, 0) + .saturating_add(Weight::from_parts(0, 11003)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(3)) + } +} diff --git a/polkadot/runtime/westend/src/impls.rs b/polkadot/runtime/westend/src/impls.rs new file mode 100644 index 00000000000..80105594965 --- /dev/null +++ b/polkadot/runtime/westend/src/impls.rs @@ -0,0 +1,158 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +use crate::xcm_config; +use frame_support::pallet_prelude::DispatchResult; +use frame_system::RawOrigin; +use parity_scale_codec::{Decode, Encode}; +use primitives::Balance; +use runtime_common::identity_migrator::{OnReapIdentity, WeightInfo}; +use sp_std::{marker::PhantomData, prelude::*}; +use westend_runtime_constants::currency::*; +use xcm::{latest::prelude::*, VersionedMultiLocation, VersionedXcm}; +use xcm_executor::traits::TransactAsset; + +/// A type containing the encoding of the People Chain pallets in its runtime. Used to construct any +/// remote calls. The codec index must correspond to the index of `IdentityMigrator` in the +/// `construct_runtime` of the remote chain. +#[derive(Encode, Decode)] +enum PeopleRuntimePallets { + #[codec(index = 248)] + IdentityMigrator(IdentityMigratorCalls), +} + +/// Call encoding for the calls needed from the Identity Migrator pallet. +#[derive(Encode, Decode)] +enum IdentityMigratorCalls { + #[codec(index = 1)] + PokeDeposit(AccountId), +} + +/// Type that implements `OnReapIdentity` that will send the deposit needed to store the same +/// information on a parachain, sends the deposit there, and then updates it. 
+pub struct ToParachainIdentityReaper(PhantomData<(Runtime, AccountId)>); +impl ToParachainIdentityReaper { + /// Calculate the balance needed on the remote chain based on the `IdentityInfo` and `Subs` on + /// this chain. The total includes: + /// + /// - Identity basic deposit + /// - `IdentityInfo` byte deposit + /// - Sub accounts deposit + /// - 2x existential deposit (1 for account existence, 1 such that the user can transact) + fn calculate_remote_deposit(bytes: u32, subs: u32) -> Balance { + // Remote deposit constants. Parachain uses `deposit / 100` + // Source: + // https://github.com/paritytech/polkadot-sdk/blob/a146918/cumulus/parachains/common/src/westend.rs#L28 + // + // Parachain Deposit Configuration: + // + // pub const BasicDeposit: Balance = deposit(1, 17); + // pub const ByteDeposit: Balance = deposit(0, 1); + // pub const SubAccountDeposit: Balance = deposit(1, 53); + // pub const EXISTENTIAL_DEPOSIT: Balance = constants::currency::EXISTENTIAL_DEPOSIT / 10; + let para_basic_deposit = deposit(1, 17) / 100; + let para_byte_deposit = deposit(0, 1) / 100; + let para_sub_account_deposit = deposit(1, 53) / 100; + let para_existential_deposit = EXISTENTIAL_DEPOSIT / 10; + + // pallet deposits + let id_deposit = + para_basic_deposit.saturating_add(para_byte_deposit.saturating_mul(bytes as Balance)); + let subs_deposit = para_sub_account_deposit.saturating_mul(subs as Balance); + + id_deposit + .saturating_add(subs_deposit) + .saturating_add(para_existential_deposit.saturating_mul(2)) + } +} + +impl OnReapIdentity for ToParachainIdentityReaper +where + Runtime: frame_system::Config + pallet_xcm::Config, + AccountId: Into<[u8; 32]> + Clone + Encode, +{ + fn on_reap_identity(who: &AccountId, fields: u32, subs: u32) -> DispatchResult { + use crate::{ + impls::IdentityMigratorCalls::PokeDeposit, + weights::runtime_common_identity_migrator::WeightInfo as MigratorWeights, + }; + + let total_to_send = Self::calculate_remote_deposit(fields, subs); + + // define asset / destination from relay perspective + let wnd = MultiAsset { id: Concrete(Here.into_location()), fun: Fungible(total_to_send) }; + // People Chain: ParaId 1004 + let destination: MultiLocation = MultiLocation::new(0, Parachain(1004)); + + // Do `check_out` accounting since the XCM Executor's `InitiateTeleport` doesn't support + // unpaid teleports. + + // check out + xcm_config::LocalAssetTransactor::can_check_out( + &destination, + &wnd, + // not used in AssetTransactor + &XcmContext { origin: None, message_id: [0; 32], topic: None }, + ) + .map_err(|_| pallet_xcm::Error::::CannotCheckOutTeleport)?; + xcm_config::LocalAssetTransactor::check_out( + &destination, + &wnd, + // not used in AssetTransactor + &XcmContext { origin: None, message_id: [0; 32], topic: None }, + ); + + // reanchor + let wnd_reanchored: MultiAssets = vec![MultiAsset { + id: Concrete(MultiLocation::new(1, Here)), + fun: Fungible(total_to_send), + }] + .into(); + + let poke = PeopleRuntimePallets::::IdentityMigrator(PokeDeposit(who.clone())); + let remote_weight_limit = MigratorWeights::::poke_deposit().saturating_mul(2); + + // Actual program to execute on People Chain. + let program: Xcm<()> = Xcm(vec![ + // Unpaid as this is constructed by the system, once per user. The user shouldn't have + // their balance reduced by teleport fees for the favor of migrating. + UnpaidExecution { weight_limit: Unlimited, check_origin: None }, + // Receive the asset into holding. + ReceiveTeleportedAsset(wnd_reanchored), + // Deposit into the user's account. 
+ DepositAsset { + assets: Wild(AllCounted(1)), + beneficiary: Junction::AccountId32 { network: None, id: who.clone().into() } + .into_location() + .into(), + }, + // Poke the deposit to reserve the appropriate amount on the parachain. + Transact { + origin_kind: OriginKind::Superuser, + require_weight_at_most: remote_weight_limit, + call: poke.encode().into(), + }, + ]); + + // send + let _ = >::send( + RawOrigin::Root.into(), + Box::new(VersionedMultiLocation::V3(destination)), + Box::new(VersionedXcm::V3(program)), + )?; + Ok(()) + } +} diff --git a/polkadot/runtime/westend/src/lib.rs b/polkadot/runtime/westend/src/lib.rs index d3862aff257..2e8394b0ee4 100644 --- a/polkadot/runtime/westend/src/lib.rs +++ b/polkadot/runtime/westend/src/lib.rs @@ -31,7 +31,7 @@ use frame_support::{ genesis_builder_helper::{build_config, create_default_config}, parameter_types, traits::{ - fungible::HoldConsideration, ConstU32, EitherOf, EitherOfDiverse, Everything, + fungible::HoldConsideration, ConstU32, Contains, EitherOf, EitherOfDiverse, EverythingBut, InstanceFilter, KeyOwnerProofSystem, LinearStoragePrice, ProcessMessage, ProcessMessageError, WithdrawReasons, }, @@ -56,7 +56,7 @@ use primitives::{ use runtime_common::{ assigned_slots, auctions, crowdloan, elections::OnChainAccuracy, - impl_runtime_weights, + identity_migrator, impl_runtime_weights, impls::{ LocatableAssetConverter, ToAuthor, VersionedLocatableAsset, VersionedMultiLocationConverter, }, @@ -119,6 +119,10 @@ mod bag_thresholds; mod weights; pub mod xcm_config; +// Implemented types. +mod impls; +use impls::ToParachainIdentityReaper; + // Governance and configurations. pub mod governance; use governance::{ @@ -161,13 +165,24 @@ pub fn native_version() -> NativeVersion { NativeVersion { runtime_version: VERSION, can_author_with: Default::default() } } +/// A type to identify calls to the Identity pallet. These will be filtered to prevent invocation, +/// locking the state of the pallet and preventing further updates to identities and sub-identities. +/// The locked state will be the genesis state of a new system chain and then removed from the Relay +/// Chain. +pub struct IsIdentityCall; +impl Contains for IsIdentityCall { + fn contains(c: &RuntimeCall) -> bool { + matches!(c, RuntimeCall::Identity(_)) + } +} + parameter_types! { pub const Version: RuntimeVersion = VERSION; pub const SS58Prefix: u8 = 42; } impl frame_system::Config for Runtime { - type BaseCallFilter = Everything; + type BaseCallFilter = EverythingBut; type BlockWeights = BlockWeights; type BlockLength = BlockLength; type RuntimeOrigin = RuntimeOrigin; @@ -1328,6 +1343,14 @@ impl auctions::Config for Runtime { type WeightInfo = weights::runtime_common_auctions::WeightInfo; } +impl identity_migrator::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + // To be changed to `EnsureSigned` once there is a People Chain to migrate to. + type Reaper = EnsureRoot; + type ReapIdentityHandler = ToParachainIdentityReaper; + type WeightInfo = weights::runtime_common_identity_migrator::WeightInfo; +} + parameter_types! { pub const PoolsPalletId: PalletId = PalletId(*b"py/nopls"); pub const MaxPointsToBalance: u8 = 10; @@ -1491,6 +1514,9 @@ construct_runtime! { // Root testing pallet. RootTesting: pallet_root_testing::{Pallet, Call, Storage, Event} = 102, + + // Pallet for migrating Identity to a parachain. To be removed post-migration. 
+ IdentityMigrator: identity_migrator::{Pallet, Call, Event} = 248, } } @@ -1587,6 +1613,7 @@ mod benches { [runtime_common::assigned_slots, AssignedSlots] [runtime_common::auctions, Auctions] [runtime_common::crowdloan, Crowdloan] + [runtime_common::identity_migrator, IdentityMigrator] [runtime_common::paras_registrar, Registrar] [runtime_common::slots, Slots] [runtime_parachains::configuration, Configuration] diff --git a/polkadot/runtime/westend/src/weights/mod.rs b/polkadot/runtime/westend/src/weights/mod.rs index 9ae6798d70b..3841579088a 100644 --- a/polkadot/runtime/westend/src/weights/mod.rs +++ b/polkadot/runtime/westend/src/weights/mod.rs @@ -46,6 +46,7 @@ pub mod pallet_xcm; pub mod runtime_common_assigned_slots; pub mod runtime_common_auctions; pub mod runtime_common_crowdloan; +pub mod runtime_common_identity_migrator; pub mod runtime_common_paras_registrar; pub mod runtime_common_slots; pub mod runtime_parachains_configuration; diff --git a/polkadot/runtime/westend/src/weights/runtime_common_identity_migrator.rs b/polkadot/runtime/westend/src/weights/runtime_common_identity_migrator.rs new file mode 100644 index 00000000000..cec357453b6 --- /dev/null +++ b/polkadot/runtime/westend/src/weights/runtime_common_identity_migrator.rs @@ -0,0 +1,97 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Autogenerated weights for `runtime_common::identity_migrator` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-11-07, STEPS: `2`, REPEAT: `1`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `sbtb`, CPU: `13th Gen Intel(R) Core(TM) i7-1365U` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("rococo-dev")`, DB CACHE: 1024 + +// Executed Command: +// ./target/release/polkadot +// benchmark +// pallet +// --chain=rococo-dev +// --steps=2 +// --repeat=1 +// --pallet=runtime_common::identity_migrator +// --extrinsic=* +// --output=./migrator-release.rs + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `runtime_common::identity_migrator`. 
+pub struct WeightInfo(PhantomData); +impl runtime_common::identity_migrator::WeightInfo for WeightInfo { + /// Storage: `Identity::IdentityOf` (r:1 w:1) + /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7538), added: 10013, mode: `MaxEncodedLen`) + /// Storage: `Identity::SubsOf` (r:1 w:1) + /// Proof: `Identity::SubsOf` (`max_values`: None, `max_size`: Some(3258), added: 5733, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:2 w:2) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Dmp::DeliveryFeeFactor` (r:1 w:0) + /// Proof: `Dmp::DeliveryFeeFactor` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `XcmPallet::SupportedVersion` (r:1 w:0) + /// Proof: `XcmPallet::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueues` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueues` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueueHeads` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueueHeads` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Identity::SuperOf` (r:0 w:100) + /// Proof: `Identity::SuperOf` (`max_values`: None, `max_size`: Some(114), added: 2589, mode: `MaxEncodedLen`) + /// The range of component `r` is `[0, 20]`. + /// The range of component `s` is `[0, 100]`. + fn reap_identity(r: u32, s: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `7292 + r * (8 ±0) + s * (32 ±0)` + // Estimated: `11003 + r * (8 ±0) + s * (33 ±0)` + // Minimum execution time: 163_756_000 picoseconds. + Weight::from_parts(158_982_500, 0) + .saturating_add(Weight::from_parts(0, 11003)) + // Standard Error: 1_143_629 + .saturating_add(Weight::from_parts(238_675, 0).saturating_mul(r.into())) + // Standard Error: 228_725 + .saturating_add(Weight::from_parts(1_529_645, 0).saturating_mul(s.into())) + .saturating_add(T::DbWeight::get().reads(8)) + .saturating_add(T::DbWeight::get().writes(5)) + .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(s.into()))) + .saturating_add(Weight::from_parts(0, 8).saturating_mul(r.into())) + .saturating_add(Weight::from_parts(0, 33).saturating_mul(s.into())) + } + /// Storage: `Identity::IdentityOf` (r:1 w:1) + /// Proof: `Identity::IdentityOf` (`max_values`: None, `max_size`: Some(7538), added: 10013, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Identity::SubsOf` (r:1 w:1) + /// Proof: `Identity::SubsOf` (`max_values`: None, `max_size`: Some(3258), added: 5733, mode: `MaxEncodedLen`) + fn poke_deposit() -> Weight { + // Proof Size summary in bytes: + // Measured: `7229` + // Estimated: `11003` + // Minimum execution time: 137_570_000 picoseconds. 
+ Weight::from_parts(137_570_000, 0) + .saturating_add(Weight::from_parts(0, 11003)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(3)) + } +} diff --git a/substrate/frame/identity/src/lib.rs b/substrate/frame/identity/src/lib.rs index 264ea3ddb41..133f9eeb4be 100644 --- a/substrate/frame/identity/src/lib.rs +++ b/substrate/frame/identity/src/lib.rs @@ -79,8 +79,10 @@ mod tests; mod types; pub mod weights; +use codec::Encode; use frame_support::{ - pallet_prelude::DispatchResult, + ensure, + pallet_prelude::{DispatchError, DispatchResult}, traits::{BalanceStatus, Currency, Get, OnUnbalanced, ReservableCurrency}, }; use sp_runtime::traits::{AppendZerosInput, Hash, Saturating, StaticLookup, Zero}; @@ -395,8 +397,7 @@ pub mod pallet { ); let (old_deposit, old_ids) = >::get(&sender); - let new_deposit = - T::SubAccountDeposit::get().saturating_mul(>::from(subs.len() as u32)); + let new_deposit = Self::subs_deposit(subs.len() as u32); let not_other_sub = subs.iter().filter_map(|i| SuperOf::::get(&i.0)).all(|i| i.0 == sender); @@ -898,6 +899,26 @@ impl Pallet { .collect() } + /// Calculate the deposit required for a number of `sub` accounts. + fn subs_deposit(subs: u32) -> BalanceOf { + T::SubAccountDeposit::get().saturating_mul(>::from(subs)) + } + + /// Take the `current` deposit that `who` is holding, and update it to a `new` one. + fn rejig_deposit( + who: &T::AccountId, + current: BalanceOf, + new: BalanceOf, + ) -> DispatchResult { + if new > current { + T::Currency::reserve(who, new - current)?; + } else if new < current { + let err_amount = T::Currency::unreserve(who, current - new); + debug_assert!(err_amount.is_zero()); + } + Ok(()) + } + /// Check if the account has corresponding identity information by the identity field. pub fn has_identity( who: &T::AccountId, @@ -906,4 +927,110 @@ impl Pallet { IdentityOf::::get(who) .map_or(false, |registration| (registration.info.has_identity(fields))) } + + /// Reap an identity, clearing associated storage items and refunding any deposits. This + /// function is very similar to (a) `clear_identity`, but called on a `target` account instead + /// of self; and (b) `kill_identity`, but without imposing a slash. + /// + /// Parameters: + /// - `target`: The account for which to reap identity state. + /// + /// Return type is a tuple of the number of registrars, `IdentityInfo` bytes, and sub accounts, + /// respectively. + /// + /// NOTE: This function is here temporarily for migration of Identity info from the Polkadot + /// Relay Chain into a system parachain. It will be removed after the migration. + pub fn reap_identity(who: &T::AccountId) -> Result<(u32, u32, u32), DispatchError> { + // `take` any storage items keyed by `target` + // identity + let id = >::take(&who).ok_or(Error::::NotNamed)?; + let registrars = id.judgements.len() as u32; + let encoded_byte_size = id.info.encoded_size() as u32; + + // subs + let (subs_deposit, sub_ids) = >::take(&who); + let actual_subs = sub_ids.len() as u32; + for sub in sub_ids.iter() { + >::remove(sub); + } + + // unreserve any deposits + let deposit = id.total_deposit().saturating_add(subs_deposit); + let err_amount = T::Currency::unreserve(&who, deposit); + debug_assert!(err_amount.is_zero()); + Ok((registrars, encoded_byte_size, actual_subs)) + } + + /// Update the deposits held by `target` for its identity info. + /// + /// Parameters: + /// - `target`: The account for which to update deposits. 
+ /// + /// Return type is a tuple of the new Identity and Subs deposits, respectively. + /// + /// NOTE: This function is here temporarily for migration of Identity info from the Polkadot + /// Relay Chain into a system parachain. It will be removed after the migration. + pub fn poke_deposit( + target: &T::AccountId, + ) -> Result<(BalanceOf, BalanceOf), DispatchError> { + // Identity Deposit + let new_id_deposit = IdentityOf::::try_mutate( + &target, + |registration| -> Result, DispatchError> { + let reg = registration.as_mut().ok_or(Error::::NoIdentity)?; + // Calculate what deposit should be + let encoded_byte_size = reg.info.encoded_size() as u32; + let byte_deposit = + T::ByteDeposit::get().saturating_mul(>::from(encoded_byte_size)); + let new_id_deposit = T::BasicDeposit::get().saturating_add(byte_deposit); + + // Update account + Self::rejig_deposit(&target, reg.deposit, new_id_deposit)?; + + reg.deposit = new_id_deposit; + Ok(new_id_deposit) + }, + )?; + + // Subs Deposit + let new_subs_deposit = SubsOf::::try_mutate( + &target, + |(current_subs_deposit, subs_of)| -> Result, DispatchError> { + let new_subs_deposit = Self::subs_deposit(subs_of.len() as u32); + Self::rejig_deposit(&target, *current_subs_deposit, new_subs_deposit)?; + *current_subs_deposit = new_subs_deposit; + Ok(new_subs_deposit) + }, + )?; + Ok((new_id_deposit, new_subs_deposit)) + } + + /// Set an identity with zero deposit. Only used for benchmarking that involves `rejig_deposit`. + #[cfg(feature = "runtime-benchmarks")] + pub fn set_identity_no_deposit( + who: &T::AccountId, + info: T::IdentityInformation, + ) -> DispatchResult { + IdentityOf::::insert( + &who, + Registration { + judgements: Default::default(), + deposit: Zero::zero(), + info: info.clone(), + }, + ); + Ok(()) + } + + /// Set subs with zero deposit. Only used for benchmarking that involves `rejig_deposit`. 
+ #[cfg(feature = "runtime-benchmarks")] + pub fn set_sub_no_deposit(who: &T::AccountId, sub: T::AccountId) -> DispatchResult { + use frame_support::BoundedVec; + let subs = BoundedVec::<_, T::MaxSubAccounts>::try_from(vec![sub]).unwrap(); + SubsOf::::insert::< + &T::AccountId, + (BalanceOf, BoundedVec), + >(&who, (Zero::zero(), subs)); + Ok(()) + } } diff --git a/substrate/frame/identity/src/tests.rs b/substrate/frame/identity/src/tests.rs index 71192ea65a8..78074df933a 100644 --- a/substrate/frame/identity/src/tests.rs +++ b/substrate/frame/identity/src/tests.rs @@ -712,3 +712,70 @@ fn test_has_identity() { )); }); } + +#[test] +fn reap_identity_works() { + new_test_ext().execute_with(|| { + let ten_info = ten(); + assert_ok!(Identity::set_identity(RuntimeOrigin::signed(10), Box::new(ten_info.clone()))); + assert_ok!(Identity::set_subs( + RuntimeOrigin::signed(10), + vec![(20, Data::Raw(vec![40; 1].try_into().unwrap()))] + )); + // deposit is correct + let id_deposit = id_deposit(&ten_info); + let subs_deposit: u64 = <::SubAccountDeposit as Get>::get(); + assert_eq!(Balances::free_balance(10), 1000 - id_deposit - subs_deposit); + // reap + assert_ok!(Identity::reap_identity(&10)); + // no identity or subs + assert!(Identity::identity(10).is_none()); + assert!(Identity::super_of(20).is_none()); + // balance is unreserved + assert_eq!(Balances::free_balance(10), 1000); + }); +} + +#[test] +fn poke_deposit_works() { + new_test_ext().execute_with(|| { + let ten_info = ten(); + // Set a custom registration with 0 deposit + IdentityOf::::insert( + &10, + Registration { + judgements: BoundedVec::default(), + deposit: Zero::zero(), + info: ten_info.clone(), + }, + ); + assert!(Identity::identity(10).is_some()); + // Set a sub with zero deposit + SubsOf::::insert::<&u64, (u64, BoundedVec>)>( + &10, + (0, vec![20].try_into().unwrap()), + ); + SuperOf::::insert(&20, (&10, Data::Raw(vec![1; 1].try_into().unwrap()))); + // Balance is free + assert_eq!(Balances::free_balance(10), 1000); + + // poke + assert_ok!(Identity::poke_deposit(&10)); + + // free balance reduced correctly + let id_deposit = id_deposit(&ten_info); + let subs_deposit: u64 = <::SubAccountDeposit as Get>::get(); + assert_eq!(Balances::free_balance(10), 1000 - id_deposit - subs_deposit); + // new registration deposit is 10 + assert_eq!( + Identity::identity(&10), + Some(Registration { + judgements: BoundedVec::default(), + deposit: id_deposit, + info: ten() + }) + ); + // new subs deposit is 10 vvvvvvvvvvvv + assert_eq!(Identity::subs_of(10), (subs_deposit, vec![20].try_into().unwrap())); + }); +} -- GitLab From 0226b55f9ffff28821458a71c84eb5d3e75f1794 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?D=C3=B3nal=20Murray?= Date: Wed, 15 Nov 2023 15:01:55 +0000 Subject: [PATCH 060/199] Add `collectives-westend` and `glutton-westend` runtimes (#2024) Add collectives and glutton parachain westend runtimes to prepare for #1737. The removal of system parachain native runtimes #1737 is blocked until chainspecs and runtime APIs can be dealt with cleanly (merge of #1256 and follow up PRs). In the meantime, these additions are ready to be merged to `master`, so I have separated them out into this PR. Also marked `bridge-hub-westend` as unimplemented in line with [this issue](https://github.com/paritytech/parity-bridges-common/issues/2602). 
TODO - [x] add to `command-bot` benchmarks - [x] add to `command-bot-scripts` benchmarks - [x] generate weights --------- Co-authored-by: joe petrowski <25483142+joepetrowski@users.noreply.github.com> Co-authored-by: Muharem Co-authored-by: command-bot <> Co-authored-by: Oliver Tale-Yazdi Co-authored-by: Branislav Kontur --- .../build-and-attach-release-runtimes.yml | 2 + .gitlab/pipeline/check.yml | 13 + .gitlab/pipeline/short-benchmarks.yml | 10 + Cargo.lock | 134 +++ Cargo.toml | 2 + .../collectives-westend/Cargo.toml | 230 ++++ .../collectives/collectives-westend/build.rs | 26 + .../collectives-westend/src/ambassador/mod.rs | 262 +++++ .../src/ambassador/origins.rs | 135 +++ .../src/ambassador/tracks.rs | 282 +++++ .../collectives-westend/src/fellowship/mod.rs | 238 ++++ .../src/fellowship/origins.rs | 247 ++++ .../src/fellowship/tracks.rs | 532 +++++++++ .../collectives-westend/src/impls.rs | 229 ++++ .../collectives-westend/src/lib.rs | 1023 +++++++++++++++++ .../src/weights/block_weights.rs | 53 + .../src/weights/cumulus_pallet_dmp_queue.rs | 131 +++ .../cumulus_pallet_parachain_system.rs | 80 ++ .../src/weights/cumulus_pallet_xcmp_queue.rs | 148 +++ .../src/weights/extrinsic_weights.rs | 53 + .../src/weights/frame_system.rs | 154 +++ .../collectives-westend/src/weights/mod.rs | 50 + .../src/weights/pallet_alliance.rs | 494 ++++++++ .../src/weights/pallet_balances.rs | 152 +++ .../src/weights/pallet_collator_selection.rs | 246 ++++ .../src/weights/pallet_collective.rs | 304 +++++ .../src/weights/pallet_collective_content.rs | 93 ++ .../pallet_core_fellowship_ambassador_core.rs | 223 ++++ .../pallet_core_fellowship_fellowship_core.rs | 222 ++++ .../src/weights/pallet_message_queue.rs | 179 +++ .../src/weights/pallet_multisig.rs | 164 +++ .../src/weights/pallet_preimage.rs | 232 ++++ .../src/weights/pallet_proxy.rs | 225 ++++ ...ranked_collective_ambassador_collective.rs | 177 +++ ...ranked_collective_fellowship_collective.rs | 176 +++ .../pallet_referenda_ambassador_referenda.rs | 536 +++++++++ .../pallet_referenda_fellowship_referenda.rs | 535 +++++++++ .../pallet_salary_ambassador_salary.rs | 190 +++ .../pallet_salary_fellowship_salary.rs | 189 +++ .../src/weights/pallet_scheduler.rs | 206 ++++ .../src/weights/pallet_session.rs | 80 ++ .../src/weights/pallet_timestamp.rs | 74 ++ .../src/weights/pallet_utility.rs | 101 ++ .../src/weights/pallet_xcm.rs | 323 ++++++ .../src/weights/paritydb_weights.rs | 63 + .../src/weights/rocksdb_weights.rs | 63 + .../collectives-westend/src/xcm_config.rs | 364 ++++++ .../glutton/glutton-westend/Cargo.toml | 138 +++ .../runtimes/glutton/glutton-westend/build.rs | 24 + .../glutton/glutton-westend/src/lib.rs | 532 +++++++++ .../cumulus_pallet_parachain_system.rs | 75 ++ .../src/weights/frame_system.rs | 153 +++ .../glutton-westend/src/weights/mod.rs | 19 + .../src/weights/pallet_glutton.rs | 178 +++ .../src/weights/pallet_message_queue.rs | 179 +++ .../src/weights/pallet_timestamp.rs | 73 ++ .../glutton/glutton-westend/src/xcm_config.rs | 92 ++ cumulus/parachains/testnets-common/Cargo.toml | 44 + cumulus/parachains/testnets-common/src/lib.rs | 30 + .../parachains/testnets-common/src/rococo.rs | 119 ++ .../parachains/testnets-common/src/westend.rs | 140 +++ .../parachains/testnets-common/src/wococo.rs | 17 + cumulus/polkadot-parachain/Cargo.toml | 6 + .../src/chain_spec/collectives.rs | 133 +++ .../src/chain_spec/glutton.rs | 69 ++ cumulus/polkadot-parachain/src/command.rs | 101 +- cumulus/polkadot-parachain/src/service.rs | 30 + 
cumulus/scripts/benchmarks.sh | 2 + 68 files changed, 11792 insertions(+), 7 deletions(-) create mode 100644 cumulus/parachains/runtimes/collectives/collectives-westend/Cargo.toml create mode 100644 cumulus/parachains/runtimes/collectives/collectives-westend/build.rs create mode 100644 cumulus/parachains/runtimes/collectives/collectives-westend/src/ambassador/mod.rs create mode 100644 cumulus/parachains/runtimes/collectives/collectives-westend/src/ambassador/origins.rs create mode 100644 cumulus/parachains/runtimes/collectives/collectives-westend/src/ambassador/tracks.rs create mode 100644 cumulus/parachains/runtimes/collectives/collectives-westend/src/fellowship/mod.rs create mode 100644 cumulus/parachains/runtimes/collectives/collectives-westend/src/fellowship/origins.rs create mode 100644 cumulus/parachains/runtimes/collectives/collectives-westend/src/fellowship/tracks.rs create mode 100644 cumulus/parachains/runtimes/collectives/collectives-westend/src/impls.rs create mode 100644 cumulus/parachains/runtimes/collectives/collectives-westend/src/lib.rs create mode 100644 cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/block_weights.rs create mode 100644 cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/cumulus_pallet_dmp_queue.rs create mode 100644 cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/cumulus_pallet_parachain_system.rs create mode 100644 cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/cumulus_pallet_xcmp_queue.rs create mode 100644 cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/extrinsic_weights.rs create mode 100644 cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/frame_system.rs create mode 100644 cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/mod.rs create mode 100644 cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_alliance.rs create mode 100644 cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_balances.rs create mode 100644 cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_collator_selection.rs create mode 100644 cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_collective.rs create mode 100644 cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_collective_content.rs create mode 100644 cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_core_fellowship_ambassador_core.rs create mode 100644 cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_core_fellowship_fellowship_core.rs create mode 100644 cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_message_queue.rs create mode 100644 cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_multisig.rs create mode 100644 cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_preimage.rs create mode 100644 cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_proxy.rs create mode 100644 cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_ranked_collective_ambassador_collective.rs create mode 100644 cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_ranked_collective_fellowship_collective.rs create mode 100644 cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_referenda_ambassador_referenda.rs 
create mode 100644 cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_referenda_fellowship_referenda.rs create mode 100644 cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_salary_ambassador_salary.rs create mode 100644 cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_salary_fellowship_salary.rs create mode 100644 cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_scheduler.rs create mode 100644 cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_session.rs create mode 100644 cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_timestamp.rs create mode 100644 cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_utility.rs create mode 100644 cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_xcm.rs create mode 100644 cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/paritydb_weights.rs create mode 100644 cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/rocksdb_weights.rs create mode 100644 cumulus/parachains/runtimes/collectives/collectives-westend/src/xcm_config.rs create mode 100644 cumulus/parachains/runtimes/glutton/glutton-westend/Cargo.toml create mode 100644 cumulus/parachains/runtimes/glutton/glutton-westend/build.rs create mode 100644 cumulus/parachains/runtimes/glutton/glutton-westend/src/lib.rs create mode 100644 cumulus/parachains/runtimes/glutton/glutton-westend/src/weights/cumulus_pallet_parachain_system.rs create mode 100644 cumulus/parachains/runtimes/glutton/glutton-westend/src/weights/frame_system.rs create mode 100644 cumulus/parachains/runtimes/glutton/glutton-westend/src/weights/mod.rs create mode 100644 cumulus/parachains/runtimes/glutton/glutton-westend/src/weights/pallet_glutton.rs create mode 100644 cumulus/parachains/runtimes/glutton/glutton-westend/src/weights/pallet_message_queue.rs create mode 100644 cumulus/parachains/runtimes/glutton/glutton-westend/src/weights/pallet_timestamp.rs create mode 100644 cumulus/parachains/runtimes/glutton/glutton-westend/src/xcm_config.rs create mode 100644 cumulus/parachains/testnets-common/Cargo.toml create mode 100644 cumulus/parachains/testnets-common/src/lib.rs create mode 100644 cumulus/parachains/testnets-common/src/rococo.rs create mode 100644 cumulus/parachains/testnets-common/src/westend.rs create mode 100644 cumulus/parachains/testnets-common/src/wococo.rs diff --git a/.github/workflows/build-and-attach-release-runtimes.yml b/.github/workflows/build-and-attach-release-runtimes.yml index c7cd4b34384..db0175c6855 100644 --- a/.github/workflows/build-and-attach-release-runtimes.yml +++ b/.github/workflows/build-and-attach-release-runtimes.yml @@ -19,6 +19,8 @@ jobs: - { name: asset-hub-westend, package: asset-hub-westend-runtime, path: cumulus/parachains/runtimes/assets/asset-hub-westend } - { name: bridge-hub-rococo, package: bridge-hub-rococo-runtime, path: cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo } - { name: contracts-rococo, package: contracts-rococo-runtime, path: cumulus/parachains/runtimes/contracts/contracts-rococo } + - { name: collectives-westend, package: collectives-westend-runtime, path: cumulus/parachains/runtimes/collectives/collectives-westend } + - { name: glutton-westend, package: glutton-westend-runtime, path: cumulus/parachains/runtimes/glutton/glutton-westend } build_config: # Release build has logging disabled and no dev 
features - { type: on-chain-release, opts: --features on-chain-release-build } diff --git a/.gitlab/pipeline/check.yml b/.gitlab/pipeline/check.yml index cbb3baf277c..4071fdf9758 100644 --- a/.gitlab/pipeline/check.yml +++ b/.gitlab/pipeline/check.yml @@ -177,6 +177,19 @@ check-runtime-migration-contracts-rococo: WASM: "contracts_rococo_runtime.compact.compressed.wasm" URI: "wss://rococo-contracts-rpc.polkadot.io:443" +# Check runtime migrations for Parity managed collectives chains +check-runtime-migration-collectives-westend: + stage: check + extends: + - .docker-env + - .test-pr-refs + - .check-runtime-migration + variables: + NETWORK: "collectives-westend" + PACKAGE: "collectives-westend-runtime" + WASM: "collectives_westend_runtime.compact.compressed.wasm" + URI: "wss://westend-collectives-rpc.polkadot.io:443" + find-fail-ci-phrase: stage: check variables: diff --git a/.gitlab/pipeline/short-benchmarks.yml b/.gitlab/pipeline/short-benchmarks.yml index 76c75e815ce..0218d3fdac0 100644 --- a/.gitlab/pipeline/short-benchmarks.yml +++ b/.gitlab/pipeline/short-benchmarks.yml @@ -94,7 +94,17 @@ short-benchmark-collectives-polkadot: variables: RUNTIME_CHAIN: collectives-polkadot-dev +short-benchmark-collectives-westend: + <<: *short-bench-cumulus + variables: + RUNTIME_CHAIN: collectives-westend-dev + short-benchmark-glutton-kusama: <<: *short-bench-cumulus variables: RUNTIME_CHAIN: glutton-kusama-dev-1300 + +short-benchmark-glutton-westend: + <<: *short-bench-cumulus + variables: + RUNTIME_CHAIN: glutton-westend-dev-1300 diff --git a/Cargo.lock b/Cargo.lock index b1a46a88f38..6b52e62d742 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3001,6 +3001,80 @@ dependencies = [ "substrate-wasm-builder", ] +[[package]] +name = "collectives-westend-runtime" +version = "1.0.0" +dependencies = [ + "cumulus-pallet-aura-ext", + "cumulus-pallet-dmp-queue", + "cumulus-pallet-parachain-system", + "cumulus-pallet-session-benchmarking", + "cumulus-pallet-xcm", + "cumulus-pallet-xcmp-queue", + "cumulus-primitives-core", + "cumulus-primitives-utility", + "frame-benchmarking", + "frame-executive", + "frame-support", + "frame-system", + "frame-system-benchmarking", + "frame-system-rpc-runtime-api", + "frame-try-runtime", + "hex-literal", + "log", + "pallet-alliance", + "pallet-aura", + "pallet-authorship", + "pallet-balances", + "pallet-collator-selection", + "pallet-collective", + "pallet-collective-content", + "pallet-core-fellowship", + "pallet-message-queue", + "pallet-multisig", + "pallet-preimage", + "pallet-proxy", + "pallet-ranked-collective", + "pallet-referenda", + "pallet-salary", + "pallet-scheduler", + "pallet-session", + "pallet-timestamp", + "pallet-transaction-payment", + "pallet-transaction-payment-rpc-runtime-api", + "pallet-utility", + "pallet-xcm", + "parachains-common", + "parity-scale-codec", + "polkadot-core-primitives", + "polkadot-parachain-primitives", + "polkadot-runtime-common", + "scale-info", + "smallvec", + "sp-api", + "sp-arithmetic", + "sp-block-builder", + "sp-consensus-aura", + "sp-core", + "sp-genesis-builder", + "sp-inherents", + "sp-io", + "sp-offchain", + "sp-runtime", + "sp-session", + "sp-std 8.0.0", + "sp-storage 13.0.0", + "sp-transaction-pool", + "sp-version", + "staging-parachain-info", + "staging-xcm", + "staging-xcm-builder", + "staging-xcm-executor", + "substrate-wasm-builder", + "testnets-common", + "westend-runtime-constants", +] + [[package]] name = "color-eyre" version = "0.6.2" @@ -6280,6 +6354,51 @@ dependencies = [ "substrate-wasm-builder", ] +[[package]] +name = 
"glutton-westend-runtime" +version = "1.0.0" +dependencies = [ + "cumulus-pallet-aura-ext", + "cumulus-pallet-parachain-system", + "cumulus-pallet-xcm", + "cumulus-primitives-aura", + "cumulus-primitives-core", + "cumulus-primitives-timestamp", + "frame-benchmarking", + "frame-executive", + "frame-support", + "frame-system", + "frame-system-benchmarking", + "frame-system-rpc-runtime-api", + "frame-try-runtime", + "pallet-aura", + "pallet-glutton", + "pallet-message-queue", + "pallet-sudo", + "pallet-timestamp", + "parachains-common", + "parity-scale-codec", + "scale-info", + "sp-api", + "sp-block-builder", + "sp-consensus-aura", + "sp-core", + "sp-genesis-builder", + "sp-inherents", + "sp-offchain", + "sp-runtime", + "sp-session", + "sp-std 8.0.0", + "sp-storage 13.0.0", + "sp-transaction-pool", + "sp-version", + "staging-parachain-info", + "staging-xcm", + "staging-xcm-builder", + "staging-xcm-executor", + "substrate-wasm-builder", +] + [[package]] name = "group" version = "0.12.1" @@ -12872,6 +12991,7 @@ dependencies = [ "bridge-hub-westend-runtime", "clap 4.4.6", "collectives-polkadot-runtime", + "collectives-westend-runtime", "color-print", "contracts-rococo-runtime", "cumulus-client-cli", @@ -12889,6 +13009,7 @@ dependencies = [ "frame-benchmarking-cli", "futures", "glutton-runtime", + "glutton-westend-runtime", "hex-literal", "jsonrpsee", "log", @@ -19217,6 +19338,19 @@ dependencies = [ "sp-weights", ] +[[package]] +name = "testnets-common" +version = "1.0.0" +dependencies = [ + "frame-support", + "polkadot-core-primitives", + "rococo-runtime-constants", + "smallvec", + "sp-runtime", + "substrate-wasm-builder", + "westend-runtime-constants", +] + [[package]] name = "textwrap" version = "0.16.0" diff --git a/Cargo.toml b/Cargo.toml index f9779681cae..27351c09581 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -94,8 +94,10 @@ members = [ "cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend", "cumulus/parachains/runtimes/bridge-hubs/test-utils", "cumulus/parachains/runtimes/collectives/collectives-polkadot", + "cumulus/parachains/runtimes/collectives/collectives-westend", "cumulus/parachains/runtimes/contracts/contracts-rococo", "cumulus/parachains/runtimes/glutton/glutton-kusama", + "cumulus/parachains/runtimes/glutton/glutton-westend", "cumulus/parachains/runtimes/starters/seedling", "cumulus/parachains/runtimes/starters/shell", "cumulus/parachains/runtimes/test-utils", diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/Cargo.toml b/cumulus/parachains/runtimes/collectives/collectives-westend/Cargo.toml new file mode 100644 index 00000000000..94f2de33e90 --- /dev/null +++ b/cumulus/parachains/runtimes/collectives/collectives-westend/Cargo.toml @@ -0,0 +1,230 @@ +[package] +name = "collectives-westend-runtime" +version = "1.0.0" +authors.workspace = true +edition.workspace = true +license = "Apache-2.0" +description = "Westend Collectives Parachain Runtime" + +[dependencies] +codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive", "max-encoded-len"] } +hex-literal = { version = "0.4.1" } +log = { version = "0.4.20", default-features = false } +scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +smallvec = "1.11.0" + +# Substrate +frame-benchmarking = { path = "../../../../../substrate/frame/benchmarking", default-features = false, optional = true} +frame-executive = { path = "../../../../../substrate/frame/executive", default-features = false} +frame-support = { path = 
"../../../../../substrate/frame/support", default-features = false} +frame-system = { path = "../../../../../substrate/frame/system", default-features = false} +frame-system-benchmarking = { path = "../../../../../substrate/frame/system/benchmarking", default-features = false, optional = true} +frame-system-rpc-runtime-api = { path = "../../../../../substrate/frame/system/rpc/runtime-api", default-features = false} +frame-try-runtime = { path = "../../../../../substrate/frame/try-runtime", default-features = false, optional = true} +pallet-alliance = { path = "../../../../../substrate/frame/alliance", default-features = false} +pallet-aura = { path = "../../../../../substrate/frame/aura", default-features = false} +pallet-authorship = { path = "../../../../../substrate/frame/authorship", default-features = false} +pallet-balances = { path = "../../../../../substrate/frame/balances", default-features = false} +pallet-collective = { path = "../../../../../substrate/frame/collective", default-features = false} +pallet-multisig = { path = "../../../../../substrate/frame/multisig", default-features = false} +pallet-preimage = { path = "../../../../../substrate/frame/preimage", default-features = false } +pallet-proxy = { path = "../../../../../substrate/frame/proxy", default-features = false} +pallet-scheduler = { path = "../../../../../substrate/frame/scheduler", default-features = false } +pallet-session = { path = "../../../../../substrate/frame/session", default-features = false} +pallet-timestamp = { path = "../../../../../substrate/frame/timestamp", default-features = false} +pallet-transaction-payment = { path = "../../../../../substrate/frame/transaction-payment", default-features = false} +pallet-transaction-payment-rpc-runtime-api = { path = "../../../../../substrate/frame/transaction-payment/rpc/runtime-api", default-features = false} +pallet-utility = { path = "../../../../../substrate/frame/utility", default-features = false} +pallet-referenda = { path = "../../../../../substrate/frame/referenda", default-features = false} +pallet-ranked-collective = { path = "../../../../../substrate/frame/ranked-collective", default-features = false} +pallet-core-fellowship = { path = "../../../../../substrate/frame/core-fellowship", default-features = false} +pallet-salary = { path = "../../../../../substrate/frame/salary", default-features = false} +sp-api = { path = "../../../../../substrate/primitives/api", default-features = false} +sp-arithmetic = { path = "../../../../../substrate/primitives/arithmetic", default-features = false } +sp-block-builder = { path = "../../../../../substrate/primitives/block-builder", default-features = false} +sp-consensus-aura = { path = "../../../../../substrate/primitives/consensus/aura", default-features = false} +sp-core = { path = "../../../../../substrate/primitives/core", default-features = false} +sp-genesis-builder = { path = "../../../../../substrate/primitives/genesis-builder", default-features = false } +sp-inherents = { path = "../../../../../substrate/primitives/inherents", default-features = false} +sp-offchain = { path = "../../../../../substrate/primitives/offchain", default-features = false} +sp-runtime = { path = "../../../../../substrate/primitives/runtime", default-features = false} +sp-session = { path = "../../../../../substrate/primitives/session", default-features = false} +sp-std = { path = "../../../../../substrate/primitives/std", default-features = false} +sp-storage = { path = "../../../../../substrate/primitives/storage", 
default-features = false} +sp-transaction-pool = { path = "../../../../../substrate/primitives/transaction-pool", default-features = false} +sp-version = { path = "../../../../../substrate/primitives/version", default-features = false} + +# Polkadot +pallet-xcm = { path = "../../../../../polkadot/xcm/pallet-xcm", default-features = false} +polkadot-core-primitives = { path = "../../../../../polkadot/core-primitives", default-features = false} +polkadot-parachain-primitives = { path = "../../../../../polkadot/parachain", default-features = false} +polkadot-runtime-common = { path = "../../../../../polkadot/runtime/common", default-features = false} +xcm = { package = "staging-xcm", path = "../../../../../polkadot/xcm", default-features = false} +xcm-builder = { package = "staging-xcm-builder", path = "../../../../../polkadot/xcm/xcm-builder", default-features = false} +xcm-executor = { package = "staging-xcm-executor", path = "../../../../../polkadot/xcm/xcm-executor", default-features = false} +westend-runtime-constants = { path = "../../../../../polkadot/runtime/westend/constants", default-features = false} + +# Cumulus +cumulus-pallet-aura-ext = { path = "../../../../pallets/aura-ext", default-features = false } +pallet-message-queue = { path = "../../../../../substrate/frame/message-queue", default-features = false } +cumulus-pallet-dmp-queue = { path = "../../../../pallets/dmp-queue", default-features = false } +cumulus-pallet-parachain-system = { path = "../../../../pallets/parachain-system", default-features = false, features = ["parameterized-consensus-hook",] } +cumulus-pallet-session-benchmarking = { path = "../../../../pallets/session-benchmarking", default-features = false} +cumulus-pallet-xcm = { path = "../../../../pallets/xcm", default-features = false } +cumulus-pallet-xcmp-queue = { path = "../../../../pallets/xcmp-queue", default-features = false } +cumulus-primitives-core = { path = "../../../../primitives/core", default-features = false } +cumulus-primitives-utility = { path = "../../../../primitives/utility", default-features = false } +pallet-collator-selection = { path = "../../../../pallets/collator-selection", default-features = false } +pallet-collective-content = { path = "../../../pallets/collective-content", default-features = false } +parachain-info = { package = "staging-parachain-info", path = "../../../pallets/parachain-info", default-features = false } +parachains-common = { path = "../../../common", default-features = false } +testnets-common = { path = "../../../testnets-common", default-features = false } + +[build-dependencies] +substrate-wasm-builder = { path = "../../../../../substrate/utils/wasm-builder", optional = true } + +[dev-dependencies] +sp-io = { path = "../../../../../substrate/primitives/io", features = [ "std" ]} + +[features] +default = [ "std" ] +runtime-benchmarks = [ + "cumulus-pallet-dmp-queue/runtime-benchmarks", + "cumulus-pallet-parachain-system/runtime-benchmarks", + "cumulus-pallet-session-benchmarking/runtime-benchmarks", + "cumulus-pallet-xcmp-queue/runtime-benchmarks", + "cumulus-primitives-core/runtime-benchmarks", + "cumulus-primitives-utility/runtime-benchmarks", + "frame-benchmarking/runtime-benchmarks", + "frame-support/runtime-benchmarks", + "frame-system-benchmarking/runtime-benchmarks", + "frame-system/runtime-benchmarks", + "pallet-alliance/runtime-benchmarks", + "pallet-balances/runtime-benchmarks", + "pallet-collator-selection/runtime-benchmarks", + "pallet-collective-content/runtime-benchmarks", + 
"pallet-collective/runtime-benchmarks", + "pallet-core-fellowship/runtime-benchmarks", + "pallet-message-queue/runtime-benchmarks", + "pallet-multisig/runtime-benchmarks", + "pallet-preimage/runtime-benchmarks", + "pallet-proxy/runtime-benchmarks", + "pallet-ranked-collective/runtime-benchmarks", + "pallet-referenda/runtime-benchmarks", + "pallet-salary/runtime-benchmarks", + "pallet-scheduler/runtime-benchmarks", + "pallet-timestamp/runtime-benchmarks", + "pallet-utility/runtime-benchmarks", + "pallet-xcm/runtime-benchmarks", + "parachains-common/runtime-benchmarks", + "polkadot-parachain-primitives/runtime-benchmarks", + "polkadot-runtime-common/runtime-benchmarks", + "sp-runtime/runtime-benchmarks", + "testnets-common/runtime-benchmarks", + "xcm-builder/runtime-benchmarks", + "xcm-executor/runtime-benchmarks", +] +try-runtime = [ + "cumulus-pallet-aura-ext/try-runtime", + "cumulus-pallet-dmp-queue/try-runtime", + "cumulus-pallet-parachain-system/try-runtime", + "cumulus-pallet-xcm/try-runtime", + "cumulus-pallet-xcmp-queue/try-runtime", + "frame-executive/try-runtime", + "frame-support/try-runtime", + "frame-system/try-runtime", + "frame-try-runtime/try-runtime", + "pallet-alliance/try-runtime", + "pallet-aura/try-runtime", + "pallet-authorship/try-runtime", + "pallet-balances/try-runtime", + "pallet-collator-selection/try-runtime", + "pallet-collective-content/try-runtime", + "pallet-collective/try-runtime", + "pallet-core-fellowship/try-runtime", + "pallet-message-queue/try-runtime", + "pallet-multisig/try-runtime", + "pallet-preimage/try-runtime", + "pallet-proxy/try-runtime", + "pallet-ranked-collective/try-runtime", + "pallet-referenda/try-runtime", + "pallet-salary/try-runtime", + "pallet-scheduler/try-runtime", + "pallet-session/try-runtime", + "pallet-timestamp/try-runtime", + "pallet-transaction-payment/try-runtime", + "pallet-utility/try-runtime", + "pallet-xcm/try-runtime", + "parachain-info/try-runtime", + "polkadot-runtime-common/try-runtime", + "sp-runtime/try-runtime", +] +std = [ + "codec/std", + "cumulus-pallet-aura-ext/std", + "cumulus-pallet-dmp-queue/std", + "cumulus-pallet-parachain-system/std", + "cumulus-pallet-session-benchmarking/std", + "cumulus-pallet-xcm/std", + "cumulus-pallet-xcmp-queue/std", + "cumulus-primitives-core/std", + "cumulus-primitives-utility/std", + "frame-benchmarking?/std", + "frame-executive/std", + "frame-support/std", + "frame-system-benchmarking?/std", + "frame-system-rpc-runtime-api/std", + "frame-system/std", + "frame-try-runtime?/std", + "log/std", + "pallet-alliance/std", + "pallet-aura/std", + "pallet-authorship/std", + "pallet-balances/std", + "pallet-collator-selection/std", + "pallet-collective-content/std", + "pallet-collective/std", + "pallet-core-fellowship/std", + "pallet-message-queue/std", + "pallet-multisig/std", + "pallet-preimage/std", + "pallet-proxy/std", + "pallet-ranked-collective/std", + "pallet-referenda/std", + "pallet-salary/std", + "pallet-scheduler/std", + "pallet-session/std", + "pallet-timestamp/std", + "pallet-transaction-payment-rpc-runtime-api/std", + "pallet-transaction-payment/std", + "pallet-utility/std", + "pallet-xcm/std", + "parachain-info/std", + "parachains-common/std", + "polkadot-core-primitives/std", + "polkadot-parachain-primitives/std", + "polkadot-runtime-common/std", + "scale-info/std", + "sp-api/std", + "sp-arithmetic/std", + "sp-block-builder/std", + "sp-consensus-aura/std", + "sp-core/std", + "sp-genesis-builder/std", + "sp-inherents/std", + "sp-offchain/std", + "sp-runtime/std", + 
"sp-session/std", + "sp-std/std", + "sp-storage/std", + "sp-transaction-pool/std", + "sp-version/std", + "substrate-wasm-builder", + "testnets-common/std", + "westend-runtime-constants/std", + "xcm-builder/std", + "xcm-executor/std", + "xcm/std", +] + +experimental = [ "pallet-aura/experimental" ] diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/build.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/build.rs new file mode 100644 index 00000000000..60f8a125129 --- /dev/null +++ b/cumulus/parachains/runtimes/collectives/collectives-westend/build.rs @@ -0,0 +1,26 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#[cfg(feature = "std")] +fn main() { + substrate_wasm_builder::WasmBuilder::new() + .with_current_project() + .export_heap_base() + .import_memory() + .build() +} + +#[cfg(not(feature = "std"))] +fn main() {} diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/src/ambassador/mod.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/ambassador/mod.rs new file mode 100644 index 00000000000..18c1466bf36 --- /dev/null +++ b/cumulus/parachains/runtimes/collectives/collectives-westend/src/ambassador/mod.rs @@ -0,0 +1,262 @@ +// Copyright (C) 2022 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! The Ambassador Program. +//! +//! The module defines the following on-chain functionality of the Ambassador Program: +//! +//! - Managed set of program members, where every member has a [rank](ranks) +//! (via [AmbassadorCollective](pallet_ranked_collective)). +//! - Referendum functionality for the program members to propose, vote on, and execute +//! proposals on behalf of the members of a certain [rank](Origin) +//! (via [AmbassadorReferenda](pallet_referenda)). +//! - Managed content (charter, announcements) (via [pallet_collective_content]). +//! - Promotion and demotion periods, register of members' activity, and rank based salaries +//! (via [AmbassadorCore](pallet_core_fellowship)). +//! - Members' salaries (via [AmbassadorSalary](pallet_salary), requiring a member to be +//! imported or inducted into [AmbassadorCore](pallet_core_fellowship)). 
+ +pub mod origins; +mod tracks; + +use super::*; +use crate::xcm_config::{FellowshipAdminBodyId, WndAssetHub}; +use frame_support::traits::{EitherOf, MapSuccess, TryMapSuccess}; +pub use origins::pallet_origins as pallet_ambassador_origins; +use origins::pallet_origins::{ + EnsureAmbassadorsVoice, EnsureAmbassadorsVoiceFrom, EnsureHeadAmbassadorsVoice, Origin, +}; +use parachains_common::polkadot::account; +use sp_core::ConstU128; +use sp_runtime::traits::{CheckedReduceBy, ConstU16, ConvertToValue, Replace}; +use xcm::prelude::*; +use xcm_builder::{AliasesIntoAccountId32, PayOverXcm}; + +/// The Ambassador Program's member ranks. +pub mod ranks { + use pallet_ranked_collective::Rank; + + #[allow(dead_code)] + pub const CANDIDATE: Rank = 0; + pub const AMBASSADOR_TIER_1: Rank = 1; + pub const AMBASSADOR_TIER_2: Rank = 2; + pub const SENIOR_AMBASSADOR_TIER_3: Rank = 3; + pub const SENIOR_AMBASSADOR_TIER_4: Rank = 4; + pub const HEAD_AMBASSADOR_TIER_5: Rank = 5; + pub const HEAD_AMBASSADOR_TIER_6: Rank = 6; + pub const HEAD_AMBASSADOR_TIER_7: Rank = 7; + pub const MASTER_AMBASSADOR_TIER_8: Rank = 8; + pub const MASTER_AMBASSADOR_TIER_9: Rank = 9; +} + +impl pallet_ambassador_origins::Config for Runtime {} + +pub type AmbassadorCollectiveInstance = pallet_ranked_collective::Instance2; + +/// Demotion is by any of: +/// - Root can demote arbitrarily. +/// - the FellowshipAdmin origin (i.e. token holder referendum); +/// - a senior members vote by the rank two above the current rank. +pub type DemoteOrigin = EitherOf< + frame_system::EnsureRootWithSuccess>, + EitherOf< + MapSuccess< + EnsureXcm>, + Replace>, + >, + TryMapSuccess< + EnsureAmbassadorsVoiceFrom>, + CheckedReduceBy>, + >, + >, +>; + +/// Promotion and approval (rank-retention) is by any of: +/// - Root can promote arbitrarily. +/// - the FellowshipAdmin origin (i.e. token holder referendum); +/// - a senior members vote by the rank two above the new/current rank. +/// - a member of rank `5` or above can add a candidate (rank `0`). +pub type PromoteOrigin = EitherOf< + DemoteOrigin, + TryMapSuccess< + pallet_ranked_collective::EnsureMember< + Runtime, + AmbassadorCollectiveInstance, + { ranks::HEAD_AMBASSADOR_TIER_5 }, + >, + Replace>, + >, +>; + +impl pallet_ranked_collective::Config for Runtime { + type WeightInfo = weights::pallet_ranked_collective_ambassador_collective::WeightInfo; + type RuntimeEvent = RuntimeEvent; + type PromoteOrigin = PromoteOrigin; + type DemoteOrigin = DemoteOrigin; + type Polls = AmbassadorReferenda; + type MinRankOfClass = sp_runtime::traits::Identity; + type VoteWeight = pallet_ranked_collective::Linear; +} + +parameter_types! { + pub const AlarmInterval: BlockNumber = 1; + pub const SubmissionDeposit: Balance = 0; + pub const UndecidingTimeout: BlockNumber = 7 * DAYS; + // The Ambassador Referenda pallet account, used as a temporary place to deposit a slashed + // imbalance before teleport to the treasury. + pub AmbassadorPalletAccount: AccountId = account::AMBASSADOR_REFERENDA_PALLET_ID.into_account_truncating(); +} + +pub type AmbassadorReferendaInstance = pallet_referenda::Instance2; + +impl pallet_referenda::Config for Runtime { + type WeightInfo = weights::pallet_referenda_ambassador_referenda::WeightInfo; + type RuntimeCall = RuntimeCall; + type RuntimeEvent = RuntimeEvent; + type Scheduler = Scheduler; + type Currency = Balances; + // A proposal can be submitted by a member of the Ambassador Program of + // [ranks::SENIOR_AMBASSADOR_TIER_3] rank or higher. 
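+	// (`EnsureMember` succeeds only for a signed origin whose account holds at least the given
+	// rank in the collective, yielding that account id.)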
+ type SubmitOrigin = pallet_ranked_collective::EnsureMember< + Runtime, + AmbassadorCollectiveInstance, + { ranks::SENIOR_AMBASSADOR_TIER_3 }, + >; + type CancelOrigin = EitherOf, EnsureHeadAmbassadorsVoice>; + type KillOrigin = EitherOf, EnsureHeadAmbassadorsVoice>; + type Slash = ToParentTreasury; + type Votes = pallet_ranked_collective::Votes; + type Tally = pallet_ranked_collective::TallyOf; + type SubmissionDeposit = SubmissionDeposit; + type MaxQueued = ConstU32<20>; + type UndecidingTimeout = UndecidingTimeout; + type AlarmInterval = AlarmInterval; + type Tracks = tracks::TracksInfo; + type Preimages = Preimage; +} + +parameter_types! { + pub const AnnouncementLifetime: BlockNumber = 180 * DAYS; + pub const MaxAnnouncements: u32 = 50; +} + +pub type AmbassadorContentInstance = pallet_collective_content::Instance1; + +impl pallet_collective_content::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + type CharterOrigin = EitherOf, EnsureHeadAmbassadorsVoice>; + type AnnouncementLifetime = AnnouncementLifetime; + // An announcement can be submitted by a Senior Ambassador member or an ambassador plurality + // voice taken via referendum. + type AnnouncementOrigin = EitherOfDiverse< + pallet_ranked_collective::EnsureMember< + Runtime, + AmbassadorCollectiveInstance, + { ranks::SENIOR_AMBASSADOR_TIER_3 }, + >, + EnsureAmbassadorsVoice, + >; + type MaxAnnouncements = MaxAnnouncements; + type WeightInfo = weights::pallet_collective_content::WeightInfo; +} + +pub type AmbassadorCoreInstance = pallet_core_fellowship::Instance2; + +impl pallet_core_fellowship::Config for Runtime { + type WeightInfo = weights::pallet_core_fellowship_ambassador_core::WeightInfo; + type RuntimeEvent = RuntimeEvent; + type Members = pallet_ranked_collective::Pallet; + type Balance = Balance; + // Parameters are set by any of: + // - Root; + // - the FellowshipAdmin origin (i.e. token holder referendum); + // - a vote among all Head Ambassadors. + type ParamsOrigin = EitherOfDiverse< + EnsureRoot, + EitherOfDiverse< + EnsureXcm>, + EnsureHeadAmbassadorsVoice, + >, + >; + // Induction (creating a candidate) is by any of: + // - Root; + // - the FellowshipAdmin origin (i.e. token holder referendum); + // - a single Head Ambassador; + // - a vote among all senior members. + type InductOrigin = EitherOfDiverse< + EnsureRoot, + EitherOfDiverse< + EnsureXcm>, + EitherOfDiverse< + pallet_ranked_collective::EnsureMember< + Runtime, + AmbassadorCollectiveInstance, + { ranks::HEAD_AMBASSADOR_TIER_5 }, + >, + EnsureAmbassadorsVoiceFrom>, + >, + >, + >; + type ApproveOrigin = PromoteOrigin; + type PromoteOrigin = PromoteOrigin; + type EvidenceSize = ConstU32<65536>; +} + +pub type AmbassadorSalaryInstance = pallet_salary::Instance2; + +parameter_types! { + // The interior location on AssetHub for the paying account. This is the Ambassador Salary + // pallet instance (which sits at index 74). This sovereign account will need funding. + pub AmbassadorSalaryLocation: InteriorMultiLocation = PalletInstance(74).into(); +} + +/// [`PayOverXcm`] setup to pay the Ambassador salary on the AssetHub in WND. 
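+///
+/// Payments originate from the `AmbassadorSalaryLocation` interior, are routed over `XcmRouter`
+/// with `PolkadotXcm` tracking the payment-status query (6-hour timeout), and the beneficiary is
+/// addressed as a local `AccountId32` junction on AssetHub.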
+pub type AmbassadorSalaryPaymaster = PayOverXcm< + AmbassadorSalaryLocation, + crate::xcm_config::XcmRouter, + crate::PolkadotXcm, + ConstU32<{ 6 * HOURS }>, + AccountId, + (), + ConvertToValue, + AliasesIntoAccountId32<(), AccountId>, +>; + +impl pallet_salary::Config for Runtime { + type WeightInfo = weights::pallet_salary_ambassador_salary::WeightInfo; + type RuntimeEvent = RuntimeEvent; + + #[cfg(not(feature = "runtime-benchmarks"))] + type Paymaster = AmbassadorSalaryPaymaster; + #[cfg(feature = "runtime-benchmarks")] + type Paymaster = crate::impls::benchmarks::PayWithEnsure< + AmbassadorSalaryPaymaster, + crate::impls::benchmarks::OpenHrmpChannel>, + >; + type Members = pallet_ranked_collective::Pallet; + + #[cfg(not(feature = "runtime-benchmarks"))] + type Salary = pallet_core_fellowship::Pallet; + #[cfg(feature = "runtime-benchmarks")] + type Salary = frame_support::traits::tokens::ConvertRank< + crate::impls::benchmarks::RankToSalary, + >; + // 15 days to register for a salary payment. + type RegistrationPeriod = ConstU32<{ 15 * DAYS }>; + // 15 days to claim the salary payment. + type PayoutPeriod = ConstU32<{ 15 * DAYS }>; + // Total monthly salary budget. + type Budget = ConstU128<{ 10_000 * DOLLARS }>; +} diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/src/ambassador/origins.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/ambassador/origins.rs new file mode 100644 index 00000000000..3ce8a6b9e5d --- /dev/null +++ b/cumulus/parachains/runtimes/collectives/collectives-westend/src/ambassador/origins.rs @@ -0,0 +1,135 @@ +// Copyright (C) 2022 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! The Ambassador Program's origins. + +#[frame_support::pallet] +pub mod pallet_origins { + use crate::ambassador::ranks; + use frame_support::pallet_prelude::*; + use pallet_ranked_collective::Rank; + + #[pallet::pallet] + pub struct Pallet(PhantomData); + + /// The pallet configuration trait. + #[pallet::config] + pub trait Config: frame_system::Config {} + + #[derive(PartialEq, Eq, Clone, MaxEncodedLen, Encode, Decode, TypeInfo, RuntimeDebug)] + #[pallet::origin] + pub enum Origin { + /// Plurality voice of the [ranks::AMBASSADOR_TIER_1] members or above given via + /// referendum. + Ambassadors, + /// Plurality voice of the [ranks::AMBASSADOR_TIER_2] members or above given via + /// referendum. + AmbassadorsTier2, + /// Plurality voice of the [ranks::SENIOR_AMBASSADOR_TIER_3] members or above given via + /// referendum. + SeniorAmbassadors, + /// Plurality voice of the [ranks::SENIOR_AMBASSADOR_TIER_4] members or above given via + /// referendum. + SeniorAmbassadorsTier4, + /// Plurality voice of the [ranks::HEAD_AMBASSADOR_TIER_5] members or above given via + /// referendum. + HeadAmbassadors, + /// Plurality voice of the [ranks::HEAD_AMBASSADOR_TIER_6] members or above given via + /// referendum. 
+ HeadAmbassadorsTier6, + /// Plurality voice of the [ranks::HEAD_AMBASSADOR_TIER_7] members or above given via + /// referendum. + HeadAmbassadorsTier7, + /// Plurality voice of the [ranks::MASTER_AMBASSADOR_TIER_8] members or above given via + /// referendum. + MasterAmbassadors, + /// Plurality voice of the [ranks::MASTER_AMBASSADOR_TIER_9] members or above given via + /// referendum. + MasterAmbassadorsTier9, + } + + impl Origin { + /// Returns the rank that the origin `self` speaks for, or `None` if it doesn't speak for + /// any. + pub fn as_voice(&self) -> Option { + Some(match &self { + Origin::Ambassadors => ranks::AMBASSADOR_TIER_1, + Origin::AmbassadorsTier2 => ranks::AMBASSADOR_TIER_2, + Origin::SeniorAmbassadors => ranks::SENIOR_AMBASSADOR_TIER_3, + Origin::SeniorAmbassadorsTier4 => ranks::SENIOR_AMBASSADOR_TIER_4, + Origin::HeadAmbassadors => ranks::HEAD_AMBASSADOR_TIER_5, + Origin::HeadAmbassadorsTier6 => ranks::HEAD_AMBASSADOR_TIER_6, + Origin::HeadAmbassadorsTier7 => ranks::HEAD_AMBASSADOR_TIER_7, + Origin::MasterAmbassadors => ranks::MASTER_AMBASSADOR_TIER_8, + Origin::MasterAmbassadorsTier9 => ranks::MASTER_AMBASSADOR_TIER_9, + }) + } + } + + /// Implementation of the [EnsureOrigin] trait for the [Origin::HeadAmbassadors] origin. + pub struct EnsureHeadAmbassadorsVoice; + impl> + From> EnsureOrigin for EnsureHeadAmbassadorsVoice { + type Success = (); + fn try_origin(o: O) -> Result { + o.into().and_then(|o| match o { + Origin::HeadAmbassadors => Ok(()), + r => Err(O::from(r)), + }) + } + + #[cfg(feature = "runtime-benchmarks")] + fn try_successful_origin() -> Result { + Ok(O::from(Origin::HeadAmbassadors)) + } + } + + /// Implementation of the [EnsureOrigin] trait for the plurality voice [Origin]s + /// from a given rank `R` with the success result of the corresponding [Rank]. + pub struct EnsureAmbassadorsVoiceFrom(PhantomData); + impl, O: Into> + From> EnsureOrigin + for EnsureAmbassadorsVoiceFrom + { + type Success = Rank; + fn try_origin(o: O) -> Result { + o.into().and_then(|o| match Origin::as_voice(&o) { + Some(r) if r >= R::get() => Ok(r), + _ => Err(O::from(o)), + }) + } + + #[cfg(feature = "runtime-benchmarks")] + fn try_successful_origin() -> Result { + ranks::MASTER_AMBASSADOR_TIER_9 + .ge(&R::get()) + .then(|| O::from(Origin::MasterAmbassadorsTier9)) + .ok_or(()) + } + } + + /// Implementation of the [EnsureOrigin] trait for the plurality voice [Origin]s with the + /// success result of the corresponding [Rank]. + pub struct EnsureAmbassadorsVoice; + impl> + From> EnsureOrigin for EnsureAmbassadorsVoice { + type Success = Rank; + fn try_origin(o: O) -> Result { + o.into().and_then(|o| Origin::as_voice(&o).ok_or(O::from(o))) + } + + #[cfg(feature = "runtime-benchmarks")] + fn try_successful_origin() -> Result { + Ok(O::from(Origin::MasterAmbassadorsTier9)) + } + } +} diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/src/ambassador/tracks.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/ambassador/tracks.rs new file mode 100644 index 00000000000..d4a2d3bbf1c --- /dev/null +++ b/cumulus/parachains/runtimes/collectives/collectives-westend/src/ambassador/tracks.rs @@ -0,0 +1,282 @@ +// Copyright (C) 2022 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! The Ambassador Program's referenda voting tracks. + +use super::Origin; +use crate::{Balance, BlockNumber, RuntimeOrigin, DAYS, DOLLARS, HOURS}; +use sp_runtime::Perbill; + +/// Referendum `TrackId` type. +pub type TrackId = u16; + +/// Referendum track IDs. +pub mod constants { + use super::TrackId; + + pub const AMBASSADOR_TIER_1: TrackId = 1; + pub const AMBASSADOR_TIER_2: TrackId = 2; + pub const SENIOR_AMBASSADOR_TIER_3: TrackId = 3; + pub const SENIOR_AMBASSADOR_TIER_4: TrackId = 4; + pub const HEAD_AMBASSADOR_TIER_5: TrackId = 5; + pub const HEAD_AMBASSADOR_TIER_6: TrackId = 6; + pub const HEAD_AMBASSADOR_TIER_7: TrackId = 7; + pub const MASTER_AMBASSADOR_TIER_8: TrackId = 8; + pub const MASTER_AMBASSADOR_TIER_9: TrackId = 9; +} + +/// The type implementing the [`pallet_referenda::TracksInfo`] trait for referenda pallet. +pub struct TracksInfo; + +/// Information on the voting tracks. +impl pallet_referenda::TracksInfo for TracksInfo { + type Id = TrackId; + + type RuntimeOrigin = ::PalletsOrigin; + + /// Return the array of available tracks and their information. + fn tracks() -> &'static [(Self::Id, pallet_referenda::TrackInfo)] { + static DATA: [(TrackId, pallet_referenda::TrackInfo); 9] = [ + ( + constants::AMBASSADOR_TIER_1, + pallet_referenda::TrackInfo { + name: "ambassador tier 1", + max_deciding: 10, + decision_deposit: 5 * DOLLARS, + prepare_period: 24 * HOURS, + decision_period: 1 * DAYS, + confirm_period: 24 * HOURS, + min_enactment_period: 1 * HOURS, + min_approval: pallet_referenda::Curve::LinearDecreasing { + length: Perbill::from_percent(100), + floor: Perbill::from_percent(50), + ceil: Perbill::from_percent(100), + }, + min_support: pallet_referenda::Curve::LinearDecreasing { + length: Perbill::from_percent(100), + floor: Perbill::from_percent(10), + ceil: Perbill::from_percent(50), + }, + }, + ), + ( + constants::AMBASSADOR_TIER_2, + pallet_referenda::TrackInfo { + name: "ambassador tier 2", + max_deciding: 10, + decision_deposit: 5 * DOLLARS, + prepare_period: 24 * HOURS, + decision_period: 1 * DAYS, + confirm_period: 24 * HOURS, + min_enactment_period: 1 * HOURS, + min_approval: pallet_referenda::Curve::LinearDecreasing { + length: Perbill::from_percent(100), + floor: Perbill::from_percent(50), + ceil: Perbill::from_percent(100), + }, + min_support: pallet_referenda::Curve::LinearDecreasing { + length: Perbill::from_percent(100), + floor: Perbill::from_percent(10), + ceil: Perbill::from_percent(50), + }, + }, + ), + ( + constants::SENIOR_AMBASSADOR_TIER_3, + pallet_referenda::TrackInfo { + name: "senior ambassador tier 3", + max_deciding: 10, + decision_deposit: 5 * DOLLARS, + prepare_period: 24 * HOURS, + decision_period: 1 * DAYS, + confirm_period: 24 * HOURS, + min_enactment_period: 1 * HOURS, + min_approval: pallet_referenda::Curve::LinearDecreasing { + length: Perbill::from_percent(100), + floor: Perbill::from_percent(50), + ceil: Perbill::from_percent(100), + }, + min_support: pallet_referenda::Curve::LinearDecreasing { + length: Perbill::from_percent(100), + floor: Perbill::from_percent(10), + ceil: Perbill::from_percent(50), + }, + 
}, + ), + ( + constants::SENIOR_AMBASSADOR_TIER_4, + pallet_referenda::TrackInfo { + name: "senior ambassador tier 4", + max_deciding: 10, + decision_deposit: 5 * DOLLARS, + prepare_period: 24 * HOURS, + decision_period: 1 * DAYS, + confirm_period: 24 * HOURS, + min_enactment_period: 1 * HOURS, + min_approval: pallet_referenda::Curve::LinearDecreasing { + length: Perbill::from_percent(100), + floor: Perbill::from_percent(50), + ceil: Perbill::from_percent(100), + }, + min_support: pallet_referenda::Curve::LinearDecreasing { + length: Perbill::from_percent(100), + floor: Perbill::from_percent(10), + ceil: Perbill::from_percent(50), + }, + }, + ), + ( + constants::HEAD_AMBASSADOR_TIER_5, + pallet_referenda::TrackInfo { + name: "head ambassador tier 5", + max_deciding: 10, + decision_deposit: 5 * DOLLARS, + prepare_period: 24 * HOURS, + decision_period: 1 * DAYS, + confirm_period: 24 * HOURS, + min_enactment_period: 1 * HOURS, + min_approval: pallet_referenda::Curve::LinearDecreasing { + length: Perbill::from_percent(100), + floor: Perbill::from_percent(50), + ceil: Perbill::from_percent(100), + }, + min_support: pallet_referenda::Curve::LinearDecreasing { + length: Perbill::from_percent(100), + floor: Perbill::from_percent(10), + ceil: Perbill::from_percent(50), + }, + }, + ), + ( + constants::HEAD_AMBASSADOR_TIER_6, + pallet_referenda::TrackInfo { + name: "head ambassador tier 6", + max_deciding: 10, + decision_deposit: 5 * DOLLARS, + prepare_period: 24 * HOURS, + decision_period: 1 * DAYS, + confirm_period: 24 * HOURS, + min_enactment_period: 1 * HOURS, + min_approval: pallet_referenda::Curve::LinearDecreasing { + length: Perbill::from_percent(100), + floor: Perbill::from_percent(50), + ceil: Perbill::from_percent(100), + }, + min_support: pallet_referenda::Curve::LinearDecreasing { + length: Perbill::from_percent(100), + floor: Perbill::from_percent(10), + ceil: Perbill::from_percent(50), + }, + }, + ), + ( + constants::HEAD_AMBASSADOR_TIER_7, + pallet_referenda::TrackInfo { + name: "head ambassador tier 7", + max_deciding: 10, + decision_deposit: 5 * DOLLARS, + prepare_period: 24 * HOURS, + decision_period: 1 * DAYS, + confirm_period: 24 * HOURS, + min_enactment_period: 1 * HOURS, + min_approval: pallet_referenda::Curve::LinearDecreasing { + length: Perbill::from_percent(100), + floor: Perbill::from_percent(50), + ceil: Perbill::from_percent(100), + }, + min_support: pallet_referenda::Curve::LinearDecreasing { + length: Perbill::from_percent(100), + floor: Perbill::from_percent(10), + ceil: Perbill::from_percent(50), + }, + }, + ), + ( + constants::MASTER_AMBASSADOR_TIER_8, + pallet_referenda::TrackInfo { + name: "master ambassador tier 8", + max_deciding: 10, + decision_deposit: 5 * DOLLARS, + prepare_period: 24 * HOURS, + decision_period: 1 * DAYS, + confirm_period: 24 * HOURS, + min_enactment_period: 1 * HOURS, + min_approval: pallet_referenda::Curve::LinearDecreasing { + length: Perbill::from_percent(100), + floor: Perbill::from_percent(50), + ceil: Perbill::from_percent(100), + }, + min_support: pallet_referenda::Curve::LinearDecreasing { + length: Perbill::from_percent(100), + floor: Perbill::from_percent(10), + ceil: Perbill::from_percent(50), + }, + }, + ), + ( + constants::MASTER_AMBASSADOR_TIER_9, + pallet_referenda::TrackInfo { + name: "master ambassador tier 9", + max_deciding: 10, + decision_deposit: 5 * DOLLARS, + prepare_period: 24 * HOURS, + decision_period: 1 * DAYS, + confirm_period: 24 * HOURS, + min_enactment_period: 1 * HOURS, + min_approval: 
pallet_referenda::Curve::LinearDecreasing { + length: Perbill::from_percent(100), + floor: Perbill::from_percent(50), + ceil: Perbill::from_percent(100), + }, + min_support: pallet_referenda::Curve::LinearDecreasing { + length: Perbill::from_percent(100), + floor: Perbill::from_percent(10), + ceil: Perbill::from_percent(50), + }, + }, + ), + ]; + &DATA[..] + } + + /// Determine the voting track for the given `origin`. + fn track_for(id: &Self::RuntimeOrigin) -> Result { + #[cfg(feature = "runtime-benchmarks")] + { + // For benchmarks, we enable a root origin. + // It is important that this is not available in production! + let root: Self::RuntimeOrigin = frame_system::RawOrigin::Root.into(); + if &root == id { + return Ok(constants::MASTER_AMBASSADOR_TIER_9) + } + } + + match Origin::try_from(id.clone()) { + Ok(Origin::Ambassadors) => Ok(constants::AMBASSADOR_TIER_1), + Ok(Origin::AmbassadorsTier2) => Ok(constants::AMBASSADOR_TIER_2), + Ok(Origin::SeniorAmbassadors) => Ok(constants::SENIOR_AMBASSADOR_TIER_3), + Ok(Origin::SeniorAmbassadorsTier4) => Ok(constants::SENIOR_AMBASSADOR_TIER_4), + Ok(Origin::HeadAmbassadors) => Ok(constants::HEAD_AMBASSADOR_TIER_5), + Ok(Origin::HeadAmbassadorsTier6) => Ok(constants::HEAD_AMBASSADOR_TIER_6), + Ok(Origin::HeadAmbassadorsTier7) => Ok(constants::HEAD_AMBASSADOR_TIER_7), + Ok(Origin::MasterAmbassadors) => Ok(constants::MASTER_AMBASSADOR_TIER_8), + Ok(Origin::MasterAmbassadorsTier9) => Ok(constants::MASTER_AMBASSADOR_TIER_9), + _ => Err(()), + } + } +} + +// implements [`frame_support::traits::Get`] for [`TracksInfo`] +pallet_referenda::impl_tracksinfo_get!(TracksInfo, Balance, BlockNumber); diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/src/fellowship/mod.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/fellowship/mod.rs new file mode 100644 index 00000000000..b7412705dde --- /dev/null +++ b/cumulus/parachains/runtimes/collectives/collectives-westend/src/fellowship/mod.rs @@ -0,0 +1,238 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Cumulus. + +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . + +//! The Westend Technical Fellowship. 
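+//!
+//! This module wires together the Fellowship's custom origins, its referenda voting tracks and
+//! its rank constants (I through IX Dan, where rank 1 is also known as Members, rank 3 as
+//! Fellows, rank 4 as Architects and rank 7 as Masters), along with instances of the
+//! ranked-collective, referenda, core-fellowship and salary pallets. The salary is paid in USDT
+//! on the Asset Hub via `PayOverXcm`.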
+ +mod origins; +mod tracks; +use crate::{ + impls::ToParentTreasury, + weights, + xcm_config::{FellowshipAdminBodyId, UsdtAssetHub}, + AccountId, Balance, Balances, FellowshipReferenda, GovernanceLocation, Preimage, Runtime, + RuntimeCall, RuntimeEvent, RuntimeOrigin, Scheduler, WestendTreasuryAccount, DAYS, +}; +use frame_support::{ + parameter_types, + traits::{EitherOf, EitherOfDiverse, MapSuccess, OriginTrait, TryWithMorphedArg}, +}; +use frame_system::EnsureRootWithSuccess; +pub use origins::{ + pallet_origins as pallet_fellowship_origins, Architects, EnsureCanPromoteTo, EnsureCanRetainAt, + EnsureFellowship, Fellows, Masters, Members, ToVoice, +}; +use pallet_ranked_collective::EnsureOfRank; +use pallet_xcm::{EnsureXcm, IsVoiceOfBody}; +use parachains_common::{polkadot::account, HOURS}; +use sp_core::{ConstU128, ConstU32}; +use sp_runtime::traits::{AccountIdConversion, ConstU16, ConvertToValue, Replace, TakeFirst}; +use xcm_builder::{AliasesIntoAccountId32, PayOverXcm}; + +#[cfg(feature = "runtime-benchmarks")] +use crate::impls::benchmarks::{OpenHrmpChannel, PayWithEnsure}; + +/// The Fellowship members' ranks. +pub mod ranks { + use pallet_ranked_collective::Rank; + + pub const DAN_1: Rank = 1; // aka Members. + pub const DAN_2: Rank = 2; + pub const DAN_3: Rank = 3; // aka Fellows. + pub const DAN_4: Rank = 4; // aka Architects. + pub const DAN_5: Rank = 5; + pub const DAN_6: Rank = 6; + pub const DAN_7: Rank = 7; // aka Masters. + pub const DAN_8: Rank = 8; + pub const DAN_9: Rank = 9; +} + +parameter_types! { + // Referenda pallet account, used to temporarily deposit slashed imbalance before teleporting. + pub ReferendaPalletAccount: AccountId = account::REFERENDA_PALLET_ID.into_account_truncating(); +} + +impl pallet_fellowship_origins::Config for Runtime {} + +pub type FellowshipReferendaInstance = pallet_referenda::Instance1; + +impl pallet_referenda::Config for Runtime { + type WeightInfo = weights::pallet_referenda_fellowship_referenda::WeightInfo; + type RuntimeCall = RuntimeCall; + type RuntimeEvent = RuntimeEvent; + type Scheduler = Scheduler; + type Currency = Balances; + // Fellows can submit proposals. + type SubmitOrigin = EitherOf< + pallet_ranked_collective::EnsureMember, + MapSuccess< + TryWithMorphedArg< + RuntimeOrigin, + ::PalletsOrigin, + ToVoice, + EnsureOfRank, + (AccountId, u16), + >, + TakeFirst, + >, + >; + type CancelOrigin = Architects; + type KillOrigin = Masters; + type Slash = ToParentTreasury; + type Votes = pallet_ranked_collective::Votes; + type Tally = pallet_ranked_collective::TallyOf; + type SubmissionDeposit = ConstU128<0>; + type MaxQueued = ConstU32<100>; + type UndecidingTimeout = ConstU32<{ 7 * DAYS }>; + type AlarmInterval = ConstU32<1>; + type Tracks = tracks::TracksInfo; + type Preimages = Preimage; +} + +pub type FellowshipCollectiveInstance = pallet_ranked_collective::Instance1; + +impl pallet_ranked_collective::Config for Runtime { + type WeightInfo = weights::pallet_ranked_collective_fellowship_collective::WeightInfo; + type RuntimeEvent = RuntimeEvent; + + #[cfg(not(feature = "runtime-benchmarks"))] + // Promotions and the induction of new members are serviced by `FellowshipCore` pallet instance. + type PromoteOrigin = frame_system::EnsureNever; + #[cfg(feature = "runtime-benchmarks")] + // The maximum value of `u16` set as a success value for the root to ensure the benchmarks will + // pass. + type PromoteOrigin = EnsureRootWithSuccess>; + + // Demotion is by any of: + // - Root can demote arbitrarily. 
+ // - the FellowshipAdmin origin (i.e. token holder referendum); + // + // The maximum value of `u16` set as a success value for the root to ensure the benchmarks will + // pass. + type DemoteOrigin = EitherOf< + EnsureRootWithSuccess>, + MapSuccess< + EnsureXcm>, + Replace>, + >, + >; + type Polls = FellowshipReferenda; + type MinRankOfClass = tracks::MinRankOfClass; + type VoteWeight = pallet_ranked_collective::Geometric; +} + +pub type FellowshipCoreInstance = pallet_core_fellowship::Instance1; + +impl pallet_core_fellowship::Config for Runtime { + type WeightInfo = weights::pallet_core_fellowship_fellowship_core::WeightInfo; + type RuntimeEvent = RuntimeEvent; + type Members = pallet_ranked_collective::Pallet; + type Balance = Balance; + // Parameters are set by any of: + // - Root; + // - the FellowshipAdmin origin (i.e. token holder referendum); + // - a vote among all Fellows. + type ParamsOrigin = EitherOfDiverse< + EnsureXcm>, + Fellows, + >; + // Induction (creating a candidate) is by any of: + // - Root; + // - the FellowshipAdmin origin (i.e. token holder referendum); + // - a single Fellow; + // - a vote among all Members. + type InductOrigin = EitherOfDiverse< + EnsureXcm>, + EitherOfDiverse< + pallet_ranked_collective::EnsureMember< + Runtime, + FellowshipCollectiveInstance, + { ranks::DAN_3 }, + >, + Members, + >, + >; + // Approval (rank-retention) of a Member's current rank is by any of: + // - Root; + // - the FellowshipAdmin origin (i.e. token holder referendum); + // - a vote by the rank two above the current rank for all retention up to the Master rank. + type ApproveOrigin = EitherOf< + MapSuccess< + EnsureXcm>, + Replace>, + >, + EnsureCanRetainAt, + >; + // Promotion is by any of: + // - Root can promote arbitrarily. + // - the FellowshipAdmin origin (i.e. token holder referendum); + // - a vote by the rank two above the new rank for all promotions up to the Master rank. + type PromoteOrigin = EitherOf< + MapSuccess< + EnsureXcm>, + Replace>, + >, + EnsureCanPromoteTo, + >; + type EvidenceSize = ConstU32<65536>; +} + +pub type FellowshipSalaryInstance = pallet_salary::Instance1; + +use xcm::prelude::*; + +parameter_types! { + // The interior location on AssetHub for the paying account. This is the Fellowship Salary + // pallet instance (which sits at index 64). This sovereign account will need funding. + pub Interior: InteriorMultiLocation = PalletInstance(64).into(); +} + +const USDT_UNITS: u128 = 1_000_000; + +/// [`PayOverXcm`] setup to pay the Fellowship salary on the AssetHub in USDT. +pub type FellowshipSalaryPaymaster = PayOverXcm< + Interior, + crate::xcm_config::XcmRouter, + crate::PolkadotXcm, + ConstU32<{ 6 * HOURS }>, + AccountId, + (), + ConvertToValue, + AliasesIntoAccountId32<(), AccountId>, +>; + +impl pallet_salary::Config for Runtime { + type WeightInfo = weights::pallet_salary_fellowship_salary::WeightInfo; + type RuntimeEvent = RuntimeEvent; + + #[cfg(not(feature = "runtime-benchmarks"))] + type Paymaster = FellowshipSalaryPaymaster; + #[cfg(feature = "runtime-benchmarks")] + type Paymaster = PayWithEnsure>>; + type Members = pallet_ranked_collective::Pallet; + + #[cfg(not(feature = "runtime-benchmarks"))] + type Salary = pallet_core_fellowship::Pallet; + #[cfg(feature = "runtime-benchmarks")] + type Salary = frame_support::traits::tokens::ConvertRank< + crate::impls::benchmarks::RankToSalary, + >; + // 15 days to register for a salary payment. + type RegistrationPeriod = ConstU32<{ 15 * DAYS }>; + // 15 days to claim the salary payment. 
+ type PayoutPeriod = ConstU32<{ 15 * DAYS }>; + // Total monthly salary budget. + type Budget = ConstU128<{ 100_000 * USDT_UNITS }>; +} diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/src/fellowship/origins.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/fellowship/origins.rs new file mode 100644 index 00000000000..5ed2c19f79e --- /dev/null +++ b/cumulus/parachains/runtimes/collectives/collectives-westend/src/fellowship/origins.rs @@ -0,0 +1,247 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Cumulus. + +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . + +//! Fellowship custom origins. + +use super::ranks; +pub use pallet_origins::*; + +#[frame_support::pallet] +pub mod pallet_origins { + use super::ranks; + use frame_support::pallet_prelude::*; + use pallet_ranked_collective::Rank; + + #[pallet::config] + pub trait Config: frame_system::Config {} + + #[pallet::pallet] + pub struct Pallet(_); + + #[derive(PartialEq, Eq, Clone, MaxEncodedLen, Encode, Decode, TypeInfo, RuntimeDebug)] + #[pallet::origin] + pub enum Origin { + /// Origin aggregated through weighted votes of those with rank 1 or above; `Success` is 1. + /// Aka the "voice" of all Members. + Members, + /// Origin aggregated through weighted votes of those with rank 2 or above; `Success` is 2. + /// Aka the "voice" of members at least II Dan. + Fellowship2Dan, + /// Origin aggregated through weighted votes of those with rank 3 or above; `Success` is 3. + /// Aka the "voice" of all Fellows. + Fellows, + /// Origin aggregated through weighted votes of those with rank 4 or above; `Success` is 4. + /// Aka the "voice" of members at least IV Dan. + Architects, + /// Origin aggregated through weighted votes of those with rank 5 or above; `Success` is 5. + /// Aka the "voice" of members at least V Dan. + Fellowship5Dan, + /// Origin aggregated through weighted votes of those with rank 6 or above; `Success` is 6. + /// Aka the "voice" of members at least VI Dan. + Fellowship6Dan, + /// Origin aggregated through weighted votes of those with rank 7 or above; `Success` is 7. + /// Aka the "voice" of all Masters. + Masters, + /// Origin aggregated through weighted votes of those with rank 8 or above; `Success` is 8. + /// Aka the "voice" of members at least VIII Dan. + Fellowship8Dan, + /// Origin aggregated through weighted votes of those with rank 9 or above; `Success` is 9. + /// Aka the "voice" of members at least IX Dan. + Fellowship9Dan, + + /// Origin aggregated through weighted votes of those with rank 3 or above when voting on + /// a fortnight-long track; `Success` is 1. + RetainAt1Dan, + /// Origin aggregated through weighted votes of those with rank 4 or above when voting on + /// a fortnight-long track; `Success` is 2. + RetainAt2Dan, + /// Origin aggregated through weighted votes of those with rank 5 or above when voting on + /// a fortnight-long track; `Success` is 3. 
+ RetainAt3Dan, + /// Origin aggregated through weighted votes of those with rank 6 or above when voting on + /// a fortnight-long track; `Success` is 4. + RetainAt4Dan, + /// Origin aggregated through weighted votes of those with rank 7 or above when voting on + /// a fortnight-long track; `Success` is 5. + RetainAt5Dan, + /// Origin aggregated through weighted votes of those with rank 8 or above when voting on + /// a fortnight-long track; `Success` is 6. + RetainAt6Dan, + + /// Origin aggregated through weighted votes of those with rank 3 or above when voting on + /// a month-long track; `Success` is 1. + PromoteTo1Dan, + /// Origin aggregated through weighted votes of those with rank 4 or above when voting on + /// a month-long track; `Success` is 2. + PromoteTo2Dan, + /// Origin aggregated through weighted votes of those with rank 5 or above when voting on + /// a month-long track; `Success` is 3. + PromoteTo3Dan, + /// Origin aggregated through weighted votes of those with rank 6 or above when voting on + /// a month-long track; `Success` is 4. + PromoteTo4Dan, + /// Origin aggregated through weighted votes of those with rank 7 or above when voting on + /// a month-long track; `Success` is 5. + PromoteTo5Dan, + /// Origin aggregated through weighted votes of those with rank 8 or above when voting on + /// a month-long track; `Success` is 6. + PromoteTo6Dan, + } + + impl Origin { + /// Returns the rank that the origin `self` speaks for, or `None` if it doesn't speak for + /// any. + /// + /// `Some` will be returned only for the first 9 elements of [Origin]. + pub fn as_voice(&self) -> Option { + Some(match &self { + Origin::Members => ranks::DAN_1, + Origin::Fellowship2Dan => ranks::DAN_2, + Origin::Fellows => ranks::DAN_3, + Origin::Architects => ranks::DAN_4, + Origin::Fellowship5Dan => ranks::DAN_5, + Origin::Fellowship6Dan => ranks::DAN_6, + Origin::Masters => ranks::DAN_7, + Origin::Fellowship8Dan => ranks::DAN_8, + Origin::Fellowship9Dan => ranks::DAN_9, + _ => return None, + }) + } + } + + /// A `TryMorph` implementation which is designed to convert an aggregate `RuntimeOrigin` + /// value into the Fellowship voice it represents if it is a Fellowship pallet origin an + /// appropriate variant. See also [Origin::as_voice]. + pub struct ToVoice; + impl<'a, O: 'a + TryInto<&'a Origin>> sp_runtime::traits::TryMorph for ToVoice { + type Outcome = pallet_ranked_collective::Rank; + fn try_morph(o: O) -> Result { + o.try_into().ok().and_then(Origin::as_voice).ok_or(()) + } + } + + macro_rules! decl_unit_ensures { + ( $name:ident: $success_type:ty = $success:expr ) => { + pub struct $name; + impl> + From> + EnsureOrigin for $name + { + type Success = $success_type; + fn try_origin(o: O) -> Result { + o.into().and_then(|o| match o { + Origin::$name => Ok($success), + r => Err(O::from(r)), + }) + } + #[cfg(feature = "runtime-benchmarks")] + fn try_successful_origin() -> Result { + Ok(O::from(Origin::$name)) + } + } + }; + ( $name:ident ) => { decl_unit_ensures! { $name : () = () } }; + ( $name:ident: $success_type:ty = $success:expr, $( $rest:tt )* ) => { + decl_unit_ensures! { $name: $success_type = $success } + decl_unit_ensures! { $( $rest )* } + }; + ( $name:ident, $( $rest:tt )* ) => { + decl_unit_ensures! { $name } + decl_unit_ensures! { $( $rest )* } + }; + () => {} + } + decl_unit_ensures!( + Members: Rank = ranks::DAN_1, + Fellows: Rank = ranks::DAN_3, + Architects: Rank = ranks::DAN_4, + Masters: Rank = ranks::DAN_7, + ); + + macro_rules! 
decl_ensure { + ( + $vis:vis type $name:ident: EnsureOrigin { + $( $item:ident = $success:expr, )* + } + ) => { + $vis struct $name; + impl> + From> + EnsureOrigin for $name + { + type Success = $success_type; + fn try_origin(o: O) -> Result { + o.into().and_then(|o| match o { + $( + Origin::$item => Ok($success), + )* + r => Err(O::from(r)), + }) + } + #[cfg(feature = "runtime-benchmarks")] + fn try_successful_origin() -> Result { + // By convention the more privileged origins go later, so for greatest chance + // of success, we want the last one. + let _result: Result = Err(()); + $( + let _result: Result = Ok(O::from(Origin::$item)); + )* + _result + } + } + } + } + + // Fellowship origin indicating weighted voting from at least the rank of `Success` on a + // week-long track. + decl_ensure! { + pub type EnsureFellowship: EnsureOrigin { + Members = ranks::DAN_1, + Fellowship2Dan = ranks::DAN_2, + Fellows = ranks::DAN_3, + Architects = ranks::DAN_4, + Fellowship5Dan = ranks::DAN_5, + Fellowship6Dan = ranks::DAN_6, + Masters = ranks::DAN_7, + Fellowship8Dan = ranks::DAN_8, + Fellowship9Dan = ranks::DAN_9, + } + } + + // Fellowship origin indicating weighted voting from at least the rank of `Success + 2` on + // a fortnight-long track; needed for Fellowship retention voting. + decl_ensure! { + pub type EnsureCanRetainAt: EnsureOrigin { + RetainAt1Dan = ranks::DAN_1, + RetainAt2Dan = ranks::DAN_2, + RetainAt3Dan = ranks::DAN_3, + RetainAt4Dan = ranks::DAN_4, + RetainAt5Dan = ranks::DAN_5, + RetainAt6Dan = ranks::DAN_6, + } + } + + // Fellowship origin indicating weighted voting from at least the rank of `Success + 2` on + // a month-long track; needed for Fellowship promotion voting. + decl_ensure! { + pub type EnsureCanPromoteTo: EnsureOrigin { + PromoteTo1Dan = ranks::DAN_1, + PromoteTo2Dan = ranks::DAN_2, + PromoteTo3Dan = ranks::DAN_3, + PromoteTo4Dan = ranks::DAN_4, + PromoteTo5Dan = ranks::DAN_5, + PromoteTo6Dan = ranks::DAN_6, + } + } +} diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/src/fellowship/tracks.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/fellowship/tracks.rs new file mode 100644 index 00000000000..099bdf4cf75 --- /dev/null +++ b/cumulus/parachains/runtimes/collectives/collectives-westend/src/fellowship/tracks.rs @@ -0,0 +1,532 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Cumulus. + +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . + +//! Track configurations for Fellowship. + +use crate::{Balance, BlockNumber, RuntimeOrigin, DAYS, DOLLARS, HOURS, MINUTES}; +use pallet_ranked_collective::Rank; +use sp_runtime::{traits::Convert, Perbill}; + +/// Referendum `TrackId` type. +pub type TrackId = u16; + +/// Referendum track IDs. +pub mod constants { + use super::TrackId; + + // Regular tracks (7 days) used for general operations. The required rank for voting is the + // same as that which is named (and also the track ID). 
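+	// For example, the "fellows" track below has ID 3 and can only be voted on by members of
+	// rank 3 (III Dan) or higher; see `MinRankOfClass` further down.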
+ pub const MEMBERS: TrackId = 1; + pub const PROFICIENTS: TrackId = 2; + pub const FELLOWS: TrackId = 3; + pub const ARCHITECTS: TrackId = 4; + pub const ARCHITECTS_ADEPT: TrackId = 5; + pub const GRAND_ARCHITECTS: TrackId = 6; + pub const MASTERS: TrackId = 7; + pub const MASTERS_CONSTANT: TrackId = 8; + pub const GRAND_MASTERS: TrackId = 9; + + // Longer tracks (14 days) used for rank retention. These require a rank of two more than the + // grade at which they retain (as per the whitepaper). This works out as the track ID minus 8. + pub const RETAIN_AT_1DAN: TrackId = 11; + pub const RETAIN_AT_2DAN: TrackId = 12; + pub const RETAIN_AT_3DAN: TrackId = 13; + pub const RETAIN_AT_4DAN: TrackId = 14; + pub const RETAIN_AT_5DAN: TrackId = 15; + pub const RETAIN_AT_6DAN: TrackId = 16; + + // Longest tracks (30 days) used for promotions. These require a rank of two more than the + // grade to which they promote (as per the whitepaper). This works out as the track ID minus 18. + pub const PROMOTE_TO_1DAN: TrackId = 21; + pub const PROMOTE_TO_2DAN: TrackId = 22; + pub const PROMOTE_TO_3DAN: TrackId = 23; + pub const PROMOTE_TO_4DAN: TrackId = 24; + pub const PROMOTE_TO_5DAN: TrackId = 25; + pub const PROMOTE_TO_6DAN: TrackId = 26; +} + +/// Convert the track ID (defined above) into the minimum rank (i.e. fellowship Dan grade) required +/// to vote on the track. +pub struct MinRankOfClass; +impl Convert for MinRankOfClass { + fn convert(a: TrackId) -> Rank { + match a { + // Just a regular vote: the track ID is conveniently the same as the minimum rank. + regular @ 1..=9 => regular, + // A retention vote; the track ID turns out to be 8 more than the minimum required rank. + retention @ 11..=16 => retention - 8, + // A promotion vote; the track ID turns out to be 18 more than the minimum required + // rank. 
+ promotion @ 21..=26 => promotion - 18, + _ => Rank::max_value(), + } + } +} + +const RETAIN_MAX_DECIDING: u32 = 25; +const RETAIN_DECISION_DEPOSIT: Balance = 5 * DOLLARS; +const RETAIN_PREPARE_PERIOD: BlockNumber = 0; +const RETAIN_DECISION_PERIOD: BlockNumber = 14 * DAYS; +const RETAIN_CONFIRM_PERIOD: BlockNumber = 1 * HOURS; +const RETAIN_MIN_ENACTMENT_PERIOD: BlockNumber = 0; +const RETAIN_MIN_APPROVAL: pallet_referenda::Curve = pallet_referenda::Curve::LinearDecreasing { + length: Perbill::from_percent(100), + floor: Perbill::from_percent(60), + ceil: Perbill::from_percent(100), +}; +const RETAIN_MIN_SUPPORT: pallet_referenda::Curve = pallet_referenda::Curve::LinearDecreasing { + length: Perbill::from_percent(100), + floor: Perbill::from_percent(10), + ceil: Perbill::from_percent(100), +}; + +const PROMOTE_MAX_DECIDING: u32 = 10; +const PROMOTE_DECISION_DEPOSIT: Balance = 5 * DOLLARS; +const PROMOTE_PREPARE_PERIOD: BlockNumber = 0; +const PROMOTE_DECISION_PERIOD: BlockNumber = 30 * DAYS; +const PROMOTE_CONFIRM_PERIOD: BlockNumber = 1 * HOURS; +const PROMOTE_MIN_ENACTMENT_PERIOD: BlockNumber = 0; +const PROMOTE_MIN_APPROVAL: pallet_referenda::Curve = pallet_referenda::Curve::LinearDecreasing { + length: Perbill::from_percent(100), + floor: Perbill::from_percent(60), + ceil: Perbill::from_percent(100), +}; +const PROMOTE_MIN_SUPPORT: pallet_referenda::Curve = pallet_referenda::Curve::LinearDecreasing { + length: Perbill::from_percent(100), + floor: Perbill::from_percent(10), + ceil: Perbill::from_percent(100), +}; + +pub struct TracksInfo; +impl pallet_referenda::TracksInfo for TracksInfo { + type Id = TrackId; + type RuntimeOrigin = ::PalletsOrigin; + fn tracks() -> &'static [(Self::Id, pallet_referenda::TrackInfo)] { + use constants as tracks; + static DATA: [(TrackId, pallet_referenda::TrackInfo); 21] = [ + ( + tracks::MEMBERS, + pallet_referenda::TrackInfo { + name: "members", + max_deciding: 10, + decision_deposit: 5 * DOLLARS, + prepare_period: 30 * MINUTES, + decision_period: 1 * DAYS, + confirm_period: 30 * MINUTES, + min_enactment_period: 5 * MINUTES, + min_approval: pallet_referenda::Curve::LinearDecreasing { + length: Perbill::from_percent(100), + floor: Perbill::from_percent(50), + ceil: Perbill::from_percent(100), + }, + min_support: pallet_referenda::Curve::LinearDecreasing { + length: Perbill::from_percent(100), + floor: Perbill::from_percent(0), + ceil: Perbill::from_percent(100), + }, + }, + ), + ( + tracks::PROFICIENTS, + pallet_referenda::TrackInfo { + name: "proficient members", + max_deciding: 10, + decision_deposit: 5 * DOLLARS, + prepare_period: 30 * MINUTES, + decision_period: 1 * DAYS, + confirm_period: 30 * MINUTES, + min_enactment_period: 5 * MINUTES, + min_approval: pallet_referenda::Curve::LinearDecreasing { + length: Perbill::from_percent(100), + floor: Perbill::from_percent(50), + ceil: Perbill::from_percent(100), + }, + min_support: pallet_referenda::Curve::LinearDecreasing { + length: Perbill::from_percent(100), + floor: Perbill::from_percent(0), + ceil: Perbill::from_percent(100), + }, + }, + ), + ( + tracks::FELLOWS, + pallet_referenda::TrackInfo { + name: "fellows", + max_deciding: 10, + decision_deposit: 5 * DOLLARS, + prepare_period: 30 * MINUTES, + decision_period: 1 * DAYS, + confirm_period: 30 * MINUTES, + min_enactment_period: 5 * MINUTES, + min_approval: pallet_referenda::Curve::LinearDecreasing { + length: Perbill::from_percent(100), + floor: Perbill::from_percent(50), + ceil: Perbill::from_percent(100), + }, + min_support: 
pallet_referenda::Curve::LinearDecreasing { + length: Perbill::from_percent(100), + floor: Perbill::from_percent(0), + ceil: Perbill::from_percent(100), + }, + }, + ), + ( + tracks::ARCHITECTS, + pallet_referenda::TrackInfo { + name: "architects", + max_deciding: 10, + decision_deposit: 5 * DOLLARS, + prepare_period: 30 * MINUTES, + decision_period: 1 * DAYS, + confirm_period: 30 * MINUTES, + min_enactment_period: 5 * MINUTES, + min_approval: pallet_referenda::Curve::LinearDecreasing { + length: Perbill::from_percent(100), + floor: Perbill::from_percent(50), + ceil: Perbill::from_percent(100), + }, + min_support: pallet_referenda::Curve::LinearDecreasing { + length: Perbill::from_percent(100), + floor: Perbill::from_percent(0), + ceil: Perbill::from_percent(100), + }, + }, + ), + ( + tracks::ARCHITECTS_ADEPT, + pallet_referenda::TrackInfo { + name: "architects adept", + max_deciding: 10, + decision_deposit: 5 * DOLLARS, + prepare_period: 30 * MINUTES, + decision_period: 1 * DAYS, + confirm_period: 30 * MINUTES, + min_enactment_period: 5 * MINUTES, + min_approval: pallet_referenda::Curve::LinearDecreasing { + length: Perbill::from_percent(100), + floor: Perbill::from_percent(50), + ceil: Perbill::from_percent(100), + }, + min_support: pallet_referenda::Curve::LinearDecreasing { + length: Perbill::from_percent(100), + floor: Perbill::from_percent(0), + ceil: Perbill::from_percent(100), + }, + }, + ), + ( + tracks::GRAND_ARCHITECTS, + pallet_referenda::TrackInfo { + name: "grand architects", + max_deciding: 10, + decision_deposit: 5 * DOLLARS, + prepare_period: 30 * MINUTES, + decision_period: 1 * DAYS, + confirm_period: 30 * MINUTES, + min_enactment_period: 5 * MINUTES, + min_approval: pallet_referenda::Curve::LinearDecreasing { + length: Perbill::from_percent(100), + floor: Perbill::from_percent(50), + ceil: Perbill::from_percent(100), + }, + min_support: pallet_referenda::Curve::LinearDecreasing { + length: Perbill::from_percent(100), + floor: Perbill::from_percent(0), + ceil: Perbill::from_percent(100), + }, + }, + ), + ( + tracks::MASTERS, + pallet_referenda::TrackInfo { + name: "masters", + max_deciding: 10, + decision_deposit: 5 * DOLLARS, + prepare_period: 30 * MINUTES, + decision_period: 1 * DAYS, + confirm_period: 30 * MINUTES, + min_enactment_period: 5 * MINUTES, + min_approval: pallet_referenda::Curve::LinearDecreasing { + length: Perbill::from_percent(100), + floor: Perbill::from_percent(50), + ceil: Perbill::from_percent(100), + }, + min_support: pallet_referenda::Curve::LinearDecreasing { + length: Perbill::from_percent(100), + floor: Perbill::from_percent(0), + ceil: Perbill::from_percent(100), + }, + }, + ), + ( + tracks::MASTERS_CONSTANT, + pallet_referenda::TrackInfo { + name: "masters constant", + max_deciding: 10, + decision_deposit: 5 * DOLLARS, + prepare_period: 30 * MINUTES, + decision_period: 1 * DAYS, + confirm_period: 30 * MINUTES, + min_enactment_period: 5 * MINUTES, + min_approval: pallet_referenda::Curve::LinearDecreasing { + length: Perbill::from_percent(100), + floor: Perbill::from_percent(50), + ceil: Perbill::from_percent(100), + }, + min_support: pallet_referenda::Curve::LinearDecreasing { + length: Perbill::from_percent(100), + floor: Perbill::from_percent(0), + ceil: Perbill::from_percent(100), + }, + }, + ), + ( + tracks::GRAND_MASTERS, + pallet_referenda::TrackInfo { + name: "grand masters", + max_deciding: 10, + decision_deposit: 5 * DOLLARS, + prepare_period: 30 * MINUTES, + decision_period: 1 * DAYS, + confirm_period: 30 * MINUTES, + 
min_enactment_period: 5 * MINUTES, + min_approval: pallet_referenda::Curve::LinearDecreasing { + length: Perbill::from_percent(100), + floor: Perbill::from_percent(50), + ceil: Perbill::from_percent(100), + }, + min_support: pallet_referenda::Curve::LinearDecreasing { + length: Perbill::from_percent(100), + floor: Perbill::from_percent(0), + ceil: Perbill::from_percent(100), + }, + }, + ), + ( + tracks::RETAIN_AT_1DAN, + pallet_referenda::TrackInfo { + name: "retain at I Dan", + max_deciding: RETAIN_MAX_DECIDING, + decision_deposit: RETAIN_DECISION_DEPOSIT, + prepare_period: RETAIN_PREPARE_PERIOD, + decision_period: RETAIN_DECISION_PERIOD, + confirm_period: RETAIN_CONFIRM_PERIOD, + min_enactment_period: RETAIN_MIN_ENACTMENT_PERIOD, + min_approval: RETAIN_MIN_APPROVAL, + min_support: RETAIN_MIN_SUPPORT, + }, + ), + ( + tracks::RETAIN_AT_2DAN, + pallet_referenda::TrackInfo { + name: "retain at II Dan", + max_deciding: RETAIN_MAX_DECIDING, + decision_deposit: RETAIN_DECISION_DEPOSIT, + prepare_period: RETAIN_PREPARE_PERIOD, + decision_period: RETAIN_DECISION_PERIOD, + confirm_period: RETAIN_CONFIRM_PERIOD, + min_enactment_period: RETAIN_MIN_ENACTMENT_PERIOD, + min_approval: RETAIN_MIN_APPROVAL, + min_support: RETAIN_MIN_SUPPORT, + }, + ), + ( + tracks::RETAIN_AT_3DAN, + pallet_referenda::TrackInfo { + name: "retain at III Dan", + max_deciding: RETAIN_MAX_DECIDING, + decision_deposit: RETAIN_DECISION_DEPOSIT, + prepare_period: RETAIN_PREPARE_PERIOD, + decision_period: RETAIN_DECISION_PERIOD, + confirm_period: RETAIN_CONFIRM_PERIOD, + min_enactment_period: RETAIN_MIN_ENACTMENT_PERIOD, + min_approval: RETAIN_MIN_APPROVAL, + min_support: RETAIN_MIN_SUPPORT, + }, + ), + ( + tracks::RETAIN_AT_4DAN, + pallet_referenda::TrackInfo { + name: "retain at IV Dan", + max_deciding: RETAIN_MAX_DECIDING, + decision_deposit: RETAIN_DECISION_DEPOSIT, + prepare_period: RETAIN_PREPARE_PERIOD, + decision_period: RETAIN_DECISION_PERIOD, + confirm_period: RETAIN_CONFIRM_PERIOD, + min_enactment_period: RETAIN_MIN_ENACTMENT_PERIOD, + min_approval: RETAIN_MIN_APPROVAL, + min_support: RETAIN_MIN_SUPPORT, + }, + ), + ( + tracks::RETAIN_AT_5DAN, + pallet_referenda::TrackInfo { + name: "retain at V Dan", + max_deciding: RETAIN_MAX_DECIDING, + decision_deposit: RETAIN_DECISION_DEPOSIT, + prepare_period: RETAIN_PREPARE_PERIOD, + decision_period: RETAIN_DECISION_PERIOD, + confirm_period: RETAIN_CONFIRM_PERIOD, + min_enactment_period: RETAIN_MIN_ENACTMENT_PERIOD, + min_approval: RETAIN_MIN_APPROVAL, + min_support: RETAIN_MIN_SUPPORT, + }, + ), + ( + tracks::RETAIN_AT_6DAN, + pallet_referenda::TrackInfo { + name: "retain at VI Dan", + max_deciding: RETAIN_MAX_DECIDING, + decision_deposit: RETAIN_DECISION_DEPOSIT, + prepare_period: RETAIN_PREPARE_PERIOD, + decision_period: RETAIN_DECISION_PERIOD, + confirm_period: RETAIN_CONFIRM_PERIOD, + min_enactment_period: RETAIN_MIN_ENACTMENT_PERIOD, + min_approval: RETAIN_MIN_APPROVAL, + min_support: RETAIN_MIN_SUPPORT, + }, + ), + ( + tracks::PROMOTE_TO_1DAN, + pallet_referenda::TrackInfo { + name: "promote to I Dan", + max_deciding: PROMOTE_MAX_DECIDING, + decision_deposit: PROMOTE_DECISION_DEPOSIT, + prepare_period: PROMOTE_PREPARE_PERIOD, + decision_period: PROMOTE_DECISION_PERIOD, + confirm_period: PROMOTE_CONFIRM_PERIOD, + min_enactment_period: PROMOTE_MIN_ENACTMENT_PERIOD, + min_approval: PROMOTE_MIN_APPROVAL, + min_support: PROMOTE_MIN_SUPPORT, + }, + ), + ( + tracks::PROMOTE_TO_2DAN, + pallet_referenda::TrackInfo { + name: "promote to II Dan", + max_deciding: PROMOTE_MAX_DECIDING, 
+ decision_deposit: PROMOTE_DECISION_DEPOSIT, + prepare_period: PROMOTE_PREPARE_PERIOD, + decision_period: PROMOTE_DECISION_PERIOD, + confirm_period: PROMOTE_CONFIRM_PERIOD, + min_enactment_period: PROMOTE_MIN_ENACTMENT_PERIOD, + min_approval: PROMOTE_MIN_APPROVAL, + min_support: PROMOTE_MIN_SUPPORT, + }, + ), + ( + tracks::PROMOTE_TO_3DAN, + pallet_referenda::TrackInfo { + name: "promote to III Dan", + max_deciding: PROMOTE_MAX_DECIDING, + decision_deposit: PROMOTE_DECISION_DEPOSIT, + prepare_period: PROMOTE_PREPARE_PERIOD, + decision_period: PROMOTE_DECISION_PERIOD, + confirm_period: PROMOTE_CONFIRM_PERIOD, + min_enactment_period: PROMOTE_MIN_ENACTMENT_PERIOD, + min_approval: PROMOTE_MIN_APPROVAL, + min_support: PROMOTE_MIN_SUPPORT, + }, + ), + ( + tracks::PROMOTE_TO_4DAN, + pallet_referenda::TrackInfo { + name: "promote to IV Dan", + max_deciding: PROMOTE_MAX_DECIDING, + decision_deposit: PROMOTE_DECISION_DEPOSIT, + prepare_period: PROMOTE_PREPARE_PERIOD, + decision_period: PROMOTE_DECISION_PERIOD, + confirm_period: PROMOTE_CONFIRM_PERIOD, + min_enactment_period: PROMOTE_MIN_ENACTMENT_PERIOD, + min_approval: PROMOTE_MIN_APPROVAL, + min_support: PROMOTE_MIN_SUPPORT, + }, + ), + ( + tracks::PROMOTE_TO_5DAN, + pallet_referenda::TrackInfo { + name: "promote to V Dan", + max_deciding: PROMOTE_MAX_DECIDING, + decision_deposit: PROMOTE_DECISION_DEPOSIT, + prepare_period: PROMOTE_PREPARE_PERIOD, + decision_period: PROMOTE_DECISION_PERIOD, + confirm_period: PROMOTE_CONFIRM_PERIOD, + min_enactment_period: PROMOTE_MIN_ENACTMENT_PERIOD, + min_approval: PROMOTE_MIN_APPROVAL, + min_support: PROMOTE_MIN_SUPPORT, + }, + ), + ( + tracks::PROMOTE_TO_6DAN, + pallet_referenda::TrackInfo { + name: "promote to VI Dan", + max_deciding: PROMOTE_MAX_DECIDING, + decision_deposit: PROMOTE_DECISION_DEPOSIT, + prepare_period: PROMOTE_PREPARE_PERIOD, + decision_period: PROMOTE_DECISION_PERIOD, + confirm_period: PROMOTE_CONFIRM_PERIOD, + min_enactment_period: PROMOTE_MIN_ENACTMENT_PERIOD, + min_approval: PROMOTE_MIN_APPROVAL, + min_support: PROMOTE_MIN_SUPPORT, + }, + ), + ]; + &DATA[..] + } + fn track_for(id: &Self::RuntimeOrigin) -> Result { + use super::origins::Origin; + use constants as tracks; + + #[cfg(feature = "runtime-benchmarks")] + { + // For benchmarks, we enable a root origin. + // It is important that this is not available in production! 
+ let root: Self::RuntimeOrigin = frame_system::RawOrigin::Root.into(); + if &root == id { + return Ok(tracks::GRAND_MASTERS) + } + } + + match Origin::try_from(id.clone()) { + Ok(Origin::Members) => Ok(tracks::MEMBERS), + Ok(Origin::Fellowship2Dan) => Ok(tracks::PROFICIENTS), + Ok(Origin::Fellows) => Ok(tracks::FELLOWS), + Ok(Origin::Architects) => Ok(tracks::ARCHITECTS), + Ok(Origin::Fellowship5Dan) => Ok(tracks::ARCHITECTS_ADEPT), + Ok(Origin::Fellowship6Dan) => Ok(tracks::GRAND_ARCHITECTS), + Ok(Origin::Masters) => Ok(tracks::MASTERS), + Ok(Origin::Fellowship8Dan) => Ok(tracks::MASTERS_CONSTANT), + Ok(Origin::Fellowship9Dan) => Ok(tracks::GRAND_MASTERS), + + Ok(Origin::RetainAt1Dan) => Ok(tracks::RETAIN_AT_1DAN), + Ok(Origin::RetainAt2Dan) => Ok(tracks::RETAIN_AT_2DAN), + Ok(Origin::RetainAt3Dan) => Ok(tracks::RETAIN_AT_3DAN), + Ok(Origin::RetainAt4Dan) => Ok(tracks::RETAIN_AT_4DAN), + Ok(Origin::RetainAt5Dan) => Ok(tracks::RETAIN_AT_5DAN), + Ok(Origin::RetainAt6Dan) => Ok(tracks::RETAIN_AT_6DAN), + + Ok(Origin::PromoteTo1Dan) => Ok(tracks::PROMOTE_TO_1DAN), + Ok(Origin::PromoteTo2Dan) => Ok(tracks::PROMOTE_TO_2DAN), + Ok(Origin::PromoteTo3Dan) => Ok(tracks::PROMOTE_TO_3DAN), + Ok(Origin::PromoteTo4Dan) => Ok(tracks::PROMOTE_TO_4DAN), + Ok(Origin::PromoteTo5Dan) => Ok(tracks::PROMOTE_TO_5DAN), + Ok(Origin::PromoteTo6Dan) => Ok(tracks::PROMOTE_TO_6DAN), + + _ => Err(()), + } + } +} +pallet_referenda::impl_tracksinfo_get!(TracksInfo, Balance, BlockNumber); diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/src/impls.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/impls.rs new file mode 100644 index 00000000000..9f4c2a6a4c9 --- /dev/null +++ b/cumulus/parachains/runtimes/collectives/collectives-westend/src/impls.rs @@ -0,0 +1,229 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use crate::OriginCaller; +use frame_support::{ + dispatch::DispatchResultWithPostInfo, + traits::{Currency, Get, Imbalance, OnUnbalanced, OriginTrait, PrivilegeCmp}, + weights::Weight, +}; +use log; +use pallet_alliance::{ProposalIndex, ProposalProvider}; +use parachains_common::impls::NegativeImbalance; +use sp_runtime::DispatchError; +use sp_std::{cmp::Ordering, marker::PhantomData, prelude::*}; +use xcm::latest::{Fungibility, Junction, Parent}; + +type AccountIdOf = ::AccountId; + +type ProposalOf = >::Proposal; + +type HashOf = ::Hash; + +/// Type alias to conveniently refer to the `Currency::Balance` associated type. +pub type BalanceOf = + as Currency<::AccountId>>::Balance; + +/// Implements `OnUnbalanced::on_unbalanced` to teleport slashed assets to relay chain treasury +/// account. 
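+///
+/// The slashed imbalance is first resolved into the local `PalletAccount` and then teleported
+/// from there to the `TreasuryAccount` on the Relay Chain. If the teleport fails, the funds
+/// remain in the pallet account and a warning is logged.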
+pub struct ToParentTreasury( + PhantomData<(TreasuryAccount, PalletAccount, T)>, +); + +impl OnUnbalanced> + for ToParentTreasury +where + T: pallet_balances::Config + pallet_xcm::Config + frame_system::Config, + <::RuntimeOrigin as OriginTrait>::AccountId: From>, + [u8; 32]: From<::AccountId>, + TreasuryAccount: Get>, + PalletAccount: Get>, + BalanceOf: Into, +{ + fn on_unbalanced(amount: NegativeImbalance) { + let amount = match amount.drop_zero() { + Ok(..) => return, + Err(amount) => amount, + }; + let imbalance = amount.peek(); + let pallet_acc: AccountIdOf = PalletAccount::get(); + let treasury_acc: AccountIdOf = TreasuryAccount::get(); + + >::resolve_creating(&pallet_acc, amount); + + let result = >::teleport_assets( + <::RuntimeOrigin>::signed(pallet_acc.into()), + Box::new(Parent.into()), + Box::new( + Junction::AccountId32 { network: None, id: treasury_acc.into() } + .into_location() + .into(), + ), + Box::new((Parent, imbalance).into()), + 0, + ); + + if let Err(err) = result { + log::warn!("Failed to teleport slashed assets: {:?}", err); + } + } +} + +/// Proposal provider for alliance pallet. +/// Adapter from collective pallet to alliance proposal provider trait. +pub struct AllianceProposalProvider(PhantomData<(T, I)>); + +impl ProposalProvider, HashOf, ProposalOf> + for AllianceProposalProvider +where + T: pallet_collective::Config + frame_system::Config, + I: 'static, +{ + fn propose_proposal( + who: AccountIdOf, + threshold: u32, + proposal: Box>, + length_bound: u32, + ) -> Result<(u32, u32), DispatchError> { + pallet_collective::Pallet::::do_propose_proposed( + who, + threshold, + proposal, + length_bound, + ) + } + + fn vote_proposal( + who: AccountIdOf, + proposal: HashOf, + index: ProposalIndex, + approve: bool, + ) -> Result { + pallet_collective::Pallet::::do_vote(who, proposal, index, approve) + } + + fn close_proposal( + proposal_hash: HashOf, + proposal_index: ProposalIndex, + proposal_weight_bound: Weight, + length_bound: u32, + ) -> DispatchResultWithPostInfo { + pallet_collective::Pallet::::do_close( + proposal_hash, + proposal_index, + proposal_weight_bound, + length_bound, + ) + } + + fn proposal_of(proposal_hash: HashOf) -> Option> { + pallet_collective::Pallet::::proposal_of(proposal_hash) + } +} + +/// Used to compare the privilege of an origin inside the scheduler. +pub struct EqualOrGreatestRootCmp; + +impl PrivilegeCmp for EqualOrGreatestRootCmp { + fn cmp_privilege(left: &OriginCaller, right: &OriginCaller) -> Option { + if left == right { + return Some(Ordering::Equal) + } + match (left, right) { + // Root is greater than anything. + (OriginCaller::system(frame_system::RawOrigin::Root), _) => Some(Ordering::Greater), + _ => None, + } + } +} + +#[cfg(feature = "runtime-benchmarks")] +pub mod benchmarks { + use super::*; + use crate::ParachainSystem; + use cumulus_primitives_core::{ChannelStatus, GetChannelInfo}; + use frame_support::traits::{ + fungible, + tokens::{Pay, PaymentStatus}, + }; + use pallet_ranked_collective::Rank; + use parachains_common::{AccountId, Balance}; + use sp_runtime::traits::Convert; + + /// Rank to salary conversion helper type. + pub struct RankToSalary(PhantomData); + impl Convert for RankToSalary + where + Fungible: fungible::Inspect, + { + fn convert(r: Rank) -> Balance { + Balance::from(r).saturating_mul(Fungible::minimum_balance()) + } + } + + /// Trait for setting up any prerequisites for successful execution of benchmarks. 
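+	///
+	/// Implementations perform whatever runtime setup the benchmarked call needs before it is
+	/// dispatched, e.g. opening an HRMP channel as [`OpenHrmpChannel`] below does. A hypothetical
+	/// no-op implementation would be:
+	///
+	/// ```ignore
+	/// pub struct NoSetup;
+	/// impl EnsureSuccessful for NoSetup {
+	///     fn ensure_successful() {}
+	/// }
+	/// ```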
+ pub trait EnsureSuccessful { + fn ensure_successful(); + } + + /// Implementation of the [`EnsureSuccessful`] trait which opens an HRMP channel between + /// the Collectives and a parachain with a given ID. + pub struct OpenHrmpChannel(PhantomData); + impl> EnsureSuccessful for OpenHrmpChannel { + fn ensure_successful() { + if let ChannelStatus::Closed = ParachainSystem::get_channel_status(I::get().into()) { + ParachainSystem::open_outbound_hrmp_channel_for_benchmarks_or_tests(I::get().into()) + } + } + } + + /// Type that wraps a type implementing the [`Pay`] trait to decorate its + /// [`Pay::ensure_successful`] function with a provided implementation of the + /// [`EnsureSuccessful`] trait. + pub struct PayWithEnsure(PhantomData<(O, E)>); + impl Pay for PayWithEnsure + where + O: Pay, + E: EnsureSuccessful, + { + type AssetKind = O::AssetKind; + type Balance = O::Balance; + type Beneficiary = O::Beneficiary; + type Error = O::Error; + type Id = O::Id; + + fn pay( + who: &Self::Beneficiary, + asset_kind: Self::AssetKind, + amount: Self::Balance, + ) -> Result { + O::pay(who, asset_kind, amount) + } + fn check_payment(id: Self::Id) -> PaymentStatus { + O::check_payment(id) + } + fn ensure_successful( + who: &Self::Beneficiary, + asset_kind: Self::AssetKind, + amount: Self::Balance, + ) { + E::ensure_successful(); + O::ensure_successful(who, asset_kind, amount) + } + fn ensure_concluded(id: Self::Id) { + O::ensure_concluded(id) + } + } +} diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/src/lib.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/lib.rs new file mode 100644 index 00000000000..8c5593e154d --- /dev/null +++ b/cumulus/parachains/runtimes/collectives/collectives-westend/src/lib.rs @@ -0,0 +1,1023 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! # Collectives Parachain +//! +//! This parachain is for collectives that serve the Westend network. +//! Each collective is defined by a specialized (possibly instanced) pallet. +//! +//! ### Governance +//! +//! As a system parachain, Collectives defers its governance (namely, its `Root` origin), to +//! its Relay Chain parent, Westend. +//! +//! ### Collator Selection +//! +//! Collectives uses `pallet-collator-selection`, a simple first-come-first-served registration +//! system where collators can reserve a small bond to join the block producer set. There is no +//! slashing. Collective members are generally expected to run collators. + +#![cfg_attr(not(feature = "std"), no_std)] +#![recursion_limit = "256"] + +// Make the WASM binary available. +#[cfg(feature = "std")] +include!(concat!(env!("OUT_DIR"), "/wasm_binary.rs")); + +pub mod ambassador; +pub mod impls; +mod weights; +pub mod xcm_config; +// Fellowship configurations. 
+pub mod fellowship; +pub use ambassador::pallet_ambassador_origins; + +use cumulus_pallet_parachain_system::RelayNumberStrictlyIncreases; +use fellowship::{pallet_fellowship_origins, Fellows}; +use impls::{AllianceProposalProvider, EqualOrGreatestRootCmp, ToParentTreasury}; +use sp_api::impl_runtime_apis; +use sp_core::{crypto::KeyTypeId, OpaqueMetadata}; +use sp_runtime::{ + create_runtime_str, generic, impl_opaque_keys, + traits::{AccountIdConversion, AccountIdLookup, BlakeTwo256, Block as BlockT}, + transaction_validity::{TransactionSource, TransactionValidity}, + ApplyExtrinsicResult, Perbill, +}; + +use sp_std::prelude::*; +#[cfg(feature = "std")] +use sp_version::NativeVersion; +use sp_version::RuntimeVersion; + +use codec::{Decode, Encode, MaxEncodedLen}; +use cumulus_primitives_core::{AggregateMessageOrigin, ParaId}; +use frame_support::{ + construct_runtime, + dispatch::DispatchClass, + genesis_builder_helper::{build_config, create_default_config}, + parameter_types, + traits::{ + fungible::HoldConsideration, ConstBool, ConstU16, ConstU32, ConstU64, ConstU8, + EitherOfDiverse, InstanceFilter, LinearStoragePrice, TransformOrigin, + }, + weights::{ConstantMultiplier, Weight}, + PalletId, +}; +use frame_system::{ + limits::{BlockLength, BlockWeights}, + EnsureRoot, +}; +pub use parachains_common as common; +use parachains_common::{ + impls::DealWithFees, message_queue::*, AccountId, AuraId, Balance, BlockNumber, Hash, Header, + Nonce, Signature, AVERAGE_ON_INITIALIZE_RATIO, DAYS, HOURS, MAXIMUM_BLOCK_WEIGHT, MINUTES, + NORMAL_DISPATCH_RATIO, SLOT_DURATION, +}; +use sp_runtime::RuntimeDebug; +use testnets_common::westend::{account::*, consensus::*, currency::*, fee::WeightToFee}; +use xcm_config::{GovernanceLocation, XcmOriginToTransactDispatchOrigin}; + +#[cfg(any(feature = "std", test))] +pub use sp_runtime::BuildStorage; + +// Polkadot imports +use pallet_xcm::{EnsureXcm, IsVoiceOfBody}; +use polkadot_runtime_common::{BlockHashCount, SlowAdjustingFeeUpdate}; +use xcm::latest::{prelude::*, BodyId}; + +use weights::{BlockExecutionWeight, ExtrinsicBaseWeight, RocksDbWeight}; + +impl_opaque_keys! { + pub struct SessionKeys { + pub aura: Aura, + } +} + +#[sp_version::runtime_version] +pub const VERSION: RuntimeVersion = RuntimeVersion { + spec_name: create_runtime_str!("collectives-westend"), + impl_name: create_runtime_str!("collectives-westend"), + authoring_version: 1, + spec_version: 1_003_000, + impl_version: 0, + apis: RUNTIME_API_VERSIONS, + transaction_version: 5, + state_version: 0, +}; + +/// The version information used to identify this runtime when compiled natively. +#[cfg(feature = "std")] +pub fn native_version() -> NativeVersion { + NativeVersion { runtime_version: VERSION, can_author_with: Default::default() } +} + +/// Privileged origin that represents Root or more than two thirds of the Alliance. +pub type RootOrAllianceTwoThirdsMajority = EitherOfDiverse< + EnsureRoot, + pallet_collective::EnsureProportionMoreThan, +>; + +parameter_types! 
{ + pub const Version: RuntimeVersion = VERSION; + pub RuntimeBlockLength: BlockLength = + BlockLength::max_with_normal_ratio(5 * 1024 * 1024, NORMAL_DISPATCH_RATIO); + pub RuntimeBlockWeights: BlockWeights = BlockWeights::builder() + .base_block(BlockExecutionWeight::get()) + .for_class(DispatchClass::all(), |weights| { + weights.base_extrinsic = ExtrinsicBaseWeight::get(); + }) + .for_class(DispatchClass::Normal, |weights| { + weights.max_total = Some(NORMAL_DISPATCH_RATIO * MAXIMUM_BLOCK_WEIGHT); + }) + .for_class(DispatchClass::Operational, |weights| { + weights.max_total = Some(MAXIMUM_BLOCK_WEIGHT); + // Operational transactions have some extra reserved space, so that they + // are included even if block reached `MAXIMUM_BLOCK_WEIGHT`. + weights.reserved = Some( + MAXIMUM_BLOCK_WEIGHT - NORMAL_DISPATCH_RATIO * MAXIMUM_BLOCK_WEIGHT + ); + }) + .avg_block_initialization(AVERAGE_ON_INITIALIZE_RATIO) + .build_or_panic(); +} + +// Configure FRAME pallets to include in runtime. +impl frame_system::Config for Runtime { + type BaseCallFilter = frame_support::traits::Everything; + type BlockWeights = RuntimeBlockWeights; + type BlockLength = RuntimeBlockLength; + type AccountId = AccountId; + type RuntimeCall = RuntimeCall; + type Lookup = AccountIdLookup; + type Nonce = Nonce; + type Hash = Hash; + type Hashing = BlakeTwo256; + type Block = Block; + type RuntimeEvent = RuntimeEvent; + type RuntimeOrigin = RuntimeOrigin; + type BlockHashCount = BlockHashCount; + type DbWeight = RocksDbWeight; + type Version = Version; + type PalletInfo = PalletInfo; + type OnNewAccount = (); + type OnKilledAccount = (); + type AccountData = pallet_balances::AccountData; + type SystemWeightInfo = weights::frame_system::WeightInfo; + type SS58Prefix = ConstU16<0>; + type OnSetCode = cumulus_pallet_parachain_system::ParachainSetCode; + type MaxConsumers = frame_support::traits::ConstU32<16>; +} + +impl pallet_timestamp::Config for Runtime { + /// A timestamp: milliseconds since the unix epoch. + type Moment = u64; + type OnTimestampSet = Aura; + type MinimumPeriod = ConstU64<{ SLOT_DURATION / 2 }>; + type WeightInfo = weights::pallet_timestamp::WeightInfo; +} + +impl pallet_authorship::Config for Runtime { + type FindAuthor = pallet_session::FindAccountFromAuthorIndex; + type EventHandler = (CollatorSelection,); +} + +parameter_types! { + pub const ExistentialDeposit: Balance = EXISTENTIAL_DEPOSIT; +} + +impl pallet_balances::Config for Runtime { + type MaxLocks = ConstU32<50>; + /// The type for recording an account's balance. + type Balance = Balance; + /// The ubiquitous event type. + type RuntimeEvent = RuntimeEvent; + type DustRemoval = (); + type ExistentialDeposit = ExistentialDeposit; + type AccountStore = System; + type WeightInfo = weights::pallet_balances::WeightInfo; + type MaxReserves = ConstU32<50>; + type ReserveIdentifier = [u8; 8]; + type RuntimeHoldReason = RuntimeHoldReason; + type RuntimeFreezeReason = RuntimeFreezeReason; + type FreezeIdentifier = (); + type MaxHolds = ConstU32<1>; + type MaxFreezes = ConstU32<0>; +} + +parameter_types! 
{ + /// Relay Chain `TransactionByteFee` / 10 + pub const TransactionByteFee: Balance = MILLICENTS; +} + +impl pallet_transaction_payment::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + type OnChargeTransaction = + pallet_transaction_payment::CurrencyAdapter>; + type WeightToFee = WeightToFee; + type LengthToFee = ConstantMultiplier; + type FeeMultiplierUpdate = SlowAdjustingFeeUpdate; + type OperationalFeeMultiplier = ConstU8<5>; +} + +parameter_types! { + // One storage item; key size is 32; value is size 4+4+16+32 bytes = 56 bytes. + pub const DepositBase: Balance = deposit(1, 88); + // Additional storage item size of 32 bytes. + pub const DepositFactor: Balance = deposit(0, 32); +} + +impl pallet_multisig::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + type RuntimeCall = RuntimeCall; + type Currency = Balances; + type DepositBase = DepositBase; + type DepositFactor = DepositFactor; + type MaxSignatories = ConstU32<100>; + type WeightInfo = weights::pallet_multisig::WeightInfo; +} + +impl pallet_utility::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + type RuntimeCall = RuntimeCall; + type PalletsOrigin = OriginCaller; + type WeightInfo = weights::pallet_utility::WeightInfo; +} + +parameter_types! { + // One storage item; key size 32, value size 8; . + pub const ProxyDepositBase: Balance = deposit(1, 40); + // Additional storage item size of 33 bytes. + pub const ProxyDepositFactor: Balance = deposit(0, 33); + // One storage item; key size 32, value size 16 + pub const AnnouncementDepositBase: Balance = deposit(1, 48); + pub const AnnouncementDepositFactor: Balance = deposit(0, 66); +} + +/// The type used to represent the kinds of proxying allowed. +#[derive( + Copy, + Clone, + Eq, + PartialEq, + Ord, + PartialOrd, + Encode, + Decode, + RuntimeDebug, + MaxEncodedLen, + scale_info::TypeInfo, +)] +pub enum ProxyType { + /// Fully permissioned proxy. Can execute any call on behalf of _proxied_. + Any, + /// Can execute any call that does not transfer funds. + NonTransfer, + /// Proxy with the ability to reject time-delay proxy announcements. + CancelProxy, + /// Collator selection proxy. Can execute calls related to collator selection mechanism. + Collator, + /// Alliance proxy. Allows calls related to the Alliance. + Alliance, + /// Fellowship proxy. Allows calls related to the Fellowship. + Fellowship, + /// Ambassador proxy. Allows calls related to the Ambassador Program. + Ambassador, +} +impl Default for ProxyType { + fn default() -> Self { + Self::Any + } +} +impl InstanceFilter for ProxyType { + fn filter(&self, c: &RuntimeCall) -> bool { + match self { + ProxyType::Any => true, + ProxyType::NonTransfer => !matches!(c, RuntimeCall::Balances { .. }), + ProxyType::CancelProxy => matches!( + c, + RuntimeCall::Proxy(pallet_proxy::Call::reject_announcement { .. }) | + RuntimeCall::Utility { .. } | + RuntimeCall::Multisig { .. } + ), + ProxyType::Collator => matches!( + c, + RuntimeCall::CollatorSelection { .. } | + RuntimeCall::Utility { .. } | + RuntimeCall::Multisig { .. } + ), + ProxyType::Alliance => matches!( + c, + RuntimeCall::AllianceMotion { .. } | + RuntimeCall::Alliance { .. } | + RuntimeCall::Utility { .. } | + RuntimeCall::Multisig { .. } + ), + ProxyType::Fellowship => matches!( + c, + RuntimeCall::FellowshipCollective { .. } | + RuntimeCall::FellowshipReferenda { .. } | + RuntimeCall::FellowshipCore { .. } | + RuntimeCall::FellowshipSalary { .. } | + RuntimeCall::Utility { .. } | + RuntimeCall::Multisig { .. 
} + ), + ProxyType::Ambassador => matches!( + c, + RuntimeCall::AmbassadorCollective { .. } | + RuntimeCall::AmbassadorReferenda { .. } | + RuntimeCall::AmbassadorContent { .. } | + RuntimeCall::AmbassadorCore { .. } | + RuntimeCall::AmbassadorSalary { .. } | + RuntimeCall::Utility { .. } | + RuntimeCall::Multisig { .. } + ), + } + } + fn is_superset(&self, o: &Self) -> bool { + match (self, o) { + (x, y) if x == y => true, + (ProxyType::Any, _) => true, + (_, ProxyType::Any) => false, + (ProxyType::NonTransfer, _) => true, + _ => false, + } + } +} + +impl pallet_proxy::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + type RuntimeCall = RuntimeCall; + type Currency = Balances; + type ProxyType = ProxyType; + type ProxyDepositBase = ProxyDepositBase; + type ProxyDepositFactor = ProxyDepositFactor; + type MaxProxies = ConstU32<32>; + type WeightInfo = weights::pallet_proxy::WeightInfo; + type MaxPending = ConstU32<32>; + type CallHasher = BlakeTwo256; + type AnnouncementDepositBase = AnnouncementDepositBase; + type AnnouncementDepositFactor = AnnouncementDepositFactor; +} + +parameter_types! { + pub const ReservedXcmpWeight: Weight = MAXIMUM_BLOCK_WEIGHT.saturating_div(4); + pub const ReservedDmpWeight: Weight = MAXIMUM_BLOCK_WEIGHT.saturating_div(4); +} + +impl cumulus_pallet_parachain_system::Config for Runtime { + type WeightInfo = weights::cumulus_pallet_parachain_system::WeightInfo; + type RuntimeEvent = RuntimeEvent; + type OnSystemEvent = (); + type SelfParaId = parachain_info::Pallet; + type DmpQueue = frame_support::traits::EnqueueWithOrigin; + type ReservedDmpWeight = ReservedDmpWeight; + type OutboundXcmpMessageSource = XcmpQueue; + type XcmpMessageHandler = XcmpQueue; + type ReservedXcmpWeight = ReservedXcmpWeight; + type CheckAssociatedRelayNumber = RelayNumberStrictlyIncreases; + type ConsensusHook = cumulus_pallet_aura_ext::FixedVelocityConsensusHook< + Runtime, + RELAY_CHAIN_SLOT_DURATION_MILLIS, + BLOCK_PROCESSING_VELOCITY, + UNINCLUDED_SEGMENT_CAPACITY, + >; +} + +impl parachain_info::Config for Runtime {} + +parameter_types! { + pub MessageQueueServiceWeight: Weight = Perbill::from_percent(35) * RuntimeBlockWeights::get().max_block; +} + +impl pallet_message_queue::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + type WeightInfo = weights::pallet_message_queue::WeightInfo; + #[cfg(feature = "runtime-benchmarks")] + type MessageProcessor = pallet_message_queue::mock_helpers::NoopMessageProcessor< + cumulus_primitives_core::AggregateMessageOrigin, + >; + #[cfg(not(feature = "runtime-benchmarks"))] + type MessageProcessor = xcm_builder::ProcessXcmMessage< + AggregateMessageOrigin, + xcm_executor::XcmExecutor, + RuntimeCall, + >; + type Size = u32; + // The XCMP queue pallet is only ever able to handle the `Sibling(ParaId)` origin: + type QueueChangeHandler = NarrowOriginToSibling; + type QueuePausedQuery = NarrowOriginToSibling; + type HeapSize = sp_core::ConstU32<{ 64 * 1024 }>; + type MaxStale = sp_core::ConstU32<8>; + type ServiceWeight = MessageQueueServiceWeight; +} + +impl cumulus_pallet_aura_ext::Config for Runtime {} + +parameter_types! { + /// The asset ID for the asset that we use to pay for message delivery fees. + pub FeeAssetId: AssetId = Concrete(xcm_config::WndLocation::get()); + /// The base fee for the message delivery fees. 
+	pub const BaseDeliveryFee: u128 = CENTS.saturating_mul(3);
+}
+
+pub type PriceForSiblingParachainDelivery = polkadot_runtime_common::xcm_sender::ExponentialPrice<
+	FeeAssetId,
+	BaseDeliveryFee,
+	TransactionByteFee,
+	XcmpQueue,
+>;
+
+impl cumulus_pallet_xcmp_queue::Config for Runtime {
+	type RuntimeEvent = RuntimeEvent;
+	type ChannelInfo = ParachainSystem;
+	type VersionWrapper = PolkadotXcm;
+	// Enqueue XCMP messages from siblings for later processing.
+	type XcmpQueue = TransformOrigin<MessageQueue, AggregateMessageOrigin, ParaId, ParaIdToSibling>;
+	type MaxInboundSuspended = sp_core::ConstU32<1_000>;
+	type ControllerOrigin = EitherOfDiverse<EnsureRoot<AccountId>, Fellows>;
+	type ControllerOriginConverter = XcmOriginToTransactDispatchOrigin;
+	type WeightInfo = weights::cumulus_pallet_xcmp_queue::WeightInfo<Runtime>;
+	type PriceForSiblingDelivery = PriceForSiblingParachainDelivery;
+}
+
+parameter_types! {
+	pub const RelayOrigin: AggregateMessageOrigin = AggregateMessageOrigin::Parent;
+}
+
+impl cumulus_pallet_dmp_queue::Config for Runtime {
+	type WeightInfo = weights::cumulus_pallet_dmp_queue::WeightInfo<Runtime>;
+	type RuntimeEvent = RuntimeEvent;
+	type DmpSink = frame_support::traits::EnqueueWithOrigin<MessageQueue, RelayOrigin>;
+}
+
+pub const PERIOD: u32 = 6 * HOURS;
+pub const OFFSET: u32 = 0;
+
+impl pallet_session::Config for Runtime {
+	type RuntimeEvent = RuntimeEvent;
+	type ValidatorId = <Self as frame_system::Config>::AccountId;
+	// we don't have stash and controller, thus we don't need the convert as well.
+	type ValidatorIdOf = pallet_collator_selection::IdentityCollator;
+	type ShouldEndSession = pallet_session::PeriodicSessions<ConstU32<PERIOD>, ConstU32<OFFSET>>;
+	type NextSessionRotation = pallet_session::PeriodicSessions<ConstU32<PERIOD>, ConstU32<OFFSET>>;
+	type SessionManager = CollatorSelection;
+	// Essentially just Aura, but let's be pedantic.
+	type SessionHandler = <SessionKeys as sp_runtime::traits::OpaqueKeys>::KeyTypeIdProviders;
+	type Keys = SessionKeys;
+	type WeightInfo = weights::pallet_session::WeightInfo<Runtime>;
+}
+
+impl pallet_aura::Config for Runtime {
+	type AuthorityId = AuraId;
+	type DisabledValidators = ();
+	type MaxAuthorities = ConstU32<100_000>;
+	type AllowMultipleBlocksPerSlot = ConstBool<false>;
+	#[cfg(feature = "experimental")]
+	type SlotDuration = pallet_aura::MinimumPeriodTimesTwo<Self>;
+}
+
+parameter_types! {
+	pub const PotId: PalletId = PalletId(*b"PotStake");
+	pub const SessionLength: BlockNumber = 6 * HOURS;
+	// `StakingAdmin` pluralistic body.
+	pub const StakingAdminBodyId: BodyId = BodyId::Defense;
+}
+
+/// We allow root and the `StakingAdmin` to execute privileged collator selection operations.
+pub type CollatorSelectionUpdateOrigin = EitherOfDiverse<
+	EnsureRoot<AccountId>,
+	EnsureXcm<IsVoiceOfBody<GovernanceLocation, StakingAdminBodyId>>,
+>;
+
+impl pallet_collator_selection::Config for Runtime {
+	type RuntimeEvent = RuntimeEvent;
+	type Currency = Balances;
+	type UpdateOrigin = CollatorSelectionUpdateOrigin;
+	type PotId = PotId;
+	type MaxCandidates = ConstU32<100>;
+	type MinEligibleCollators = ConstU32<4>;
+	type MaxInvulnerables = ConstU32<20>;
+	// should be a multiple of session or things will get inconsistent
+	type KickThreshold = ConstU32<PERIOD>;
+	type ValidatorId = <Self as frame_system::Config>::AccountId;
+	type ValidatorIdOf = pallet_collator_selection::IdentityCollator;
+	type ValidatorRegistration = Session;
+	type WeightInfo = weights::pallet_collator_selection::WeightInfo<Runtime>;
+}
+
+pub const ALLIANCE_MOTION_DURATION: BlockNumber = 5 * DAYS;
+
+parameter_types!
{
+	pub const AllianceMotionDuration: BlockNumber = ALLIANCE_MOTION_DURATION;
+	pub MaxProposalWeight: Weight = Perbill::from_percent(50) * RuntimeBlockWeights::get().max_block;
+}
+pub const ALLIANCE_MAX_PROPOSALS: u32 = 100;
+pub const ALLIANCE_MAX_MEMBERS: u32 = 100;
+
+type AllianceCollective = pallet_collective::Instance1;
+impl pallet_collective::Config<AllianceCollective> for Runtime {
+	type RuntimeOrigin = RuntimeOrigin;
+	type Proposal = RuntimeCall;
+	type RuntimeEvent = RuntimeEvent;
+	type MotionDuration = AllianceMotionDuration;
+	type MaxProposals = ConstU32<ALLIANCE_MAX_PROPOSALS>;
+	type MaxMembers = ConstU32<ALLIANCE_MAX_MEMBERS>;
+	type DefaultVote = pallet_collective::MoreThanMajorityThenPrimeDefaultVote;
+	type SetMembersOrigin = EnsureRoot<AccountId>;
+	type WeightInfo = weights::pallet_collective::WeightInfo<Runtime>;
+	type MaxProposalWeight = MaxProposalWeight;
+}
+
+pub const MAX_FELLOWS: u32 = ALLIANCE_MAX_MEMBERS;
+pub const MAX_ALLIES: u32 = 100;
+
+parameter_types! {
+	pub const AllyDeposit: Balance = 1_000 * UNITS; // 1,000 WND bond to join as an Ally
+	// The Alliance pallet account, used as a temporary place to deposit a slashed imbalance
+	// before the teleport to the Treasury.
+	pub AlliancePalletAccount: AccountId = ALLIANCE_PALLET_ID.into_account_truncating();
+	pub WestendTreasuryAccount: AccountId = WESTEND_TREASURY_PALLET_ID.into_account_truncating();
+	// The number of blocks a member must wait between giving a retirement notice and retiring.
+	// Supposed to be greater than time required to `kick_member` with alliance motion.
+	pub const AllianceRetirementPeriod: BlockNumber = (90 * DAYS) + ALLIANCE_MOTION_DURATION;
+}
+
+impl pallet_alliance::Config for Runtime {
+	type RuntimeEvent = RuntimeEvent;
+	type Proposal = RuntimeCall;
+	type AdminOrigin = RootOrAllianceTwoThirdsMajority;
+	type MembershipManager = RootOrAllianceTwoThirdsMajority;
+	type AnnouncementOrigin = RootOrAllianceTwoThirdsMajority;
+	type Currency = Balances;
+	type Slashed = ToParentTreasury<WestendTreasuryAccount, AlliancePalletAccount, Runtime>;
+	type InitializeMembers = AllianceMotion;
+	type MembershipChanged = AllianceMotion;
+	type RetirementPeriod = AllianceRetirementPeriod;
+	type IdentityVerifier = (); // Don't block accounts on identity criteria
+	type ProposalProvider = AllianceProposalProvider;
+	type MaxProposals = ConstU32<ALLIANCE_MAX_PROPOSALS>;
+	type MaxFellows = ConstU32<MAX_FELLOWS>;
+	type MaxAllies = ConstU32<MAX_ALLIES>;
+	type MaxUnscrupulousItems = ConstU32<100>;
+	type MaxWebsiteUrlLength = ConstU32<255>;
+	type MaxAnnouncementsCount = ConstU32<100>;
+	type MaxMembersCount = ConstU32<ALLIANCE_MAX_MEMBERS>;
+	type AllyDeposit = AllyDeposit;
+	type WeightInfo = weights::pallet_alliance::WeightInfo<Runtime>;
+}
+
+parameter_types! {
+	pub MaximumSchedulerWeight: Weight = Perbill::from_percent(80) * RuntimeBlockWeights::get().max_block;
+}
+
+#[cfg(not(feature = "runtime-benchmarks"))]
+parameter_types! {
+	pub const MaxScheduledPerBlock: u32 = 50;
+}
+
+#[cfg(feature = "runtime-benchmarks")]
+parameter_types! {
+	pub const MaxScheduledPerBlock: u32 = 200;
+}
+
+impl pallet_scheduler::Config for Runtime {
+	type RuntimeOrigin = RuntimeOrigin;
+	type RuntimeEvent = RuntimeEvent;
+	type PalletsOrigin = OriginCaller;
+	type RuntimeCall = RuntimeCall;
+	type MaximumWeight = MaximumSchedulerWeight;
+	type ScheduleOrigin = EnsureRoot<AccountId>;
+	type MaxScheduledPerBlock = MaxScheduledPerBlock;
+	type WeightInfo = weights::pallet_scheduler::WeightInfo<Runtime>;
+	type OriginPrivilegeCmp = EqualOrGreatestRootCmp;
+	type Preimages = Preimage;
+}
+
+parameter_types!
{
+	pub const PreimageBaseDeposit: Balance = deposit(2, 64);
+	pub const PreimageByteDeposit: Balance = deposit(0, 1);
+	pub const PreimageHoldReason: RuntimeHoldReason = RuntimeHoldReason::Preimage(pallet_preimage::HoldReason::Preimage);
+}
+
+impl pallet_preimage::Config for Runtime {
+	type WeightInfo = weights::pallet_preimage::WeightInfo<Runtime>;
+	type RuntimeEvent = RuntimeEvent;
+	type Currency = Balances;
+	type ManagerOrigin = EnsureRoot<AccountId>;
+	type Consideration = HoldConsideration<
+		AccountId,
+		Balances,
+		PreimageHoldReason,
+		LinearStoragePrice<PreimageBaseDeposit, PreimageByteDeposit, Balance>,
+	>;
+}
+
+// Create the runtime by composing the FRAME pallets that were previously configured.
+construct_runtime!(
+	pub enum Runtime
+	{
+		// System support stuff.
+		System: frame_system::{Pallet, Call, Config<T>, Storage, Event<T>} = 0,
+		ParachainSystem: cumulus_pallet_parachain_system::{
+			Pallet, Call, Config<T>, Storage, Inherent, Event<T>, ValidateUnsigned,
+		} = 1,
+		Timestamp: pallet_timestamp::{Pallet, Call, Storage, Inherent} = 2,
+		ParachainInfo: parachain_info::{Pallet, Storage, Config<T>} = 3,
+
+		// Monetary stuff.
+		Balances: pallet_balances::{Pallet, Call, Storage, Config<T>, Event<T>} = 10,
+		TransactionPayment: pallet_transaction_payment::{Pallet, Storage, Event<T>} = 11,
+
+		// Collator support. the order of these 5 are important and shall not change.
+		Authorship: pallet_authorship::{Pallet, Storage} = 20,
+		CollatorSelection: pallet_collator_selection::{Pallet, Call, Storage, Event<T>, Config<T>} = 21,
+		Session: pallet_session::{Pallet, Call, Storage, Event, Config<T>} = 22,
+		Aura: pallet_aura::{Pallet, Storage, Config<T>} = 23,
+		AuraExt: cumulus_pallet_aura_ext::{Pallet, Storage, Config<T>} = 24,
+
+		// XCM helpers.
+		XcmpQueue: cumulus_pallet_xcmp_queue::{Pallet, Call, Storage, Event<T>} = 30,
+		PolkadotXcm: pallet_xcm::{Pallet, Call, Storage, Event<T>, Origin, Config<T>} = 31,
+		CumulusXcm: cumulus_pallet_xcm::{Pallet, Event<T>, Origin} = 32,
+		DmpQueue: cumulus_pallet_dmp_queue::{Pallet, Call, Storage, Event<T>} = 33,
+		MessageQueue: pallet_message_queue::{Pallet, Call, Storage, Event<T>} = 34,
+
+		// Handy utilities.
+		Utility: pallet_utility::{Pallet, Call, Event} = 40,
+		Multisig: pallet_multisig::{Pallet, Call, Storage, Event<T>} = 41,
+		Proxy: pallet_proxy::{Pallet, Call, Storage, Event<T>} = 42,
+		Preimage: pallet_preimage::{Pallet, Call, Storage, Event<T>, HoldReason} = 43,
+		Scheduler: pallet_scheduler::{Pallet, Call, Storage, Event<T>} = 44,
+
+		// The main stage.
+
+		// The Alliance.
+		Alliance: pallet_alliance::{Pallet, Call, Storage, Event<T>, Config<T>} = 50,
+		AllianceMotion: pallet_collective::<Instance1>::{Pallet, Call, Storage, Origin<T>, Event<T>, Config<T>} = 51,
+
+		// The Fellowship.
+		// pub type FellowshipCollectiveInstance = pallet_ranked_collective::Instance1;
+		FellowshipCollective: pallet_ranked_collective::<Instance1>::{Pallet, Call, Storage, Event<T>} = 60,
+		// pub type FellowshipReferendaInstance = pallet_referenda::Instance1;
+		FellowshipReferenda: pallet_referenda::<Instance1>::{Pallet, Call, Storage, Event<T>} = 61,
+		FellowshipOrigins: pallet_fellowship_origins::{Origin} = 62,
+		// pub type FellowshipCoreInstance = pallet_core_fellowship::Instance1;
+		FellowshipCore: pallet_core_fellowship::<Instance1>::{Pallet, Call, Storage, Event<T>} = 63,
+		// pub type FellowshipSalaryInstance = pallet_salary::Instance1;
+		FellowshipSalary: pallet_salary::<Instance1>::{Pallet, Call, Storage, Event<T>} = 64,
+
+		// Ambassador Program.
+		AmbassadorCollective: pallet_ranked_collective::<Instance2>::{Pallet, Call, Storage, Event<T>} = 70,
+		AmbassadorReferenda: pallet_referenda::<Instance2>::{Pallet, Call, Storage, Event<T>} = 71,
+		AmbassadorOrigins: pallet_ambassador_origins::{Origin} = 72,
+		AmbassadorCore: pallet_core_fellowship::<Instance2>::{Pallet, Call, Storage, Event<T>} = 73,
+		AmbassadorSalary: pallet_salary::<Instance2>::{Pallet, Call, Storage, Event<T>} = 74,
+		AmbassadorContent: pallet_collective_content::<Instance1>::{Pallet, Call, Storage, Event<T>} = 75,
+	}
+);
+
+/// The address format for describing accounts.
+pub type Address = sp_runtime::MultiAddress<AccountId, ()>;
+/// Block type as expected by this runtime.
+pub type Block = generic::Block<Header, UncheckedExtrinsic>;
+/// A Block signed with a Justification
+pub type SignedBlock = generic::SignedBlock<Block>;
+/// BlockId type as expected by this runtime.
+pub type BlockId = generic::BlockId<Block>;
+/// The SignedExtension to the basic transaction logic.
+pub type SignedExtra = (
+	frame_system::CheckNonZeroSender<Runtime>,
+	frame_system::CheckSpecVersion<Runtime>,
+	frame_system::CheckTxVersion<Runtime>,
+	frame_system::CheckGenesis<Runtime>,
+	frame_system::CheckEra<Runtime>,
+	frame_system::CheckNonce<Runtime>,
+	frame_system::CheckWeight<Runtime>,
+);
+/// Unchecked extrinsic type as expected by this runtime.
+pub type UncheckedExtrinsic =
+	generic::UncheckedExtrinsic<Address, RuntimeCall, Signature, SignedExtra>;
+/// All migrations executed on runtime upgrade as a nested tuple of types implementing
+/// `OnRuntimeUpgrade`. Included migrations must be idempotent.
+type Migrations = (
+	// unreleased
+	pallet_collator_selection::migration::v1::MigrateToV1<Runtime>,
+);
+
+/// Executive: handles dispatch to the various modules.
+pub type Executive = frame_executive::Executive<
+	Runtime,
+	Block,
+	frame_system::ChainContext<Runtime>,
+	Runtime,
+	AllPalletsWithSystem,
+	Migrations,
+>;
+
+#[cfg(feature = "runtime-benchmarks")]
+mod benches {
+	frame_benchmarking::define_benchmarks!(
+		[frame_system, SystemBench::<Runtime>]
+		[pallet_balances, Balances]
+		[pallet_message_queue, MessageQueue]
+		[pallet_multisig, Multisig]
+		[pallet_proxy, Proxy]
+		[pallet_session, SessionBench::<Runtime>]
+		[pallet_utility, Utility]
+		[pallet_timestamp, Timestamp]
+		[pallet_collator_selection, CollatorSelection]
+		[cumulus_pallet_parachain_system, ParachainSystem]
+		[cumulus_pallet_xcmp_queue, XcmpQueue]
+		[cumulus_pallet_dmp_queue, DmpQueue]
+		[pallet_alliance, Alliance]
+		[pallet_collective, AllianceMotion]
+		[pallet_xcm, PalletXcmExtrinsicsBenchmark::<Runtime>]
+		[pallet_preimage, Preimage]
+		[pallet_scheduler, Scheduler]
+		[pallet_referenda, FellowshipReferenda]
+		[pallet_ranked_collective, FellowshipCollective]
+		[pallet_core_fellowship, FellowshipCore]
+		[pallet_salary, FellowshipSalary]
+		[pallet_referenda, AmbassadorReferenda]
+		[pallet_ranked_collective, AmbassadorCollective]
+		[pallet_collective_content, AmbassadorContent]
+		[pallet_core_fellowship, AmbassadorCore]
+		[pallet_salary, AmbassadorSalary]
+	);
+}
+
+impl_runtime_apis!
{ + impl sp_consensus_aura::AuraApi for Runtime { + fn slot_duration() -> sp_consensus_aura::SlotDuration { + sp_consensus_aura::SlotDuration::from_millis(Aura::slot_duration()) + } + + fn authorities() -> Vec { + Aura::authorities().into_inner() + } + } + + impl sp_api::Core for Runtime { + fn version() -> RuntimeVersion { + VERSION + } + + fn execute_block(block: Block) { + Executive::execute_block(block) + } + + fn initialize_block(header: &::Header) { + Executive::initialize_block(header) + } + } + + impl sp_api::Metadata for Runtime { + fn metadata() -> OpaqueMetadata { + OpaqueMetadata::new(Runtime::metadata().into()) + } + + fn metadata_at_version(version: u32) -> Option { + Runtime::metadata_at_version(version) + } + + fn metadata_versions() -> sp_std::vec::Vec { + Runtime::metadata_versions() + } + } + + impl sp_block_builder::BlockBuilder for Runtime { + fn apply_extrinsic(extrinsic: ::Extrinsic) -> ApplyExtrinsicResult { + Executive::apply_extrinsic(extrinsic) + } + + fn finalize_block() -> ::Header { + Executive::finalize_block() + } + + fn inherent_extrinsics(data: sp_inherents::InherentData) -> Vec<::Extrinsic> { + data.create_extrinsics() + } + + fn check_inherents( + block: Block, + data: sp_inherents::InherentData, + ) -> sp_inherents::CheckInherentsResult { + data.check_extrinsics(&block) + } + } + + impl sp_transaction_pool::runtime_api::TaggedTransactionQueue for Runtime { + fn validate_transaction( + source: TransactionSource, + tx: ::Extrinsic, + block_hash: ::Hash, + ) -> TransactionValidity { + Executive::validate_transaction(source, tx, block_hash) + } + } + + impl sp_offchain::OffchainWorkerApi for Runtime { + fn offchain_worker(header: &::Header) { + Executive::offchain_worker(header) + } + } + + impl sp_session::SessionKeys for Runtime { + fn generate_session_keys(seed: Option>) -> Vec { + SessionKeys::generate(seed) + } + + fn decode_session_keys( + encoded: Vec, + ) -> Option, KeyTypeId)>> { + SessionKeys::decode_into_raw_public_keys(&encoded) + } + } + + impl frame_system_rpc_runtime_api::AccountNonceApi for Runtime { + fn account_nonce(account: AccountId) -> Nonce { + System::account_nonce(account) + } + } + + impl pallet_transaction_payment_rpc_runtime_api::TransactionPaymentApi for Runtime { + fn query_info( + uxt: ::Extrinsic, + len: u32, + ) -> pallet_transaction_payment_rpc_runtime_api::RuntimeDispatchInfo { + TransactionPayment::query_info(uxt, len) + } + fn query_fee_details( + uxt: ::Extrinsic, + len: u32, + ) -> pallet_transaction_payment::FeeDetails { + TransactionPayment::query_fee_details(uxt, len) + } + fn query_weight_to_fee(weight: Weight) -> Balance { + TransactionPayment::weight_to_fee(weight) + } + fn query_length_to_fee(length: u32) -> Balance { + TransactionPayment::length_to_fee(length) + } + } + + impl pallet_transaction_payment_rpc_runtime_api::TransactionPaymentCallApi + for Runtime + { + fn query_call_info( + call: RuntimeCall, + len: u32, + ) -> pallet_transaction_payment::RuntimeDispatchInfo { + TransactionPayment::query_call_info(call, len) + } + fn query_call_fee_details( + call: RuntimeCall, + len: u32, + ) -> pallet_transaction_payment::FeeDetails { + TransactionPayment::query_call_fee_details(call, len) + } + fn query_weight_to_fee(weight: Weight) -> Balance { + TransactionPayment::weight_to_fee(weight) + } + fn query_length_to_fee(length: u32) -> Balance { + TransactionPayment::length_to_fee(length) + } + } + + impl cumulus_primitives_core::CollectCollationInfo for Runtime { + fn collect_collation_info(header: &::Header) -> 
cumulus_primitives_core::CollationInfo { + ParachainSystem::collect_collation_info(header) + } + } + + #[cfg(feature = "try-runtime")] + impl frame_try_runtime::TryRuntime for Runtime { + fn on_runtime_upgrade(checks: frame_try_runtime::UpgradeCheckSelect) -> (Weight, Weight) { + let weight = Executive::try_runtime_upgrade(checks).unwrap(); + (weight, RuntimeBlockWeights::get().max_block) + } + + fn execute_block( + block: Block, + state_root_check: bool, + signature_check: bool, + select: frame_try_runtime::TryStateSelect, + ) -> Weight { + // NOTE: intentional unwrap: we don't want to propagate the error backwards, and want to + // have a backtrace here. + Executive::try_execute_block(block, state_root_check, signature_check, select).unwrap() + } + } + + #[cfg(feature = "runtime-benchmarks")] + impl frame_benchmarking::Benchmark for Runtime { + fn benchmark_metadata(extra: bool) -> ( + Vec, + Vec, + ) { + use frame_benchmarking::{Benchmarking, BenchmarkList}; + use frame_support::traits::StorageInfoTrait; + use frame_system_benchmarking::Pallet as SystemBench; + use cumulus_pallet_session_benchmarking::Pallet as SessionBench; + use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsicsBenchmark; + + let mut list = Vec::::new(); + list_benchmarks!(list, extra); + + let storage_info = AllPalletsWithSystem::storage_info(); + (list, storage_info) + } + + fn dispatch_benchmark( + config: frame_benchmarking::BenchmarkConfig + ) -> Result, sp_runtime::RuntimeString> { + use frame_benchmarking::{Benchmarking, BenchmarkBatch, BenchmarkError}; + use sp_storage::TrackedStorageKey; + + use frame_system_benchmarking::Pallet as SystemBench; + impl frame_system_benchmarking::Config for Runtime { + fn setup_set_code_requirements(code: &sp_std::vec::Vec) -> Result<(), BenchmarkError> { + ParachainSystem::initialize_for_set_code_benchmark(code.len() as u32); + Ok(()) + } + + fn verify_set_code() { + System::assert_last_event(cumulus_pallet_parachain_system::Event::::ValidationFunctionStored.into()); + } + } + + use cumulus_pallet_session_benchmarking::Pallet as SessionBench; + impl cumulus_pallet_session_benchmarking::Config for Runtime {} + + use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsicsBenchmark; + impl pallet_xcm::benchmarking::Config for Runtime { + fn reachable_dest() -> Option { + Some(Parent.into()) + } + + fn teleportable_asset_and_dest() -> Option<(MultiAsset, MultiLocation)> { + // Relay/native token can be teleported between Collectives and Relay. + Some(( + MultiAsset { + fun: Fungible(EXISTENTIAL_DEPOSIT), + id: Concrete(Parent.into()) + }.into(), + Parent.into(), + )) + } + + fn reserve_transferable_asset_and_dest() -> Option<(MultiAsset, MultiLocation)> { + // Reserve transfers are disabled on Collectives. 
+ None + } + } + + let whitelist: Vec = vec![ + // Block Number + hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef702a5c1b19ab7a04f536c519aca4983ac").to_vec().into(), + // Total Issuance + hex_literal::hex!("c2261276cc9d1f8598ea4b6a74b15c2f57c875e4cff74148e4628f264b974c80").to_vec().into(), + // Execution Phase + hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef7ff553b5a9862a516939d82b3d3d8661a").to_vec().into(), + // Event Count + hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef70a98fdbe9ce6c55837576c60c7af3850").to_vec().into(), + // System Events + hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef780d41e5e16056765bc8461851072c9d7").to_vec().into(), + ]; + + let mut batches = Vec::::new(); + let params = (&config, &whitelist); + add_benchmarks!(params, batches); + + if batches.is_empty() { return Err("Benchmark not found for this pallet.".into()) } + Ok(batches) + } + } + + impl sp_genesis_builder::GenesisBuilder for Runtime { + fn create_default_config() -> Vec { + create_default_config::() + } + + fn build_config(config: Vec) -> sp_genesis_builder::Result { + build_config::(config) + } + } +} + +cumulus_pallet_parachain_system::register_validate_block! { + Runtime = Runtime, + BlockExecutor = cumulus_pallet_aura_ext::BlockExecutor::, +} diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/block_weights.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/block_weights.rs new file mode 100644 index 00000000000..e7fdb2aae2a --- /dev/null +++ b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/block_weights.rs @@ -0,0 +1,53 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +pub mod constants { + use frame_support::{ + parameter_types, + weights::{constants, Weight}, + }; + + parameter_types! { + /// Importing a block with 0 Extrinsics. + pub const BlockExecutionWeight: Weight = + Weight::from_parts(constants::WEIGHT_REF_TIME_PER_NANOS.saturating_mul(5_000_000), 0); + } + + #[cfg(test)] + mod test_weights { + use frame_support::weights::constants; + + /// Checks that the weight exists and is sane. + // NOTE: If this test fails but you are sure that the generated values are fine, + // you can delete it. + #[test] + fn sane() { + let w = super::constants::BlockExecutionWeight::get(); + + // At least 100 µs. + assert!( + w.ref_time() >= 100u64 * constants::WEIGHT_REF_TIME_PER_MICROS, + "Weight should be at least 100 µs." + ); + // At most 50 ms. + assert!( + w.ref_time() <= 50u64 * constants::WEIGHT_REF_TIME_PER_MILLIS, + "Weight should be at most 50 ms." 
+ ); + } + } +} diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/cumulus_pallet_dmp_queue.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/cumulus_pallet_dmp_queue.rs new file mode 100644 index 00000000000..cc41dcd6cbb --- /dev/null +++ b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/cumulus_pallet_dmp_queue.rs @@ -0,0 +1,131 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Cumulus. + +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . + +//! Autogenerated weights for `cumulus_pallet_dmp_queue` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-10-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `runner-yprdrvc7-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("asset-hub-kusama-dev")`, DB CACHE: 1024 + +// Executed Command: +// target/production/polkadot-parachain +// benchmark +// pallet +// --steps=50 +// --repeat=20 +// --extrinsic=* +// --wasm-execution=compiled +// --heap-pages=4096 +// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json +// --pallet=cumulus_pallet_dmp_queue +// --chain=asset-hub-kusama-dev +// --header=./cumulus/file_header.txt +// --output=./cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/ + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `cumulus_pallet_dmp_queue`. 
+pub struct WeightInfo(PhantomData); +impl cumulus_pallet_dmp_queue::WeightInfo for WeightInfo { + /// Storage: `DmpQueue::MigrationStatus` (r:1 w:1) + /// Proof: `DmpQueue::MigrationStatus` (`max_values`: Some(1), `max_size`: Some(1028), added: 1523, mode: `MaxEncodedLen`) + /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca754904d6d8c6fe06c4e5965f9b8397421` (r:1 w:0) + /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca754904d6d8c6fe06c4e5965f9b8397421` (r:1 w:0) + /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca7d95d3e948effbeccff2de2c182672836` (r:1 w:1) + /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca7d95d3e948effbeccff2de2c182672836` (r:1 w:1) + /// Storage: `MessageQueue::BookStateFor` (r:1 w:1) + /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) + /// Storage: `MessageQueue::ServiceHead` (r:1 w:1) + /// Proof: `MessageQueue::ServiceHead` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `MaxEncodedLen`) + /// Storage: `MessageQueue::Pages` (r:0 w:1) + /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(65585), added: 68060, mode: `MaxEncodedLen`) + fn on_idle_good_msg() -> Weight { + // Proof Size summary in bytes: + // Measured: `65696` + // Estimated: `69161` + // Minimum execution time: 124_651_000 picoseconds. + Weight::from_parts(127_857_000, 0) + .saturating_add(Weight::from_parts(0, 69161)) + .saturating_add(T::DbWeight::get().reads(5)) + .saturating_add(T::DbWeight::get().writes(5)) + } + /// Storage: `DmpQueue::MigrationStatus` (r:1 w:1) + /// Proof: `DmpQueue::MigrationStatus` (`max_values`: Some(1), `max_size`: Some(1028), added: 1523, mode: `MaxEncodedLen`) + /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca754904d6d8c6fe06c4e5965f9b8397421` (r:1 w:0) + /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca754904d6d8c6fe06c4e5965f9b8397421` (r:1 w:0) + /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca7d95d3e948effbeccff2de2c182672836` (r:1 w:1) + /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca7d95d3e948effbeccff2de2c182672836` (r:1 w:1) + fn on_idle_large_msg() -> Weight { + // Proof Size summary in bytes: + // Measured: `65659` + // Estimated: `69124` + // Minimum execution time: 65_684_000 picoseconds. 
+ Weight::from_parts(68_039_000, 0) + .saturating_add(Weight::from_parts(0, 69124)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `DmpQueue::MigrationStatus` (r:1 w:1) + /// Proof: `DmpQueue::MigrationStatus` (`max_values`: Some(1), `max_size`: Some(1028), added: 1523, mode: `MaxEncodedLen`) + /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca754904d6d8c6fe06c4e5965f9b8397421` (r:1 w:0) + /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca754904d6d8c6fe06c4e5965f9b8397421` (r:1 w:0) + /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca70f923ef3252d0166429d36d20ed665a8` (r:1 w:1) + /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca70f923ef3252d0166429d36d20ed665a8` (r:1 w:1) + /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca772275f64c354954352b71eea39cfaca2` (r:1 w:1) + /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca772275f64c354954352b71eea39cfaca2` (r:1 w:1) + /// Storage: `MessageQueue::BookStateFor` (r:1 w:1) + /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) + /// Storage: `MessageQueue::ServiceHead` (r:1 w:1) + /// Proof: `MessageQueue::ServiceHead` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `MaxEncodedLen`) + /// Storage: `MessageQueue::Pages` (r:0 w:1) + /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(65585), added: 68060, mode: `MaxEncodedLen`) + fn on_idle_overweight_good_msg() -> Weight { + // Proof Size summary in bytes: + // Measured: `65726` + // Estimated: `69191` + // Minimum execution time: 117_657_000 picoseconds. + Weight::from_parts(122_035_000, 0) + .saturating_add(Weight::from_parts(0, 69191)) + .saturating_add(T::DbWeight::get().reads(6)) + .saturating_add(T::DbWeight::get().writes(6)) + } + /// Storage: `DmpQueue::MigrationStatus` (r:1 w:1) + /// Proof: `DmpQueue::MigrationStatus` (`max_values`: Some(1), `max_size`: Some(1028), added: 1523, mode: `MaxEncodedLen`) + /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca754904d6d8c6fe06c4e5965f9b8397421` (r:1 w:0) + /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca754904d6d8c6fe06c4e5965f9b8397421` (r:1 w:0) + /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca70f923ef3252d0166429d36d20ed665a8` (r:1 w:1) + /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca70f923ef3252d0166429d36d20ed665a8` (r:1 w:1) + /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca772275f64c354954352b71eea39cfaca2` (r:1 w:1) + /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca772275f64c354954352b71eea39cfaca2` (r:1 w:1) + fn on_idle_overweight_large_msg() -> Weight { + // Proof Size summary in bytes: + // Measured: `65689` + // Estimated: `69154` + // Minimum execution time: 59_799_000 picoseconds. + Weight::from_parts(61_354_000, 0) + .saturating_add(Weight::from_parts(0, 69154)) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(3)) + } +} diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/cumulus_pallet_parachain_system.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/cumulus_pallet_parachain_system.rs new file mode 100644 index 00000000000..0b7a2fc21cd --- /dev/null +++ b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/cumulus_pallet_parachain_system.rs @@ -0,0 +1,80 @@ +// Copyright (C) Parity Technologies (UK) Ltd. 
+// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Autogenerated weights for `cumulus_pallet_parachain_system` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-03-28, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `i9`, CPU: `13th Gen Intel(R) Core(TM) i9-13900K` +//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("collectives-polkadot-dev"), DB CACHE: 1024 + +// Executed Command: +// ./target/release/polkadot-parachain +// benchmark +// pallet +// --chain +// collectives-polkadot-dev +// --pallet +// cumulus_pallet_parachain_system +// --extrinsic +// * +// --execution +// wasm +// --wasm-execution +// compiled +// --output +// parachains/runtimes/collectives/collectives-polkadot/src/weights +// --steps +// 50 +// --repeat +// 20 + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] + +use frame_support::{traits::Get, weights::Weight}; +use sp_std::marker::PhantomData; + +/// Weight functions for `cumulus_pallet_parachain_system`. +pub struct WeightInfo(PhantomData); +impl cumulus_pallet_parachain_system::WeightInfo for WeightInfo { + /// Storage: ParachainSystem LastDmqMqcHead (r:1 w:1) + /// Proof Skipped: ParachainSystem LastDmqMqcHead (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: ParachainSystem ReservedDmpWeightOverride (r:1 w:0) + /// Proof Skipped: ParachainSystem ReservedDmpWeightOverride (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: MessageQueue BookStateFor (r:1 w:1) + /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) + /// Storage: MessageQueue ServiceHead (r:1 w:1) + /// Proof: MessageQueue ServiceHead (max_values: Some(1), max_size: Some(5), added: 500, mode: MaxEncodedLen) + /// Storage: ParachainSystem ProcessedDownwardMessages (r:0 w:1) + /// Proof Skipped: ParachainSystem ProcessedDownwardMessages (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: MessageQueue Pages (r:0 w:16) + /// Proof: MessageQueue Pages (max_values: None, max_size: Some(65585), added: 68060, mode: MaxEncodedLen) + /// The range of component `n` is `[0, 1000]`. + fn enqueue_inbound_downward_messages(n: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `48` + // Estimated: `8121` + // Minimum execution time: 1_988_000 picoseconds. 
+ Weight::from_parts(2_039_000, 0) + .saturating_add(Weight::from_parts(0, 8121)) + // Standard Error: 30_660 + .saturating_add(Weight::from_parts(24_419_204, 0).saturating_mul(n.into())) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(4)) + } +} diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/cumulus_pallet_xcmp_queue.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/cumulus_pallet_xcmp_queue.rs new file mode 100644 index 00000000000..e68c075bffc --- /dev/null +++ b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/cumulus_pallet_xcmp_queue.rs @@ -0,0 +1,148 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Autogenerated weights for `cumulus_pallet_xcmp_queue` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-09-15, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `Olivers-MacBook-Pro.local`, CPU: `` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("collectives-polkadot-dev")`, DB CACHE: 1024 + +// Executed Command: +// ./target/release/polkadot-parachain +// benchmark +// pallet +// --pallet +// cumulus-pallet-xcmp-queue +// --chain +// collectives-polkadot-dev +// --output +// cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/cumulus_pallet_xcmp_queue.rs +// --extrinsic +// + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `cumulus_pallet_xcmp_queue`. +pub struct WeightInfo(PhantomData); +impl cumulus_pallet_xcmp_queue::WeightInfo for WeightInfo { + /// Storage: `XcmpQueue::QueueConfig` (r:1 w:1) + /// Proof: `XcmpQueue::QueueConfig` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + fn set_config_with_u32() -> Weight { + // Proof Size summary in bytes: + // Measured: `142` + // Estimated: `1627` + // Minimum execution time: 5_000_000 picoseconds. 
+ Weight::from_parts(6_000_000, 0) + .saturating_add(Weight::from_parts(0, 1627)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `XcmpQueue::QueueConfig` (r:1 w:0) + /// Proof: `XcmpQueue::QueueConfig` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `MessageQueue::BookStateFor` (r:1 w:1) + /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) + /// Storage: `MessageQueue::ServiceHead` (r:1 w:1) + /// Proof: `MessageQueue::ServiceHead` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `MaxEncodedLen`) + /// Storage: `XcmpQueue::InboundXcmpSuspended` (r:1 w:0) + /// Proof: `XcmpQueue::InboundXcmpSuspended` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `MessageQueue::Pages` (r:0 w:1) + /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(65585), added: 68060, mode: `MaxEncodedLen`) + fn enqueue_xcmp_message() -> Weight { + // Proof Size summary in bytes: + // Measured: `148` + // Estimated: `3517` + // Minimum execution time: 14_000_000 picoseconds. + Weight::from_parts(14_000_000, 0) + .saturating_add(Weight::from_parts(0, 3517)) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(3)) + } + /// Storage: `XcmpQueue::OutboundXcmpStatus` (r:1 w:1) + /// Proof: `XcmpQueue::OutboundXcmpStatus` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + fn suspend_channel() -> Weight { + // Proof Size summary in bytes: + // Measured: `142` + // Estimated: `1627` + // Minimum execution time: 3_000_000 picoseconds. + Weight::from_parts(3_000_000, 0) + .saturating_add(Weight::from_parts(0, 1627)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `XcmpQueue::OutboundXcmpStatus` (r:1 w:1) + /// Proof: `XcmpQueue::OutboundXcmpStatus` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + fn resume_channel() -> Weight { + // Proof Size summary in bytes: + // Measured: `177` + // Estimated: `1662` + // Minimum execution time: 4_000_000 picoseconds. + Weight::from_parts(5_000_000, 0) + .saturating_add(Weight::from_parts(0, 1662)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + fn take_first_concatenated_xcm() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 44_000_000 picoseconds. 
+ Weight::from_parts(45_000_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + /// Storage: UNKNOWN KEY `0x7b3237373ffdfeb1cab4222e3b520d6b345d8e88afa015075c945637c07e8f20` (r:1 w:1) + /// Proof: UNKNOWN KEY `0x7b3237373ffdfeb1cab4222e3b520d6b345d8e88afa015075c945637c07e8f20` (r:1 w:1) + /// Storage: `XcmpQueue::InboundXcmpMessages` (r:1 w:1) + /// Proof: `XcmpQueue::InboundXcmpMessages` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `MessageQueue::BookStateFor` (r:1 w:1) + /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) + /// Storage: `MessageQueue::ServiceHead` (r:1 w:1) + /// Proof: `MessageQueue::ServiceHead` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `MaxEncodedLen`) + /// Storage: `XcmpQueue::QueueConfig` (r:1 w:0) + /// Proof: `XcmpQueue::QueueConfig` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `XcmpQueue::InboundXcmpSuspended` (r:1 w:0) + /// Proof: `XcmpQueue::InboundXcmpSuspended` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `MessageQueue::Pages` (r:0 w:1) + /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(65585), added: 68060, mode: `MaxEncodedLen`) + fn on_idle_good_msg() -> Weight { + // Proof Size summary in bytes: + // Measured: `65777` + // Estimated: `69242` + // Minimum execution time: 60_000_000 picoseconds. + Weight::from_parts(63_000_000, 0) + .saturating_add(Weight::from_parts(0, 69242)) + .saturating_add(T::DbWeight::get().reads(6)) + .saturating_add(T::DbWeight::get().writes(5)) + } + /// Storage: UNKNOWN KEY `0x7b3237373ffdfeb1cab4222e3b520d6b345d8e88afa015075c945637c07e8f20` (r:1 w:1) + /// Proof: UNKNOWN KEY `0x7b3237373ffdfeb1cab4222e3b520d6b345d8e88afa015075c945637c07e8f20` (r:1 w:1) + fn on_idle_large_msg() -> Weight { + // Proof Size summary in bytes: + // Measured: `65776` + // Estimated: `69241` + // Minimum execution time: 41_000_000 picoseconds. + Weight::from_parts(43_000_000, 0) + .saturating_add(Weight::from_parts(0, 69241)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } +} diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/extrinsic_weights.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/extrinsic_weights.rs new file mode 100644 index 00000000000..1a4adb968bb --- /dev/null +++ b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/extrinsic_weights.rs @@ -0,0 +1,53 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +pub mod constants { + use frame_support::{ + parameter_types, + weights::{constants, Weight}, + }; + + parameter_types! { + /// Executing a NO-OP `System::remarks` Extrinsic. 
+ pub const ExtrinsicBaseWeight: Weight = + Weight::from_parts(constants::WEIGHT_REF_TIME_PER_NANOS.saturating_mul(125_000), 0); + } + + #[cfg(test)] + mod test_weights { + use frame_support::weights::constants; + + /// Checks that the weight exists and is sane. + // NOTE: If this test fails but you are sure that the generated values are fine, + // you can delete it. + #[test] + fn sane() { + let w = super::constants::ExtrinsicBaseWeight::get(); + + // At least 10 µs. + assert!( + w.ref_time() >= 10u64 * constants::WEIGHT_REF_TIME_PER_MICROS, + "Weight should be at least 10 µs." + ); + // At most 1 ms. + assert!( + w.ref_time() <= constants::WEIGHT_REF_TIME_PER_MILLIS, + "Weight should be at most 1 ms." + ); + } + } +} diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/frame_system.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/frame_system.rs new file mode 100644 index 00000000000..b6f1dc8dc08 --- /dev/null +++ b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/frame_system.rs @@ -0,0 +1,154 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Autogenerated weights for `frame_system` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-07-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `runner-ynta1nyy-project-238-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! EXECUTION: ``, WASM-EXECUTION: `Compiled`, CHAIN: `Some("collectives-polkadot-dev")`, DB CACHE: 1024 + +// Executed Command: +// ./target/production/polkadot-parachain +// benchmark +// pallet +// --chain=collectives-polkadot-dev +// --wasm-execution=compiled +// --pallet=frame_system +// --no-storage-info +// --no-median-slopes +// --no-min-squares +// --extrinsic=* +// --steps=50 +// --repeat=20 +// --json +// --header=./file_header.txt +// --output=./parachains/runtimes/collectives/collectives-polkadot/src/weights/ + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `frame_system`. +pub struct WeightInfo(PhantomData); +impl frame_system::WeightInfo for WeightInfo { + /// The range of component `b` is `[0, 3932160]`. + fn remark(b: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 1_926_000 picoseconds. + Weight::from_parts(1_929_666, 0) + .saturating_add(Weight::from_parts(0, 0)) + // Standard Error: 0 + .saturating_add(Weight::from_parts(387, 0).saturating_mul(b.into())) + } + /// The range of component `b` is `[0, 3932160]`. 
+ fn remark_with_event(b: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 7_221_000 picoseconds. + Weight::from_parts(34_449_539, 0) + .saturating_add(Weight::from_parts(0, 0)) + // Standard Error: 7 + .saturating_add(Weight::from_parts(1_706, 0).saturating_mul(b.into())) + } + /// Storage: `System::Digest` (r:1 w:1) + /// Proof: `System::Digest` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: UNKNOWN KEY `0x3a686561707061676573` (r:0 w:1) + /// Proof: UNKNOWN KEY `0x3a686561707061676573` (r:0 w:1) + fn set_heap_pages() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `1485` + // Minimum execution time: 3_681_000 picoseconds. + Weight::from_parts(3_857_000, 0) + .saturating_add(Weight::from_parts(0, 1485)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `ParachainSystem::ValidationData` (r:1 w:0) + /// Proof: `ParachainSystem::ValidationData` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::UpgradeRestrictionSignal` (r:1 w:0) + /// Proof: `ParachainSystem::UpgradeRestrictionSignal` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::PendingValidationCode` (r:1 w:1) + /// Proof: `ParachainSystem::PendingValidationCode` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) + /// Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::NewValidationCode` (r:0 w:1) + /// Proof: `ParachainSystem::NewValidationCode` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::DidSetValidationCode` (r:0 w:1) + /// Proof: `ParachainSystem::DidSetValidationCode` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + fn set_code() -> Weight { + // Proof Size summary in bytes: + // Measured: `156` + // Estimated: `1641` + // Minimum execution time: 101_899_621_000 picoseconds. + Weight::from_parts(106_377_672_000, 0) + .saturating_add(Weight::from_parts(0, 1641)) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(3)) + } + /// Storage: `Skipped::Metadata` (r:0 w:0) + /// Proof: `Skipped::Metadata` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// The range of component `i` is `[0, 1000]`. + fn set_storage(i: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 2_039_000 picoseconds. + Weight::from_parts(2_094_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + // Standard Error: 2_124 + .saturating_add(Weight::from_parts(754_465, 0).saturating_mul(i.into())) + .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(i.into()))) + } + /// Storage: `Skipped::Metadata` (r:0 w:0) + /// Proof: `Skipped::Metadata` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// The range of component `i` is `[0, 1000]`. + fn kill_storage(i: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 2_103_000 picoseconds. 
+ Weight::from_parts(2_182_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + // Standard Error: 1_031 + .saturating_add(Weight::from_parts(570_563, 0).saturating_mul(i.into())) + .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(i.into()))) + } + /// Storage: `Skipped::Metadata` (r:0 w:0) + /// Proof: `Skipped::Metadata` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// The range of component `p` is `[0, 1000]`. + fn kill_prefix(p: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `82 + p * (69 ±0)` + // Estimated: `78 + p * (70 ±0)` + // Minimum execution time: 3_728_000 picoseconds. + Weight::from_parts(3_836_000, 0) + .saturating_add(Weight::from_parts(0, 78)) + // Standard Error: 1_802 + .saturating_add(Weight::from_parts(1_199_345, 0).saturating_mul(p.into())) + .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(p.into()))) + .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(p.into()))) + .saturating_add(Weight::from_parts(0, 70).saturating_mul(p.into())) + } +} diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/mod.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/mod.rs new file mode 100644 index 00000000000..1d877fdbd2b --- /dev/null +++ b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/mod.rs @@ -0,0 +1,50 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +pub mod block_weights; +pub mod cumulus_pallet_dmp_queue; +pub mod cumulus_pallet_parachain_system; +pub mod cumulus_pallet_xcmp_queue; +pub mod extrinsic_weights; +pub mod frame_system; +pub mod pallet_alliance; +pub mod pallet_balances; +pub mod pallet_collator_selection; +pub mod pallet_collective; +pub mod pallet_collective_content; +pub mod pallet_core_fellowship_ambassador_core; +pub mod pallet_core_fellowship_fellowship_core; +pub mod pallet_message_queue; +pub mod pallet_multisig; +pub mod pallet_preimage; +pub mod pallet_proxy; +pub mod pallet_ranked_collective_ambassador_collective; +pub mod pallet_ranked_collective_fellowship_collective; +pub mod pallet_referenda_ambassador_referenda; +pub mod pallet_referenda_fellowship_referenda; +pub mod pallet_salary_ambassador_salary; +pub mod pallet_salary_fellowship_salary; +pub mod pallet_scheduler; +pub mod pallet_session; +pub mod pallet_timestamp; +pub mod pallet_utility; +pub mod pallet_xcm; +pub mod paritydb_weights; +pub mod rocksdb_weights; + +pub use block_weights::constants::BlockExecutionWeight; +pub use extrinsic_weights::constants::ExtrinsicBaseWeight; +pub use paritydb_weights::constants::ParityDbWeight; +pub use rocksdb_weights::constants::RocksDbWeight; diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_alliance.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_alliance.rs new file mode 100644 index 00000000000..d8ede609a67 --- /dev/null +++ b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_alliance.rs @@ -0,0 +1,494 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Autogenerated weights for `pallet_alliance` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-07-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `runner-ynta1nyy-project-238-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! EXECUTION: ``, WASM-EXECUTION: `Compiled`, CHAIN: `Some("collectives-polkadot-dev")`, DB CACHE: 1024 + +// Executed Command: +// ./target/production/polkadot-parachain +// benchmark +// pallet +// --chain=collectives-polkadot-dev +// --wasm-execution=compiled +// --pallet=pallet_alliance +// --no-storage-info +// --no-median-slopes +// --no-min-squares +// --extrinsic=* +// --steps=50 +// --repeat=20 +// --json +// --header=./file_header.txt +// --output=./parachains/runtimes/collectives/collectives-polkadot/src/weights/ + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `pallet_alliance`. 
+pub struct WeightInfo<T>(PhantomData<T>);
+impl<T: frame_system::Config> pallet_alliance::WeightInfo for WeightInfo<T> {
+ /// Storage: `Alliance::Members` (r:1 w:0)
+ /// Proof: `Alliance::Members` (`max_values`: None, `max_size`: Some(3211), added: 5686, mode: `MaxEncodedLen`)
+ /// Storage: `AllianceMotion::ProposalOf` (r:1 w:1)
+ /// Proof: `AllianceMotion::ProposalOf` (`max_values`: None, `max_size`: None, mode: `Measured`)
+ /// Storage: `AllianceMotion::Proposals` (r:1 w:1)
+ /// Proof: `AllianceMotion::Proposals` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+ /// Storage: `AllianceMotion::ProposalCount` (r:1 w:1)
+ /// Proof: `AllianceMotion::ProposalCount` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+ /// Storage: `AllianceMotion::Voting` (r:0 w:1)
+ /// Proof: `AllianceMotion::Voting` (`max_values`: None, `max_size`: None, mode: `Measured`)
+ /// The range of component `b` is `[1, 1024]`.
+ /// The range of component `m` is `[2, 100]`.
+ /// The range of component `p` is `[1, 100]`.
+ fn propose_proposed(b: u32, m: u32, p: u32, ) -> Weight {
+ // Proof Size summary in bytes:
+ // Measured: `439 + m * (32 ±0) + p * (36 ±0)`
+ // Estimated: `6676 + m * (32 ±0) + p * (36 ±0)`
+ // Minimum execution time: 32_783_000 picoseconds.
+ Weight::from_parts(32_174_037, 0)
+ .saturating_add(Weight::from_parts(0, 6676))
+ // Standard Error: 198
+ .saturating_add(Weight::from_parts(1_220, 0).saturating_mul(b.into()))
+ // Standard Error: 2_074
+ .saturating_add(Weight::from_parts(40_945, 0).saturating_mul(m.into()))
+ // Standard Error: 2_048
+ .saturating_add(Weight::from_parts(181_087, 0).saturating_mul(p.into()))
+ .saturating_add(T::DbWeight::get().reads(4))
+ .saturating_add(T::DbWeight::get().writes(4))
+ .saturating_add(Weight::from_parts(0, 32).saturating_mul(m.into()))
+ .saturating_add(Weight::from_parts(0, 36).saturating_mul(p.into()))
+ }
+ /// Storage: `Alliance::Members` (r:1 w:0)
+ /// Proof: `Alliance::Members` (`max_values`: None, `max_size`: Some(3211), added: 5686, mode: `MaxEncodedLen`)
+ /// Storage: `AllianceMotion::Voting` (r:1 w:1)
+ /// Proof: `AllianceMotion::Voting` (`max_values`: None, `max_size`: None, mode: `Measured`)
+ /// The range of component `m` is `[5, 100]`.
+ fn vote(m: u32, ) -> Weight {
+ // Proof Size summary in bytes:
+ // Measured: `868 + m * (64 ±0)`
+ // Estimated: `6676 + m * (64 ±0)`
+ // Minimum execution time: 28_520_000 picoseconds.
+ Weight::from_parts(29_661_024, 0) + .saturating_add(Weight::from_parts(0, 6676)) + // Standard Error: 2_336 + .saturating_add(Weight::from_parts(89_873, 0).saturating_mul(m.into())) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + .saturating_add(Weight::from_parts(0, 64).saturating_mul(m.into())) + } + /// Storage: `Alliance::Members` (r:1 w:0) + /// Proof: `Alliance::Members` (`max_values`: None, `max_size`: Some(3211), added: 5686, mode: `MaxEncodedLen`) + /// Storage: `AllianceMotion::Voting` (r:1 w:1) + /// Proof: `AllianceMotion::Voting` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `AllianceMotion::Members` (r:1 w:0) + /// Proof: `AllianceMotion::Members` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `AllianceMotion::Proposals` (r:1 w:1) + /// Proof: `AllianceMotion::Proposals` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `AllianceMotion::ProposalOf` (r:0 w:1) + /// Proof: `AllianceMotion::ProposalOf` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// The range of component `m` is `[4, 100]`. + /// The range of component `p` is `[1, 100]`. + fn close_early_disapproved(m: u32, p: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `312 + m * (96 ±0) + p * (36 ±0)` + // Estimated: `6676 + m * (97 ±0) + p * (36 ±0)` + // Minimum execution time: 39_353_000 picoseconds. + Weight::from_parts(33_028_008, 0) + .saturating_add(Weight::from_parts(0, 6676)) + // Standard Error: 2_137 + .saturating_add(Weight::from_parts(90_946, 0).saturating_mul(m.into())) + // Standard Error: 2_084 + .saturating_add(Weight::from_parts(175_827, 0).saturating_mul(p.into())) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(3)) + .saturating_add(Weight::from_parts(0, 97).saturating_mul(m.into())) + .saturating_add(Weight::from_parts(0, 36).saturating_mul(p.into())) + } + /// Storage: `Alliance::Members` (r:1 w:0) + /// Proof: `Alliance::Members` (`max_values`: None, `max_size`: Some(3211), added: 5686, mode: `MaxEncodedLen`) + /// Storage: `AllianceMotion::Voting` (r:1 w:1) + /// Proof: `AllianceMotion::Voting` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `AllianceMotion::Members` (r:1 w:0) + /// Proof: `AllianceMotion::Members` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `AllianceMotion::ProposalOf` (r:1 w:1) + /// Proof: `AllianceMotion::ProposalOf` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `AllianceMotion::Proposals` (r:1 w:1) + /// Proof: `AllianceMotion::Proposals` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// The range of component `b` is `[1, 1024]`. + /// The range of component `m` is `[4, 100]`. + /// The range of component `p` is `[1, 100]`. + fn close_early_approved(_b: u32, m: u32, p: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `762 + m * (96 ±0) + p * (41 ±0)` + // Estimated: `6676 + m * (97 ±0) + p * (40 ±0)` + // Minimum execution time: 52_835_000 picoseconds. 
+ Weight::from_parts(45_963_292, 0) + .saturating_add(Weight::from_parts(0, 6676)) + // Standard Error: 3_189 + .saturating_add(Weight::from_parts(111_627, 0).saturating_mul(m.into())) + // Standard Error: 3_109 + .saturating_add(Weight::from_parts(207_923, 0).saturating_mul(p.into())) + .saturating_add(T::DbWeight::get().reads(5)) + .saturating_add(T::DbWeight::get().writes(3)) + .saturating_add(Weight::from_parts(0, 97).saturating_mul(m.into())) + .saturating_add(Weight::from_parts(0, 40).saturating_mul(p.into())) + } + /// Storage: `Alliance::Members` (r:1 w:0) + /// Proof: `Alliance::Members` (`max_values`: None, `max_size`: Some(3211), added: 5686, mode: `MaxEncodedLen`) + /// Storage: `AllianceMotion::Voting` (r:1 w:1) + /// Proof: `AllianceMotion::Voting` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `AllianceMotion::Members` (r:1 w:0) + /// Proof: `AllianceMotion::Members` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `AllianceMotion::Prime` (r:1 w:0) + /// Proof: `AllianceMotion::Prime` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `AllianceMotion::ProposalOf` (r:1 w:1) + /// Proof: `AllianceMotion::ProposalOf` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `AllianceMotion::Proposals` (r:1 w:1) + /// Proof: `AllianceMotion::Proposals` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Alliance::Rule` (r:0 w:1) + /// Proof: `Alliance::Rule` (`max_values`: Some(1), `max_size`: Some(87), added: 582, mode: `MaxEncodedLen`) + /// The range of component `m` is `[2, 100]`. + /// The range of component `p` is `[1, 100]`. + fn close_disapproved(m: u32, p: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `518 + m * (96 ±0) + p * (41 ±0)` + // Estimated: `6676 + m * (109 ±0) + p * (43 ±0)` + // Minimum execution time: 49_980_000 picoseconds. + Weight::from_parts(48_110_301, 0) + .saturating_add(Weight::from_parts(0, 6676)) + // Standard Error: 5_057 + .saturating_add(Weight::from_parts(169_065, 0).saturating_mul(m.into())) + // Standard Error: 4_995 + .saturating_add(Weight::from_parts(201_349, 0).saturating_mul(p.into())) + .saturating_add(T::DbWeight::get().reads(6)) + .saturating_add(T::DbWeight::get().writes(4)) + .saturating_add(Weight::from_parts(0, 109).saturating_mul(m.into())) + .saturating_add(Weight::from_parts(0, 43).saturating_mul(p.into())) + } + /// Storage: `Alliance::Members` (r:1 w:0) + /// Proof: `Alliance::Members` (`max_values`: None, `max_size`: Some(3211), added: 5686, mode: `MaxEncodedLen`) + /// Storage: `AllianceMotion::Voting` (r:1 w:1) + /// Proof: `AllianceMotion::Voting` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `AllianceMotion::Members` (r:1 w:0) + /// Proof: `AllianceMotion::Members` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `AllianceMotion::Prime` (r:1 w:0) + /// Proof: `AllianceMotion::Prime` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `AllianceMotion::Proposals` (r:1 w:1) + /// Proof: `AllianceMotion::Proposals` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `AllianceMotion::ProposalOf` (r:0 w:1) + /// Proof: `AllianceMotion::ProposalOf` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// The range of component `b` is `[1, 1024]`. + /// The range of component `m` is `[5, 100]`. + /// The range of component `p` is `[1, 100]`. 
+ fn close_approved(_b: u32, m: u32, p: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `417 + m * (96 ±0) + p * (36 ±0)` + // Estimated: `6676 + m * (96 ±0) + p * (36 ±0)` + // Minimum execution time: 40_646_000 picoseconds. + Weight::from_parts(36_865_909, 0) + .saturating_add(Weight::from_parts(0, 6676)) + // Standard Error: 2_136 + .saturating_add(Weight::from_parts(74_341, 0).saturating_mul(m.into())) + // Standard Error: 2_059 + .saturating_add(Weight::from_parts(170_035, 0).saturating_mul(p.into())) + .saturating_add(T::DbWeight::get().reads(5)) + .saturating_add(T::DbWeight::get().writes(3)) + .saturating_add(Weight::from_parts(0, 96).saturating_mul(m.into())) + .saturating_add(Weight::from_parts(0, 36).saturating_mul(p.into())) + } + /// Storage: `Alliance::Members` (r:2 w:2) + /// Proof: `Alliance::Members` (`max_values`: None, `max_size`: Some(3211), added: 5686, mode: `MaxEncodedLen`) + /// Storage: `AllianceMotion::Members` (r:1 w:1) + /// Proof: `AllianceMotion::Members` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// The range of component `m` is `[1, 100]`. + /// The range of component `z` is `[0, 100]`. + fn init_members(m: u32, z: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `12` + // Estimated: `12362` + // Minimum execution time: 29_710_000 picoseconds. + Weight::from_parts(17_762_170, 0) + .saturating_add(Weight::from_parts(0, 12362)) + // Standard Error: 1_652 + .saturating_add(Weight::from_parts(156_967, 0).saturating_mul(m.into())) + // Standard Error: 1_632 + .saturating_add(Weight::from_parts(130_352, 0).saturating_mul(z.into())) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(3)) + } + /// Storage: `Alliance::Members` (r:2 w:2) + /// Proof: `Alliance::Members` (`max_values`: None, `max_size`: Some(3211), added: 5686, mode: `MaxEncodedLen`) + /// Storage: `AllianceMotion::Proposals` (r:1 w:0) + /// Proof: `AllianceMotion::Proposals` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Alliance::DepositOf` (r:200 w:50) + /// Proof: `Alliance::DepositOf` (`max_values`: None, `max_size`: Some(64), added: 2539, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:50 w:50) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `AllianceMotion::Members` (r:0 w:1) + /// Proof: `AllianceMotion::Members` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `AllianceMotion::Prime` (r:0 w:1) + /// Proof: `AllianceMotion::Prime` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// The range of component `x` is `[1, 100]`. + /// The range of component `y` is `[0, 100]`. + /// The range of component `z` is `[0, 50]`. + fn disband(x: u32, y: u32, z: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `0 + x * (52 ±0) + y * (53 ±0) + z * (250 ±0)` + // Estimated: `12362 + x * (2539 ±0) + y * (2539 ±0) + z * (2603 ±1)` + // Minimum execution time: 294_258_000 picoseconds. 
+ Weight::from_parts(295_116_000, 0) + .saturating_add(Weight::from_parts(0, 12362)) + // Standard Error: 23_663 + .saturating_add(Weight::from_parts(553_978, 0).saturating_mul(x.into())) + // Standard Error: 23_549 + .saturating_add(Weight::from_parts(567_024, 0).saturating_mul(y.into())) + // Standard Error: 47_055 + .saturating_add(Weight::from_parts(15_439_056, 0).saturating_mul(z.into())) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(x.into()))) + .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(y.into()))) + .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(z.into()))) + .saturating_add(T::DbWeight::get().writes(4)) + .saturating_add(T::DbWeight::get().writes((2_u64).saturating_mul(z.into()))) + .saturating_add(Weight::from_parts(0, 2539).saturating_mul(x.into())) + .saturating_add(Weight::from_parts(0, 2539).saturating_mul(y.into())) + .saturating_add(Weight::from_parts(0, 2603).saturating_mul(z.into())) + } + /// Storage: `Alliance::Rule` (r:0 w:1) + /// Proof: `Alliance::Rule` (`max_values`: Some(1), `max_size`: Some(87), added: 582, mode: `MaxEncodedLen`) + fn set_rule() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 8_538_000 picoseconds. + Weight::from_parts(8_752_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Alliance::Announcements` (r:1 w:1) + /// Proof: `Alliance::Announcements` (`max_values`: Some(1), `max_size`: Some(8702), added: 9197, mode: `MaxEncodedLen`) + fn announce() -> Weight { + // Proof Size summary in bytes: + // Measured: `76` + // Estimated: `10187` + // Minimum execution time: 11_213_000 picoseconds. + Weight::from_parts(11_792_000, 0) + .saturating_add(Weight::from_parts(0, 10187)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Alliance::Announcements` (r:1 w:1) + /// Proof: `Alliance::Announcements` (`max_values`: Some(1), `max_size`: Some(8702), added: 9197, mode: `MaxEncodedLen`) + fn remove_announcement() -> Weight { + // Proof Size summary in bytes: + // Measured: `149` + // Estimated: `10187` + // Minimum execution time: 12_477_000 picoseconds. + Weight::from_parts(12_942_000, 0) + .saturating_add(Weight::from_parts(0, 10187)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Alliance::Members` (r:3 w:1) + /// Proof: `Alliance::Members` (`max_values`: None, `max_size`: Some(3211), added: 5686, mode: `MaxEncodedLen`) + /// Storage: `Alliance::UnscrupulousAccounts` (r:1 w:0) + /// Proof: `Alliance::UnscrupulousAccounts` (`max_values`: Some(1), `max_size`: Some(3202), added: 3697, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Alliance::DepositOf` (r:0 w:1) + /// Proof: `Alliance::DepositOf` (`max_values`: None, `max_size`: Some(64), added: 2539, mode: `MaxEncodedLen`) + fn join_alliance() -> Weight { + // Proof Size summary in bytes: + // Measured: `294` + // Estimated: `18048` + // Minimum execution time: 41_517_000 picoseconds. 
+ Weight::from_parts(42_433_000, 0) + .saturating_add(Weight::from_parts(0, 18048)) + .saturating_add(T::DbWeight::get().reads(5)) + .saturating_add(T::DbWeight::get().writes(3)) + } + /// Storage: `Alliance::Members` (r:3 w:1) + /// Proof: `Alliance::Members` (`max_values`: None, `max_size`: Some(3211), added: 5686, mode: `MaxEncodedLen`) + /// Storage: `Alliance::UnscrupulousAccounts` (r:1 w:0) + /// Proof: `Alliance::UnscrupulousAccounts` (`max_values`: Some(1), `max_size`: Some(3202), added: 3697, mode: `MaxEncodedLen`) + fn nominate_ally() -> Weight { + // Proof Size summary in bytes: + // Measured: `193` + // Estimated: `18048` + // Minimum execution time: 25_950_000 picoseconds. + Weight::from_parts(26_631_000, 0) + .saturating_add(Weight::from_parts(0, 18048)) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Alliance::Members` (r:2 w:2) + /// Proof: `Alliance::Members` (`max_values`: None, `max_size`: Some(3211), added: 5686, mode: `MaxEncodedLen`) + /// Storage: `AllianceMotion::Proposals` (r:1 w:0) + /// Proof: `AllianceMotion::Proposals` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `AllianceMotion::Members` (r:0 w:1) + /// Proof: `AllianceMotion::Members` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `AllianceMotion::Prime` (r:0 w:1) + /// Proof: `AllianceMotion::Prime` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + fn elevate_ally() -> Weight { + // Proof Size summary in bytes: + // Measured: `236` + // Estimated: `12362` + // Minimum execution time: 24_470_000 picoseconds. + Weight::from_parts(25_222_000, 0) + .saturating_add(Weight::from_parts(0, 12362)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(4)) + } + /// Storage: `Alliance::Members` (r:4 w:2) + /// Proof: `Alliance::Members` (`max_values`: None, `max_size`: Some(3211), added: 5686, mode: `MaxEncodedLen`) + /// Storage: `AllianceMotion::Proposals` (r:1 w:0) + /// Proof: `AllianceMotion::Proposals` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `AllianceMotion::Members` (r:0 w:1) + /// Proof: `AllianceMotion::Members` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `AllianceMotion::Prime` (r:0 w:1) + /// Proof: `AllianceMotion::Prime` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Alliance::RetiringMembers` (r:0 w:1) + /// Proof: `Alliance::RetiringMembers` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) + fn give_retirement_notice() -> Weight { + // Proof Size summary in bytes: + // Measured: `236` + // Estimated: `23734` + // Minimum execution time: 31_519_000 picoseconds. 
+ Weight::from_parts(32_827_000, 0) + .saturating_add(Weight::from_parts(0, 23734)) + .saturating_add(T::DbWeight::get().reads(5)) + .saturating_add(T::DbWeight::get().writes(5)) + } + /// Storage: `Alliance::RetiringMembers` (r:1 w:1) + /// Proof: `Alliance::RetiringMembers` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) + /// Storage: `Alliance::Members` (r:1 w:1) + /// Proof: `Alliance::Members` (`max_values`: None, `max_size`: Some(3211), added: 5686, mode: `MaxEncodedLen`) + /// Storage: `Alliance::DepositOf` (r:1 w:1) + /// Proof: `Alliance::DepositOf` (`max_values`: None, `max_size`: Some(64), added: 2539, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + fn retire() -> Weight { + // Proof Size summary in bytes: + // Measured: `517` + // Estimated: `6676` + // Minimum execution time: 38_799_000 picoseconds. + Weight::from_parts(39_634_000, 0) + .saturating_add(Weight::from_parts(0, 6676)) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(4)) + } + /// Storage: `Alliance::Members` (r:3 w:1) + /// Proof: `Alliance::Members` (`max_values`: None, `max_size`: Some(3211), added: 5686, mode: `MaxEncodedLen`) + /// Storage: `AllianceMotion::Proposals` (r:1 w:0) + /// Proof: `AllianceMotion::Proposals` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Alliance::DepositOf` (r:1 w:1) + /// Proof: `Alliance::DepositOf` (`max_values`: None, `max_size`: Some(64), added: 2539, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:2 w:2) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `ParachainInfo::ParachainId` (r:1 w:0) + /// Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) + /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) + /// Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) + /// Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) + /// Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) + /// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `AllianceMotion::Members` (r:0 w:1) + /// Proof: `AllianceMotion::Members` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `AllianceMotion::Prime` (r:0 w:1) + /// Proof: `AllianceMotion::Prime` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + fn kick_member() -> Weight { + // Proof Size summary in bytes: + // Measured: `643` + // Estimated: `18048` + // Minimum execution time: 137_442_000 picoseconds. 
+ Weight::from_parts(142_142_000, 0) + .saturating_add(Weight::from_parts(0, 18048)) + .saturating_add(T::DbWeight::get().reads(13)) + .saturating_add(T::DbWeight::get().writes(8)) + } + /// Storage: `Alliance::UnscrupulousAccounts` (r:1 w:1) + /// Proof: `Alliance::UnscrupulousAccounts` (`max_values`: Some(1), `max_size`: Some(3202), added: 3697, mode: `MaxEncodedLen`) + /// Storage: `Alliance::UnscrupulousWebsites` (r:1 w:1) + /// Proof: `Alliance::UnscrupulousWebsites` (`max_values`: Some(1), `max_size`: Some(25702), added: 26197, mode: `MaxEncodedLen`) + /// The range of component `n` is `[0, 100]`. + /// The range of component `l` is `[0, 255]`. + fn add_unscrupulous_items(n: u32, l: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `76` + // Estimated: `27187` + // Minimum execution time: 7_189_000 picoseconds. + Weight::from_parts(7_387_000, 0) + .saturating_add(Weight::from_parts(0, 27187)) + // Standard Error: 3_417 + .saturating_add(Weight::from_parts(1_581_413, 0).saturating_mul(n.into())) + // Standard Error: 1_338 + .saturating_add(Weight::from_parts(67_739, 0).saturating_mul(l.into())) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `Alliance::UnscrupulousAccounts` (r:1 w:1) + /// Proof: `Alliance::UnscrupulousAccounts` (`max_values`: Some(1), `max_size`: Some(3202), added: 3697, mode: `MaxEncodedLen`) + /// Storage: `Alliance::UnscrupulousWebsites` (r:1 w:1) + /// Proof: `Alliance::UnscrupulousWebsites` (`max_values`: Some(1), `max_size`: Some(25702), added: 26197, mode: `MaxEncodedLen`) + /// The range of component `n` is `[0, 100]`. + /// The range of component `l` is `[0, 255]`. + fn remove_unscrupulous_items(n: u32, l: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `0 + l * (100 ±0) + n * (289 ±0)` + // Estimated: `27187` + // Minimum execution time: 7_201_000 picoseconds. + Weight::from_parts(7_325_000, 0) + .saturating_add(Weight::from_parts(0, 27187)) + // Standard Error: 183_302 + .saturating_add(Weight::from_parts(16_886_382, 0).saturating_mul(n.into())) + // Standard Error: 71_789 + .saturating_add(Weight::from_parts(352_937, 0).saturating_mul(l.into())) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `Alliance::Members` (r:3 w:2) + /// Proof: `Alliance::Members` (`max_values`: None, `max_size`: Some(3211), added: 5686, mode: `MaxEncodedLen`) + /// Storage: `AllianceMotion::Proposals` (r:1 w:0) + /// Proof: `AllianceMotion::Proposals` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `AllianceMotion::Members` (r:0 w:1) + /// Proof: `AllianceMotion::Members` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `AllianceMotion::Prime` (r:0 w:1) + /// Proof: `AllianceMotion::Prime` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + fn abdicate_fellow_status() -> Weight { + // Proof Size summary in bytes: + // Measured: `236` + // Estimated: `18048` + // Minimum execution time: 29_653_000 picoseconds. 
+ Weight::from_parts(30_365_000, 0)
+ .saturating_add(Weight::from_parts(0, 18048))
+ .saturating_add(T::DbWeight::get().reads(4))
+ .saturating_add(T::DbWeight::get().writes(4))
+ }
+}
diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_balances.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_balances.rs
new file mode 100644
index 00000000000..6c1cf072257
--- /dev/null
+++ b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_balances.rs
@@ -0,0 +1,152 @@
+// Copyright (C) Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: Apache-2.0
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! Autogenerated weights for `pallet_balances`
+//!
+//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev
+//! DATE: 2023-07-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
+//! WORST CASE MAP SIZE: `1000000`
+//! HOSTNAME: `runner-ynta1nyy-project-238-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz`
+//! EXECUTION: ``, WASM-EXECUTION: `Compiled`, CHAIN: `Some("collectives-polkadot-dev")`, DB CACHE: 1024
+
+// Executed Command:
+// ./target/production/polkadot-parachain
+// benchmark
+// pallet
+// --chain=collectives-polkadot-dev
+// --wasm-execution=compiled
+// --pallet=pallet_balances
+// --no-storage-info
+// --no-median-slopes
+// --no-min-squares
+// --extrinsic=*
+// --steps=50
+// --repeat=20
+// --json
+// --header=./file_header.txt
+// --output=./parachains/runtimes/collectives/collectives-polkadot/src/weights/
+
+#![cfg_attr(rustfmt, rustfmt_skip)]
+#![allow(unused_parens)]
+#![allow(unused_imports)]
+#![allow(missing_docs)]
+
+use frame_support::{traits::Get, weights::Weight};
+use core::marker::PhantomData;
+
+/// Weight functions for `pallet_balances`.
+pub struct WeightInfo<T>(PhantomData<T>);
+impl<T: frame_system::Config> pallet_balances::WeightInfo for WeightInfo<T> {
+ /// Storage: `System::Account` (r:1 w:1)
+ /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`)
+ fn transfer_allow_death() -> Weight {
+ // Proof Size summary in bytes:
+ // Measured: `0`
+ // Estimated: `3593`
+ // Minimum execution time: 55_696_000 picoseconds.
+ Weight::from_parts(56_582_000, 0)
+ .saturating_add(Weight::from_parts(0, 3593))
+ .saturating_add(T::DbWeight::get().reads(1))
+ .saturating_add(T::DbWeight::get().writes(1))
+ }
+ /// Storage: `System::Account` (r:1 w:1)
+ /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`)
+ fn transfer_keep_alive() -> Weight {
+ // Proof Size summary in bytes:
+ // Measured: `0`
+ // Estimated: `3593`
+ // Minimum execution time: 40_885_000 picoseconds.
+ Weight::from_parts(41_993_000, 0) + .saturating_add(Weight::from_parts(0, 3593)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + fn force_set_balance_creating() -> Weight { + // Proof Size summary in bytes: + // Measured: `103` + // Estimated: `3593` + // Minimum execution time: 14_565_000 picoseconds. + Weight::from_parts(15_080_000, 0) + .saturating_add(Weight::from_parts(0, 3593)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + fn force_set_balance_killing() -> Weight { + // Proof Size summary in bytes: + // Measured: `103` + // Estimated: `3593` + // Minimum execution time: 22_158_000 picoseconds. + Weight::from_parts(22_715_000, 0) + .saturating_add(Weight::from_parts(0, 3593)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `System::Account` (r:2 w:2) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + fn force_transfer() -> Weight { + // Proof Size summary in bytes: + // Measured: `103` + // Estimated: `6196` + // Minimum execution time: 57_957_000 picoseconds. + Weight::from_parts(58_618_000, 0) + .saturating_add(Weight::from_parts(0, 6196)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + fn transfer_all() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `3593` + // Minimum execution time: 52_018_000 picoseconds. + Weight::from_parts(52_795_000, 0) + .saturating_add(Weight::from_parts(0, 3593)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + fn force_unreserve() -> Weight { + // Proof Size summary in bytes: + // Measured: `103` + // Estimated: `3593` + // Minimum execution time: 17_469_000 picoseconds. + Weight::from_parts(18_030_000, 0) + .saturating_add(Weight::from_parts(0, 3593)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `System::Account` (r:999 w:999) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// The range of component `u` is `[1, 1000]`. + fn upgrade_accounts(u: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `0 + u * (136 ±0)` + // Estimated: `990 + u * (2603 ±0)` + // Minimum execution time: 17_223_000 picoseconds. 
+ Weight::from_parts(17_587_000, 0)
+ .saturating_add(Weight::from_parts(0, 990))
+ // Standard Error: 16_201
+ .saturating_add(Weight::from_parts(15_360_967, 0).saturating_mul(u.into()))
+ .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(u.into())))
+ .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(u.into())))
+ .saturating_add(Weight::from_parts(0, 2603).saturating_mul(u.into()))
+ }
+}
diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_collator_selection.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_collator_selection.rs
new file mode 100644
index 00000000000..03f3ff602a5
--- /dev/null
+++ b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_collator_selection.rs
@@ -0,0 +1,246 @@
+// Copyright (C) Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: Apache-2.0
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! Autogenerated weights for `pallet_collator_selection`
+//!
+//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev
+//! DATE: 2023-07-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
+//! WORST CASE MAP SIZE: `1000000`
+//! HOSTNAME: `runner-ynta1nyy-project-238-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz`
+//! EXECUTION: ``, WASM-EXECUTION: `Compiled`, CHAIN: `Some("collectives-polkadot-dev")`, DB CACHE: 1024
+
+// Executed Command:
+// ./target/production/polkadot-parachain
+// benchmark
+// pallet
+// --chain=collectives-polkadot-dev
+// --wasm-execution=compiled
+// --pallet=pallet_collator_selection
+// --no-storage-info
+// --no-median-slopes
+// --no-min-squares
+// --extrinsic=*
+// --steps=50
+// --repeat=20
+// --json
+// --header=./file_header.txt
+// --output=./parachains/runtimes/collectives/collectives-polkadot/src/weights/
+
+#![cfg_attr(rustfmt, rustfmt_skip)]
+#![allow(unused_parens)]
+#![allow(unused_imports)]
+#![allow(missing_docs)]
+
+use frame_support::{traits::Get, weights::Weight};
+use core::marker::PhantomData;
+
+/// Weight functions for `pallet_collator_selection`.
+pub struct WeightInfo<T>(PhantomData<T>);
+impl<T: frame_system::Config> pallet_collator_selection::WeightInfo for WeightInfo<T> {
+ /// Storage: `Session::NextKeys` (r:20 w:0)
+ /// Proof: `Session::NextKeys` (`max_values`: None, `max_size`: None, mode: `Measured`)
+ /// Storage: `CollatorSelection::Invulnerables` (r:0 w:1)
+ /// Proof: `CollatorSelection::Invulnerables` (`max_values`: Some(1), `max_size`: Some(641), added: 1136, mode: `MaxEncodedLen`)
+ /// The range of component `b` is `[1, 20]`.
+ fn set_invulnerables(b: u32, ) -> Weight {
+ // Proof Size summary in bytes:
+ // Measured: `163 + b * (79 ±0)`
+ // Estimated: `1154 + b * (2555 ±0)`
+ // Minimum execution time: 14_616_000 picoseconds.
+ Weight::from_parts(12_150_410, 0) + .saturating_add(Weight::from_parts(0, 1154)) + // Standard Error: 6_270 + .saturating_add(Weight::from_parts(3_256_932, 0).saturating_mul(b.into())) + .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(b.into()))) + .saturating_add(T::DbWeight::get().writes(1)) + .saturating_add(Weight::from_parts(0, 2555).saturating_mul(b.into())) + } + /// Storage: `Session::NextKeys` (r:1 w:0) + /// Proof: `Session::NextKeys` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `CollatorSelection::Invulnerables` (r:1 w:1) + /// Proof: `CollatorSelection::Invulnerables` (`max_values`: Some(1), `max_size`: Some(641), added: 1136, mode: `MaxEncodedLen`) + /// Storage: `CollatorSelection::Candidates` (r:1 w:1) + /// Proof: `CollatorSelection::Candidates` (`max_values`: Some(1), `max_size`: Some(4802), added: 5297, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// The range of component `b` is `[1, 19]`. + /// The range of component `c` is `[1, 99]`. + fn add_invulnerable(b: u32, c: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `756 + b * (32 ±0) + c * (53 ±0)` + // Estimated: `6287 + b * (37 ±0) + c * (53 ±0)` + // Minimum execution time: 48_450_000 picoseconds. + Weight::from_parts(51_166_679, 0) + .saturating_add(Weight::from_parts(0, 6287)) + // Standard Error: 2_588 + .saturating_add(Weight::from_parts(167_219, 0).saturating_mul(c.into())) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(3)) + .saturating_add(Weight::from_parts(0, 37).saturating_mul(b.into())) + .saturating_add(Weight::from_parts(0, 53).saturating_mul(c.into())) + } + /// Storage: `CollatorSelection::Candidates` (r:1 w:0) + /// Proof: `CollatorSelection::Candidates` (`max_values`: Some(1), `max_size`: Some(4802), added: 5297, mode: `MaxEncodedLen`) + /// Storage: `CollatorSelection::Invulnerables` (r:1 w:1) + /// Proof: `CollatorSelection::Invulnerables` (`max_values`: Some(1), `max_size`: Some(641), added: 1136, mode: `MaxEncodedLen`) + /// The range of component `b` is `[5, 20]`. + fn remove_invulnerable(b: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `119 + b * (32 ±0)` + // Estimated: `6287` + // Minimum execution time: 15_830_000 picoseconds. + Weight::from_parts(15_792_847, 0) + .saturating_add(Weight::from_parts(0, 6287)) + // Standard Error: 5_343 + .saturating_add(Weight::from_parts(167_955, 0).saturating_mul(b.into())) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `CollatorSelection::DesiredCandidates` (r:0 w:1) + /// Proof: `CollatorSelection::DesiredCandidates` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + fn set_desired_candidates() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 7_424_000 picoseconds. 
+ Weight::from_parts(7_767_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `CollatorSelection::CandidacyBond` (r:0 w:1) + /// Proof: `CollatorSelection::CandidacyBond` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) + fn set_candidacy_bond(_c: u32, _k: u32) -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 7_388_000 picoseconds. + Weight::from_parts(7_677_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `CollatorSelection::Candidates` (r:1 w:1) + /// Proof: `CollatorSelection::Candidates` (`max_values`: Some(1), `max_size`: Some(4802), added: 5297, mode: `MaxEncodedLen`) + /// Storage: `CollatorSelection::DesiredCandidates` (r:1 w:0) + /// Proof: `CollatorSelection::DesiredCandidates` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `CollatorSelection::Invulnerables` (r:1 w:0) + /// Proof: `CollatorSelection::Invulnerables` (`max_values`: Some(1), `max_size`: Some(641), added: 1136, mode: `MaxEncodedLen`) + /// Storage: `Session::NextKeys` (r:1 w:0) + /// Proof: `Session::NextKeys` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `CollatorSelection::CandidacyBond` (r:1 w:0) + /// Proof: `CollatorSelection::CandidacyBond` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) + /// Storage: `CollatorSelection::LastAuthoredBlock` (r:0 w:1) + /// Proof: `CollatorSelection::LastAuthoredBlock` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) + /// The range of component `c` is `[1, 99]`. + fn register_as_candidate(c: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `736 + c * (52 ±0)` + // Estimated: `6287 + c * (54 ±0)` + // Minimum execution time: 41_241_000 picoseconds. + Weight::from_parts(46_090_319, 0) + .saturating_add(Weight::from_parts(0, 6287)) + // Standard Error: 2_918 + .saturating_add(Weight::from_parts(161_140, 0).saturating_mul(c.into())) + .saturating_add(T::DbWeight::get().reads(5)) + .saturating_add(T::DbWeight::get().writes(2)) + .saturating_add(Weight::from_parts(0, 54).saturating_mul(c.into())) + } + /// Storage: `CollatorSelection::Candidates` (r:1 w:1) + /// Proof: `CollatorSelection::Candidates` (`max_values`: Some(1), `max_size`: Some(4802), added: 5297, mode: `MaxEncodedLen`) + /// Storage: `CollatorSelection::Invulnerables` (r:1 w:0) + /// Proof: `CollatorSelection::Invulnerables` (`max_values`: Some(1), `max_size`: Some(641), added: 1136, mode: `MaxEncodedLen`) + /// Storage: `CollatorSelection::LastAuthoredBlock` (r:0 w:1) + /// Proof: `CollatorSelection::LastAuthoredBlock` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) + /// The range of component `c` is `[4, 100]`. + fn leave_intent(c: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `306 + c * (50 ±0)` + // Estimated: `6287` + // Minimum execution time: 34_221_000 picoseconds. 
+ Weight::from_parts(36_183_872, 0) + .saturating_add(Weight::from_parts(0, 6287)) + // Standard Error: 2_766 + .saturating_add(Weight::from_parts(168_742, 0).saturating_mul(c.into())) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } + fn update_bond(c: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `306 + c * (50 ±0)` + // Estimated: `6287` + // Minimum execution time: 34_814_000 picoseconds. + Weight::from_parts(36_371_520, 0) + .saturating_add(Weight::from_parts(0, 6287)) + // Standard Error: 2_391 + .saturating_add(Weight::from_parts(201_700, 0).saturating_mul(c.into())) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } + fn take_candidate_slot(c: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `306 + c * (50 ±0)` + // Estimated: `6287` + // Minimum execution time: 34_814_000 picoseconds. + Weight::from_parts(36_371_520, 0) + .saturating_add(Weight::from_parts(0, 6287)) + // Standard Error: 2_391 + .saturating_add(Weight::from_parts(201_700, 0).saturating_mul(c.into())) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `System::Account` (r:2 w:2) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `System::BlockWeight` (r:1 w:1) + /// Proof: `System::BlockWeight` (`max_values`: Some(1), `max_size`: Some(48), added: 543, mode: `MaxEncodedLen`) + /// Storage: `CollatorSelection::LastAuthoredBlock` (r:0 w:1) + /// Proof: `CollatorSelection::LastAuthoredBlock` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) + fn note_author() -> Weight { + // Proof Size summary in bytes: + // Measured: `103` + // Estimated: `6196` + // Minimum execution time: 43_910_000 picoseconds. + Weight::from_parts(44_796_000, 0) + .saturating_add(Weight::from_parts(0, 6196)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(4)) + } + /// Storage: `CollatorSelection::Candidates` (r:1 w:0) + /// Proof: `CollatorSelection::Candidates` (`max_values`: Some(1), `max_size`: Some(4802), added: 5297, mode: `MaxEncodedLen`) + /// Storage: `CollatorSelection::LastAuthoredBlock` (r:100 w:0) + /// Proof: `CollatorSelection::LastAuthoredBlock` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) + /// Storage: `CollatorSelection::Invulnerables` (r:1 w:0) + /// Proof: `CollatorSelection::Invulnerables` (`max_values`: Some(1), `max_size`: Some(641), added: 1136, mode: `MaxEncodedLen`) + /// Storage: `System::BlockWeight` (r:1 w:1) + /// Proof: `System::BlockWeight` (`max_values`: Some(1), `max_size`: Some(48), added: 543, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:97 w:97) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// The range of component `r` is `[1, 100]`. + /// The range of component `c` is `[1, 100]`. + fn new_session(r: u32, c: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `2243 + c * (97 ±0) + r * (112 ±0)` + // Estimated: `6287 + c * (2519 ±0) + r * (2603 ±0)` + // Minimum execution time: 17_092_000 picoseconds. 
+ Weight::from_parts(17_635_000, 0)
+ .saturating_add(Weight::from_parts(0, 6287))
+ // Standard Error: 351_635
+ .saturating_add(Weight::from_parts(15_162_192, 0).saturating_mul(c.into()))
+ .saturating_add(T::DbWeight::get().reads(4))
+ .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(c.into())))
+ .saturating_add(T::DbWeight::get().writes(1))
+ .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(c.into())))
+ .saturating_add(Weight::from_parts(0, 2519).saturating_mul(c.into()))
+ .saturating_add(Weight::from_parts(0, 2603).saturating_mul(r.into()))
+ }
+}
diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_collective.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_collective.rs
new file mode 100644
index 00000000000..9133baa6120
--- /dev/null
+++ b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_collective.rs
@@ -0,0 +1,304 @@
+// Copyright (C) Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: Apache-2.0
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! Autogenerated weights for `pallet_collective`
+//!
+//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev
+//! DATE: 2023-07-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
+//! WORST CASE MAP SIZE: `1000000`
+//! HOSTNAME: `runner-ynta1nyy-project-238-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz`
+//! EXECUTION: ``, WASM-EXECUTION: `Compiled`, CHAIN: `Some("collectives-polkadot-dev")`, DB CACHE: 1024
+
+// Executed Command:
+// ./target/production/polkadot-parachain
+// benchmark
+// pallet
+// --chain=collectives-polkadot-dev
+// --wasm-execution=compiled
+// --pallet=pallet_collective
+// --no-storage-info
+// --no-median-slopes
+// --no-min-squares
+// --extrinsic=*
+// --steps=50
+// --repeat=20
+// --json
+// --header=./file_header.txt
+// --output=./parachains/runtimes/collectives/collectives-polkadot/src/weights/
+
+#![cfg_attr(rustfmt, rustfmt_skip)]
+#![allow(unused_parens)]
+#![allow(unused_imports)]
+#![allow(missing_docs)]
+
+use frame_support::{traits::Get, weights::Weight};
+use core::marker::PhantomData;
+
+/// Weight functions for `pallet_collective`.
+pub struct WeightInfo<T>(PhantomData<T>);
+impl<T: frame_system::Config> pallet_collective::WeightInfo for WeightInfo<T> {
+ /// Storage: `AllianceMotion::Members` (r:1 w:1)
+ /// Proof: `AllianceMotion::Members` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+ /// Storage: `AllianceMotion::Proposals` (r:1 w:0)
+ /// Proof: `AllianceMotion::Proposals` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+ /// Storage: `AllianceMotion::Voting` (r:100 w:100)
+ /// Proof: `AllianceMotion::Voting` (`max_values`: None, `max_size`: None, mode: `Measured`)
+ /// Storage: `AllianceMotion::Prime` (r:0 w:1)
+ /// Proof: `AllianceMotion::Prime` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+ /// The range of component `m` is `[0, 100]`.
+ /// The range of component `n` is `[0, 100]`. + /// The range of component `p` is `[0, 100]`. + fn set_members(m: u32, _n: u32, p: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `0 + m * (3232 ±0) + p * (3190 ±0)` + // Estimated: `15691 + m * (1967 ±23) + p * (4332 ±23)` + // Minimum execution time: 16_410_000 picoseconds. + Weight::from_parts(16_816_000, 0) + .saturating_add(Weight::from_parts(0, 15691)) + // Standard Error: 59_812 + .saturating_add(Weight::from_parts(4_516_537, 0).saturating_mul(m.into())) + // Standard Error: 59_812 + .saturating_add(Weight::from_parts(7_992_168, 0).saturating_mul(p.into())) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(p.into()))) + .saturating_add(T::DbWeight::get().writes(2)) + .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(p.into()))) + .saturating_add(Weight::from_parts(0, 1967).saturating_mul(m.into())) + .saturating_add(Weight::from_parts(0, 4332).saturating_mul(p.into())) + } + /// Storage: `AllianceMotion::Members` (r:1 w:0) + /// Proof: `AllianceMotion::Members` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// The range of component `b` is `[2, 1024]`. + /// The range of component `m` is `[1, 100]`. + fn execute(b: u32, m: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `32 + m * (32 ±0)` + // Estimated: `1518 + m * (32 ±0)` + // Minimum execution time: 14_418_000 picoseconds. + Weight::from_parts(13_588_617, 0) + .saturating_add(Weight::from_parts(0, 1518)) + // Standard Error: 21 + .saturating_add(Weight::from_parts(1_711, 0).saturating_mul(b.into())) + // Standard Error: 223 + .saturating_add(Weight::from_parts(13_836, 0).saturating_mul(m.into())) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(Weight::from_parts(0, 32).saturating_mul(m.into())) + } + /// Storage: `AllianceMotion::Members` (r:1 w:0) + /// Proof: `AllianceMotion::Members` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `AllianceMotion::ProposalOf` (r:1 w:0) + /// Proof: `AllianceMotion::ProposalOf` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// The range of component `b` is `[2, 1024]`. + /// The range of component `m` is `[1, 100]`. + fn propose_execute(b: u32, m: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `32 + m * (32 ±0)` + // Estimated: `3498 + m * (32 ±0)` + // Minimum execution time: 17_174_000 picoseconds. 
+ Weight::from_parts(16_192_764, 0) + .saturating_add(Weight::from_parts(0, 3498)) + // Standard Error: 27 + .saturating_add(Weight::from_parts(1_672, 0).saturating_mul(b.into())) + // Standard Error: 280 + .saturating_add(Weight::from_parts(24_343, 0).saturating_mul(m.into())) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(Weight::from_parts(0, 32).saturating_mul(m.into())) + } + /// Storage: `AllianceMotion::Members` (r:1 w:0) + /// Proof: `AllianceMotion::Members` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `AllianceMotion::ProposalOf` (r:1 w:1) + /// Proof: `AllianceMotion::ProposalOf` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `AllianceMotion::Proposals` (r:1 w:1) + /// Proof: `AllianceMotion::Proposals` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `AllianceMotion::ProposalCount` (r:1 w:1) + /// Proof: `AllianceMotion::ProposalCount` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `AllianceMotion::Voting` (r:0 w:1) + /// Proof: `AllianceMotion::Voting` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// The range of component `b` is `[2, 1024]`. + /// The range of component `m` is `[2, 100]`. + /// The range of component `p` is `[1, 100]`. + fn propose_proposed(b: u32, m: u32, p: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `322 + m * (32 ±0) + p * (36 ±0)` + // Estimated: `3714 + m * (33 ±0) + p * (36 ±0)` + // Minimum execution time: 23_970_000 picoseconds. + Weight::from_parts(23_004_052, 0) + .saturating_add(Weight::from_parts(0, 3714)) + // Standard Error: 123 + .saturating_add(Weight::from_parts(2_728, 0).saturating_mul(b.into())) + // Standard Error: 1_291 + .saturating_add(Weight::from_parts(32_731, 0).saturating_mul(m.into())) + // Standard Error: 1_275 + .saturating_add(Weight::from_parts(199_537, 0).saturating_mul(p.into())) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(4)) + .saturating_add(Weight::from_parts(0, 33).saturating_mul(m.into())) + .saturating_add(Weight::from_parts(0, 36).saturating_mul(p.into())) + } + /// Storage: `AllianceMotion::Members` (r:1 w:0) + /// Proof: `AllianceMotion::Members` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `AllianceMotion::Voting` (r:1 w:1) + /// Proof: `AllianceMotion::Voting` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// The range of component `m` is `[5, 100]`. + fn vote(m: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `771 + m * (64 ±0)` + // Estimated: `4235 + m * (64 ±0)` + // Minimum execution time: 25_843_000 picoseconds. 
+ Weight::from_parts(26_092_578, 0) + .saturating_add(Weight::from_parts(0, 4235)) + // Standard Error: 1_785 + .saturating_add(Weight::from_parts(67_298, 0).saturating_mul(m.into())) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + .saturating_add(Weight::from_parts(0, 64).saturating_mul(m.into())) + } + /// Storage: `AllianceMotion::Voting` (r:1 w:1) + /// Proof: `AllianceMotion::Voting` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `AllianceMotion::Members` (r:1 w:0) + /// Proof: `AllianceMotion::Members` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `AllianceMotion::Proposals` (r:1 w:1) + /// Proof: `AllianceMotion::Proposals` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `AllianceMotion::ProposalOf` (r:0 w:1) + /// Proof: `AllianceMotion::ProposalOf` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// The range of component `m` is `[4, 100]`. + /// The range of component `p` is `[1, 100]`. + fn close_early_disapproved(m: u32, p: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `360 + m * (64 ±0) + p * (36 ±0)` + // Estimated: `3805 + m * (65 ±0) + p * (36 ±0)` + // Minimum execution time: 27_543_000 picoseconds. + Weight::from_parts(26_505_473, 0) + .saturating_add(Weight::from_parts(0, 3805)) + // Standard Error: 1_054 + .saturating_add(Weight::from_parts(35_295, 0).saturating_mul(m.into())) + // Standard Error: 1_028 + .saturating_add(Weight::from_parts(190_508, 0).saturating_mul(p.into())) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(3)) + .saturating_add(Weight::from_parts(0, 65).saturating_mul(m.into())) + .saturating_add(Weight::from_parts(0, 36).saturating_mul(p.into())) + } + /// Storage: `AllianceMotion::Voting` (r:1 w:1) + /// Proof: `AllianceMotion::Voting` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `AllianceMotion::Members` (r:1 w:0) + /// Proof: `AllianceMotion::Members` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `AllianceMotion::ProposalOf` (r:1 w:1) + /// Proof: `AllianceMotion::ProposalOf` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `AllianceMotion::Proposals` (r:1 w:1) + /// Proof: `AllianceMotion::Proposals` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// The range of component `b` is `[2, 1024]`. + /// The range of component `m` is `[4, 100]`. + /// The range of component `p` is `[1, 100]`. + fn close_early_approved(b: u32, m: u32, p: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `662 + b * (1 ±0) + m * (64 ±0) + p * (40 ±0)` + // Estimated: `3979 + b * (1 ±0) + m * (66 ±0) + p * (40 ±0)` + // Minimum execution time: 40_375_000 picoseconds. 
+ Weight::from_parts(34_081_294, 0) + .saturating_add(Weight::from_parts(0, 3979)) + // Standard Error: 196 + .saturating_add(Weight::from_parts(3_796, 0).saturating_mul(b.into())) + // Standard Error: 2_072 + .saturating_add(Weight::from_parts(50_954, 0).saturating_mul(m.into())) + // Standard Error: 2_020 + .saturating_add(Weight::from_parts(246_000, 0).saturating_mul(p.into())) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(3)) + .saturating_add(Weight::from_parts(0, 1).saturating_mul(b.into())) + .saturating_add(Weight::from_parts(0, 66).saturating_mul(m.into())) + .saturating_add(Weight::from_parts(0, 40).saturating_mul(p.into())) + } + /// Storage: `AllianceMotion::Voting` (r:1 w:1) + /// Proof: `AllianceMotion::Voting` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `AllianceMotion::Members` (r:1 w:0) + /// Proof: `AllianceMotion::Members` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `AllianceMotion::Prime` (r:1 w:0) + /// Proof: `AllianceMotion::Prime` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `AllianceMotion::Proposals` (r:1 w:1) + /// Proof: `AllianceMotion::Proposals` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `AllianceMotion::ProposalOf` (r:0 w:1) + /// Proof: `AllianceMotion::ProposalOf` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// The range of component `m` is `[4, 100]`. + /// The range of component `p` is `[1, 100]`. + fn close_disapproved(m: u32, p: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `458 + m * (48 ±0) + p * (36 ±0)` + // Estimated: `3898 + m * (49 ±0) + p * (36 ±0)` + // Minimum execution time: 28_793_000 picoseconds. + Weight::from_parts(29_656_832, 0) + .saturating_add(Weight::from_parts(0, 3898)) + // Standard Error: 1_214 + .saturating_add(Weight::from_parts(22_148, 0).saturating_mul(m.into())) + // Standard Error: 1_184 + .saturating_add(Weight::from_parts(189_860, 0).saturating_mul(p.into())) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(3)) + .saturating_add(Weight::from_parts(0, 49).saturating_mul(m.into())) + .saturating_add(Weight::from_parts(0, 36).saturating_mul(p.into())) + } + /// Storage: `AllianceMotion::Voting` (r:1 w:1) + /// Proof: `AllianceMotion::Voting` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `AllianceMotion::Members` (r:1 w:0) + /// Proof: `AllianceMotion::Members` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `AllianceMotion::Prime` (r:1 w:0) + /// Proof: `AllianceMotion::Prime` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `AllianceMotion::ProposalOf` (r:1 w:1) + /// Proof: `AllianceMotion::ProposalOf` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `AllianceMotion::Proposals` (r:1 w:1) + /// Proof: `AllianceMotion::Proposals` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// The range of component `b` is `[2, 1024]`. + /// The range of component `m` is `[4, 100]`. + /// The range of component `p` is `[1, 100]`. + fn close_approved(b: u32, m: u32, p: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `682 + b * (1 ±0) + m * (64 ±0) + p * (40 ±0)` + // Estimated: `3999 + b * (1 ±0) + m * (66 ±0) + p * (40 ±0)` + // Minimum execution time: 40_887_000 picoseconds. 
+ Weight::from_parts(39_529_567, 0) + .saturating_add(Weight::from_parts(0, 3999)) + // Standard Error: 191 + .saturating_add(Weight::from_parts(2_802, 0).saturating_mul(b.into())) + // Standard Error: 2_021 + .saturating_add(Weight::from_parts(35_956, 0).saturating_mul(m.into())) + // Standard Error: 1_970 + .saturating_add(Weight::from_parts(235_154, 0).saturating_mul(p.into())) + .saturating_add(T::DbWeight::get().reads(5)) + .saturating_add(T::DbWeight::get().writes(3)) + .saturating_add(Weight::from_parts(0, 1).saturating_mul(b.into())) + .saturating_add(Weight::from_parts(0, 66).saturating_mul(m.into())) + .saturating_add(Weight::from_parts(0, 40).saturating_mul(p.into())) + } + /// Storage: `AllianceMotion::Proposals` (r:1 w:1) + /// Proof: `AllianceMotion::Proposals` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `AllianceMotion::Voting` (r:0 w:1) + /// Proof: `AllianceMotion::Voting` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `AllianceMotion::ProposalOf` (r:0 w:1) + /// Proof: `AllianceMotion::ProposalOf` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// The range of component `p` is `[1, 100]`. + fn disapprove_proposal(p: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `189 + p * (32 ±0)` + // Estimated: `1674 + p * (32 ±0)` + // Minimum execution time: 14_040_000 picoseconds. + Weight::from_parts(15_075_964, 0) + .saturating_add(Weight::from_parts(0, 1674)) + // Standard Error: 854 + .saturating_add(Weight::from_parts(159_597, 0).saturating_mul(p.into())) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(3)) + .saturating_add(Weight::from_parts(0, 32).saturating_mul(p.into())) + } +} diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_collective_content.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_collective_content.rs new file mode 100644 index 00000000000..6be94db22db --- /dev/null +++ b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_collective_content.rs @@ -0,0 +1,93 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Autogenerated weights for `pallet_collective_content` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-08-18, STEPS: `10`, REPEAT: `3`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `cob`, CPU: `` +//! 
WASM-EXECUTION: `Compiled`, CHAIN: `Some("collectives-polkadot-dev")`, DB CACHE: 1024 + +// Executed Command: +// ./target/debug/polkadot-parachain +// benchmark +// pallet +// --chain=collectives-polkadot-dev +// --steps=10 +// --repeat=3 +// --pallet=pallet_collective_content +// --extrinsic=* +// --wasm-execution=compiled +// --heap-pages=4096 +// --output=./parachains/runtimes/collectives/collectives-polkadot/src/weights/ + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `pallet_collective_content`. +pub struct WeightInfo<T>(PhantomData<T>); +impl<T: frame_system::Config> pallet_collective_content::WeightInfo for WeightInfo<T> { + /// Storage: `AmbassadorContent::Charter` (r:0 w:1) + /// Proof: `AmbassadorContent::Charter` (`max_values`: Some(1), `max_size`: Some(70), added: 565, mode: `MaxEncodedLen`) + fn set_charter() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 99_000_000 picoseconds. + Weight::from_parts(99_000_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `AmbassadorCollective::Members` (r:1 w:0) + /// Proof: `AmbassadorCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorContent::AnnouncementsCount` (r:1 w:1) + /// Proof: `AmbassadorContent::AnnouncementsCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorContent::NextAnnouncementExpireAt` (r:1 w:1) + /// Proof: `AmbassadorContent::NextAnnouncementExpireAt` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorContent::Announcements` (r:0 w:1) + /// Proof: `AmbassadorContent::Announcements` (`max_values`: None, `max_size`: Some(90), added: 2565, mode: `MaxEncodedLen`) + fn announce() -> Weight { + // Proof Size summary in bytes: + // Measured: `285` + // Estimated: `3507` + // Minimum execution time: 273_000_000 picoseconds. + Weight::from_parts(278_000_000, 0) + .saturating_add(Weight::from_parts(0, 3507)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(3)) + } + /// Storage: `AmbassadorCollective::Members` (r:1 w:0) + /// Proof: `AmbassadorCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorContent::Announcements` (r:1 w:1) + /// Proof: `AmbassadorContent::Announcements` (`max_values`: None, `max_size`: Some(90), added: 2565, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorContent::AnnouncementsCount` (r:1 w:1) + /// Proof: `AmbassadorContent::AnnouncementsCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + fn remove_announcement() -> Weight { + // Proof Size summary in bytes: + // Measured: `450` + // Estimated: `3555` + // Minimum execution time: 326_000_000 picoseconds.
+ Weight::from_parts(338_000_000, 0) + .saturating_add(Weight::from_parts(0, 3555)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(2)) + } +} diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_core_fellowship_ambassador_core.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_core_fellowship_ambassador_core.rs new file mode 100644 index 00000000000..f40940a8b25 --- /dev/null +++ b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_core_fellowship_ambassador_core.rs @@ -0,0 +1,223 @@ +// Copyright Parity Technologies (UK) Ltd. +// This file is part of Cumulus. + +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see <http://www.gnu.org/licenses/>. + +//! Autogenerated weights for `pallet_core_fellowship` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-08-11, STEPS: `2`, REPEAT: `2`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `cob`, CPU: `` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("collectives-polkadot-dev")`, DB CACHE: 1024 + +// Executed Command: +// target/release/polkadot-parachain +// benchmark +// pallet +// --chain=collectives-polkadot-dev +// --wasm-execution=compiled +// --pallet=pallet_core_fellowship +// --extrinsic=* +// --steps=2 +// --repeat=2 +// --json +// --header=./file_header.txt +// --output=./parachains/runtimes/collectives/collectives-polkadot/src/weights/ + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `pallet_core_fellowship`. +pub struct WeightInfo<T>(PhantomData<T>); +impl<T: frame_system::Config> pallet_core_fellowship::WeightInfo for WeightInfo<T> { + /// Storage: `AmbassadorCore::Params` (r:0 w:1) + /// Proof: `AmbassadorCore::Params` (`max_values`: Some(1), `max_size`: Some(364), added: 859, mode: `MaxEncodedLen`) + fn set_params() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 11_000_000 picoseconds.
+ Weight::from_parts(11_000_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `AmbassadorCore::Member` (r:1 w:1) + /// Proof: `AmbassadorCore::Member` (`max_values`: None, `max_size`: Some(49), added: 2524, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorCollective::Members` (r:1 w:1) + /// Proof: `AmbassadorCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorCore::Params` (r:1 w:0) + /// Proof: `AmbassadorCore::Params` (`max_values`: Some(1), `max_size`: Some(364), added: 859, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorCollective::MemberCount` (r:1 w:1) + /// Proof: `AmbassadorCollective::MemberCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorCollective::IdToIndex` (r:1 w:0) + /// Proof: `AmbassadorCollective::IdToIndex` (`max_values`: None, `max_size`: Some(54), added: 2529, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorCore::MemberEvidence` (r:1 w:1) + /// Proof: `AmbassadorCore::MemberEvidence` (`max_values`: None, `max_size`: Some(65581), added: 68056, mode: `MaxEncodedLen`) + fn bump_offboard() -> Weight { + // Proof Size summary in bytes: + // Measured: `66011` + // Estimated: `69046` + // Minimum execution time: 96_000_000 picoseconds. + Weight::from_parts(111_000_000, 0) + .saturating_add(Weight::from_parts(0, 69046)) + .saturating_add(T::DbWeight::get().reads(6)) + .saturating_add(T::DbWeight::get().writes(4)) + } + /// Storage: `AmbassadorCore::Member` (r:1 w:1) + /// Proof: `AmbassadorCore::Member` (`max_values`: None, `max_size`: Some(49), added: 2524, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorCollective::Members` (r:1 w:1) + /// Proof: `AmbassadorCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorCore::Params` (r:1 w:0) + /// Proof: `AmbassadorCore::Params` (`max_values`: Some(1), `max_size`: Some(364), added: 859, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorCollective::MemberCount` (r:1 w:1) + /// Proof: `AmbassadorCollective::MemberCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorCollective::IdToIndex` (r:1 w:0) + /// Proof: `AmbassadorCollective::IdToIndex` (`max_values`: None, `max_size`: Some(54), added: 2529, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorCore::MemberEvidence` (r:1 w:1) + /// Proof: `AmbassadorCore::MemberEvidence` (`max_values`: None, `max_size`: Some(65581), added: 68056, mode: `MaxEncodedLen`) + fn bump_demote() -> Weight { + // Proof Size summary in bytes: + // Measured: `66121` + // Estimated: `69046` + // Minimum execution time: 99_000_000 picoseconds. + Weight::from_parts(116_000_000, 0) + .saturating_add(Weight::from_parts(0, 69046)) + .saturating_add(T::DbWeight::get().reads(6)) + .saturating_add(T::DbWeight::get().writes(4)) + } + /// Storage: `AmbassadorCollective::Members` (r:1 w:0) + /// Proof: `AmbassadorCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorCore::Member` (r:1 w:1) + /// Proof: `AmbassadorCore::Member` (`max_values`: None, `max_size`: Some(49), added: 2524, mode: `MaxEncodedLen`) + fn set_active() -> Weight { + // Proof Size summary in bytes: + // Measured: `360` + // Estimated: `3514` + // Minimum execution time: 21_000_000 picoseconds. 
+ Weight::from_parts(22_000_000, 0) + .saturating_add(Weight::from_parts(0, 3514)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `AmbassadorCore::Member` (r:1 w:1) + /// Proof: `AmbassadorCore::Member` (`max_values`: None, `max_size`: Some(49), added: 2524, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorCollective::Members` (r:1 w:1) + /// Proof: `AmbassadorCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorCollective::MemberCount` (r:1 w:1) + /// Proof: `AmbassadorCollective::MemberCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorCollective::IndexToId` (r:0 w:1) + /// Proof: `AmbassadorCollective::IndexToId` (`max_values`: None, `max_size`: Some(54), added: 2529, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorCollective::IdToIndex` (r:0 w:1) + /// Proof: `AmbassadorCollective::IdToIndex` (`max_values`: None, `max_size`: Some(54), added: 2529, mode: `MaxEncodedLen`) + fn induct() -> Weight { + // Proof Size summary in bytes: + // Measured: `118` + // Estimated: `3514` + // Minimum execution time: 36_000_000 picoseconds. + Weight::from_parts(36_000_000, 0) + .saturating_add(Weight::from_parts(0, 3514)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(5)) + } + /// Storage: `AmbassadorCollective::Members` (r:1 w:1) + /// Proof: `AmbassadorCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorCore::Member` (r:1 w:1) + /// Proof: `AmbassadorCore::Member` (`max_values`: None, `max_size`: Some(49), added: 2524, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorCore::Params` (r:1 w:0) + /// Proof: `AmbassadorCore::Params` (`max_values`: Some(1), `max_size`: Some(364), added: 859, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorCollective::MemberCount` (r:1 w:1) + /// Proof: `AmbassadorCollective::MemberCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorCore::MemberEvidence` (r:1 w:1) + /// Proof: `AmbassadorCore::MemberEvidence` (`max_values`: None, `max_size`: Some(65581), added: 68056, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorCollective::IndexToId` (r:0 w:1) + /// Proof: `AmbassadorCollective::IndexToId` (`max_values`: None, `max_size`: Some(54), added: 2529, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorCollective::IdToIndex` (r:0 w:1) + /// Proof: `AmbassadorCollective::IdToIndex` (`max_values`: None, `max_size`: Some(54), added: 2529, mode: `MaxEncodedLen`) + fn promote() -> Weight { + // Proof Size summary in bytes: + // Measured: `65989` + // Estimated: `69046` + // Minimum execution time: 95_000_000 picoseconds. 
+ Weight::from_parts(110_000_000, 0) + .saturating_add(Weight::from_parts(0, 69046)) + .saturating_add(T::DbWeight::get().reads(5)) + .saturating_add(T::DbWeight::get().writes(6)) + } + /// Storage: `AmbassadorCollective::Members` (r:1 w:0) + /// Proof: `AmbassadorCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorCore::Member` (r:1 w:1) + /// Proof: `AmbassadorCore::Member` (`max_values`: None, `max_size`: Some(49), added: 2524, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorCore::MemberEvidence` (r:0 w:1) + /// Proof: `AmbassadorCore::MemberEvidence` (`max_values`: None, `max_size`: Some(65581), added: 68056, mode: `MaxEncodedLen`) + fn offboard() -> Weight { + // Proof Size summary in bytes: + // Measured: `331` + // Estimated: `3514` + // Minimum execution time: 21_000_000 picoseconds. + Weight::from_parts(22_000_000, 0) + .saturating_add(Weight::from_parts(0, 3514)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `AmbassadorCore::Member` (r:1 w:1) + /// Proof: `AmbassadorCore::Member` (`max_values`: None, `max_size`: Some(49), added: 2524, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorCollective::Members` (r:1 w:0) + /// Proof: `AmbassadorCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`) + fn import() -> Weight { + // Proof Size summary in bytes: + // Measured: `285` + // Estimated: `3514` + // Minimum execution time: 20_000_000 picoseconds. + Weight::from_parts(21_000_000, 0) + .saturating_add(Weight::from_parts(0, 3514)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `AmbassadorCollective::Members` (r:1 w:0) + /// Proof: `AmbassadorCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorCore::Member` (r:1 w:1) + /// Proof: `AmbassadorCore::Member` (`max_values`: None, `max_size`: Some(49), added: 2524, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorCore::MemberEvidence` (r:1 w:1) + /// Proof: `AmbassadorCore::MemberEvidence` (`max_values`: None, `max_size`: Some(65581), added: 68056, mode: `MaxEncodedLen`) + fn approve() -> Weight { + // Proof Size summary in bytes: + // Measured: `65967` + // Estimated: `69046` + // Minimum execution time: 78_000_000 picoseconds. + Weight::from_parts(104_000_000, 0) + .saturating_add(Weight::from_parts(0, 69046)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `AmbassadorCore::Member` (r:1 w:0) + /// Proof: `AmbassadorCore::Member` (`max_values`: None, `max_size`: Some(49), added: 2524, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorCore::MemberEvidence` (r:1 w:1) + /// Proof: `AmbassadorCore::MemberEvidence` (`max_values`: None, `max_size`: Some(65581), added: 68056, mode: `MaxEncodedLen`) + fn submit_evidence() -> Weight { + // Proof Size summary in bytes: + // Measured: `151` + // Estimated: `69046` + // Minimum execution time: 43_000_000 picoseconds. 
+ Weight::from_parts(44_000_000, 0) + .saturating_add(Weight::from_parts(0, 69046)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } +} diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_core_fellowship_fellowship_core.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_core_fellowship_fellowship_core.rs new file mode 100644 index 00000000000..471ee82ead7 --- /dev/null +++ b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_core_fellowship_fellowship_core.rs @@ -0,0 +1,222 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Autogenerated weights for `pallet_core_fellowship` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-08-11, STEPS: `2`, REPEAT: `2`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `cob`, CPU: `` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("collectives-polkadot-dev")`, DB CACHE: 1024 + +// Executed Command: +// target/release/polkadot-parachain +// benchmark +// pallet +// --chain=collectives-polkadot-dev +// --wasm-execution=compiled +// --pallet=pallet_core_fellowship +// --extrinsic=* +// --steps=2 +// --repeat=2 +// --json +// --header=./file_header.txt +// --output=./parachains/runtimes/collectives/collectives-polkadot/src/weights/ + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `pallet_core_fellowship`. +pub struct WeightInfo<T>(PhantomData<T>); +impl<T: frame_system::Config> pallet_core_fellowship::WeightInfo for WeightInfo<T> { + /// Storage: `FellowshipCore::Params` (r:0 w:1) + /// Proof: `FellowshipCore::Params` (`max_values`: Some(1), `max_size`: Some(364), added: 859, mode: `MaxEncodedLen`) + fn set_params() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 11_000_000 picoseconds.
+ Weight::from_parts(12_000_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `FellowshipCore::Member` (r:1 w:1) + /// Proof: `FellowshipCore::Member` (`max_values`: None, `max_size`: Some(49), added: 2524, mode: `MaxEncodedLen`) + /// Storage: `FellowshipCollective::Members` (r:1 w:1) + /// Proof: `FellowshipCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`) + /// Storage: `FellowshipCore::Params` (r:1 w:0) + /// Proof: `FellowshipCore::Params` (`max_values`: Some(1), `max_size`: Some(364), added: 859, mode: `MaxEncodedLen`) + /// Storage: `FellowshipCollective::MemberCount` (r:1 w:1) + /// Proof: `FellowshipCollective::MemberCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `FellowshipCollective::IdToIndex` (r:1 w:0) + /// Proof: `FellowshipCollective::IdToIndex` (`max_values`: None, `max_size`: Some(54), added: 2529, mode: `MaxEncodedLen`) + /// Storage: `FellowshipCore::MemberEvidence` (r:1 w:1) + /// Proof: `FellowshipCore::MemberEvidence` (`max_values`: None, `max_size`: Some(65581), added: 68056, mode: `MaxEncodedLen`) + fn bump_offboard() -> Weight { + // Proof Size summary in bytes: + // Measured: `66144` + // Estimated: `69046` + // Minimum execution time: 109_000_000 picoseconds. + Weight::from_parts(125_000_000, 0) + .saturating_add(Weight::from_parts(0, 69046)) + .saturating_add(T::DbWeight::get().reads(6)) + .saturating_add(T::DbWeight::get().writes(4)) + } + /// Storage: `FellowshipCore::Member` (r:1 w:1) + /// Proof: `FellowshipCore::Member` (`max_values`: None, `max_size`: Some(49), added: 2524, mode: `MaxEncodedLen`) + /// Storage: `FellowshipCollective::Members` (r:1 w:1) + /// Proof: `FellowshipCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`) + /// Storage: `FellowshipCore::Params` (r:1 w:0) + /// Proof: `FellowshipCore::Params` (`max_values`: Some(1), `max_size`: Some(364), added: 859, mode: `MaxEncodedLen`) + /// Storage: `FellowshipCollective::MemberCount` (r:1 w:1) + /// Proof: `FellowshipCollective::MemberCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `FellowshipCollective::IdToIndex` (r:1 w:0) + /// Proof: `FellowshipCollective::IdToIndex` (`max_values`: None, `max_size`: Some(54), added: 2529, mode: `MaxEncodedLen`) + /// Storage: `FellowshipCore::MemberEvidence` (r:1 w:1) + /// Proof: `FellowshipCore::MemberEvidence` (`max_values`: None, `max_size`: Some(65581), added: 68056, mode: `MaxEncodedLen`) + fn bump_demote() -> Weight { + // Proof Size summary in bytes: + // Measured: `66254` + // Estimated: `69046` + // Minimum execution time: 112_000_000 picoseconds. + Weight::from_parts(114_000_000, 0) + .saturating_add(Weight::from_parts(0, 69046)) + .saturating_add(T::DbWeight::get().reads(6)) + .saturating_add(T::DbWeight::get().writes(4)) + } + /// Storage: `FellowshipCollective::Members` (r:1 w:0) + /// Proof: `FellowshipCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`) + /// Storage: `FellowshipCore::Member` (r:1 w:1) + /// Proof: `FellowshipCore::Member` (`max_values`: None, `max_size`: Some(49), added: 2524, mode: `MaxEncodedLen`) + fn set_active() -> Weight { + // Proof Size summary in bytes: + // Measured: `493` + // Estimated: `3514` + // Minimum execution time: 22_000_000 picoseconds. 
+ Weight::from_parts(27_000_000, 0) + .saturating_add(Weight::from_parts(0, 3514)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `FellowshipCore::Member` (r:1 w:1) + /// Proof: `FellowshipCore::Member` (`max_values`: None, `max_size`: Some(49), added: 2524, mode: `MaxEncodedLen`) + /// Storage: `FellowshipCollective::Members` (r:1 w:1) + /// Proof: `FellowshipCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`) + /// Storage: `FellowshipCollective::MemberCount` (r:1 w:1) + /// Proof: `FellowshipCollective::MemberCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `FellowshipCollective::IndexToId` (r:0 w:1) + /// Proof: `FellowshipCollective::IndexToId` (`max_values`: None, `max_size`: Some(54), added: 2529, mode: `MaxEncodedLen`) + /// Storage: `FellowshipCollective::IdToIndex` (r:0 w:1) + /// Proof: `FellowshipCollective::IdToIndex` (`max_values`: None, `max_size`: Some(54), added: 2529, mode: `MaxEncodedLen`) + fn induct() -> Weight { + // Proof Size summary in bytes: + // Measured: `251` + // Estimated: `3514` + // Minimum execution time: 35_000_000 picoseconds. + Weight::from_parts(36_000_000, 0) + .saturating_add(Weight::from_parts(0, 3514)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(5)) + } + /// Storage: `FellowshipCollective::Members` (r:1 w:1) + /// Proof: `FellowshipCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`) + /// Storage: `FellowshipCore::Member` (r:1 w:1) + /// Proof: `FellowshipCore::Member` (`max_values`: None, `max_size`: Some(49), added: 2524, mode: `MaxEncodedLen`) + /// Storage: `FellowshipCore::Params` (r:1 w:0) + /// Proof: `FellowshipCore::Params` (`max_values`: Some(1), `max_size`: Some(364), added: 859, mode: `MaxEncodedLen`) + /// Storage: `FellowshipCollective::MemberCount` (r:1 w:1) + /// Proof: `FellowshipCollective::MemberCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `FellowshipCore::MemberEvidence` (r:1 w:1) + /// Proof: `FellowshipCore::MemberEvidence` (`max_values`: None, `max_size`: Some(65581), added: 68056, mode: `MaxEncodedLen`) + /// Storage: `FellowshipCollective::IndexToId` (r:0 w:1) + /// Proof: `FellowshipCollective::IndexToId` (`max_values`: None, `max_size`: Some(54), added: 2529, mode: `MaxEncodedLen`) + /// Storage: `FellowshipCollective::IdToIndex` (r:0 w:1) + /// Proof: `FellowshipCollective::IdToIndex` (`max_values`: None, `max_size`: Some(54), added: 2529, mode: `MaxEncodedLen`) + fn promote() -> Weight { + // Proof Size summary in bytes: + // Measured: `66122` + // Estimated: `69046` + // Minimum execution time: 97_000_000 picoseconds. 
+ Weight::from_parts(129_000_000, 0) + .saturating_add(Weight::from_parts(0, 69046)) + .saturating_add(T::DbWeight::get().reads(5)) + .saturating_add(T::DbWeight::get().writes(6)) + } + /// Storage: `FellowshipCollective::Members` (r:1 w:0) + /// Proof: `FellowshipCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`) + /// Storage: `FellowshipCore::Member` (r:1 w:1) + /// Proof: `FellowshipCore::Member` (`max_values`: None, `max_size`: Some(49), added: 2524, mode: `MaxEncodedLen`) + /// Storage: `FellowshipCore::MemberEvidence` (r:0 w:1) + /// Proof: `FellowshipCore::MemberEvidence` (`max_values`: None, `max_size`: Some(65581), added: 68056, mode: `MaxEncodedLen`) + fn offboard() -> Weight { + // Proof Size summary in bytes: + // Measured: `464` + // Estimated: `3514` + // Minimum execution time: 22_000_000 picoseconds. + Weight::from_parts(22_000_000, 0) + .saturating_add(Weight::from_parts(0, 3514)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `FellowshipCore::Member` (r:1 w:1) + /// Proof: `FellowshipCore::Member` (`max_values`: None, `max_size`: Some(49), added: 2524, mode: `MaxEncodedLen`) + /// Storage: `FellowshipCollective::Members` (r:1 w:0) + /// Proof: `FellowshipCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`) + fn import() -> Weight { + // Proof Size summary in bytes: + // Measured: `418` + // Estimated: `3514` + // Minimum execution time: 20_000_000 picoseconds. + Weight::from_parts(24_000_000, 0) + .saturating_add(Weight::from_parts(0, 3514)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `FellowshipCollective::Members` (r:1 w:0) + /// Proof: `FellowshipCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`) + /// Storage: `FellowshipCore::Member` (r:1 w:1) + /// Proof: `FellowshipCore::Member` (`max_values`: None, `max_size`: Some(49), added: 2524, mode: `MaxEncodedLen`) + /// Storage: `FellowshipCore::MemberEvidence` (r:1 w:1) + /// Proof: `FellowshipCore::MemberEvidence` (`max_values`: None, `max_size`: Some(65581), added: 68056, mode: `MaxEncodedLen`) + fn approve() -> Weight { + // Proof Size summary in bytes: + // Measured: `66100` + // Estimated: `69046` + // Minimum execution time: 89_000_000 picoseconds. + Weight::from_parts(119_000_000, 0) + .saturating_add(Weight::from_parts(0, 69046)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `FellowshipCore::Member` (r:1 w:0) + /// Proof: `FellowshipCore::Member` (`max_values`: None, `max_size`: Some(49), added: 2524, mode: `MaxEncodedLen`) + /// Storage: `FellowshipCore::MemberEvidence` (r:1 w:1) + /// Proof: `FellowshipCore::MemberEvidence` (`max_values`: None, `max_size`: Some(65581), added: 68056, mode: `MaxEncodedLen`) + fn submit_evidence() -> Weight { + // Proof Size summary in bytes: + // Measured: `184` + // Estimated: `69046` + // Minimum execution time: 43_000_000 picoseconds. 
+ Weight::from_parts(52_000_000, 0) + .saturating_add(Weight::from_parts(0, 69046)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } +} diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_message_queue.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_message_queue.rs new file mode 100644 index 00000000000..4bd71c4e7d4 --- /dev/null +++ b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_message_queue.rs @@ -0,0 +1,179 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Autogenerated weights for `pallet_message_queue` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-03-24, STEPS: `2`, REPEAT: `1`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `i9`, CPU: `13th Gen Intel(R) Core(TM) i9-13900K` +//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("collectives-polkadot-dev"), DB CACHE: 1024 + +// Executed Command: +// ./target/release/polkadot-parachain +// benchmark +// pallet +// --chain +// collectives-polkadot-dev +// --pallet +// pallet_message_queue +// --extrinsic +// * +// --execution +// wasm +// --wasm-execution +// compiled +// --output +// parachains/runtimes/collectives/collectives-polkadot/src/weights + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] + +use frame_support::{traits::Get, weights::Weight}; +use sp_std::marker::PhantomData; + +/// Weight functions for `pallet_message_queue`. +pub struct WeightInfo<T>(PhantomData<T>); +impl<T: frame_system::Config> pallet_message_queue::WeightInfo for WeightInfo<T> { + /// Storage: MessageQueue ServiceHead (r:1 w:0) + /// Proof: MessageQueue ServiceHead (max_values: Some(1), max_size: Some(5), added: 500, mode: MaxEncodedLen) + /// Storage: MessageQueue BookStateFor (r:2 w:2) + /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) + fn ready_ring_knit() -> Weight { + // Proof Size summary in bytes: + // Measured: `189` + // Estimated: `7534` + // Minimum execution time: 11_440_000 picoseconds. + Weight::from_parts(11_440_000, 0) + .saturating_add(Weight::from_parts(0, 7534)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: MessageQueue BookStateFor (r:2 w:2) + /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) + /// Storage: MessageQueue ServiceHead (r:1 w:1) + /// Proof: MessageQueue ServiceHead (max_values: Some(1), max_size: Some(5), added: 500, mode: MaxEncodedLen) + fn ready_ring_unknit() -> Weight { + // Proof Size summary in bytes: + // Measured: `184` + // Estimated: `7534` + // Minimum execution time: 11_077_000 picoseconds.
+ Weight::from_parts(11_077_000, 0) + .saturating_add(Weight::from_parts(0, 7534)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(3)) + } + /// Storage: MessageQueue BookStateFor (r:1 w:1) + /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) + fn service_queue_base() -> Weight { + // Proof Size summary in bytes: + // Measured: `6` + // Estimated: `3517` + // Minimum execution time: 3_977_000 picoseconds. + Weight::from_parts(3_977_000, 0) + .saturating_add(Weight::from_parts(0, 3517)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: MessageQueue Pages (r:1 w:1) + /// Proof: MessageQueue Pages (max_values: None, max_size: Some(65585), added: 68060, mode: MaxEncodedLen) + fn service_page_base_completion() -> Weight { + // Proof Size summary in bytes: + // Measured: `72` + // Estimated: `69050` + // Minimum execution time: 4_831_000 picoseconds. + Weight::from_parts(4_831_000, 0) + .saturating_add(Weight::from_parts(0, 69050)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: MessageQueue Pages (r:1 w:1) + /// Proof: MessageQueue Pages (max_values: None, max_size: Some(65585), added: 68060, mode: MaxEncodedLen) + fn service_page_base_no_completion() -> Weight { + // Proof Size summary in bytes: + // Measured: `72` + // Estimated: `69050` + // Minimum execution time: 5_192_000 picoseconds. + Weight::from_parts(5_192_000, 0) + .saturating_add(Weight::from_parts(0, 69050)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + fn service_page_item() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 58_750_000 picoseconds. + Weight::from_parts(58_750_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + /// Storage: MessageQueue ServiceHead (r:1 w:1) + /// Proof: MessageQueue ServiceHead (max_values: Some(1), max_size: Some(5), added: 500, mode: MaxEncodedLen) + /// Storage: MessageQueue BookStateFor (r:1 w:0) + /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) + fn bump_service_head() -> Weight { + // Proof Size summary in bytes: + // Measured: `99` + // Estimated: `5007` + // Minimum execution time: 5_107_000 picoseconds. + Weight::from_parts(5_107_000, 0) + .saturating_add(Weight::from_parts(0, 5007)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: MessageQueue BookStateFor (r:1 w:1) + /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) + /// Storage: MessageQueue Pages (r:1 w:1) + /// Proof: MessageQueue Pages (max_values: None, max_size: Some(65585), added: 68060, mode: MaxEncodedLen) + fn reap_page() -> Weight { + // Proof Size summary in bytes: + // Measured: `65667` + // Estimated: `72567` + // Minimum execution time: 46_814_000 picoseconds. 
+ Weight::from_parts(46_814_000, 0) + .saturating_add(Weight::from_parts(0, 72567)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: MessageQueue BookStateFor (r:1 w:1) + /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) + /// Storage: MessageQueue Pages (r:1 w:1) + /// Proof: MessageQueue Pages (max_values: None, max_size: Some(65585), added: 68060, mode: MaxEncodedLen) + fn execute_overweight_page_removed() -> Weight { + // Proof Size summary in bytes: + // Measured: `65667` + // Estimated: `72567` + // Minimum execution time: 52_510_000 picoseconds. + Weight::from_parts(52_510_000, 0) + .saturating_add(Weight::from_parts(0, 72567)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: MessageQueue BookStateFor (r:1 w:1) + /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) + /// Storage: MessageQueue Pages (r:1 w:1) + /// Proof: MessageQueue Pages (max_values: None, max_size: Some(65585), added: 68060, mode: MaxEncodedLen) + fn execute_overweight_page_updated() -> Weight { + // Proof Size summary in bytes: + // Measured: `65667` + // Estimated: `72567` + // Minimum execution time: 71_930_000 picoseconds. + Weight::from_parts(71_930_000, 0) + .saturating_add(Weight::from_parts(0, 72567)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } +} diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_multisig.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_multisig.rs new file mode 100644 index 00000000000..a7827b72009 --- /dev/null +++ b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_multisig.rs @@ -0,0 +1,164 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Autogenerated weights for `pallet_multisig` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-07-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `runner-ynta1nyy-project-238-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! 
EXECUTION: ``, WASM-EXECUTION: `Compiled`, CHAIN: `Some("collectives-polkadot-dev")`, DB CACHE: 1024 + +// Executed Command: +// ./target/production/polkadot-parachain +// benchmark +// pallet +// --chain=collectives-polkadot-dev +// --wasm-execution=compiled +// --pallet=pallet_multisig +// --no-storage-info +// --no-median-slopes +// --no-min-squares +// --extrinsic=* +// --steps=50 +// --repeat=20 +// --json +// --header=./file_header.txt +// --output=./parachains/runtimes/collectives/collectives-polkadot/src/weights/ + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `pallet_multisig`. +pub struct WeightInfo<T>(PhantomData<T>); +impl<T: frame_system::Config> pallet_multisig::WeightInfo for WeightInfo<T> { + /// The range of component `z` is `[0, 10000]`. + fn as_multi_threshold_1(z: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 13_288_000 picoseconds. + Weight::from_parts(14_235_741, 0) + .saturating_add(Weight::from_parts(0, 0)) + // Standard Error: 5 + .saturating_add(Weight::from_parts(500, 0).saturating_mul(z.into())) + } + /// Storage: `Multisig::Multisigs` (r:1 w:1) + /// Proof: `Multisig::Multisigs` (`max_values`: None, `max_size`: Some(3346), added: 5821, mode: `MaxEncodedLen`) + /// The range of component `s` is `[2, 100]`. + /// The range of component `z` is `[0, 10000]`. + fn as_multi_create(s: u32, z: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `328 + s * (2 ±0)` + // Estimated: `6811` + // Minimum execution time: 44_865_000 picoseconds. + Weight::from_parts(33_468_056, 0) + .saturating_add(Weight::from_parts(0, 6811)) + // Standard Error: 1_513 + .saturating_add(Weight::from_parts(130_544, 0).saturating_mul(s.into())) + // Standard Error: 14 + .saturating_add(Weight::from_parts(1_422, 0).saturating_mul(z.into())) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Multisig::Multisigs` (r:1 w:1) + /// Proof: `Multisig::Multisigs` (`max_values`: None, `max_size`: Some(3346), added: 5821, mode: `MaxEncodedLen`) + /// The range of component `s` is `[3, 100]`. + /// The range of component `z` is `[0, 10000]`. + fn as_multi_approve(s: u32, z: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `348` + // Estimated: `6811` + // Minimum execution time: 29_284_000 picoseconds. + Weight::from_parts(18_708_967, 0) + .saturating_add(Weight::from_parts(0, 6811)) + // Standard Error: 916 + .saturating_add(Weight::from_parts(119_202, 0).saturating_mul(s.into())) + // Standard Error: 8 + .saturating_add(Weight::from_parts(1_447, 0).saturating_mul(z.into())) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Multisig::Multisigs` (r:1 w:1) + /// Proof: `Multisig::Multisigs` (`max_values`: None, `max_size`: Some(3346), added: 5821, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// The range of component `s` is `[2, 100]`. + /// The range of component `z` is `[0, 10000]`. + fn as_multi_complete(s: u32, z: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `451 + s * (33 ±0)` + // Estimated: `6811` + // Minimum execution time: 49_462_000 picoseconds.
+ Weight::from_parts(34_470_286, 0) + .saturating_add(Weight::from_parts(0, 6811)) + // Standard Error: 1_738 + .saturating_add(Weight::from_parts(178_227, 0).saturating_mul(s.into())) + // Standard Error: 17 + .saturating_add(Weight::from_parts(1_644, 0).saturating_mul(z.into())) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `Multisig::Multisigs` (r:1 w:1) + /// Proof: `Multisig::Multisigs` (`max_values`: None, `max_size`: Some(3346), added: 5821, mode: `MaxEncodedLen`) + /// The range of component `s` is `[2, 100]`. + fn approve_as_multi_create(s: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `329 + s * (2 ±0)` + // Estimated: `6811` + // Minimum execution time: 30_749_000 picoseconds. + Weight::from_parts(31_841_438, 0) + .saturating_add(Weight::from_parts(0, 6811)) + // Standard Error: 1_033 + .saturating_add(Weight::from_parts(123_126, 0).saturating_mul(s.into())) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Multisig::Multisigs` (r:1 w:1) + /// Proof: `Multisig::Multisigs` (`max_values`: None, `max_size`: Some(3346), added: 5821, mode: `MaxEncodedLen`) + /// The range of component `s` is `[2, 100]`. + fn approve_as_multi_approve(s: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `348` + // Estimated: `6811` + // Minimum execution time: 17_436_000 picoseconds. + Weight::from_parts(18_036_002, 0) + .saturating_add(Weight::from_parts(0, 6811)) + // Standard Error: 829 + .saturating_add(Weight::from_parts(109_450, 0).saturating_mul(s.into())) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Multisig::Multisigs` (r:1 w:1) + /// Proof: `Multisig::Multisigs` (`max_values`: None, `max_size`: Some(3346), added: 5821, mode: `MaxEncodedLen`) + /// The range of component `s` is `[2, 100]`. + fn cancel_as_multi(s: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `520 + s * (1 ±0)` + // Estimated: `6811` + // Minimum execution time: 31_532_000 picoseconds. + Weight::from_parts(32_818_015, 0) + .saturating_add(Weight::from_parts(0, 6811)) + // Standard Error: 977 + .saturating_add(Weight::from_parts(123_121, 0).saturating_mul(s.into())) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } +} diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_preimage.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_preimage.rs new file mode 100644 index 00000000000..9b45c875818 --- /dev/null +++ b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_preimage.rs @@ -0,0 +1,232 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Autogenerated weights for `pallet_preimage` +//! +//! 
THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-07-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `runner-ynta1nyy-project-238-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! EXECUTION: ``, WASM-EXECUTION: `Compiled`, CHAIN: `Some("collectives-polkadot-dev")`, DB CACHE: 1024 + +// Executed Command: +// ./target/production/polkadot-parachain +// benchmark +// pallet +// --chain=collectives-polkadot-dev +// --wasm-execution=compiled +// --pallet=pallet_preimage +// --no-storage-info +// --no-median-slopes +// --no-min-squares +// --extrinsic=* +// --steps=50 +// --repeat=20 +// --json +// --header=./file_header.txt +// --output=./parachains/runtimes/collectives/collectives-polkadot/src/weights/ + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `pallet_preimage`. +pub struct WeightInfo<T>(PhantomData<T>); +impl<T: frame_system::Config> pallet_preimage::WeightInfo for WeightInfo<T> { + fn ensure_updated(n: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `193 + n * (91 ±0)` + // Estimated: `3593 + n * (2566 ±0)` + // Minimum execution time: 2_000_000 picoseconds. + Weight::from_parts(2_000_000, 3593) + // Standard Error: 13_720 + .saturating_add(Weight::from_parts(17_309_199, 0).saturating_mul(n.into())) + .saturating_add(T::DbWeight::get().reads(1_u64)) + .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(n.into()))) + .saturating_add(T::DbWeight::get().writes(1_u64)) + .saturating_add(T::DbWeight::get().writes((2_u64).saturating_mul(n.into()))) + .saturating_add(Weight::from_parts(0, 2566).saturating_mul(n.into())) + } + + /// Storage: `Preimage::StatusFor` (r:1 w:1) + /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + /// Storage: `Preimage::PreimageFor` (r:0 w:1) + /// Proof: `Preimage::PreimageFor` (`max_values`: None, `max_size`: Some(4194344), added: 4196819, mode: `MaxEncodedLen`) + /// The range of component `s` is `[0, 4194304]`. + fn note_preimage(s: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `177` + // Estimated: `3556` + // Minimum execution time: 29_323_000 picoseconds. + Weight::from_parts(29_793_000, 0) + .saturating_add(Weight::from_parts(0, 3556)) + // Standard Error: 5 + .saturating_add(Weight::from_parts(2_504, 0).saturating_mul(s.into())) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `Preimage::StatusFor` (r:1 w:1) + /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + /// Storage: `Preimage::PreimageFor` (r:0 w:1) + /// Proof: `Preimage::PreimageFor` (`max_values`: None, `max_size`: Some(4194344), added: 4196819, mode: `MaxEncodedLen`) + /// The range of component `s` is `[0, 4194304]`. + fn note_requested_preimage(s: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `140` + // Estimated: `3556` + // Minimum execution time: 15_581_000 picoseconds.
+ Weight::from_parts(15_659_000, 0) + .saturating_add(Weight::from_parts(0, 3556)) + // Standard Error: 4 + .saturating_add(Weight::from_parts(2_500, 0).saturating_mul(s.into())) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `Preimage::StatusFor` (r:1 w:1) + /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + /// Storage: `Preimage::PreimageFor` (r:0 w:1) + /// Proof: `Preimage::PreimageFor` (`max_values`: None, `max_size`: Some(4194344), added: 4196819, mode: `MaxEncodedLen`) + /// The range of component `s` is `[0, 4194304]`. + fn note_no_deposit_preimage(s: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `140` + // Estimated: `3556` + // Minimum execution time: 15_028_000 picoseconds. + Weight::from_parts(15_150_000, 0) + .saturating_add(Weight::from_parts(0, 3556)) + // Standard Error: 6 + .saturating_add(Weight::from_parts(2_560, 0).saturating_mul(s.into())) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `Preimage::StatusFor` (r:1 w:1) + /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + /// Storage: `Preimage::PreimageFor` (r:0 w:1) + /// Proof: `Preimage::PreimageFor` (`max_values`: None, `max_size`: Some(4194344), added: 4196819, mode: `MaxEncodedLen`) + fn unnote_preimage() -> Weight { + // Proof Size summary in bytes: + // Measured: `323` + // Estimated: `3556` + // Minimum execution time: 55_113_000 picoseconds. + Weight::from_parts(59_127_000, 0) + .saturating_add(Weight::from_parts(0, 3556)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `Preimage::StatusFor` (r:1 w:1) + /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + /// Storage: `Preimage::PreimageFor` (r:0 w:1) + /// Proof: `Preimage::PreimageFor` (`max_values`: None, `max_size`: Some(4194344), added: 4196819, mode: `MaxEncodedLen`) + fn unnote_no_deposit_preimage() -> Weight { + // Proof Size summary in bytes: + // Measured: `178` + // Estimated: `3556` + // Minimum execution time: 38_033_000 picoseconds. + Weight::from_parts(41_203_000, 0) + .saturating_add(Weight::from_parts(0, 3556)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `Preimage::StatusFor` (r:1 w:1) + /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + fn request_preimage() -> Weight { + // Proof Size summary in bytes: + // Measured: `222` + // Estimated: `3556` + // Minimum execution time: 31_482_000 picoseconds. + Weight::from_parts(34_726_000, 0) + .saturating_add(Weight::from_parts(0, 3556)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Preimage::StatusFor` (r:1 w:1) + /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + fn request_no_deposit_preimage() -> Weight { + // Proof Size summary in bytes: + // Measured: `178` + // Estimated: `3556` + // Minimum execution time: 20_724_000 picoseconds. 
+ Weight::from_parts(22_928_000, 0) + .saturating_add(Weight::from_parts(0, 3556)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Preimage::StatusFor` (r:1 w:1) + /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + fn request_unnoted_preimage() -> Weight { + // Proof Size summary in bytes: + // Measured: `76` + // Estimated: `3556` + // Minimum execution time: 27_015_000 picoseconds. + Weight::from_parts(29_240_000, 0) + .saturating_add(Weight::from_parts(0, 3556)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Preimage::StatusFor` (r:1 w:1) + /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + fn request_requested_preimage() -> Weight { + // Proof Size summary in bytes: + // Measured: `140` + // Estimated: `3556` + // Minimum execution time: 10_712_000 picoseconds. + Weight::from_parts(11_317_000, 0) + .saturating_add(Weight::from_parts(0, 3556)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Preimage::StatusFor` (r:1 w:1) + /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + /// Storage: `Preimage::PreimageFor` (r:0 w:1) + /// Proof: `Preimage::PreimageFor` (`max_values`: None, `max_size`: Some(4194344), added: 4196819, mode: `MaxEncodedLen`) + fn unrequest_preimage() -> Weight { + // Proof Size summary in bytes: + // Measured: `178` + // Estimated: `3556` + // Minimum execution time: 34_528_000 picoseconds. + Weight::from_parts(35_982_000, 0) + .saturating_add(Weight::from_parts(0, 3556)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `Preimage::StatusFor` (r:1 w:1) + /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + fn unrequest_unnoted_preimage() -> Weight { + // Proof Size summary in bytes: + // Measured: `140` + // Estimated: `3556` + // Minimum execution time: 11_059_000 picoseconds. + Weight::from_parts(12_458_000, 0) + .saturating_add(Weight::from_parts(0, 3556)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Preimage::StatusFor` (r:1 w:1) + /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + fn unrequest_multi_referenced_preimage() -> Weight { + // Proof Size summary in bytes: + // Measured: `140` + // Estimated: `3556` + // Minimum execution time: 11_502_000 picoseconds. + Weight::from_parts(12_180_000, 0) + .saturating_add(Weight::from_parts(0, 3556)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } +} diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_proxy.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_proxy.rs new file mode 100644 index 00000000000..59d9f912bf1 --- /dev/null +++ b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_proxy.rs @@ -0,0 +1,225 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Autogenerated weights for `pallet_proxy` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-07-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `runner-ynta1nyy-project-238-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! EXECUTION: ``, WASM-EXECUTION: `Compiled`, CHAIN: `Some("collectives-polkadot-dev")`, DB CACHE: 1024 + +// Executed Command: +// ./target/production/polkadot-parachain +// benchmark +// pallet +// --chain=collectives-polkadot-dev +// --wasm-execution=compiled +// --pallet=pallet_proxy +// --no-storage-info +// --no-median-slopes +// --no-min-squares +// --extrinsic=* +// --steps=50 +// --repeat=20 +// --json +// --header=./file_header.txt +// --output=./parachains/runtimes/collectives/collectives-polkadot/src/weights/ + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `pallet_proxy`. +pub struct WeightInfo(PhantomData); +impl pallet_proxy::WeightInfo for WeightInfo { + /// Storage: `Proxy::Proxies` (r:1 w:0) + /// Proof: `Proxy::Proxies` (`max_values`: None, `max_size`: Some(1241), added: 3716, mode: `MaxEncodedLen`) + /// The range of component `p` is `[1, 31]`. + fn proxy(p: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `127 + p * (37 ±0)` + // Estimated: `4706` + // Minimum execution time: 15_597_000 picoseconds. + Weight::from_parts(16_231_993, 0) + .saturating_add(Weight::from_parts(0, 4706)) + // Standard Error: 1_665 + .saturating_add(Weight::from_parts(29_818, 0).saturating_mul(p.into())) + .saturating_add(T::DbWeight::get().reads(1)) + } + /// Storage: `Proxy::Proxies` (r:1 w:0) + /// Proof: `Proxy::Proxies` (`max_values`: None, `max_size`: Some(1241), added: 3716, mode: `MaxEncodedLen`) + /// Storage: `Proxy::Announcements` (r:1 w:1) + /// Proof: `Proxy::Announcements` (`max_values`: None, `max_size`: Some(2233), added: 4708, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// The range of component `a` is `[0, 31]`. + /// The range of component `p` is `[1, 31]`. + fn proxy_announced(a: u32, p: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `454 + a * (68 ±0) + p * (37 ±0)` + // Estimated: `5698` + // Minimum execution time: 36_685_000 picoseconds. 
+ Weight::from_parts(36_376_358, 0) + .saturating_add(Weight::from_parts(0, 5698)) + // Standard Error: 3_003 + .saturating_add(Weight::from_parts(133_776, 0).saturating_mul(a.into())) + // Standard Error: 3_103 + .saturating_add(Weight::from_parts(60_315, 0).saturating_mul(p.into())) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `Proxy::Announcements` (r:1 w:1) + /// Proof: `Proxy::Announcements` (`max_values`: None, `max_size`: Some(2233), added: 4708, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// The range of component `a` is `[0, 31]`. + /// The range of component `p` is `[1, 31]`. + fn remove_announcement(a: u32, p: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `369 + a * (68 ±0)` + // Estimated: `5698` + // Minimum execution time: 23_835_000 picoseconds. + Weight::from_parts(24_154_219, 0) + .saturating_add(Weight::from_parts(0, 5698)) + // Standard Error: 1_580 + .saturating_add(Weight::from_parts(125_884, 0).saturating_mul(a.into())) + // Standard Error: 1_632 + .saturating_add(Weight::from_parts(21_563, 0).saturating_mul(p.into())) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `Proxy::Announcements` (r:1 w:1) + /// Proof: `Proxy::Announcements` (`max_values`: None, `max_size`: Some(2233), added: 4708, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// The range of component `a` is `[0, 31]`. + /// The range of component `p` is `[1, 31]`. + fn reject_announcement(a: u32, p: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `369 + a * (68 ±0)` + // Estimated: `5698` + // Minimum execution time: 23_997_000 picoseconds. + Weight::from_parts(24_301_638, 0) + .saturating_add(Weight::from_parts(0, 5698)) + // Standard Error: 1_658 + .saturating_add(Weight::from_parts(133_005, 0).saturating_mul(a.into())) + // Standard Error: 1_713 + .saturating_add(Weight::from_parts(20_237, 0).saturating_mul(p.into())) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `Proxy::Proxies` (r:1 w:0) + /// Proof: `Proxy::Proxies` (`max_values`: None, `max_size`: Some(1241), added: 3716, mode: `MaxEncodedLen`) + /// Storage: `Proxy::Announcements` (r:1 w:1) + /// Proof: `Proxy::Announcements` (`max_values`: None, `max_size`: Some(2233), added: 4708, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// The range of component `a` is `[0, 31]`. + /// The range of component `p` is `[1, 31]`. + fn announce(a: u32, p: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `386 + a * (68 ±0) + p * (37 ±0)` + // Estimated: `5698` + // Minimum execution time: 33_604_000 picoseconds. 
+ Weight::from_parts(33_322_880, 0) + .saturating_add(Weight::from_parts(0, 5698)) + // Standard Error: 1_840 + .saturating_add(Weight::from_parts(114_037, 0).saturating_mul(a.into())) + // Standard Error: 1_901 + .saturating_add(Weight::from_parts(45_629, 0).saturating_mul(p.into())) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `Proxy::Proxies` (r:1 w:1) + /// Proof: `Proxy::Proxies` (`max_values`: None, `max_size`: Some(1241), added: 3716, mode: `MaxEncodedLen`) + /// The range of component `p` is `[1, 31]`. + fn add_proxy(p: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `127 + p * (37 ±0)` + // Estimated: `4706` + // Minimum execution time: 24_634_000 picoseconds. + Weight::from_parts(25_509_118, 0) + .saturating_add(Weight::from_parts(0, 4706)) + // Standard Error: 2_278 + .saturating_add(Weight::from_parts(38_401, 0).saturating_mul(p.into())) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Proxy::Proxies` (r:1 w:1) + /// Proof: `Proxy::Proxies` (`max_values`: None, `max_size`: Some(1241), added: 3716, mode: `MaxEncodedLen`) + /// The range of component `p` is `[1, 31]`. + fn remove_proxy(p: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `127 + p * (37 ±0)` + // Estimated: `4706` + // Minimum execution time: 24_855_000 picoseconds. + Weight::from_parts(25_753_505, 0) + .saturating_add(Weight::from_parts(0, 4706)) + // Standard Error: 1_819 + .saturating_add(Weight::from_parts(44_357, 0).saturating_mul(p.into())) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Proxy::Proxies` (r:1 w:1) + /// Proof: `Proxy::Proxies` (`max_values`: None, `max_size`: Some(1241), added: 3716, mode: `MaxEncodedLen`) + /// The range of component `p` is `[1, 31]`. + fn remove_proxies(p: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `127 + p * (37 ±0)` + // Estimated: `4706` + // Minimum execution time: 22_211_000 picoseconds. + Weight::from_parts(23_094_124, 0) + .saturating_add(Weight::from_parts(0, 4706)) + // Standard Error: 2_597 + .saturating_add(Weight::from_parts(36_725, 0).saturating_mul(p.into())) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Proxy::Proxies` (r:1 w:1) + /// Proof: `Proxy::Proxies` (`max_values`: None, `max_size`: Some(1241), added: 3716, mode: `MaxEncodedLen`) + /// The range of component `p` is `[1, 31]`. + fn create_pure(p: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `139` + // Estimated: `4706` + // Minimum execution time: 26_764_000 picoseconds. + Weight::from_parts(27_667_535, 0) + .saturating_add(Weight::from_parts(0, 4706)) + // Standard Error: 1_111 + .saturating_add(Weight::from_parts(3_422, 0).saturating_mul(p.into())) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Proxy::Proxies` (r:1 w:1) + /// Proof: `Proxy::Proxies` (`max_values`: None, `max_size`: Some(1241), added: 3716, mode: `MaxEncodedLen`) + /// The range of component `p` is `[0, 30]`. + fn kill_pure(p: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `164 + p * (37 ±0)` + // Estimated: `4706` + // Minimum execution time: 22_632_000 picoseconds. 
+ Weight::from_parts(23_678_772, 0) + .saturating_add(Weight::from_parts(0, 4706)) + // Standard Error: 2_136 + .saturating_add(Weight::from_parts(26_492, 0).saturating_mul(p.into())) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } +} diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_ranked_collective_ambassador_collective.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_ranked_collective_ambassador_collective.rs new file mode 100644 index 00000000000..a6372c4b89d --- /dev/null +++ b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_ranked_collective_ambassador_collective.rs @@ -0,0 +1,177 @@ +// Copyright Parity Technologies (UK) Ltd. +// This file is part of Cumulus. + +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . + +//! Autogenerated weights for `pallet_ranked_collective` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-08-11, STEPS: `2`, REPEAT: `2`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `cob`, CPU: `` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("collectives-polkadot-dev")`, DB CACHE: 1024 + +// Executed Command: +// target/release/polkadot-parachain +// benchmark +// pallet +// --chain=collectives-polkadot-dev +// --wasm-execution=compiled +// --pallet=pallet_ranked_collective +// --extrinsic=* +// --steps=2 +// --repeat=2 +// --json +// --header=./file_header.txt +// --output=./parachains/runtimes/collectives/collectives-polkadot/src/weights/ + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `pallet_ranked_collective`. +pub struct WeightInfo(PhantomData); +impl pallet_ranked_collective::WeightInfo for WeightInfo { + /// Storage: `AmbassadorCollective::Members` (r:1 w:1) + /// Proof: `AmbassadorCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorCollective::MemberCount` (r:1 w:1) + /// Proof: `AmbassadorCollective::MemberCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorCollective::IndexToId` (r:0 w:1) + /// Proof: `AmbassadorCollective::IndexToId` (`max_values`: None, `max_size`: Some(54), added: 2529, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorCollective::IdToIndex` (r:0 w:1) + /// Proof: `AmbassadorCollective::IdToIndex` (`max_values`: None, `max_size`: Some(54), added: 2529, mode: `MaxEncodedLen`) + fn add_member() -> Weight { + // Proof Size summary in bytes: + // Measured: `42` + // Estimated: `3507` + // Minimum execution time: 21_000_000 picoseconds. 
+ Weight::from_parts(23_000_000, 0) + .saturating_add(Weight::from_parts(0, 3507)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(4)) + } + /// Storage: `AmbassadorCollective::Members` (r:1 w:1) + /// Proof: `AmbassadorCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorCollective::MemberCount` (r:11 w:11) + /// Proof: `AmbassadorCollective::MemberCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorCollective::IdToIndex` (r:11 w:11) + /// Proof: `AmbassadorCollective::IdToIndex` (`max_values`: None, `max_size`: Some(54), added: 2529, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorCollective::IndexToId` (r:11 w:11) + /// Proof: `AmbassadorCollective::IndexToId` (`max_values`: None, `max_size`: Some(54), added: 2529, mode: `MaxEncodedLen`) + /// The range of component `r` is `[0, 10]`. + /// The range of component `r` is `[0, 10]`. + fn remove_member(r: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `508 + r * (281 ±0)` + // Estimated: `3519 + r * (2529 ±0)` + // Minimum execution time: 34_000_000 picoseconds. + Weight::from_parts(36_500_000, 0) + .saturating_add(Weight::from_parts(0, 3519)) + // Standard Error: 158_113 + .saturating_add(Weight::from_parts(16_000_000, 0).saturating_mul(r.into())) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().reads((3_u64).saturating_mul(r.into()))) + .saturating_add(T::DbWeight::get().writes(4)) + .saturating_add(T::DbWeight::get().writes((3_u64).saturating_mul(r.into()))) + .saturating_add(Weight::from_parts(0, 2529).saturating_mul(r.into())) + } + /// Storage: `AmbassadorCollective::Members` (r:1 w:1) + /// Proof: `AmbassadorCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorCollective::MemberCount` (r:1 w:1) + /// Proof: `AmbassadorCollective::MemberCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorCollective::IndexToId` (r:0 w:1) + /// Proof: `AmbassadorCollective::IndexToId` (`max_values`: None, `max_size`: Some(54), added: 2529, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorCollective::IdToIndex` (r:0 w:1) + /// Proof: `AmbassadorCollective::IdToIndex` (`max_values`: None, `max_size`: Some(54), added: 2529, mode: `MaxEncodedLen`) + /// The range of component `r` is `[0, 10]`. + /// The range of component `r` is `[0, 10]`. + fn promote_member(r: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `210 + r * (17 ±0)` + // Estimated: `3507` + // Minimum execution time: 25_000_000 picoseconds. 
+ Weight::from_parts(26_000_000, 0) + .saturating_add(Weight::from_parts(0, 3507)) + // Standard Error: 180_277 + .saturating_add(Weight::from_parts(650_000, 0).saturating_mul(r.into())) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(4)) + } + /// Storage: `AmbassadorCollective::Members` (r:1 w:1) + /// Proof: `AmbassadorCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorCollective::MemberCount` (r:1 w:1) + /// Proof: `AmbassadorCollective::MemberCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorCollective::IdToIndex` (r:1 w:1) + /// Proof: `AmbassadorCollective::IdToIndex` (`max_values`: None, `max_size`: Some(54), added: 2529, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorCollective::IndexToId` (r:1 w:1) + /// Proof: `AmbassadorCollective::IndexToId` (`max_values`: None, `max_size`: Some(54), added: 2529, mode: `MaxEncodedLen`) + /// The range of component `r` is `[0, 10]`. + /// The range of component `r` is `[0, 10]`. + fn demote_member(r: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `508 + r * (71 ±0)` + // Estimated: `3519` + // Minimum execution time: 34_000_000 picoseconds. + Weight::from_parts(36_500_000, 0) + .saturating_add(Weight::from_parts(0, 3519)) + // Standard Error: 335_410 + .saturating_add(Weight::from_parts(550_000, 0).saturating_mul(r.into())) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(4)) + } + /// Storage: `AmbassadorCollective::Members` (r:1 w:0) + /// Proof: `AmbassadorCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorReferenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `AmbassadorReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorCollective::Voting` (r:1 w:1) + /// Proof: `AmbassadorCollective::Voting` (`max_values`: None, `max_size`: Some(65), added: 2540, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:2 w:2) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(155814), added: 158289, mode: `MaxEncodedLen`) + fn vote() -> Weight { + // Proof Size summary in bytes: + // Measured: `566` + // Estimated: `317568` + // Minimum execution time: 57_000_000 picoseconds. + Weight::from_parts(60_000_000, 0) + .saturating_add(Weight::from_parts(0, 317568)) + .saturating_add(T::DbWeight::get().reads(5)) + .saturating_add(T::DbWeight::get().writes(4)) + } + /// Storage: `AmbassadorReferenda::ReferendumInfoFor` (r:1 w:0) + /// Proof: `AmbassadorReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorCollective::VotingCleanup` (r:1 w:0) + /// Proof: `AmbassadorCollective::VotingCleanup` (`max_values`: None, `max_size`: Some(114), added: 2589, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorCollective::Voting` (r:100 w:100) + /// Proof: `AmbassadorCollective::Voting` (`max_values`: None, `max_size`: Some(65), added: 2540, mode: `MaxEncodedLen`) + /// The range of component `n` is `[0, 100]`. + /// The range of component `n` is `[0, 100]`. + fn cleanup_poll(n: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `209 + n * (52 ±0)` + // Estimated: `4365 + n * (2550 ±0)` + // Minimum execution time: 18_000_000 picoseconds. 
+ Weight::from_parts(18_500_000, 0) + .saturating_add(Weight::from_parts(0, 4365)) + // Standard Error: 11_180 + .saturating_add(Weight::from_parts(1_335_000, 0).saturating_mul(n.into())) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(n.into()))) + .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(n.into()))) + .saturating_add(Weight::from_parts(0, 2550).saturating_mul(n.into())) + } +} diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_ranked_collective_fellowship_collective.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_ranked_collective_fellowship_collective.rs new file mode 100644 index 00000000000..9c773c56ac3 --- /dev/null +++ b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_ranked_collective_fellowship_collective.rs @@ -0,0 +1,176 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Autogenerated weights for `pallet_ranked_collective` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-08-11, STEPS: `2`, REPEAT: `2`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `cob`, CPU: `` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("collectives-polkadot-dev")`, DB CACHE: 1024 + +// Executed Command: +// target/release/polkadot-parachain +// benchmark +// pallet +// --chain=collectives-polkadot-dev +// --wasm-execution=compiled +// --pallet=pallet_ranked_collective +// --extrinsic=* +// --steps=2 +// --repeat=2 +// --json +// --header=./file_header.txt +// --output=./parachains/runtimes/collectives/collectives-polkadot/src/weights/ + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `pallet_ranked_collective`. 
+pub struct WeightInfo(PhantomData); +impl pallet_ranked_collective::WeightInfo for WeightInfo { + /// Storage: `FellowshipCollective::Members` (r:1 w:1) + /// Proof: `FellowshipCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`) + /// Storage: `FellowshipCollective::MemberCount` (r:1 w:1) + /// Proof: `FellowshipCollective::MemberCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `FellowshipCollective::IndexToId` (r:0 w:1) + /// Proof: `FellowshipCollective::IndexToId` (`max_values`: None, `max_size`: Some(54), added: 2529, mode: `MaxEncodedLen`) + /// Storage: `FellowshipCollective::IdToIndex` (r:0 w:1) + /// Proof: `FellowshipCollective::IdToIndex` (`max_values`: None, `max_size`: Some(54), added: 2529, mode: `MaxEncodedLen`) + fn add_member() -> Weight { + // Proof Size summary in bytes: + // Measured: `142` + // Estimated: `3507` + // Minimum execution time: 21_000_000 picoseconds. + Weight::from_parts(22_000_000, 0) + .saturating_add(Weight::from_parts(0, 3507)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(4)) + } + /// Storage: `FellowshipCollective::Members` (r:1 w:1) + /// Proof: `FellowshipCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`) + /// Storage: `FellowshipCollective::MemberCount` (r:11 w:11) + /// Proof: `FellowshipCollective::MemberCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `FellowshipCollective::IdToIndex` (r:11 w:11) + /// Proof: `FellowshipCollective::IdToIndex` (`max_values`: None, `max_size`: Some(54), added: 2529, mode: `MaxEncodedLen`) + /// Storage: `FellowshipCollective::IndexToId` (r:11 w:11) + /// Proof: `FellowshipCollective::IndexToId` (`max_values`: None, `max_size`: Some(54), added: 2529, mode: `MaxEncodedLen`) + /// The range of component `r` is `[0, 10]`. + /// The range of component `r` is `[0, 10]`. + fn remove_member(r: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `608 + r * (281 ±0)` + // Estimated: `3519 + r * (2529 ±0)` + // Minimum execution time: 35_000_000 picoseconds. + Weight::from_parts(36_500_000, 0) + .saturating_add(Weight::from_parts(0, 3519)) + // Standard Error: 254_950 + .saturating_add(Weight::from_parts(15_900_000, 0).saturating_mul(r.into())) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().reads((3_u64).saturating_mul(r.into()))) + .saturating_add(T::DbWeight::get().writes(4)) + .saturating_add(T::DbWeight::get().writes((3_u64).saturating_mul(r.into()))) + .saturating_add(Weight::from_parts(0, 2529).saturating_mul(r.into())) + } + /// Storage: `FellowshipCollective::Members` (r:1 w:1) + /// Proof: `FellowshipCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`) + /// Storage: `FellowshipCollective::MemberCount` (r:1 w:1) + /// Proof: `FellowshipCollective::MemberCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `FellowshipCollective::IndexToId` (r:0 w:1) + /// Proof: `FellowshipCollective::IndexToId` (`max_values`: None, `max_size`: Some(54), added: 2529, mode: `MaxEncodedLen`) + /// Storage: `FellowshipCollective::IdToIndex` (r:0 w:1) + /// Proof: `FellowshipCollective::IdToIndex` (`max_values`: None, `max_size`: Some(54), added: 2529, mode: `MaxEncodedLen`) + /// The range of component `r` is `[0, 10]`. 
+ /// The range of component `r` is `[0, 10]`. + fn promote_member(r: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `310 + r * (17 ±0)` + // Estimated: `3507` + // Minimum execution time: 25_000_000 picoseconds. + Weight::from_parts(25_500_000, 0) + .saturating_add(Weight::from_parts(0, 3507)) + // Standard Error: 70_710 + .saturating_add(Weight::from_parts(400_000, 0).saturating_mul(r.into())) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(4)) + } + /// Storage: `FellowshipCollective::Members` (r:1 w:1) + /// Proof: `FellowshipCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`) + /// Storage: `FellowshipCollective::MemberCount` (r:1 w:1) + /// Proof: `FellowshipCollective::MemberCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `FellowshipCollective::IdToIndex` (r:1 w:1) + /// Proof: `FellowshipCollective::IdToIndex` (`max_values`: None, `max_size`: Some(54), added: 2529, mode: `MaxEncodedLen`) + /// Storage: `FellowshipCollective::IndexToId` (r:1 w:1) + /// Proof: `FellowshipCollective::IndexToId` (`max_values`: None, `max_size`: Some(54), added: 2529, mode: `MaxEncodedLen`) + /// The range of component `r` is `[0, 10]`. + /// The range of component `r` is `[0, 10]`. + fn demote_member(r: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `608 + r * (71 ±0)` + // Estimated: `3519` + // Minimum execution time: 35_000_000 picoseconds. + Weight::from_parts(37_500_000, 0) + .saturating_add(Weight::from_parts(0, 3519)) + // Standard Error: 150_000 + .saturating_add(Weight::from_parts(350_000, 0).saturating_mul(r.into())) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(4)) + } + /// Storage: `FellowshipCollective::Members` (r:1 w:0) + /// Proof: `FellowshipCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`) + /// Storage: `FellowshipReferenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `FellowshipReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) + /// Storage: `FellowshipCollective::Voting` (r:1 w:1) + /// Proof: `FellowshipCollective::Voting` (`max_values`: None, `max_size`: Some(65), added: 2540, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:2 w:2) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(155814), added: 158289, mode: `MaxEncodedLen`) + fn vote() -> Weight { + // Proof Size summary in bytes: + // Measured: `700` + // Estimated: `317568` + // Minimum execution time: 57_000_000 picoseconds. + Weight::from_parts(57_000_000, 0) + .saturating_add(Weight::from_parts(0, 317568)) + .saturating_add(T::DbWeight::get().reads(5)) + .saturating_add(T::DbWeight::get().writes(4)) + } + /// Storage: `FellowshipReferenda::ReferendumInfoFor` (r:1 w:0) + /// Proof: `FellowshipReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) + /// Storage: `FellowshipCollective::VotingCleanup` (r:1 w:0) + /// Proof: `FellowshipCollective::VotingCleanup` (`max_values`: None, `max_size`: Some(114), added: 2589, mode: `MaxEncodedLen`) + /// Storage: `FellowshipCollective::Voting` (r:100 w:100) + /// Proof: `FellowshipCollective::Voting` (`max_values`: None, `max_size`: Some(65), added: 2540, mode: `MaxEncodedLen`) + /// The range of component `n` is `[0, 100]`. 
+ /// The range of component `n` is `[0, 100]`. + fn cleanup_poll(n: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `343 + n * (52 ±0)` + // Estimated: `4365 + n * (2550 ±0)` + // Minimum execution time: 18_000_000 picoseconds. + Weight::from_parts(19_000_000, 0) + .saturating_add(Weight::from_parts(0, 4365)) + // Standard Error: 25_000 + .saturating_add(Weight::from_parts(1_395_000, 0).saturating_mul(n.into())) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(n.into()))) + .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(n.into()))) + .saturating_add(Weight::from_parts(0, 2550).saturating_mul(n.into())) + } +} diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_referenda_ambassador_referenda.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_referenda_ambassador_referenda.rs new file mode 100644 index 00000000000..fdc451c5d31 --- /dev/null +++ b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_referenda_ambassador_referenda.rs @@ -0,0 +1,536 @@ +// Copyright Parity Technologies (UK) Ltd. +// This file is part of Cumulus. + +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . + +//! Autogenerated weights for `pallet_referenda` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-08-11, STEPS: `2`, REPEAT: `2`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `cob`, CPU: `` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("collectives-polkadot-dev")`, DB CACHE: 1024 + +// Executed Command: +// target/release/polkadot-parachain +// benchmark +// pallet +// --chain=collectives-polkadot-dev +// --wasm-execution=compiled +// --pallet=pallet_referenda +// --extrinsic=* +// --steps=2 +// --repeat=2 +// --json +// --header=./file_header.txt +// --output=./parachains/runtimes/collectives/collectives-polkadot/src/weights/ + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `pallet_referenda`. 
+pub struct WeightInfo(PhantomData); +impl pallet_referenda::WeightInfo for WeightInfo { + /// Storage: `AmbassadorCollective::Members` (r:1 w:0) + /// Proof: `AmbassadorCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorReferenda::ReferendumCount` (r:1 w:1) + /// Proof: `AmbassadorReferenda::ReferendumCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:1 w:1) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(155814), added: 158289, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorReferenda::ReferendumInfoFor` (r:0 w:1) + /// Proof: `AmbassadorReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) + fn submit() -> Weight { + // Proof Size summary in bytes: + // Measured: `255` + // Estimated: `159279` + // Minimum execution time: 32_000_000 picoseconds. + Weight::from_parts(34_000_000, 0) + .saturating_add(Weight::from_parts(0, 159279)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(3)) + } + /// Storage: `AmbassadorReferenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `AmbassadorReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:2 w:2) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(155814), added: 158289, mode: `MaxEncodedLen`) + fn place_decision_deposit_preparing() -> Weight { + // Proof Size summary in bytes: + // Measured: `366` + // Estimated: `317568` + // Minimum execution time: 63_000_000 picoseconds. + Weight::from_parts(68_000_000, 0) + .saturating_add(Weight::from_parts(0, 317568)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(3)) + } + /// Storage: `AmbassadorReferenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `AmbassadorReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorReferenda::DecidingCount` (r:1 w:0) + /// Proof: `AmbassadorReferenda::DecidingCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorReferenda::TrackQueue` (r:1 w:1) + /// Proof: `AmbassadorReferenda::TrackQueue` (`max_values`: None, `max_size`: Some(171), added: 2646, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:1 w:1) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(155814), added: 158289, mode: `MaxEncodedLen`) + fn place_decision_deposit_queued() -> Weight { + // Proof Size summary in bytes: + // Measured: `1165` + // Estimated: `159279` + // Minimum execution time: 97_000_000 picoseconds. 
+ Weight::from_parts(123_000_000, 0) + .saturating_add(Weight::from_parts(0, 159279)) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(3)) + } + /// Storage: `AmbassadorReferenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `AmbassadorReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorReferenda::DecidingCount` (r:1 w:0) + /// Proof: `AmbassadorReferenda::DecidingCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorReferenda::TrackQueue` (r:1 w:1) + /// Proof: `AmbassadorReferenda::TrackQueue` (`max_values`: None, `max_size`: Some(171), added: 2646, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:1 w:1) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(155814), added: 158289, mode: `MaxEncodedLen`) + fn place_decision_deposit_not_queued() -> Weight { + // Proof Size summary in bytes: + // Measured: `1173` + // Estimated: `159279` + // Minimum execution time: 104_000_000 picoseconds. + Weight::from_parts(111_000_000, 0) + .saturating_add(Weight::from_parts(0, 159279)) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(3)) + } + /// Storage: `AmbassadorReferenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `AmbassadorReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorReferenda::DecidingCount` (r:1 w:1) + /// Proof: `AmbassadorReferenda::DecidingCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorCollective::MemberCount` (r:1 w:0) + /// Proof: `AmbassadorCollective::MemberCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:2 w:2) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(155814), added: 158289, mode: `MaxEncodedLen`) + fn place_decision_deposit_passing() -> Weight { + // Proof Size summary in bytes: + // Measured: `702` + // Estimated: `317568` + // Minimum execution time: 140_000_000 picoseconds. + Weight::from_parts(150_000_000, 0) + .saturating_add(Weight::from_parts(0, 317568)) + .saturating_add(T::DbWeight::get().reads(5)) + .saturating_add(T::DbWeight::get().writes(4)) + } + /// Storage: `AmbassadorReferenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `AmbassadorReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorReferenda::DecidingCount` (r:1 w:1) + /// Proof: `AmbassadorReferenda::DecidingCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorCollective::MemberCount` (r:1 w:0) + /// Proof: `AmbassadorCollective::MemberCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:2 w:2) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(155814), added: 158289, mode: `MaxEncodedLen`) + fn place_decision_deposit_failing() -> Weight { + // Proof Size summary in bytes: + // Measured: `601` + // Estimated: `317568` + // Minimum execution time: 81_000_000 picoseconds. 
+ Weight::from_parts(82_000_000, 0) + .saturating_add(Weight::from_parts(0, 317568)) + .saturating_add(T::DbWeight::get().reads(5)) + .saturating_add(T::DbWeight::get().writes(4)) + } + /// Storage: `AmbassadorReferenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `AmbassadorReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) + fn refund_decision_deposit() -> Weight { + // Proof Size summary in bytes: + // Measured: `317` + // Estimated: `4365` + // Minimum execution time: 38_000_000 picoseconds. + Weight::from_parts(38_000_000, 0) + .saturating_add(Weight::from_parts(0, 4365)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `AmbassadorReferenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `AmbassadorReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) + fn refund_submission_deposit() -> Weight { + // Proof Size summary in bytes: + // Measured: `167` + // Estimated: `4365` + // Minimum execution time: 17_000_000 picoseconds. + Weight::from_parts(18_000_000, 0) + .saturating_add(Weight::from_parts(0, 4365)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `AmbassadorReferenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `AmbassadorReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:2 w:2) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(155814), added: 158289, mode: `MaxEncodedLen`) + fn cancel() -> Weight { + // Proof Size summary in bytes: + // Measured: `311` + // Estimated: `317568` + // Minimum execution time: 44_000_000 picoseconds. 
+ Weight::from_parts(45_000_000, 0) + .saturating_add(Weight::from_parts(0, 317568)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(3)) + } + /// Storage: `AmbassadorReferenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `AmbassadorReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:2 w:2) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(155814), added: 158289, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `ParachainInfo::ParachainId` (r:1 w:0) + /// Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) + /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) + /// Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) + /// Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) + /// Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) + /// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `AmbassadorReferenda::MetadataOf` (r:1 w:0) + /// Proof: `AmbassadorReferenda::MetadataOf` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) + fn kill() -> Weight { + // Proof Size summary in bytes: + // Measured: `626` + // Estimated: `317568` + // Minimum execution time: 183_000_000 picoseconds. + Weight::from_parts(187_000_000, 0) + .saturating_add(Weight::from_parts(0, 317568)) + .saturating_add(T::DbWeight::get().reads(11)) + .saturating_add(T::DbWeight::get().writes(6)) + } + /// Storage: `AmbassadorReferenda::TrackQueue` (r:1 w:0) + /// Proof: `AmbassadorReferenda::TrackQueue` (`max_values`: None, `max_size`: Some(171), added: 2646, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorReferenda::DecidingCount` (r:1 w:1) + /// Proof: `AmbassadorReferenda::DecidingCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + fn one_fewer_deciding_queue_empty() -> Weight { + // Proof Size summary in bytes: + // Measured: `140` + // Estimated: `3636` + // Minimum execution time: 12_000_000 picoseconds. 
+ Weight::from_parts(12_000_000, 0) + .saturating_add(Weight::from_parts(0, 3636)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `AmbassadorReferenda::TrackQueue` (r:1 w:1) + /// Proof: `AmbassadorReferenda::TrackQueue` (`max_values`: None, `max_size`: Some(171), added: 2646, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorReferenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `AmbassadorReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorCollective::MemberCount` (r:1 w:0) + /// Proof: `AmbassadorCollective::MemberCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:1 w:1) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(155814), added: 158289, mode: `MaxEncodedLen`) + fn one_fewer_deciding_failing() -> Weight { + // Proof Size summary in bytes: + // Measured: `1412` + // Estimated: `159279` + // Minimum execution time: 88_000_000 picoseconds. + Weight::from_parts(97_000_000, 0) + .saturating_add(Weight::from_parts(0, 159279)) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(3)) + } + /// Storage: `AmbassadorReferenda::TrackQueue` (r:1 w:1) + /// Proof: `AmbassadorReferenda::TrackQueue` (`max_values`: None, `max_size`: Some(171), added: 2646, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorReferenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `AmbassadorReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorCollective::MemberCount` (r:1 w:0) + /// Proof: `AmbassadorCollective::MemberCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:1 w:1) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(155814), added: 158289, mode: `MaxEncodedLen`) + fn one_fewer_deciding_passing() -> Weight { + // Proof Size summary in bytes: + // Measured: `1412` + // Estimated: `159279` + // Minimum execution time: 87_000_000 picoseconds. + Weight::from_parts(92_000_000, 0) + .saturating_add(Weight::from_parts(0, 159279)) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(3)) + } + /// Storage: `AmbassadorReferenda::ReferendumInfoFor` (r:1 w:0) + /// Proof: `AmbassadorReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorReferenda::TrackQueue` (r:1 w:1) + /// Proof: `AmbassadorReferenda::TrackQueue` (`max_values`: None, `max_size`: Some(171), added: 2646, mode: `MaxEncodedLen`) + fn nudge_referendum_requeued_insertion() -> Weight { + // Proof Size summary in bytes: + // Measured: `935` + // Estimated: `4365` + // Minimum execution time: 43_000_000 picoseconds. 
+ Weight::from_parts(46_000_000, 0) + .saturating_add(Weight::from_parts(0, 4365)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `AmbassadorReferenda::ReferendumInfoFor` (r:1 w:0) + /// Proof: `AmbassadorReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorReferenda::TrackQueue` (r:1 w:1) + /// Proof: `AmbassadorReferenda::TrackQueue` (`max_values`: None, `max_size`: Some(171), added: 2646, mode: `MaxEncodedLen`) + fn nudge_referendum_requeued_slide() -> Weight { + // Proof Size summary in bytes: + // Measured: `935` + // Estimated: `4365` + // Minimum execution time: 39_000_000 picoseconds. + Weight::from_parts(43_000_000, 0) + .saturating_add(Weight::from_parts(0, 4365)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `AmbassadorReferenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `AmbassadorReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorReferenda::DecidingCount` (r:1 w:0) + /// Proof: `AmbassadorReferenda::DecidingCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorReferenda::TrackQueue` (r:1 w:1) + /// Proof: `AmbassadorReferenda::TrackQueue` (`max_values`: None, `max_size`: Some(171), added: 2646, mode: `MaxEncodedLen`) + fn nudge_referendum_queued() -> Weight { + // Proof Size summary in bytes: + // Measured: `951` + // Estimated: `4365` + // Minimum execution time: 48_000_000 picoseconds. + Weight::from_parts(50_000_000, 0) + .saturating_add(Weight::from_parts(0, 4365)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `AmbassadorReferenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `AmbassadorReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorReferenda::DecidingCount` (r:1 w:0) + /// Proof: `AmbassadorReferenda::DecidingCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorReferenda::TrackQueue` (r:1 w:1) + /// Proof: `AmbassadorReferenda::TrackQueue` (`max_values`: None, `max_size`: Some(171), added: 2646, mode: `MaxEncodedLen`) + fn nudge_referendum_not_queued() -> Weight { + // Proof Size summary in bytes: + // Measured: `959` + // Estimated: `4365` + // Minimum execution time: 42_000_000 picoseconds. + Weight::from_parts(48_000_000, 0) + .saturating_add(Weight::from_parts(0, 4365)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `AmbassadorReferenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `AmbassadorReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:1 w:1) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(155814), added: 158289, mode: `MaxEncodedLen`) + fn nudge_referendum_no_deposit() -> Weight { + // Proof Size summary in bytes: + // Measured: `263` + // Estimated: `159279` + // Minimum execution time: 28_000_000 picoseconds. 
+ Weight::from_parts(30_000_000, 0) + .saturating_add(Weight::from_parts(0, 159279)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `AmbassadorReferenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `AmbassadorReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:1 w:1) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(155814), added: 158289, mode: `MaxEncodedLen`) + fn nudge_referendum_preparing() -> Weight { + // Proof Size summary in bytes: + // Measured: `311` + // Estimated: `159279` + // Minimum execution time: 26_000_000 picoseconds. + Weight::from_parts(28_000_000, 0) + .saturating_add(Weight::from_parts(0, 159279)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `AmbassadorReferenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `AmbassadorReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) + fn nudge_referendum_timed_out() -> Weight { + // Proof Size summary in bytes: + // Measured: `208` + // Estimated: `4365` + // Minimum execution time: 19_000_000 picoseconds. + Weight::from_parts(20_000_000, 0) + .saturating_add(Weight::from_parts(0, 4365)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `AmbassadorReferenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `AmbassadorReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorReferenda::DecidingCount` (r:1 w:1) + /// Proof: `AmbassadorReferenda::DecidingCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorCollective::MemberCount` (r:1 w:0) + /// Proof: `AmbassadorCollective::MemberCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:1 w:1) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(155814), added: 158289, mode: `MaxEncodedLen`) + fn nudge_referendum_begin_deciding_failing() -> Weight { + // Proof Size summary in bytes: + // Measured: `546` + // Estimated: `159279` + // Minimum execution time: 42_000_000 picoseconds. + Weight::from_parts(46_000_000, 0) + .saturating_add(Weight::from_parts(0, 159279)) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(3)) + } + /// Storage: `AmbassadorReferenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `AmbassadorReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorReferenda::DecidingCount` (r:1 w:1) + /// Proof: `AmbassadorReferenda::DecidingCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorCollective::MemberCount` (r:1 w:0) + /// Proof: `AmbassadorCollective::MemberCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:1 w:1) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(155814), added: 158289, mode: `MaxEncodedLen`) + fn nudge_referendum_begin_deciding_passing() -> Weight { + // Proof Size summary in bytes: + // Measured: `647` + // Estimated: `159279` + // Minimum execution time: 87_000_000 picoseconds. 
+ Weight::from_parts(93_000_000, 0) + .saturating_add(Weight::from_parts(0, 159279)) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(3)) + } + /// Storage: `AmbassadorReferenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `AmbassadorReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorCollective::MemberCount` (r:1 w:0) + /// Proof: `AmbassadorCollective::MemberCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:1 w:1) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(155814), added: 158289, mode: `MaxEncodedLen`) + fn nudge_referendum_begin_confirming() -> Weight { + // Proof Size summary in bytes: + // Measured: `700` + // Estimated: `159279` + // Minimum execution time: 100_000_000 picoseconds. + Weight::from_parts(120_000_000, 0) + .saturating_add(Weight::from_parts(0, 159279)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `AmbassadorReferenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `AmbassadorReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorCollective::MemberCount` (r:1 w:0) + /// Proof: `AmbassadorCollective::MemberCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:1 w:1) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(155814), added: 158289, mode: `MaxEncodedLen`) + fn nudge_referendum_end_confirming() -> Weight { + // Proof Size summary in bytes: + // Measured: `683` + // Estimated: `159279` + // Minimum execution time: 90_000_000 picoseconds. + Weight::from_parts(100_000_000, 0) + .saturating_add(Weight::from_parts(0, 159279)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `AmbassadorReferenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `AmbassadorReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorCollective::MemberCount` (r:1 w:0) + /// Proof: `AmbassadorCollective::MemberCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:1 w:1) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(155814), added: 158289, mode: `MaxEncodedLen`) + fn nudge_referendum_continue_not_confirming() -> Weight { + // Proof Size summary in bytes: + // Measured: `700` + // Estimated: `159279` + // Minimum execution time: 77_000_000 picoseconds. 
+ Weight::from_parts(82_000_000, 0) + .saturating_add(Weight::from_parts(0, 159279)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `AmbassadorReferenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `AmbassadorReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorCollective::MemberCount` (r:1 w:0) + /// Proof: `AmbassadorCollective::MemberCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:1 w:1) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(155814), added: 158289, mode: `MaxEncodedLen`) + fn nudge_referendum_continue_confirming() -> Weight { + // Proof Size summary in bytes: + // Measured: `704` + // Estimated: `159279` + // Minimum execution time: 68_000_000 picoseconds. + Weight::from_parts(77_000_000, 0) + .saturating_add(Weight::from_parts(0, 159279)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `AmbassadorReferenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `AmbassadorReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorCollective::MemberCount` (r:1 w:0) + /// Proof: `AmbassadorCollective::MemberCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:2 w:2) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(155814), added: 158289, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Lookup` (r:1 w:1) + /// Proof: `Scheduler::Lookup` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`) + fn nudge_referendum_approved() -> Weight { + // Proof Size summary in bytes: + // Measured: `704` + // Estimated: `317568` + // Minimum execution time: 99_000_000 picoseconds. + Weight::from_parts(104_000_000, 0) + .saturating_add(Weight::from_parts(0, 317568)) + .saturating_add(T::DbWeight::get().reads(5)) + .saturating_add(T::DbWeight::get().writes(4)) + } + /// Storage: `AmbassadorReferenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `AmbassadorReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorCollective::MemberCount` (r:1 w:0) + /// Proof: `AmbassadorCollective::MemberCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:1 w:1) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(155814), added: 158289, mode: `MaxEncodedLen`) + fn nudge_referendum_rejected() -> Weight { + // Proof Size summary in bytes: + // Measured: `700` + // Estimated: `159279` + // Minimum execution time: 87_000_000 picoseconds. 
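
Each flat entry in this file follows the same pattern: a benchmarked ref-time, a proof-size allowance taken from the `Estimated` figure, and the storage accesses priced through the runtime's `T::DbWeight`. A minimal sketch of that composition, assuming the generic `RocksDbWeight` constants stand in for `T::DbWeight` (the values below are illustrative, not copied from a specific entry):

```rust
// Illustrative sketch only, not part of the generated file.
use frame_support::{
    traits::Get,
    weights::{constants::RocksDbWeight, Weight},
};

// Mirrors the shape of a flat weight entry: benchmarked execution time,
// a proof-size allowance, plus priced storage reads and writes.
fn example_flat_weight() -> Weight {
    Weight::from_parts(100_000_000, 0)                  // ~100 µs of benchmarked ref-time
        .saturating_add(Weight::from_parts(0, 159_279)) // PoV allowance in bytes
        .saturating_add(RocksDbWeight::get().reads(3))
        .saturating_add(RocksDbWeight::get().writes(2))
}

fn main() {
    let w = example_flat_weight();
    println!("ref_time = {} ps, proof_size = {} bytes", w.ref_time(), w.proof_size());
}
```

The two dimensions are tracked independently, and `saturating_add` keeps each of them from overflowing on its own.
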
+ Weight::from_parts(100_000_000, 0) + .saturating_add(Weight::from_parts(0, 159279)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `AmbassadorReferenda::ReferendumInfoFor` (r:1 w:0) + /// Proof: `AmbassadorReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) + /// Storage: `Preimage::StatusFor` (r:1 w:0) + /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorReferenda::MetadataOf` (r:0 w:1) + /// Proof: `AmbassadorReferenda::MetadataOf` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) + fn set_some_metadata() -> Weight { + // Proof Size summary in bytes: + // Measured: `419` + // Estimated: `4365` + // Minimum execution time: 23_000_000 picoseconds. + Weight::from_parts(25_000_000, 0) + .saturating_add(Weight::from_parts(0, 4365)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `AmbassadorReferenda::ReferendumInfoFor` (r:1 w:0) + /// Proof: `AmbassadorReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorReferenda::MetadataOf` (r:1 w:1) + /// Proof: `AmbassadorReferenda::MetadataOf` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) + fn clear_metadata() -> Weight { + // Proof Size summary in bytes: + // Measured: `285` + // Estimated: `4365` + // Minimum execution time: 20_000_000 picoseconds. + Weight::from_parts(21_000_000, 0) + .saturating_add(Weight::from_parts(0, 4365)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } +} diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_referenda_fellowship_referenda.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_referenda_fellowship_referenda.rs new file mode 100644 index 00000000000..63f68833795 --- /dev/null +++ b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_referenda_fellowship_referenda.rs @@ -0,0 +1,535 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Autogenerated weights for `pallet_referenda` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-08-11, STEPS: `2`, REPEAT: `2`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `cob`, CPU: `` +//! 
WASM-EXECUTION: `Compiled`, CHAIN: `Some("collectives-polkadot-dev")`, DB CACHE: 1024 + +// Executed Command: +// target/release/polkadot-parachain +// benchmark +// pallet +// --chain=collectives-polkadot-dev +// --wasm-execution=compiled +// --pallet=pallet_referenda +// --extrinsic=* +// --steps=2 +// --repeat=2 +// --json +// --header=./file_header.txt +// --output=./parachains/runtimes/collectives/collectives-polkadot/src/weights/ + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `pallet_referenda`. +pub struct WeightInfo(PhantomData); +impl pallet_referenda::WeightInfo for WeightInfo { + /// Storage: `FellowshipCollective::Members` (r:1 w:0) + /// Proof: `FellowshipCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`) + /// Storage: `FellowshipReferenda::ReferendumCount` (r:1 w:1) + /// Proof: `FellowshipReferenda::ReferendumCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:1 w:1) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(155814), added: 158289, mode: `MaxEncodedLen`) + /// Storage: `FellowshipReferenda::ReferendumInfoFor` (r:0 w:1) + /// Proof: `FellowshipReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) + fn submit() -> Weight { + // Proof Size summary in bytes: + // Measured: `389` + // Estimated: `159279` + // Minimum execution time: 34_000_000 picoseconds. + Weight::from_parts(36_000_000, 0) + .saturating_add(Weight::from_parts(0, 159279)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(3)) + } + /// Storage: `FellowshipReferenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `FellowshipReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:2 w:2) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(155814), added: 158289, mode: `MaxEncodedLen`) + fn place_decision_deposit_preparing() -> Weight { + // Proof Size summary in bytes: + // Measured: `400` + // Estimated: `317568` + // Minimum execution time: 64_000_000 picoseconds. + Weight::from_parts(67_000_000, 0) + .saturating_add(Weight::from_parts(0, 317568)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(3)) + } + /// Storage: `FellowshipReferenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `FellowshipReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) + /// Storage: `FellowshipReferenda::DecidingCount` (r:1 w:0) + /// Proof: `FellowshipReferenda::DecidingCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `FellowshipReferenda::TrackQueue` (r:1 w:1) + /// Proof: `FellowshipReferenda::TrackQueue` (`max_values`: None, `max_size`: Some(812), added: 3287, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:1 w:1) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(155814), added: 158289, mode: `MaxEncodedLen`) + fn place_decision_deposit_queued() -> Weight { + // Proof Size summary in bytes: + // Measured: `2038` + // Estimated: `159279` + // Minimum execution time: 99_000_000 picoseconds. 
+ Weight::from_parts(109_000_000, 0) + .saturating_add(Weight::from_parts(0, 159279)) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(3)) + } + /// Storage: `FellowshipReferenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `FellowshipReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) + /// Storage: `FellowshipReferenda::DecidingCount` (r:1 w:0) + /// Proof: `FellowshipReferenda::DecidingCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `FellowshipReferenda::TrackQueue` (r:1 w:1) + /// Proof: `FellowshipReferenda::TrackQueue` (`max_values`: None, `max_size`: Some(812), added: 3287, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:1 w:1) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(155814), added: 158289, mode: `MaxEncodedLen`) + fn place_decision_deposit_not_queued() -> Weight { + // Proof Size summary in bytes: + // Measured: `2079` + // Estimated: `159279` + // Minimum execution time: 101_000_000 picoseconds. + Weight::from_parts(111_000_000, 0) + .saturating_add(Weight::from_parts(0, 159279)) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(3)) + } + /// Storage: `FellowshipReferenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `FellowshipReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) + /// Storage: `FellowshipReferenda::DecidingCount` (r:1 w:1) + /// Proof: `FellowshipReferenda::DecidingCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `FellowshipCollective::MemberCount` (r:1 w:0) + /// Proof: `FellowshipCollective::MemberCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:2 w:2) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(155814), added: 158289, mode: `MaxEncodedLen`) + fn place_decision_deposit_passing() -> Weight { + // Proof Size summary in bytes: + // Measured: `836` + // Estimated: `317568` + // Minimum execution time: 135_000_000 picoseconds. + Weight::from_parts(153_000_000, 0) + .saturating_add(Weight::from_parts(0, 317568)) + .saturating_add(T::DbWeight::get().reads(5)) + .saturating_add(T::DbWeight::get().writes(4)) + } + /// Storage: `FellowshipReferenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `FellowshipReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) + /// Storage: `FellowshipReferenda::DecidingCount` (r:1 w:1) + /// Proof: `FellowshipReferenda::DecidingCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `FellowshipCollective::MemberCount` (r:1 w:0) + /// Proof: `FellowshipCollective::MemberCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:2 w:2) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(155814), added: 158289, mode: `MaxEncodedLen`) + fn place_decision_deposit_failing() -> Weight { + // Proof Size summary in bytes: + // Measured: `735` + // Estimated: `317568` + // Minimum execution time: 78_000_000 picoseconds. 
+ Weight::from_parts(82_000_000, 0) + .saturating_add(Weight::from_parts(0, 317568)) + .saturating_add(T::DbWeight::get().reads(5)) + .saturating_add(T::DbWeight::get().writes(4)) + } + /// Storage: `FellowshipReferenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `FellowshipReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) + fn refund_decision_deposit() -> Weight { + // Proof Size summary in bytes: + // Measured: `351` + // Estimated: `4365` + // Minimum execution time: 38_000_000 picoseconds. + Weight::from_parts(39_000_000, 0) + .saturating_add(Weight::from_parts(0, 4365)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `FellowshipReferenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `FellowshipReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) + fn refund_submission_deposit() -> Weight { + // Proof Size summary in bytes: + // Measured: `201` + // Estimated: `4365` + // Minimum execution time: 18_000_000 picoseconds. + Weight::from_parts(19_000_000, 0) + .saturating_add(Weight::from_parts(0, 4365)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `FellowshipReferenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `FellowshipReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:2 w:2) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(155814), added: 158289, mode: `MaxEncodedLen`) + fn cancel() -> Weight { + // Proof Size summary in bytes: + // Measured: `345` + // Estimated: `317568` + // Minimum execution time: 45_000_000 picoseconds. 
+ Weight::from_parts(46_000_000, 0) + .saturating_add(Weight::from_parts(0, 317568)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(3)) + } + /// Storage: `FellowshipReferenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `FellowshipReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:2 w:2) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(155814), added: 158289, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `ParachainInfo::ParachainId` (r:1 w:0) + /// Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) + /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) + /// Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) + /// Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) + /// Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) + /// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `FellowshipReferenda::MetadataOf` (r:1 w:0) + /// Proof: `FellowshipReferenda::MetadataOf` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) + fn kill() -> Weight { + // Proof Size summary in bytes: + // Measured: `587` + // Estimated: `317568` + // Minimum execution time: 185_000_000 picoseconds. + Weight::from_parts(196_000_000, 0) + .saturating_add(Weight::from_parts(0, 317568)) + .saturating_add(T::DbWeight::get().reads(11)) + .saturating_add(T::DbWeight::get().writes(6)) + } + /// Storage: `FellowshipReferenda::TrackQueue` (r:1 w:0) + /// Proof: `FellowshipReferenda::TrackQueue` (`max_values`: None, `max_size`: Some(812), added: 3287, mode: `MaxEncodedLen`) + /// Storage: `FellowshipReferenda::DecidingCount` (r:1 w:1) + /// Proof: `FellowshipReferenda::DecidingCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + fn one_fewer_deciding_queue_empty() -> Weight { + // Proof Size summary in bytes: + // Measured: `174` + // Estimated: `4277` + // Minimum execution time: 12_000_000 picoseconds. 
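
Even the heaviest entry here, `kill()` (eleven reads, six writes, and a 317 568-byte proof allowance), has to fit a per-block budget on both axes. A rough sketch of that check, with an assumed ceiling of 0.5 s of ref-time and 5 MiB of proof size rather than the runtime's actual limits, and `RocksDbWeight` standing in for `T::DbWeight`:

```rust
// Rough check under assumed limits; the ceiling below is illustrative and is
// not read from the actual collectives-westend configuration.
use frame_support::{
    traits::Get,
    weights::{constants::RocksDbWeight, Weight},
};

fn main() {
    // Rebuild the `kill()` entry above with RocksDb constants in place of `T::DbWeight`.
    let kill = Weight::from_parts(196_000_000, 0)
        .saturating_add(Weight::from_parts(0, 317_568))
        .saturating_add(RocksDbWeight::get().reads(11))
        .saturating_add(RocksDbWeight::get().writes(6));

    // Assumed per-block ceiling: 0.5 s of ref-time, 5 MiB of proof size.
    let assumed_block_limit = Weight::from_parts(500_000_000_000, 5 * 1024 * 1024);

    // Both dimensions have to fit independently.
    let fits = kill.ref_time() <= assumed_block_limit.ref_time()
        && kill.proof_size() <= assumed_block_limit.proof_size();
    println!("kill() = {:?}, fits assumed block limit: {}", kill, fits);
}
```
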
+ Weight::from_parts(15_000_000, 0) + .saturating_add(Weight::from_parts(0, 4277)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `FellowshipReferenda::TrackQueue` (r:1 w:1) + /// Proof: `FellowshipReferenda::TrackQueue` (`max_values`: None, `max_size`: Some(812), added: 3287, mode: `MaxEncodedLen`) + /// Storage: `FellowshipReferenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `FellowshipReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) + /// Storage: `FellowshipCollective::MemberCount` (r:1 w:0) + /// Proof: `FellowshipCollective::MemberCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:1 w:1) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(155814), added: 158289, mode: `MaxEncodedLen`) + fn one_fewer_deciding_failing() -> Weight { + // Proof Size summary in bytes: + // Measured: `2452` + // Estimated: `159279` + // Minimum execution time: 82_000_000 picoseconds. + Weight::from_parts(90_000_000, 0) + .saturating_add(Weight::from_parts(0, 159279)) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(3)) + } + /// Storage: `FellowshipReferenda::TrackQueue` (r:1 w:1) + /// Proof: `FellowshipReferenda::TrackQueue` (`max_values`: None, `max_size`: Some(812), added: 3287, mode: `MaxEncodedLen`) + /// Storage: `FellowshipReferenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `FellowshipReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) + /// Storage: `FellowshipCollective::MemberCount` (r:1 w:0) + /// Proof: `FellowshipCollective::MemberCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:1 w:1) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(155814), added: 158289, mode: `MaxEncodedLen`) + fn one_fewer_deciding_passing() -> Weight { + // Proof Size summary in bytes: + // Measured: `2452` + // Estimated: `159279` + // Minimum execution time: 91_000_000 picoseconds. + Weight::from_parts(99_000_000, 0) + .saturating_add(Weight::from_parts(0, 159279)) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(3)) + } + /// Storage: `FellowshipReferenda::ReferendumInfoFor` (r:1 w:0) + /// Proof: `FellowshipReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) + /// Storage: `FellowshipReferenda::TrackQueue` (r:1 w:1) + /// Proof: `FellowshipReferenda::TrackQueue` (`max_values`: None, `max_size`: Some(812), added: 3287, mode: `MaxEncodedLen`) + fn nudge_referendum_requeued_insertion() -> Weight { + // Proof Size summary in bytes: + // Measured: `1841` + // Estimated: `4365` + // Minimum execution time: 41_000_000 picoseconds. 
+ Weight::from_parts(44_000_000, 0) + .saturating_add(Weight::from_parts(0, 4365)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `FellowshipReferenda::ReferendumInfoFor` (r:1 w:0) + /// Proof: `FellowshipReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) + /// Storage: `FellowshipReferenda::TrackQueue` (r:1 w:1) + /// Proof: `FellowshipReferenda::TrackQueue` (`max_values`: None, `max_size`: Some(812), added: 3287, mode: `MaxEncodedLen`) + fn nudge_referendum_requeued_slide() -> Weight { + // Proof Size summary in bytes: + // Measured: `1808` + // Estimated: `4365` + // Minimum execution time: 46_000_000 picoseconds. + Weight::from_parts(55_000_000, 0) + .saturating_add(Weight::from_parts(0, 4365)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `FellowshipReferenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `FellowshipReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) + /// Storage: `FellowshipReferenda::DecidingCount` (r:1 w:0) + /// Proof: `FellowshipReferenda::DecidingCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `FellowshipReferenda::TrackQueue` (r:1 w:1) + /// Proof: `FellowshipReferenda::TrackQueue` (`max_values`: None, `max_size`: Some(812), added: 3287, mode: `MaxEncodedLen`) + fn nudge_referendum_queued() -> Weight { + // Proof Size summary in bytes: + // Measured: `1824` + // Estimated: `4365` + // Minimum execution time: 49_000_000 picoseconds. + Weight::from_parts(53_000_000, 0) + .saturating_add(Weight::from_parts(0, 4365)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `FellowshipReferenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `FellowshipReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) + /// Storage: `FellowshipReferenda::DecidingCount` (r:1 w:0) + /// Proof: `FellowshipReferenda::DecidingCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `FellowshipReferenda::TrackQueue` (r:1 w:1) + /// Proof: `FellowshipReferenda::TrackQueue` (`max_values`: None, `max_size`: Some(812), added: 3287, mode: `MaxEncodedLen`) + fn nudge_referendum_not_queued() -> Weight { + // Proof Size summary in bytes: + // Measured: `1865` + // Estimated: `4365` + // Minimum execution time: 51_000_000 picoseconds. + Weight::from_parts(54_000_000, 0) + .saturating_add(Weight::from_parts(0, 4365)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `FellowshipReferenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `FellowshipReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:1 w:1) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(155814), added: 158289, mode: `MaxEncodedLen`) + fn nudge_referendum_no_deposit() -> Weight { + // Proof Size summary in bytes: + // Measured: `297` + // Estimated: `159279` + // Minimum execution time: 28_000_000 picoseconds. 
+ Weight::from_parts(30_000_000, 0) + .saturating_add(Weight::from_parts(0, 159279)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `FellowshipReferenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `FellowshipReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:1 w:1) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(155814), added: 158289, mode: `MaxEncodedLen`) + fn nudge_referendum_preparing() -> Weight { + // Proof Size summary in bytes: + // Measured: `345` + // Estimated: `159279` + // Minimum execution time: 28_000_000 picoseconds. + Weight::from_parts(29_000_000, 0) + .saturating_add(Weight::from_parts(0, 159279)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `FellowshipReferenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `FellowshipReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) + fn nudge_referendum_timed_out() -> Weight { + // Proof Size summary in bytes: + // Measured: `242` + // Estimated: `4365` + // Minimum execution time: 20_000_000 picoseconds. + Weight::from_parts(21_000_000, 0) + .saturating_add(Weight::from_parts(0, 4365)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `FellowshipReferenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `FellowshipReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) + /// Storage: `FellowshipReferenda::DecidingCount` (r:1 w:1) + /// Proof: `FellowshipReferenda::DecidingCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `FellowshipCollective::MemberCount` (r:1 w:0) + /// Proof: `FellowshipCollective::MemberCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:1 w:1) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(155814), added: 158289, mode: `MaxEncodedLen`) + fn nudge_referendum_begin_deciding_failing() -> Weight { + // Proof Size summary in bytes: + // Measured: `680` + // Estimated: `159279` + // Minimum execution time: 42_000_000 picoseconds. + Weight::from_parts(47_000_000, 0) + .saturating_add(Weight::from_parts(0, 159279)) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(3)) + } + /// Storage: `FellowshipReferenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `FellowshipReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) + /// Storage: `FellowshipReferenda::DecidingCount` (r:1 w:1) + /// Proof: `FellowshipReferenda::DecidingCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `FellowshipCollective::MemberCount` (r:1 w:0) + /// Proof: `FellowshipCollective::MemberCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:1 w:1) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(155814), added: 158289, mode: `MaxEncodedLen`) + fn nudge_referendum_begin_deciding_passing() -> Weight { + // Proof Size summary in bytes: + // Measured: `781` + // Estimated: `159279` + // Minimum execution time: 90_000_000 picoseconds. 
+ Weight::from_parts(95_000_000, 0) + .saturating_add(Weight::from_parts(0, 159279)) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(3)) + } + /// Storage: `FellowshipReferenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `FellowshipReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) + /// Storage: `FellowshipCollective::MemberCount` (r:1 w:0) + /// Proof: `FellowshipCollective::MemberCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:1 w:1) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(155814), added: 158289, mode: `MaxEncodedLen`) + fn nudge_referendum_begin_confirming() -> Weight { + // Proof Size summary in bytes: + // Measured: `834` + // Estimated: `159279` + // Minimum execution time: 84_000_000 picoseconds. + Weight::from_parts(93_000_000, 0) + .saturating_add(Weight::from_parts(0, 159279)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `FellowshipReferenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `FellowshipReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) + /// Storage: `FellowshipCollective::MemberCount` (r:1 w:0) + /// Proof: `FellowshipCollective::MemberCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:1 w:1) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(155814), added: 158289, mode: `MaxEncodedLen`) + fn nudge_referendum_end_confirming() -> Weight { + // Proof Size summary in bytes: + // Measured: `817` + // Estimated: `159279` + // Minimum execution time: 88_000_000 picoseconds. + Weight::from_parts(98_000_000, 0) + .saturating_add(Weight::from_parts(0, 159279)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `FellowshipReferenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `FellowshipReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) + /// Storage: `FellowshipCollective::MemberCount` (r:1 w:0) + /// Proof: `FellowshipCollective::MemberCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:1 w:1) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(155814), added: 158289, mode: `MaxEncodedLen`) + fn nudge_referendum_continue_not_confirming() -> Weight { + // Proof Size summary in bytes: + // Measured: `834` + // Estimated: `159279` + // Minimum execution time: 81_000_000 picoseconds. 
+ Weight::from_parts(93_000_000, 0) + .saturating_add(Weight::from_parts(0, 159279)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `FellowshipReferenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `FellowshipReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) + /// Storage: `FellowshipCollective::MemberCount` (r:1 w:0) + /// Proof: `FellowshipCollective::MemberCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:1 w:1) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(155814), added: 158289, mode: `MaxEncodedLen`) + fn nudge_referendum_continue_confirming() -> Weight { + // Proof Size summary in bytes: + // Measured: `838` + // Estimated: `159279` + // Minimum execution time: 74_000_000 picoseconds. + Weight::from_parts(77_000_000, 0) + .saturating_add(Weight::from_parts(0, 159279)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `FellowshipReferenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `FellowshipReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) + /// Storage: `FellowshipCollective::MemberCount` (r:1 w:0) + /// Proof: `FellowshipCollective::MemberCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:2 w:2) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(155814), added: 158289, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Lookup` (r:1 w:1) + /// Proof: `Scheduler::Lookup` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`) + fn nudge_referendum_approved() -> Weight { + // Proof Size summary in bytes: + // Measured: `838` + // Estimated: `317568` + // Minimum execution time: 105_000_000 picoseconds. + Weight::from_parts(123_000_000, 0) + .saturating_add(Weight::from_parts(0, 317568)) + .saturating_add(T::DbWeight::get().reads(5)) + .saturating_add(T::DbWeight::get().writes(4)) + } + /// Storage: `FellowshipReferenda::ReferendumInfoFor` (r:1 w:1) + /// Proof: `FellowshipReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) + /// Storage: `FellowshipCollective::MemberCount` (r:1 w:0) + /// Proof: `FellowshipCollective::MemberCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:1 w:1) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(155814), added: 158289, mode: `MaxEncodedLen`) + fn nudge_referendum_rejected() -> Weight { + // Proof Size summary in bytes: + // Measured: `834` + // Estimated: `159279` + // Minimum execution time: 90_000_000 picoseconds. 
+ Weight::from_parts(100_000_000, 0) + .saturating_add(Weight::from_parts(0, 159279)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `FellowshipReferenda::ReferendumInfoFor` (r:1 w:0) + /// Proof: `FellowshipReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) + /// Storage: `Preimage::StatusFor` (r:1 w:0) + /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + /// Storage: `FellowshipReferenda::MetadataOf` (r:0 w:1) + /// Proof: `FellowshipReferenda::MetadataOf` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) + fn set_some_metadata() -> Weight { + // Proof Size summary in bytes: + // Measured: `453` + // Estimated: `4365` + // Minimum execution time: 24_000_000 picoseconds. + Weight::from_parts(24_000_000, 0) + .saturating_add(Weight::from_parts(0, 4365)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `FellowshipReferenda::ReferendumInfoFor` (r:1 w:0) + /// Proof: `FellowshipReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) + /// Storage: `FellowshipReferenda::MetadataOf` (r:1 w:1) + /// Proof: `FellowshipReferenda::MetadataOf` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) + fn clear_metadata() -> Weight { + // Proof Size summary in bytes: + // Measured: `319` + // Estimated: `4365` + // Minimum execution time: 21_000_000 picoseconds. + Weight::from_parts(23_000_000, 0) + .saturating_add(Weight::from_parts(0, 4365)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } +} diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_salary_ambassador_salary.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_salary_ambassador_salary.rs new file mode 100644 index 00000000000..0522420f2f5 --- /dev/null +++ b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_salary_ambassador_salary.rs @@ -0,0 +1,190 @@ +// Copyright Parity Technologies (UK) Ltd. +// This file is part of Cumulus. + +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . + +//! Autogenerated weights for `pallet_salary` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-08-11, STEPS: `2`, REPEAT: `2`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `cob`, CPU: `` +//! 
WASM-EXECUTION: `Compiled`, CHAIN: `Some("collectives-polkadot-dev")`, DB CACHE: 1024 + +// Executed Command: +// target/release/polkadot-parachain +// benchmark +// pallet +// --chain=collectives-polkadot-dev +// --wasm-execution=compiled +// --pallet=pallet_salary +// --extrinsic=* +// --steps=2 +// --repeat=2 +// --json +// --header=./file_header.txt +// --output=./parachains/runtimes/collectives/collectives-polkadot/src/weights/ + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `pallet_salary`. +pub struct WeightInfo(PhantomData); +impl pallet_salary::WeightInfo for WeightInfo { + /// Storage: `AmbassadorSalary::Status` (r:1 w:1) + /// Proof: `AmbassadorSalary::Status` (`max_values`: Some(1), `max_size`: Some(56), added: 551, mode: `MaxEncodedLen`) + fn init() -> Weight { + // Proof Size summary in bytes: + // Measured: `109` + // Estimated: `1541` + // Minimum execution time: 12_000_000 picoseconds. + Weight::from_parts(14_000_000, 0) + .saturating_add(Weight::from_parts(0, 1541)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `AmbassadorSalary::Status` (r:1 w:1) + /// Proof: `AmbassadorSalary::Status` (`max_values`: Some(1), `max_size`: Some(56), added: 551, mode: `MaxEncodedLen`) + fn bump() -> Weight { + // Proof Size summary in bytes: + // Measured: `191` + // Estimated: `1541` + // Minimum execution time: 15_000_000 picoseconds. + Weight::from_parts(16_000_000, 0) + .saturating_add(Weight::from_parts(0, 1541)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `AmbassadorSalary::Status` (r:1 w:0) + /// Proof: `AmbassadorSalary::Status` (`max_values`: Some(1), `max_size`: Some(56), added: 551, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorCollective::Members` (r:1 w:0) + /// Proof: `AmbassadorCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorSalary::Claimant` (r:1 w:1) + /// Proof: `AmbassadorSalary::Claimant` (`max_values`: None, `max_size`: Some(86), added: 2561, mode: `MaxEncodedLen`) + fn induct() -> Weight { + // Proof Size summary in bytes: + // Measured: `400` + // Estimated: `3551` + // Minimum execution time: 23_000_000 picoseconds. + Weight::from_parts(23_000_000, 0) + .saturating_add(Weight::from_parts(0, 3551)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `AmbassadorCollective::Members` (r:1 w:0) + /// Proof: `AmbassadorCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorSalary::Status` (r:1 w:1) + /// Proof: `AmbassadorSalary::Status` (`max_values`: Some(1), `max_size`: Some(56), added: 551, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorSalary::Claimant` (r:1 w:1) + /// Proof: `AmbassadorSalary::Claimant` (`max_values`: None, `max_size`: Some(86), added: 2561, mode: `MaxEncodedLen`) + fn register() -> Weight { + // Proof Size summary in bytes: + // Measured: `467` + // Estimated: `3551` + // Minimum execution time: 27_000_000 picoseconds. 
+ Weight::from_parts(28_000_000, 0) + .saturating_add(Weight::from_parts(0, 3551)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `AmbassadorSalary::Status` (r:1 w:1) + /// Proof: `AmbassadorSalary::Status` (`max_values`: Some(1), `max_size`: Some(56), added: 551, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorSalary::Claimant` (r:1 w:1) + /// Proof: `AmbassadorSalary::Claimant` (`max_values`: None, `max_size`: Some(86), added: 2561, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorCollective::Members` (r:1 w:0) + /// Proof: `AmbassadorCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`) + /// Storage: `ParachainInfo::ParachainId` (r:1 w:0) + /// Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `PolkadotXcm::QueryCounter` (r:1 w:1) + /// Proof: `PolkadotXcm::QueryCounter` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) + /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) + /// Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) + /// Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::RelevantMessagingState` (r:1 w:0) + /// Proof: `ParachainSystem::RelevantMessagingState` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `XcmpQueue::OutboundXcmpStatus` (r:1 w:1) + /// Proof: `XcmpQueue::OutboundXcmpStatus` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `XcmpQueue::OutboundXcmpMessages` (r:0 w:1) + /// Proof: `XcmpQueue::OutboundXcmpMessages` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::Queries` (r:0 w:1) + /// Proof: `PolkadotXcm::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn payout() -> Weight { + // Proof Size summary in bytes: + // Measured: `879` + // Estimated: `4344` + // Minimum execution time: 68_000_000 picoseconds. 
+ Weight::from_parts(72_000_000, 0) + .saturating_add(Weight::from_parts(0, 4344)) + .saturating_add(T::DbWeight::get().reads(10)) + .saturating_add(T::DbWeight::get().writes(7)) + } + /// Storage: `AmbassadorSalary::Status` (r:1 w:1) + /// Proof: `AmbassadorSalary::Status` (`max_values`: Some(1), `max_size`: Some(56), added: 551, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorSalary::Claimant` (r:1 w:1) + /// Proof: `AmbassadorSalary::Claimant` (`max_values`: None, `max_size`: Some(86), added: 2561, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorCollective::Members` (r:1 w:0) + /// Proof: `AmbassadorCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`) + /// Storage: `ParachainInfo::ParachainId` (r:1 w:0) + /// Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `PolkadotXcm::QueryCounter` (r:1 w:1) + /// Proof: `PolkadotXcm::QueryCounter` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) + /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) + /// Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) + /// Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::RelevantMessagingState` (r:1 w:0) + /// Proof: `ParachainSystem::RelevantMessagingState` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `XcmpQueue::OutboundXcmpStatus` (r:1 w:1) + /// Proof: `XcmpQueue::OutboundXcmpStatus` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `XcmpQueue::OutboundXcmpMessages` (r:0 w:1) + /// Proof: `XcmpQueue::OutboundXcmpMessages` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::Queries` (r:0 w:1) + /// Proof: `PolkadotXcm::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn payout_other() -> Weight { + // Proof Size summary in bytes: + // Measured: `879` + // Estimated: `4344` + // Minimum execution time: 69_000_000 picoseconds. + Weight::from_parts(70_000_000, 0) + .saturating_add(Weight::from_parts(0, 4344)) + .saturating_add(T::DbWeight::get().reads(10)) + .saturating_add(T::DbWeight::get().writes(7)) + } + /// Storage: `AmbassadorSalary::Status` (r:1 w:1) + /// Proof: `AmbassadorSalary::Status` (`max_values`: Some(1), `max_size`: Some(56), added: 551, mode: `MaxEncodedLen`) + /// Storage: `AmbassadorSalary::Claimant` (r:1 w:1) + /// Proof: `AmbassadorSalary::Claimant` (`max_values`: None, `max_size`: Some(86), added: 2561, mode: `MaxEncodedLen`) + /// Storage: `PolkadotXcm::Queries` (r:1 w:1) + /// Proof: `PolkadotXcm::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn check_payment() -> Weight { + // Proof Size summary in bytes: + // Measured: `479` + // Estimated: `3944` + // Minimum execution time: 27_000_000 picoseconds. 
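
For the XCM-routed `payout()` and `payout_other()` entries, the ten reads and seven writes dominate the listed ~70 µs of execution once they are priced by a `RuntimeDbWeight`. A sketch of that pricing, assuming the stock RocksDb constants rather than whatever the deployed parachain configures:

```rust
// Shows how the read/write counts above translate into weight. The RocksDb
// constants are the generic Substrate defaults; the deployed runtime may
// configure different `DbWeight` values.
use frame_support::{
    traits::Get,
    weights::{constants::RocksDbWeight, Weight},
};

fn main() {
    let db = RocksDbWeight::get();
    println!("one read  = {} ps", db.read);
    println!("one write = {} ps", db.write);

    // `payout()` / `payout_other()` above account for 10 reads and 7 writes.
    let io: Weight = db.reads(10).saturating_add(db.writes(7));
    println!("db portion of payout(): {} ps ref-time", io.ref_time());
}
```
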
+ Weight::from_parts(28_000_000, 0) + .saturating_add(Weight::from_parts(0, 3944)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(3)) + } +} diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_salary_fellowship_salary.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_salary_fellowship_salary.rs new file mode 100644 index 00000000000..37680b4e5df --- /dev/null +++ b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_salary_fellowship_salary.rs @@ -0,0 +1,189 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Autogenerated weights for `pallet_salary` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-08-11, STEPS: `2`, REPEAT: `2`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `cob`, CPU: `` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("collectives-polkadot-dev")`, DB CACHE: 1024 + +// Executed Command: +// target/release/polkadot-parachain +// benchmark +// pallet +// --chain=collectives-polkadot-dev +// --wasm-execution=compiled +// --pallet=pallet_salary +// --extrinsic=* +// --steps=2 +// --repeat=2 +// --json +// --header=./file_header.txt +// --output=./parachains/runtimes/collectives/collectives-polkadot/src/weights/ + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `pallet_salary`. +pub struct WeightInfo(PhantomData); +impl pallet_salary::WeightInfo for WeightInfo { + /// Storage: `FellowshipSalary::Status` (r:1 w:1) + /// Proof: `FellowshipSalary::Status` (`max_values`: Some(1), `max_size`: Some(56), added: 551, mode: `MaxEncodedLen`) + fn init() -> Weight { + // Proof Size summary in bytes: + // Measured: `142` + // Estimated: `1541` + // Minimum execution time: 13_000_000 picoseconds. + Weight::from_parts(17_000_000, 0) + .saturating_add(Weight::from_parts(0, 1541)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `FellowshipSalary::Status` (r:1 w:1) + /// Proof: `FellowshipSalary::Status` (`max_values`: Some(1), `max_size`: Some(56), added: 551, mode: `MaxEncodedLen`) + fn bump() -> Weight { + // Proof Size summary in bytes: + // Measured: `224` + // Estimated: `1541` + // Minimum execution time: 15_000_000 picoseconds. 
+ Weight::from_parts(18_000_000, 0) + .saturating_add(Weight::from_parts(0, 1541)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `FellowshipSalary::Status` (r:1 w:0) + /// Proof: `FellowshipSalary::Status` (`max_values`: Some(1), `max_size`: Some(56), added: 551, mode: `MaxEncodedLen`) + /// Storage: `FellowshipCollective::Members` (r:1 w:0) + /// Proof: `FellowshipCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`) + /// Storage: `FellowshipSalary::Claimant` (r:1 w:1) + /// Proof: `FellowshipSalary::Claimant` (`max_values`: None, `max_size`: Some(86), added: 2561, mode: `MaxEncodedLen`) + fn induct() -> Weight { + // Proof Size summary in bytes: + // Measured: `395` + // Estimated: `3551` + // Minimum execution time: 22_000_000 picoseconds. + Weight::from_parts(25_000_000, 0) + .saturating_add(Weight::from_parts(0, 3551)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `FellowshipCollective::Members` (r:1 w:0) + /// Proof: `FellowshipCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`) + /// Storage: `FellowshipSalary::Status` (r:1 w:1) + /// Proof: `FellowshipSalary::Status` (`max_values`: Some(1), `max_size`: Some(56), added: 551, mode: `MaxEncodedLen`) + /// Storage: `FellowshipSalary::Claimant` (r:1 w:1) + /// Proof: `FellowshipSalary::Claimant` (`max_values`: None, `max_size`: Some(86), added: 2561, mode: `MaxEncodedLen`) + fn register() -> Weight { + // Proof Size summary in bytes: + // Measured: `462` + // Estimated: `3551` + // Minimum execution time: 26_000_000 picoseconds. + Weight::from_parts(29_000_000, 0) + .saturating_add(Weight::from_parts(0, 3551)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `FellowshipSalary::Status` (r:1 w:1) + /// Proof: `FellowshipSalary::Status` (`max_values`: Some(1), `max_size`: Some(56), added: 551, mode: `MaxEncodedLen`) + /// Storage: `FellowshipSalary::Claimant` (r:1 w:1) + /// Proof: `FellowshipSalary::Claimant` (`max_values`: None, `max_size`: Some(86), added: 2561, mode: `MaxEncodedLen`) + /// Storage: `FellowshipCollective::Members` (r:1 w:0) + /// Proof: `FellowshipCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`) + /// Storage: `ParachainInfo::ParachainId` (r:1 w:0) + /// Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `PolkadotXcm::QueryCounter` (r:1 w:1) + /// Proof: `PolkadotXcm::QueryCounter` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) + /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) + /// Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) + /// Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::RelevantMessagingState` (r:1 w:0) + /// Proof: `ParachainSystem::RelevantMessagingState` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `XcmpQueue::OutboundXcmpStatus` (r:1 w:1) + /// Proof: `XcmpQueue::OutboundXcmpStatus` 
(`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `XcmpQueue::OutboundXcmpMessages` (r:0 w:1) + /// Proof: `XcmpQueue::OutboundXcmpMessages` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::Queries` (r:0 w:1) + /// Proof: `PolkadotXcm::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn payout() -> Weight { + // Proof Size summary in bytes: + // Measured: `774` + // Estimated: `4239` + // Minimum execution time: 67_000_000 picoseconds. + Weight::from_parts(74_000_000, 0) + .saturating_add(Weight::from_parts(0, 4239)) + .saturating_add(T::DbWeight::get().reads(10)) + .saturating_add(T::DbWeight::get().writes(7)) + } + /// Storage: `FellowshipSalary::Status` (r:1 w:1) + /// Proof: `FellowshipSalary::Status` (`max_values`: Some(1), `max_size`: Some(56), added: 551, mode: `MaxEncodedLen`) + /// Storage: `FellowshipSalary::Claimant` (r:1 w:1) + /// Proof: `FellowshipSalary::Claimant` (`max_values`: None, `max_size`: Some(86), added: 2561, mode: `MaxEncodedLen`) + /// Storage: `FellowshipCollective::Members` (r:1 w:0) + /// Proof: `FellowshipCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`) + /// Storage: `ParachainInfo::ParachainId` (r:1 w:0) + /// Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `PolkadotXcm::QueryCounter` (r:1 w:1) + /// Proof: `PolkadotXcm::QueryCounter` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) + /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) + /// Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) + /// Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::RelevantMessagingState` (r:1 w:0) + /// Proof: `ParachainSystem::RelevantMessagingState` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `XcmpQueue::OutboundXcmpStatus` (r:1 w:1) + /// Proof: `XcmpQueue::OutboundXcmpStatus` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `XcmpQueue::OutboundXcmpMessages` (r:0 w:1) + /// Proof: `XcmpQueue::OutboundXcmpMessages` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::Queries` (r:0 w:1) + /// Proof: `PolkadotXcm::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn payout_other() -> Weight { + // Proof Size summary in bytes: + // Measured: `774` + // Estimated: `4239` + // Minimum execution time: 66_000_000 picoseconds. 
+ Weight::from_parts(71_000_000, 0) + .saturating_add(Weight::from_parts(0, 4239)) + .saturating_add(T::DbWeight::get().reads(10)) + .saturating_add(T::DbWeight::get().writes(7)) + } + /// Storage: `FellowshipSalary::Status` (r:1 w:1) + /// Proof: `FellowshipSalary::Status` (`max_values`: Some(1), `max_size`: Some(56), added: 551, mode: `MaxEncodedLen`) + /// Storage: `FellowshipSalary::Claimant` (r:1 w:1) + /// Proof: `FellowshipSalary::Claimant` (`max_values`: None, `max_size`: Some(86), added: 2561, mode: `MaxEncodedLen`) + /// Storage: `PolkadotXcm::Queries` (r:1 w:1) + /// Proof: `PolkadotXcm::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn check_payment() -> Weight { + // Proof Size summary in bytes: + // Measured: `512` + // Estimated: `3977` + // Minimum execution time: 26_000_000 picoseconds. + Weight::from_parts(27_000_000, 0) + .saturating_add(Weight::from_parts(0, 3977)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(3)) + } +} diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_scheduler.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_scheduler.rs new file mode 100644 index 00000000000..cf5610df665 --- /dev/null +++ b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_scheduler.rs @@ -0,0 +1,206 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Autogenerated weights for `pallet_scheduler` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-07-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `runner-ynta1nyy-project-238-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! EXECUTION: ``, WASM-EXECUTION: `Compiled`, CHAIN: `Some("collectives-polkadot-dev")`, DB CACHE: 1024 + +// Executed Command: +// ./target/production/polkadot-parachain +// benchmark +// pallet +// --chain=collectives-polkadot-dev +// --wasm-execution=compiled +// --pallet=pallet_scheduler +// --no-storage-info +// --no-median-slopes +// --no-min-squares +// --extrinsic=* +// --steps=50 +// --repeat=20 +// --json +// --header=./file_header.txt +// --output=./parachains/runtimes/collectives/collectives-polkadot/src/weights/ + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `pallet_scheduler`. 
+pub struct WeightInfo(PhantomData); +impl pallet_scheduler::WeightInfo for WeightInfo { + /// Storage: `Scheduler::IncompleteSince` (r:1 w:1) + /// Proof: `Scheduler::IncompleteSince` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + fn service_agendas_base() -> Weight { + // Proof Size summary in bytes: + // Measured: `31` + // Estimated: `1489` + // Minimum execution time: 3_441_000 picoseconds. + Weight::from_parts(3_604_000, 0) + .saturating_add(Weight::from_parts(0, 1489)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Scheduler::Agenda` (r:1 w:1) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(155814), added: 158289, mode: `MaxEncodedLen`) + /// The range of component `s` is `[0, 200]`. + fn service_agenda_base(s: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `77 + s * (177 ±0)` + // Estimated: `159279` + // Minimum execution time: 2_879_000 picoseconds. + Weight::from_parts(2_963_000, 0) + .saturating_add(Weight::from_parts(0, 159279)) + // Standard Error: 3_764 + .saturating_add(Weight::from_parts(909_557, 0).saturating_mul(s.into())) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + fn service_task_base() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 5_172_000 picoseconds. + Weight::from_parts(5_294_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + /// Storage: `Preimage::PreimageFor` (r:1 w:1) + /// Proof: `Preimage::PreimageFor` (`max_values`: None, `max_size`: Some(4194344), added: 4196819, mode: `Measured`) + /// Storage: `Preimage::StatusFor` (r:1 w:1) + /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) + /// The range of component `s` is `[128, 4194304]`. + fn service_task_fetched(s: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `213 + s * (1 ±0)` + // Estimated: `3678 + s * (1 ±0)` + // Minimum execution time: 19_704_000 picoseconds. + Weight::from_parts(19_903_000, 0) + .saturating_add(Weight::from_parts(0, 3678)) + // Standard Error: 5 + .saturating_add(Weight::from_parts(1_394, 0).saturating_mul(s.into())) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + .saturating_add(Weight::from_parts(0, 1).saturating_mul(s.into())) + } + /// Storage: `Scheduler::Lookup` (r:0 w:1) + /// Proof: `Scheduler::Lookup` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`) + fn service_task_named() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 6_359_000 picoseconds. + Weight::from_parts(6_599_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + .saturating_add(T::DbWeight::get().writes(1)) + } + fn service_task_periodic() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 5_217_000 picoseconds. + Weight::from_parts(5_333_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + fn execute_dispatch_signed() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 2_406_000 picoseconds. 
+ Weight::from_parts(2_541_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + fn execute_dispatch_unsigned() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 2_370_000 picoseconds. + Weight::from_parts(2_561_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + /// Storage: `Scheduler::Agenda` (r:1 w:1) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(155814), added: 158289, mode: `MaxEncodedLen`) + /// The range of component `s` is `[0, 199]`. + fn schedule(s: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `77 + s * (177 ±0)` + // Estimated: `159279` + // Minimum execution time: 11_784_000 picoseconds. + Weight::from_parts(5_574_404, 0) + .saturating_add(Weight::from_parts(0, 159279)) + // Standard Error: 7_217 + .saturating_add(Weight::from_parts(1_035_248, 0).saturating_mul(s.into())) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Scheduler::Agenda` (r:1 w:1) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(155814), added: 158289, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Lookup` (r:0 w:1) + /// Proof: `Scheduler::Lookup` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`) + /// The range of component `s` is `[1, 200]`. + fn cancel(s: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `77 + s * (177 ±0)` + // Estimated: `159279` + // Minimum execution time: 16_373_000 picoseconds. + Weight::from_parts(3_088_135, 0) + .saturating_add(Weight::from_parts(0, 159279)) + // Standard Error: 7_095 + .saturating_add(Weight::from_parts(1_745_270, 0).saturating_mul(s.into())) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `Scheduler::Lookup` (r:1 w:1) + /// Proof: `Scheduler::Lookup` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:1 w:1) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(155814), added: 158289, mode: `MaxEncodedLen`) + /// The range of component `s` is `[0, 199]`. + fn schedule_named(s: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `468 + s * (179 ±0)` + // Estimated: `159279` + // Minimum execution time: 14_822_000 picoseconds. + Weight::from_parts(9_591_402, 0) + .saturating_add(Weight::from_parts(0, 159279)) + // Standard Error: 7_151 + .saturating_add(Weight::from_parts(1_058_408, 0).saturating_mul(s.into())) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `Scheduler::Lookup` (r:1 w:1) + /// Proof: `Scheduler::Lookup` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`) + /// Storage: `Scheduler::Agenda` (r:1 w:1) + /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(155814), added: 158289, mode: `MaxEncodedLen`) + /// The range of component `s` is `[1, 200]`. + fn cancel_named(s: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `509 + s * (179 ±0)` + // Estimated: `159279` + // Minimum execution time: 18_541_000 picoseconds. 
+ Weight::from_parts(6_522_239, 0) + .saturating_add(Weight::from_parts(0, 159279)) + // Standard Error: 8_349 + .saturating_add(Weight::from_parts(1_760_431, 0).saturating_mul(s.into())) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } +} diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_session.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_session.rs new file mode 100644 index 00000000000..2ac0804df89 --- /dev/null +++ b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_session.rs @@ -0,0 +1,80 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Autogenerated weights for `pallet_session` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-07-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `runner-ynta1nyy-project-238-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! EXECUTION: ``, WASM-EXECUTION: `Compiled`, CHAIN: `Some("collectives-polkadot-dev")`, DB CACHE: 1024 + +// Executed Command: +// ./target/production/polkadot-parachain +// benchmark +// pallet +// --chain=collectives-polkadot-dev +// --wasm-execution=compiled +// --pallet=pallet_session +// --no-storage-info +// --no-median-slopes +// --no-min-squares +// --extrinsic=* +// --steps=50 +// --repeat=20 +// --json +// --header=./file_header.txt +// --output=./parachains/runtimes/collectives/collectives-polkadot/src/weights/ + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `pallet_session`. +pub struct WeightInfo(PhantomData); +impl pallet_session::WeightInfo for WeightInfo { + /// Storage: `Session::NextKeys` (r:1 w:1) + /// Proof: `Session::NextKeys` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Session::KeyOwner` (r:1 w:1) + /// Proof: `Session::KeyOwner` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn set_keys() -> Weight { + // Proof Size summary in bytes: + // Measured: `270` + // Estimated: `3735` + // Minimum execution time: 16_663_000 picoseconds. 
+ Weight::from_parts(17_246_000, 0) + .saturating_add(Weight::from_parts(0, 3735)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `Session::NextKeys` (r:1 w:1) + /// Proof: `Session::NextKeys` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Session::KeyOwner` (r:0 w:1) + /// Proof: `Session::KeyOwner` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn purge_keys() -> Weight { + // Proof Size summary in bytes: + // Measured: `242` + // Estimated: `3707` + // Minimum execution time: 11_850_000 picoseconds. + Weight::from_parts(12_204_000, 0) + .saturating_add(Weight::from_parts(0, 3707)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(2)) + } +} diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_timestamp.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_timestamp.rs new file mode 100644 index 00000000000..ca06f43f92e --- /dev/null +++ b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_timestamp.rs @@ -0,0 +1,74 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Autogenerated weights for `pallet_timestamp` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-07-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `runner-ynta1nyy-project-238-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! EXECUTION: ``, WASM-EXECUTION: `Compiled`, CHAIN: `Some("collectives-polkadot-dev")`, DB CACHE: 1024 + +// Executed Command: +// ./target/production/polkadot-parachain +// benchmark +// pallet +// --chain=collectives-polkadot-dev +// --wasm-execution=compiled +// --pallet=pallet_timestamp +// --no-storage-info +// --no-median-slopes +// --no-min-squares +// --extrinsic=* +// --steps=50 +// --repeat=20 +// --json +// --header=./file_header.txt +// --output=./parachains/runtimes/collectives/collectives-polkadot/src/weights/ + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `pallet_timestamp`. +pub struct WeightInfo(PhantomData); +impl pallet_timestamp::WeightInfo for WeightInfo { + /// Storage: `Timestamp::Now` (r:1 w:1) + /// Proof: `Timestamp::Now` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) + /// Storage: `Aura::CurrentSlot` (r:1 w:0) + /// Proof: `Aura::CurrentSlot` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) + fn set() -> Weight { + // Proof Size summary in bytes: + // Measured: `49` + // Estimated: `1493` + // Minimum execution time: 7_863_000 picoseconds. 
+ Weight::from_parts(8_183_000, 0) + .saturating_add(Weight::from_parts(0, 1493)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } + fn on_finalize() -> Weight { + // Proof Size summary in bytes: + // Measured: `57` + // Estimated: `0` + // Minimum execution time: 3_460_000 picoseconds. + Weight::from_parts(3_577_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } +} diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_utility.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_utility.rs new file mode 100644 index 00000000000..c60a79d91da --- /dev/null +++ b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_utility.rs @@ -0,0 +1,101 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Autogenerated weights for `pallet_utility` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-07-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `runner-ynta1nyy-project-238-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! EXECUTION: ``, WASM-EXECUTION: `Compiled`, CHAIN: `Some("collectives-polkadot-dev")`, DB CACHE: 1024 + +// Executed Command: +// ./target/production/polkadot-parachain +// benchmark +// pallet +// --chain=collectives-polkadot-dev +// --wasm-execution=compiled +// --pallet=pallet_utility +// --no-storage-info +// --no-median-slopes +// --no-min-squares +// --extrinsic=* +// --steps=50 +// --repeat=20 +// --json +// --header=./file_header.txt +// --output=./parachains/runtimes/collectives/collectives-polkadot/src/weights/ + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `pallet_utility`. +pub struct WeightInfo(PhantomData); +impl pallet_utility::WeightInfo for WeightInfo { + /// The range of component `c` is `[0, 1000]`. + fn batch(c: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 6_650_000 picoseconds. + Weight::from_parts(7_474_437, 0) + .saturating_add(Weight::from_parts(0, 0)) + // Standard Error: 1_625 + .saturating_add(Weight::from_parts(4_996_146, 0).saturating_mul(c.into())) + } + fn as_derivative() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 4_612_000 picoseconds. + Weight::from_parts(4_774_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + /// The range of component `c` is `[0, 1000]`. + fn batch_all(c: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 6_744_000 picoseconds. 
+ Weight::from_parts(10_889_913, 0) + .saturating_add(Weight::from_parts(0, 0)) + // Standard Error: 1_281 + .saturating_add(Weight::from_parts(5_218_293, 0).saturating_mul(c.into())) + } + fn dispatch_as() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 8_673_000 picoseconds. + Weight::from_parts(8_980_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + /// The range of component `c` is `[0, 1000]`. + fn force_batch(c: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 6_744_000 picoseconds. + Weight::from_parts(7_801_721, 0) + .saturating_add(Weight::from_parts(0, 0)) + // Standard Error: 1_395 + .saturating_add(Weight::from_parts(5_000_971, 0).saturating_mul(c.into())) + } +} diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_xcm.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_xcm.rs new file mode 100644 index 00000000000..a3b42cb86c4 --- /dev/null +++ b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_xcm.rs @@ -0,0 +1,323 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Cumulus. + +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . + +//! Autogenerated weights for `pallet_xcm` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-11-10, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `runner-yprdrvc7-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("collectives-westend-dev")`, DB CACHE: 1024 + +// Executed Command: +// target/production/polkadot-parachain +// benchmark +// pallet +// --steps=50 +// --repeat=20 +// --extrinsic=* +// --wasm-execution=compiled +// --heap-pages=4096 +// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json +// --pallet=pallet_xcm +// --chain=collectives-westend-dev +// --header=./cumulus/file_header.txt +// --output=./cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/ + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `pallet_xcm`. 
+pub struct WeightInfo(PhantomData); +impl pallet_xcm::WeightInfo for WeightInfo { + /// Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) + /// Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) + /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) + /// Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) + /// Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) + /// Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) + /// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + fn send() -> Weight { + // Proof Size summary in bytes: + // Measured: `145` + // Estimated: `3610` + // Minimum execution time: 25_746_000 picoseconds. + Weight::from_parts(26_349_000, 0) + .saturating_add(Weight::from_parts(0, 3610)) + .saturating_add(T::DbWeight::get().reads(6)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `ParachainInfo::ParachainId` (r:1 w:0) + /// Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + fn teleport_assets() -> Weight { + // Proof Size summary in bytes: + // Measured: `69` + // Estimated: `1489` + // Minimum execution time: 22_660_000 picoseconds. + Weight::from_parts(23_173_000, 0) + .saturating_add(Weight::from_parts(0, 1489)) + .saturating_add(T::DbWeight::get().reads(1)) + } + /// Storage: `Benchmark::Override` (r:0 w:0) + /// Proof: `Benchmark::Override` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn reserve_transfer_assets() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 18_446_744_073_709_551_000 picoseconds. + Weight::from_parts(18_446_744_073_709_551_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + /// Storage: `Benchmark::Override` (r:0 w:0) + /// Proof: `Benchmark::Override` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn execute() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 18_446_744_073_709_551_000 picoseconds. + Weight::from_parts(18_446_744_073_709_551_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + /// Storage: `PolkadotXcm::SupportedVersion` (r:0 w:1) + /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn force_xcm_version() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 7_321_000 picoseconds. + Weight::from_parts(7_542_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `PolkadotXcm::SafeXcmVersion` (r:0 w:1) + /// Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + fn force_default_xcm_version() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 2_232_000 picoseconds. 
+ Weight::from_parts(2_395_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `PolkadotXcm::VersionNotifiers` (r:1 w:1) + /// Proof: `PolkadotXcm::VersionNotifiers` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::QueryCounter` (r:1 w:1) + /// Proof: `PolkadotXcm::QueryCounter` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) + /// Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) + /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) + /// Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) + /// Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) + /// Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) + /// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::Queries` (r:0 w:1) + /// Proof: `PolkadotXcm::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn force_subscribe_version_notify() -> Weight { + // Proof Size summary in bytes: + // Measured: `145` + // Estimated: `3610` + // Minimum execution time: 29_006_000 picoseconds. + Weight::from_parts(29_777_000, 0) + .saturating_add(Weight::from_parts(0, 3610)) + .saturating_add(T::DbWeight::get().reads(8)) + .saturating_add(T::DbWeight::get().writes(5)) + } + /// Storage: `PolkadotXcm::VersionNotifiers` (r:1 w:1) + /// Proof: `PolkadotXcm::VersionNotifiers` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) + /// Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) + /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) + /// Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) + /// Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) + /// Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) + /// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::Queries` (r:0 w:1) + /// Proof: `PolkadotXcm::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn force_unsubscribe_version_notify() -> Weight { + // Proof Size summary in bytes: + // Measured: `363` + // Estimated: `3828` + // Minimum execution time: 31_245_000 picoseconds. 
+ Weight::from_parts(32_125_000, 0) + .saturating_add(Weight::from_parts(0, 3828)) + .saturating_add(T::DbWeight::get().reads(7)) + .saturating_add(T::DbWeight::get().writes(4)) + } + /// Storage: `PolkadotXcm::XcmExecutionSuspended` (r:0 w:1) + /// Proof: `PolkadotXcm::XcmExecutionSuspended` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + fn force_suspension() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 2_255_000 picoseconds. + Weight::from_parts(2_399_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `PolkadotXcm::SupportedVersion` (r:4 w:2) + /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn migrate_supported_version() -> Weight { + // Proof Size summary in bytes: + // Measured: `162` + // Estimated: `11052` + // Minimum execution time: 16_521_000 picoseconds. + Weight::from_parts(17_001_000, 0) + .saturating_add(Weight::from_parts(0, 11052)) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `PolkadotXcm::VersionNotifiers` (r:4 w:2) + /// Proof: `PolkadotXcm::VersionNotifiers` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn migrate_version_notifiers() -> Weight { + // Proof Size summary in bytes: + // Measured: `166` + // Estimated: `11056` + // Minimum execution time: 16_486_000 picoseconds. + Weight::from_parts(16_729_000, 0) + .saturating_add(Weight::from_parts(0, 11056)) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:5 w:0) + /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn already_notified_target() -> Weight { + // Proof Size summary in bytes: + // Measured: `173` + // Estimated: `13538` + // Minimum execution time: 18_037_000 picoseconds. + Weight::from_parts(18_310_000, 0) + .saturating_add(Weight::from_parts(0, 13538)) + .saturating_add(T::DbWeight::get().reads(5)) + } + /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:2 w:1) + /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) + /// Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) + /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) + /// Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) + /// Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) + /// Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) + /// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + fn notify_current_targets() -> Weight { + // Proof Size summary in bytes: + // Measured: `212` + // Estimated: `6152` + // Minimum execution time: 27_901_000 picoseconds. 
+ Weight::from_parts(28_566_000, 0) + .saturating_add(Weight::from_parts(0, 6152)) + .saturating_add(T::DbWeight::get().reads(8)) + .saturating_add(T::DbWeight::get().writes(3)) + } + /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:3 w:0) + /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn notify_target_migration_fail() -> Weight { + // Proof Size summary in bytes: + // Measured: `206` + // Estimated: `8621` + // Minimum execution time: 9_299_000 picoseconds. + Weight::from_parts(9_547_000, 0) + .saturating_add(Weight::from_parts(0, 8621)) + .saturating_add(T::DbWeight::get().reads(3)) + } + /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:4 w:2) + /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn migrate_version_notify_targets() -> Weight { + // Proof Size summary in bytes: + // Measured: `173` + // Estimated: `11063` + // Minimum execution time: 16_768_000 picoseconds. + Weight::from_parts(17_215_000, 0) + .saturating_add(Weight::from_parts(0, 11063)) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:4 w:2) + /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) + /// Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) + /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) + /// Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) + /// Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) + /// Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) + /// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + fn migrate_and_notify_old_targets() -> Weight { + // Proof Size summary in bytes: + // Measured: `215` + // Estimated: `11105` + // Minimum execution time: 35_134_000 picoseconds. + Weight::from_parts(35_883_000, 0) + .saturating_add(Weight::from_parts(0, 11105)) + .saturating_add(T::DbWeight::get().reads(10)) + .saturating_add(T::DbWeight::get().writes(4)) + } + /// Storage: `PolkadotXcm::QueryCounter` (r:1 w:1) + /// Proof: `PolkadotXcm::QueryCounter` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::Queries` (r:0 w:1) + /// Proof: `PolkadotXcm::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn new_query() -> Weight { + // Proof Size summary in bytes: + // Measured: `103` + // Estimated: `1588` + // Minimum execution time: 4_562_000 picoseconds. 
+ Weight::from_parts(4_802_000, 0) + .saturating_add(Weight::from_parts(0, 1588)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `PolkadotXcm::Queries` (r:1 w:1) + /// Proof: `PolkadotXcm::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn take_response() -> Weight { + // Proof Size summary in bytes: + // Measured: `7740` + // Estimated: `11205` + // Minimum execution time: 26_865_000 picoseconds. + Weight::from_parts(27_400_000, 0) + .saturating_add(Weight::from_parts(0, 11205)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } +} diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/paritydb_weights.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/paritydb_weights.rs new file mode 100644 index 00000000000..25679703831 --- /dev/null +++ b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/paritydb_weights.rs @@ -0,0 +1,63 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +pub mod constants { + use frame_support::{ + parameter_types, + weights::{constants, RuntimeDbWeight}, + }; + + parameter_types! { + /// `ParityDB` can be enabled with a feature flag, but is still experimental. These weights + /// are available for brave runtime engineers who may want to try this out as default. + pub const ParityDbWeight: RuntimeDbWeight = RuntimeDbWeight { + read: 8_000 * constants::WEIGHT_REF_TIME_PER_NANOS, + write: 50_000 * constants::WEIGHT_REF_TIME_PER_NANOS, + }; + } + + #[cfg(test)] + mod test_db_weights { + use super::constants::ParityDbWeight as W; + use frame_support::weights::constants; + + /// Checks that all weights exist and have sane values. + // NOTE: If this test fails but you are sure that the generated values are fine, + // you can delete it. + #[test] + fn sane() { + // At least 1 µs. + assert!( + W::get().reads(1).ref_time() >= constants::WEIGHT_REF_TIME_PER_MICROS, + "Read weight should be at least 1 µs." + ); + assert!( + W::get().writes(1).ref_time() >= constants::WEIGHT_REF_TIME_PER_MICROS, + "Write weight should be at least 1 µs." + ); + // At most 1 ms. + assert!( + W::get().reads(1).ref_time() <= constants::WEIGHT_REF_TIME_PER_MILLIS, + "Read weight should be at most 1 ms." + ); + assert!( + W::get().writes(1).ref_time() <= constants::WEIGHT_REF_TIME_PER_MILLIS, + "Write weight should be at most 1 ms." + ); + } + } +} diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/rocksdb_weights.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/rocksdb_weights.rs new file mode 100644 index 00000000000..3dd817aa6f1 --- /dev/null +++ b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/rocksdb_weights.rs @@ -0,0 +1,63 @@ +// This file is part of Substrate. 
+ +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +pub mod constants { + use frame_support::{ + parameter_types, + weights::{constants, RuntimeDbWeight}, + }; + + parameter_types! { + /// By default, Substrate uses `RocksDB`, so this will be the weight used throughout + /// the runtime. + pub const RocksDbWeight: RuntimeDbWeight = RuntimeDbWeight { + read: 25_000 * constants::WEIGHT_REF_TIME_PER_NANOS, + write: 100_000 * constants::WEIGHT_REF_TIME_PER_NANOS, + }; + } + + #[cfg(test)] + mod test_db_weights { + use super::constants::RocksDbWeight as W; + use frame_support::weights::constants; + + /// Checks that all weights exist and have sane values. + // NOTE: If this test fails but you are sure that the generated values are fine, + // you can delete it. + #[test] + fn sane() { + // At least 1 µs. + assert!( + W::get().reads(1).ref_time() >= constants::WEIGHT_REF_TIME_PER_MICROS, + "Read weight should be at least 1 µs." + ); + assert!( + W::get().writes(1).ref_time() >= constants::WEIGHT_REF_TIME_PER_MICROS, + "Write weight should be at least 1 µs." + ); + // At most 1 ms. + assert!( + W::get().reads(1).ref_time() <= constants::WEIGHT_REF_TIME_PER_MILLIS, + "Read weight should be at most 1 ms." + ); + assert!( + W::get().writes(1).ref_time() <= constants::WEIGHT_REF_TIME_PER_MILLIS, + "Write weight should be at most 1 ms." + ); + } + } +} diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/src/xcm_config.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/xcm_config.rs new file mode 100644 index 00000000000..d58995827fa --- /dev/null +++ b/cumulus/parachains/runtimes/collectives/collectives-westend/src/xcm_config.rs @@ -0,0 +1,364 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+
+use super::{
+ AccountId, AllPalletsWithSystem, Balances, BaseDeliveryFee, FeeAssetId, Fellows, ParachainInfo,
+ ParachainSystem, PolkadotXcm, Runtime, RuntimeCall, RuntimeEvent, RuntimeOrigin,
+ TransactionByteFee, WeightToFee, WestendTreasuryAccount, XcmpQueue,
+};
+use frame_support::{
+ match_types, parameter_types,
+ traits::{ConstU32, Contains, Equals, Everything, Nothing},
+ weights::Weight,
+};
+use frame_system::EnsureRoot;
+use pallet_xcm::XcmPassthrough;
+use parachains_common::{
+ impls::ToStakingPot,
+ xcm_config::{ConcreteAssetFromSystem, RelayOrOtherSystemParachains},
+};
+use polkadot_parachain_primitives::primitives::Sibling;
+use polkadot_runtime_common::xcm_sender::ExponentialPrice;
+use westend_runtime_constants::system_parachain;
+use xcm::latest::prelude::*;
+use xcm_builder::{
+ AccountId32Aliases, AllowExplicitUnpaidExecutionFrom, AllowKnownQueryResponses,
+ AllowSubscriptionsFrom, AllowTopLevelPaidExecutionFrom, CurrencyAdapter,
+ DenyReserveTransferToRelayChain, DenyThenTry, EnsureXcmOrigin, FixedWeightBounds, IsConcrete,
+ LocatableAssetId, OriginToPluralityVoice, ParentAsSuperuser, ParentIsPreset,
+ RelayChainAsNative, SiblingParachainAsNative, SiblingParachainConvertsVia,
+ SignedAccountId32AsNative, SignedToAccountId32, SovereignSignedViaLocation, TakeWeightCredit,
+ TrailingSetTopicAsId, UsingComponents, WithComputedOrigin, WithUniqueTopic,
+ XcmFeeManagerFromComponents, XcmFeeToAccount,
+};
+use xcm_executor::{traits::WithOriginFilter, XcmExecutor};
+
+const FELLOWSHIP_ADMIN_INDEX: u32 = 1;
+
+parameter_types! {
+ pub const WndLocation: MultiLocation = MultiLocation::parent();
+ pub const RelayNetwork: Option<NetworkId> = Some(NetworkId::Westend);
+ pub RelayChainOrigin: RuntimeOrigin = cumulus_pallet_xcm::Origin::Relay.into();
+ pub UniversalLocation: InteriorMultiLocation =
+ X2(GlobalConsensus(RelayNetwork::get().unwrap()), Parachain(ParachainInfo::parachain_id().into()));
+ pub RelayTreasuryLocation: MultiLocation = (Parent, PalletInstance(westend_runtime_constants::TREASURY_PALLET_ID)).into();
+ pub CheckingAccount: AccountId = PolkadotXcm::check_account();
+ pub const GovernanceLocation: MultiLocation = MultiLocation::parent();
+ pub const FellowshipAdminBodyId: BodyId = BodyId::Index(FELLOWSHIP_ADMIN_INDEX);
+ pub AssetHub: MultiLocation = (Parent, Parachain(1000)).into();
+ pub AssetHubUsdtId: AssetId = (PalletInstance(50), GeneralIndex(1984)).into();
+ pub UsdtAssetHub: LocatableAssetId = LocatableAssetId {
+ location: AssetHub::get(),
+ asset_id: AssetHubUsdtId::get(),
+ };
+ pub WndAssetHub: LocatableAssetId = LocatableAssetId {
+ location: AssetHub::get(),
+ asset_id: WndLocation::get().into(),
+ };
+}
+
+/// Type for specifying how a `MultiLocation` can be converted into an `AccountId`. This is used
+/// when determining ownership of accounts for asset transacting and when attempting to use XCM
+/// `Transact` in order to determine the dispatch Origin.
+pub type LocationToAccountId = (
+ // The parent (Relay-chain) origin converts to the parent `AccountId`.
+ ParentIsPreset<AccountId>,
+ // Sibling parachain origins convert to AccountId via the `ParaId::into`.
+ SiblingParachainConvertsVia<Sibling, AccountId>,
+ // Straight up local `AccountId32` origins just alias directly to `AccountId`.
+ AccountId32Aliases<RelayNetwork, AccountId>,
+);
+
+/// Means for transacting the native currency on this chain.
+pub type CurrencyTransactor = CurrencyAdapter< + // Use this currency: + Balances, + // Use this currency when it is a fungible asset matching the given location or name: + IsConcrete, + // Convert an XCM MultiLocation into a local account id: + LocationToAccountId, + // Our chain's account ID type (we can't get away without mentioning it explicitly): + AccountId, + // We don't track any teleports of `Balances`. + (), +>; + +/// This is the type we use to convert an (incoming) XCM origin into a local `Origin` instance, +/// ready for dispatching a transaction with Xcm's `Transact`. There is an `OriginKind` which can +/// biases the kind of local `Origin` it will become. +pub type XcmOriginToTransactDispatchOrigin = ( + // Sovereign account converter; this attempts to derive an `AccountId` from the origin location + // using `LocationToAccountId` and then turn that into the usual `Signed` origin. Useful for + // foreign chains who want to have a local sovereign account on this chain which they control. + SovereignSignedViaLocation, + // Native converter for Relay-chain (Parent) location; will convert to a `Relay` origin when + // recognised. + RelayChainAsNative, + // Native converter for sibling Parachains; will convert to a `SiblingPara` origin when + // recognised. + SiblingParachainAsNative, + // Superuser converter for the Relay-chain (Parent) location. This will allow it to issue a + // transaction from the Root origin. + ParentAsSuperuser, + // Native signed account converter; this just converts an `AccountId32` origin into a normal + // `RuntimeOrigin::Signed` origin of the same 32-byte value. + SignedAccountId32AsNative, + // Xcm origins can be represented natively under the Xcm pallet's Xcm origin. + XcmPassthrough, +); + +parameter_types! { + /// The amount of weight an XCM operation takes. This is a safe overestimate. + pub const BaseXcmWeight: Weight = Weight::from_parts(1_000_000_000, 1024); + /// A temporary weight value for each XCM instruction. + /// NOTE: This should be removed after we account for PoV weights. + pub const TempFixedXcmWeight: Weight = Weight::from_parts(1_000_000_000, 0); + pub const MaxInstructions: u32 = 100; + pub const MaxAssetsIntoHolding: u32 = 64; + // Fellows pluralistic body. + pub const FellowsBodyId: BodyId = BodyId::Technical; +} + +match_types! { + pub type ParentOrParentsPlurality: impl Contains = { + MultiLocation { parents: 1, interior: Here } | + MultiLocation { parents: 1, interior: X1(Plurality { .. }) } + }; + pub type ParentOrSiblings: impl Contains = { + MultiLocation { parents: 1, interior: Here } | + MultiLocation { parents: 1, interior: X1(_) } + }; +} + +/// A call filter for the XCM Transact instruction. This is a temporary measure until we properly +/// account for proof size weights. +/// +/// Calls that are allowed through this filter must: +/// 1. Have a fixed weight; +/// 2. Cannot lead to another call being made; +/// 3. Have a defined proof size weight, e.g. no unbounded vecs in call parameters. +pub struct SafeCallFilter; +impl Contains for SafeCallFilter { + fn contains(call: &RuntimeCall) -> bool { + #[cfg(feature = "runtime-benchmarks")] + { + if matches!(call, RuntimeCall::System(frame_system::Call::remark_with_event { .. })) { + return true + } + } + + matches!( + call, + RuntimeCall::System( + frame_system::Call::set_heap_pages { .. } | + frame_system::Call::set_code { .. } | + frame_system::Call::set_code_without_checks { .. } | + frame_system::Call::kill_prefix { .. }, + ) | RuntimeCall::ParachainSystem(..) 
| + RuntimeCall::Timestamp(..) | + RuntimeCall::Balances(..) | + RuntimeCall::CollatorSelection( + pallet_collator_selection::Call::set_desired_candidates { .. } | + pallet_collator_selection::Call::set_candidacy_bond { .. } | + pallet_collator_selection::Call::register_as_candidate { .. } | + pallet_collator_selection::Call::leave_intent { .. } | + pallet_collator_selection::Call::set_invulnerables { .. } | + pallet_collator_selection::Call::add_invulnerable { .. } | + pallet_collator_selection::Call::remove_invulnerable { .. }, + ) | RuntimeCall::Session(pallet_session::Call::purge_keys { .. }) | + RuntimeCall::PolkadotXcm(pallet_xcm::Call::force_xcm_version { .. }) | + RuntimeCall::XcmpQueue(..) | + RuntimeCall::MessageQueue(..) | + RuntimeCall::Alliance( + // `init_members` accepts unbounded vecs as arguments, + // but the call can be initiated only by root origin. + pallet_alliance::Call::init_members { .. } | + pallet_alliance::Call::vote { .. } | + pallet_alliance::Call::disband { .. } | + pallet_alliance::Call::set_rule { .. } | + pallet_alliance::Call::announce { .. } | + pallet_alliance::Call::remove_announcement { .. } | + pallet_alliance::Call::join_alliance { .. } | + pallet_alliance::Call::nominate_ally { .. } | + pallet_alliance::Call::elevate_ally { .. } | + pallet_alliance::Call::give_retirement_notice { .. } | + pallet_alliance::Call::retire { .. } | + pallet_alliance::Call::kick_member { .. } | + pallet_alliance::Call::close { .. } | + pallet_alliance::Call::abdicate_fellow_status { .. }, + ) | RuntimeCall::AllianceMotion( + pallet_collective::Call::vote { .. } | + pallet_collective::Call::disapprove_proposal { .. } | + pallet_collective::Call::close { .. }, + ) | RuntimeCall::FellowshipCollective( + pallet_ranked_collective::Call::add_member { .. } | + pallet_ranked_collective::Call::promote_member { .. } | + pallet_ranked_collective::Call::demote_member { .. } | + pallet_ranked_collective::Call::remove_member { .. }, + ) | RuntimeCall::FellowshipCore( + pallet_core_fellowship::Call::bump { .. } | + pallet_core_fellowship::Call::set_params { .. } | + pallet_core_fellowship::Call::set_active { .. } | + pallet_core_fellowship::Call::approve { .. } | + pallet_core_fellowship::Call::induct { .. } | + pallet_core_fellowship::Call::promote { .. } | + pallet_core_fellowship::Call::offboard { .. } | + pallet_core_fellowship::Call::submit_evidence { .. } | + pallet_core_fellowship::Call::import { .. }, + ) + ) + } +} + +pub type Barrier = TrailingSetTopicAsId< + DenyThenTry< + DenyReserveTransferToRelayChain, + ( + // Allow local users to buy weight credit. + TakeWeightCredit, + // Expected responses are OK. + AllowKnownQueryResponses, + // Allow XCMs with some computed origins to pass through. + WithComputedOrigin< + ( + // If the message is one that immediately attempts to pay for execution, then + // allow it. + AllowTopLevelPaidExecutionFrom, + // Parent and its pluralities (i.e. governance bodies) get free execution. + AllowExplicitUnpaidExecutionFrom, + // Subscriptions for version tracking are OK. + AllowSubscriptionsFrom, + ), + UniversalLocation, + ConstU32<8>, + >, + ), + >, +>; + +match_types! { + pub type SystemParachains: impl Contains = { + MultiLocation { + parents: 1, + interior: X1(Parachain( + system_parachain::ASSET_HUB_ID | + system_parachain::BRIDGE_HUB_ID | + system_parachain::COLLECTIVES_ID + )), + } + }; +} + +/// Locations that will not be charged fees in the executor, +/// either execution or delivery. 
+/// We only waive fees for system functions, which these locations represent.
+pub type WaivedLocations =
+ (RelayOrOtherSystemParachains<SystemParachains, Runtime>, Equals<RelayTreasuryLocation>);
+
+/// Cases where a remote origin is accepted as trusted Teleporter for a given asset:
+/// - DOT with the parent Relay Chain and sibling parachains.
+pub type TrustedTeleporters = ConcreteAssetFromSystem<WndLocation>;
+
+pub struct XcmConfig;
+impl xcm_executor::Config for XcmConfig {
+ type RuntimeCall = RuntimeCall;
+ type XcmSender = XcmRouter;
+ type AssetTransactor = CurrencyTransactor;
+ type OriginConverter = XcmOriginToTransactDispatchOrigin;
+ // Collectives does not recognize a reserve location for any asset. Users must teleport WND
+ // where allowed (e.g. with the Relay Chain).
+ type IsReserve = ();
+ type IsTeleporter = TrustedTeleporters;
+ type UniversalLocation = UniversalLocation;
+ type Barrier = Barrier;
+ type Weigher = FixedWeightBounds<TempFixedXcmWeight, RuntimeCall, MaxInstructions>;
+ type Trader =
+ UsingComponents<WeightToFee, WndLocation, AccountId, Balances, ToStakingPot<Runtime>>;
+ type ResponseHandler = PolkadotXcm;
+ type AssetTrap = PolkadotXcm;
+ type AssetClaims = PolkadotXcm;
+ type SubscriptionService = PolkadotXcm;
+ type PalletInstancesInfo = AllPalletsWithSystem;
+ type MaxAssetsIntoHolding = MaxAssetsIntoHolding;
+ type AssetLocker = ();
+ type AssetExchanger = ();
+ type FeeManager = XcmFeeManagerFromComponents<
+ WaivedLocations,
+ XcmFeeToAccount<Self::AssetTransactor, AccountId, WestendTreasuryAccount>,
+ >;
+ type MessageExporter = ();
+ type UniversalAliases = Nothing;
+ type CallDispatcher = WithOriginFilter<SafeCallFilter>;
+ type SafeCallFilter = SafeCallFilter;
+ type Aliasers = Nothing;
+}
+
+/// Converts a local signed origin into an XCM multilocation.
+/// Forms the basis for local origins sending/executing XCMs.
+pub type LocalOriginToLocation = SignedToAccountId32<RuntimeOrigin, AccountId, RelayNetwork>;
+
+pub type PriceForParentDelivery =
+ ExponentialPrice<FeeAssetId, BaseDeliveryFee, TransactionByteFee, ParachainSystem>;
+
+/// The means for routing XCM messages which are not for local execution into the right message
+/// queues.
+pub type XcmRouter = WithUniqueTopic<(
+ // Two routers - use UMP to communicate with the relay chain:
+ cumulus_primitives_utility::ParentAsUmp<ParachainSystem, PolkadotXcm, PriceForParentDelivery>,
+ // ..and XCMP to communicate with the sibling chains.
+ XcmpQueue,
+)>;
+
+#[cfg(feature = "runtime-benchmarks")]
+parameter_types! {
+ pub ReachableDest: Option<MultiLocation> = Some(Parent.into());
+}
+
+/// Type to convert the Fellows origin to a Plurality `MultiLocation` value.
+pub type FellowsToPlurality = OriginToPluralityVoice<RuntimeOrigin, Fellows, FellowsBodyId>;
+
+impl pallet_xcm::Config for Runtime {
+ type RuntimeEvent = RuntimeEvent;
+ // We only allow the Fellows to send messages.
+ type SendXcmOrigin = EnsureXcmOrigin<RuntimeOrigin, FellowsToPlurality>;
+ type XcmRouter = XcmRouter;
+ // We support local origins dispatching XCM executions in principle...
+ type ExecuteXcmOrigin = EnsureXcmOrigin<RuntimeOrigin, LocalOriginToLocation>;
+ // ... but disallow generic XCM execution. As a result only teleports are allowed.
+ type XcmExecuteFilter = Nothing;
+ type XcmExecutor = XcmExecutor<XcmConfig>;
+ type XcmTeleportFilter = Everything;
+ type XcmReserveTransferFilter = Nothing; // This parachain is not meant as a reserve location.
+ type Weigher = FixedWeightBounds; + type UniversalLocation = UniversalLocation; + type RuntimeOrigin = RuntimeOrigin; + type RuntimeCall = RuntimeCall; + const VERSION_DISCOVERY_QUEUE_SIZE: u32 = 100; + type AdvertisedXcmVersion = pallet_xcm::CurrentXcmVersion; + type Currency = Balances; + type CurrencyMatcher = (); + type TrustedLockers = (); + type SovereignAccountOf = LocationToAccountId; + type MaxLockers = ConstU32<8>; + type WeightInfo = crate::weights::pallet_xcm::WeightInfo; + type AdminOrigin = EnsureRoot; + type MaxRemoteLockConsumers = ConstU32<0>; + type RemoteLockConsumerIdentifier = (); +} + +impl cumulus_pallet_xcm::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + type XcmExecutor = XcmExecutor; +} diff --git a/cumulus/parachains/runtimes/glutton/glutton-westend/Cargo.toml b/cumulus/parachains/runtimes/glutton/glutton-westend/Cargo.toml new file mode 100644 index 00000000000..a30cdf35769 --- /dev/null +++ b/cumulus/parachains/runtimes/glutton/glutton-westend/Cargo.toml @@ -0,0 +1,138 @@ +[package] +name = "glutton-westend-runtime" +version = "1.0.0" +authors.workspace = true +edition.workspace = true +license = "Apache-2.0" +description = "Glutton parachain runtime." + +[dependencies] +codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } + +# Substrate +frame-benchmarking = { path = "../../../../../substrate/frame/benchmarking", default-features = false, optional = true} +frame-executive = { path = "../../../../../substrate/frame/executive", default-features = false} +frame-support = { path = "../../../../../substrate/frame/support", default-features = false} +frame-system = { path = "../../../../../substrate/frame/system", default-features = false} +frame-system-rpc-runtime-api = { path = "../../../../../substrate/frame/system/rpc/runtime-api", default-features = false} +frame-system-benchmarking = { path = "../../../../../substrate/frame/system/benchmarking", default-features = false, optional = true} +frame-try-runtime = { path = "../../../../../substrate/frame/try-runtime", default-features = false, optional = true} +pallet-aura = { path = "../../../../../substrate/frame/aura", default-features = false} +pallet-glutton = { path = "../../../../../substrate/frame/glutton", default-features = false, optional = true} +pallet-sudo = { path = "../../../../../substrate/frame/sudo", default-features = false, optional = true} +pallet-timestamp = { path = "../../../../../substrate/frame/timestamp", default-features = false } +sp-api = { path = "../../../../../substrate/primitives/api", default-features = false} +sp-block-builder = { path = "../../../../../substrate/primitives/block-builder", default-features = false} +sp-consensus-aura = { path = "../../../../../substrate/primitives/consensus/aura", default-features = false } +sp-core = { path = "../../../../../substrate/primitives/core", default-features = false} +sp-genesis-builder = { path = "../../../../../substrate/primitives/genesis-builder", default-features = false } +sp-inherents = { path = "../../../../../substrate/primitives/inherents", default-features = false} +pallet-message-queue = { path = "../../../../../substrate/frame/message-queue", default-features = false } +sp-offchain = { path = "../../../../../substrate/primitives/offchain", default-features = false} +sp-runtime = { path = "../../../../../substrate/primitives/runtime", default-features = false} 
+sp-session = { path = "../../../../../substrate/primitives/session", default-features = false} +sp-std = { path = "../../../../../substrate/primitives/std", default-features = false} +sp-storage = { path = "../../../../../substrate/primitives/storage", default-features = false} +sp-transaction-pool = { path = "../../../../../substrate/primitives/transaction-pool", default-features = false} +sp-version = { path = "../../../../../substrate/primitives/version", default-features = false} + +# Polkadot +xcm = { package = "staging-xcm", path = "../../../../../polkadot/xcm", default-features = false} +xcm-builder = { package = "staging-xcm-builder", path = "../../../../../polkadot/xcm/xcm-builder", default-features = false} +xcm-executor = { package = "staging-xcm-executor", path = "../../../../../polkadot/xcm/xcm-executor", default-features = false} + +# Cumulus +cumulus-pallet-aura-ext = { path = "../../../../pallets/aura-ext", default-features = false } +cumulus-pallet-parachain-system = { path = "../../../../pallets/parachain-system", default-features = false, features = ["parameterized-consensus-hook",] } +cumulus-pallet-xcm = { path = "../../../../pallets/xcm", default-features = false } +cumulus-primitives-aura = { path = "../../../../primitives/aura", default-features = false } +cumulus-primitives-core = { path = "../../../../primitives/core", default-features = false } +cumulus-primitives-timestamp = { path = "../../../../primitives/timestamp", default-features = false } +parachain-info = { package = "staging-parachain-info", path = "../../../pallets/parachain-info", default-features = false } +parachains-common = { path = "../../../common", default-features = false } + +[build-dependencies] +substrate-wasm-builder = { path = "../../../../../substrate/utils/wasm-builder" } + +[features] +default = [ "std" ] +runtime-benchmarks = [ + "cumulus-pallet-parachain-system/runtime-benchmarks", + "cumulus-primitives-core/runtime-benchmarks", + "frame-benchmarking/runtime-benchmarks", + "frame-support/runtime-benchmarks", + "frame-system-benchmarking/runtime-benchmarks", + "frame-system/runtime-benchmarks", + "pallet-glutton/runtime-benchmarks", + "pallet-message-queue/runtime-benchmarks", + "pallet-sudo/runtime-benchmarks", + "pallet-timestamp/runtime-benchmarks", + "parachains-common/runtime-benchmarks", + "sp-runtime/runtime-benchmarks", + "xcm-builder/runtime-benchmarks", + "xcm-executor/runtime-benchmarks", +] +std = [ + "codec/std", + "cumulus-pallet-aura-ext/std", + "cumulus-pallet-parachain-system/std", + "cumulus-pallet-xcm/std", + "cumulus-primitives-aura/std", + "cumulus-primitives-core/std", + "cumulus-primitives-timestamp/std", + "frame-benchmarking?/std", + "frame-executive/std", + "frame-support/std", + "frame-system-benchmarking?/std", + "frame-system-rpc-runtime-api/std", + "frame-system/std", + "frame-try-runtime?/std", + "pallet-aura/std", + "pallet-glutton/std", + "pallet-message-queue/std", + "pallet-sudo/std", + "pallet-timestamp/std", + "parachain-info/std", + "parachains-common/std", + "scale-info/std", + "sp-api/std", + "sp-block-builder/std", + "sp-consensus-aura/std", + "sp-core/std", + "sp-genesis-builder/std", + "sp-inherents/std", + "sp-offchain/std", + "sp-runtime/std", + "sp-session/std", + "sp-std/std", + "sp-storage/std", + "sp-transaction-pool/std", + "sp-version/std", + "xcm-builder/std", + "xcm-executor/std", + "xcm/std", +] +try-runtime = [ + "cumulus-pallet-aura-ext/try-runtime", + "cumulus-pallet-parachain-system/try-runtime", + 
"cumulus-pallet-xcm/try-runtime", + "frame-executive/try-runtime", + "frame-support/try-runtime", + "frame-system/try-runtime", + "frame-try-runtime/try-runtime", + "pallet-aura/try-runtime", + "pallet-glutton/try-runtime", + "pallet-message-queue/try-runtime", + "pallet-sudo/try-runtime", + "pallet-timestamp/try-runtime", + "parachain-info/try-runtime", + "sp-runtime/try-runtime", +] + +experimental = [ "pallet-aura/experimental" ] + +# A feature that should be enabled when the runtime should be built for on-chain +# deployment. This will disable stuff that shouldn't be part of the on-chain wasm +# to make it smaller like logging for example. +on-chain-release-build = [ "sp-api/disable-logging" ] diff --git a/cumulus/parachains/runtimes/glutton/glutton-westend/build.rs b/cumulus/parachains/runtimes/glutton/glutton-westend/build.rs new file mode 100644 index 00000000000..1580e6f07be --- /dev/null +++ b/cumulus/parachains/runtimes/glutton/glutton-westend/build.rs @@ -0,0 +1,24 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use substrate_wasm_builder::WasmBuilder; + +fn main() { + WasmBuilder::new() + .with_current_project() + .export_heap_base() + .import_memory() + .build() +} diff --git a/cumulus/parachains/runtimes/glutton/glutton-westend/src/lib.rs b/cumulus/parachains/runtimes/glutton/glutton-westend/src/lib.rs new file mode 100644 index 00000000000..60107281c22 --- /dev/null +++ b/cumulus/parachains/runtimes/glutton/glutton-westend/src/lib.rs @@ -0,0 +1,532 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! # Glutton Westend Runtime +//! +//! The purpose of the Glutton parachain is to do stress testing on the Kusama +//! network. This runtime targets the Westend runtime to allow development +//! separate to the Kusama runtime. +//! +//! There may be multiple instances of the Glutton parachain deployed and +//! connected to its parent relay chain. +//! +//! These parachains are not holding any real value. Their purpose is to stress +//! test the network. +//! +//! ### Governance +//! +//! Glutton defers its governance (namely, its `Root` origin), to its Relay +//! Chain parent, Kusama (or Westend for development purposes). +//! +//! ### XCM +//! +//! Since the main goal of Glutton is solely stress testing, the parachain will +//! 
only be able receive XCM messages from the Relay Chain via DMP. This way the +//! Glutton parachains will be able to listen for upgrades that are coming from +//! the Relay chain. + +#![cfg_attr(not(feature = "std"), no_std)] +#![recursion_limit = "256"] + +// Make the WASM binary available. +#[cfg(feature = "std")] +include!(concat!(env!("OUT_DIR"), "/wasm_binary.rs")); + +pub mod weights; +pub mod xcm_config; + +use cumulus_pallet_parachain_system::RelayNumberMonotonicallyIncreases; +use sp_api::impl_runtime_apis; +pub use sp_consensus_aura::sr25519::AuthorityId as AuraId; +use sp_core::{crypto::KeyTypeId, OpaqueMetadata}; +use sp_runtime::{ + create_runtime_str, generic, impl_opaque_keys, + traits::{AccountIdLookup, BlakeTwo256, Block as BlockT}, + transaction_validity::{TransactionSource, TransactionValidity}, + ApplyExtrinsicResult, +}; +use sp_std::prelude::*; +#[cfg(feature = "std")] +use sp_version::NativeVersion; +use sp_version::RuntimeVersion; + +use cumulus_primitives_core::AggregateMessageOrigin; +pub use frame_support::{ + construct_runtime, + dispatch::DispatchClass, + genesis_builder_helper::{build_config, create_default_config}, + parameter_types, + traits::{ + ConstBool, ConstU32, ConstU64, ConstU8, EitherOfDiverse, Everything, IsInVec, Randomness, + }, + weights::{ + constants::{ + BlockExecutionWeight, ExtrinsicBaseWeight, RocksDbWeight, WEIGHT_REF_TIME_PER_SECOND, + }, + IdentityFee, Weight, + }, + PalletId, StorageValue, +}; +use frame_system::{ + limits::{BlockLength, BlockWeights}, + EnsureRoot, +}; +use parachains_common::{AccountId, Signature}; +#[cfg(any(feature = "std", test))] +pub use sp_runtime::BuildStorage; +pub use sp_runtime::{Perbill, Permill}; + +impl_opaque_keys! { + pub struct SessionKeys { + pub aura: Aura, + } +} + +#[sp_version::runtime_version] +pub const VERSION: RuntimeVersion = RuntimeVersion { + spec_name: create_runtime_str!("glutton-westend"), + impl_name: create_runtime_str!("glutton-westend"), + authoring_version: 1, + spec_version: 10000, + impl_version: 0, + apis: RUNTIME_API_VERSIONS, + transaction_version: 1, + state_version: 1, +}; + +/// The version information used to identify this runtime when compiled natively. +#[cfg(feature = "std")] +pub fn native_version() -> NativeVersion { + NativeVersion { runtime_version: VERSION, can_author_with: Default::default() } +} + +/// We assume that ~10% of the block weight is consumed by `on_initialize` handlers. +/// This is used to limit the maximal weight of a single extrinsic. +const AVERAGE_ON_INITIALIZE_RATIO: Perbill = Perbill::from_percent(10); +/// We allow `Normal` extrinsics to fill up the block up to 75%, the rest can be used +/// by Operational extrinsics. +const NORMAL_DISPATCH_RATIO: Perbill = Perbill::from_percent(75); +/// We allow for .5 seconds of compute with a 12 second average block time. +const MAXIMUM_BLOCK_WEIGHT: Weight = Weight::from_parts( + WEIGHT_REF_TIME_PER_SECOND.saturating_mul(2), + cumulus_primitives_core::relay_chain::MAX_POV_SIZE as u64, +); + +/// Maximum number of blocks simultaneously accepted by the Runtime, not yet included +/// into the relay chain. +const UNINCLUDED_SEGMENT_CAPACITY: u32 = 3; +/// How many parachain blocks are processed by the relay chain per parent. Limits the +/// number of blocks authored per slot. +const BLOCK_PROCESSING_VELOCITY: u32 = 2; +/// Relay chain slot duration, in milliseconds. +const RELAY_CHAIN_SLOT_DURATION_MILLIS: u32 = 6000; + +/// This determines the average expected block time that we are targeting. 
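As a concrete illustration of the DMP flow described in the doc comment above, here is a hedged sketch (not code from this runtime) of the kind of message the relay chain's governance could send down to a Glutton collator; the encoded call bytes and the weight limit are placeholder values.

```rust
// A minimal sketch, assuming this runtime's XCM configuration as defined in
// xcm_config.rs (ParentAsSuperuser origin conversion, unpaid execution admitted
// only for the parent). `encoded_call` and the weight figure are placeholders.
use frame_support::weights::Weight;
use xcm::latest::prelude::*;

fn relay_root_message(encoded_call: Vec<u8>) -> Xcm<()> {
    Xcm(vec![
        // The barrier only admits messages that explicitly request unpaid execution.
        UnpaidExecution { weight_limit: Unlimited, check_origin: None },
        // Dispatch the SCALE-encoded call with the local Root (superuser) origin.
        Transact {
            origin_kind: OriginKind::Superuser,
            require_weight_at_most: Weight::from_parts(1_000_000_000, 64 * 1024),
            call: encoded_call.into(),
        },
    ])
}
```

Because the executor leaves `Trader` and `AssetTransactor` unset, a message that tried to pay fees or move assets would simply fail, which matches the stress-testing-only scope.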
+/// Blocks will be produced at a minimum duration defined by `SLOT_DURATION`. +/// `SLOT_DURATION` is picked up by `pallet_timestamp` which is in turn picked +/// up by `pallet_aura` to implement `fn slot_duration()`. +/// +/// Change this to adjust the block time. +pub const MILLISECS_PER_BLOCK: u64 = 6000; +pub const SLOT_DURATION: u64 = MILLISECS_PER_BLOCK; + +parameter_types! { + pub const BlockHashCount: BlockNumber = 4096; + pub const Version: RuntimeVersion = VERSION; + pub RuntimeBlockLength: BlockLength = + BlockLength::max_with_normal_ratio(5 * 1024 * 1024, NORMAL_DISPATCH_RATIO); + pub RuntimeBlockWeights: BlockWeights = BlockWeights::builder() + .base_block(BlockExecutionWeight::get()) + .for_class(DispatchClass::all(), |weights| { + weights.base_extrinsic = ExtrinsicBaseWeight::get(); + }) + .for_class(DispatchClass::Normal, |weights| { + weights.max_total = Some(NORMAL_DISPATCH_RATIO * MAXIMUM_BLOCK_WEIGHT); + }) + .for_class(DispatchClass::Operational, |weights| { + weights.max_total = Some(MAXIMUM_BLOCK_WEIGHT); + // Operational transactions have some extra reserved space, so that they + // are included even if block reached `MAXIMUM_BLOCK_WEIGHT`. + weights.reserved = Some( + MAXIMUM_BLOCK_WEIGHT - NORMAL_DISPATCH_RATIO * MAXIMUM_BLOCK_WEIGHT + ); + }) + .avg_block_initialization(AVERAGE_ON_INITIALIZE_RATIO) + .build_or_panic(); + pub const SS58Prefix: u8 = 42; +} + +impl frame_system::Config for Runtime { + type AccountId = AccountId; + type RuntimeCall = RuntimeCall; + type Lookup = AccountIdLookup; + type Nonce = Nonce; + type Hash = Hash; + type Hashing = BlakeTwo256; + type Block = Block; + type RuntimeEvent = RuntimeEvent; + type RuntimeOrigin = RuntimeOrigin; + type BlockHashCount = BlockHashCount; + type Version = Version; + type PalletInfo = PalletInfo; + type AccountData = (); + type OnNewAccount = (); + type OnKilledAccount = (); + type DbWeight = (); + type BaseCallFilter = frame_support::traits::Everything; + type SystemWeightInfo = (); + type BlockWeights = RuntimeBlockWeights; + type BlockLength = RuntimeBlockLength; + type SS58Prefix = SS58Prefix; + type OnSetCode = cumulus_pallet_parachain_system::ParachainSetCode; + type MaxConsumers = frame_support::traits::ConstU32<16>; +} + +parameter_types! { + // We do anything the parent chain tells us in this runtime. + pub const ReservedDmpWeight: Weight = MAXIMUM_BLOCK_WEIGHT.saturating_div(2); + pub const RelayOrigin: AggregateMessageOrigin = AggregateMessageOrigin::Parent; +} + +type ConsensusHook = cumulus_pallet_aura_ext::FixedVelocityConsensusHook< + Runtime, + RELAY_CHAIN_SLOT_DURATION_MILLIS, + BLOCK_PROCESSING_VELOCITY, + UNINCLUDED_SEGMENT_CAPACITY, +>; + +impl cumulus_pallet_parachain_system::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + type OnSystemEvent = (); + type SelfParaId = parachain_info::Pallet; + type DmpQueue = frame_support::traits::EnqueueWithOrigin; + type OutboundXcmpMessageSource = (); + type ReservedDmpWeight = ReservedDmpWeight; + type XcmpMessageHandler = (); + type ReservedXcmpWeight = (); + type CheckAssociatedRelayNumber = RelayNumberMonotonicallyIncreases; + type ConsensusHook = ConsensusHook; + type WeightInfo = weights::cumulus_pallet_parachain_system::WeightInfo; +} + +parameter_types! 
{ + pub MessageQueueServiceWeight: Weight = Perbill::from_percent(80) * + RuntimeBlockWeights::get().max_block; +} + +impl pallet_message_queue::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + type WeightInfo = weights::pallet_message_queue::WeightInfo; + #[cfg(feature = "runtime-benchmarks")] + type MessageProcessor = pallet_message_queue::mock_helpers::NoopMessageProcessor< + cumulus_primitives_core::AggregateMessageOrigin, + >; + #[cfg(not(feature = "runtime-benchmarks"))] + type MessageProcessor = xcm_builder::ProcessXcmMessage< + AggregateMessageOrigin, + xcm_executor::XcmExecutor, + RuntimeCall, + >; + type Size = u32; + type QueueChangeHandler = (); + type QueuePausedQuery = (); // No XCMP queue pallet deployed. + type HeapSize = sp_core::ConstU32<{ 64 * 1024 }>; + type MaxStale = sp_core::ConstU32<8>; + type ServiceWeight = MessageQueueServiceWeight; +} + +impl parachain_info::Config for Runtime {} + +impl cumulus_pallet_aura_ext::Config for Runtime {} + +impl pallet_timestamp::Config for Runtime { + type Moment = u64; + type OnTimestampSet = Aura; + #[cfg(feature = "experimental")] + type MinimumPeriod = ConstU64<0>; + #[cfg(not(feature = "experimental"))] + type MinimumPeriod = ConstU64<{ SLOT_DURATION / 2 }>; + type WeightInfo = weights::pallet_timestamp::WeightInfo; +} + +impl pallet_aura::Config for Runtime { + type AuthorityId = AuraId; + type DisabledValidators = (); + type MaxAuthorities = ConstU32<100_000>; + type AllowMultipleBlocksPerSlot = ConstBool; + #[cfg(feature = "experimental")] + type SlotDuration = ConstU64; +} + +impl pallet_glutton::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + type WeightInfo = weights::pallet_glutton::WeightInfo; + type AdminOrigin = EnsureRoot; +} + +impl pallet_sudo::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + type RuntimeCall = RuntimeCall; + type WeightInfo = (); +} + +construct_runtime! { + pub enum Runtime + { + System: frame_system::{Pallet, Call, Storage, Config, Event} = 0, + ParachainSystem: cumulus_pallet_parachain_system::{ + Pallet, Call, Config, Storage, Inherent, Event, ValidateUnsigned, + } = 1, + ParachainInfo: parachain_info::{Pallet, Storage, Config} = 2, + Timestamp: pallet_timestamp::{Pallet, Call, Storage, Inherent} = 3, + + // DMP handler. + CumulusXcm: cumulus_pallet_xcm::{Pallet, Call, Storage, Event, Origin} = 10, + MessageQueue: pallet_message_queue::{Pallet, Call, Storage, Event} = 11, + + // The main stage. + Glutton: pallet_glutton::{Pallet, Call, Storage, Event, Config} = 20, + + // Collator support + Aura: pallet_aura::{Pallet, Storage, Config} = 30, + AuraExt: cumulus_pallet_aura_ext::{Pallet, Storage, Config} = 31, + + // Sudo. + Sudo: pallet_sudo::{Pallet, Call, Storage, Event, Config} = 255, + } +} + +/// Index of a transaction in the chain. +pub type Nonce = u32; +/// A hash of some data used by the chain. +pub type Hash = sp_core::H256; +/// An index to a block. +pub type BlockNumber = u32; +/// The address format for describing accounts. +pub type Address = sp_runtime::MultiAddress; +/// Block header type as expected by this runtime. +pub type Header = generic::Header; +/// Block type as expected by this runtime. +pub type Block = generic::Block; +/// A Block signed with a Justification +pub type SignedBlock = generic::SignedBlock; +/// BlockId type as expected by this runtime. +pub type BlockId = generic::BlockId; +/// The SignedExtension to the basic transaction logic. 
+pub type SignedExtra = ( + pallet_sudo::CheckOnlySudoAccount, + frame_system::CheckNonZeroSender, + frame_system::CheckSpecVersion, + frame_system::CheckTxVersion, + frame_system::CheckGenesis, + frame_system::CheckEra, + frame_system::CheckNonce, +); +/// Unchecked extrinsic type as expected by this runtime. +pub type UncheckedExtrinsic = + generic::UncheckedExtrinsic; +/// Executive: handles dispatch to the various modules. +pub type Executive = frame_executive::Executive< + Runtime, + Block, + frame_system::ChainContext, + Runtime, + AllPalletsWithSystem, +>; + +#[cfg(feature = "runtime-benchmarks")] +#[macro_use] +extern crate frame_benchmarking; + +#[cfg(feature = "runtime-benchmarks")] +mod benches { + define_benchmarks!( + [cumulus_pallet_parachain_system, ParachainSystem] + [frame_system, SystemBench::] + [pallet_glutton, Glutton] + [pallet_message_queue, MessageQueue] + [pallet_timestamp, Timestamp] + ); +} + +impl_runtime_apis! { + impl sp_api::Core for Runtime { + fn version() -> RuntimeVersion { + VERSION + } + + fn execute_block(block: Block) { + Executive::execute_block(block) + } + + fn initialize_block(header: &::Header) { + Executive::initialize_block(header) + } + } + + impl sp_api::Metadata for Runtime { + fn metadata() -> OpaqueMetadata { + OpaqueMetadata::new(Runtime::metadata().into()) + } + + fn metadata_at_version(version: u32) -> Option { + Runtime::metadata_at_version(version) + } + + fn metadata_versions() -> sp_std::vec::Vec { + Runtime::metadata_versions() + } + } + + impl sp_consensus_aura::AuraApi for Runtime { + fn slot_duration() -> sp_consensus_aura::SlotDuration { + sp_consensus_aura::SlotDuration::from_millis(Aura::slot_duration()) + } + + fn authorities() -> Vec { + Aura::authorities().into_inner() + } + } + + impl cumulus_primitives_aura::AuraUnincludedSegmentApi for Runtime { + fn can_build_upon( + included_hash: ::Hash, + slot: cumulus_primitives_aura::Slot, + ) -> bool { + ConsensusHook::can_build_upon(included_hash, slot) + } + } + + impl sp_block_builder::BlockBuilder for Runtime { + fn apply_extrinsic( + extrinsic: ::Extrinsic, + ) -> ApplyExtrinsicResult { + Executive::apply_extrinsic(extrinsic) + } + + fn finalize_block() -> ::Header { + Executive::finalize_block() + } + + fn inherent_extrinsics(data: sp_inherents::InherentData) -> Vec<::Extrinsic> { + data.create_extrinsics() + } + + fn check_inherents(block: Block, data: sp_inherents::InherentData) -> sp_inherents::CheckInherentsResult { + data.check_extrinsics(&block) + } + } + + impl sp_transaction_pool::runtime_api::TaggedTransactionQueue for Runtime { + fn validate_transaction( + source: TransactionSource, + tx: ::Extrinsic, + block_hash: ::Hash, + ) -> TransactionValidity { + Executive::validate_transaction(source, tx, block_hash) + } + } + + impl sp_offchain::OffchainWorkerApi for Runtime { + fn offchain_worker(header: &::Header) { + Executive::offchain_worker(header) + } + } + + impl sp_session::SessionKeys for Runtime { + fn generate_session_keys(seed: Option>) -> Vec { + SessionKeys::generate(seed) + } + + fn decode_session_keys( + encoded: Vec, + ) -> Option, KeyTypeId)>> { + SessionKeys::decode_into_raw_public_keys(&encoded) + } + } + + impl cumulus_primitives_core::CollectCollationInfo for Runtime { + fn collect_collation_info(header: &::Header) -> cumulus_primitives_core::CollationInfo { + ParachainSystem::collect_collation_info(header) + } + } + + impl frame_system_rpc_runtime_api::AccountNonceApi for Runtime { + fn account_nonce(account: AccountId) -> Nonce { + 
System::account_nonce(account) + } + } + + #[cfg(feature = "runtime-benchmarks")] + impl frame_benchmarking::Benchmark for Runtime { + fn benchmark_metadata(extra: bool) -> ( + Vec, + Vec, + ) { + use frame_benchmarking::{Benchmarking, BenchmarkList}; + use frame_support::traits::StorageInfoTrait; + use frame_system_benchmarking::Pallet as SystemBench; + + let mut list = Vec::::new(); + list_benchmarks!(list, extra); + + let storage_info = AllPalletsWithSystem::storage_info(); + + (list, storage_info) + } + + fn dispatch_benchmark( + config: frame_benchmarking::BenchmarkConfig + ) -> Result, sp_runtime::RuntimeString> { + use frame_benchmarking::{Benchmarking, BenchmarkBatch, BenchmarkError}; + use sp_storage::TrackedStorageKey; + + use frame_system_benchmarking::Pallet as SystemBench; + impl frame_system_benchmarking::Config for Runtime { + fn setup_set_code_requirements(code: &sp_std::vec::Vec) -> Result<(), BenchmarkError> { + ParachainSystem::initialize_for_set_code_benchmark(code.len() as u32); + Ok(()) + } + + fn verify_set_code() { + System::assert_last_event(cumulus_pallet_parachain_system::Event::::ValidationFunctionStored.into()); + } + } + + use frame_support::traits::WhitelistedStorageKeys; + let whitelist: Vec = AllPalletsWithSystem::whitelisted_storage_keys(); + + let mut batches = Vec::::new(); + let params = (&config, &whitelist); + add_benchmarks!(params, batches); + Ok(batches) + } + } + + impl sp_genesis_builder::GenesisBuilder for Runtime { + fn create_default_config() -> Vec { + create_default_config::() + } + + fn build_config(config: Vec) -> sp_genesis_builder::Result { + build_config::(config) + } + } +} + +cumulus_pallet_parachain_system::register_validate_block! { + Runtime = Runtime, + BlockExecutor = cumulus_pallet_aura_ext::BlockExecutor::, +} diff --git a/cumulus/parachains/runtimes/glutton/glutton-westend/src/weights/cumulus_pallet_parachain_system.rs b/cumulus/parachains/runtimes/glutton/glutton-westend/src/weights/cumulus_pallet_parachain_system.rs new file mode 100644 index 00000000000..bc8299ab1bd --- /dev/null +++ b/cumulus/parachains/runtimes/glutton/glutton-westend/src/weights/cumulus_pallet_parachain_system.rs @@ -0,0 +1,75 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Cumulus. + +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . + +//! Autogenerated weights for `cumulus_pallet_parachain_system` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-11-07, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `runner-yprdrvc7-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! 
WASM-EXECUTION: `Compiled`, CHAIN: `Some("glutton-westend-dev-1300")`, DB CACHE: 1024 + +// Executed Command: +// target/production/polkadot-parachain +// benchmark +// pallet +// --steps=50 +// --repeat=20 +// --extrinsic=* +// --wasm-execution=compiled +// --heap-pages=4096 +// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json +// --pallet=cumulus_pallet_parachain_system +// --chain=glutton-westend-dev-1300 +// --header=./cumulus/file_header.txt +// --output=./cumulus/parachains/runtimes/glutton/glutton-westend/src/weights/ + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `cumulus_pallet_parachain_system`. +pub struct WeightInfo(PhantomData); +impl cumulus_pallet_parachain_system::WeightInfo for WeightInfo { + /// Storage: `ParachainSystem::LastDmqMqcHead` (r:1 w:1) + /// Proof: `ParachainSystem::LastDmqMqcHead` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `MessageQueue::BookStateFor` (r:1 w:1) + /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) + /// Storage: `MessageQueue::ServiceHead` (r:1 w:1) + /// Proof: `MessageQueue::ServiceHead` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `MaxEncodedLen`) + /// Storage: `ParachainSystem::ProcessedDownwardMessages` (r:0 w:1) + /// Proof: `ParachainSystem::ProcessedDownwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `MessageQueue::Pages` (r:0 w:1000) + /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(65585), added: 68060, mode: `MaxEncodedLen`) + /// The range of component `n` is `[0, 1000]`. + fn enqueue_inbound_downward_messages(n: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `12` + // Estimated: `3517` + // Minimum execution time: 1_745_000 picoseconds. + Weight::from_parts(1_859_000, 0) + .saturating_add(Weight::from_parts(0, 3517)) + // Standard Error: 53_384 + .saturating_add(Weight::from_parts(196_309_089, 0).saturating_mul(n.into())) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(4)) + .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(n.into()))) + } +} diff --git a/cumulus/parachains/runtimes/glutton/glutton-westend/src/weights/frame_system.rs b/cumulus/parachains/runtimes/glutton/glutton-westend/src/weights/frame_system.rs new file mode 100644 index 00000000000..6f8cf4f39df --- /dev/null +++ b/cumulus/parachains/runtimes/glutton/glutton-westend/src/weights/frame_system.rs @@ -0,0 +1,153 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Cumulus. + +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . + +//! Autogenerated weights for `frame_system` +//! +//! 
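All of these generated functions share one shape: a measured base weight, per-component terms, and the runtime's configured database weights. As a rough reading of `enqueue_inbound_downward_messages` above, in illustrative arithmetic only (not part of the generated file):

```rust
// Illustrative arithmetic only. The base and per-message figures mirror the
// generated function above; `db_read_ps` and `db_write_ps` stand in for
// `T::DbWeight`, which this runtime configures as `()` (zero), so in practice
// only the measured terms remain.
fn approx_enqueue_ref_time_ps(n: u64, db_read_ps: u64, db_write_ps: u64) -> u64 {
    let base = 1_859_000;
    let per_message = 196_309_089u64.saturating_mul(n);
    let db = 3 * db_read_ps + (4 + n) * db_write_ps;
    base + per_message + db
}
```

The proof-size side is handled the same way, starting from the 3517-byte base footprint recorded above.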
THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-11-07, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `runner-yprdrvc7-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("glutton-westend-dev-1300")`, DB CACHE: 1024 + +// Executed Command: +// target/production/polkadot-parachain +// benchmark +// pallet +// --steps=50 +// --repeat=20 +// --extrinsic=* +// --wasm-execution=compiled +// --heap-pages=4096 +// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json +// --pallet=frame_system +// --chain=glutton-westend-dev-1300 +// --header=./cumulus/file_header.txt +// --output=./cumulus/parachains/runtimes/glutton/glutton-westend/src/weights/ + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `frame_system`. +pub struct WeightInfo(PhantomData); +impl frame_system::WeightInfo for WeightInfo { + /// The range of component `b` is `[0, 3932160]`. + fn remark(b: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 1_570_000 picoseconds. + Weight::from_parts(1_626_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + // Standard Error: 0 + .saturating_add(Weight::from_parts(387, 0).saturating_mul(b.into())) + } + /// The range of component `b` is `[0, 3932160]`. + fn remark_with_event(b: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 4_200_000 picoseconds. + Weight::from_parts(4_262_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + // Standard Error: 4 + .saturating_add(Weight::from_parts(1_791, 0).saturating_mul(b.into())) + } + /// Storage: `System::Digest` (r:1 w:1) + /// Proof: `System::Digest` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: UNKNOWN KEY `0x3a686561707061676573` (r:0 w:1) + /// Proof: UNKNOWN KEY `0x3a686561707061676573` (r:0 w:1) + fn set_heap_pages() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `1485` + // Minimum execution time: 2_680_000 picoseconds. 
+ Weight::from_parts(2_936_000, 0) + .saturating_add(Weight::from_parts(0, 1485)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `ParachainSystem::ValidationData` (r:1 w:0) + /// Proof: `ParachainSystem::ValidationData` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::UpgradeRestrictionSignal` (r:1 w:0) + /// Proof: `ParachainSystem::UpgradeRestrictionSignal` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::PendingValidationCode` (r:1 w:1) + /// Proof: `ParachainSystem::PendingValidationCode` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) + /// Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::NewValidationCode` (r:0 w:1) + /// Proof: `ParachainSystem::NewValidationCode` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::DidSetValidationCode` (r:0 w:1) + /// Proof: `ParachainSystem::DidSetValidationCode` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + fn set_code() -> Weight { + // Proof Size summary in bytes: + // Measured: `127` + // Estimated: `1612` + // Minimum execution time: 119_097_302_000 picoseconds. + Weight::from_parts(120_914_576_000, 0) + .saturating_add(Weight::from_parts(0, 1612)) + .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().writes(3)) + } + /// Storage: `Skipped::Metadata` (r:0 w:0) + /// Proof: `Skipped::Metadata` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// The range of component `i` is `[0, 1000]`. + fn set_storage(i: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 1_606_000 picoseconds. + Weight::from_parts(1_704_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + // Standard Error: 2_090 + .saturating_add(Weight::from_parts(765_829, 0).saturating_mul(i.into())) + .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(i.into()))) + } + /// Storage: `Skipped::Metadata` (r:0 w:0) + /// Proof: `Skipped::Metadata` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// The range of component `i` is `[0, 1000]`. + fn kill_storage(i: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 1_646_000 picoseconds. + Weight::from_parts(1_719_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + // Standard Error: 1_067 + .saturating_add(Weight::from_parts(578_598, 0).saturating_mul(i.into())) + .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(i.into()))) + } + /// Storage: `Skipped::Metadata` (r:0 w:0) + /// Proof: `Skipped::Metadata` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// The range of component `p` is `[0, 1000]`. + fn kill_prefix(p: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `58 + p * (69 ±0)` + // Estimated: `53 + p * (70 ±0)` + // Minimum execution time: 2_933_000 picoseconds. 
+ Weight::from_parts(3_069_000, 0) + .saturating_add(Weight::from_parts(0, 53)) + // Standard Error: 1_844 + .saturating_add(Weight::from_parts(1_214_377, 0).saturating_mul(p.into())) + .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(p.into()))) + .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(p.into()))) + .saturating_add(Weight::from_parts(0, 70).saturating_mul(p.into())) + } +} diff --git a/cumulus/parachains/runtimes/glutton/glutton-westend/src/weights/mod.rs b/cumulus/parachains/runtimes/glutton/glutton-westend/src/weights/mod.rs new file mode 100644 index 00000000000..47f9d1ee105 --- /dev/null +++ b/cumulus/parachains/runtimes/glutton/glutton-westend/src/weights/mod.rs @@ -0,0 +1,19 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +pub mod cumulus_pallet_parachain_system; +pub mod pallet_glutton; +pub mod pallet_message_queue; +pub mod pallet_timestamp; diff --git a/cumulus/parachains/runtimes/glutton/glutton-westend/src/weights/pallet_glutton.rs b/cumulus/parachains/runtimes/glutton/glutton-westend/src/weights/pallet_glutton.rs new file mode 100644 index 00000000000..9345458a704 --- /dev/null +++ b/cumulus/parachains/runtimes/glutton/glutton-westend/src/weights/pallet_glutton.rs @@ -0,0 +1,178 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Cumulus. + +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . + +//! Autogenerated weights for `pallet_glutton` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-11-07, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `runner-yprdrvc7-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! 
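Stepping back from the generated numbers: the Glutton pallet is driven through its `AdminOrigin`, which this runtime sets to Root, reachable via Sudo or a relay-chain `Transact`. A hedged sketch of the calls involved, with arbitrary example levels:

```rust
// A minimal sketch, assuming the `RuntimeCall` type and `Glutton` pallet instance
// from lib.rs are in scope; the 50% levels are arbitrary example values.
use sp_runtime::{FixedPointNumber, FixedU64};

fn example_glutton_calls() -> (RuntimeCall, RuntimeCall) {
    // Consume roughly half of the leftover ref time in `on_idle`...
    let compute = RuntimeCall::Glutton(pallet_glutton::Call::set_compute {
        compute: FixedU64::saturating_from_rational(1, 2),
    });
    // ...and claim roughly half of the remaining proof size.
    let storage = RuntimeCall::Glutton(pallet_glutton::Call::set_storage {
        storage: FixedU64::saturating_from_rational(1, 2),
    });
    (compute, storage)
}
```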
WASM-EXECUTION: `Compiled`, CHAIN: `Some("glutton-westend-dev-1300")`, DB CACHE: 1024 + +// Executed Command: +// target/production/polkadot-parachain +// benchmark +// pallet +// --steps=50 +// --repeat=20 +// --extrinsic=* +// --wasm-execution=compiled +// --heap-pages=4096 +// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json +// --pallet=pallet_glutton +// --chain=glutton-westend-dev-1300 +// --header=./cumulus/file_header.txt +// --output=./cumulus/parachains/runtimes/glutton/glutton-westend/src/weights/ + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `pallet_glutton`. +pub struct WeightInfo(PhantomData); +impl pallet_glutton::WeightInfo for WeightInfo { + /// Storage: `Glutton::TrashDataCount` (r:1 w:1) + /// Proof: `Glutton::TrashDataCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Glutton::TrashData` (r:0 w:1000) + /// Proof: `Glutton::TrashData` (`max_values`: Some(65000), `max_size`: Some(1036), added: 3016, mode: `MaxEncodedLen`) + /// The range of component `n` is `[0, 1000]`. + fn initialize_pallet_grow(n: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `87` + // Estimated: `1489` + // Minimum execution time: 6_453_000 picoseconds. + Weight::from_parts(6_629_000, 0) + .saturating_add(Weight::from_parts(0, 1489)) + // Standard Error: 3_416 + .saturating_add(Weight::from_parts(9_938_610, 0).saturating_mul(n.into())) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(n.into()))) + } + /// Storage: `Glutton::TrashDataCount` (r:1 w:1) + /// Proof: `Glutton::TrashDataCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Glutton::TrashData` (r:0 w:1000) + /// Proof: `Glutton::TrashData` (`max_values`: Some(65000), `max_size`: Some(1036), added: 3016, mode: `MaxEncodedLen`) + /// The range of component `n` is `[0, 1000]`. + fn initialize_pallet_shrink(n: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `120` + // Estimated: `1489` + // Minimum execution time: 6_456_000 picoseconds. + Weight::from_parts(6_564_000, 0) + .saturating_add(Weight::from_parts(0, 1489)) + // Standard Error: 1_336 + .saturating_add(Weight::from_parts(1_141_705, 0).saturating_mul(n.into())) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(n.into()))) + } + /// The range of component `i` is `[0, 100000]`. + fn waste_ref_time_iter(i: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 679_000 picoseconds. + Weight::from_parts(3_310_101, 0) + .saturating_add(Weight::from_parts(0, 0)) + // Standard Error: 10 + .saturating_add(Weight::from_parts(103_703, 0).saturating_mul(i.into())) + } + /// Storage: `Glutton::TrashData` (r:5000 w:0) + /// Proof: `Glutton::TrashData` (`max_values`: Some(65000), `max_size`: Some(1036), added: 3016, mode: `MaxEncodedLen`) + /// The range of component `i` is `[0, 5000]`. 
+ fn waste_proof_size_some(i: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `119115 + i * (1022 ±0)` + // Estimated: `990 + i * (3016 ±0)` + // Minimum execution time: 765_000 picoseconds. + Weight::from_parts(1_004_000, 0) + .saturating_add(Weight::from_parts(0, 990)) + // Standard Error: 4_008 + .saturating_add(Weight::from_parts(6_130_770, 0).saturating_mul(i.into())) + .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(i.into()))) + .saturating_add(Weight::from_parts(0, 3016).saturating_mul(i.into())) + } + /// Storage: `Glutton::Storage` (r:1 w:0) + /// Proof: `Glutton::Storage` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) + /// Storage: `Glutton::Compute` (r:1 w:0) + /// Proof: `Glutton::Compute` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) + /// Storage: `Glutton::TrashData` (r:1737 w:0) + /// Proof: `Glutton::TrashData` (`max_values`: Some(65000), `max_size`: Some(1036), added: 3016, mode: `MaxEncodedLen`) + fn on_idle_high_proof_waste() -> Weight { + // Proof Size summary in bytes: + // Measured: `1900498` + // Estimated: `5239782` + // Minimum execution time: 97_248_614_000 picoseconds. + Weight::from_parts(97_728_420_000, 0) + .saturating_add(Weight::from_parts(0, 5239782)) + .saturating_add(T::DbWeight::get().reads(1739)) + } + /// Storage: `Glutton::Storage` (r:1 w:0) + /// Proof: `Glutton::Storage` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) + /// Storage: `Glutton::Compute` (r:1 w:0) + /// Proof: `Glutton::Compute` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) + /// Storage: `Glutton::TrashData` (r:5 w:0) + /// Proof: `Glutton::TrashData` (`max_values`: Some(65000), `max_size`: Some(1036), added: 3016, mode: `MaxEncodedLen`) + fn on_idle_low_proof_waste() -> Weight { + // Proof Size summary in bytes: + // Measured: `9548` + // Estimated: `16070` + // Minimum execution time: 97_305_112_000 picoseconds. + Weight::from_parts(97_427_728_000, 0) + .saturating_add(Weight::from_parts(0, 16070)) + .saturating_add(T::DbWeight::get().reads(7)) + } + /// Storage: `Glutton::Storage` (r:1 w:0) + /// Proof: `Glutton::Storage` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) + /// Storage: `Glutton::Compute` (r:1 w:0) + /// Proof: `Glutton::Compute` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) + fn empty_on_idle() -> Weight { + // Proof Size summary in bytes: + // Measured: `87` + // Estimated: `1493` + // Minimum execution time: 4_125_000 picoseconds. + Weight::from_parts(4_339_000, 0) + .saturating_add(Weight::from_parts(0, 1493)) + .saturating_add(T::DbWeight::get().reads(2)) + } + /// Storage: `Glutton::Compute` (r:0 w:1) + /// Proof: `Glutton::Compute` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) + fn set_compute() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 3_879_000 picoseconds. + Weight::from_parts(4_211_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `Glutton::Storage` (r:0 w:1) + /// Proof: `Glutton::Storage` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) + fn set_storage() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 3_920_000 picoseconds. 
+ Weight::from_parts(4_081_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + .saturating_add(T::DbWeight::get().writes(1)) + } +} diff --git a/cumulus/parachains/runtimes/glutton/glutton-westend/src/weights/pallet_message_queue.rs b/cumulus/parachains/runtimes/glutton/glutton-westend/src/weights/pallet_message_queue.rs new file mode 100644 index 00000000000..eab6c15a40d --- /dev/null +++ b/cumulus/parachains/runtimes/glutton/glutton-westend/src/weights/pallet_message_queue.rs @@ -0,0 +1,179 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Cumulus. + +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . + +//! Autogenerated weights for `pallet_message_queue` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-11-07, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `runner-yprdrvc7-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("glutton-westend-dev-1300")`, DB CACHE: 1024 + +// Executed Command: +// target/production/polkadot-parachain +// benchmark +// pallet +// --steps=50 +// --repeat=20 +// --extrinsic=* +// --wasm-execution=compiled +// --heap-pages=4096 +// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json +// --pallet=pallet_message_queue +// --chain=glutton-westend-dev-1300 +// --header=./cumulus/file_header.txt +// --output=./cumulus/parachains/runtimes/glutton/glutton-westend/src/weights/ + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `pallet_message_queue`. +pub struct WeightInfo(PhantomData); +impl pallet_message_queue::WeightInfo for WeightInfo { + /// Storage: `MessageQueue::ServiceHead` (r:1 w:0) + /// Proof: `MessageQueue::ServiceHead` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `MaxEncodedLen`) + /// Storage: `MessageQueue::BookStateFor` (r:2 w:2) + /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) + fn ready_ring_knit() -> Weight { + // Proof Size summary in bytes: + // Measured: `223` + // Estimated: `6044` + // Minimum execution time: 10_833_000 picoseconds. 
+ Weight::from_parts(11_237_000, 0) + .saturating_add(Weight::from_parts(0, 6044)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `MessageQueue::BookStateFor` (r:2 w:2) + /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) + /// Storage: `MessageQueue::ServiceHead` (r:1 w:1) + /// Proof: `MessageQueue::ServiceHead` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `MaxEncodedLen`) + fn ready_ring_unknit() -> Weight { + // Proof Size summary in bytes: + // Measured: `218` + // Estimated: `6044` + // Minimum execution time: 9_399_000 picoseconds. + Weight::from_parts(9_773_000, 0) + .saturating_add(Weight::from_parts(0, 6044)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(3)) + } + /// Storage: `MessageQueue::BookStateFor` (r:1 w:1) + /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) + fn service_queue_base() -> Weight { + // Proof Size summary in bytes: + // Measured: `6` + // Estimated: `3517` + // Minimum execution time: 3_277_000 picoseconds. + Weight::from_parts(3_358_000, 0) + .saturating_add(Weight::from_parts(0, 3517)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `MessageQueue::Pages` (r:1 w:1) + /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(65585), added: 68060, mode: `MaxEncodedLen`) + fn service_page_base_completion() -> Weight { + // Proof Size summary in bytes: + // Measured: `72` + // Estimated: `69050` + // Minimum execution time: 5_429_000 picoseconds. + Weight::from_parts(5_667_000, 0) + .saturating_add(Weight::from_parts(0, 69050)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `MessageQueue::Pages` (r:1 w:1) + /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(65585), added: 68060, mode: `MaxEncodedLen`) + fn service_page_base_no_completion() -> Weight { + // Proof Size summary in bytes: + // Measured: `72` + // Estimated: `69050` + // Minimum execution time: 5_538_000 picoseconds. + Weight::from_parts(5_803_000, 0) + .saturating_add(Weight::from_parts(0, 69050)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + fn service_page_item() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 89_888_000 picoseconds. + Weight::from_parts(90_929_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } + /// Storage: `MessageQueue::ServiceHead` (r:1 w:1) + /// Proof: `MessageQueue::ServiceHead` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `MaxEncodedLen`) + /// Storage: `MessageQueue::BookStateFor` (r:1 w:0) + /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) + fn bump_service_head() -> Weight { + // Proof Size summary in bytes: + // Measured: `171` + // Estimated: `3517` + // Minimum execution time: 6_129_000 picoseconds. 
+ Weight::from_parts(6_414_000, 0) + .saturating_add(Weight::from_parts(0, 3517)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `MessageQueue::BookStateFor` (r:1 w:1) + /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) + /// Storage: `MessageQueue::Pages` (r:1 w:1) + /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(65585), added: 68060, mode: `MaxEncodedLen`) + fn reap_page() -> Weight { + // Proof Size summary in bytes: + // Measured: `65667` + // Estimated: `69050` + // Minimum execution time: 52_366_000 picoseconds. + Weight::from_parts(53_500_000, 0) + .saturating_add(Weight::from_parts(0, 69050)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `MessageQueue::BookStateFor` (r:1 w:1) + /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) + /// Storage: `MessageQueue::Pages` (r:1 w:1) + /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(65585), added: 68060, mode: `MaxEncodedLen`) + fn execute_overweight_page_removed() -> Weight { + // Proof Size summary in bytes: + // Measured: `65667` + // Estimated: `69050` + // Minimum execution time: 67_848_000 picoseconds. + Weight::from_parts(68_910_000, 0) + .saturating_add(Weight::from_parts(0, 69050)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `MessageQueue::BookStateFor` (r:1 w:1) + /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) + /// Storage: `MessageQueue::Pages` (r:1 w:1) + /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(65585), added: 68060, mode: `MaxEncodedLen`) + fn execute_overweight_page_updated() -> Weight { + // Proof Size summary in bytes: + // Measured: `65667` + // Estimated: `69050` + // Minimum execution time: 107_564_000 picoseconds. + Weight::from_parts(109_377_000, 0) + .saturating_add(Weight::from_parts(0, 69050)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } +} diff --git a/cumulus/parachains/runtimes/glutton/glutton-westend/src/weights/pallet_timestamp.rs b/cumulus/parachains/runtimes/glutton/glutton-westend/src/weights/pallet_timestamp.rs new file mode 100644 index 00000000000..4218dcc73f4 --- /dev/null +++ b/cumulus/parachains/runtimes/glutton/glutton-westend/src/weights/pallet_timestamp.rs @@ -0,0 +1,73 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Cumulus. + +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . + +//! Autogenerated weights for `pallet_timestamp` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! 
DATE: 2023-11-07, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `runner-yprdrvc7-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("glutton-westend-dev-1300")`, DB CACHE: 1024 + +// Executed Command: +// target/production/polkadot-parachain +// benchmark +// pallet +// --steps=50 +// --repeat=20 +// --extrinsic=* +// --wasm-execution=compiled +// --heap-pages=4096 +// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json +// --pallet=pallet_timestamp +// --chain=glutton-westend-dev-1300 +// --header=./cumulus/file_header.txt +// --output=./cumulus/parachains/runtimes/glutton/glutton-westend/src/weights/ + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `pallet_timestamp`. +pub struct WeightInfo(PhantomData); +impl pallet_timestamp::WeightInfo for WeightInfo { + /// Storage: `Timestamp::Now` (r:1 w:1) + /// Proof: `Timestamp::Now` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) + /// Storage: `Aura::CurrentSlot` (r:1 w:0) + /// Proof: `Aura::CurrentSlot` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) + fn set() -> Weight { + // Proof Size summary in bytes: + // Measured: `86` + // Estimated: `1493` + // Minimum execution time: 6_306_000 picoseconds. + Weight::from_parts(6_592_000, 0) + .saturating_add(Weight::from_parts(0, 1493)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } + fn on_finalize() -> Weight { + // Proof Size summary in bytes: + // Measured: `57` + // Estimated: `0` + // Minimum execution time: 2_900_000 picoseconds. + Weight::from_parts(3_030_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + } +} diff --git a/cumulus/parachains/runtimes/glutton/glutton-westend/src/xcm_config.rs b/cumulus/parachains/runtimes/glutton/glutton-westend/src/xcm_config.rs new file mode 100644 index 00000000000..5ebb0ade123 --- /dev/null +++ b/cumulus/parachains/runtimes/glutton/glutton-westend/src/xcm_config.rs @@ -0,0 +1,92 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use super::{ + AccountId, AllPalletsWithSystem, ParachainInfo, Runtime, RuntimeCall, RuntimeEvent, + RuntimeOrigin, +}; +use frame_support::{ + match_types, parameter_types, + traits::{Everything, Nothing}, + weights::Weight, +}; +use xcm::latest::prelude::*; +use xcm_builder::{ + AllowExplicitUnpaidExecutionFrom, FixedWeightBounds, ParentAsSuperuser, ParentIsPreset, + SovereignSignedViaLocation, +}; + +parameter_types! 
{ + pub const WestendLocation: MultiLocation = MultiLocation::parent(); + pub const WestendNetwork: Option = Some(NetworkId::Westend); + pub UniversalLocation: InteriorMultiLocation = X1(Parachain(ParachainInfo::parachain_id().into())); +} + +/// This is the type we use to convert an (incoming) XCM origin into a local `Origin` instance, +/// ready for dispatching a transaction with Xcm's `Transact`. There is an `OriginKind` which can +/// bias the kind of local `Origin` it will become. +pub type XcmOriginToTransactDispatchOrigin = ( + // Sovereign account converter; this attempts to derive an `AccountId` from the origin location + // using `LocationToAccountId` and then turn that into the usual `Signed` origin. Useful for + // foreign chains who want to have a local sovereign account on this chain which they control. + SovereignSignedViaLocation, RuntimeOrigin>, + // Superuser converter for the Relay-chain (Parent) location. This will allow it to issue a + // transaction from the Root origin. + ParentAsSuperuser, +); + +match_types! { + pub type JustTheParent: impl Contains = { MultiLocation { parents:1, interior: Here } }; +} + +parameter_types! { + // One XCM operation is 1_000_000_000 weight - almost certainly a conservative estimate. + pub UnitWeightCost: Weight = Weight::from_parts(1_000_000_000, 64 * 1024); + pub const MaxInstructions: u32 = 100; + pub const MaxAssetsIntoHolding: u32 = 64; +} + +pub struct XcmConfig; +impl xcm_executor::Config for XcmConfig { + type RuntimeCall = RuntimeCall; + type XcmSender = (); // sending XCM not supported + type AssetTransactor = (); // balances not supported + type OriginConverter = XcmOriginToTransactDispatchOrigin; + type IsReserve = (); // balances not supported + type IsTeleporter = (); // balances not supported + type UniversalLocation = UniversalLocation; + type Barrier = AllowExplicitUnpaidExecutionFrom; + type Weigher = FixedWeightBounds; // balances not supported + type Trader = (); // balances not supported + type ResponseHandler = (); // Don't handle responses for now. 
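	// Taken together with the `JustTheParent` barrier above, the empty handlers here
	// and below leave this executor able to do little more than run calls the relay
	// chain sends via `Transact`: asset transfers, fee payment, traps and claims are
	// all switched off.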
+ type AssetTrap = (); // don't trap for now + type AssetClaims = (); // don't claim for now + type SubscriptionService = (); // don't handle subscriptions for now + type PalletInstancesInfo = AllPalletsWithSystem; + type MaxAssetsIntoHolding = MaxAssetsIntoHolding; + type AssetLocker = (); + type AssetExchanger = (); + type FeeManager = (); + type MessageExporter = (); + type UniversalAliases = Nothing; + type CallDispatcher = RuntimeCall; + type SafeCallFilter = Everything; + type Aliasers = Nothing; +} + +impl cumulus_pallet_xcm::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + type XcmExecutor = xcm_executor::XcmExecutor; +} diff --git a/cumulus/parachains/testnets-common/Cargo.toml b/cumulus/parachains/testnets-common/Cargo.toml new file mode 100644 index 00000000000..e39cf91d3ab --- /dev/null +++ b/cumulus/parachains/testnets-common/Cargo.toml @@ -0,0 +1,44 @@ +[package] +name = "testnets-common" +version = "1.0.0" +authors.workspace = true +edition.workspace = true +description = "Logic and configuration specific to testnet parachain runtimes" +license = "Apache-2.0" + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] + +[dependencies] +smallvec = "1.11.0" + +# Substrate +frame-support = { path = "../../../substrate/frame/support", default-features = false } +sp-runtime = { path = "../../../substrate/primitives/runtime", default-features = false } + +# Polkadot +rococo-runtime-constants = { path = "../../../polkadot/runtime/rococo/constants", default-features = false} +westend-runtime-constants = { path = "../../../polkadot/runtime/westend/constants", default-features = false} +polkadot-core-primitives = { path = "../../../polkadot/core-primitives", default-features = false} + +# Cumulus + +[dev-dependencies] + +[build-dependencies] +substrate-wasm-builder = { path = "../../../substrate/utils/wasm-builder" } + +[features] +default = [ "std" ] +std = [ + "frame-support/std", + "polkadot-core-primitives/std", + "rococo-runtime-constants/std", + "sp-runtime/std", + "westend-runtime-constants/std", +] + +runtime-benchmarks = [ + "frame-support/runtime-benchmarks", + "sp-runtime/runtime-benchmarks", +] diff --git a/cumulus/parachains/testnets-common/src/lib.rs b/cumulus/parachains/testnets-common/src/lib.rs new file mode 100644 index 00000000000..42d367bff27 --- /dev/null +++ b/cumulus/parachains/testnets-common/src/lib.rs @@ -0,0 +1,30 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#![cfg_attr(not(feature = "std"), no_std)] + +/// Since the parachains-common package is now published to crates.io, SP runtimes for testnets +/// will be adapted to use this package, and their config removed from the published common +/// package. Only the configs specific to rococo, westend and wococo will be moved here, and the +/// truly common logic will still be sourced from the parachains-common package. +/// +/// In practice this just means that instead of using e.g. 
`[parachains_common::westend::*]`, now +/// the westend configs will be in `[testnets_common::westend::*]`. +/// +/// TODO: edit all runtimes to remove the testnet configs as part of PR #1737 +/// +pub mod rococo; +pub mod westend; +pub mod wococo; diff --git a/cumulus/parachains/testnets-common/src/rococo.rs b/cumulus/parachains/testnets-common/src/rococo.rs new file mode 100644 index 00000000000..6e31def4b55 --- /dev/null +++ b/cumulus/parachains/testnets-common/src/rococo.rs @@ -0,0 +1,119 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +pub mod currency { + use polkadot_core_primitives::Balance; + use rococo_runtime_constants as constants; + + /// The existential deposit. Set to 1/10 of its parent Relay Chain (v9010). + pub const EXISTENTIAL_DEPOSIT: Balance = constants::currency::EXISTENTIAL_DEPOSIT / 10; + + pub const UNITS: Balance = constants::currency::UNITS; + pub const CENTS: Balance = constants::currency::CENTS; + pub const MILLICENTS: Balance = constants::currency::MILLICENTS; + + pub const fn deposit(items: u32, bytes: u32) -> Balance { + // map to 1/100 of what the rococo relay chain charges + constants::currency::deposit(items, bytes) / 100 + } +} + +pub mod fee { + use frame_support::{ + pallet_prelude::Weight, + weights::{ + constants::ExtrinsicBaseWeight, FeePolynomial, WeightToFeeCoefficient, + WeightToFeeCoefficients, WeightToFeePolynomial, + }, + }; + use polkadot_core_primitives::Balance; + use smallvec::smallvec; + pub use sp_runtime::Perbill; + + /// The block saturation level. Fees will be updated based on this value. + pub const TARGET_BLOCK_FULLNESS: Perbill = Perbill::from_percent(25); + + /// Handles converting a weight scalar to a fee value, based on the scale and granularity of the + /// node's balance type. + /// + /// This should typically create a mapping between the following ranges: + /// - `[0, MAXIMUM_BLOCK_WEIGHT]` + /// - `[Balance::min, Balance::max]` + /// + /// Yet, it can be used for any other sort of change to weight-fee. Some examples being: + /// - Setting it to `0` will essentially disable the weight fee. + /// - Setting it to `1` will cause the literal `#[weight = x]` values to be charged. + pub struct WeightToFee; + impl frame_support::weights::WeightToFee for WeightToFee { + type Balance = Balance; + + fn weight_to_fee(weight: &Weight) -> Self::Balance { + let time_poly: FeePolynomial<Balance> = RefTimeToFee::polynomial().into(); + let proof_poly: FeePolynomial<Balance> = ProofSizeToFee::polynomial().into(); + + // Take the maximum instead of the sum to charge by the more scarce resource. + time_poly.eval(weight.ref_time()).max(proof_poly.eval(weight.proof_size())) + } + } + + /// Maps the reference time component of `Weight` to a fee.
+ pub struct RefTimeToFee; + impl WeightToFeePolynomial for RefTimeToFee { + type Balance = Balance; + fn polynomial() -> WeightToFeeCoefficients<Self::Balance> { + // In Rococo, extrinsic base weight (smallest non-zero weight) is mapped to 1/10 CENT: + // The standard system parachain configuration is 1/10 of that, as in 1/100 CENT. + let p = super::currency::CENTS; + let q = 100 * Balance::from(ExtrinsicBaseWeight::get().ref_time()); + + smallvec![WeightToFeeCoefficient { + degree: 1, + negative: false, + coeff_frac: Perbill::from_rational(p % q, q), + coeff_integer: p / q, + }] + } + } + + /// Maps the proof size component of `Weight` to a fee. + pub struct ProofSizeToFee; + impl WeightToFeePolynomial for ProofSizeToFee { + type Balance = Balance; + fn polynomial() -> WeightToFeeCoefficients<Self::Balance> { + // Map 10kb proof to 1 CENT. + let p = super::currency::CENTS; + let q = 10_000; + + smallvec![WeightToFeeCoefficient { + degree: 1, + negative: false, + coeff_frac: Perbill::from_rational(p % q, q), + coeff_integer: p / q, + }] + } + } +} + +/// Consensus-related. +pub mod consensus { + /// Maximum number of blocks simultaneously accepted by the Runtime, not yet included + /// into the relay chain. + pub const UNINCLUDED_SEGMENT_CAPACITY: u32 = 1; + /// How many parachain blocks are processed by the relay chain per parent. Limits the + /// number of blocks authored per slot. + pub const BLOCK_PROCESSING_VELOCITY: u32 = 1; + /// Relay chain slot duration, in milliseconds. + pub const RELAY_CHAIN_SLOT_DURATION_MILLIS: u32 = 6000; +} diff --git a/cumulus/parachains/testnets-common/src/westend.rs b/cumulus/parachains/testnets-common/src/westend.rs new file mode 100644 index 00000000000..0ae21e23454 --- /dev/null +++ b/cumulus/parachains/testnets-common/src/westend.rs @@ -0,0 +1,140 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +/// Universally recognized accounts. +pub mod account { + use frame_support::PalletId; + + /// Westend treasury pallet id, used to convert into AccountId - in Westend as a destination for + /// slashed funds. + pub const WESTEND_TREASURY_PALLET_ID: PalletId = PalletId(*b"py/trsry"); + /// Alliance pallet ID - used as a temporary place to deposit a slashed imbalance before the + /// teleport to the Treasury. + pub const ALLIANCE_PALLET_ID: PalletId = PalletId(*b"py/allia"); + /// Referenda pallet ID - used as a temporary place to deposit a slashed imbalance before the + /// teleport to the Treasury. + pub const REFERENDA_PALLET_ID: PalletId = PalletId(*b"py/refer"); + /// Ambassador Referenda pallet ID - used as a temporary place to deposit a slashed imbalance + /// before the teleport to the Treasury. + pub const AMBASSADOR_REFERENDA_PALLET_ID: PalletId = PalletId(*b"py/amref"); +} + +pub mod currency { + use polkadot_core_primitives::Balance; + use westend_runtime_constants as constants; + + /// The existential deposit. Set to 1/10 of its parent Relay Chain.
+ pub const EXISTENTIAL_DEPOSIT: Balance = constants::currency::EXISTENTIAL_DEPOSIT / 10; + + pub const UNITS: Balance = constants::currency::UNITS; + pub const DOLLARS: Balance = UNITS; // 1_000_000_000_000 + pub const CENTS: Balance = constants::currency::CENTS; + pub const MILLICENTS: Balance = constants::currency::MILLICENTS; + pub const GRAND: Balance = constants::currency::GRAND; + + pub const fn deposit(items: u32, bytes: u32) -> Balance { + // 1/100 of Westend testnet + constants::currency::deposit(items, bytes) / 100 + } +} + +/// Fee-related. +pub mod fee { + use frame_support::{ + pallet_prelude::Weight, + weights::{ + constants::ExtrinsicBaseWeight, FeePolynomial, WeightToFeeCoefficient, + WeightToFeeCoefficients, WeightToFeePolynomial, + }, + }; + use polkadot_core_primitives::Balance; + use smallvec::smallvec; + pub use sp_runtime::Perbill; + + /// The block saturation level. Fees will be updated based on this value. + pub const TARGET_BLOCK_FULLNESS: Perbill = Perbill::from_percent(25); + + /// Handles converting a weight scalar to a fee value, based on the scale and granularity of the + /// node's balance type. + /// + /// This should typically create a mapping between the following ranges: + /// - [0, MAXIMUM_BLOCK_WEIGHT] + /// - [Balance::min, Balance::max] + /// + /// Yet, it can be used for any other sort of change to weight-fee. Some examples being: + /// - Setting it to `0` will essentially disable the weight fee. + /// - Setting it to `1` will cause the literal `#[weight = x]` values to be charged. + pub struct WeightToFee; + impl frame_support::weights::WeightToFee for WeightToFee { + type Balance = Balance; + + fn weight_to_fee(weight: &Weight) -> Self::Balance { + let time_poly: FeePolynomial<Balance> = RefTimeToFee::polynomial().into(); + let proof_poly: FeePolynomial<Balance> = ProofSizeToFee::polynomial().into(); + + // Take the maximum instead of the sum to charge by the more scarce resource. + time_poly.eval(weight.ref_time()).max(proof_poly.eval(weight.proof_size())) + } + } + + /// Maps the reference time component of `Weight` to a fee. + pub struct RefTimeToFee; + impl WeightToFeePolynomial for RefTimeToFee { + type Balance = Balance; + fn polynomial() -> WeightToFeeCoefficients<Self::Balance> { + // In Westend, extrinsic base weight (smallest non-zero weight) is mapped to 1/10 CENT: + // The standard system parachain configuration is 1/10 of that, as in 1/100 CENT. + let p = super::currency::CENTS; + let q = 100 * Balance::from(ExtrinsicBaseWeight::get().ref_time()); + + smallvec![WeightToFeeCoefficient { + degree: 1, + negative: false, + coeff_frac: Perbill::from_rational(p % q, q), + coeff_integer: p / q, + }] + } + } + + /// Maps the proof size component of `Weight` to a fee. + pub struct ProofSizeToFee; + impl WeightToFeePolynomial for ProofSizeToFee { + type Balance = Balance; + fn polynomial() -> WeightToFeeCoefficients<Self::Balance> { + // Map 10kb proof to 1 CENT. + let p = super::currency::CENTS; + let q = 10_000; + + smallvec![WeightToFeeCoefficient { + degree: 1, + negative: false, + coeff_frac: Perbill::from_rational(p % q, q), + coeff_integer: p / q, + }] + } + } +} + +/// Consensus-related. +pub mod consensus { + /// Maximum number of blocks simultaneously accepted by the Runtime, not yet included into the + /// relay chain. + pub const UNINCLUDED_SEGMENT_CAPACITY: u32 = 1; + /// How many parachain blocks are processed by the relay chain per parent. Limits the number of + /// blocks authored per slot.
+ pub const BLOCK_PROCESSING_VELOCITY: u32 = 1; + /// Relay chain slot duration, in milliseconds. + pub const RELAY_CHAIN_SLOT_DURATION_MILLIS: u32 = 6000; +} diff --git a/cumulus/parachains/testnets-common/src/wococo.rs b/cumulus/parachains/testnets-common/src/wococo.rs new file mode 100644 index 00000000000..5cd6121135a --- /dev/null +++ b/cumulus/parachains/testnets-common/src/wococo.rs @@ -0,0 +1,17 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// re-export rococo +pub use crate::rococo::{consensus, currency, fee}; diff --git a/cumulus/polkadot-parachain/Cargo.toml b/cumulus/polkadot-parachain/Cargo.toml index f7252a39a38..d5deda9e7bf 100644 --- a/cumulus/polkadot-parachain/Cargo.toml +++ b/cumulus/polkadot-parachain/Cargo.toml @@ -24,6 +24,7 @@ serde_json = "1.0.108" # Local rococo-parachain-runtime = { path = "../parachains/runtimes/testing/rococo-parachain" } shell-runtime = { path = "../parachains/runtimes/starters/shell" } +glutton-westend-runtime = { path = "../parachains/runtimes/glutton/glutton-westend" } glutton-runtime = { path = "../parachains/runtimes/glutton/glutton-kusama" } seedling-runtime = { path = "../parachains/runtimes/starters/seedling" } asset-hub-polkadot-runtime = { path = "../parachains/runtimes/assets/asset-hub-polkadot" } @@ -31,6 +32,7 @@ asset-hub-kusama-runtime = { path = "../parachains/runtimes/assets/asset-hub-kus asset-hub-rococo-runtime = { path = "../parachains/runtimes/assets/asset-hub-rococo" } asset-hub-westend-runtime = { path = "../parachains/runtimes/assets/asset-hub-westend" } collectives-polkadot-runtime = { path = "../parachains/runtimes/collectives/collectives-polkadot" } +collectives-westend-runtime = { path = "../parachains/runtimes/collectives/collectives-westend" } contracts-rococo-runtime = { path = "../parachains/runtimes/contracts/contracts-rococo" } bridge-hub-rococo-runtime = { path = "../parachains/runtimes/bridge-hubs/bridge-hub-rococo" } bridge-hub-kusama-runtime = { path = "../parachains/runtimes/bridge-hubs/bridge-hub-kusama" } @@ -119,11 +121,13 @@ runtime-benchmarks = [ "bridge-hub-rococo-runtime/runtime-benchmarks", "bridge-hub-westend-runtime/runtime-benchmarks", "collectives-polkadot-runtime/runtime-benchmarks", + "collectives-westend-runtime/runtime-benchmarks", "contracts-rococo-runtime/runtime-benchmarks", "cumulus-primitives-core/runtime-benchmarks", "frame-benchmarking-cli/runtime-benchmarks", "frame-benchmarking/runtime-benchmarks", "glutton-runtime/runtime-benchmarks", + "glutton-westend-runtime/runtime-benchmarks", "parachains-common/runtime-benchmarks", "penpal-runtime/runtime-benchmarks", "polkadot-cli/runtime-benchmarks", @@ -143,8 +147,10 @@ try-runtime = [ "bridge-hub-rococo-runtime/try-runtime", "bridge-hub-westend-runtime/try-runtime", "collectives-polkadot-runtime/try-runtime", + "collectives-westend-runtime/try-runtime", "contracts-rococo-runtime/try-runtime", "glutton-runtime/try-runtime", + 
"glutton-westend-runtime/try-runtime", "penpal-runtime/try-runtime", "polkadot-cli/try-runtime", "polkadot-service/try-runtime", diff --git a/cumulus/polkadot-parachain/src/chain_spec/collectives.rs b/cumulus/polkadot-parachain/src/chain_spec/collectives.rs index 0a8064f50ca..07bd742fa8e 100644 --- a/cumulus/polkadot-parachain/src/chain_spec/collectives.rs +++ b/cumulus/polkadot-parachain/src/chain_spec/collectives.rs @@ -23,9 +23,12 @@ use sc_service::ChainType; use sp_core::sr25519; pub type CollectivesPolkadotChainSpec = sc_service::GenericChainSpec<(), Extensions>; +pub type CollectivesWestendChainSpec = sc_service::GenericChainSpec<(), Extensions>; const COLLECTIVES_POLKADOT_ED: CollectivesBalance = parachains_common::polkadot::currency::EXISTENTIAL_DEPOSIT; +const COLLECTIVES_WESTEND_ED: CollectivesBalance = + parachains_common::westend::currency::EXISTENTIAL_DEPOSIT; /// Generate the session keys from individual elements. /// @@ -158,3 +161,133 @@ fn collectives_polkadot_genesis( }, }) } + +/// Generate the session keys from individual elements. +/// +/// The input must be a tuple of individual keys (a single arg for now since we have just one key). +pub fn collectives_westend_session_keys(keys: AuraId) -> collectives_westend_runtime::SessionKeys { + collectives_westend_runtime::SessionKeys { aura: keys } +} + +pub fn collectives_westend_development_config() -> CollectivesWestendChainSpec { + let mut properties = sc_chain_spec::Properties::new(); + properties.insert("ss58Format".into(), 42.into()); + properties.insert("tokenSymbol".into(), "WND".into()); + properties.insert("tokenDecimals".into(), 12.into()); + + CollectivesWestendChainSpec::builder( + collectives_westend_runtime::WASM_BINARY + .expect("WASM binary was not built, please build it!"), + Extensions { relay_chain: "westend-dev".into(), para_id: 1002 }, + ) + .with_name("Westend Collectives Development") + .with_id("collectives_westend_dev") + .with_chain_type(ChainType::Local) + .with_genesis_config_patch(collectives_westend_genesis( + // initial collators. + vec![( + get_account_id_from_seed::("Alice"), + get_collator_keys_from_seed::("Alice"), + )], + vec![ + get_account_id_from_seed::("Alice"), + get_account_id_from_seed::("Bob"), + get_account_id_from_seed::("Alice//stash"), + get_account_id_from_seed::("Bob//stash"), + ], + // 1002 avoids a potential collision with Kusama-1001 (Encointer) should there ever + // be a collective para on Kusama. + 1002.into(), + )) + .with_boot_nodes(Vec::new()) + .with_properties(properties) + .build() +} + +/// Collectives Westend Local Config. +pub fn collectives_westend_local_config() -> CollectivesWestendChainSpec { + let mut properties = sc_chain_spec::Properties::new(); + properties.insert("ss58Format".into(), 42.into()); + properties.insert("tokenSymbol".into(), "WND".into()); + properties.insert("tokenDecimals".into(), 12.into()); + + CollectivesWestendChainSpec::builder( + collectives_westend_runtime::WASM_BINARY + .expect("WASM binary was not built, please build it!"), + Extensions { relay_chain: "westend-local".into(), para_id: 1002 }, + ) + .with_name("Westend Collectives Local") + .with_id("collectives_westend_local") + .with_chain_type(ChainType::Local) + .with_genesis_config_patch(collectives_westend_genesis( + // initial collators. 
+ vec![ + ( + get_account_id_from_seed::("Alice"), + get_collator_keys_from_seed::("Alice"), + ), + ( + get_account_id_from_seed::("Bob"), + get_collator_keys_from_seed::("Bob"), + ), + ], + vec![ + get_account_id_from_seed::("Alice"), + get_account_id_from_seed::("Bob"), + get_account_id_from_seed::("Charlie"), + get_account_id_from_seed::("Dave"), + get_account_id_from_seed::("Eve"), + get_account_id_from_seed::("Ferdie"), + get_account_id_from_seed::("Alice//stash"), + get_account_id_from_seed::("Bob//stash"), + get_account_id_from_seed::("Charlie//stash"), + get_account_id_from_seed::("Dave//stash"), + get_account_id_from_seed::("Eve//stash"), + get_account_id_from_seed::("Ferdie//stash"), + ], + 1002.into(), + )) + .with_boot_nodes(Vec::new()) + .with_properties(properties) + .build() +} + +fn collectives_westend_genesis( + invulnerables: Vec<(AccountId, AuraId)>, + endowed_accounts: Vec, + id: ParaId, +) -> serde_json::Value { + serde_json::json!( { + "balances": { + "balances": endowed_accounts + .iter() + .cloned() + .map(|k| (k, COLLECTIVES_WESTEND_ED * 4096)) + .collect::>(), + }, + "parachainInfo": { + "parachainId": id, + }, + "collatorSelection": { + "invulnerables": invulnerables.iter().cloned().map(|(acc, _)| acc).collect::>(), + "candidacyBond": COLLECTIVES_WESTEND_ED * 16, + }, + "session": { + "keys": invulnerables + .into_iter() + .map(|(acc, aura)| { + ( + acc.clone(), // account id + acc, // validator id + collectives_westend_session_keys(aura), // session keys + ) + }) + .collect::>(), + }, + // no need to pass anything to aura, in fact it will panic if we do. Session will take care + // of this. + "polkadotXcm": { + "safeXcmVersion": Some(SAFE_XCM_VERSION), + }, + }) +} diff --git a/cumulus/polkadot-parachain/src/chain_spec/glutton.rs b/cumulus/polkadot-parachain/src/chain_spec/glutton.rs index 1a0a06404c5..aff1358d1ae 100644 --- a/cumulus/polkadot-parachain/src/chain_spec/glutton.rs +++ b/cumulus/polkadot-parachain/src/chain_spec/glutton.rs @@ -24,6 +24,7 @@ use super::get_collator_keys_from_seed; /// Specialized `ChainSpec` for the Glutton parachain runtime. 
pub type GluttonChainSpec = sc_service::GenericChainSpec<(), Extensions>; +pub type GluttonWestendChainSpec = sc_service::GenericChainSpec<(), Extensions>; pub fn glutton_development_config(para_id: ParaId) -> GluttonChainSpec { GluttonChainSpec::builder( @@ -92,3 +93,71 @@ fn glutton_genesis(parachain_id: ParaId, collators: Vec) -> serde_json:: "aura": { "authorities": collators }, }) } + +pub fn glutton_westend_development_config(para_id: ParaId) -> GluttonWestendChainSpec { + GluttonWestendChainSpec::builder( + glutton_westend_runtime::WASM_BINARY.expect("WASM binary was not built, please build it!"), + Extensions { relay_chain: "westend-dev".into(), para_id: para_id.into() }, + ) + .with_name("Glutton Development") + .with_id("glutton_westend_dev") + .with_chain_type(ChainType::Local) + .with_genesis_config_patch(glutton_genesis( + para_id, + vec![get_collator_keys_from_seed::("Alice")], + )) + .build() +} + +pub fn glutton_westend_local_config(para_id: ParaId) -> GluttonWestendChainSpec { + GluttonWestendChainSpec::builder( + glutton_westend_runtime::WASM_BINARY.expect("WASM binary was not built, please build it!"), + Extensions { relay_chain: "westend-local".into(), para_id: para_id.into() }, + ) + .with_name("Glutton Local") + .with_id("glutton_westend_local") + .with_chain_type(ChainType::Local) + .with_genesis_config_patch(glutton_genesis( + para_id, + vec![ + get_collator_keys_from_seed::("Alice"), + get_collator_keys_from_seed::("Bob"), + ], + )) + .build() +} + +pub fn glutton_westend_config(para_id: ParaId) -> GluttonWestendChainSpec { + let mut properties = sc_chain_spec::Properties::new(); + properties.insert("ss58Format".into(), 42.into()); + + GluttonChainSpec::builder( + glutton_westend_runtime::WASM_BINARY.expect("WASM binary was not built, please build it!"), + Extensions { relay_chain: "westend".into(), para_id: para_id.into() }, + ) + .with_name(format!("Glutton {}", para_id).as_str()) + .with_id(format!("glutton-westend-{}", para_id).as_str()) + .with_chain_type(ChainType::Live) + .with_genesis_config_patch(glutton_westend_genesis( + para_id, + vec![ + get_collator_keys_from_seed::("Alice"), + get_collator_keys_from_seed::("Bob"), + ], + )) + .with_protocol_id(format!("glutton-westend-{}", para_id).as_str()) + .with_properties(properties) + .build() +} + +fn glutton_westend_genesis(parachain_id: ParaId, collators: Vec) -> serde_json::Value { + serde_json::json!( { + "parachainInfo": { + "parachainId": parachain_id + }, + "sudo": { + "key": Some(get_account_id_from_seed::("Alice")), + }, + "aura": { "authorities": collators }, + }) +} diff --git a/cumulus/polkadot-parachain/src/command.rs b/cumulus/polkadot-parachain/src/command.rs index 3f93c90558a..2799175b8ee 100644 --- a/cumulus/polkadot-parachain/src/command.rs +++ b/cumulus/polkadot-parachain/src/command.rs @@ -50,6 +50,7 @@ enum Runtime { CollectivesPolkadot, CollectivesWestend, Glutton, + GluttonWestend, BridgeHub(chain_spec::bridge_hubs::BridgeHubRuntimeType), } @@ -111,6 +112,8 @@ fn runtime(id: &str) -> Runtime { id.parse::() .expect("Invalid value"), ) + } else if id.starts_with("glutton-westend") { + Runtime::GluttonWestend } else if id.starts_with("glutton") { Runtime::Glutton } else { @@ -219,8 +222,12 @@ fn load_spec(id: &str) -> std::result::Result, String> { Box::new(chain_spec::collectives::CollectivesPolkadotChainSpec::from_json_bytes( &include_bytes!("../chain-specs/collectives-polkadot.json")[..], )?), + "collectives-westend-dev" => + 
Box::new(chain_spec::collectives::collectives_westend_development_config()), + "collectives-westend-local" => + Box::new(chain_spec::collectives::collectives_westend_local_config()), "collectives-westend" => - Box::new(chain_spec::collectives::CollectivesPolkadotChainSpec::from_json_bytes( + Box::new(chain_spec::collectives::CollectivesWestendChainSpec::from_json_bytes( &include_bytes!("../chain-specs/collectives-westend.json")[..], )?), @@ -254,6 +261,18 @@ fn load_spec(id: &str) -> std::result::Result, String> { "polkadot-local", )), + // -- Glutton Westend + "glutton-westend-dev" => Box::new(chain_spec::glutton::glutton_westend_development_config( + para_id.expect("Must specify parachain id"), + )), + "glutton-westend-local" => Box::new(chain_spec::glutton::glutton_westend_local_config( + para_id.expect("Must specify parachain id"), + )), + // the chain spec as used for generating the upgrade genesis values + "glutton-westend-genesis" => Box::new(chain_spec::glutton::glutton_westend_config( + para_id.expect("Must specify parachain id"), + )), + // -- Glutton "glutton-kusama-dev" => Box::new(chain_spec::glutton::glutton_development_config( para_id.expect("Must specify parachain id"), @@ -288,9 +307,12 @@ fn load_spec(id: &str) -> std::result::Result, String> { Runtime::AssetHubWestend => Box::new( chain_spec::asset_hubs::AssetHubWestendChainSpec::from_json_file(path)?, ), - Runtime::CollectivesPolkadot | Runtime::CollectivesWestend => Box::new( + Runtime::CollectivesPolkadot => Box::new( chain_spec::collectives::CollectivesPolkadotChainSpec::from_json_file(path)?, ), + Runtime::CollectivesWestend => Box::new( + chain_spec::collectives::CollectivesWestendChainSpec::from_json_file(path)?, + ), Runtime::Shell => Box::new(chain_spec::shell::ShellChainSpec::from_json_file(path)?), Runtime::Seedling => @@ -301,6 +323,8 @@ fn load_spec(id: &str) -> std::result::Result, String> { bridge_hub_runtime_type.chain_spec_from_json_file(path)?, Runtime::Penpal(_para_id) => Box::new(chain_spec::penpal::PenpalChainSpec::from_json_file(path)?), + Runtime::GluttonWestend => + Box::new(chain_spec::glutton::GluttonChainSpec::from_json_file(path)?), Runtime::Glutton => Box::new(chain_spec::glutton::GluttonChainSpec::from_json_file(path)?), Runtime::Default => Box::new( @@ -322,6 +346,10 @@ fn extract_parachain_id(id: &str) -> (&str, &str, Option) { const GLUTTON_PARA_LOCAL_PREFIX: &str = "glutton-kusama-local-"; const GLUTTON_PARA_GENESIS_PREFIX: &str = "glutton-kusama-genesis-"; + const GLUTTON_WESTEND_PARA_DEV_PREFIX: &str = "glutton-westend-dev-"; + const GLUTTON_WESTEND_PARA_LOCAL_PREFIX: &str = "glutton-westend-local-"; + const GLUTTON_WESTEND_PARA_GENESIS_PREFIX: &str = "glutton-westend-genesis-"; + let (norm_id, orig_id, para) = if let Some(suffix) = id.strip_prefix(KUSAMA_TEST_PARA_PREFIX) { let para_id: u32 = suffix.parse().expect("Invalid parachain-id suffix"); (&id[..KUSAMA_TEST_PARA_PREFIX.len() - 1], id, Some(para_id)) @@ -337,6 +365,15 @@ fn extract_parachain_id(id: &str) -> (&str, &str, Option) { } else if let Some(suffix) = id.strip_prefix(GLUTTON_PARA_GENESIS_PREFIX) { let para_id: u32 = suffix.parse().expect("Invalid parachain-id suffix"); (&id[..GLUTTON_PARA_GENESIS_PREFIX.len() - 1], id, Some(para_id)) + } else if let Some(suffix) = id.strip_prefix(GLUTTON_WESTEND_PARA_DEV_PREFIX) { + let para_id: u32 = suffix.parse().expect("Invalid parachain-id suffix"); + (&id[..GLUTTON_WESTEND_PARA_DEV_PREFIX.len() - 1], id, Some(para_id)) + } else if let Some(suffix) = 
id.strip_prefix(GLUTTON_WESTEND_PARA_LOCAL_PREFIX) { + let para_id: u32 = suffix.parse().expect("Invalid parachain-id suffix"); + (&id[..GLUTTON_WESTEND_PARA_LOCAL_PREFIX.len() - 1], id, Some(para_id)) + } else if let Some(suffix) = id.strip_prefix(GLUTTON_WESTEND_PARA_GENESIS_PREFIX) { + let para_id: u32 = suffix.parse().expect("Invalid parachain-id suffix"); + (&id[..GLUTTON_WESTEND_PARA_GENESIS_PREFIX.len() - 1], id, Some(para_id)) } else { (id, id, None) }; @@ -494,13 +531,20 @@ macro_rules! construct_partials { $code }, }, - Runtime::CollectivesPolkadot | Runtime::CollectivesWestend => { + Runtime::CollectivesPolkadot => { let $partials = new_partial::( &$config, crate::service::aura_build_import_queue::<_, AuraId>, )?; $code }, + Runtime::CollectivesWestend => { + let $partials = new_partial::( + &$config, + crate::service::aura_build_import_queue::<_, AuraId>, + )?; + $code + }, Runtime::Shell => { let $partials = new_partial::( &$config, @@ -529,6 +573,13 @@ macro_rules! construct_partials { )?; $code }, + Runtime::GluttonWestend => { + let $partials = new_partial::( + &$config, + crate::service::shell_build_import_queue, + )?; + $code + }, Runtime::Glutton => { let $partials = new_partial::( &$config, @@ -584,7 +635,7 @@ macro_rules! construct_async_run { { $( $code )* }.map(|v| (v, task_manager)) }) }, - Runtime::CollectivesPolkadot | Runtime::CollectivesWestend => { + Runtime::CollectivesPolkadot => { runner.async_run(|$config| { let $components = new_partial::( &$config, @@ -594,6 +645,16 @@ macro_rules! construct_async_run { { $( $code )* }.map(|v| (v, task_manager)) }) }, + Runtime::CollectivesWestend => { + runner.async_run(|$config| { + let $components = new_partial::( + &$config, + crate::service::aura_build_import_queue::<_, AuraId>, + )?; + let task_manager = $components.task_manager; + { $( $code )* }.map(|v| (v, task_manager)) + }) + }, Runtime::Shell => { runner.async_run(|$config| { let $components = new_partial::( @@ -705,6 +766,16 @@ macro_rules! construct_async_run { { $( $code )* }.map(|v| (v, task_manager)) }) }, + Runtime::GluttonWestend => { + runner.async_run(|$config| { + let $components = new_partial::( + &$config, + crate::service::shell_build_import_queue, + )?; + let task_manager = $components.task_manager; + { $( $code )* }.map(|v| (v, task_manager)) + }) + }, Runtime::Glutton => { runner.async_run(|$config| { let $components = new_partial::( @@ -836,7 +907,7 @@ pub fn run() -> Result<()> { // that both file paths exist, the node will exit, as the user must decide (by // deleting one path) the information that they want to use as their DB. 
let old_name = match config.chain_spec.id() { - "asset-hub-polkadot" => Some("statemint"), + "asset-hub-polkadot" => Some("statemint"), "asset-hub-kusama" => Some("statemine"), "asset-hub-westend" => Some("westmint"), "asset-hub-rococo" => Some("rockmine"), @@ -921,7 +992,7 @@ pub fn run() -> Result<()> { .await .map(|r| r.0) .map_err(Into::into), - Runtime::CollectivesPolkadot | Runtime::CollectivesWestend => + Runtime::CollectivesPolkadot => crate::service::start_generic_aura_node::< collectives_polkadot_runtime::RuntimeApi, AuraId, @@ -929,6 +1000,14 @@ pub fn run() -> Result<()> { .await .map(|r| r.0) .map_err(Into::into), + Runtime::CollectivesWestend => + crate::service::start_generic_aura_node::< + collectives_westend_runtime::RuntimeApi, + AuraId, + >(config, polkadot_config, collator_options, id, hwbench) + .await + .map(|r| r.0) + .map_err(Into::into), Runtime::Shell => crate::service::start_shell_node::( config, @@ -962,7 +1041,7 @@ pub fn run() -> Result<()> { .map(|r| r.0) .map_err(Into::into), Runtime::BridgeHub(bridge_hub_runtime_type) => match bridge_hub_runtime_type { - chain_spec::bridge_hubs::BridgeHubRuntimeType::Polkadot | +chain_spec::bridge_hubs::BridgeHubRuntimeType::Polkadot | chain_spec::bridge_hubs::BridgeHubRuntimeType::PolkadotLocal | chain_spec::bridge_hubs::BridgeHubRuntimeType::PolkadotDevelopment => crate::service::start_generic_aura_node::< @@ -1019,6 +1098,14 @@ pub fn run() -> Result<()> { .await .map(|r| r.0) .map_err(Into::into), + Runtime::GluttonWestend => + crate::service::start_basic_lookahead_node::< + glutton_westend_runtime::RuntimeApi, + AuraId, + >(config, polkadot_config, collator_options, id, hwbench) + .await + .map(|r| r.0) + .map_err(Into::into), Runtime::Glutton => crate::service::start_basic_lookahead_node::< glutton_runtime::RuntimeApi, diff --git a/cumulus/polkadot-parachain/src/service.rs b/cumulus/polkadot-parachain/src/service.rs index 3bcc9b7f60d..3884fce246c 100644 --- a/cumulus/polkadot-parachain/src/service.rs +++ b/cumulus/polkadot-parachain/src/service.rs @@ -156,6 +156,21 @@ impl sc_executor::NativeExecutionDispatch for CollectivesPolkadotRuntimeExecutor } } +/// Native Westend Collectives executor instance. +pub struct CollectivesWestendRuntimeExecutor; + +impl sc_executor::NativeExecutionDispatch for CollectivesWestendRuntimeExecutor { + type ExtendHostFunctions = frame_benchmarking::benchmarking::HostFunctions; + + fn dispatch(method: &str, data: &[u8]) -> Option> { + collectives_westend_runtime::api::dispatch(method, data) + } + + fn native_version() -> sc_executor::NativeVersion { + collectives_westend_runtime::native_version() + } +} + /// Native BridgeHubPolkadot executor instance. pub struct BridgeHubPolkadotRuntimeExecutor; @@ -216,6 +231,21 @@ impl sc_executor::NativeExecutionDispatch for ContractsRococoRuntimeExecutor { } } +/// Native Westend Glutton executor instance. +pub struct GluttonWestendRuntimeExecutor; + +impl sc_executor::NativeExecutionDispatch for GluttonWestendRuntimeExecutor { + type ExtendHostFunctions = frame_benchmarking::benchmarking::HostFunctions; + + fn dispatch(method: &str, data: &[u8]) -> Option> { + glutton_westend_runtime::api::dispatch(method, data) + } + + fn native_version() -> sc_executor::NativeVersion { + glutton_westend_runtime::native_version() + } +} + /// Native Glutton executor instance. 
pub struct GluttonRuntimeExecutor; diff --git a/cumulus/scripts/benchmarks.sh b/cumulus/scripts/benchmarks.sh index 29d06905925..7da18d9440e 100755 --- a/cumulus/scripts/benchmarks.sh +++ b/cumulus/scripts/benchmarks.sh @@ -7,6 +7,7 @@ repeat=${3:-20} __dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" ${__dir}/benchmarks-ci.sh collectives collectives-polkadot target/$target $steps $repeat +${__dir}/benchmarks-ci.sh collectives collectives-westend target/$target $steps $repeat ${__dir}/benchmarks-ci.sh assets asset-hub-kusama target/$target $steps $repeat ${__dir}/benchmarks-ci.sh assets asset-hub-polkadot target/$target $steps $repeat @@ -17,3 +18,4 @@ ${__dir}/benchmarks-ci.sh bridge-hubs bridge-hub-kusama target/$target $steps $r ${__dir}/benchmarks-ci.sh bridge-hubs bridge-hub-rococo target/$target $steps $repeat ${__dir}/benchmarks-ci.sh glutton glutton-kusama target/$target $steps $repeat +${__dir}/benchmarks-ci.sh glutton glutton-westend target/$target $steps $repeat -- GitLab From 824b782390b16b433df89dd3ff451ea3f0ea03a1 Mon Sep 17 00:00:00 2001 From: Adrian Catangiu Date: Wed, 15 Nov 2023 17:36:14 +0200 Subject: [PATCH 061/199] xcm: SovereignPaidRemoteExporter: remove unused RefundSurplus instruction (#2312) Refunding surplus happens anyway on xcm_executor::post_process(), automatically refunding surplus to original_origin at the end of execution. Since SovereignPaidRemoteExporter doesn't ClearOrigin, it can simply rely on the automatic mechanism. Furthermore, RefundSurplus instruction refunds _surplus_. Surplus exists only as a result of Transact, SetErrorHandler or SetAppendix instructions, none of which being part of the SovereignPaidRemoteExporter XCM program. So surplus is always zero here anyway. --- .../bridge-hubs/test-utils/src/test_cases.rs | 1 - .../src/tests/bridging/local_para_para.rs | 6 +++--- .../src/tests/bridging/local_relay_relay.rs | 4 ++-- .../tests/bridging/paid_remote_relay_relay.rs | 21 +++++++++---------- .../xcm/xcm-builder/src/universal_exports.rs | 1 - 5 files changed, 15 insertions(+), 18 deletions(-) diff --git a/cumulus/parachains/runtimes/bridge-hubs/test-utils/src/test_cases.rs b/cumulus/parachains/runtimes/bridge-hubs/test-utils/src/test_cases.rs index b421eea6bcf..7a86d85c86f 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/test-utils/src/test_cases.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/test-utils/src/test_cases.rs @@ -909,7 +909,6 @@ where ]), ]), }, - RefundSurplus, DepositAsset { assets: Wild(All), beneficiary: MultiLocation { parents: 1, interior: X1(Parachain(1000)) }, diff --git a/polkadot/xcm/xcm-builder/src/tests/bridging/local_para_para.rs b/polkadot/xcm/xcm-builder/src/tests/bridging/local_para_para.rs index 406843a0fe8..de08dbee953 100644 --- a/polkadot/xcm/xcm-builder/src/tests/bridging/local_para_para.rs +++ b/polkadot/xcm/xcm-builder/src/tests/bridging/local_para_para.rs @@ -44,7 +44,7 @@ fn sending_to_bridged_chain_works() { maybe_with_topic(|| { let msg = Xcm(vec![Trap(1)]); let dest = (Parent, Parent, Remote::get(), Parachain(1)).into(); - assert_eq!(send_xcm::(dest, msg).unwrap().1, (Here, 100).into()); + assert_eq!(send_xcm::(dest, msg).unwrap().1, Price::get()); assert_eq!(TheBridge::service(), 1); assert_eq!( take_received_remote_messages(), @@ -78,7 +78,7 @@ fn sending_to_parachain_of_bridged_chain_works() { maybe_with_topic(|| { let msg = Xcm(vec![Trap(1)]); let dest = (Parent, Parent, Remote::get(), Parachain(1000)).into(); - assert_eq!(send_xcm::(dest, msg).unwrap().1, (Here, 100).into()); + 
assert_eq!(send_xcm::(dest, msg).unwrap().1, Price::get()); assert_eq!(TheBridge::service(), 1); let expected = vec![( (Parent, Parachain(1000)).into(), @@ -110,7 +110,7 @@ fn sending_to_relay_chain_of_bridged_chain_works() { maybe_with_topic(|| { let msg = Xcm(vec![Trap(1)]); let dest = (Parent, Parent, Remote::get()).into(); - assert_eq!(send_xcm::(dest, msg).unwrap().1, (Here, 100).into()); + assert_eq!(send_xcm::(dest, msg).unwrap().1, Price::get()); assert_eq!(TheBridge::service(), 1); let expected = vec![( Parent.into(), diff --git a/polkadot/xcm/xcm-builder/src/tests/bridging/local_relay_relay.rs b/polkadot/xcm/xcm-builder/src/tests/bridging/local_relay_relay.rs index 02c454bb212..8433b6e0212 100644 --- a/polkadot/xcm/xcm-builder/src/tests/bridging/local_relay_relay.rs +++ b/polkadot/xcm/xcm-builder/src/tests/bridging/local_relay_relay.rs @@ -41,7 +41,7 @@ fn sending_to_bridged_chain_works() { let msg = Xcm(vec![Trap(1)]); assert_eq!( send_xcm::((Parent, Remote::get()).into(), msg).unwrap().1, - (Here, 100).into() + Price::get() ); assert_eq!(TheBridge::service(), 1); let expected = vec![( @@ -68,7 +68,7 @@ fn sending_to_parachain_of_bridged_chain_works() { maybe_with_topic(|| { let msg = Xcm(vec![Trap(1)]); let dest = (Parent, Remote::get(), Parachain(1000)).into(); - assert_eq!(send_xcm::(dest, msg).unwrap().1, (Here, 100).into()); + assert_eq!(send_xcm::(dest, msg).unwrap().1, Price::get()); assert_eq!(TheBridge::service(), 1); let expected = vec![( Parachain(1000).into(), diff --git a/polkadot/xcm/xcm-builder/src/tests/bridging/paid_remote_relay_relay.rs b/polkadot/xcm/xcm-builder/src/tests/bridging/paid_remote_relay_relay.rs index 45dc2d4a3b9..23d6eb99a90 100644 --- a/polkadot/xcm/xcm-builder/src/tests/bridging/paid_remote_relay_relay.rs +++ b/polkadot/xcm/xcm-builder/src/tests/bridging/paid_remote_relay_relay.rs @@ -24,6 +24,9 @@ use super::*; parameter_types! { + // 100 to use the bridge (export) and 80 for the remote execution weight (4 instructions x (10 + + // 10) weight each). + pub SendOverBridgePrice: u128 = 180u128 + if UsingTopic::get() { 20 } else { 0 }; pub UniversalLocation: Junctions = X2(GlobalConsensus(Local::get()), Parachain(100)); pub RelayUniversalLocation: Junctions = X1(GlobalConsensus(Local::get())); pub RemoteUniversalLocation: Junctions = X1(GlobalConsensus(Remote::get())); @@ -32,11 +35,9 @@ parameter_types! { Remote::get(), None, MultiLocation::parent(), - Some((Parent, 200u128 + if UsingTopic::get() { 20 } else { 0 }).into()) + Some((Parent, SendOverBridgePrice::get()).into()) ) ]; - // ^^^ 100 to use the bridge (export) and 100 for the remote execution weight (5 instructions - // x (10 + 10) weight each). } type TheBridge = TestBridge>; @@ -68,7 +69,7 @@ fn sending_to_bridged_chain_works() { clear_assets(Parachain(100)); add_asset(Parachain(100), (Here, 1000u128)); - let price = 200u128 + if UsingTopic::get() { 20 } else { 0 }; + let price = SendOverBridgePrice::get(); let msg = Xcm(vec![Trap(1)]); assert_eq!(send_xcm::(dest, msg).unwrap().1, (Parent, price).into()); @@ -86,7 +87,7 @@ fn sending_to_bridged_chain_works() { )]; assert_eq!(take_received_remote_messages(), expected); - // The export cost 50 ref time and 50 proof size weight units (and thus 100 units of + // The export cost 40 ref time and 40 proof size weight units (and thus 80 units of // balance). 
assert_eq!(asset_list(Parachain(100)), vec![(Here, 1000u128 - price).into()]); @@ -104,11 +105,10 @@ fn sending_to_bridged_chain_works() { destination: Here, xcm: xcm_with_topic([0; 32], vec![Trap(1)]), }, - RefundSurplus, DepositAsset { assets: Wild(All), beneficiary: Parachain(100).into() }, ], ), - outcome: Outcome::Complete(test_weight(5)), + outcome: Outcome::Complete(test_weight(4)), paid: true, }; assert_eq!(RoutingLog::take(), vec![entry]); @@ -143,7 +143,7 @@ fn sending_to_parachain_of_bridged_chain_works() { clear_assets(Parachain(100)); add_asset(Parachain(100), (Here, 1000u128)); - let price = 200u128 + if UsingTopic::get() { 20 } else { 0 }; + let price = SendOverBridgePrice::get(); let msg = Xcm(vec![Trap(1)]); assert_eq!(send_xcm::(dest, msg).unwrap().1, (Parent, price).into()); @@ -161,7 +161,7 @@ fn sending_to_parachain_of_bridged_chain_works() { )]; assert_eq!(take_received_remote_messages(), expected); - // The export cost 50 ref time and 50 proof size weight units (and thus 100 units of + // The export cost 40 ref time and 40 proof size weight units (and thus 80 units of // balance). assert_eq!(asset_list(Parachain(100)), vec![(Here, 1000u128 - price).into()]); @@ -179,11 +179,10 @@ fn sending_to_parachain_of_bridged_chain_works() { destination: Parachain(100).into(), xcm: xcm_with_topic([0; 32], vec![Trap(1)]), }, - RefundSurplus, DepositAsset { assets: Wild(All), beneficiary: Parachain(100).into() }, ], ), - outcome: Outcome::Complete(test_weight(5)), + outcome: Outcome::Complete(test_weight(4)), paid: true, }; assert_eq!(RoutingLog::take(), vec![entry]); diff --git a/polkadot/xcm/xcm-builder/src/universal_exports.rs b/polkadot/xcm/xcm-builder/src/universal_exports.rs index dbe9571d461..8e2cf88b3c3 100644 --- a/polkadot/xcm/xcm-builder/src/universal_exports.rs +++ b/polkadot/xcm/xcm-builder/src/universal_exports.rs @@ -305,7 +305,6 @@ impl Date: Wed, 15 Nov 2023 16:40:07 +0100 Subject: [PATCH 062/199] [testnet] Remove Wococo stuff from BridgeHubRococo/AssetHubRococo (#2300) Rococo<>Wococo bridge is replaced by Rococo Co-authored-by: Svyatoslav Nikolsky --- Cargo.lock | 66 --- Cargo.toml | 5 - bridges/README.md | 175 +------- bridges/bin/runtime-common/src/lib.rs | 41 +- bridges/bin/runtime-common/src/mock.rs | 46 +- .../runtime-common/src/priority_calculator.rs | 1 + bridges/docs/high-level-overview.md | 2 +- bridges/modules/grandpa/src/mock.rs | 40 +- bridges/modules/grandpa/src/weights.rs | 62 +-- bridges/modules/messages/Cargo.toml | 2 - bridges/modules/messages/src/mock.rs | 54 +-- bridges/modules/messages/src/weights.rs | 226 +++++----- bridges/modules/parachains/src/mock.rs | 41 +- bridges/modules/parachains/src/weights.rs | 122 +++--- bridges/modules/parachains/src/weights_ext.rs | 2 +- bridges/modules/relayers/Cargo.toml | 1 - bridges/modules/relayers/src/mock.rs | 44 +- bridges/modules/relayers/src/weights.rs | 2 +- .../modules/xcm-bridge-hub-router/src/mock.rs | 31 +- .../xcm-bridge-hub-router/src/weights.rs | 2 +- .../chain-asset-hub-kusama/Cargo.toml | 26 -- .../chain-asset-hub-kusama/src/lib.rs | 49 --- .../chain-asset-hub-polkadot/Cargo.toml | 28 -- .../chain-asset-hub-polkadot/src/lib.rs | 49 --- .../chain-asset-hub-rococo/src/lib.rs | 3 - .../chain-asset-hub-wococo/Cargo.toml | 26 -- .../chain-asset-hub-wococo/src/lib.rs | 48 --- .../chain-bridge-hub-cumulus/Cargo.toml | 2 +- .../chain-bridge-hub-kusama/Cargo.toml | 2 +- .../chain-bridge-hub-polkadot/Cargo.toml | 2 +- .../chain-bridge-hub-rococo/src/lib.rs | 3 - .../chain-bridge-hub-wococo/Cargo.toml | 
34 -- .../chain-bridge-hub-wococo/src/lib.rs | 90 ---- bridges/primitives/chain-wococo/Cargo.toml | 34 -- bridges/primitives/chain-wococo/src/lib.rs | 68 --- bridges/primitives/runtime/src/chain.rs | 6 +- bridges/primitives/runtime/src/extensions.rs | 2 +- bridges/primitives/runtime/src/lib.rs | 35 -- bridges/scripts/verify-pallets-build.sh | 9 - bridges/zombienet/README.md | 4 +- .../helpers/wait-hrmp-channel-opened.js | 22 + bridges/zombienet/run-tests.sh | 4 +- bridges/zombienet/scripts/invoke-script.sh | 2 +- ...set-transfer-works-rococo-to-westend.zndsl | 26 ++ ...sset-transfer-works-rococo-to-wococo.zndsl | 25 -- ...et-transfer-works-westend-to-rococo.zndsl} | 19 +- bridges/zombienet/tests/0001-start-relay.sh | 2 +- .../bridges/bridge-hub-rococo/src/genesis.rs | 12 +- .../assets/asset-hub-rococo/Cargo.toml | 4 - .../assets/asset-hub-rococo/src/lib.rs | 120 +----- .../asset-hub-rococo/src/weights/mod.rs | 4 +- ...end.rs => pallet_xcm_bridge_hub_router.rs} | 56 ++- .../pallet_xcm_bridge_hub_router_to_rococo.rs | 130 ------ .../pallet_xcm_bridge_hub_router_to_wococo.rs | 130 ------ .../xcm/pallet_xcm_benchmarks_fungible.rs | 64 ++- .../xcm/pallet_xcm_benchmarks_generic.rs | 144 +++---- .../assets/asset-hub-rococo/src/xcm_config.rs | 213 +--------- .../assets/asset-hub-rococo/tests/tests.rs | 287 +------------ .../parachains/runtimes/bridge-hubs/README.md | 127 +----- .../bridge-hubs/bridge-hub-rococo/Cargo.toml | 6 - .../src/bridge_common_config.rs | 54 +-- .../src/bridge_to_rococo_config.rs | 317 -------------- .../src/bridge_to_westend_config.rs | 2 +- .../src/bridge_to_wococo_config.rs | 318 -------------- .../bridge-hubs/bridge-hub-rococo/src/lib.rs | 329 +-------------- .../bridge-hub-rococo/src/weights/mod.rs | 73 +--- .../src/weights/pallet_bridge_grandpa.rs | 89 ++-- .../pallet_bridge_grandpa_rococo_finality.rs | 82 ---- .../pallet_bridge_grandpa_westend_finality.rs | 83 ---- .../pallet_bridge_grandpa_wococo_finality.rs | 82 ---- .../src/weights/pallet_bridge_messages.rs | 312 +++++++------- ...allet_bridge_messages_rococo_to_westend.rs | 245 ----------- ...pallet_bridge_messages_rococo_to_wococo.rs | 244 ----------- ...pallet_bridge_messages_wococo_to_rococo.rs | 244 ----------- .../src/weights/pallet_bridge_parachains.rs | 128 +++--- .../pallet_bridge_parachains_within_rococo.rs | 113 ----- ...pallet_bridge_parachains_within_westend.rs | 116 ------ .../pallet_bridge_parachains_within_wococo.rs | 115 ----- .../src/weights/pallet_bridge_relayers.rs | 49 +-- .../xcm/pallet_xcm_benchmarks_fungible.rs | 87 ++-- .../xcm/pallet_xcm_benchmarks_generic.rs | 209 +++++----- .../bridge-hub-rococo/src/xcm_config.rs | 54 +-- .../bridge-hub-rococo/tests/tests.rs | 393 +----------------- .../bridge-hubs/test-utils/Cargo.toml | 4 - .../src/chain_spec/asset_hubs.rs | 87 +--- .../src/chain_spec/bridge_hubs.rs | 62 --- cumulus/polkadot-parachain/src/command.rs | 52 +-- cumulus/scripts/bridges_rococo_westend.sh | 22 +- cumulus/scripts/bridges_rococo_wococo.sh | 386 ----------------- .../bridge_hub_rococo_local_network.toml | 12 +- .../bridge_hub_westend_local_network.toml | 12 +- .../bridge_hub_wococo_local_network.toml | 94 ----- 92 files changed, 962 insertions(+), 6389 deletions(-) delete mode 100644 bridges/primitives/chain-asset-hub-kusama/Cargo.toml delete mode 100644 bridges/primitives/chain-asset-hub-kusama/src/lib.rs delete mode 100644 bridges/primitives/chain-asset-hub-polkadot/Cargo.toml delete mode 100644 bridges/primitives/chain-asset-hub-polkadot/src/lib.rs delete mode 100644 
bridges/primitives/chain-asset-hub-wococo/Cargo.toml delete mode 100644 bridges/primitives/chain-asset-hub-wococo/src/lib.rs delete mode 100644 bridges/primitives/chain-bridge-hub-wococo/Cargo.toml delete mode 100644 bridges/primitives/chain-bridge-hub-wococo/src/lib.rs delete mode 100644 bridges/primitives/chain-wococo/Cargo.toml delete mode 100644 bridges/primitives/chain-wococo/src/lib.rs create mode 100644 bridges/zombienet/helpers/wait-hrmp-channel-opened.js create mode 100644 bridges/zombienet/tests/0001-asset-transfer-works-rococo-to-westend.zndsl delete mode 100644 bridges/zombienet/tests/0001-asset-transfer-works-rococo-to-wococo.zndsl rename bridges/zombienet/tests/{0001-asset-transfer-works-wococo-to-rococo.zndsl => 0001-asset-transfer-works-westend-to-rococo.zndsl} (64%) mode change 100644 => 100755 bridges/zombienet/tests/0001-start-relay.sh rename cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/{pallet_xcm_bridge_hub_router_to_westend.rs => pallet_xcm_bridge_hub_router.rs} (75%) delete mode 100644 cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/pallet_xcm_bridge_hub_router_to_rococo.rs delete mode 100644 cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/pallet_xcm_bridge_hub_router_to_wococo.rs delete mode 100644 cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/bridge_to_rococo_config.rs delete mode 100644 cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/bridge_to_wococo_config.rs delete mode 100644 cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_bridge_grandpa_rococo_finality.rs delete mode 100644 cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_bridge_grandpa_westend_finality.rs delete mode 100644 cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_bridge_grandpa_wococo_finality.rs delete mode 100644 cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_bridge_messages_rococo_to_westend.rs delete mode 100644 cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_bridge_messages_rococo_to_wococo.rs delete mode 100644 cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_bridge_messages_wococo_to_rococo.rs delete mode 100644 cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_bridge_parachains_within_rococo.rs delete mode 100644 cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_bridge_parachains_within_westend.rs delete mode 100644 cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_bridge_parachains_within_wococo.rs delete mode 100755 cumulus/scripts/bridges_rococo_wococo.sh delete mode 100644 cumulus/zombienet/bridge-hubs/bridge_hub_wococo_local_network.toml diff --git a/Cargo.lock b/Cargo.lock index 6b52e62d742..0c374a90a18 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -921,10 +921,8 @@ dependencies = [ "assets-common", "bp-asset-hub-rococo", "bp-asset-hub-westend", - "bp-asset-hub-wococo", "bp-bridge-hub-rococo", "bp-bridge-hub-westend", - "bp-bridge-hub-wococo", "cumulus-pallet-aura-ext", "cumulus-pallet-dmp-queue", "cumulus-pallet-parachain-system", @@ -1690,27 +1688,6 @@ dependencies = [ "thiserror", ] -[[package]] -name = "bp-asset-hub-kusama" -version = "0.1.0" -dependencies = [ - "bp-xcm-bridge-hub-router", - "frame-support", - "parity-scale-codec", - "scale-info", -] - -[[package]] -name = "bp-asset-hub-polkadot" -version = "0.1.0" -dependencies = [ - 
"bp-xcm-bridge-hub-router", - "frame-support", - "parity-scale-codec", - "scale-info", - "sp-runtime", -] - [[package]] name = "bp-asset-hub-rococo" version = "0.1.0" @@ -1731,16 +1708,6 @@ dependencies = [ "scale-info", ] -[[package]] -name = "bp-asset-hub-wococo" -version = "0.1.0" -dependencies = [ - "bp-xcm-bridge-hub-router", - "frame-support", - "parity-scale-codec", - "scale-info", -] - [[package]] name = "bp-bridge-hub-cumulus" version = "0.1.0" @@ -1807,19 +1774,6 @@ dependencies = [ "sp-std 8.0.0", ] -[[package]] -name = "bp-bridge-hub-wococo" -version = "0.1.0" -dependencies = [ - "bp-bridge-hub-cumulus", - "bp-messages", - "bp-runtime", - "frame-support", - "sp-api", - "sp-runtime", - "sp-std 8.0.0", -] - [[package]] name = "bp-header-chain" version = "0.1.0" @@ -2011,19 +1965,6 @@ dependencies = [ "sp-std 8.0.0", ] -[[package]] -name = "bp-wococo" -version = "0.1.0" -dependencies = [ - "bp-header-chain", - "bp-polkadot-core", - "bp-rococo", - "bp-runtime", - "frame-support", - "sp-api", - "sp-std 8.0.0", -] - [[package]] name = "bp-xcm-bridge-hub-router" version = "0.1.0" @@ -2205,10 +2146,8 @@ version = "0.1.0" dependencies = [ "bp-asset-hub-rococo", "bp-asset-hub-westend", - "bp-asset-hub-wococo", "bp-bridge-hub-rococo", "bp-bridge-hub-westend", - "bp-bridge-hub-wococo", "bp-header-chain", "bp-messages", "bp-parachains", @@ -2217,7 +2156,6 @@ dependencies = [ "bp-rococo", "bp-runtime", "bp-westend", - "bp-wococo", "bridge-hub-test-utils", "bridge-runtime-common", "cumulus-pallet-aura-ext", @@ -2291,8 +2229,6 @@ name = "bridge-hub-test-utils" version = "0.1.0" dependencies = [ "asset-test-utils", - "bp-bridge-hub-rococo", - "bp-bridge-hub-wococo", "bp-header-chain", "bp-messages", "bp-parachains", @@ -9661,7 +9597,6 @@ dependencies = [ "pallet-balances", "parity-scale-codec", "scale-info", - "sp-core", "sp-io", "sp-runtime", "sp-std 8.0.0", @@ -9706,7 +9641,6 @@ dependencies = [ "parity-scale-codec", "scale-info", "sp-arithmetic", - "sp-core", "sp-io", "sp-runtime", "sp-std 8.0.0", diff --git a/Cargo.toml b/Cargo.toml index 27351c09581..ed252e07053 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -14,23 +14,18 @@ members = [ "bridges/modules/parachains", "bridges/modules/relayers", "bridges/modules/xcm-bridge-hub-router", - "bridges/primitives/chain-asset-hub-kusama", - "bridges/primitives/chain-asset-hub-polkadot", "bridges/primitives/chain-asset-hub-rococo", "bridges/primitives/chain-asset-hub-westend", - "bridges/primitives/chain-asset-hub-wococo", "bridges/primitives/chain-bridge-hub-cumulus", "bridges/primitives/chain-bridge-hub-kusama", "bridges/primitives/chain-bridge-hub-polkadot", "bridges/primitives/chain-bridge-hub-rococo", "bridges/primitives/chain-bridge-hub-westend", - "bridges/primitives/chain-bridge-hub-wococo", "bridges/primitives/chain-kusama", "bridges/primitives/chain-polkadot", "bridges/primitives/chain-polkadot-bulletin", "bridges/primitives/chain-rococo", "bridges/primitives/chain-westend", - "bridges/primitives/chain-wococo", "bridges/primitives/header-chain", "bridges/primitives/messages", "bridges/primitives/parachains", diff --git a/bridges/README.md b/bridges/README.md index da46fe67d92..a2ce213d254 100644 --- a/bridges/README.md +++ b/bridges/README.md @@ -68,7 +68,7 @@ For example, consider the case below where we want to bridge two Substrate based ``` +---------------+ +---------------+ | | | | -| Rialto | | Millau | +| Rococo | | Westend | | | | | +-------+-------+ +-------+-------+ ^ ^ @@ -79,9 +79,9 @@ For example, consider the case below where we 
want to bridge two Substrate based +---------------+ ``` -The Millau chain must be able to accept Rialto headers and verify their integrity. It does this by using a runtime +The Rococo chain must be able to accept Westend headers and verify their integrity. It does this by using a runtime module designed to track GRANDPA finality. Since two blockchains can't interact directly they need an external service, -called a relayer, to communicate. The relayer will subscribe to new Rialto headers via RPC and submit them to the Millau +called a relayer, to communicate. The relayer will subscribe to new Rococo headers via RPC and submit them to the Westend chain for verification. Take a look at [Bridge High Level Documentation](./docs/high-level-overview.md) for more in-depth description of the @@ -94,164 +94,23 @@ Here's an overview of how the project is laid out. The main bits are the `bin`, messages between chains. ``` -├── bin // Node and Runtime for the various Substrate chains -│ └── ... -├── deployments // Useful tools for deploying test networks +├── modules // Substrate Runtime Modules (a.k.a Pallets) +│ ├── beefy // On-Chain BEEFY Light Client (in progress) +│ ├── grandpa // On-Chain GRANDPA Light Client +│ ├── messages // Cross Chain Message Passing +│ ├── parachains // On-Chain Parachains Light Client +│ ├── relayers // Relayer Rewards Registry +│ ├── xcm-bridge-hub // Multiple Dynamic Bridges Support +│ ├── xcm-bridge-hub-router // XCM Router that may be used to Connect to XCM Bridge Hub +├── primitives // Code shared between modules, runtimes, and relays │ └── ... -├── modules // Substrate Runtime Modules (a.k.a Pallets) -│ ├── beefy // On-Chain BEEFY Light Client (in progress) -│ ├── grandpa // On-Chain GRANDPA Light Client -│ ├── messages // Cross Chain Message Passing -│ ├── parachains // On-Chain Parachains Light Client -│ ├── relayers // Relayer rewards registry +├── relays // Application for sending finality proofs and messages between chains │ └── ... -├── primitives // Code shared between modules, runtimes, and relays -│ └── ... -├── relays // Application for sending finality proofs and messages between chains -│ └── ... -└── scripts // Useful development and maintenance scripts +└── scripts // Useful development and maintenance scripts ``` ## Running the Bridge -To run the Bridge you need to be able to connect the bridge relay node to the RPC interface of nodes on each side of the -bridge (source and target chain). - -There are 2 ways to run the bridge, described below: - -- building & running from source: with this option, you'll be able to run the bridge between two standalone chains that -are running GRANDPA finality gadget to achieve finality; - -- running a Docker Compose setup: this is a recommended option, where you'll see bridges with parachains, complex relays -and more. - -### Using the Source - -First you'll need to build the bridge nodes and relay. This can be done as follows: - -```bash -# In `parity-bridges-common` folder -cargo build -p rialto-bridge-node -cargo build -p millau-bridge-node -cargo build -p substrate-relay -``` - -### Running a Dev network - -We will launch a dev network to demonstrate how to relay a message between two Substrate based chains (named Rialto and -Millau). - -To do this we will need two nodes, two relayers which will relay headers, and two relayers which will relay messages. 
- -#### Running from local scripts - -To run a simple dev network you can use the scripts located in the [`deployments/local-scripts` -folder](./deployments/local-scripts). - -First, we must run the two Substrate nodes. - -```bash -# In `parity-bridges-common` folder -./deployments/local-scripts/run-rialto-node.sh -./deployments/local-scripts/run-millau-node.sh -``` - -After the nodes are up we can run the header relayers. - -```bash -./deployments/local-scripts/relay-millau-to-rialto.sh -./deployments/local-scripts/relay-rialto-to-millau.sh -``` - -At this point you should see the relayer submitting headers from the Millau Substrate chain to the Rialto Substrate -chain. - -``` -# Header Relayer Logs -[Millau_to_Rialto_Sync] [date] DEBUG bridge Going to submit finality proof of Millau header #147 to Rialto -[...] [date] INFO bridge Synced 147 of 147 headers -[...] [date] DEBUG bridge Going to submit finality proof of Millau header #148 to Rialto -[...] [date] INFO bridge Synced 148 of 149 headers -``` - -Finally, we can run the message relayers. - -```bash -./deployments/local-scripts/relay-messages-millau-to-rialto.sh -./deployments/local-scripts/relay-messages-rialto-to-millau.sh -``` - -You will also see the message lane relayers listening for new messages. - -``` -# Message Relayer Logs -[Millau_to_Rialto_MessageLane_00000000] [date] DEBUG bridge Asking Millau::ReceivingConfirmationsDelivery about best message nonces -[...] [date] INFO bridge Synced Some(2) of Some(3) nonces in Millau::MessagesDelivery -> Rialto::MessagesDelivery race -[...] [date] DEBUG bridge Asking Millau::MessagesDelivery about message nonces -[...] [date] DEBUG bridge Received best nonces from Millau::ReceivingConfirmationsDelivery: TargetClientNonces { - latest_nonce: 0, nonces_data: () } -[...] [date] DEBUG bridge Asking Millau::ReceivingConfirmationsDelivery about finalized message nonces -[...] [date] DEBUG bridge Received finalized nonces from Millau::ReceivingConfirmationsDelivery: TargetClientNonces { - latest_nonce: 0, nonces_data: () } -[...] [date] DEBUG bridge Received nonces from Millau::MessagesDelivery: SourceClientNonces { new_nonces: {}, confirmed_nonce: Some(0) } -[...] [date] DEBUG bridge Asking Millau node about its state -[...] [date] DEBUG bridge Received state from Millau node: ClientState { best_self: HeaderId(1593, 0xacac***), best_finalized_self: - HeaderId(1590, 0x0be81d...), best_finalized_peer_at_best_self: HeaderId(0, 0xdcdd89...) } -``` - -To send a message see the ["How to send a message" section](#how-to-send-a-message). - -### How to send a message - -In this section we'll show you how to quickly send a bridge message. The message is just an encoded XCM `Trap(43)` -message. - -```bash -# In `parity-bridges-common` folder -./scripts/send-message-from-millau-rialto.sh -``` - -After sending a message you will see the following logs showing a message was successfully sent: - -``` -INFO bridge Sending message to Rialto. Size: 11. -TRACE bridge Sent transaction to Millau node: 0x5e68... -``` - -And at the Rialto node logs you'll something like this: - -``` -... runtime::bridge-messages: Received messages: total=1, valid=1. Weight used: Weight(ref_time: 1215065371, proof_size: - 48559)/Weight(ref_time: 1215065371, proof_size: 54703). -``` - -It means that the message has been delivered and dispatched. Message may be dispatched with an error, though - the goal -of our test bridge is to ensure that messages are successfully delivered and all involved components are working. 
- -## Full Network Docker Compose Setup - -For a more sophisticated deployment which includes bidirectional header sync, message passing, monitoring dashboards, -etc. see the [Deployments README](./deployments/README.md). - -You should note that you can find images for all the bridge components published on [Docker -Hub](https://hub.docker.com/u/paritytech). - -To run a Rialto node for example, you can use the following command: - -```bash -docker run -p 30333:30333 -p 9933:9933 -p 9944:9944 \ - -it paritytech/rialto-bridge-node --dev --tmp \ - --rpc-cors=all --unsafe-rpc-external -``` - -## Community - -Main hangout for the community is [Element](https://element.io/) (formerly Riot). Element is a chat server like, for -example, Discord. Most discussions around Polkadot and Substrate happen in various Element "rooms" (channels). So, -joining Element might be a good idea, anyway. - -If you are interested in information exchange and development of Polkadot related bridges please feel free to join the -[Polkadot Bridges](https://app.element.io/#/room/#bridges:web3.foundation) Element channel. - -The [Substrate Technical](https://app.element.io/#/room/#substrate-technical:matrix.org) Element channel is most suited -for discussions regarding Substrate itself. +Apart from live Rococo <> Westend bridge, you may spin up local networks and test see how it works locally. More +details may be found in +[this document](https://github.com/paritytech/polkadot-sdk/tree/master//cumulus/parachains/runtimes/bridge-hubs/README.md). diff --git a/bridges/bin/runtime-common/src/lib.rs b/bridges/bin/runtime-common/src/lib.rs index ae6f40b1421..d3b3b21061d 100644 --- a/bridges/bin/runtime-common/src/lib.rs +++ b/bridges/bin/runtime-common/src/lib.rs @@ -22,7 +22,6 @@ use crate::messages_call_ext::MessagesCallSubType; use pallet_bridge_grandpa::CallSubType as GrandpaCallSubType; use pallet_bridge_parachains::CallSubType as ParachainsCallSubtype; use sp_runtime::transaction_validity::TransactionValidity; -use xcm::v3::NetworkId; pub mod messages; pub mod messages_api; @@ -92,8 +91,8 @@ where /// ```nocompile /// generate_bridge_reject_obsolete_headers_and_messages!{ /// Call, AccountId -/// BridgeRialtoGrandpa, BridgeWestendGrandpa, -/// BridgeRialtoParachains +/// BridgeRococoGrandpa, BridgeRococoMessages, +/// BridgeRococoParachains /// } /// ``` /// @@ -147,42 +146,6 @@ macro_rules! generate_bridge_reject_obsolete_headers_and_messages { }; } -/// A mapping over `NetworkId`. -/// Since `NetworkId` doesn't include `Millau`, `Rialto` and `RialtoParachain`, we create some -/// synthetic associations between these chains and `NetworkId` chains. -pub enum CustomNetworkId { - /// The Millau network ID, associated with Kusama. - Millau, - /// The Rialto network ID, associated with Polkadot. - Rialto, - /// The RialtoParachain network ID, associated with Westend. - RialtoParachain, -} - -impl TryFrom for CustomNetworkId { - type Error = (); - - fn try_from(chain: bp_runtime::ChainId) -> Result { - Ok(match chain { - bp_runtime::MILLAU_CHAIN_ID => Self::Millau, - bp_runtime::RIALTO_CHAIN_ID => Self::Rialto, - bp_runtime::RIALTO_PARACHAIN_CHAIN_ID => Self::RialtoParachain, - _ => return Err(()), - }) - } -} - -impl CustomNetworkId { - /// Converts self to XCM' network id. 
- pub const fn as_network_id(&self) -> NetworkId { - match *self { - CustomNetworkId::Millau => NetworkId::Kusama, - CustomNetworkId::Rialto => NetworkId::Polkadot, - CustomNetworkId::RialtoParachain => NetworkId::Westend, - } - } -} - #[cfg(test)] mod tests { use crate::BridgeRuntimeFilterCall; diff --git a/bridges/bin/runtime-common/src/mock.rs b/bridges/bin/runtime-common/src/mock.rs index 67ae974668e..ffabf7f6e2f 100644 --- a/bridges/bin/runtime-common/src/mock.rs +++ b/bridges/bin/runtime-common/src/mock.rs @@ -14,12 +14,7 @@ // You should have received a copy of the GNU General Public License // along with Parity Bridges Common. If not, see . -//! A mock runtime for testing different stuff in the crate. We've been using Millau -//! runtime for that before, but it has two drawbacks: -//! -//! - circular dependencies between this crate and Millau runtime; -//! -//! - we can't use (e.g. as git subtree or by copying) this crate in repo without Millau. +//! A mock runtime for testing different stuff in the crate. #![cfg(test)] @@ -44,13 +39,13 @@ use bp_runtime::{ }; use codec::{Decode, Encode}; use frame_support::{ - parameter_types, + derive_impl, parameter_types, weights::{ConstantMultiplier, IdentityFee, RuntimeDbWeight, Weight}, }; use pallet_transaction_payment::Multiplier; use sp_runtime::{ testing::H256, - traits::{BlakeTwo256, ConstU32, ConstU64, ConstU8, IdentityLookup}, + traits::{BlakeTwo256, ConstU32, ConstU64, ConstU8}, FixedPointNumber, Perquintill, }; @@ -146,30 +141,14 @@ parameter_types! { pub const ReserveId: [u8; 8] = *b"brdgrlrs"; } +#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] impl frame_system::Config for TestRuntime { - type RuntimeOrigin = RuntimeOrigin; - type Nonce = u64; - type RuntimeCall = RuntimeCall; type Hash = ThisChainHash; type Hashing = ThisChainHasher; type AccountId = ThisChainAccountId; - type Lookup = IdentityLookup; type Block = ThisChainBlock; - type RuntimeEvent = RuntimeEvent; - type BlockHashCount = ConstU32<250>; - type Version = (); - type PalletInfo = PalletInfo; type AccountData = pallet_balances::AccountData; - type OnNewAccount = (); - type OnKilledAccount = (); - type BaseCallFilter = frame_support::traits::Everything; - type SystemWeightInfo = (); - type BlockWeights = (); - type BlockLength = (); - type DbWeight = DbWeight; - type SS58Prefix = (); - type OnSetCode = (); - type MaxConsumers = frame_support::traits::ConstU32<16>; + type BlockHashCount = ConstU32<250>; } impl pallet_utility::Config for TestRuntime { @@ -179,21 +158,10 @@ impl pallet_utility::Config for TestRuntime { type WeightInfo = (); } +#[derive_impl(pallet_balances::config_preludes::TestDefaultConfig as pallet_balances::DefaultConfig)] impl pallet_balances::Config for TestRuntime { - type Balance = ThisChainBalance; - type RuntimeEvent = RuntimeEvent; - type DustRemoval = (); - type ExistentialDeposit = ExistentialDeposit; - type AccountStore = System; - type WeightInfo = (); - type MaxLocks = ConstU32<50>; - type MaxReserves = ConstU32<50>; type ReserveIdentifier = [u8; 8]; - type RuntimeHoldReason = RuntimeHoldReason; - type RuntimeFreezeReason = RuntimeFreezeReason; - type FreezeIdentifier = (); - type MaxHolds = ConstU32<0>; - type MaxFreezes = ConstU32<0>; + type AccountStore = System; } impl pallet_transaction_payment::Config for TestRuntime { diff --git a/bridges/bin/runtime-common/src/priority_calculator.rs b/bridges/bin/runtime-common/src/priority_calculator.rs index fd103448125..a597fb9e2f4 100644 --- 
a/bridges/bin/runtime-common/src/priority_calculator.rs +++ b/bridges/bin/runtime-common/src/priority_calculator.rs @@ -27,6 +27,7 @@ use frame_support::traits::Get; use sp_runtime::transaction_validity::TransactionPriority; // reexport everything from `integrity_tests` module +#[allow(unused_imports)] pub use integrity_tests::*; /// Compute priority boost for message delivery transaction that delivers diff --git a/bridges/docs/high-level-overview.md b/bridges/docs/high-level-overview.md index 42efc8100bd..d6d6fb3f099 100644 --- a/bridges/docs/high-level-overview.md +++ b/bridges/docs/high-level-overview.md @@ -1,7 +1,7 @@ # High-Level Bridge Documentation This document gives a brief, abstract description of main components that may be found in this repository. If you want -to see how we're using them to build Rococo <> Wococo (Kusama <> Polkadot) bridge, please refer to the [Polkadot <> +to see how we're using them to build Rococo <> Westend (Kusama <> Polkadot) bridge, please refer to the [Polkadot <> Kusama Bridge](./polkadot-kusama-bridge-overview.md). ## Purpose diff --git a/bridges/modules/grandpa/src/mock.rs b/bridges/modules/grandpa/src/mock.rs index f88a0a3e6a6..7efa84971fe 100644 --- a/bridges/modules/grandpa/src/mock.rs +++ b/bridges/modules/grandpa/src/mock.rs @@ -20,16 +20,9 @@ use bp_header_chain::ChainWithGrandpa; use bp_runtime::Chain; use frame_support::{ - construct_runtime, parameter_types, - traits::{ConstU32, ConstU64, Hooks}, - weights::Weight, + construct_runtime, derive_impl, parameter_types, traits::Hooks, weights::Weight, }; use sp_core::sr25519::Signature; -use sp_runtime::{ - testing::H256, - traits::{BlakeTwo256, IdentityLookup}, - Perbill, -}; pub type AccountId = u64; pub type TestHeader = sp_runtime::testing::Header; @@ -49,43 +42,14 @@ construct_runtime! { } } -parameter_types! { - pub const MaximumBlockWeight: Weight = Weight::from_parts(1024, 0); - pub const MaximumBlockLength: u32 = 2 * 1024; - pub const AvailableBlockRatio: Perbill = Perbill::one(); -} - +#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] impl frame_system::Config for TestRuntime { - type RuntimeOrigin = RuntimeOrigin; - type Nonce = u64; - type RuntimeCall = RuntimeCall; - type Hash = H256; - type Hashing = BlakeTwo256; - type AccountId = AccountId; - type Lookup = IdentityLookup; type Block = Block; - type RuntimeEvent = RuntimeEvent; - type BlockHashCount = ConstU64<250>; - type Version = (); - type PalletInfo = PalletInfo; - type AccountData = (); - type OnNewAccount = (); - type OnKilledAccount = (); - type BaseCallFilter = frame_support::traits::Everything; - type SystemWeightInfo = (); - type DbWeight = (); - type BlockWeights = (); - type BlockLength = (); - type SS58Prefix = (); - type OnSetCode = (); - type MaxConsumers = ConstU32<16>; } parameter_types! { pub const MaxFreeMandatoryHeadersPerBlock: u32 = 2; pub const HeadersToKeep: u32 = 5; - pub const SessionLength: u64 = 5; - pub const NumValidators: u32 = 5; } impl grandpa::Config for TestRuntime { diff --git a/bridges/modules/grandpa/src/weights.rs b/bridges/modules/grandpa/src/weights.rs index 89ed70d13ac..a75e7b5a8e4 100644 --- a/bridges/modules/grandpa/src/weights.rs +++ b/bridges/modules/grandpa/src/weights.rs @@ -23,7 +23,7 @@ //! 
EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 // Executed Command: -// target/release/millau-bridge-node +// target/release/unknown-bridge-node // benchmark // pallet // --chain=dev @@ -58,39 +58,39 @@ pub trait WeightInfo { /// Those weights are test only and must never be used in production. pub struct BridgeWeight(PhantomData); impl WeightInfo for BridgeWeight { - /// Storage: BridgeRialtoGrandpa PalletOperatingMode (r:1 w:0) + /// Storage: BridgeUnknownGrandpa PalletOperatingMode (r:1 w:0) /// - /// Proof: BridgeRialtoGrandpa PalletOperatingMode (max_values: Some(1), max_size: Some(1), + /// Proof: BridgeUnknownGrandpa PalletOperatingMode (max_values: Some(1), max_size: Some(1), /// added: 496, mode: MaxEncodedLen) /// - /// Storage: BridgeRialtoGrandpa RequestCount (r:1 w:1) + /// Storage: BridgeUnknownGrandpa RequestCount (r:1 w:1) /// - /// Proof: BridgeRialtoGrandpa RequestCount (max_values: Some(1), max_size: Some(4), added: 499, - /// mode: MaxEncodedLen) + /// Proof: BridgeUnknownGrandpa RequestCount (max_values: Some(1), max_size: Some(4), added: + /// 499, mode: MaxEncodedLen) /// - /// Storage: BridgeRialtoGrandpa BestFinalized (r:1 w:1) + /// Storage: BridgeUnknownGrandpa BestFinalized (r:1 w:1) /// - /// Proof: BridgeRialtoGrandpa BestFinalized (max_values: Some(1), max_size: Some(36), added: + /// Proof: BridgeUnknownGrandpa BestFinalized (max_values: Some(1), max_size: Some(36), added: /// 531, mode: MaxEncodedLen) /// - /// Storage: BridgeRialtoGrandpa CurrentAuthoritySet (r:1 w:0) + /// Storage: BridgeUnknownGrandpa CurrentAuthoritySet (r:1 w:0) /// - /// Proof: BridgeRialtoGrandpa CurrentAuthoritySet (max_values: Some(1), max_size: Some(209), + /// Proof: BridgeUnknownGrandpa CurrentAuthoritySet (max_values: Some(1), max_size: Some(209), /// added: 704, mode: MaxEncodedLen) /// - /// Storage: BridgeRialtoGrandpa ImportedHashesPointer (r:1 w:1) + /// Storage: BridgeUnknownGrandpa ImportedHashesPointer (r:1 w:1) /// - /// Proof: BridgeRialtoGrandpa ImportedHashesPointer (max_values: Some(1), max_size: Some(4), + /// Proof: BridgeUnknownGrandpa ImportedHashesPointer (max_values: Some(1), max_size: Some(4), /// added: 499, mode: MaxEncodedLen) /// - /// Storage: BridgeRialtoGrandpa ImportedHashes (r:1 w:1) + /// Storage: BridgeUnknownGrandpa ImportedHashes (r:1 w:1) /// - /// Proof: BridgeRialtoGrandpa ImportedHashes (max_values: Some(14400), max_size: Some(36), + /// Proof: BridgeUnknownGrandpa ImportedHashes (max_values: Some(14400), max_size: Some(36), /// added: 2016, mode: MaxEncodedLen) /// - /// Storage: BridgeRialtoGrandpa ImportedHeaders (r:0 w:2) + /// Storage: BridgeUnknownGrandpa ImportedHeaders (r:0 w:2) /// - /// Proof: BridgeRialtoGrandpa ImportedHeaders (max_values: Some(14400), max_size: Some(68), + /// Proof: BridgeUnknownGrandpa ImportedHeaders (max_values: Some(14400), max_size: Some(68), /// added: 2048, mode: MaxEncodedLen) /// /// The range of component `p` is `[1, 4]`. 
@@ -113,39 +113,39 @@ impl WeightInfo for BridgeWeight { // For backwards compatibility and tests impl WeightInfo for () { - /// Storage: BridgeRialtoGrandpa PalletOperatingMode (r:1 w:0) + /// Storage: BridgeUnknownGrandpa PalletOperatingMode (r:1 w:0) /// - /// Proof: BridgeRialtoGrandpa PalletOperatingMode (max_values: Some(1), max_size: Some(1), + /// Proof: BridgeUnknownGrandpa PalletOperatingMode (max_values: Some(1), max_size: Some(1), /// added: 496, mode: MaxEncodedLen) /// - /// Storage: BridgeRialtoGrandpa RequestCount (r:1 w:1) + /// Storage: BridgeUnknownGrandpa RequestCount (r:1 w:1) /// - /// Proof: BridgeRialtoGrandpa RequestCount (max_values: Some(1), max_size: Some(4), added: 499, - /// mode: MaxEncodedLen) + /// Proof: BridgeUnknownGrandpa RequestCount (max_values: Some(1), max_size: Some(4), added: + /// 499, mode: MaxEncodedLen) /// - /// Storage: BridgeRialtoGrandpa BestFinalized (r:1 w:1) + /// Storage: BridgeUnknownGrandpa BestFinalized (r:1 w:1) /// - /// Proof: BridgeRialtoGrandpa BestFinalized (max_values: Some(1), max_size: Some(36), added: + /// Proof: BridgeUnknownGrandpa BestFinalized (max_values: Some(1), max_size: Some(36), added: /// 531, mode: MaxEncodedLen) /// - /// Storage: BridgeRialtoGrandpa CurrentAuthoritySet (r:1 w:0) + /// Storage: BridgeUnknownGrandpa CurrentAuthoritySet (r:1 w:0) /// - /// Proof: BridgeRialtoGrandpa CurrentAuthoritySet (max_values: Some(1), max_size: Some(209), + /// Proof: BridgeUnknownGrandpa CurrentAuthoritySet (max_values: Some(1), max_size: Some(209), /// added: 704, mode: MaxEncodedLen) /// - /// Storage: BridgeRialtoGrandpa ImportedHashesPointer (r:1 w:1) + /// Storage: BridgeUnknownGrandpa ImportedHashesPointer (r:1 w:1) /// - /// Proof: BridgeRialtoGrandpa ImportedHashesPointer (max_values: Some(1), max_size: Some(4), + /// Proof: BridgeUnknownGrandpa ImportedHashesPointer (max_values: Some(1), max_size: Some(4), /// added: 499, mode: MaxEncodedLen) /// - /// Storage: BridgeRialtoGrandpa ImportedHashes (r:1 w:1) + /// Storage: BridgeUnknownGrandpa ImportedHashes (r:1 w:1) /// - /// Proof: BridgeRialtoGrandpa ImportedHashes (max_values: Some(14400), max_size: Some(36), + /// Proof: BridgeUnknownGrandpa ImportedHashes (max_values: Some(14400), max_size: Some(36), /// added: 2016, mode: MaxEncodedLen) /// - /// Storage: BridgeRialtoGrandpa ImportedHeaders (r:0 w:2) + /// Storage: BridgeUnknownGrandpa ImportedHeaders (r:0 w:2) /// - /// Proof: BridgeRialtoGrandpa ImportedHeaders (max_values: Some(14400), max_size: Some(68), + /// Proof: BridgeUnknownGrandpa ImportedHeaders (max_values: Some(14400), max_size: Some(68), /// added: 2048, mode: MaxEncodedLen) /// /// The range of component `p` is `[1, 4]`. 
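The weight files touched above all follow the same FRAME convention: a `WeightInfo` trait listing one function per benchmarked extrinsic, a `BridgeWeight<T>` struct generic over the runtime so it can charge storage access through `T::DbWeight`, and a `()` implementation kept only for tests and backwards compatibility. The sketch below is an illustrative reduction of that pattern, not the auto-generated file itself; the concrete ref-time/proof-size numbers are placeholders standing in for benchmark output.

```rust
// Minimal sketch of the benchmark-weight pattern used by the bridge pallets
// (illustrative only; real files are generated by the benchmark CLI).
use frame_support::weights::{constants::RocksDbWeight, Weight};
use sp_std::marker::PhantomData;

pub trait WeightInfo {
    fn receive_single_message_proof() -> Weight;
}

/// Runtime-generic weights, charging DB access via the runtime's `DbWeight`.
pub struct BridgeWeight<T>(PhantomData<T>);

impl<T: frame_system::Config> WeightInfo for BridgeWeight<T> {
    fn receive_single_message_proof() -> Weight {
        // Placeholder base weight (ref_time, proof_size) from benchmarking,
        // plus the storage reads/writes the call performs.
        Weight::from_parts(52_321_000, 52_645)
            .saturating_add(T::DbWeight::get().reads(3_u64))
            .saturating_add(T::DbWeight::get().writes(1_u64))
    }
}

// Fallback used in tests: same shape, but with hard-coded RocksDB constants.
impl WeightInfo for () {
    fn receive_single_message_proof() -> Weight {
        Weight::from_parts(52_321_000, 52_645)
            .saturating_add(RocksDbWeight::get().reads(3_u64))
            .saturating_add(RocksDbWeight::get().writes(1_u64))
    }
}
```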
diff --git a/bridges/modules/messages/Cargo.toml b/bridges/modules/messages/Cargo.toml index d3d68b33802..a5c86693309 100644 --- a/bridges/modules/messages/Cargo.toml +++ b/bridges/modules/messages/Cargo.toml @@ -22,7 +22,6 @@ bp-runtime = { path = "../../primitives/runtime", default-features = false } frame-benchmarking = { path = "../../../substrate/frame/benchmarking", default-features = false, optional = true } frame-support = { path = "../../../substrate/frame/support", default-features = false } frame-system = { path = "../../../substrate/frame/system", default-features = false } -sp-core = { path = "../../../substrate/primitives/core", default-features = false } sp-runtime = { path = "../../../substrate/primitives/runtime", default-features = false } sp-std = { path = "../../../substrate/primitives/std", default-features = false } @@ -43,7 +42,6 @@ std = [ "log/std", "num-traits/std", "scale-info/std", - "sp-core/std", "sp-runtime/std", "sp-std/std", ] diff --git a/bridges/modules/messages/src/mock.rs b/bridges/modules/messages/src/mock.rs index e98f9e1f5de..648acad772d 100644 --- a/bridges/modules/messages/src/mock.rs +++ b/bridges/modules/messages/src/mock.rs @@ -34,16 +34,11 @@ use bp_messages::{ use bp_runtime::{messages::MessageDispatchResult, Size}; use codec::{Decode, Encode}; use frame_support::{ - parameter_types, - traits::ConstU64, + derive_impl, parameter_types, weights::{constants::RocksDbWeight, Weight}, }; use scale_info::TypeInfo; -use sp_core::H256; -use sp_runtime::{ - traits::{BlakeTwo256, ConstU32, IdentityLookup}, - BuildStorage, Perbill, -}; +use sp_runtime::BuildStorage; use std::{ collections::{BTreeMap, VecDeque}, ops::RangeInclusive, @@ -84,56 +79,19 @@ frame_support::construct_runtime! { } } -parameter_types! { - pub const BlockHashCount: u64 = 250; - pub const MaximumBlockWeight: Weight = Weight::from_parts(1024, 0); - pub const MaximumBlockLength: u32 = 2 * 1024; - pub const AvailableBlockRatio: Perbill = Perbill::one(); -} - pub type DbWeight = RocksDbWeight; +#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] impl frame_system::Config for TestRuntime { - type RuntimeOrigin = RuntimeOrigin; - type Nonce = u64; - type RuntimeCall = RuntimeCall; - type Hash = H256; - type Hashing = BlakeTwo256; - type AccountId = AccountId; - type Lookup = IdentityLookup; type Block = Block; - type RuntimeEvent = RuntimeEvent; - type BlockHashCount = ConstU64<250>; - type Version = (); - type PalletInfo = PalletInfo; type AccountData = pallet_balances::AccountData; - type OnNewAccount = (); - type OnKilledAccount = (); - type BaseCallFilter = frame_support::traits::Everything; - type SystemWeightInfo = (); - type BlockWeights = (); - type BlockLength = (); type DbWeight = DbWeight; - type SS58Prefix = (); - type OnSetCode = (); - type MaxConsumers = frame_support::traits::ConstU32<16>; } +#[derive_impl(pallet_balances::config_preludes::TestDefaultConfig as pallet_balances::DefaultConfig)] impl pallet_balances::Config for TestRuntime { - type MaxLocks = (); - type Balance = Balance; - type DustRemoval = (); - type RuntimeEvent = RuntimeEvent; - type ExistentialDeposit = ConstU64<1>; - type AccountStore = frame_system::Pallet; - type WeightInfo = (); - type MaxReserves = (); - type ReserveIdentifier = (); - type RuntimeHoldReason = RuntimeHoldReason; - type RuntimeFreezeReason = RuntimeFreezeReason; - type FreezeIdentifier = (); - type MaxHolds = ConstU32<0>; - type MaxFreezes = ConstU32<0>; + type ReserveIdentifier = [u8; 8]; + 
type AccountStore = System; } parameter_types! { diff --git a/bridges/modules/messages/src/weights.rs b/bridges/modules/messages/src/weights.rs index 5b6863984ec..5bf7d567560 100644 --- a/bridges/modules/messages/src/weights.rs +++ b/bridges/modules/messages/src/weights.rs @@ -14,7 +14,7 @@ // You should have received a copy of the GNU General Public License // along with Parity Bridges Common. If not, see . -//! Autogenerated weights for RialtoMessages +//! Autogenerated weights for pallet_bridge_messages //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev //! DATE: 2023-03-23, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` @@ -23,13 +23,13 @@ //! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 // Executed Command: -// target/release/millau-bridge-node +// target/release/unknown-bridge-node // benchmark // pallet // --chain=dev // --steps=50 // --repeat=20 -// --pallet=RialtoMessages +// --pallet=pallet_bridge_messages // --extrinsic=* // --execution=wasm // --wasm-execution=Compiled @@ -48,7 +48,7 @@ use frame_support::{ }; use sp_std::marker::PhantomData; -/// Weight functions needed for RialtoMessages. +/// Weight functions needed for pallet_bridge_messages. pub trait WeightInfo { fn receive_single_message_proof() -> Weight; fn receive_two_messages_proof() -> Weight; @@ -61,24 +61,24 @@ pub trait WeightInfo { fn receive_single_message_proof_with_dispatch(i: u32) -> Weight; } -/// Weights for `RialtoMessages` that are generated using one of the Bridge testnets. +/// Weights for `pallet_bridge_messages` that are generated using one of the Bridge testnets. /// /// Those weights are test only and must never be used in production. pub struct BridgeWeight(PhantomData); impl WeightInfo for BridgeWeight { - /// Storage: BridgeRialtoMessages PalletOperatingMode (r:1 w:0) + /// Storage: BridgeUnknownMessages PalletOperatingMode (r:1 w:0) /// - /// Proof: BridgeRialtoMessages PalletOperatingMode (max_values: Some(1), max_size: Some(2), + /// Proof: BridgeUnknownMessages PalletOperatingMode (max_values: Some(1), max_size: Some(2), /// added: 497, mode: MaxEncodedLen) /// - /// Storage: BridgeRialtoGrandpa ImportedHeaders (r:1 w:0) + /// Storage: BridgeUnknownGrandpa ImportedHeaders (r:1 w:0) /// - /// Proof: BridgeRialtoGrandpa ImportedHeaders (max_values: Some(14400), max_size: Some(68), + /// Proof: BridgeUnknownGrandpa ImportedHeaders (max_values: Some(14400), max_size: Some(68), /// added: 2048, mode: MaxEncodedLen) /// - /// Storage: BridgeRialtoMessages InboundLanes (r:1 w:1) + /// Storage: BridgeUnknownMessages InboundLanes (r:1 w:1) /// - /// Proof: BridgeRialtoMessages InboundLanes (max_values: None, max_size: Some(49180), added: + /// Proof: BridgeUnknownMessages InboundLanes (max_values: None, max_size: Some(49180), added: /// 51655, mode: MaxEncodedLen) fn receive_single_message_proof() -> Weight { // Proof Size summary in bytes: @@ -89,19 +89,19 @@ impl WeightInfo for BridgeWeight { .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: BridgeRialtoMessages PalletOperatingMode (r:1 w:0) + /// Storage: BridgeUnknownMessages PalletOperatingMode (r:1 w:0) /// - /// Proof: BridgeRialtoMessages PalletOperatingMode (max_values: Some(1), max_size: Some(2), + /// Proof: BridgeUnknownMessages PalletOperatingMode (max_values: Some(1), max_size: Some(2), /// added: 497, mode: MaxEncodedLen) /// - /// Storage: BridgeRialtoGrandpa ImportedHeaders (r:1 
w:0) + /// Storage: BridgeUnknownGrandpa ImportedHeaders (r:1 w:0) /// - /// Proof: BridgeRialtoGrandpa ImportedHeaders (max_values: Some(14400), max_size: Some(68), + /// Proof: BridgeUnknownGrandpa ImportedHeaders (max_values: Some(14400), max_size: Some(68), /// added: 2048, mode: MaxEncodedLen) /// - /// Storage: BridgeRialtoMessages InboundLanes (r:1 w:1) + /// Storage: BridgeUnknownMessages InboundLanes (r:1 w:1) /// - /// Proof: BridgeRialtoMessages InboundLanes (max_values: None, max_size: Some(49180), added: + /// Proof: BridgeUnknownMessages InboundLanes (max_values: None, max_size: Some(49180), added: /// 51655, mode: MaxEncodedLen) fn receive_two_messages_proof() -> Weight { // Proof Size summary in bytes: @@ -112,19 +112,19 @@ impl WeightInfo for BridgeWeight { .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: BridgeRialtoMessages PalletOperatingMode (r:1 w:0) + /// Storage: BridgeUnknownMessages PalletOperatingMode (r:1 w:0) /// - /// Proof: BridgeRialtoMessages PalletOperatingMode (max_values: Some(1), max_size: Some(2), + /// Proof: BridgeUnknownMessages PalletOperatingMode (max_values: Some(1), max_size: Some(2), /// added: 497, mode: MaxEncodedLen) /// - /// Storage: BridgeRialtoGrandpa ImportedHeaders (r:1 w:0) + /// Storage: BridgeUnknownGrandpa ImportedHeaders (r:1 w:0) /// - /// Proof: BridgeRialtoGrandpa ImportedHeaders (max_values: Some(14400), max_size: Some(68), + /// Proof: BridgeUnknownGrandpa ImportedHeaders (max_values: Some(14400), max_size: Some(68), /// added: 2048, mode: MaxEncodedLen) /// - /// Storage: BridgeRialtoMessages InboundLanes (r:1 w:1) + /// Storage: BridgeUnknownMessages InboundLanes (r:1 w:1) /// - /// Proof: BridgeRialtoMessages InboundLanes (max_values: None, max_size: Some(49180), added: + /// Proof: BridgeUnknownMessages InboundLanes (max_values: None, max_size: Some(49180), added: /// 51655, mode: MaxEncodedLen) fn receive_single_message_proof_with_outbound_lane_state() -> Weight { // Proof Size summary in bytes: @@ -135,19 +135,19 @@ impl WeightInfo for BridgeWeight { .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: BridgeRialtoMessages PalletOperatingMode (r:1 w:0) + /// Storage: BridgeUnknownMessages PalletOperatingMode (r:1 w:0) /// - /// Proof: BridgeRialtoMessages PalletOperatingMode (max_values: Some(1), max_size: Some(2), + /// Proof: BridgeUnknownMessages PalletOperatingMode (max_values: Some(1), max_size: Some(2), /// added: 497, mode: MaxEncodedLen) /// - /// Storage: BridgeRialtoGrandpa ImportedHeaders (r:1 w:0) + /// Storage: BridgeUnknownGrandpa ImportedHeaders (r:1 w:0) /// - /// Proof: BridgeRialtoGrandpa ImportedHeaders (max_values: Some(14400), max_size: Some(68), + /// Proof: BridgeUnknownGrandpa ImportedHeaders (max_values: Some(14400), max_size: Some(68), /// added: 2048, mode: MaxEncodedLen) /// - /// Storage: BridgeRialtoMessages InboundLanes (r:1 w:1) + /// Storage: BridgeUnknownMessages InboundLanes (r:1 w:1) /// - /// Proof: BridgeRialtoMessages InboundLanes (max_values: None, max_size: Some(49180), added: + /// Proof: BridgeUnknownMessages InboundLanes (max_values: None, max_size: Some(49180), added: /// 51655, mode: MaxEncodedLen) fn receive_single_message_proof_1_kb() -> Weight { // Proof Size summary in bytes: @@ -158,19 +158,19 @@ impl WeightInfo for BridgeWeight { .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// 
Storage: BridgeRialtoMessages PalletOperatingMode (r:1 w:0) + /// Storage: BridgeUnknownMessages PalletOperatingMode (r:1 w:0) /// - /// Proof: BridgeRialtoMessages PalletOperatingMode (max_values: Some(1), max_size: Some(2), + /// Proof: BridgeUnknownMessages PalletOperatingMode (max_values: Some(1), max_size: Some(2), /// added: 497, mode: MaxEncodedLen) /// - /// Storage: BridgeRialtoGrandpa ImportedHeaders (r:1 w:0) + /// Storage: BridgeUnknownGrandpa ImportedHeaders (r:1 w:0) /// - /// Proof: BridgeRialtoGrandpa ImportedHeaders (max_values: Some(14400), max_size: Some(68), + /// Proof: BridgeUnknownGrandpa ImportedHeaders (max_values: Some(14400), max_size: Some(68), /// added: 2048, mode: MaxEncodedLen) /// - /// Storage: BridgeRialtoMessages InboundLanes (r:1 w:1) + /// Storage: BridgeUnknownMessages InboundLanes (r:1 w:1) /// - /// Proof: BridgeRialtoMessages InboundLanes (max_values: None, max_size: Some(49180), added: + /// Proof: BridgeUnknownMessages InboundLanes (max_values: None, max_size: Some(49180), added: /// 51655, mode: MaxEncodedLen) fn receive_single_message_proof_16_kb() -> Weight { // Proof Size summary in bytes: @@ -181,19 +181,19 @@ impl WeightInfo for BridgeWeight { .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: BridgeRialtoMessages PalletOperatingMode (r:1 w:0) + /// Storage: BridgeUnknownMessages PalletOperatingMode (r:1 w:0) /// - /// Proof: BridgeRialtoMessages PalletOperatingMode (max_values: Some(1), max_size: Some(2), + /// Proof: BridgeUnknownMessages PalletOperatingMode (max_values: Some(1), max_size: Some(2), /// added: 497, mode: MaxEncodedLen) /// - /// Storage: BridgeRialtoGrandpa ImportedHeaders (r:1 w:0) + /// Storage: BridgeUnknownGrandpa ImportedHeaders (r:1 w:0) /// - /// Proof: BridgeRialtoGrandpa ImportedHeaders (max_values: Some(14400), max_size: Some(68), + /// Proof: BridgeUnknownGrandpa ImportedHeaders (max_values: Some(14400), max_size: Some(68), /// added: 2048, mode: MaxEncodedLen) /// - /// Storage: BridgeRialtoMessages OutboundLanes (r:1 w:1) + /// Storage: BridgeUnknownMessages OutboundLanes (r:1 w:1) /// - /// Proof: BridgeRialtoMessages OutboundLanes (max_values: Some(1), max_size: Some(44), added: + /// Proof: BridgeUnknownMessages OutboundLanes (max_values: Some(1), max_size: Some(44), added: /// 539, mode: MaxEncodedLen) /// /// Storage: BridgeRelayers RelayerRewards (r:1 w:1) @@ -209,19 +209,19 @@ impl WeightInfo for BridgeWeight { .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } - /// Storage: BridgeRialtoMessages PalletOperatingMode (r:1 w:0) + /// Storage: BridgeUnknownMessages PalletOperatingMode (r:1 w:0) /// - /// Proof: BridgeRialtoMessages PalletOperatingMode (max_values: Some(1), max_size: Some(2), + /// Proof: BridgeUnknownMessages PalletOperatingMode (max_values: Some(1), max_size: Some(2), /// added: 497, mode: MaxEncodedLen) /// - /// Storage: BridgeRialtoGrandpa ImportedHeaders (r:1 w:0) + /// Storage: BridgeUnknownGrandpa ImportedHeaders (r:1 w:0) /// - /// Proof: BridgeRialtoGrandpa ImportedHeaders (max_values: Some(14400), max_size: Some(68), + /// Proof: BridgeUnknownGrandpa ImportedHeaders (max_values: Some(14400), max_size: Some(68), /// added: 2048, mode: MaxEncodedLen) /// - /// Storage: BridgeRialtoMessages OutboundLanes (r:1 w:1) + /// Storage: BridgeUnknownMessages OutboundLanes (r:1 w:1) /// - /// Proof: BridgeRialtoMessages OutboundLanes (max_values: Some(1), max_size: Some(44), 
added: + /// Proof: BridgeUnknownMessages OutboundLanes (max_values: Some(1), max_size: Some(44), added: /// 539, mode: MaxEncodedLen) /// /// Storage: BridgeRelayers RelayerRewards (r:1 w:1) @@ -237,19 +237,19 @@ impl WeightInfo for BridgeWeight { .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } - /// Storage: BridgeRialtoMessages PalletOperatingMode (r:1 w:0) + /// Storage: BridgeUnknownMessages PalletOperatingMode (r:1 w:0) /// - /// Proof: BridgeRialtoMessages PalletOperatingMode (max_values: Some(1), max_size: Some(2), + /// Proof: BridgeUnknownMessages PalletOperatingMode (max_values: Some(1), max_size: Some(2), /// added: 497, mode: MaxEncodedLen) /// - /// Storage: BridgeRialtoGrandpa ImportedHeaders (r:1 w:0) + /// Storage: BridgeUnknownGrandpa ImportedHeaders (r:1 w:0) /// - /// Proof: BridgeRialtoGrandpa ImportedHeaders (max_values: Some(14400), max_size: Some(68), + /// Proof: BridgeUnknownGrandpa ImportedHeaders (max_values: Some(14400), max_size: Some(68), /// added: 2048, mode: MaxEncodedLen) /// - /// Storage: BridgeRialtoMessages OutboundLanes (r:1 w:1) + /// Storage: BridgeUnknownMessages OutboundLanes (r:1 w:1) /// - /// Proof: BridgeRialtoMessages OutboundLanes (max_values: Some(1), max_size: Some(44), added: + /// Proof: BridgeUnknownMessages OutboundLanes (max_values: Some(1), max_size: Some(44), added: /// 539, mode: MaxEncodedLen) /// /// Storage: BridgeRelayers RelayerRewards (r:2 w:2) @@ -265,19 +265,19 @@ impl WeightInfo for BridgeWeight { .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } - /// Storage: BridgeRialtoMessages PalletOperatingMode (r:1 w:0) + /// Storage: BridgeUnknownMessages PalletOperatingMode (r:1 w:0) /// - /// Proof: BridgeRialtoMessages PalletOperatingMode (max_values: Some(1), max_size: Some(2), + /// Proof: BridgeUnknownMessages PalletOperatingMode (max_values: Some(1), max_size: Some(2), /// added: 497, mode: MaxEncodedLen) /// - /// Storage: BridgeRialtoGrandpa ImportedHeaders (r:1 w:0) + /// Storage: BridgeUnknownGrandpa ImportedHeaders (r:1 w:0) /// - /// Proof: BridgeRialtoGrandpa ImportedHeaders (max_values: Some(14400), max_size: Some(68), + /// Proof: BridgeUnknownGrandpa ImportedHeaders (max_values: Some(14400), max_size: Some(68), /// added: 2048, mode: MaxEncodedLen) /// - /// Storage: BridgeRialtoMessages InboundLanes (r:1 w:1) + /// Storage: BridgeUnknownMessages InboundLanes (r:1 w:1) /// - /// Proof: BridgeRialtoMessages InboundLanes (max_values: None, max_size: Some(49180), added: + /// Proof: BridgeUnknownMessages InboundLanes (max_values: None, max_size: Some(49180), added: /// 51655, mode: MaxEncodedLen) /// /// The range of component `i` is `[128, 2048]`. 
@@ -296,19 +296,19 @@ impl WeightInfo for BridgeWeight { // For backwards compatibility and tests impl WeightInfo for () { - /// Storage: BridgeRialtoMessages PalletOperatingMode (r:1 w:0) + /// Storage: BridgeUnknownMessages PalletOperatingMode (r:1 w:0) /// - /// Proof: BridgeRialtoMessages PalletOperatingMode (max_values: Some(1), max_size: Some(2), + /// Proof: BridgeUnknownMessages PalletOperatingMode (max_values: Some(1), max_size: Some(2), /// added: 497, mode: MaxEncodedLen) /// - /// Storage: BridgeRialtoGrandpa ImportedHeaders (r:1 w:0) + /// Storage: BridgeUnknownGrandpa ImportedHeaders (r:1 w:0) /// - /// Proof: BridgeRialtoGrandpa ImportedHeaders (max_values: Some(14400), max_size: Some(68), + /// Proof: BridgeUnknownGrandpa ImportedHeaders (max_values: Some(14400), max_size: Some(68), /// added: 2048, mode: MaxEncodedLen) /// - /// Storage: BridgeRialtoMessages InboundLanes (r:1 w:1) + /// Storage: BridgeUnknownMessages InboundLanes (r:1 w:1) /// - /// Proof: BridgeRialtoMessages InboundLanes (max_values: None, max_size: Some(49180), added: + /// Proof: BridgeUnknownMessages InboundLanes (max_values: None, max_size: Some(49180), added: /// 51655, mode: MaxEncodedLen) fn receive_single_message_proof() -> Weight { // Proof Size summary in bytes: @@ -319,19 +319,19 @@ impl WeightInfo for () { .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: BridgeRialtoMessages PalletOperatingMode (r:1 w:0) + /// Storage: BridgeUnknownMessages PalletOperatingMode (r:1 w:0) /// - /// Proof: BridgeRialtoMessages PalletOperatingMode (max_values: Some(1), max_size: Some(2), + /// Proof: BridgeUnknownMessages PalletOperatingMode (max_values: Some(1), max_size: Some(2), /// added: 497, mode: MaxEncodedLen) /// - /// Storage: BridgeRialtoGrandpa ImportedHeaders (r:1 w:0) + /// Storage: BridgeUnknownGrandpa ImportedHeaders (r:1 w:0) /// - /// Proof: BridgeRialtoGrandpa ImportedHeaders (max_values: Some(14400), max_size: Some(68), + /// Proof: BridgeUnknownGrandpa ImportedHeaders (max_values: Some(14400), max_size: Some(68), /// added: 2048, mode: MaxEncodedLen) /// - /// Storage: BridgeRialtoMessages InboundLanes (r:1 w:1) + /// Storage: BridgeUnknownMessages InboundLanes (r:1 w:1) /// - /// Proof: BridgeRialtoMessages InboundLanes (max_values: None, max_size: Some(49180), added: + /// Proof: BridgeUnknownMessages InboundLanes (max_values: None, max_size: Some(49180), added: /// 51655, mode: MaxEncodedLen) fn receive_two_messages_proof() -> Weight { // Proof Size summary in bytes: @@ -342,19 +342,19 @@ impl WeightInfo for () { .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: BridgeRialtoMessages PalletOperatingMode (r:1 w:0) + /// Storage: BridgeUnknownMessages PalletOperatingMode (r:1 w:0) /// - /// Proof: BridgeRialtoMessages PalletOperatingMode (max_values: Some(1), max_size: Some(2), + /// Proof: BridgeUnknownMessages PalletOperatingMode (max_values: Some(1), max_size: Some(2), /// added: 497, mode: MaxEncodedLen) /// - /// Storage: BridgeRialtoGrandpa ImportedHeaders (r:1 w:0) + /// Storage: BridgeUnknownGrandpa ImportedHeaders (r:1 w:0) /// - /// Proof: BridgeRialtoGrandpa ImportedHeaders (max_values: Some(14400), max_size: Some(68), + /// Proof: BridgeUnknownGrandpa ImportedHeaders (max_values: Some(14400), max_size: Some(68), /// added: 2048, mode: MaxEncodedLen) /// - /// Storage: BridgeRialtoMessages InboundLanes (r:1 w:1) + /// Storage: 
BridgeUnknownMessages InboundLanes (r:1 w:1) /// - /// Proof: BridgeRialtoMessages InboundLanes (max_values: None, max_size: Some(49180), added: + /// Proof: BridgeUnknownMessages InboundLanes (max_values: None, max_size: Some(49180), added: /// 51655, mode: MaxEncodedLen) fn receive_single_message_proof_with_outbound_lane_state() -> Weight { // Proof Size summary in bytes: @@ -365,19 +365,19 @@ impl WeightInfo for () { .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: BridgeRialtoMessages PalletOperatingMode (r:1 w:0) + /// Storage: BridgeUnknownMessages PalletOperatingMode (r:1 w:0) /// - /// Proof: BridgeRialtoMessages PalletOperatingMode (max_values: Some(1), max_size: Some(2), + /// Proof: BridgeUnknownMessages PalletOperatingMode (max_values: Some(1), max_size: Some(2), /// added: 497, mode: MaxEncodedLen) /// - /// Storage: BridgeRialtoGrandpa ImportedHeaders (r:1 w:0) + /// Storage: BridgeUnknownGrandpa ImportedHeaders (r:1 w:0) /// - /// Proof: BridgeRialtoGrandpa ImportedHeaders (max_values: Some(14400), max_size: Some(68), + /// Proof: BridgeUnknownGrandpa ImportedHeaders (max_values: Some(14400), max_size: Some(68), /// added: 2048, mode: MaxEncodedLen) /// - /// Storage: BridgeRialtoMessages InboundLanes (r:1 w:1) + /// Storage: BridgeUnknownMessages InboundLanes (r:1 w:1) /// - /// Proof: BridgeRialtoMessages InboundLanes (max_values: None, max_size: Some(49180), added: + /// Proof: BridgeUnknownMessages InboundLanes (max_values: None, max_size: Some(49180), added: /// 51655, mode: MaxEncodedLen) fn receive_single_message_proof_1_kb() -> Weight { // Proof Size summary in bytes: @@ -388,19 +388,19 @@ impl WeightInfo for () { .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: BridgeRialtoMessages PalletOperatingMode (r:1 w:0) + /// Storage: BridgeUnknownMessages PalletOperatingMode (r:1 w:0) /// - /// Proof: BridgeRialtoMessages PalletOperatingMode (max_values: Some(1), max_size: Some(2), + /// Proof: BridgeUnknownMessages PalletOperatingMode (max_values: Some(1), max_size: Some(2), /// added: 497, mode: MaxEncodedLen) /// - /// Storage: BridgeRialtoGrandpa ImportedHeaders (r:1 w:0) + /// Storage: BridgeUnknownGrandpa ImportedHeaders (r:1 w:0) /// - /// Proof: BridgeRialtoGrandpa ImportedHeaders (max_values: Some(14400), max_size: Some(68), + /// Proof: BridgeUnknownGrandpa ImportedHeaders (max_values: Some(14400), max_size: Some(68), /// added: 2048, mode: MaxEncodedLen) /// - /// Storage: BridgeRialtoMessages InboundLanes (r:1 w:1) + /// Storage: BridgeUnknownMessages InboundLanes (r:1 w:1) /// - /// Proof: BridgeRialtoMessages InboundLanes (max_values: None, max_size: Some(49180), added: + /// Proof: BridgeUnknownMessages InboundLanes (max_values: None, max_size: Some(49180), added: /// 51655, mode: MaxEncodedLen) fn receive_single_message_proof_16_kb() -> Weight { // Proof Size summary in bytes: @@ -411,19 +411,19 @@ impl WeightInfo for () { .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: BridgeRialtoMessages PalletOperatingMode (r:1 w:0) + /// Storage: BridgeUnknownMessages PalletOperatingMode (r:1 w:0) /// - /// Proof: BridgeRialtoMessages PalletOperatingMode (max_values: Some(1), max_size: Some(2), + /// Proof: BridgeUnknownMessages PalletOperatingMode (max_values: Some(1), max_size: Some(2), /// added: 497, mode: MaxEncodedLen) /// - /// Storage: 
BridgeRialtoGrandpa ImportedHeaders (r:1 w:0) + /// Storage: BridgeUnknownGrandpa ImportedHeaders (r:1 w:0) /// - /// Proof: BridgeRialtoGrandpa ImportedHeaders (max_values: Some(14400), max_size: Some(68), + /// Proof: BridgeUnknownGrandpa ImportedHeaders (max_values: Some(14400), max_size: Some(68), /// added: 2048, mode: MaxEncodedLen) /// - /// Storage: BridgeRialtoMessages OutboundLanes (r:1 w:1) + /// Storage: BridgeUnknownMessages OutboundLanes (r:1 w:1) /// - /// Proof: BridgeRialtoMessages OutboundLanes (max_values: Some(1), max_size: Some(44), added: + /// Proof: BridgeUnknownMessages OutboundLanes (max_values: Some(1), max_size: Some(44), added: /// 539, mode: MaxEncodedLen) /// /// Storage: BridgeRelayers RelayerRewards (r:1 w:1) @@ -439,19 +439,19 @@ impl WeightInfo for () { .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } - /// Storage: BridgeRialtoMessages PalletOperatingMode (r:1 w:0) + /// Storage: BridgeUnknownMessages PalletOperatingMode (r:1 w:0) /// - /// Proof: BridgeRialtoMessages PalletOperatingMode (max_values: Some(1), max_size: Some(2), + /// Proof: BridgeUnknownMessages PalletOperatingMode (max_values: Some(1), max_size: Some(2), /// added: 497, mode: MaxEncodedLen) /// - /// Storage: BridgeRialtoGrandpa ImportedHeaders (r:1 w:0) + /// Storage: BridgeUnknownGrandpa ImportedHeaders (r:1 w:0) /// - /// Proof: BridgeRialtoGrandpa ImportedHeaders (max_values: Some(14400), max_size: Some(68), + /// Proof: BridgeUnknownGrandpa ImportedHeaders (max_values: Some(14400), max_size: Some(68), /// added: 2048, mode: MaxEncodedLen) /// - /// Storage: BridgeRialtoMessages OutboundLanes (r:1 w:1) + /// Storage: BridgeUnknownMessages OutboundLanes (r:1 w:1) /// - /// Proof: BridgeRialtoMessages OutboundLanes (max_values: Some(1), max_size: Some(44), added: + /// Proof: BridgeUnknownMessages OutboundLanes (max_values: Some(1), max_size: Some(44), added: /// 539, mode: MaxEncodedLen) /// /// Storage: BridgeRelayers RelayerRewards (r:1 w:1) @@ -467,19 +467,19 @@ impl WeightInfo for () { .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } - /// Storage: BridgeRialtoMessages PalletOperatingMode (r:1 w:0) + /// Storage: BridgeUnknownMessages PalletOperatingMode (r:1 w:0) /// - /// Proof: BridgeRialtoMessages PalletOperatingMode (max_values: Some(1), max_size: Some(2), + /// Proof: BridgeUnknownMessages PalletOperatingMode (max_values: Some(1), max_size: Some(2), /// added: 497, mode: MaxEncodedLen) /// - /// Storage: BridgeRialtoGrandpa ImportedHeaders (r:1 w:0) + /// Storage: BridgeUnknownGrandpa ImportedHeaders (r:1 w:0) /// - /// Proof: BridgeRialtoGrandpa ImportedHeaders (max_values: Some(14400), max_size: Some(68), + /// Proof: BridgeUnknownGrandpa ImportedHeaders (max_values: Some(14400), max_size: Some(68), /// added: 2048, mode: MaxEncodedLen) /// - /// Storage: BridgeRialtoMessages OutboundLanes (r:1 w:1) + /// Storage: BridgeUnknownMessages OutboundLanes (r:1 w:1) /// - /// Proof: BridgeRialtoMessages OutboundLanes (max_values: Some(1), max_size: Some(44), added: + /// Proof: BridgeUnknownMessages OutboundLanes (max_values: Some(1), max_size: Some(44), added: /// 539, mode: MaxEncodedLen) /// /// Storage: BridgeRelayers RelayerRewards (r:2 w:2) @@ -495,19 +495,19 @@ impl WeightInfo for () { .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } - /// Storage: BridgeRialtoMessages PalletOperatingMode (r:1 w:0) + /// 
Storage: BridgeUnknownMessages PalletOperatingMode (r:1 w:0) /// - /// Proof: BridgeRialtoMessages PalletOperatingMode (max_values: Some(1), max_size: Some(2), + /// Proof: BridgeUnknownMessages PalletOperatingMode (max_values: Some(1), max_size: Some(2), /// added: 497, mode: MaxEncodedLen) /// - /// Storage: BridgeRialtoGrandpa ImportedHeaders (r:1 w:0) + /// Storage: BridgeUnknownGrandpa ImportedHeaders (r:1 w:0) /// - /// Proof: BridgeRialtoGrandpa ImportedHeaders (max_values: Some(14400), max_size: Some(68), + /// Proof: BridgeUnknownGrandpa ImportedHeaders (max_values: Some(14400), max_size: Some(68), /// added: 2048, mode: MaxEncodedLen) /// - /// Storage: BridgeRialtoMessages InboundLanes (r:1 w:1) + /// Storage: BridgeUnknownMessages InboundLanes (r:1 w:1) /// - /// Proof: BridgeRialtoMessages InboundLanes (max_values: None, max_size: Some(49180), added: + /// Proof: BridgeUnknownMessages InboundLanes (max_values: None, max_size: Some(49180), added: /// 51655, mode: MaxEncodedLen) /// /// The range of component `i` is `[128, 2048]`. diff --git a/bridges/modules/parachains/src/mock.rs b/bridges/modules/parachains/src/mock.rs index 14afe384171..d95e76f3108 100644 --- a/bridges/modules/parachains/src/mock.rs +++ b/bridges/modules/parachains/src/mock.rs @@ -17,17 +17,18 @@ use bp_header_chain::ChainWithGrandpa; use bp_polkadot_core::parachains::ParaId; use bp_runtime::{Chain, Parachain}; -use frame_support::{construct_runtime, parameter_types, traits::ConstU32, weights::Weight}; +use frame_support::{ + construct_runtime, derive_impl, parameter_types, traits::ConstU32, weights::Weight, +}; use sp_runtime::{ testing::H256, - traits::{BlakeTwo256, Header as HeaderT, IdentityLookup}, - MultiSignature, Perbill, + traits::{BlakeTwo256, Header as HeaderT}, + MultiSignature, }; use crate as pallet_bridge_parachains; pub type AccountId = u64; -pub type TestNumber = u64; pub type RelayBlockHeader = sp_runtime::generic::Header; @@ -152,42 +153,12 @@ construct_runtime! { } } -parameter_types! { - pub const BlockHashCount: TestNumber = 250; - pub const MaximumBlockWeight: Weight = Weight::from_parts(1024, 0); - pub const MaximumBlockLength: u32 = 2 * 1024; - pub const AvailableBlockRatio: Perbill = Perbill::one(); -} - +#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] impl frame_system::Config for TestRuntime { - type RuntimeOrigin = RuntimeOrigin; - type Nonce = u64; - type RuntimeCall = RuntimeCall; type Block = Block; - type Hash = H256; - type Hashing = RegularParachainHasher; - type AccountId = AccountId; - type Lookup = IdentityLookup; - type RuntimeEvent = RuntimeEvent; - type BlockHashCount = BlockHashCount; - type Version = (); - type PalletInfo = PalletInfo; - type AccountData = (); - type OnNewAccount = (); - type OnKilledAccount = (); - type BaseCallFilter = frame_support::traits::Everything; - type SystemWeightInfo = (); - type DbWeight = (); - type BlockWeights = (); - type BlockLength = (); - type SS58Prefix = (); - type OnSetCode = (); - type MaxConsumers = ConstU32<16>; } parameter_types! { - pub const SessionLength: u64 = 5; - pub const NumValidators: u32 = 5; pub const HeadersToKeep: u32 = 5; } diff --git a/bridges/modules/parachains/src/weights.rs b/bridges/modules/parachains/src/weights.rs index 9182ec46611..abddc876894 100644 --- a/bridges/modules/parachains/src/weights.rs +++ b/bridges/modules/parachains/src/weights.rs @@ -23,7 +23,7 @@ //! 
EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 // Executed Command: -// target/release/millau-bridge-node +// target/release/unknown-bridge-node // benchmark // pallet // --chain=dev @@ -60,29 +60,29 @@ pub trait WeightInfo { /// Those weights are test only and must never be used in production. pub struct BridgeWeight(PhantomData); impl WeightInfo for BridgeWeight { - /// Storage: BridgeRialtoParachains PalletOperatingMode (r:1 w:0) + /// Storage: BridgeUnknownParachains PalletOperatingMode (r:1 w:0) /// - /// Proof: BridgeRialtoParachains PalletOperatingMode (max_values: Some(1), max_size: Some(1), + /// Proof: BridgeUnknownParachains PalletOperatingMode (max_values: Some(1), max_size: Some(1), /// added: 496, mode: MaxEncodedLen) /// - /// Storage: BridgeRialtoGrandpa ImportedHeaders (r:1 w:0) + /// Storage: BridgeUnknownGrandpa ImportedHeaders (r:1 w:0) /// - /// Proof: BridgeRialtoGrandpa ImportedHeaders (max_values: Some(14400), max_size: Some(68), + /// Proof: BridgeUnknownGrandpa ImportedHeaders (max_values: Some(14400), max_size: Some(68), /// added: 2048, mode: MaxEncodedLen) /// - /// Storage: BridgeRialtoParachains ParasInfo (r:1 w:1) + /// Storage: BridgeUnknownParachains ParasInfo (r:1 w:1) /// - /// Proof: BridgeRialtoParachains ParasInfo (max_values: Some(1), max_size: Some(60), added: + /// Proof: BridgeUnknownParachains ParasInfo (max_values: Some(1), max_size: Some(60), added: /// 555, mode: MaxEncodedLen) /// - /// Storage: BridgeRialtoParachains ImportedParaHashes (r:1 w:1) + /// Storage: BridgeUnknownParachains ImportedParaHashes (r:1 w:1) /// - /// Proof: BridgeRialtoParachains ImportedParaHashes (max_values: Some(1024), max_size: + /// Proof: BridgeUnknownParachains ImportedParaHashes (max_values: Some(1024), max_size: /// Some(64), added: 1549, mode: MaxEncodedLen) /// - /// Storage: BridgeRialtoParachains ImportedParaHeads (r:0 w:1) + /// Storage: BridgeUnknownParachains ImportedParaHeads (r:0 w:1) /// - /// Proof: BridgeRialtoParachains ImportedParaHeads (max_values: Some(1024), max_size: + /// Proof: BridgeUnknownParachains ImportedParaHeads (max_values: Some(1024), max_size: /// Some(196), added: 1681, mode: MaxEncodedLen) /// /// The range of component `p` is `[1, 2]`. 
@@ -97,29 +97,29 @@ impl WeightInfo for BridgeWeight { .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } - /// Storage: BridgeRialtoParachains PalletOperatingMode (r:1 w:0) + /// Storage: BridgeUnknownParachains PalletOperatingMode (r:1 w:0) /// - /// Proof: BridgeRialtoParachains PalletOperatingMode (max_values: Some(1), max_size: Some(1), + /// Proof: BridgeUnknownParachains PalletOperatingMode (max_values: Some(1), max_size: Some(1), /// added: 496, mode: MaxEncodedLen) /// - /// Storage: BridgeRialtoGrandpa ImportedHeaders (r:1 w:0) + /// Storage: BridgeUnknownGrandpa ImportedHeaders (r:1 w:0) /// - /// Proof: BridgeRialtoGrandpa ImportedHeaders (max_values: Some(14400), max_size: Some(68), + /// Proof: BridgeUnknownGrandpa ImportedHeaders (max_values: Some(14400), max_size: Some(68), /// added: 2048, mode: MaxEncodedLen) /// - /// Storage: BridgeRialtoParachains ParasInfo (r:1 w:1) + /// Storage: BridgeUnknownParachains ParasInfo (r:1 w:1) /// - /// Proof: BridgeRialtoParachains ParasInfo (max_values: Some(1), max_size: Some(60), added: + /// Proof: BridgeUnknownParachains ParasInfo (max_values: Some(1), max_size: Some(60), added: /// 555, mode: MaxEncodedLen) /// - /// Storage: BridgeRialtoParachains ImportedParaHashes (r:1 w:1) + /// Storage: BridgeUnknownParachains ImportedParaHashes (r:1 w:1) /// - /// Proof: BridgeRialtoParachains ImportedParaHashes (max_values: Some(1024), max_size: + /// Proof: BridgeUnknownParachains ImportedParaHashes (max_values: Some(1024), max_size: /// Some(64), added: 1549, mode: MaxEncodedLen) /// - /// Storage: BridgeRialtoParachains ImportedParaHeads (r:0 w:1) + /// Storage: BridgeUnknownParachains ImportedParaHeads (r:0 w:1) /// - /// Proof: BridgeRialtoParachains ImportedParaHeads (max_values: Some(1024), max_size: + /// Proof: BridgeUnknownParachains ImportedParaHeads (max_values: Some(1024), max_size: /// Some(196), added: 1681, mode: MaxEncodedLen) fn submit_parachain_heads_with_1kb_proof() -> Weight { // Proof Size summary in bytes: @@ -130,29 +130,29 @@ impl WeightInfo for BridgeWeight { .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } - /// Storage: BridgeRialtoParachains PalletOperatingMode (r:1 w:0) + /// Storage: BridgeUnknownParachains PalletOperatingMode (r:1 w:0) /// - /// Proof: BridgeRialtoParachains PalletOperatingMode (max_values: Some(1), max_size: Some(1), + /// Proof: BridgeUnknownParachains PalletOperatingMode (max_values: Some(1), max_size: Some(1), /// added: 496, mode: MaxEncodedLen) /// - /// Storage: BridgeRialtoGrandpa ImportedHeaders (r:1 w:0) + /// Storage: BridgeUnknownGrandpa ImportedHeaders (r:1 w:0) /// - /// Proof: BridgeRialtoGrandpa ImportedHeaders (max_values: Some(14400), max_size: Some(68), + /// Proof: BridgeUnknownGrandpa ImportedHeaders (max_values: Some(14400), max_size: Some(68), /// added: 2048, mode: MaxEncodedLen) /// - /// Storage: BridgeRialtoParachains ParasInfo (r:1 w:1) + /// Storage: BridgeUnknownParachains ParasInfo (r:1 w:1) /// - /// Proof: BridgeRialtoParachains ParasInfo (max_values: Some(1), max_size: Some(60), added: + /// Proof: BridgeUnknownParachains ParasInfo (max_values: Some(1), max_size: Some(60), added: /// 555, mode: MaxEncodedLen) /// - /// Storage: BridgeRialtoParachains ImportedParaHashes (r:1 w:1) + /// Storage: BridgeUnknownParachains ImportedParaHashes (r:1 w:1) /// - /// Proof: BridgeRialtoParachains ImportedParaHashes (max_values: Some(1024), max_size: + /// Proof: 
BridgeUnknownParachains ImportedParaHashes (max_values: Some(1024), max_size: /// Some(64), added: 1549, mode: MaxEncodedLen) /// - /// Storage: BridgeRialtoParachains ImportedParaHeads (r:0 w:1) + /// Storage: BridgeUnknownParachains ImportedParaHeads (r:0 w:1) /// - /// Proof: BridgeRialtoParachains ImportedParaHeads (max_values: Some(1024), max_size: + /// Proof: BridgeUnknownParachains ImportedParaHeads (max_values: Some(1024), max_size: /// Some(196), added: 1681, mode: MaxEncodedLen) fn submit_parachain_heads_with_16kb_proof() -> Weight { // Proof Size summary in bytes: @@ -167,29 +167,29 @@ impl WeightInfo for BridgeWeight { // For backwards compatibility and tests impl WeightInfo for () { - /// Storage: BridgeRialtoParachains PalletOperatingMode (r:1 w:0) + /// Storage: BridgeUnknownParachains PalletOperatingMode (r:1 w:0) /// - /// Proof: BridgeRialtoParachains PalletOperatingMode (max_values: Some(1), max_size: Some(1), + /// Proof: BridgeUnknownParachains PalletOperatingMode (max_values: Some(1), max_size: Some(1), /// added: 496, mode: MaxEncodedLen) /// - /// Storage: BridgeRialtoGrandpa ImportedHeaders (r:1 w:0) + /// Storage: BridgeUnknownGrandpa ImportedHeaders (r:1 w:0) /// - /// Proof: BridgeRialtoGrandpa ImportedHeaders (max_values: Some(14400), max_size: Some(68), + /// Proof: BridgeUnknownGrandpa ImportedHeaders (max_values: Some(14400), max_size: Some(68), /// added: 2048, mode: MaxEncodedLen) /// - /// Storage: BridgeRialtoParachains ParasInfo (r:1 w:1) + /// Storage: BridgeUnknownParachains ParasInfo (r:1 w:1) /// - /// Proof: BridgeRialtoParachains ParasInfo (max_values: Some(1), max_size: Some(60), added: + /// Proof: BridgeUnknownParachains ParasInfo (max_values: Some(1), max_size: Some(60), added: /// 555, mode: MaxEncodedLen) /// - /// Storage: BridgeRialtoParachains ImportedParaHashes (r:1 w:1) + /// Storage: BridgeUnknownParachains ImportedParaHashes (r:1 w:1) /// - /// Proof: BridgeRialtoParachains ImportedParaHashes (max_values: Some(1024), max_size: + /// Proof: BridgeUnknownParachains ImportedParaHashes (max_values: Some(1024), max_size: /// Some(64), added: 1549, mode: MaxEncodedLen) /// - /// Storage: BridgeRialtoParachains ImportedParaHeads (r:0 w:1) + /// Storage: BridgeUnknownParachains ImportedParaHeads (r:0 w:1) /// - /// Proof: BridgeRialtoParachains ImportedParaHeads (max_values: Some(1024), max_size: + /// Proof: BridgeUnknownParachains ImportedParaHeads (max_values: Some(1024), max_size: /// Some(196), added: 1681, mode: MaxEncodedLen) /// /// The range of component `p` is `[1, 2]`. 
@@ -204,29 +204,29 @@ impl WeightInfo for () { .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } - /// Storage: BridgeRialtoParachains PalletOperatingMode (r:1 w:0) + /// Storage: BridgeUnknownParachains PalletOperatingMode (r:1 w:0) /// - /// Proof: BridgeRialtoParachains PalletOperatingMode (max_values: Some(1), max_size: Some(1), + /// Proof: BridgeUnknownParachains PalletOperatingMode (max_values: Some(1), max_size: Some(1), /// added: 496, mode: MaxEncodedLen) /// - /// Storage: BridgeRialtoGrandpa ImportedHeaders (r:1 w:0) + /// Storage: BridgeUnknownGrandpa ImportedHeaders (r:1 w:0) /// - /// Proof: BridgeRialtoGrandpa ImportedHeaders (max_values: Some(14400), max_size: Some(68), + /// Proof: BridgeUnknownGrandpa ImportedHeaders (max_values: Some(14400), max_size: Some(68), /// added: 2048, mode: MaxEncodedLen) /// - /// Storage: BridgeRialtoParachains ParasInfo (r:1 w:1) + /// Storage: BridgeUnknownParachains ParasInfo (r:1 w:1) /// - /// Proof: BridgeRialtoParachains ParasInfo (max_values: Some(1), max_size: Some(60), added: + /// Proof: BridgeUnknownParachains ParasInfo (max_values: Some(1), max_size: Some(60), added: /// 555, mode: MaxEncodedLen) /// - /// Storage: BridgeRialtoParachains ImportedParaHashes (r:1 w:1) + /// Storage: BridgeUnknownParachains ImportedParaHashes (r:1 w:1) /// - /// Proof: BridgeRialtoParachains ImportedParaHashes (max_values: Some(1024), max_size: + /// Proof: BridgeUnknownParachains ImportedParaHashes (max_values: Some(1024), max_size: /// Some(64), added: 1549, mode: MaxEncodedLen) /// - /// Storage: BridgeRialtoParachains ImportedParaHeads (r:0 w:1) + /// Storage: BridgeUnknownParachains ImportedParaHeads (r:0 w:1) /// - /// Proof: BridgeRialtoParachains ImportedParaHeads (max_values: Some(1024), max_size: + /// Proof: BridgeUnknownParachains ImportedParaHeads (max_values: Some(1024), max_size: /// Some(196), added: 1681, mode: MaxEncodedLen) fn submit_parachain_heads_with_1kb_proof() -> Weight { // Proof Size summary in bytes: @@ -237,29 +237,29 @@ impl WeightInfo for () { .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } - /// Storage: BridgeRialtoParachains PalletOperatingMode (r:1 w:0) + /// Storage: BridgeUnknownParachains PalletOperatingMode (r:1 w:0) /// - /// Proof: BridgeRialtoParachains PalletOperatingMode (max_values: Some(1), max_size: Some(1), + /// Proof: BridgeUnknownParachains PalletOperatingMode (max_values: Some(1), max_size: Some(1), /// added: 496, mode: MaxEncodedLen) /// - /// Storage: BridgeRialtoGrandpa ImportedHeaders (r:1 w:0) + /// Storage: BridgeUnknownGrandpa ImportedHeaders (r:1 w:0) /// - /// Proof: BridgeRialtoGrandpa ImportedHeaders (max_values: Some(14400), max_size: Some(68), + /// Proof: BridgeUnknownGrandpa ImportedHeaders (max_values: Some(14400), max_size: Some(68), /// added: 2048, mode: MaxEncodedLen) /// - /// Storage: BridgeRialtoParachains ParasInfo (r:1 w:1) + /// Storage: BridgeUnknownParachains ParasInfo (r:1 w:1) /// - /// Proof: BridgeRialtoParachains ParasInfo (max_values: Some(1), max_size: Some(60), added: + /// Proof: BridgeUnknownParachains ParasInfo (max_values: Some(1), max_size: Some(60), added: /// 555, mode: MaxEncodedLen) /// - /// Storage: BridgeRialtoParachains ImportedParaHashes (r:1 w:1) + /// Storage: BridgeUnknownParachains ImportedParaHashes (r:1 w:1) /// - /// Proof: BridgeRialtoParachains ImportedParaHashes (max_values: Some(1024), max_size: + /// Proof: 
BridgeUnknownParachains ImportedParaHashes (max_values: Some(1024), max_size: /// Some(64), added: 1549, mode: MaxEncodedLen) /// - /// Storage: BridgeRialtoParachains ImportedParaHeads (r:0 w:1) + /// Storage: BridgeUnknownParachains ImportedParaHeads (r:0 w:1) /// - /// Proof: BridgeRialtoParachains ImportedParaHeads (max_values: Some(1024), max_size: + /// Proof: BridgeUnknownParachains ImportedParaHeads (max_values: Some(1024), max_size: /// Some(196), added: 1681, mode: MaxEncodedLen) fn submit_parachain_heads_with_16kb_proof() -> Weight { // Proof Size summary in bytes: diff --git a/bridges/modules/parachains/src/weights_ext.rs b/bridges/modules/parachains/src/weights_ext.rs index 13bc9ad2bbc..393086a8569 100644 --- a/bridges/modules/parachains/src/weights_ext.rs +++ b/bridges/modules/parachains/src/weights_ext.rs @@ -31,7 +31,7 @@ use frame_support::weights::{RuntimeDbWeight, Weight}; pub const DEFAULT_PARACHAIN_HEAD_SIZE: u32 = 384; /// Number of extra bytes (excluding size of storage value itself) of storage proof, built at -/// the Rialto chain. +/// some generic chain. pub const EXTRA_STORAGE_PROOF_SIZE: u32 = 1024; /// Extended weight info. diff --git a/bridges/modules/relayers/Cargo.toml b/bridges/modules/relayers/Cargo.toml index 10b60c3006b..6ec1971e3f6 100644 --- a/bridges/modules/relayers/Cargo.toml +++ b/bridges/modules/relayers/Cargo.toml @@ -30,7 +30,6 @@ sp-std = { path = "../../../substrate/primitives/std", default-features = false [dev-dependencies] bp-runtime = { path = "../../primitives/runtime" } pallet-balances = { path = "../../../substrate/frame/balances" } -sp-core = { path = "../../../substrate/primitives/core" } sp-io = { path = "../../../substrate/primitives/io" } sp-runtime = { path = "../../../substrate/primitives/runtime" } diff --git a/bridges/modules/relayers/src/mock.rs b/bridges/modules/relayers/src/mock.rs index d19d47eec5c..667b10e5c12 100644 --- a/bridges/modules/relayers/src/mock.rs +++ b/bridges/modules/relayers/src/mock.rs @@ -22,12 +22,10 @@ use bp_messages::LaneId; use bp_relayers::{ PayRewardFromAccount, PaymentProcedure, RewardsAccountOwner, RewardsAccountParams, }; -use frame_support::{parameter_types, traits::fungible::Mutate, weights::RuntimeDbWeight}; -use sp_core::H256; -use sp_runtime::{ - traits::{BlakeTwo256, ConstU32, IdentityLookup}, - BuildStorage, +use frame_support::{ + derive_impl, parameter_types, traits::fungible::Mutate, weights::RuntimeDbWeight, }; +use sp_runtime::BuildStorage; pub type AccountId = u64; pub type Balance = u64; @@ -61,47 +59,17 @@ parameter_types! 
{ pub const Lease: BlockNumber = 8; } +#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] impl frame_system::Config for TestRuntime { - type RuntimeOrigin = RuntimeOrigin; - type Nonce = u64; - type RuntimeCall = RuntimeCall; type Block = Block; - type Hash = H256; - type Hashing = BlakeTwo256; - type AccountId = AccountId; - type Lookup = IdentityLookup; - type RuntimeEvent = RuntimeEvent; - type BlockHashCount = frame_support::traits::ConstU64<250>; - type Version = (); - type PalletInfo = PalletInfo; type AccountData = pallet_balances::AccountData; - type OnNewAccount = (); - type OnKilledAccount = (); - type BaseCallFilter = frame_support::traits::Everything; - type SystemWeightInfo = (); - type BlockWeights = (); - type BlockLength = (); type DbWeight = DbWeight; - type SS58Prefix = (); - type OnSetCode = (); - type MaxConsumers = frame_support::traits::ConstU32<16>; } +#[derive_impl(pallet_balances::config_preludes::TestDefaultConfig as pallet_balances::DefaultConfig)] impl pallet_balances::Config for TestRuntime { - type MaxLocks = (); - type Balance = Balance; - type DustRemoval = (); - type RuntimeEvent = RuntimeEvent; - type ExistentialDeposit = ExistentialDeposit; - type AccountStore = frame_system::Pallet; - type WeightInfo = (); - type MaxReserves = ConstU32<1>; type ReserveIdentifier = [u8; 8]; - type RuntimeHoldReason = RuntimeHoldReason; - type RuntimeFreezeReason = RuntimeFreezeReason; - type FreezeIdentifier = (); - type MaxHolds = ConstU32<0>; - type MaxFreezes = ConstU32<0>; + type AccountStore = System; } impl pallet_bridge_relayers::Config for TestRuntime { diff --git a/bridges/modules/relayers/src/weights.rs b/bridges/modules/relayers/src/weights.rs index 2e064a3936d..c2c065b0c0a 100644 --- a/bridges/modules/relayers/src/weights.rs +++ b/bridges/modules/relayers/src/weights.rs @@ -23,7 +23,7 @@ //! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 // Executed Command: -// target/release/millau-bridge-node +// target/release/rip-bridge-node // benchmark // pallet // --chain=dev diff --git a/bridges/modules/xcm-bridge-hub-router/src/mock.rs b/bridges/modules/xcm-bridge-hub-router/src/mock.rs index 2152b4eb28f..2d173ebc045 100644 --- a/bridges/modules/xcm-bridge-hub-router/src/mock.rs +++ b/bridges/modules/xcm-bridge-hub-router/src/mock.rs @@ -19,13 +19,9 @@ use crate as pallet_xcm_bridge_hub_router; use bp_xcm_bridge_hub_router::XcmChannelStatusProvider; -use frame_support::{construct_runtime, parameter_types}; +use frame_support::{construct_runtime, derive_impl, parameter_types}; use frame_system::EnsureRoot; -use sp_core::H256; -use sp_runtime::{ - traits::{BlakeTwo256, ConstU128, IdentityLookup}, - BuildStorage, -}; +use sp_runtime::{traits::ConstU128, BuildStorage}; use xcm::prelude::*; use xcm_builder::{NetworkExportTable, NetworkExportTableItem}; @@ -64,30 +60,9 @@ parameter_types! 
{ ]; } +#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] impl frame_system::Config for TestRuntime { - type RuntimeOrigin = RuntimeOrigin; - type Nonce = u64; - type RuntimeCall = RuntimeCall; type Block = Block; - type Hash = H256; - type Hashing = BlakeTwo256; - type AccountId = AccountId; - type Lookup = IdentityLookup; - type RuntimeEvent = RuntimeEvent; - type BlockHashCount = frame_support::traits::ConstU64<250>; - type Version = (); - type PalletInfo = PalletInfo; - type AccountData = (); - type OnNewAccount = (); - type OnKilledAccount = (); - type BaseCallFilter = frame_support::traits::Everything; - type SystemWeightInfo = (); - type BlockWeights = (); - type BlockLength = (); - type DbWeight = (); - type SS58Prefix = (); - type OnSetCode = (); - type MaxConsumers = frame_support::traits::ConstU32<16>; } impl pallet_xcm_bridge_hub_router::Config<()> for TestRuntime { diff --git a/bridges/modules/xcm-bridge-hub-router/src/weights.rs b/bridges/modules/xcm-bridge-hub-router/src/weights.rs index 62936e997f3..b0c8fc6252c 100644 --- a/bridges/modules/xcm-bridge-hub-router/src/weights.rs +++ b/bridges/modules/xcm-bridge-hub-router/src/weights.rs @@ -23,7 +23,7 @@ //! EXECUTION: , WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 // Executed Command: -// target/release/millau-bridge-node +// target/release/rip-bridge-node // benchmark // pallet // --chain=dev diff --git a/bridges/primitives/chain-asset-hub-kusama/Cargo.toml b/bridges/primitives/chain-asset-hub-kusama/Cargo.toml deleted file mode 100644 index 3e53f9407ff..00000000000 --- a/bridges/primitives/chain-asset-hub-kusama/Cargo.toml +++ /dev/null @@ -1,26 +0,0 @@ -[package] -name = "bp-asset-hub-kusama" -description = "Primitives of AssetHubKusama parachain runtime." -version = "0.1.0" -authors.workspace = true -edition.workspace = true -license = "GPL-3.0-or-later WITH Classpath-exception-2.0" - -[dependencies] -codec = { package = "parity-scale-codec", version = "3.1.5", default-features = false } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } - -# Substrate Dependencies -frame-support = { path = "../../../substrate/frame/support", default-features = false } - -# Bridge Dependencies -bp-xcm-bridge-hub-router = { path = "../xcm-bridge-hub-router", default-features = false } - -[features] -default = [ "std" ] -std = [ - "bp-xcm-bridge-hub-router/std", - "codec/std", - "frame-support/std", - "scale-info/std", -] diff --git a/bridges/primitives/chain-asset-hub-kusama/src/lib.rs b/bridges/primitives/chain-asset-hub-kusama/src/lib.rs deleted file mode 100644 index 94016c1da0c..00000000000 --- a/bridges/primitives/chain-asset-hub-kusama/src/lib.rs +++ /dev/null @@ -1,49 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! 
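Both mock runtimes in this change (for `pallet-bridge-relayers` and `pallet-xcm-bridge-hub-router`) now lean on `derive_impl` instead of spelling out every associated type. A minimal sketch of that pattern, assuming a hypothetical `Test` runtime that only wires up `frame_system`:

```rust
use frame_support::{construct_runtime, derive_impl};
use sp_runtime::BuildStorage;

type Block = frame_system::mocking::MockBlock<Test>;

construct_runtime! {
    pub enum Test {
        System: frame_system,
    }
}

#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)]
impl frame_system::Config for Test {
    // Every associated type not listed here (hashing, lookup, account data, weights, ...)
    // falls back to the `TestDefaultConfig` prelude; only the overrides are spelled out.
    type Block = Block;
}

fn main() {
    // Build genesis storage for the mock runtime to check that the config is complete.
    let storage = frame_system::GenesisConfig::<Test>::default().build_storage().unwrap();
    let _ext = sp_io::TestExternalities::new(storage);
}
```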
Module with configuration which reflects AssetHubKusama runtime setup. - -#![cfg_attr(not(feature = "std"), no_std)] - -use codec::{Decode, Encode}; -use scale_info::TypeInfo; - -pub use bp_xcm_bridge_hub_router::XcmBridgeHubRouterCall; - -/// `AssetHubKusama` Runtime `Call` enum. -/// -/// The enum represents a subset of possible `Call`s we can send to `AssetHubKusama` chain. -/// Ideally this code would be auto-generated from metadata, because we want to -/// avoid depending directly on the ENTIRE runtime just to get the encoding of `Dispatchable`s. -/// -/// All entries here (like pretty much in the entire file) must be kept in sync with -/// `AssetHubKusama` `construct_runtime`, so that we maintain SCALE-compatibility. -#[allow(clippy::large_enum_variant)] -#[derive(Encode, Decode, Debug, PartialEq, Eq, Clone, TypeInfo)] -pub enum Call { - /// `ToPolkadotXcmRouter` bridge pallet. - #[codec(index = 43)] - ToPolkadotXcmRouter(XcmBridgeHubRouterCall), -} - -frame_support::parameter_types! { - /// Some sane weight to execute `xcm::Transact(pallet-xcm-bridge-hub-router::Call::report_bridge_status)`. - pub const XcmBridgeHubRouterTransactCallMaxWeight: frame_support::weights::Weight = frame_support::weights::Weight::from_parts(200_000_000, 6144); - - /// Base delivery fee to `BridgeHubKusama`. - /// (initially was calculated `170733333` + `10%` by test `BridgeHubKusama::can_calculate_weight_for_paid_export_message_with_reserve_transfer`) - pub const BridgeHubKusamaBaseFeeInDots: u128 = 187806666; -} diff --git a/bridges/primitives/chain-asset-hub-polkadot/Cargo.toml b/bridges/primitives/chain-asset-hub-polkadot/Cargo.toml deleted file mode 100644 index 9c1b1a1f326..00000000000 --- a/bridges/primitives/chain-asset-hub-polkadot/Cargo.toml +++ /dev/null @@ -1,28 +0,0 @@ -[package] -name = "bp-asset-hub-polkadot" -description = "Primitives of AssetHubPolkadot parachain runtime." -version = "0.1.0" -authors.workspace = true -edition.workspace = true -license = "GPL-3.0-or-later WITH Classpath-exception-2.0" - -[dependencies] -codec = { package = "parity-scale-codec", version = "3.1.5", default-features = false } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } - -# Substrate Dependencies -frame-support = { path = "../../../substrate/frame/support", default-features = false } -sp-runtime = { path = "../../../substrate/primitives/runtime", default-features = false } - -# Bridge Dependencies -bp-xcm-bridge-hub-router = { path = "../xcm-bridge-hub-router", default-features = false } - -[features] -default = [ "std" ] -std = [ - "bp-xcm-bridge-hub-router/std", - "codec/std", - "frame-support/std", - "scale-info/std", - "sp-runtime/std", -] diff --git a/bridges/primitives/chain-asset-hub-polkadot/src/lib.rs b/bridges/primitives/chain-asset-hub-polkadot/src/lib.rs deleted file mode 100644 index 486fba60e1f..00000000000 --- a/bridges/primitives/chain-asset-hub-polkadot/src/lib.rs +++ /dev/null @@ -1,49 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Module with configuration which reflects AssetHubPolkadot runtime setup. - -#![cfg_attr(not(feature = "std"), no_std)] - -use codec::{Decode, Encode}; -use scale_info::TypeInfo; - -pub use bp_xcm_bridge_hub_router::XcmBridgeHubRouterCall; - -/// `AssetHubPolkadot` Runtime `Call` enum. -/// -/// The enum represents a subset of possible `Call`s we can send to `AssetHubPolkadot` chain. -/// Ideally this code would be auto-generated from metadata, because we want to -/// avoid depending directly on the ENTIRE runtime just to get the encoding of `Dispatchable`s. -/// -/// All entries here (like pretty much in the entire file) must be kept in sync with -/// `AssetHubPolkadot` `construct_runtime`, so that we maintain SCALE-compatibility. -#[allow(clippy::large_enum_variant)] -#[derive(Encode, Decode, Debug, PartialEq, Eq, Clone, TypeInfo)] -pub enum Call { - /// `ToKusamaXcmRouter` bridge pallet. - #[codec(index = 43)] - ToKusamaXcmRouter(XcmBridgeHubRouterCall), -} - -frame_support::parameter_types! { - /// Some sane weight to execute `xcm::Transact(pallet-xcm-bridge-hub-router::Call::report_bridge_status)`. - pub const XcmBridgeHubRouterTransactCallMaxWeight: frame_support::weights::Weight = frame_support::weights::Weight::from_parts(200_000_000, 6144); - - /// Base delivery fee to `BridgeHubPolkadot`. - /// (initially was calculated `51220000` + `10%` by test `BridgeHubPolkadot::can_calculate_weight_for_paid_export_message_with_reserve_transfer`) - pub const BridgeHubPolkadotBaseFeeInDots: u128 = 56342000; -} diff --git a/bridges/primitives/chain-asset-hub-rococo/src/lib.rs b/bridges/primitives/chain-asset-hub-rococo/src/lib.rs index 6216b24d75c..de2e9ae856d 100644 --- a/bridges/primitives/chain-asset-hub-rococo/src/lib.rs +++ b/bridges/primitives/chain-asset-hub-rococo/src/lib.rs @@ -34,9 +34,6 @@ pub use bp_xcm_bridge_hub_router::XcmBridgeHubRouterCall; #[allow(clippy::large_enum_variant)] #[derive(Encode, Decode, Debug, PartialEq, Eq, Clone, TypeInfo)] pub enum Call { - /// `ToWococoXcmRouter` bridge pallet. - #[codec(index = 43)] - ToWococoXcmRouter(XcmBridgeHubRouterCall), /// `ToWestendXcmRouter` bridge pallet. #[codec(index = 45)] ToWestendXcmRouter(XcmBridgeHubRouterCall), diff --git a/bridges/primitives/chain-asset-hub-wococo/Cargo.toml b/bridges/primitives/chain-asset-hub-wococo/Cargo.toml deleted file mode 100644 index e1a5a262157..00000000000 --- a/bridges/primitives/chain-asset-hub-wococo/Cargo.toml +++ /dev/null @@ -1,26 +0,0 @@ -[package] -name = "bp-asset-hub-wococo" -description = "Primitives of AssetHubWococo parachain runtime." 
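The deleted `bp-asset-hub-*` crates above all mirror a remote runtime's `Call` enum by hand, pinning each variant with `#[codec(index = ...)]` so the SCALE encoding matches the remote `construct_runtime!` layout without depending on the entire runtime. A small sketch of why the pinned index matters, using hypothetical pallet and call names rather than any real runtime:

```rust
use codec::{Decode, Encode};

/// Hypothetical mirror of a remote runtime `Call`: only the variant we need,
/// pinned to the pallet's index (43 here) in the remote `construct_runtime!`.
#[derive(Encode, Decode, Debug, PartialEq, Eq, Clone)]
pub enum RemoteRuntimeCall {
    #[codec(index = 43)]
    ToSomeXcmRouter(XcmRouterCall),
}

/// Hypothetical mirror of the bridge-router pallet's own `Call` enum.
#[derive(Encode, Decode, Debug, PartialEq, Eq, Clone)]
pub enum XcmRouterCall {
    #[codec(index = 0)]
    ReportBridgeStatus { is_congested: bool },
}

fn main() {
    let call = RemoteRuntimeCall::ToSomeXcmRouter(XcmRouterCall::ReportBridgeStatus {
        is_congested: true,
    });
    // First byte is the pallet index (43 = 0x2b), then the call index, then the payload.
    assert_eq!(call.encode(), vec![0x2b, 0x00, 0x01]);
}
```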
-version = "0.1.0" -authors.workspace = true -edition.workspace = true -license = "GPL-3.0-or-later WITH Classpath-exception-2.0" - -[dependencies] -codec = { package = "parity-scale-codec", version = "3.1.5", default-features = false } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } - -# Substrate Dependencies -frame-support = { path = "../../../substrate/frame/support", default-features = false } - -# Bridge Dependencies -bp-xcm-bridge-hub-router = { path = "../xcm-bridge-hub-router", default-features = false } - -[features] -default = [ "std" ] -std = [ - "bp-xcm-bridge-hub-router/std", - "codec/std", - "frame-support/std", - "scale-info/std", -] diff --git a/bridges/primitives/chain-asset-hub-wococo/src/lib.rs b/bridges/primitives/chain-asset-hub-wococo/src/lib.rs deleted file mode 100644 index c04eb04cce7..00000000000 --- a/bridges/primitives/chain-asset-hub-wococo/src/lib.rs +++ /dev/null @@ -1,48 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Module with configuration which reflects AssetHubWococo runtime setup. - -#![cfg_attr(not(feature = "std"), no_std)] - -use codec::{Decode, Encode}; -use scale_info::TypeInfo; - -pub use bp_xcm_bridge_hub_router::XcmBridgeHubRouterCall; - -/// `AssetHubWococo` Runtime `Call` enum. -/// -/// The enum represents a subset of possible `Call`s we can send to `AssetHubWococo` chain. -/// Ideally this code would be auto-generated from metadata, because we want to -/// avoid depending directly on the ENTIRE runtime just to get the encoding of `Dispatchable`s. -/// -/// All entries here (like pretty much in the entire file) must be kept in sync with -/// `AssetHubWococo` `construct_runtime`, so that we maintain SCALE-compatibility. -#[allow(clippy::large_enum_variant)] -#[derive(Encode, Decode, Debug, PartialEq, Eq, Clone, TypeInfo)] -pub enum Call { - /// `ToRococoXcmRouter` bridge pallet. - #[codec(index = 44)] - ToRococoXcmRouter(XcmBridgeHubRouterCall), -} - -frame_support::parameter_types! { - /// Some sane weight to execute `xcm::Transact(pallet-xcm-bridge-hub-router::Call::report_bridge_status)`. - pub const XcmBridgeHubRouterTransactCallMaxWeight: frame_support::weights::Weight = frame_support::weights::Weight::from_parts(200_000_000, 6144); -} - -/// Identifier of AssetHubWococo in the Wococo relay chain. -pub const ASSET_HUB_WOCOCO_PARACHAIN_ID: u32 = 1000; diff --git a/bridges/primitives/chain-bridge-hub-cumulus/Cargo.toml b/bridges/primitives/chain-bridge-hub-cumulus/Cargo.toml index 24cf7236d45..46697913674 100644 --- a/bridges/primitives/chain-bridge-hub-cumulus/Cargo.toml +++ b/bridges/primitives/chain-bridge-hub-cumulus/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "bp-bridge-hub-cumulus" -description = "Primitives of BridgeHubRococo parachain runtime." 
+description = "Primitives for BridgeHub parachain runtimes." version = "0.1.0" authors.workspace = true edition.workspace = true diff --git a/bridges/primitives/chain-bridge-hub-kusama/Cargo.toml b/bridges/primitives/chain-bridge-hub-kusama/Cargo.toml index 387f5e8ade6..c4cd229ef43 100644 --- a/bridges/primitives/chain-bridge-hub-kusama/Cargo.toml +++ b/bridges/primitives/chain-bridge-hub-kusama/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "bp-bridge-hub-kusama" -description = "Primitives of BridgeHubRococo parachain runtime." +description = "Primitives of BridgeHubKusama parachain runtime." version = "0.1.0" authors.workspace = true edition.workspace = true diff --git a/bridges/primitives/chain-bridge-hub-polkadot/Cargo.toml b/bridges/primitives/chain-bridge-hub-polkadot/Cargo.toml index 40b386e22d2..4913d87e5fb 100644 --- a/bridges/primitives/chain-bridge-hub-polkadot/Cargo.toml +++ b/bridges/primitives/chain-bridge-hub-polkadot/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "bp-bridge-hub-polkadot" -description = "Primitives of BridgeHubWococo parachain runtime." +description = "Primitives of BridgeHubPolkadot parachain runtime." version = "0.1.0" authors.workspace = true edition.workspace = true diff --git a/bridges/primitives/chain-bridge-hub-rococo/src/lib.rs b/bridges/primitives/chain-bridge-hub-rococo/src/lib.rs index e72e711de92..59d293edf1c 100644 --- a/bridges/primitives/chain-bridge-hub-rococo/src/lib.rs +++ b/bridges/primitives/chain-bridge-hub-rococo/src/lib.rs @@ -74,9 +74,6 @@ pub const WITH_BRIDGE_HUB_ROCOCO_MESSAGES_PALLET_NAME: &str = "BridgeRococoMessa /// chains. pub const WITH_BRIDGE_HUB_ROCOCO_RELAYERS_PALLET_NAME: &str = "BridgeRelayers"; -/// Pallet index of `BridgeWococoMessages: pallet_bridge_messages::`. -pub const WITH_BRIDGE_ROCOCO_TO_WOCOCO_MESSAGES_PALLET_INDEX: u8 = 46; - /// Pallet index of `BridgeWestendMessages: pallet_bridge_messages::`. pub const WITH_BRIDGE_ROCOCO_TO_WESTEND_MESSAGES_PALLET_INDEX: u8 = 51; diff --git a/bridges/primitives/chain-bridge-hub-wococo/Cargo.toml b/bridges/primitives/chain-bridge-hub-wococo/Cargo.toml deleted file mode 100644 index 17c134f4412..00000000000 --- a/bridges/primitives/chain-bridge-hub-wococo/Cargo.toml +++ /dev/null @@ -1,34 +0,0 @@ -[package] -name = "bp-bridge-hub-wococo" -description = "Primitives of BridgeHubWococo parachain runtime." 
-version = "0.1.0" -authors.workspace = true -edition.workspace = true -license = "GPL-3.0-or-later WITH Classpath-exception-2.0" - -[dependencies] - -# Bridge Dependencies - -bp-bridge-hub-cumulus = { path = "../chain-bridge-hub-cumulus", default-features = false } -bp-runtime = { path = "../runtime", default-features = false } -bp-messages = { path = "../messages", default-features = false } - -# Substrate Based Dependencies - -frame-support = { path = "../../../substrate/frame/support", default-features = false } -sp-api = { path = "../../../substrate/primitives/api", default-features = false } -sp-runtime = { path = "../../../substrate/primitives/runtime", default-features = false } -sp-std = { path = "../../../substrate/primitives/std", default-features = false } - -[features] -default = [ "std" ] -std = [ - "bp-bridge-hub-cumulus/std", - "bp-messages/std", - "bp-runtime/std", - "frame-support/std", - "sp-api/std", - "sp-runtime/std", - "sp-std/std", -] diff --git a/bridges/primitives/chain-bridge-hub-wococo/src/lib.rs b/bridges/primitives/chain-bridge-hub-wococo/src/lib.rs deleted file mode 100644 index c8bd397cec5..00000000000 --- a/bridges/primitives/chain-bridge-hub-wococo/src/lib.rs +++ /dev/null @@ -1,90 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Module with configuration which reflects BridgeHubWococo runtime setup -//! (AccountId, Headers, Hashes...) - -#![cfg_attr(not(feature = "std"), no_std)] - -pub use bp_bridge_hub_cumulus::*; -use bp_messages::*; -use bp_runtime::{ - decl_bridge_finality_runtime_apis, decl_bridge_messages_runtime_apis, Chain, Parachain, -}; -use frame_support::dispatch::DispatchClass; -use sp_runtime::RuntimeDebug; - -/// BridgeHubWococo parachain. -#[derive(RuntimeDebug)] -pub struct BridgeHubWococo; - -impl Chain for BridgeHubWococo { - type BlockNumber = BlockNumber; - type Hash = Hash; - type Hasher = Hasher; - type Header = Header; - - type AccountId = AccountId; - type Balance = Balance; - type Nonce = Nonce; - type Signature = Signature; - - fn max_extrinsic_size() -> u32 { - *BlockLength::get().max.get(DispatchClass::Normal) - } - - fn max_extrinsic_weight() -> Weight { - BlockWeights::get() - .get(DispatchClass::Normal) - .max_extrinsic - .unwrap_or(Weight::MAX) - } -} - -impl Parachain for BridgeHubWococo { - const PARACHAIN_ID: u32 = BRIDGE_HUB_WOCOCO_PARACHAIN_ID; -} - -/// Identifier of BridgeHubWococo in the Wococo relay chain. -pub const BRIDGE_HUB_WOCOCO_PARACHAIN_ID: u32 = 1014; - -/// Name of the With-BridgeHubWococo messages pallet instance that is deployed at bridged chains. -pub const WITH_BRIDGE_HUB_WOCOCO_MESSAGES_PALLET_NAME: &str = "BridgeWococoMessages"; - -/// Name of the With-BridgeHubWococo bridge-relayers pallet instance that is deployed at bridged -/// chains. 
-pub const WITH_BRIDGE_HUB_WOCOCO_RELAYERS_PALLET_NAME: &str = "BridgeRelayers"; - -/// Pallet index of `BridgeRococoMessages: pallet_bridge_messages::`. -pub const WITH_BRIDGE_WOCOCO_TO_ROCOCO_MESSAGES_PALLET_INDEX: u8 = 45; - -decl_bridge_finality_runtime_apis!(bridge_hub_wococo); -decl_bridge_messages_runtime_apis!(bridge_hub_wococo); - -frame_support::parameter_types! { - /// The XCM fee that is paid for executing XCM program (with `ExportMessage` instruction) at the Wococo - /// BridgeHub. - /// (initially was calculated by test `BridgeHubWococo::can_calculate_weight_for_paid_export_message_with_reserve_transfer` + `33%`) - pub const BridgeHubWococoBaseXcmFeeInWocs: u128 = 1624803349; - - /// Transaction fee that is paid at the Wococo BridgeHub for delivering single inbound message. - /// (initially was calculated by test `BridgeHubWococo::can_calculate_fee_for_complex_message_delivery_transaction` + `33%`) - pub const BridgeHubWococoBaseDeliveryFeeInWocs: u128 = 6417262881; - - /// Transaction fee that is paid at the Wococo BridgeHub for delivering single outbound message confirmation. - /// (initially was calculated by test `BridgeHubWococo::can_calculate_fee_for_complex_message_confirmation_transaction` + `33%`) - pub const BridgeHubWococoBaseConfirmationFeeInWocs: u128 = 6159996668; -} diff --git a/bridges/primitives/chain-wococo/Cargo.toml b/bridges/primitives/chain-wococo/Cargo.toml deleted file mode 100644 index 05901821b36..00000000000 --- a/bridges/primitives/chain-wococo/Cargo.toml +++ /dev/null @@ -1,34 +0,0 @@ -[package] -name = "bp-wococo" -description = "Primitives of Wococo runtime." -version = "0.1.0" -authors.workspace = true -edition.workspace = true -license = "GPL-3.0-or-later WITH Classpath-exception-2.0" - -[dependencies] - -# Bridge Dependencies - -bp-header-chain = { path = "../header-chain", default-features = false } -bp-polkadot-core = { path = "../polkadot-core", default-features = false } -bp-runtime = { path = "../runtime", default-features = false } -bp-rococo = { path = "../chain-rococo", default-features = false } - -# Substrate Based Dependencies - -frame-support = { path = "../../../substrate/frame/support", default-features = false } -sp-api = { path = "../../../substrate/primitives/api", default-features = false } -sp-std = { path = "../../../substrate/primitives/std", default-features = false } - -[features] -default = [ "std" ] -std = [ - "bp-header-chain/std", - "bp-polkadot-core/std", - "bp-rococo/std", - "bp-runtime/std", - "frame-support/std", - "sp-api/std", - "sp-std/std", -] diff --git a/bridges/primitives/chain-wococo/src/lib.rs b/bridges/primitives/chain-wococo/src/lib.rs deleted file mode 100644 index b1df65630be..00000000000 --- a/bridges/primitives/chain-wococo/src/lib.rs +++ /dev/null @@ -1,68 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . 
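The removed `bp-bridge-hub-wococo` and `bp-wococo` crates follow the shape that every bridged-chain primitives crate keeps: describe the chain through the `Chain` trait and, for parachains, pin its para id through `Parachain`. A condensed sketch of that shape, with a hypothetical parachain name and para id:

```rust
pub use bp_bridge_hub_cumulus::*;
use bp_runtime::{Chain, Parachain};
use frame_support::dispatch::DispatchClass;
use sp_runtime::RuntimeDebug;

/// Hypothetical bridged parachain.
#[derive(RuntimeDebug)]
pub struct SomeBridgeHub;

impl Chain for SomeBridgeHub {
    type BlockNumber = BlockNumber;
    type Hash = Hash;
    type Hasher = Hasher;
    type Header = Header;

    type AccountId = AccountId;
    type Balance = Balance;
    type Nonce = Nonce;
    type Signature = Signature;

    fn max_extrinsic_size() -> u32 {
        *BlockLength::get().max.get(DispatchClass::Normal)
    }

    fn max_extrinsic_weight() -> Weight {
        BlockWeights::get().get(DispatchClass::Normal).max_extrinsic.unwrap_or(Weight::MAX)
    }
}

impl Parachain for SomeBridgeHub {
    /// Hypothetical para id; real crates pin the actually registered id here.
    const PARACHAIN_ID: u32 = 1013;
}
```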
- -#![cfg_attr(not(feature = "std"), no_std)] -// RuntimeApi generated functions -#![allow(clippy::too_many_arguments)] - -pub use bp_polkadot_core::*; -pub use bp_rococo::{ - SS58Prefix, MAX_AUTHORITIES_COUNT, MAX_NESTED_PARACHAIN_HEAD_DATA_SIZE, PARAS_PALLET_NAME, -}; - -use bp_header_chain::ChainWithGrandpa; -use bp_runtime::{decl_bridge_finality_runtime_apis, Chain}; -use frame_support::weights::Weight; - -/// Wococo Chain -pub struct Wococo; - -impl Chain for Wococo { - type BlockNumber = ::BlockNumber; - type Hash = ::Hash; - type Hasher = ::Hasher; - type Header = ::Header; - - type AccountId = ::AccountId; - type Balance = ::Balance; - type Nonce = ::Nonce; - type Signature = ::Signature; - - fn max_extrinsic_size() -> u32 { - PolkadotLike::max_extrinsic_size() - } - - fn max_extrinsic_weight() -> Weight { - PolkadotLike::max_extrinsic_weight() - } -} - -impl ChainWithGrandpa for Wococo { - const WITH_CHAIN_GRANDPA_PALLET_NAME: &'static str = WITH_WOCOCO_GRANDPA_PALLET_NAME; - const MAX_AUTHORITIES_COUNT: u32 = MAX_AUTHORITIES_COUNT; - const REASONABLE_HEADERS_IN_JUSTIFICATON_ANCESTRY: u32 = - REASONABLE_HEADERS_IN_JUSTIFICATON_ANCESTRY; - const MAX_HEADER_SIZE: u32 = MAX_HEADER_SIZE; - const AVERAGE_HEADER_SIZE_IN_JUSTIFICATION: u32 = AVERAGE_HEADER_SIZE_IN_JUSTIFICATION; -} - -// The SignedExtension used by Wococo. -pub use bp_rococo::CommonSignedExtension as SignedExtension; - -/// Name of the With-Wococo GRANDPA pallet instance that is deployed at bridged chains. -pub const WITH_WOCOCO_GRANDPA_PALLET_NAME: &str = "BridgeWococoGrandpa"; - -decl_bridge_finality_runtime_apis!(wococo, grandpa); diff --git a/bridges/primitives/runtime/src/chain.rs b/bridges/primitives/runtime/src/chain.rs index e1809e14524..b78023efb1b 100644 --- a/bridges/primitives/runtime/src/chain.rs +++ b/bridges/primitives/runtime/src/chain.rs @@ -280,7 +280,7 @@ pub type TransactionEraOf = crate::TransactionEra, HashOf /// - constants that are stringified names of runtime API methods: /// - `BEST_FINALIZED__HEADER_METHOD` /// - `_ACCEPTED__FINALITY_PROOFS_METHOD` -/// The name of the chain has to be specified in snake case (e.g. `rialto_parachain`). +/// The name of the chain has to be specified in snake case (e.g. `bridge_hub_polkadot`). #[macro_export] macro_rules! decl_bridge_finality_runtime_apis { ($chain: ident $(, $consensus: ident => $justification_type: ty)?) => { @@ -332,7 +332,7 @@ macro_rules! decl_bridge_finality_runtime_apis { /// - `FromInboundLaneApi` /// - constants that are stringified names of runtime API methods: /// - `FROM__MESSAGE_DETAILS_METHOD`, -/// The name of the chain has to be specified in snake case (e.g. `rialto_parachain`). +/// The name of the chain has to be specified in snake case (e.g. `bridge_hub_polkadot`). #[macro_export] macro_rules! decl_bridge_messages_runtime_apis { ($chain: ident) => { @@ -390,7 +390,7 @@ macro_rules! decl_bridge_messages_runtime_apis { /// Convenience macro that declares bridge finality runtime apis, bridge messages runtime apis /// and related constants for a chain. -/// The name of the chain has to be specified in snake case (e.g. `rialto_parachain`). +/// The name of the chain has to be specified in snake case (e.g. `bridge_hub_polkadot`). #[macro_export] macro_rules! decl_bridge_runtime_apis { ($chain: ident $(, $consensus: ident)?) 
=> { diff --git a/bridges/primitives/runtime/src/extensions.rs b/bridges/primitives/runtime/src/extensions.rs index 44eeaad93c9..8a618721b23 100644 --- a/bridges/primitives/runtime/src/extensions.rs +++ b/bridges/primitives/runtime/src/extensions.rs @@ -88,7 +88,7 @@ pub type BridgeRejectObsoleteHeadersAndMessages = GenericSignedExtensionSchema<( /// wildcard/placeholder, which relies on the scale encoding for `()` or `((), ())`, or `((), (), /// ())` is the same. So runtime can contains any kind of tuple: /// `(BridgeRefundBridgeHubRococoMessages)` -/// `(BridgeRefundBridgeHubRococoMessages, BridgeRefundBridgeHubWococoMessages)` +/// `(BridgeRefundBridgeHubRococoMessages, BridgeRefundBridgeHubWestendMessages)` /// `(BridgeRefundParachainMessages1, ..., BridgeRefundParachainMessagesN)` pub type RefundBridgedParachainMessagesSchema = GenericSignedExtensionSchema<(), ()>; diff --git a/bridges/primitives/runtime/src/lib.rs b/bridges/primitives/runtime/src/lib.rs index e5277d8db6a..0513cfa2a6c 100644 --- a/bridges/primitives/runtime/src/lib.rs +++ b/bridges/primitives/runtime/src/lib.rs @@ -61,15 +61,6 @@ pub use sp_runtime::paste; /// Use this when something must be shared among all instances. pub const NO_INSTANCE_ID: ChainId = [0, 0, 0, 0]; -/// Rialto chain id. -pub const RIALTO_CHAIN_ID: ChainId = *b"rlto"; - -/// RialtoParachain chain id. -pub const RIALTO_PARACHAIN_CHAIN_ID: ChainId = *b"rlpa"; - -/// Millau chain id. -pub const MILLAU_CHAIN_ID: ChainId = *b"mlau"; - /// Polkadot chain id. pub const POLKADOT_CHAIN_ID: ChainId = *b"pdot"; @@ -88,15 +79,9 @@ pub const ASSET_HUB_WESTEND_CHAIN_ID: ChainId = *b"ahwe"; /// Rococo chain id. pub const ROCOCO_CHAIN_ID: ChainId = *b"roco"; -/// Wococo chain id. -pub const WOCOCO_CHAIN_ID: ChainId = *b"woco"; - /// BridgeHubRococo chain id. pub const BRIDGE_HUB_ROCOCO_CHAIN_ID: ChainId = *b"bhro"; -/// BridgeHubWococo chain id. -pub const BRIDGE_HUB_WOCOCO_CHAIN_ID: ChainId = *b"bhwo"; - /// BridgeHubWestend chain id. pub const BRIDGE_HUB_WESTEND_CHAIN_ID: ChainId = *b"bhwd"; @@ -277,18 +262,6 @@ pub fn storage_map_final_key( StorageKey(final_key) } -/// This is how a storage key of storage parameter (`parameter_types! { storage Param: bool = false; -/// }`) is computed. -/// -/// Copied from `frame_support::parameter_types` macro. -pub fn storage_parameter_key(parameter_name: &str) -> StorageKey { - let mut buffer = Vec::with_capacity(1 + parameter_name.len() + 1); - buffer.push(b':'); - buffer.extend_from_slice(parameter_name.as_bytes()); - buffer.push(b':'); - StorageKey(sp_io::hashing::twox_128(&buffer).to_vec()) -} - /// This is how a storage key of storage value is computed. /// /// Copied from `frame_support::storage::storage_prefix`. 
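The remaining `storage_value_key` helper builds keys the same way FRAME's `storage_prefix` does: the twox-128 hash of the pallet prefix followed by the twox-128 hash of the value name. A minimal illustration of that derivation, assuming the usual twox128-plus-twox128 layout rather than the exact helper body:

```rust
use sp_core::storage::StorageKey;
use sp_io::hashing::twox_128;

/// Illustration of the storage-value key layout: twox128(pallet prefix) ++ twox128(value name).
fn value_key(pallet_prefix: &str, value_name: &str) -> StorageKey {
    let mut key = Vec::with_capacity(32);
    key.extend_from_slice(&twox_128(pallet_prefix.as_bytes()));
    key.extend_from_slice(&twox_128(value_name.as_bytes()));
    StorageKey(key)
}

fn main() {
    // For example, the `System.Number` value lives under a 32-byte key of this form.
    let key = value_key("System", "Number");
    assert_eq!(key.0.len(), 32);
}
```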
@@ -574,14 +547,6 @@ where mod tests { use super::*; - #[test] - fn storage_parameter_key_works() { - assert_eq!( - storage_parameter_key("MillauToRialtoConversionRate"), - StorageKey(hex_literal::hex!("58942375551bb0af1682f72786b59d04").to_vec()), - ); - } - #[test] fn storage_value_key_works() { assert_eq!( diff --git a/bridges/scripts/verify-pallets-build.sh b/bridges/scripts/verify-pallets-build.sh index e797f77d026..b96bbf1833b 100755 --- a/bridges/scripts/verify-pallets-build.sh +++ b/bridges/scripts/verify-pallets-build.sh @@ -61,19 +61,12 @@ trap revert_to_clean_state EXIT rm -rf $BRIDGES_FOLDER/.config rm -rf $BRIDGES_FOLDER/.github rm -rf $BRIDGES_FOLDER/.maintain -rm -rf $BRIDGES_FOLDER/bin/millau -rm -rf $BRIDGES_FOLDER/bin/rialto -rm -rf $BRIDGES_FOLDER/bin/rialto-parachain -rm -rf $BRIDGES_FOLDER/bin/.keep rm -rf $BRIDGES_FOLDER/deployments rm -f $BRIDGES_FOLDER/docs/dockerhub-* rm -rf $BRIDGES_FOLDER/fuzz rm -rf $BRIDGES_FOLDER/modules/beefy rm -rf $BRIDGES_FOLDER/modules/shift-session-manager rm -rf $BRIDGES_FOLDER/primitives/beefy -rm -rf $BRIDGES_FOLDER/primitives/chain-millau -rm -rf $BRIDGES_FOLDER/primitives/chain-rialto -rm -rf $BRIDGES_FOLDER/primitives/chain-rialto-parachain rm -rf $BRIDGES_FOLDER/relays rm -rf $BRIDGES_FOLDER/scripts/add_license.sh rm -rf $BRIDGES_FOLDER/scripts/build-containers.sh @@ -81,8 +74,6 @@ rm -rf $BRIDGES_FOLDER/scripts/ci-cache.sh rm -rf $BRIDGES_FOLDER/scripts/dump-logs.sh rm -rf $BRIDGES_FOLDER/scripts/license_header rm -rf $BRIDGES_FOLDER/scripts/regenerate_runtimes.sh -rm -rf $BRIDGES_FOLDER/scripts/send-message-from-millau-rialto.sh -rm -rf $BRIDGES_FOLDER/scripts/send-message-from-rialto-millau.sh rm -rf $BRIDGES_FOLDER/scripts/update-weights.sh rm -rf $BRIDGES_FOLDER/scripts/update-weights-setup.sh rm -rf $BRIDGES_FOLDER/scripts/update_substrate.sh diff --git a/bridges/zombienet/README.md b/bridges/zombienet/README.md index 7f7de770814..b601154b624 100644 --- a/bridges/zombienet/README.md +++ b/bridges/zombienet/README.md @@ -1,4 +1,4 @@ -# Bridges Tests for Local Rococo <> Wococo Bridge +# Bridges Tests for Local Rococo <> Westend Bridge This folder contains [zombienet](https://github.com/paritytech/zombienet/) based integration tests for both onchain and offchain bridges code. 
Due to some @@ -9,7 +9,7 @@ To start those tests, you need to: - download latest [zombienet release](https://github.com/paritytech/zombienet/releases); -- build Polkadot binary by running `cargo build -p polkadot --release` command in the +- build Polkadot binary by running `cargo build -p polkadot --release --features fast-runtime` command in the [`polkadot-sdk`](https://github.com/paritytech/polkadot-sdk) repository clone; - build Polkadot Parachain binary by running `cargo build -p polkadot-parachain-bin --release` command in the diff --git a/bridges/zombienet/helpers/wait-hrmp-channel-opened.js b/bridges/zombienet/helpers/wait-hrmp-channel-opened.js new file mode 100644 index 00000000000..e700cab1d74 --- /dev/null +++ b/bridges/zombienet/helpers/wait-hrmp-channel-opened.js @@ -0,0 +1,22 @@ +async function run(nodeName, networkInfo, args) { + const {wsUri, userDefinedTypes} = networkInfo.nodesByName[nodeName]; + const api = await zombie.connect(wsUri, userDefinedTypes); + + const sibling = args[0]; + + while (true) { + const messagingStateAsObj = await api.query.parachainSystem.relevantMessagingState(); + const messagingState = api.createType("Option", messagingStateAsObj); + if (messagingState.isSome) { + const egressChannels = messagingState.unwrap().egressChannels; + if (egressChannels.find(x => x[0] == sibling)) { + return; + } + } + + // else sleep and retry + await new Promise((resolve) => setTimeout(resolve, 12000)); + } +} + +module.exports = { run } diff --git a/bridges/zombienet/run-tests.sh b/bridges/zombienet/run-tests.sh index 1fdbc6b8d61..22fefd09360 100755 --- a/bridges/zombienet/run-tests.sh +++ b/bridges/zombienet/run-tests.sh @@ -11,11 +11,11 @@ export BRIDGE_TESTS_FOLDER=$POLKADOT_SDK_FOLDER/bridges/zombienet/tests export POLKADOT_BINARY_PATH=$POLKADOT_SDK_FOLDER/target/release/polkadot export POLKADOT_PARACHAIN_BINARY_PATH=$POLKADOT_SDK_FOLDER/target/release/polkadot-parachain export POLKADOT_PARACHAIN_BINARY_PATH_FOR_ASSET_HUB_ROCOCO=$POLKADOT_PARACHAIN_BINARY_PATH -export POLKADOT_PARACHAIN_BINARY_PATH_FOR_ASSET_HUB_WOCOCO=$POLKADOT_PARACHAIN_BINARY_PATH +export POLKADOT_PARACHAIN_BINARY_PATH_FOR_ASSET_HUB_WESTEND=$POLKADOT_PARACHAIN_BINARY_PATH export ZOMBIENET_BINARY_PATH=~/local_bridge_testing/bin/zombienet-linux # bridge configuration -export LANE_ID="00000001" +export LANE_ID="00000002" # tests configuration ALL_TESTS_FOLDER=`mktemp -d` diff --git a/bridges/zombienet/scripts/invoke-script.sh b/bridges/zombienet/scripts/invoke-script.sh index cb21d61ab91..6a3754a8824 100755 --- a/bridges/zombienet/scripts/invoke-script.sh +++ b/bridges/zombienet/scripts/invoke-script.sh @@ -1,5 +1,5 @@ #!/bin/bash pushd $POLKADOT_SDK_FOLDER/cumulus/scripts -./bridges_rococo_wococo.sh $1 +./bridges_rococo_westend.sh $1 popd diff --git a/bridges/zombienet/tests/0001-asset-transfer-works-rococo-to-westend.zndsl b/bridges/zombienet/tests/0001-asset-transfer-works-rococo-to-westend.zndsl new file mode 100644 index 00000000000..f68d658cdac --- /dev/null +++ b/bridges/zombienet/tests/0001-asset-transfer-works-rococo-to-westend.zndsl @@ -0,0 +1,26 @@ +Description: User is able to transfer ROC from Rococo Asset Hub to Westend Asset Hub +Network: ../../../cumulus/zombienet/bridge-hubs/bridge_hub_westend_local_network.toml +Creds: config + +# step 1: initialize Westend asset hub +asset-hub-westend-collator1: run ../scripts/invoke-script.sh with "init-asset-hub-westend-local" within 240 seconds +asset-hub-westend-collator1: js-script ../helpers/wait-hrmp-channel-opened.js with "1002" 
within 400 seconds + +# step 2: initialize Westend bridge hub +bridge-hub-westend-collator1: run ../scripts/invoke-script.sh with "init-bridge-hub-westend-local" within 120 seconds + +# step 3: relay is started elsewhere - let's wait until with-Rococo GRANPDA pallet is initialized at Westend +bridge-hub-westend-collator1: js-script ../helpers/best-finalized-header-at-bridged-chain.js with "Rococo,0" within 400 seconds + +# step 2: send WOC to Rococo +asset-hub-westend-collator1: run ../scripts/invoke-script.sh with "reserve-transfer-assets-from-asset-hub-westend-local" within 60 seconds + +# step 3: elsewhere Rococo has sent ROC to //Alice - let's wait for it +asset-hub-westend-collator1: js-script ../helpers/wrapped-assets-balance.js with "5GrwvaEF5zXb26Fz9rcQpDWS57CtERHpNehXCPcNoHGKutQY,0,Rococo" within 600 seconds + +# step 4: check that the relayer //Charlie is rewarded by both our AH and target AH +bridge-hub-westend-collator1: js-script ../helpers/relayer-rewards.js with "5FLSigC9HGRKVhB9FiEo4Y3koPsNmBmLJbpXg2mp1hXcS59Y,0x00000002,0x6268726f,BridgedChain,0" within 300 seconds +bridge-hub-westend-collator1: js-script ../helpers/relayer-rewards.js with "5FLSigC9HGRKVhB9FiEo4Y3koPsNmBmLJbpXg2mp1hXcS59Y,0x00000002,0x6268726F,ThisChain,0" within 300 seconds + +# wait until other network test has completed OR exit with an error too +asset-hub-westend-collator1: run ../scripts/sync-exit.sh within 600 seconds diff --git a/bridges/zombienet/tests/0001-asset-transfer-works-rococo-to-wococo.zndsl b/bridges/zombienet/tests/0001-asset-transfer-works-rococo-to-wococo.zndsl deleted file mode 100644 index a1af2625c1c..00000000000 --- a/bridges/zombienet/tests/0001-asset-transfer-works-rococo-to-wococo.zndsl +++ /dev/null @@ -1,25 +0,0 @@ -Description: User is able to transfer ROC from Rococo Asset Hub to Wococo Asset Hub -Network: ../../../cumulus/zombienet/bridge-hubs/bridge_hub_wococo_local_network.toml -Creds: config - -# step 1: initialize Wococo asset hub -asset-hub-wococo-collator1: run ../scripts/invoke-script.sh with "init-asset-hub-wococo-local" within 120 seconds - -# step 2: initialize Wococo bridge hub -bridge-hub-wococo-collator1: run ../scripts/invoke-script.sh with "init-bridge-hub-wococo-local" within 120 seconds - -# step 3: relay is started elsewhere - let's wait until with-Rococo GRANPDA pallet is initialized at Wococo -bridge-hub-wococo-collator1: js-script ../helpers/best-finalized-header-at-bridged-chain.js with "Rococo,0" within 400 seconds - -# step 2: send WOC to Rococo -asset-hub-wococo-collator1: run ../scripts/invoke-script.sh with "reserve-transfer-assets-from-asset-hub-wococo-local" within 60 seconds - -# step 3: elsewhere Rococo has sent ROC to //Alice - let's wait for it -asset-hub-wococo-collator1: js-script ../helpers/wrapped-assets-balance.js with "5GrwvaEF5zXb26Fz9rcQpDWS57CtERHpNehXCPcNoHGKutQY,0,Rococo" within 600 seconds - -# step 4: check that the relayer //Charlie is rewarded by both our AH and target AH -bridge-hub-wococo-collator1: js-script ../helpers/relayer-rewards.js with "5FLSigC9HGRKVhB9FiEo4Y3koPsNmBmLJbpXg2mp1hXcS59Y,0x00000001,0x6268726F,BridgedChain,0" within 300 seconds -bridge-hub-wococo-collator1: js-script ../helpers/relayer-rewards.js with "5FLSigC9HGRKVhB9FiEo4Y3koPsNmBmLJbpXg2mp1hXcS59Y,0x00000001,0x6268726F,ThisChain,0" within 300 seconds - -# wait until other network test has completed OR exit with an error too -asset-hub-wococo-collator1: run ../scripts/sync-exit.sh within 600 seconds diff --git 
a/bridges/zombienet/tests/0001-asset-transfer-works-wococo-to-rococo.zndsl b/bridges/zombienet/tests/0001-asset-transfer-works-westend-to-rococo.zndsl similarity index 64% rename from bridges/zombienet/tests/0001-asset-transfer-works-wococo-to-rococo.zndsl rename to bridges/zombienet/tests/0001-asset-transfer-works-westend-to-rococo.zndsl index ad2446d58ce..c862fa6d176 100644 --- a/bridges/zombienet/tests/0001-asset-transfer-works-wococo-to-rococo.zndsl +++ b/bridges/zombienet/tests/0001-asset-transfer-works-westend-to-rococo.zndsl @@ -1,25 +1,26 @@ -Description: User is able to transfer WOC from Wococo Asset Hub to Rococo Asset Hub +Description: User is able to transfer WOC from Westend Asset Hub to Rococo Asset Hub Network: ../../../cumulus/zombienet/bridge-hubs/bridge_hub_rococo_local_network.toml Creds: config # step 1: initialize Rococo asset hub -asset-hub-rococo-collator1: run ../scripts/invoke-script.sh with "init-asset-hub-rococo-local" within 120 seconds +asset-hub-rococo-collator1: run ../scripts/invoke-script.sh with "init-asset-hub-rococo-local" within 240 seconds +asset-hub-rococo-collator1: js-script ../helpers/wait-hrmp-channel-opened.js with "1013" within 400 seconds # step 2: initialize Rococo bridge hub bridge-hub-rococo-collator1: run ../scripts/invoke-script.sh with "init-bridge-hub-rococo-local" within 120 seconds -# step 3: relay is started elsewhere - let's wait until with-Wococo GRANPDA pallet is initialized at Rococo -bridge-hub-rococo-collator1: js-script ../helpers/best-finalized-header-at-bridged-chain.js with "Wococo,0" within 400 seconds +# step 3: relay is started elsewhere - let's wait until with-Westend GRANPDA pallet is initialized at Rococo +bridge-hub-rococo-collator1: js-script ../helpers/best-finalized-header-at-bridged-chain.js with "Westend,0" within 400 seconds -# step 4: send ROC to Wococo +# step 4: send ROC to Westend asset-hub-rococo-collator1: run ../scripts/invoke-script.sh with "reserve-transfer-assets-from-asset-hub-rococo-local" within 60 seconds -# step 5: elsewhere Wococo has sent WOC to //Alice - let's wait for it -asset-hub-rococo-collator1: js-script ../helpers/wrapped-assets-balance.js with "5GrwvaEF5zXb26Fz9rcQpDWS57CtERHpNehXCPcNoHGKutQY,0,Wococo" within 600 seconds +# step 5: elsewhere Westend has sent WOC to //Alice - let's wait for it +asset-hub-rococo-collator1: js-script ../helpers/wrapped-assets-balance.js with "5GrwvaEF5zXb26Fz9rcQpDWS57CtERHpNehXCPcNoHGKutQY,0,Westend" within 600 seconds # step 6: check that the relayer //Charlie is rewarded by both our AH and target AH -bridge-hub-rococo-collator1: js-script ../helpers/relayer-rewards.js with "5FLSigC9HGRKVhB9FiEo4Y3koPsNmBmLJbpXg2mp1hXcS59Y,0x00000001,0x6268776F,BridgedChain,0" within 300 seconds -bridge-hub-rococo-collator1: js-script ../helpers/relayer-rewards.js with "5FLSigC9HGRKVhB9FiEo4Y3koPsNmBmLJbpXg2mp1hXcS59Y,0x00000001,0x6268776F,ThisChain,0" within 300 seconds +bridge-hub-rococo-collator1: js-script ../helpers/relayer-rewards.js with "5FLSigC9HGRKVhB9FiEo4Y3koPsNmBmLJbpXg2mp1hXcS59Y,0x00000002,0x62687764,BridgedChain,0" within 300 seconds +bridge-hub-rococo-collator1: js-script ../helpers/relayer-rewards.js with "5FLSigC9HGRKVhB9FiEo4Y3koPsNmBmLJbpXg2mp1hXcS59Y,0x00000002,0x62687764,ThisChain,0" within 300 seconds # wait until other network test has completed OR exit with an error too asset-hub-rococo-collator1: run ../scripts/sync-exit.sh within 600 seconds diff --git a/bridges/zombienet/tests/0001-start-relay.sh b/bridges/zombienet/tests/0001-start-relay.sh 
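In the relayer-rewards checks above, the hex arguments are simply the lane and chain-id constants rendered as bytes: `0x00000002` is the `LANE_ID` exported by `run-tests.sh`, while `0x62687764` and `0x6268726f` are the ASCII bytes of the `*b"bhwd"` (BridgeHubWestend) and `*b"bhro"` (BridgeHubRococo) chain ids defined in `bp-runtime`. A quick check of that correspondence:

```rust
fn main() {
    // BRIDGE_HUB_WESTEND_CHAIN_ID = *b"bhwd" and BRIDGE_HUB_ROCOCO_CHAIN_ID = *b"bhro",
    // so the hex arguments in the zndsl rewards checks are just their ASCII bytes.
    assert_eq!(*b"bhwd", [0x62, 0x68, 0x77, 0x64]);
    assert_eq!(*b"bhro", [0x62, 0x68, 0x72, 0x6f]);
}
```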
old mode 100644 new mode 100755 index fc231fba895..7be2cf4d593 --- a/bridges/zombienet/tests/0001-start-relay.sh +++ b/bridges/zombienet/tests/0001-start-relay.sh @@ -1,5 +1,5 @@ #!/bin/bash pushd $POLKADOT_SDK_FOLDER/cumulus/scripts -./bridges_rococo_wococo.sh run-relay +./bridges_rococo_westend.sh run-relay popd diff --git a/cumulus/parachains/integration-tests/emulated/chains/parachains/bridges/bridge-hub-rococo/src/genesis.rs b/cumulus/parachains/integration-tests/emulated/chains/parachains/bridges/bridge-hub-rococo/src/genesis.rs index 4af84c82e98..fa9a287adf8 100644 --- a/cumulus/parachains/integration-tests/emulated/chains/parachains/bridges/bridge-hub-rococo/src/genesis.rs +++ b/cumulus/parachains/integration-tests/emulated/chains/parachains/bridges/bridge-hub-rococo/src/genesis.rs @@ -56,19 +56,11 @@ pub fn genesis() -> Storage { safe_xcm_version: Some(SAFE_XCM_VERSION), ..Default::default() }, - bridge_wococo_grandpa: bridge_hub_rococo_runtime::BridgeWococoGrandpaConfig { + bridge_westend_grandpa: bridge_hub_rococo_runtime::BridgeWestendGrandpaConfig { owner: Some(get_account_id_from_seed::(accounts::BOB)), ..Default::default() }, - bridge_rococo_grandpa: bridge_hub_rococo_runtime::BridgeRococoGrandpaConfig { - owner: Some(get_account_id_from_seed::(accounts::BOB)), - ..Default::default() - }, - bridge_rococo_messages: bridge_hub_rococo_runtime::BridgeRococoMessagesConfig { - owner: Some(get_account_id_from_seed::(accounts::BOB)), - ..Default::default() - }, - bridge_wococo_messages: bridge_hub_rococo_runtime::BridgeWococoMessagesConfig { + bridge_westend_messages: bridge_hub_rococo_runtime::BridgeWestendMessagesConfig { owner: Some(get_account_id_from_seed::(accounts::BOB)), ..Default::default() }, diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/Cargo.toml b/cumulus/parachains/runtimes/assets/asset-hub-rococo/Cargo.toml index f4f33677d4a..0773eb0c8eb 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-rococo/Cargo.toml +++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/Cargo.toml @@ -86,9 +86,7 @@ assets-common = { path = "../common", default-features = false } pallet-xcm-bridge-hub-router = { path = "../../../../../bridges/modules/xcm-bridge-hub-router", default-features = false } bp-asset-hub-rococo = { path = "../../../../../bridges/primitives/chain-asset-hub-rococo", default-features = false } bp-asset-hub-westend = { path = "../../../../../bridges/primitives/chain-asset-hub-westend", default-features = false } -bp-asset-hub-wococo = { path = "../../../../../bridges/primitives/chain-asset-hub-wococo", default-features = false } bp-bridge-hub-rococo = { path = "../../../../../bridges/primitives/chain-bridge-hub-rococo", default-features = false } -bp-bridge-hub-wococo = { path = "../../../../../bridges/primitives/chain-bridge-hub-wococo", default-features = false } bp-bridge-hub-westend = { path = "../../../../../bridges/primitives/chain-bridge-hub-westend", default-features = false } [dev-dependencies] @@ -180,10 +178,8 @@ std = [ "assets-common/std", "bp-asset-hub-rococo/std", "bp-asset-hub-westend/std", - "bp-asset-hub-wococo/std", "bp-bridge-hub-rococo/std", "bp-bridge-hub-westend/std", - "bp-bridge-hub-wococo/std", "codec/std", "cumulus-pallet-aura-ext/std", "cumulus-pallet-dmp-queue/std", diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs index 06dcfb99a65..4492971566b 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs +++ 
b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs @@ -16,12 +16,6 @@ //! # Asset Hub Rococo Runtime //! //! Asset Hub Rococo, formerly known as "Rockmine", is the test network for its Kusama cousin. -//! -//! This runtime is also used for Asset Hub Wococo. But we dont want to create another exact copy of -//! Asset Hub Rococo, so we injected some tweaks backed by `RuntimeFlavor` and `pub storage Flavor: -//! RuntimeFlavor`. (For example this is needed for successful asset transfer between Asset Hub -//! Rococo and Asset Hub Wococo, where we need to have correct `xcm_config::UniversalLocation` with -//! correct `GlobalConsensus`. #![cfg_attr(not(feature = "std"), no_std)] #![recursion_limit = "256"] @@ -106,15 +100,6 @@ use crate::xcm_config::{ }; use weights::{BlockExecutionWeight, ExtrinsicBaseWeight, RocksDbWeight}; -/// Enum for handling differences in the runtime configuration for `AssetHubRococo` vs. -/// `AssetHubWococo`. -#[derive(Default, Eq, PartialEq, Debug, Clone, Copy, Decode, Encode)] -pub enum RuntimeFlavor { - #[default] - Rococo, - Wococo, -} - impl_opaque_keys! { pub struct SessionKeys { pub aura: Aura, @@ -866,73 +851,11 @@ impl pallet_nfts::Config for Runtime { type Helper = (); } -/// XCM router instance to BridgeHub with bridging capabilities for `Wococo` global -/// consensus with dynamic fees and back-pressure. -pub type ToWococoXcmRouterInstance = pallet_xcm_bridge_hub_router::Instance1; -impl pallet_xcm_bridge_hub_router::Config for Runtime { - type WeightInfo = weights::pallet_xcm_bridge_hub_router_to_wococo::WeightInfo; - - type UniversalLocation = xcm_config::UniversalLocation; - type BridgedNetworkId = xcm_config::bridging::to_wococo::WococoNetwork; - type Bridges = xcm_config::bridging::NetworkExportTable; - - #[cfg(not(feature = "runtime-benchmarks"))] - type BridgeHubOrigin = EnsureXcm>; - #[cfg(feature = "runtime-benchmarks")] - type BridgeHubOrigin = EitherOfDiverse< - // for running benchmarks - EnsureRoot, - // for running tests with `--feature runtime-benchmarks` - EnsureXcm>, - >; - - type ToBridgeHubSender = XcmpQueue; - type WithBridgeHubChannel = - cumulus_pallet_xcmp_queue::bridging::InAndOutXcmpChannelStatusProvider< - xcm_config::bridging::SiblingBridgeHubParaId, - Runtime, - >; - - type ByteFee = xcm_config::bridging::XcmBridgeHubRouterByteFee; - type FeeAsset = xcm_config::bridging::XcmBridgeHubRouterFeeAssetId; -} - -/// XCM router instance to BridgeHub with bridging capabilities for `Rococo` global -/// consensus with dynamic fees and back-pressure. 
-pub type ToRococoXcmRouterInstance = pallet_xcm_bridge_hub_router::Instance2; -impl pallet_xcm_bridge_hub_router::Config for Runtime { - type WeightInfo = weights::pallet_xcm_bridge_hub_router_to_rococo::WeightInfo; - - type UniversalLocation = xcm_config::UniversalLocation; - type BridgedNetworkId = xcm_config::bridging::to_rococo::RococoNetwork; - type Bridges = xcm_config::bridging::NetworkExportTable; - - #[cfg(not(feature = "runtime-benchmarks"))] - type BridgeHubOrigin = EnsureXcm>; - #[cfg(feature = "runtime-benchmarks")] - type BridgeHubOrigin = EitherOfDiverse< - // for running benchmarks - EnsureRoot, - // for running tests with `--feature runtime-benchmarks` - EnsureXcm>, - >; - - type ToBridgeHubSender = XcmpQueue; - type WithBridgeHubChannel = - cumulus_pallet_xcmp_queue::bridging::InAndOutXcmpChannelStatusProvider< - xcm_config::bridging::SiblingBridgeHubParaId, - Runtime, - >; - - type ByteFee = xcm_config::bridging::XcmBridgeHubRouterByteFee; - type FeeAsset = xcm_config::bridging::XcmBridgeHubRouterFeeAssetId; -} - /// XCM router instance to BridgeHub with bridging capabilities for `Westend` global /// consensus with dynamic fees and back-pressure. pub type ToWestendXcmRouterInstance = pallet_xcm_bridge_hub_router::Instance3; impl pallet_xcm_bridge_hub_router::Config for Runtime { - type WeightInfo = weights::pallet_xcm_bridge_hub_router_to_westend::WeightInfo; + type WeightInfo = weights::pallet_xcm_bridge_hub_router::WeightInfo; type UniversalLocation = xcm_config::UniversalLocation; type BridgedNetworkId = xcm_config::bridging::to_westend::WestendNetwork; @@ -996,8 +919,6 @@ construct_runtime!( Proxy: pallet_proxy::{Pallet, Call, Storage, Event} = 42, // Bridge utilities. - ToWococoXcmRouter: pallet_xcm_bridge_hub_router::::{Pallet, Storage, Call} = 43, - ToRococoXcmRouter: pallet_xcm_bridge_hub_router::::{Pallet, Storage, Call} = 44, ToWestendXcmRouter: pallet_xcm_bridge_hub_router::::{Pallet, Storage, Call} = 45, // The main stage. @@ -1073,9 +994,7 @@ mod benches { [pallet_timestamp, Timestamp] [pallet_collator_selection, CollatorSelection] [cumulus_pallet_xcmp_queue, XcmpQueue] - [pallet_xcm_bridge_hub_router, ToWococo] [pallet_xcm_bridge_hub_router, ToWestend] - [pallet_xcm_bridge_hub_router, ToRococo] // XCM [pallet_xcm, PalletXcmExtrinsiscsBenchmark::] // NOTE: Make sure you point to the individual modules below. @@ -1332,9 +1251,7 @@ impl_runtime_apis! { type Foreign = pallet_assets::Pallet::; type Pool = pallet_assets::Pallet::; - type ToWococo = XcmBridgeHubRouterBench; type ToWestend = XcmBridgeHubRouterBench; - type ToRococo = XcmBridgeHubRouterBench; let mut list = Vec::::new(); list_benchmarks!(list, extra); @@ -1402,19 +1319,6 @@ impl_runtime_apis! { } } - impl XcmBridgeHubRouterConfig for Runtime { - fn make_congested() { - cumulus_pallet_xcmp_queue::bridging::suspend_channel_for_benchmarks::( - xcm_config::bridging::SiblingBridgeHubParaId::get().into() - ); - } - fn ensure_bridged_target_destination() -> MultiLocation { - ParachainSystem::open_outbound_hrmp_channel_for_benchmarks_or_tests( - xcm_config::bridging::SiblingBridgeHubParaId::get().into() - ); - xcm_config::bridging::to_wococo::AssetHubWococo::get() - } - } impl XcmBridgeHubRouterConfig for Runtime { fn make_congested() { cumulus_pallet_xcmp_queue::bridging::suspend_channel_for_benchmarks::( @@ -1428,20 +1332,6 @@ impl_runtime_apis! 
{ xcm_config::bridging::to_westend::AssetHubWestend::get() } } - impl XcmBridgeHubRouterConfig for Runtime { - fn make_congested() { - cumulus_pallet_xcmp_queue::bridging::suspend_channel_for_benchmarks::( - xcm_config::bridging::SiblingBridgeHubParaId::get().into() - ); - } - fn ensure_bridged_target_destination() -> MultiLocation { - xcm_config::Flavor::set(&RuntimeFlavor::Wococo); - ParachainSystem::open_outbound_hrmp_channel_for_benchmarks_or_tests( - xcm_config::bridging::SiblingBridgeHubParaId::get().into() - ); - xcm_config::bridging::to_rococo::AssetHubRococo::get() - } - } use xcm::latest::prelude::*; use xcm_config::{TokenLocation, MaxAssetsIntoHolding}; @@ -1498,11 +1388,11 @@ impl_runtime_apis! { MultiAsset { fun: Fungible(UNITS), id: Concrete(TokenLocation::get()) }, )); pub const CheckedAccount: Option<(AccountId, xcm_builder::MintLocation)> = None; - // AssetHubRococo trusts AssetHubWococo as reserve for WOCs + // AssetHubRococo trusts AssetHubWestend as reserve for WNDs pub TrustedReserve: Option<(MultiLocation, MultiAsset)> = Some( ( - xcm_config::bridging::to_wococo::AssetHubWococo::get(), - MultiAsset::from((xcm_config::bridging::to_wococo::WocLocation::get(), 1000000000000 as u128)) + xcm_config::bridging::to_westend::AssetHubWestend::get(), + MultiAsset::from((xcm_config::bridging::to_westend::WndLocation::get(), 1000000000000 as u128)) ) ); } @@ -1577,9 +1467,7 @@ impl_runtime_apis! { type Foreign = pallet_assets::Pallet::; type Pool = pallet_assets::Pallet::; - type ToWococo = XcmBridgeHubRouterBench; type ToWestend = XcmBridgeHubRouterBench; - type ToRococo = XcmBridgeHubRouterBench; let whitelist: Vec = vec![ // Block Number diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/mod.rs b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/mod.rs index 0fc36d74ff0..252cf2630f4 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/mod.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/mod.rs @@ -36,9 +36,7 @@ pub mod pallet_timestamp; pub mod pallet_uniques; pub mod pallet_utility; pub mod pallet_xcm; -pub mod pallet_xcm_bridge_hub_router_to_rococo; -pub mod pallet_xcm_bridge_hub_router_to_westend; -pub mod pallet_xcm_bridge_hub_router_to_wococo; +pub mod pallet_xcm_bridge_hub_router; pub mod paritydb_weights; pub mod rocksdb_weights; pub mod xcm; diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/pallet_xcm_bridge_hub_router_to_westend.rs b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/pallet_xcm_bridge_hub_router.rs similarity index 75% rename from cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/pallet_xcm_bridge_hub_router_to_westend.rs rename to cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/pallet_xcm_bridge_hub_router.rs index 8c344b44f78..7e12453583d 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/pallet_xcm_bridge_hub_router_to_westend.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/pallet_xcm_bridge_hub_router.rs @@ -17,9 +17,9 @@ //! Autogenerated weights for `pallet_xcm_bridge_hub_router` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-10-26, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-11-15, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! 
HOSTNAME: `runner-vmdtonbz-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-yprdrvc7-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("asset-hub-rococo-dev")`, DB CACHE: 1024 // Executed Command: @@ -48,38 +48,34 @@ use core::marker::PhantomData; /// Weight functions for `pallet_xcm_bridge_hub_router`. pub struct WeightInfo(PhantomData); impl pallet_xcm_bridge_hub_router::WeightInfo for WeightInfo { - /// Storage: UNKNOWN KEY `0x48297505634037ef48c848c99c0b1f1b` (r:1 w:0) - /// Proof: UNKNOWN KEY `0x48297505634037ef48c848c99c0b1f1b` (r:1 w:0) - /// Storage: `XcmpQueue::InboundXcmpStatus` (r:1 w:0) - /// Proof: `XcmpQueue::InboundXcmpStatus` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `XcmpQueue::InboundXcmpSuspended` (r:1 w:0) + /// Proof: `XcmpQueue::InboundXcmpSuspended` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `XcmpQueue::OutboundXcmpStatus` (r:1 w:0) /// Proof: `XcmpQueue::OutboundXcmpStatus` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `ToWestendXcmRouter::Bridge` (r:1 w:1) /// Proof: `ToWestendXcmRouter::Bridge` (`max_values`: Some(1), `max_size`: Some(17), added: 512, mode: `MaxEncodedLen`) fn on_initialize_when_non_congested() -> Weight { // Proof Size summary in bytes: - // Measured: `193` - // Estimated: `3658` - // Minimum execution time: 8_528_000 picoseconds. - Weight::from_parts(8_886_000, 0) - .saturating_add(Weight::from_parts(0, 3658)) - .saturating_add(T::DbWeight::get().reads(4)) + // Measured: `154` + // Estimated: `1639` + // Minimum execution time: 7_924_000 picoseconds. + Weight::from_parts(8_199_000, 0) + .saturating_add(Weight::from_parts(0, 1639)) + .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: UNKNOWN KEY `0x48297505634037ef48c848c99c0b1f1b` (r:1 w:0) - /// Proof: UNKNOWN KEY `0x48297505634037ef48c848c99c0b1f1b` (r:1 w:0) - /// Storage: `XcmpQueue::InboundXcmpStatus` (r:1 w:0) - /// Proof: `XcmpQueue::InboundXcmpStatus` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `XcmpQueue::InboundXcmpSuspended` (r:1 w:0) + /// Proof: `XcmpQueue::InboundXcmpSuspended` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `XcmpQueue::OutboundXcmpStatus` (r:1 w:0) /// Proof: `XcmpQueue::OutboundXcmpStatus` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) fn on_initialize_when_congested() -> Weight { // Proof Size summary in bytes: - // Measured: `183` - // Estimated: `3648` - // Minimum execution time: 5_170_000 picoseconds. - Weight::from_parts(5_433_000, 0) - .saturating_add(Weight::from_parts(0, 3648)) - .saturating_add(T::DbWeight::get().reads(3)) + // Measured: `144` + // Estimated: `1629` + // Minimum execution time: 4_265_000 picoseconds. + Weight::from_parts(4_417_000, 0) + .saturating_add(Weight::from_parts(0, 1629)) + .saturating_add(T::DbWeight::get().reads(2)) } /// Storage: `ToWestendXcmRouter::Bridge` (r:1 w:1) /// Proof: `ToWestendXcmRouter::Bridge` (`max_values`: Some(1), `max_size`: Some(17), added: 512, mode: `MaxEncodedLen`) @@ -87,16 +83,16 @@ impl pallet_xcm_bridge_hub_router::WeightInfo for Weigh // Proof Size summary in bytes: // Measured: `150` // Estimated: `1502` - // Minimum execution time: 10_283_000 picoseconds. - Weight::from_parts(10_762_000, 0) + // Minimum execution time: 10_292_000 picoseconds. 
+ Weight::from_parts(10_797_000, 0) .saturating_add(Weight::from_parts(0, 1502)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: UNKNOWN KEY `0x48297505634037ef48c848c99c0b1f1b` (r:1 w:0) - /// Proof: UNKNOWN KEY `0x48297505634037ef48c848c99c0b1f1b` (r:1 w:0) /// Storage: `ParachainInfo::ParachainId` (r:1 w:0) /// Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: UNKNOWN KEY `0x3302afcb67e838a3f960251b417b9a4f` (r:1 w:0) + /// Proof: UNKNOWN KEY `0x3302afcb67e838a3f960251b417b9a4f` (r:1 w:0) /// Storage: UNKNOWN KEY `0x0973fe64c85043ba1c965cbc38eb63c7` (r:1 w:0) /// Proof: UNKNOWN KEY `0x0973fe64c85043ba1c965cbc38eb63c7` (r:1 w:0) /// Storage: `ToWestendXcmRouter::Bridge` (r:1 w:1) @@ -113,16 +109,16 @@ impl pallet_xcm_bridge_hub_router::WeightInfo for Weigh /// Proof: `ParachainSystem::RelevantMessagingState` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `XcmpQueue::OutboundXcmpStatus` (r:1 w:1) /// Proof: `XcmpQueue::OutboundXcmpStatus` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `XcmpQueue::InboundXcmpStatus` (r:1 w:0) - /// Proof: `XcmpQueue::InboundXcmpStatus` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `XcmpQueue::InboundXcmpSuspended` (r:1 w:0) + /// Proof: `XcmpQueue::InboundXcmpSuspended` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) /// Storage: `XcmpQueue::OutboundXcmpMessages` (r:0 w:1) /// Proof: `XcmpQueue::OutboundXcmpMessages` (`max_values`: None, `max_size`: None, mode: `Measured`) fn send_message() -> Weight { // Proof Size summary in bytes: // Measured: `387` // Estimated: `3852` - // Minimum execution time: 52_040_000 picoseconds. - Weight::from_parts(53_500_000, 0) + // Minimum execution time: 61_995_000 picoseconds. + Weight::from_parts(65_137_000, 0) .saturating_add(Weight::from_parts(0, 3852)) .saturating_add(T::DbWeight::get().reads(11)) .saturating_add(T::DbWeight::get().writes(4)) diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/pallet_xcm_bridge_hub_router_to_rococo.rs b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/pallet_xcm_bridge_hub_router_to_rococo.rs deleted file mode 100644 index ff00ace25b8..00000000000 --- a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/pallet_xcm_bridge_hub_router_to_rococo.rs +++ /dev/null @@ -1,130 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Cumulus. - -// Cumulus is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Cumulus is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Cumulus. If not, see . - -//! Autogenerated weights for `pallet_xcm_bridge_hub_router` -//! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-10-26, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! WORST CASE MAP SIZE: `1000000` -//! 
HOSTNAME: `runner-vmdtonbz-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("asset-hub-rococo-dev")`, DB CACHE: 1024 - -// Executed Command: -// target/production/polkadot-parachain -// benchmark -// pallet -// --steps=50 -// --repeat=20 -// --extrinsic=* -// --wasm-execution=compiled -// --heap-pages=4096 -// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json -// --pallet=pallet_xcm_bridge_hub_router -// --chain=asset-hub-rococo-dev -// --header=./cumulus/file_header.txt -// --output=./cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/ - -#![cfg_attr(rustfmt, rustfmt_skip)] -#![allow(unused_parens)] -#![allow(unused_imports)] -#![allow(missing_docs)] - -use frame_support::{traits::Get, weights::Weight}; -use core::marker::PhantomData; - -/// Weight functions for `pallet_xcm_bridge_hub_router`. -pub struct WeightInfo(PhantomData); -impl pallet_xcm_bridge_hub_router::WeightInfo for WeightInfo { - /// Storage: UNKNOWN KEY `0x48297505634037ef48c848c99c0b1f1b` (r:1 w:0) - /// Proof: UNKNOWN KEY `0x48297505634037ef48c848c99c0b1f1b` (r:1 w:0) - /// Storage: `XcmpQueue::InboundXcmpStatus` (r:1 w:0) - /// Proof: `XcmpQueue::InboundXcmpStatus` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `XcmpQueue::OutboundXcmpStatus` (r:1 w:0) - /// Proof: `XcmpQueue::OutboundXcmpStatus` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `ToRococoXcmRouter::Bridge` (r:1 w:1) - /// Proof: `ToRococoXcmRouter::Bridge` (`max_values`: Some(1), `max_size`: Some(17), added: 512, mode: `MaxEncodedLen`) - fn on_initialize_when_non_congested() -> Weight { - // Proof Size summary in bytes: - // Measured: `265` - // Estimated: `3730` - // Minimum execution time: 9_084_000 picoseconds. - Weight::from_parts(9_441_000, 0) - .saturating_add(Weight::from_parts(0, 3730)) - .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: UNKNOWN KEY `0x48297505634037ef48c848c99c0b1f1b` (r:1 w:0) - /// Proof: UNKNOWN KEY `0x48297505634037ef48c848c99c0b1f1b` (r:1 w:0) - /// Storage: `XcmpQueue::InboundXcmpStatus` (r:1 w:0) - /// Proof: `XcmpQueue::InboundXcmpStatus` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `XcmpQueue::OutboundXcmpStatus` (r:1 w:0) - /// Proof: `XcmpQueue::OutboundXcmpStatus` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - fn on_initialize_when_congested() -> Weight { - // Proof Size summary in bytes: - // Measured: `202` - // Estimated: `3667` - // Minimum execution time: 5_971_000 picoseconds. - Weight::from_parts(6_260_000, 0) - .saturating_add(Weight::from_parts(0, 3667)) - .saturating_add(T::DbWeight::get().reads(3)) - } - /// Storage: `ToRococoXcmRouter::Bridge` (r:1 w:1) - /// Proof: `ToRococoXcmRouter::Bridge` (`max_values`: Some(1), `max_size`: Some(17), added: 512, mode: `MaxEncodedLen`) - fn report_bridge_status() -> Weight { - // Proof Size summary in bytes: - // Measured: `117` - // Estimated: `1502` - // Minimum execution time: 10_231_000 picoseconds. 
- Weight::from_parts(10_861_000, 0) - .saturating_add(Weight::from_parts(0, 1502)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: UNKNOWN KEY `0x48297505634037ef48c848c99c0b1f1b` (r:1 w:0) - /// Proof: UNKNOWN KEY `0x48297505634037ef48c848c99c0b1f1b` (r:1 w:0) - /// Storage: `ParachainInfo::ParachainId` (r:1 w:0) - /// Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - /// Storage: UNKNOWN KEY `0x0973fe64c85043ba1c965cbc38eb63c7` (r:1 w:0) - /// Proof: UNKNOWN KEY `0x0973fe64c85043ba1c965cbc38eb63c7` (r:1 w:0) - /// Storage: `ToRococoXcmRouter::Bridge` (r:1 w:1) - /// Proof: `ToRococoXcmRouter::Bridge` (`max_values`: Some(1), `max_size`: Some(17), added: 512, mode: `MaxEncodedLen`) - /// Storage: `XcmpQueue::DeliveryFeeFactor` (r:1 w:0) - /// Proof: `XcmpQueue::DeliveryFeeFactor` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) - /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) - /// Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) - /// Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `ParachainSystem::RelevantMessagingState` (r:1 w:0) - /// Proof: `ParachainSystem::RelevantMessagingState` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `XcmpQueue::OutboundXcmpStatus` (r:1 w:1) - /// Proof: `XcmpQueue::OutboundXcmpStatus` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `XcmpQueue::InboundXcmpStatus` (r:1 w:0) - /// Proof: `XcmpQueue::InboundXcmpStatus` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `XcmpQueue::OutboundXcmpMessages` (r:0 w:1) - /// Proof: `XcmpQueue::OutboundXcmpMessages` (`max_values`: None, `max_size`: None, mode: `Measured`) - fn send_message() -> Weight { - // Proof Size summary in bytes: - // Measured: `478` - // Estimated: `3943` - // Minimum execution time: 53_966_000 picoseconds. - Weight::from_parts(55_224_000, 0) - .saturating_add(Weight::from_parts(0, 3943)) - .saturating_add(T::DbWeight::get().reads(11)) - .saturating_add(T::DbWeight::get().writes(4)) - } -} diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/pallet_xcm_bridge_hub_router_to_wococo.rs b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/pallet_xcm_bridge_hub_router_to_wococo.rs deleted file mode 100644 index ca371f1e6ce..00000000000 --- a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/pallet_xcm_bridge_hub_router_to_wococo.rs +++ /dev/null @@ -1,130 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Cumulus. - -// Cumulus is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Cumulus is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. 
- -// You should have received a copy of the GNU General Public License -// along with Cumulus. If not, see . - -//! Autogenerated weights for `pallet_xcm_bridge_hub_router` -//! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-10-26, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-vmdtonbz-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("asset-hub-rococo-dev")`, DB CACHE: 1024 - -// Executed Command: -// target/production/polkadot-parachain -// benchmark -// pallet -// --steps=50 -// --repeat=20 -// --extrinsic=* -// --wasm-execution=compiled -// --heap-pages=4096 -// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json -// --pallet=pallet_xcm_bridge_hub_router -// --chain=asset-hub-rococo-dev -// --header=./cumulus/file_header.txt -// --output=./cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/ - -#![cfg_attr(rustfmt, rustfmt_skip)] -#![allow(unused_parens)] -#![allow(unused_imports)] -#![allow(missing_docs)] - -use frame_support::{traits::Get, weights::Weight}; -use core::marker::PhantomData; - -/// Weight functions for `pallet_xcm_bridge_hub_router`. -pub struct WeightInfo(PhantomData); -impl pallet_xcm_bridge_hub_router::WeightInfo for WeightInfo { - /// Storage: UNKNOWN KEY `0x48297505634037ef48c848c99c0b1f1b` (r:1 w:0) - /// Proof: UNKNOWN KEY `0x48297505634037ef48c848c99c0b1f1b` (r:1 w:0) - /// Storage: `XcmpQueue::InboundXcmpStatus` (r:1 w:0) - /// Proof: `XcmpQueue::InboundXcmpStatus` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `XcmpQueue::OutboundXcmpStatus` (r:1 w:0) - /// Proof: `XcmpQueue::OutboundXcmpStatus` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `ToWococoXcmRouter::Bridge` (r:1 w:1) - /// Proof: `ToWococoXcmRouter::Bridge` (`max_values`: Some(1), `max_size`: Some(17), added: 512, mode: `MaxEncodedLen`) - fn on_initialize_when_non_congested() -> Weight { - // Proof Size summary in bytes: - // Measured: `231` - // Estimated: `3696` - // Minimum execution time: 9_115_000 picoseconds. - Weight::from_parts(9_522_000, 0) - .saturating_add(Weight::from_parts(0, 3696)) - .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: UNKNOWN KEY `0x48297505634037ef48c848c99c0b1f1b` (r:1 w:0) - /// Proof: UNKNOWN KEY `0x48297505634037ef48c848c99c0b1f1b` (r:1 w:0) - /// Storage: `XcmpQueue::InboundXcmpStatus` (r:1 w:0) - /// Proof: `XcmpQueue::InboundXcmpStatus` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `XcmpQueue::OutboundXcmpStatus` (r:1 w:0) - /// Proof: `XcmpQueue::OutboundXcmpStatus` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - fn on_initialize_when_congested() -> Weight { - // Proof Size summary in bytes: - // Measured: `183` - // Estimated: `3648` - // Minimum execution time: 5_207_000 picoseconds. - Weight::from_parts(5_534_000, 0) - .saturating_add(Weight::from_parts(0, 3648)) - .saturating_add(T::DbWeight::get().reads(3)) - } - /// Storage: `ToWococoXcmRouter::Bridge` (r:1 w:1) - /// Proof: `ToWococoXcmRouter::Bridge` (`max_values`: Some(1), `max_size`: Some(17), added: 512, mode: `MaxEncodedLen`) - fn report_bridge_status() -> Weight { - // Proof Size summary in bytes: - // Measured: `83` - // Estimated: `1502` - // Minimum execution time: 10_437_000 picoseconds. 
- Weight::from_parts(10_956_000, 0) - .saturating_add(Weight::from_parts(0, 1502)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: UNKNOWN KEY `0x48297505634037ef48c848c99c0b1f1b` (r:1 w:0) - /// Proof: UNKNOWN KEY `0x48297505634037ef48c848c99c0b1f1b` (r:1 w:0) - /// Storage: `ParachainInfo::ParachainId` (r:1 w:0) - /// Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - /// Storage: UNKNOWN KEY `0x0973fe64c85043ba1c965cbc38eb63c7` (r:1 w:0) - /// Proof: UNKNOWN KEY `0x0973fe64c85043ba1c965cbc38eb63c7` (r:1 w:0) - /// Storage: `ToWococoXcmRouter::Bridge` (r:1 w:1) - /// Proof: `ToWococoXcmRouter::Bridge` (`max_values`: Some(1), `max_size`: Some(17), added: 512, mode: `MaxEncodedLen`) - /// Storage: `XcmpQueue::DeliveryFeeFactor` (r:1 w:0) - /// Proof: `XcmpQueue::DeliveryFeeFactor` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) - /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) - /// Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) - /// Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `ParachainSystem::RelevantMessagingState` (r:1 w:0) - /// Proof: `ParachainSystem::RelevantMessagingState` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `XcmpQueue::OutboundXcmpStatus` (r:1 w:1) - /// Proof: `XcmpQueue::OutboundXcmpStatus` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `XcmpQueue::InboundXcmpStatus` (r:1 w:0) - /// Proof: `XcmpQueue::InboundXcmpStatus` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `XcmpQueue::OutboundXcmpMessages` (r:0 w:1) - /// Proof: `XcmpQueue::OutboundXcmpMessages` (`max_values`: None, `max_size`: None, mode: `Measured`) - fn send_message() -> Weight { - // Proof Size summary in bytes: - // Measured: `425` - // Estimated: `3890` - // Minimum execution time: 52_176_000 picoseconds. - Weight::from_parts(54_067_000, 0) - .saturating_add(Weight::from_parts(0, 3890)) - .saturating_add(T::DbWeight::get().reads(11)) - .saturating_add(T::DbWeight::get().writes(4)) - } -} diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/xcm/pallet_xcm_benchmarks_fungible.rs b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/xcm/pallet_xcm_benchmarks_fungible.rs index fe5123a427c..7fab3584250 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/xcm/pallet_xcm_benchmarks_fungible.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/xcm/pallet_xcm_benchmarks_fungible.rs @@ -17,9 +17,9 @@ //! Autogenerated weights for `pallet_xcm_benchmarks::fungible` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-10-26, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-11-15, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-vmdtonbz-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-yprdrvc7-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! 
WASM-EXECUTION: Compiled, CHAIN: Some("asset-hub-rococo-dev"), DB CACHE: 1024 // Executed Command: @@ -54,8 +54,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `101` // Estimated: `3593` - // Minimum execution time: 20_940_000 picoseconds. - Weight::from_parts(21_453_000, 3593) + // Minimum execution time: 21_643_000 picoseconds. + Weight::from_parts(22_410_000, 3593) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -65,15 +65,13 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `101` // Estimated: `6196` - // Minimum execution time: 44_310_000 picoseconds. - Weight::from_parts(44_948_000, 6196) + // Minimum execution time: 43_758_000 picoseconds. + Weight::from_parts(44_654_000, 6196) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(2)) } // Storage: `System::Account` (r:3 w:3) // Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - // Storage: UNKNOWN KEY `0x48297505634037ef48c848c99c0b1f1b` (r:1 w:0) - // Proof: UNKNOWN KEY `0x48297505634037ef48c848c99c0b1f1b` (r:1 w:0) // Storage: `ParachainInfo::ParachainId` (r:1 w:0) // Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) // Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) @@ -92,25 +90,21 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `246` // Estimated: `8799` - // Minimum execution time: 87_226_000 picoseconds. - Weight::from_parts(89_399_000, 8799) - .saturating_add(T::DbWeight::get().reads(11)) + // Minimum execution time: 87_978_000 picoseconds. + Weight::from_parts(88_517_000, 8799) + .saturating_add(T::DbWeight::get().reads(10)) .saturating_add(T::DbWeight::get().writes(5)) } - // Storage: UNKNOWN KEY `0x48297505634037ef48c848c99c0b1f1b` (r:1 w:0) - // Proof: UNKNOWN KEY `0x48297505634037ef48c848c99c0b1f1b` (r:1 w:0) // Storage: `ParachainInfo::ParachainId` (r:1 w:0) // Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) pub fn reserve_asset_deposited() -> Weight { // Proof Size summary in bytes: - // Measured: `39` - // Estimated: `3504` - // Minimum execution time: 7_320_000 picoseconds. - Weight::from_parts(7_453_000, 3504) - .saturating_add(T::DbWeight::get().reads(2)) + // Measured: `0` + // Estimated: `1489` + // Minimum execution time: 6_883_000 picoseconds. + Weight::from_parts(6_979_000, 1489) + .saturating_add(T::DbWeight::get().reads(1)) } - // Storage: UNKNOWN KEY `0x48297505634037ef48c848c99c0b1f1b` (r:1 w:0) - // Proof: UNKNOWN KEY `0x48297505634037ef48c848c99c0b1f1b` (r:1 w:0) // Storage: `ParachainInfo::ParachainId` (r:1 w:0) // Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) // Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) @@ -131,17 +125,17 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `246` // Estimated: `6196` - // Minimum execution time: 183_539_000 picoseconds. - Weight::from_parts(190_968_000, 6196) - .saturating_add(T::DbWeight::get().reads(10)) + // Minimum execution time: 198_882_000 picoseconds. 
+ Weight::from_parts(199_930_000, 6196) + .saturating_add(T::DbWeight::get().reads(9)) .saturating_add(T::DbWeight::get().writes(4)) } pub fn receive_teleported_asset() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 3_068_000 picoseconds. - Weight::from_parts(3_228_000, 0) + // Minimum execution time: 3_343_000 picoseconds. + Weight::from_parts(3_487_000, 0) } // Storage: `System::Account` (r:1 w:1) // Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) @@ -149,15 +143,13 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `3593` - // Minimum execution time: 18_788_000 picoseconds. - Weight::from_parts(19_240_000, 3593) + // Minimum execution time: 19_399_000 picoseconds. + Weight::from_parts(19_659_000, 3593) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } // Storage: `System::Account` (r:2 w:2) // Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - // Storage: UNKNOWN KEY `0x48297505634037ef48c848c99c0b1f1b` (r:1 w:0) - // Proof: UNKNOWN KEY `0x48297505634037ef48c848c99c0b1f1b` (r:1 w:0) // Storage: `ParachainInfo::ParachainId` (r:1 w:0) // Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) // Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) @@ -176,13 +168,11 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `145` // Estimated: `6196` - // Minimum execution time: 58_577_000 picoseconds. - Weight::from_parts(59_729_000, 6196) - .saturating_add(T::DbWeight::get().reads(10)) + // Minimum execution time: 59_017_000 picoseconds. + Weight::from_parts(60_543_000, 6196) + .saturating_add(T::DbWeight::get().reads(9)) .saturating_add(T::DbWeight::get().writes(4)) } - // Storage: UNKNOWN KEY `0x48297505634037ef48c848c99c0b1f1b` (r:1 w:0) - // Proof: UNKNOWN KEY `0x48297505634037ef48c848c99c0b1f1b` (r:1 w:0) // Storage: `ParachainInfo::ParachainId` (r:1 w:0) // Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) // Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) @@ -203,9 +193,9 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `145` // Estimated: `3610` - // Minimum execution time: 45_804_000 picoseconds. - Weight::from_parts(46_702_000, 3610) - .saturating_add(T::DbWeight::get().reads(9)) + // Minimum execution time: 45_409_000 picoseconds. + Weight::from_parts(47_041_000, 3610) + .saturating_add(T::DbWeight::get().reads(8)) .saturating_add(T::DbWeight::get().writes(3)) } } diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/xcm/pallet_xcm_benchmarks_generic.rs b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/xcm/pallet_xcm_benchmarks_generic.rs index e2fe122a12d..4454494badc 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/xcm/pallet_xcm_benchmarks_generic.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/xcm/pallet_xcm_benchmarks_generic.rs @@ -17,9 +17,9 @@ //! Autogenerated weights for `pallet_xcm_benchmarks::generic` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-10-26, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-11-15, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! 
WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-vmdtonbz-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-yprdrvc7-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: Compiled, CHAIN: Some("asset-hub-rococo-dev"), DB CACHE: 1024 // Executed Command: @@ -48,8 +48,6 @@ use sp_std::marker::PhantomData; /// Weights for `pallet_xcm_benchmarks::generic`. pub struct WeightInfo(PhantomData); impl WeightInfo { - // Storage: UNKNOWN KEY `0x48297505634037ef48c848c99c0b1f1b` (r:1 w:0) - // Proof: UNKNOWN KEY `0x48297505634037ef48c848c99c0b1f1b` (r:1 w:0) // Storage: `ParachainInfo::ParachainId` (r:1 w:0) // Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) // Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) @@ -70,17 +68,17 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `246` // Estimated: `6196` - // Minimum execution time: 415_688_000 picoseconds. - Weight::from_parts(433_876_000, 6196) - .saturating_add(T::DbWeight::get().reads(10)) + // Minimum execution time: 440_298_000 picoseconds. + Weight::from_parts(446_508_000, 6196) + .saturating_add(T::DbWeight::get().reads(9)) .saturating_add(T::DbWeight::get().writes(4)) } pub fn buy_execution() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 3_209_000 picoseconds. - Weight::from_parts(3_465_000, 0) + // Minimum execution time: 3_313_000 picoseconds. + Weight::from_parts(3_422_000, 0) } // Storage: `PolkadotXcm::Queries` (r:1 w:0) // Proof: `PolkadotXcm::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`) @@ -88,61 +86,59 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `103` // Estimated: `3568` - // Minimum execution time: 7_940_000 picoseconds. - Weight::from_parts(8_208_000, 3568) + // Minimum execution time: 9_691_000 picoseconds. + Weight::from_parts(9_948_000, 3568) .saturating_add(T::DbWeight::get().reads(1)) } pub fn transact() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 9_336_000 picoseconds. - Weight::from_parts(9_733_000, 0) + // Minimum execution time: 10_384_000 picoseconds. + Weight::from_parts(11_085_000, 0) } pub fn refund_surplus() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 3_368_000 picoseconds. - Weight::from_parts(3_700_000, 0) + // Minimum execution time: 3_438_000 picoseconds. + Weight::from_parts(3_577_000, 0) } pub fn set_error_handler() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_868_000 picoseconds. - Weight::from_parts(2_034_000, 0) + // Minimum execution time: 2_126_000 picoseconds. + Weight::from_parts(2_243_000, 0) } pub fn set_appendix() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_870_000 picoseconds. - Weight::from_parts(1_972_000, 0) + // Minimum execution time: 2_126_000 picoseconds. + Weight::from_parts(2_207_000, 0) } pub fn clear_error() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_890_000 picoseconds. - Weight::from_parts(1_962_000, 0) + // Minimum execution time: 2_105_000 picoseconds. 
+ Weight::from_parts(2_193_000, 0) } pub fn descend_origin() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_660_000 picoseconds. - Weight::from_parts(2_744_000, 0) + // Minimum execution time: 2_999_000 picoseconds. + Weight::from_parts(3_056_000, 0) } pub fn clear_origin() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_845_000 picoseconds. - Weight::from_parts(1_945_000, 0) + // Minimum execution time: 2_091_000 picoseconds. + Weight::from_parts(2_176_000, 0) } - // Storage: UNKNOWN KEY `0x48297505634037ef48c848c99c0b1f1b` (r:1 w:0) - // Proof: UNKNOWN KEY `0x48297505634037ef48c848c99c0b1f1b` (r:1 w:0) // Storage: `ParachainInfo::ParachainId` (r:1 w:0) // Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) // Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) @@ -163,9 +159,9 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `246` // Estimated: `6196` - // Minimum execution time: 54_283_000 picoseconds. - Weight::from_parts(54_969_000, 6196) - .saturating_add(T::DbWeight::get().reads(10)) + // Minimum execution time: 55_728_000 picoseconds. + Weight::from_parts(56_704_000, 6196) + .saturating_add(T::DbWeight::get().reads(9)) .saturating_add(T::DbWeight::get().writes(4)) } // Storage: `PolkadotXcm::AssetTraps` (r:1 w:1) @@ -174,8 +170,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `160` // Estimated: `3625` - // Minimum execution time: 11_850_000 picoseconds. - Weight::from_parts(12_328_000, 3625) + // Minimum execution time: 12_839_000 picoseconds. + Weight::from_parts(13_457_000, 3625) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -183,8 +179,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_891_000 picoseconds. - Weight::from_parts(1_950_000, 0) + // Minimum execution time: 2_116_000 picoseconds. + Weight::from_parts(2_219_000, 0) } // Storage: `PolkadotXcm::VersionNotifyTargets` (r:1 w:1) // Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) @@ -204,8 +200,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `145` // Estimated: `3610` - // Minimum execution time: 23_644_000 picoseconds. - Weight::from_parts(24_296_000, 3610) + // Minimum execution time: 24_891_000 picoseconds. + Weight::from_parts(25_583_000, 3610) .saturating_add(T::DbWeight::get().reads(7)) .saturating_add(T::DbWeight::get().writes(3)) } @@ -215,47 +211,45 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 3_719_000 picoseconds. - Weight::from_parts(3_896_000, 0) + // Minimum execution time: 3_968_000 picoseconds. + Weight::from_parts(4_122_000, 0) .saturating_add(T::DbWeight::get().writes(1)) } pub fn burn_asset() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 125_710_000 picoseconds. - Weight::from_parts(132_434_000, 0) + // Minimum execution time: 136_220_000 picoseconds. + Weight::from_parts(137_194_000, 0) } pub fn expect_asset() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 11_650_000 picoseconds. - Weight::from_parts(12_277_000, 0) + // Minimum execution time: 12_343_000 picoseconds. 
+ Weight::from_parts(12_635_000, 0) } pub fn expect_origin() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_978_000 picoseconds. - Weight::from_parts(2_070_000, 0) + // Minimum execution time: 2_237_000 picoseconds. + Weight::from_parts(2_315_000, 0) } pub fn expect_error() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_899_000 picoseconds. - Weight::from_parts(2_002_000, 0) + // Minimum execution time: 2_094_000 picoseconds. + Weight::from_parts(2_231_000, 0) } pub fn expect_transact_status() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_133_000 picoseconds. - Weight::from_parts(2_194_000, 0) + // Minimum execution time: 2_379_000 picoseconds. + Weight::from_parts(2_455_000, 0) } - // Storage: UNKNOWN KEY `0x48297505634037ef48c848c99c0b1f1b` (r:1 w:0) - // Proof: UNKNOWN KEY `0x48297505634037ef48c848c99c0b1f1b` (r:1 w:0) // Storage: `ParachainInfo::ParachainId` (r:1 w:0) // Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) // Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) @@ -276,20 +270,18 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `246` // Estimated: `6196` - // Minimum execution time: 58_644_000 picoseconds. - Weight::from_parts(60_614_000, 6196) - .saturating_add(T::DbWeight::get().reads(10)) + // Minimum execution time: 60_734_000 picoseconds. + Weight::from_parts(61_964_000, 6196) + .saturating_add(T::DbWeight::get().reads(9)) .saturating_add(T::DbWeight::get().writes(4)) } pub fn expect_pallet() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 5_185_000 picoseconds. - Weight::from_parts(5_366_000, 0) + // Minimum execution time: 5_500_000 picoseconds. + Weight::from_parts(5_720_000, 0) } - // Storage: UNKNOWN KEY `0x48297505634037ef48c848c99c0b1f1b` (r:1 w:0) - // Proof: UNKNOWN KEY `0x48297505634037ef48c848c99c0b1f1b` (r:1 w:0) // Storage: `ParachainInfo::ParachainId` (r:1 w:0) // Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) // Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) @@ -310,56 +302,54 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `246` // Estimated: `6196` - // Minimum execution time: 54_443_000 picoseconds. - Weight::from_parts(55_873_000, 6196) - .saturating_add(T::DbWeight::get().reads(10)) + // Minimum execution time: 55_767_000 picoseconds. + Weight::from_parts(56_790_000, 6196) + .saturating_add(T::DbWeight::get().reads(9)) .saturating_add(T::DbWeight::get().writes(4)) } pub fn clear_transact_status() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_909_000 picoseconds. - Weight::from_parts(2_011_000, 0) + // Minimum execution time: 2_201_000 picoseconds. + Weight::from_parts(2_291_000, 0) } pub fn set_topic() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_814_000 picoseconds. - Weight::from_parts(1_956_000, 0) + // Minimum execution time: 2_164_000 picoseconds. + Weight::from_parts(2_241_000, 0) } pub fn clear_topic() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_875_000 picoseconds. 
- Weight::from_parts(2_003_000, 0) + // Minimum execution time: 2_127_000 picoseconds. + Weight::from_parts(2_236_000, 0) } - // Storage: UNKNOWN KEY `0x48297505634037ef48c848c99c0b1f1b` (r:1 w:0) - // Proof: UNKNOWN KEY `0x48297505634037ef48c848c99c0b1f1b` (r:1 w:0) // Storage: `ParachainInfo::ParachainId` (r:1 w:0) // Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) pub fn universal_origin() -> Weight { // Proof Size summary in bytes: - // Measured: `39` - // Estimated: `3504` - // Minimum execution time: 7_376_000 picoseconds. - Weight::from_parts(7_620_000, 3504) - .saturating_add(T::DbWeight::get().reads(2)) + // Measured: `0` + // Estimated: `1489` + // Minimum execution time: 4_275_000 picoseconds. + Weight::from_parts(4_381_000, 1489) + .saturating_add(T::DbWeight::get().reads(1)) } pub fn set_fees_mode() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_863_000 picoseconds. - Weight::from_parts(1_964_000, 0) + // Minimum execution time: 2_132_000 picoseconds. + Weight::from_parts(2_216_000, 0) } pub fn unpaid_execution() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_956_000 picoseconds. - Weight::from_parts(2_057_000, 0) + // Minimum execution time: 2_265_000 picoseconds. + Weight::from_parts(2_332_000, 0) } } diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/xcm_config.rs b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/xcm_config.rs index b0bf9e82729..b85cb76642f 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/xcm_config.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/xcm_config.rs @@ -16,9 +16,8 @@ use super::{ AccountId, AllPalletsWithSystem, Assets, Authorship, Balance, Balances, BaseDeliveryFee, FeeAssetId, ForeignAssets, ForeignAssetsInstance, ParachainInfo, ParachainSystem, PolkadotXcm, - PoolAssets, Runtime, RuntimeCall, RuntimeEvent, RuntimeFlavor, RuntimeOrigin, - ToRococoXcmRouter, ToWestendXcmRouter, ToWococoXcmRouter, TransactionByteFee, - TrustBackedAssetsInstance, WeightToFee, XcmpQueue, + PoolAssets, Runtime, RuntimeCall, RuntimeEvent, RuntimeOrigin, ToWestendXcmRouter, + TransactionByteFee, TrustBackedAssetsInstance, WeightToFee, XcmpQueue, }; use assets_common::{ local_and_foreign_assets::MatchesLocalAndForeignAssetsMultiLocation, @@ -26,7 +25,7 @@ use assets_common::{ }; use frame_support::{ match_types, parameter_types, - traits::{ConstU32, Contains, Equals, Everything, Get, Nothing, PalletInfoAccess}, + traits::{ConstU32, Contains, Equals, Everything, Nothing, PalletInfoAccess}, }; use frame_system::EnsureRoot; use pallet_xcm::XcmPassthrough; @@ -61,8 +60,8 @@ use xcm_executor::{traits::WithOriginFilter, XcmExecutor}; use cumulus_primitives_core::ParaId; parameter_types! { - pub storage Flavor: RuntimeFlavor = RuntimeFlavor::default(); pub const TokenLocation: MultiLocation = MultiLocation::parent(); + pub const RelayNetwork: NetworkId = NetworkId::Rococo; pub RelayChainOrigin: RuntimeOrigin = cumulus_pallet_xcm::Origin::Relay.into(); pub UniversalLocation: InteriorMultiLocation = X2(GlobalConsensus(RelayNetwork::get()), Parachain(ParachainInfo::parachain_id().into())); @@ -79,22 +78,6 @@ parameter_types! 
{ pub RelayTreasuryLocation: MultiLocation = (Parent, PalletInstance(rococo_runtime_constants::TREASURY_PALLET_ID)).into(); } -/// Adapter for resolving `NetworkId` based on `pub storage Flavor: RuntimeFlavor`. -pub struct RelayNetwork; -impl Get> for RelayNetwork { - fn get() -> Option { - Some(Self::get()) - } -} -impl Get for RelayNetwork { - fn get() -> NetworkId { - match Flavor::get() { - RuntimeFlavor::Rococo => NetworkId::Rococo, - RuntimeFlavor::Wococo => NetworkId::Wococo, - } - } -} - /// Type for specifying how a `MultiLocation` can be converted into an `AccountId`. This is used /// when determining ownership of accounts for asset transacting and when attempting to use XCM /// `Transact` in order to determine the dispatch Origin. @@ -285,8 +268,7 @@ impl Contains for SafeCallFilter { if items.iter().all(|(k, _)| k.eq(&bridging::XcmBridgeHubRouterByteFee::key())) || items .iter() - .all(|(k, _)| k.eq(&bridging::XcmBridgeHubRouterBaseFee::key())) || - items.iter().all(|(k, _)| k.eq(&Flavor::key())) => + .all(|(k, _)| k.eq(&bridging::XcmBridgeHubRouterBaseFee::key())) => return true, _ => (), }; @@ -475,12 +457,8 @@ impl Contains for SafeCallFilter { pallet_uniques::Call::set_collection_max_supply { .. } | pallet_uniques::Call::set_price { .. } | pallet_uniques::Call::buy_item { .. } - ) | RuntimeCall::ToWococoXcmRouter( - pallet_xcm_bridge_hub_router::Call::report_bridge_status { .. } ) | RuntimeCall::ToWestendXcmRouter( pallet_xcm_bridge_hub_router::Call::report_bridge_status { .. } - ) | RuntimeCall::ToRococoXcmRouter( - pallet_xcm_bridge_hub_router::Call::report_bridge_status { .. } ) ) } @@ -572,11 +550,7 @@ impl xcm_executor::Config for XcmConfig { // as reserve locations (we trust the Bridge Hub to relay the message that a reserve is being // held). Asset Hub may _act_ as a reserve location for ROC and assets created // under `pallet-assets`. Users must use teleport where allowed (e.g. ROC with the Relay Chain). - type IsReserve = ( - bridging::to_wococo::IsTrustedBridgedReserveLocationForConcreteAsset, - bridging::to_westend::IsTrustedBridgedReserveLocationForConcreteAsset, - bridging::to_rococo::IsTrustedBridgedReserveLocationForConcreteAsset, - ); + type IsReserve = (bridging::to_westend::IsTrustedBridgedReserveLocationForConcreteAsset,); type IsTeleporter = TrustedTeleporters; type UniversalLocation = UniversalLocation; type Barrier = Barrier; @@ -627,11 +601,7 @@ impl xcm_executor::Config for XcmConfig { XcmFeeToAccount, >; type MessageExporter = (); - type UniversalAliases = ( - bridging::to_wococo::UniversalAliases, - bridging::to_rococo::UniversalAliases, - bridging::to_westend::UniversalAliases, - ); + type UniversalAliases = (bridging::to_westend::UniversalAliases,); type CallDispatcher = WithOriginFilter; type SafeCallFilter = SafeCallFilter; type Aliasers = Nothing; @@ -656,15 +626,9 @@ type LocalXcmRouter = ( /// queues. 
pub type XcmRouter = WithUniqueTopic<( LocalXcmRouter, - // Router which wraps and sends xcm to BridgeHub to be delivered to the Wococo - // GlobalConsensus - ToWococoXcmRouter, // Router which wraps and sends xcm to BridgeHub to be delivered to the Westend // GlobalConsensus ToWestendXcmRouter, - // Router which wraps and sends xcm to BridgeHub to be delivered to the Rococo - // GlobalConsensus - ToRococoXcmRouter, )>; impl pallet_xcm::Config for Runtime { @@ -731,7 +695,7 @@ impl pallet_asset_conversion::BenchmarkHelper> for BenchmarkMultiLocationConverter where - SelfParaId: Get, + SelfParaId: frame_support::traits::Get, { fn asset_id(asset_id: u32) -> MultiLocation { MultiLocation { @@ -754,7 +718,7 @@ pub mod bridging { use assets_common::matching; use sp_std::collections::btree_set::BTreeSet; - // common/shared parameters for Wococo/Rococo + // common/shared parameters parameter_types! { /// Base price of every byte of the Rococo -> Westend message. Can be adjusted via /// governance `set_storage` call. @@ -775,10 +739,7 @@ pub mod bridging { /// governance `set_storage` call. pub storage XcmBridgeHubRouterByteFee: Balance = TransactionByteFee::get(); - pub SiblingBridgeHubParaId: u32 = match Flavor::get() { - RuntimeFlavor::Rococo => bp_bridge_hub_rococo::BRIDGE_HUB_ROCOCO_PARACHAIN_ID, - RuntimeFlavor::Wococo => bp_bridge_hub_wococo::BRIDGE_HUB_WOCOCO_PARACHAIN_ID, - }; + pub SiblingBridgeHubParaId: u32 = bp_bridge_hub_rococo::BRIDGE_HUB_ROCOCO_PARACHAIN_ID; pub SiblingBridgeHub: MultiLocation = MultiLocation::new(1, X1(Parachain(SiblingBridgeHubParaId::get()))); /// Router expects payment with this `AssetId`. /// (`AssetId` has to be aligned with `BridgeTable`) @@ -786,90 +747,12 @@ pub mod bridging { pub BridgeTable: sp_std::vec::Vec = sp_std::vec::Vec::new().into_iter() - .chain(to_wococo::BridgeTable::get()) .chain(to_westend::BridgeTable::get()) - .chain(to_rococo::BridgeTable::get()) .collect(); } pub type NetworkExportTable = xcm_builder::NetworkExportTable; - pub mod to_wococo { - use super::*; - - parameter_types! { - pub SiblingBridgeHubWithBridgeHubWococoInstance: MultiLocation = MultiLocation::new( - 1, - X2( - Parachain(SiblingBridgeHubParaId::get()), - PalletInstance(bp_bridge_hub_rococo::WITH_BRIDGE_ROCOCO_TO_WOCOCO_MESSAGES_PALLET_INDEX) - ) - ); - - pub const WococoNetwork: NetworkId = NetworkId::Wococo; - pub AssetHubWococo: MultiLocation = MultiLocation::new(2, X2(GlobalConsensus(WococoNetwork::get()), Parachain(bp_asset_hub_wococo::ASSET_HUB_WOCOCO_PARACHAIN_ID))); - pub WocLocation: MultiLocation = MultiLocation::new(2, X1(GlobalConsensus(WococoNetwork::get()))); - - pub WocFromAssetHubWococo: (MultiAssetFilter, MultiLocation) = ( - Wild(AllOf { fun: WildFungible, id: Concrete(WocLocation::get()) }), - AssetHubWococo::get() - ); - - /// Set up exporters configuration. - /// `Option` represents static "base fee" which is used for total delivery fee calculation. 
- pub BridgeTable: sp_std::vec::Vec = sp_std::vec![ - NetworkExportTableItem::new( - WococoNetwork::get(), - Some(sp_std::vec![ - AssetHubWococo::get().interior.split_global().expect("invalid configuration for AssetHubWococo").1, - ]), - SiblingBridgeHub::get(), - // base delivery fee to local `BridgeHub` - Some(( - XcmBridgeHubRouterFeeAssetId::get(), - XcmBridgeHubRouterBaseFee::get(), - ).into()) - ) - ]; - - /// Universal aliases - pub UniversalAliases: BTreeSet<(MultiLocation, Junction)> = BTreeSet::from_iter( - sp_std::vec![ - (SiblingBridgeHubWithBridgeHubWococoInstance::get(), GlobalConsensus(WococoNetwork::get())) - ] - ); - } - - impl Contains<(MultiLocation, Junction)> for UniversalAliases { - fn contains(alias: &(MultiLocation, Junction)) -> bool { - UniversalAliases::get().contains(alias) - } - } - - /// Trusted reserve locations filter for `xcm_executor::Config::IsReserve`. - /// Locations from which the runtime accepts reserved assets. - pub type IsTrustedBridgedReserveLocationForConcreteAsset = - matching::IsTrustedBridgedReserveLocationForConcreteAsset< - UniversalLocation, - ( - // allow receive WOC from AssetHubWococo - xcm_builder::Case, - // and nothing else - ), - >; - - impl Contains for ToWococoXcmRouter { - fn contains(call: &RuntimeCall) -> bool { - matches!( - call, - RuntimeCall::ToWococoXcmRouter( - pallet_xcm_bridge_hub_router::Call::report_bridge_status { .. } - ) - ) - } - } - } - pub mod to_westend { use super::*; @@ -946,82 +829,6 @@ pub mod bridging { } } - pub mod to_rococo { - use super::*; - - parameter_types! { - pub SiblingBridgeHubWithBridgeHubRococoInstance: MultiLocation = MultiLocation::new( - 1, - X2( - Parachain(SiblingBridgeHubParaId::get()), - PalletInstance(bp_bridge_hub_wococo::WITH_BRIDGE_WOCOCO_TO_ROCOCO_MESSAGES_PALLET_INDEX) - ) - ); - - pub const RococoNetwork: NetworkId = NetworkId::Rococo; - pub AssetHubRococo: MultiLocation = MultiLocation::new(2, X2(GlobalConsensus(RococoNetwork::get()), Parachain(bp_asset_hub_rococo::ASSET_HUB_ROCOCO_PARACHAIN_ID))); - pub RocLocation: MultiLocation = MultiLocation::new(2, X1(GlobalConsensus(RococoNetwork::get()))); - - pub RocFromAssetHubRococo: (MultiAssetFilter, MultiLocation) = ( - Wild(AllOf { fun: WildFungible, id: Concrete(RocLocation::get()) }), - AssetHubRococo::get() - ); - - /// Set up exporters configuration. - /// `Option` represents static "base fee" which is used for total delivery fee calculation. - pub BridgeTable: sp_std::vec::Vec = sp_std::vec![ - NetworkExportTableItem::new( - RococoNetwork::get(), - Some(sp_std::vec![ - AssetHubRococo::get().interior.split_global().expect("invalid configuration for AssetHubRococo").1, - ]), - SiblingBridgeHub::get(), - // base delivery fee to local `BridgeHub` - Some(( - XcmBridgeHubRouterFeeAssetId::get(), - XcmBridgeHubRouterBaseFee::get(), - ).into()) - ) - ]; - - /// Universal aliases - pub UniversalAliases: BTreeSet<(MultiLocation, Junction)> = BTreeSet::from_iter( - sp_std::vec![ - (SiblingBridgeHubWithBridgeHubRococoInstance::get(), GlobalConsensus(RococoNetwork::get())) - ] - ); - } - - impl Contains<(MultiLocation, Junction)> for UniversalAliases { - fn contains(alias: &(MultiLocation, Junction)) -> bool { - UniversalAliases::get().contains(alias) - } - } - - /// Reserve locations filter for `xcm_executor::Config::IsReserve`. - /// Locations from which the runtime accepts reserved assets. 
- pub type IsTrustedBridgedReserveLocationForConcreteAsset = - matching::IsTrustedBridgedReserveLocationForConcreteAsset< - UniversalLocation, - ( - // allow receive ROC from AssetHubRococo - xcm_builder::Case, - // and nothing else - ), - >; - - impl Contains for ToRococoXcmRouter { - fn contains(call: &RuntimeCall) -> bool { - matches!( - call, - RuntimeCall::ToRococoXcmRouter( - pallet_xcm_bridge_hub_router::Call::report_bridge_status { .. } - ) - ) - } - } - } - /// Benchmarks helper for bridging configuration. #[cfg(feature = "runtime-benchmarks")] pub struct BridgingBenchmarksHelper; diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/tests/tests.rs b/cumulus/parachains/runtimes/assets/asset-hub-rococo/tests/tests.rs index b4f4e828dde..7bb71a77de7 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-rococo/tests/tests.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/tests/tests.rs @@ -28,8 +28,8 @@ pub use asset_hub_rococo_runtime::{ }, AllPalletsWithoutSystem, AssetDeposit, Assets, Balances, ExistentialDeposit, ForeignAssets, ForeignAssetsInstance, MetadataDepositBase, MetadataDepositPerByte, ParachainSystem, Runtime, - RuntimeCall, RuntimeEvent, RuntimeFlavor, SessionKeys, System, ToRococoXcmRouterInstance, - ToWestendXcmRouterInstance, ToWococoXcmRouterInstance, TrustBackedAssetsInstance, XcmpQueue, + RuntimeCall, RuntimeEvent, SessionKeys, System, ToWestendXcmRouterInstance, + TrustBackedAssetsInstance, XcmpQueue, }; use asset_test_utils::{ test_cases_over_bridge::TestBridgingConfig, CollatorSessionKey, CollatorSessionKeys, ExtBuilder, @@ -674,15 +674,6 @@ fn limited_reserve_transfer_assets_for_native_asset_over_bridge_works( mod asset_hub_rococo_tests { use super::*; - fn bridging_to_asset_hub_wococo() -> TestBridgingConfig { - asset_test_utils::test_cases_over_bridge::TestBridgingConfig { - bridged_network: bridging::to_wococo::WococoNetwork::get(), - local_bridge_hub_para_id: bridging::SiblingBridgeHubParaId::get(), - local_bridge_hub_location: bridging::SiblingBridgeHub::get(), - bridged_target_location: bridging::to_wococo::AssetHubWococo::get(), - } - } - fn bridging_to_asset_hub_westend() -> TestBridgingConfig { asset_test_utils::test_cases_over_bridge::TestBridgingConfig { bridged_network: bridging::to_westend::WestendNetwork::get(), @@ -692,13 +683,6 @@ mod asset_hub_rococo_tests { } } - #[test] - fn limited_reserve_transfer_assets_for_native_asset_to_asset_hub_wococo_works() { - limited_reserve_transfer_assets_for_native_asset_over_bridge_works( - bridging_to_asset_hub_wococo, - ) - } - #[test] fn limited_reserve_transfer_assets_for_native_asset_to_asset_hub_westend_works() { limited_reserve_transfer_assets_for_native_asset_over_bridge_works( @@ -706,31 +690,6 @@ mod asset_hub_rococo_tests { ) } - #[test] - fn receive_reserve_asset_deposited_woc_from_asset_hub_wococo_works() { - const BLOCK_AUTHOR_ACCOUNT: [u8; 32] = [13; 32]; - asset_test_utils::test_cases_over_bridge::receive_reserve_asset_deposited_from_different_consensus_works::< - Runtime, - AllPalletsWithoutSystem, - XcmConfig, - LocationToAccountId, - ForeignAssetsInstance, - >( - collator_session_keys().add(collator_session_key(BLOCK_AUTHOR_ACCOUNT)), - ExistentialDeposit::get(), - AccountId::from([73; 32]), - AccountId::from(BLOCK_AUTHOR_ACCOUNT), - // receiving WOCs - (MultiLocation { parents: 2, interior: X1(GlobalConsensus(Wococo)) }, 1000000000000, 1_000_000_000), - bridging_to_asset_hub_wococo, - ( - 
X1(PalletInstance(bp_bridge_hub_rococo::WITH_BRIDGE_ROCOCO_TO_WOCOCO_MESSAGES_PALLET_INDEX)), - GlobalConsensus(Wococo), - X1(Parachain(1000)) - ) - ) - } - #[test] fn receive_reserve_asset_deposited_wnd_from_asset_hub_westend_works() { const BLOCK_AUTHOR_ACCOUNT: [u8; 32] = [13; 32]; @@ -756,58 +715,6 @@ mod asset_hub_rococo_tests { ) } - #[test] - fn report_bridge_status_from_xcm_bridge_router_for_wococo_works() { - asset_test_utils::test_cases_over_bridge::report_bridge_status_from_xcm_bridge_router_works::< - Runtime, - AllPalletsWithoutSystem, - XcmConfig, - LocationToAccountId, - ToWococoXcmRouterInstance, - >( - collator_session_keys(), - bridging_to_asset_hub_wococo, - || { - sp_std::vec![ - UnpaidExecution { weight_limit: Unlimited, check_origin: None }, - Transact { - origin_kind: OriginKind::Xcm, - require_weight_at_most: - bp_asset_hub_rococo::XcmBridgeHubRouterTransactCallMaxWeight::get(), - call: bp_asset_hub_rococo::Call::ToWococoXcmRouter( - bp_asset_hub_rococo::XcmBridgeHubRouterCall::report_bridge_status { - bridge_id: Default::default(), - is_congested: true, - } - ) - .encode() - .into(), - } - ] - .into() - }, - || { - sp_std::vec![ - UnpaidExecution { weight_limit: Unlimited, check_origin: None }, - Transact { - origin_kind: OriginKind::Xcm, - require_weight_at_most: - bp_asset_hub_rococo::XcmBridgeHubRouterTransactCallMaxWeight::get(), - call: bp_asset_hub_rococo::Call::ToWococoXcmRouter( - bp_asset_hub_rococo::XcmBridgeHubRouterCall::report_bridge_status { - bridge_id: Default::default(), - is_congested: false, - } - ) - .encode() - .into(), - } - ] - .into() - }, - ) - } - #[test] fn report_bridge_status_from_xcm_bridge_router_for_westend_works() { asset_test_utils::test_cases_over_bridge::report_bridge_status_from_xcm_bridge_router_works::< @@ -863,22 +770,6 @@ mod asset_hub_rococo_tests { #[test] fn test_report_bridge_status_call_compatibility() { // if this test fails, make sure `bp_asset_hub_rococo` has valid encoding - assert_eq!( - RuntimeCall::ToWococoXcmRouter( - pallet_xcm_bridge_hub_router::Call::report_bridge_status { - bridge_id: Default::default(), - is_congested: true, - } - ) - .encode(), - bp_asset_hub_rococo::Call::ToWococoXcmRouter( - bp_asset_hub_rococo::XcmBridgeHubRouterCall::report_bridge_status { - bridge_id: Default::default(), - is_congested: true, - } - ) - .encode() - ); assert_eq!( RuntimeCall::ToWestendXcmRouter( pallet_xcm_bridge_hub_router::Call::report_bridge_status { @@ -897,19 +788,6 @@ mod asset_hub_rococo_tests { ); } - #[test] - fn check_sane_weight_report_bridge_status_for_wococo() { - use pallet_xcm_bridge_hub_router::WeightInfo; - let actual = >::WeightInfo::report_bridge_status(); - let max_weight = bp_asset_hub_rococo::XcmBridgeHubRouterTransactCallMaxWeight::get(); - assert!( - actual.all_lte(max_weight), - "max_weight: {:?} should be adjusted to actual {:?}", - max_weight, - actual - ); - } - #[test] fn check_sane_weight_report_bridge_status_for_westend() { use pallet_xcm_bridge_hub_router::WeightInfo; @@ -955,167 +833,6 @@ mod asset_hub_rococo_tests { } } -mod asset_hub_wococo_tests { - use super::*; - - fn bridging_to_asset_hub_rococo() -> TestBridgingConfig { - TestBridgingConfig { - bridged_network: bridging::to_rococo::RococoNetwork::get(), - local_bridge_hub_para_id: bridging::SiblingBridgeHubParaId::get(), - local_bridge_hub_location: bridging::SiblingBridgeHub::get(), - bridged_target_location: bridging::to_rococo::AssetHubRococo::get(), - } - } - - pub(crate) fn set_wococo_flavor() { - let flavor_key = 
xcm_config::Flavor::key().to_vec(); - let flavor = RuntimeFlavor::Wococo; - - // encode `set_storage` call - let set_storage_call = RuntimeCall::System(frame_system::Call::::set_storage { - items: vec![(flavor_key, flavor.encode())], - }) - .encode(); - - // estimate - storing just 1 value - use frame_system::WeightInfo; - let require_weight_at_most = - ::SystemWeightInfo::set_storage(1); - - // execute XCM with Transact to `set_storage` as governance does - assert_ok!(RuntimeHelper::execute_as_governance(set_storage_call, require_weight_at_most) - .ensure_complete()); - - // check if stored - assert_eq!(flavor, xcm_config::Flavor::get()); - } - - fn with_wococo_flavor_bridging_to_asset_hub_rococo() -> TestBridgingConfig { - set_wococo_flavor(); - bridging_to_asset_hub_rococo() - } - - #[test] - fn limited_reserve_transfer_assets_for_native_asset_to_asset_hub_rococo_works() { - limited_reserve_transfer_assets_for_native_asset_over_bridge_works( - with_wococo_flavor_bridging_to_asset_hub_rococo, - ) - } - - #[test] - fn receive_reserve_asset_deposited_roc_from_asset_hub_rococo_works() { - const BLOCK_AUTHOR_ACCOUNT: [u8; 32] = [13; 32]; - asset_test_utils::test_cases_over_bridge::receive_reserve_asset_deposited_from_different_consensus_works::< - Runtime, - AllPalletsWithoutSystem, - XcmConfig, - LocationToAccountId, - ForeignAssetsInstance, - >( - collator_session_keys().add(collator_session_key(BLOCK_AUTHOR_ACCOUNT)), - ExistentialDeposit::get(), - AccountId::from([73; 32]), - AccountId::from(BLOCK_AUTHOR_ACCOUNT), - // receiving ROCs - (MultiLocation { parents: 2, interior: X1(GlobalConsensus(Rococo)) }, 1000000000000, 1_000_000_000), - with_wococo_flavor_bridging_to_asset_hub_rococo, - ( - X1(PalletInstance(bp_bridge_hub_wococo::WITH_BRIDGE_WOCOCO_TO_ROCOCO_MESSAGES_PALLET_INDEX)), - GlobalConsensus(Rococo), - X1(Parachain(1000)) - ) - ) - } - - #[test] - fn report_bridge_status_from_xcm_bridge_router_works() { - asset_test_utils::test_cases_over_bridge::report_bridge_status_from_xcm_bridge_router_works::< - Runtime, - AllPalletsWithoutSystem, - XcmConfig, - LocationToAccountId, - ToRococoXcmRouterInstance, - >( - collator_session_keys(), - with_wococo_flavor_bridging_to_asset_hub_rococo, - || { - sp_std::vec![ - UnpaidExecution { weight_limit: Unlimited, check_origin: None }, - Transact { - origin_kind: OriginKind::Xcm, - require_weight_at_most: - bp_asset_hub_wococo::XcmBridgeHubRouterTransactCallMaxWeight::get(), - call: bp_asset_hub_wococo::Call::ToRococoXcmRouter( - bp_asset_hub_wococo::XcmBridgeHubRouterCall::report_bridge_status { - bridge_id: Default::default(), - is_congested: true, - } - ) - .encode() - .into(), - } - ] - .into() - }, - || { - sp_std::vec![ - UnpaidExecution { weight_limit: Unlimited, check_origin: None }, - Transact { - origin_kind: OriginKind::Xcm, - require_weight_at_most: - bp_asset_hub_wococo::XcmBridgeHubRouterTransactCallMaxWeight::get(), - call: bp_asset_hub_wococo::Call::ToRococoXcmRouter( - bp_asset_hub_wococo::XcmBridgeHubRouterCall::report_bridge_status { - bridge_id: Default::default(), - is_congested: false, - } - ) - .encode() - .into(), - } - ] - .into() - }, - ) - } - - #[test] - fn test_report_bridge_status_call_compatibility() { - // if this test fails, make sure `bp_asset_hub_rococo` has valid encoding - assert_eq!( - RuntimeCall::ToRococoXcmRouter( - pallet_xcm_bridge_hub_router::Call::report_bridge_status { - bridge_id: Default::default(), - is_congested: true, - } - ) - .encode(), - bp_asset_hub_wococo::Call::ToRococoXcmRouter( - 
bp_asset_hub_wococo::XcmBridgeHubRouterCall::report_bridge_status { - bridge_id: Default::default(), - is_congested: true, - } - ) - .encode() - ) - } - - #[test] - fn check_sane_weight_report_bridge_status() { - use pallet_xcm_bridge_hub_router::WeightInfo; - let actual = >::WeightInfo::report_bridge_status(); - let max_weight = bp_asset_hub_wococo::XcmBridgeHubRouterTransactCallMaxWeight::get(); - assert!( - actual.all_lte(max_weight), - "max_weight: {:?} should be adjusted to actual {:?}", - max_weight, - actual - ); - } -} - #[test] fn change_xcm_bridge_hub_router_byte_fee_by_governance_works() { asset_test_utils::test_cases::change_storage_constant_by_governance_works::< diff --git a/cumulus/parachains/runtimes/bridge-hubs/README.md b/cumulus/parachains/runtimes/bridge-hubs/README.md index b2a14a0405d..cf617db730d 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/README.md +++ b/cumulus/parachains/runtimes/bridge-hubs/README.md @@ -1,14 +1,5 @@ - [Bridge-hub Parachains](#bridge-hub-parachains) - [Requirements for local run/testing](#requirements-for-local-runtesting) - - [How to test local Rococo <-> Wococo bridge](#how-to-test-local-rococo---wococo-bridge) - - [Run Rococo/Wococo chains with zombienet](#run-rococowococo-chains-with-zombienet) - - [Init bridge and run relayer between BridgeHubRococo and - BridgeHubWococo](#init-bridge-and-run-relayer-between-bridgehubrococo-and-bridgehubwococo) - - [Initialize configuration for transfer asset over bridge - (ROCs/WOCs)](#initialize-configuration-for-transfer-asset-over-bridge-rocswocs) - - [Send messages - transfer asset over bridge (ROCs/WOCs)](#send-messages---transfer-asset-over-bridge-rocswocs) - - [Claim relayer's rewards on BridgeHubRococo and - BridgeHubWococo](#claim-relayers-rewards-on-bridgehubrococo-and-bridgehubwococo) - [How to test local Rococo <-> Westend bridge](#how-to-test-local-rococo---westend-bridge) - [Run Rococo/Westend chains with zombienet](#run-rococowestend-chains-with-zombienet) - [Init bridge and run relayer between BridgeHubRococo and @@ -53,17 +44,7 @@ Copy the apropriate binary (zombienet-linux) from the latest release to ~/local_ --- # 2. Build polkadot binary -# If you want to test Kusama/Polkadot bridge, we need "sudo pallet + fast-runtime", -# so we need to use sudofi in polkadot directory. 
-# -# Install sudofi: (skip if already installed) -# cd -# git clone https://github.com/paritytech/parachain-utils.git -# cd parachain-utils # -> this is -# cargo build --release --bin sudofi -# -# cd /polkadot -# /target/release/sudofi +We need polkadot binary with "fast-runtime" feature: cd cargo build --release --features fast-runtime --bin polkadot @@ -100,112 +81,6 @@ cp target/release/polkadot-parachain ~/local_bridge_testing/bin/polkadot-paracha cp target/release/polkadot-parachain ~/local_bridge_testing/bin/polkadot-parachain-asset-hub ``` - -## How to test local Rococo <-> Wococo bridge - -### Run Rococo/Wococo chains with zombienet - -``` -cd - -# Rococo + BridgeHubRococo + AssetHub for Rococo (mirroring Kusama) -POLKADOT_BINARY_PATH=~/local_bridge_testing/bin/polkadot \ -POLKADOT_PARACHAIN_BINARY_PATH=~/local_bridge_testing/bin/polkadot-parachain \ -POLKADOT_PARACHAIN_BINARY_PATH_FOR_ASSET_HUB_ROCOCO=~/local_bridge_testing/bin/polkadot-parachain-asset-hub \ - ~/local_bridge_testing/bin/zombienet-linux --provider native spawn ./cumulus/zombienet/bridge-hubs/bridge_hub_rococo_local_network.toml -``` - -``` -cd - -# Wococo + BridgeHubWococo + AssetHub for Wococo (mirroring Polkadot) -POLKADOT_BINARY_PATH=~/local_bridge_testing/bin/polkadot \ -POLKADOT_PARACHAIN_BINARY_PATH=~/local_bridge_testing/bin/polkadot-parachain \ -POLKADOT_PARACHAIN_BINARY_PATH_FOR_ASSET_HUB_WOCOCO=~/local_bridge_testing/bin/polkadot-parachain-asset-hub \ - ~/local_bridge_testing/bin/zombienet-linux --provider native spawn ./cumulus/zombienet/bridge-hubs/bridge_hub_wococo_local_network.toml -``` - -### Init bridge and run relayer between BridgeHubRococo and BridgeHubWococo - -**Accounts of BridgeHub parachains:** -- `Bob` is pallet owner of all bridge pallets - -#### Run with script -``` -cd - -./cumulus/scripts/bridges_rococo_wococo.sh run-relay -``` - -**Check relay-chain headers relaying:** -- Rococo parachain: - https://polkadot.js.org/apps/?rpc=ws%3A%2F%2F127.0.0.1%3A8943#/chainstate - Pallet: - **bridgeWococoGrandpa** - Keys: **bestFinalized()** -- Wococo parachain: - https://polkadot.js.org/apps/?rpc=ws%3A%2F%2F127.0.0.1%3A8945#/chainstate - Pallet: - **bridgeRococoGrandpa** - Keys: **bestFinalized()** - -**Check parachain headers relaying:** -- Rococo parachain: - https://polkadot.js.org/apps/?rpc=ws%3A%2F%2F127.0.0.1%3A8943#/chainstate - Pallet: - **bridgeWococoParachains** - Keys: **parasInfo(None)** -- Wococo parachain: - https://polkadot.js.org/apps/?rpc=ws%3A%2F%2F127.0.0.1%3A8945#/chainstate - Pallet: - **bridgeRococoParachains** - Keys: **parasInfo(None)** - -### Initialize configuration for transfer asset over bridge (ROCs/WOCs) - -This initialization does several things: -- creates `ForeignAssets` for wrappedROCs/wrappedWOCs -- drips SA for AssetHubRococo on AssetHubWococo (and vice versa) which holds reserved assets on source chains -``` -cd - -./cumulus/scripts/bridges_rococo_wococo.sh init-asset-hub-rococo-local -./cumulus/scripts/bridges_rococo_wococo.sh init-bridge-hub-rococo-local -./cumulus/scripts/bridges_rococo_wococo.sh init-asset-hub-wococo-local -./cumulus/scripts/bridges_rococo_wococo.sh init-bridge-hub-wococo-local -``` - -### Send messages - transfer asset over bridge (ROCs/WOCs) - -Do (asset) transfers: -``` -cd - -# ROCs from Rococo's Asset Hub to Wococo's. -./cumulus/scripts/bridges_rococo_wococo.sh reserve-transfer-assets-from-asset-hub-rococo-local -``` -``` -cd - -# WOCs from Wococo's Asset Hub to Rococo's. 
-./cumulus/scripts/bridges_rococo_wococo.sh reserve-transfer-assets-from-asset-hub-wococo-local -``` - -- open explorers: (see zombienets) - - AssetHubRococo (see events `xcmpQueue.XcmpMessageSent`, `polkadotXcm.Attempted`) https://polkadot.js.org/apps/?rpc=ws://127.0.0.1:9910#/explorer - - BridgeHubRococo (see `bridgeWococoMessages.MessageAccepted`) https://polkadot.js.org/apps/?rpc=ws://127.0.0.1:8943#/explorer - - BridgeHubWococo (see `bridgeRococoMessages.MessagesReceived`, `xcmpQueue.XcmpMessageSent`) https://polkadot.js.org/apps/?rpc=ws://127.0.0.1:8945#/explorer - - AssetHubWococo (see `foreignAssets.Issued`, `xcmpQueue.Success`) https://polkadot.js.org/apps/?rpc=ws://127.0.0.1:9010#/explorer - - BridgeHubRocococ (see `bridgeWococoMessages.MessagesDelivered`) https://polkadot.js.org/apps/?rpc=ws://127.0.0.1:8943#/explorer - -### Claim relayer's rewards on BridgeHubRococo and BridgeHubWococo - -**Accounts of BridgeHub parachains:** -- `//Charlie` is relayer account on BridgeHubRococo -- `//Charlie` is relayer account on BridgeHubWococo - -``` -cd - -# Claim rewards on BridgeHubWococo: -./cumulus/scripts/bridges_rococo_wococo.sh claim-rewards-bridge-hub-rococo-local - -# Claim rewards on BridgeHubWococo: -./cumulus/scripts/bridges_rococo_wococo.sh claim-rewards-bridge-hub-wococo-local -``` - -- open explorers: (see zombienets) - - BridgeHubRococo (see 2x `bridgeRelayers.RewardPaid`) https://polkadot.js.org/apps/?rpc=ws://127.0.0.1:8943#/explorer - - BridgeHubWococo (see 2x `bridgeRelayers.RewardPaid`) https://polkadot.js.org/apps/?rpc=ws://127.0.0.1:8945#/explorer - ## How to test local Rococo <-> Westend bridge ### Run Rococo/Westend chains with zombienet diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/Cargo.toml b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/Cargo.toml index 671d38e808f..c475768d5dd 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/Cargo.toml +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/Cargo.toml @@ -77,10 +77,8 @@ parachains-common = { path = "../../../common", default-features = false } # Bridges bp-asset-hub-rococo = { path = "../../../../../bridges/primitives/chain-asset-hub-rococo", default-features = false } bp-asset-hub-westend = { path = "../../../../../bridges/primitives/chain-asset-hub-westend", default-features = false } -bp-asset-hub-wococo = { path = "../../../../../bridges/primitives/chain-asset-hub-wococo", default-features = false } bp-bridge-hub-rococo = { path = "../../../../../bridges/primitives/chain-bridge-hub-rococo", default-features = false } bp-bridge-hub-westend = { path = "../../../../../bridges/primitives/chain-bridge-hub-westend", default-features = false } -bp-bridge-hub-wococo = { path = "../../../../../bridges/primitives/chain-bridge-hub-wococo", default-features = false } bp-header-chain = { path = "../../../../../bridges/primitives/header-chain", default-features = false } bp-messages = { path = "../../../../../bridges/primitives/messages", default-features = false } bp-parachains = { path = "../../../../../bridges/primitives/parachains", default-features = false } @@ -89,7 +87,6 @@ bp-relayers = { path = "../../../../../bridges/primitives/relayers", default-fea bp-runtime = { path = "../../../../../bridges/primitives/runtime", default-features = false } bp-rococo = { path = "../../../../../bridges/primitives/chain-rococo", default-features = false } bp-westend = { path = "../../../../../bridges/primitives/chain-westend", default-features = false } 
-bp-wococo = { path = "../../../../../bridges/primitives/chain-wococo", default-features = false } pallet-bridge-grandpa = { path = "../../../../../bridges/modules/grandpa", default-features = false } pallet-bridge-messages = { path = "../../../../../bridges/modules/messages", default-features = false } pallet-bridge-parachains = { path = "../../../../../bridges/modules/parachains", default-features = false } @@ -107,10 +104,8 @@ default = [ "std" ] std = [ "bp-asset-hub-rococo/std", "bp-asset-hub-westend/std", - "bp-asset-hub-wococo/std", "bp-bridge-hub-rococo/std", "bp-bridge-hub-westend/std", - "bp-bridge-hub-wococo/std", "bp-header-chain/std", "bp-messages/std", "bp-parachains/std", @@ -119,7 +114,6 @@ std = [ "bp-rococo/std", "bp-runtime/std", "bp-westend/std", - "bp-wococo/std", "bridge-runtime-common/std", "codec/std", "cumulus-pallet-aura-ext/std", diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/bridge_common_config.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/bridge_common_config.rs index 296ec88a856..8153e52beac 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/bridge_common_config.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/bridge_common_config.rs @@ -29,10 +29,6 @@ parameter_types! { pub const RelayChainHeadersToKeep: u32 = 1024; pub const ParachainHeadsToKeep: u32 = 64; - pub const RococoBridgeParachainPalletName: &'static str = "Paras"; - pub const MaxRococoParaHeadDataSize: u32 = bp_rococo::MAX_NESTED_PARACHAIN_HEAD_DATA_SIZE; - pub const WococoBridgeParachainPalletName: &'static str = "Paras"; - pub const MaxWococoParaHeadDataSize: u32 = bp_wococo::MAX_NESTED_PARACHAIN_HEAD_DATA_SIZE; pub const WestendBridgeParachainPalletName: &'static str = "Paras"; pub const MaxWestendParaHeadDataSize: u32 = bp_westend::MAX_NESTED_PARACHAIN_HEAD_DATA_SIZE; @@ -43,52 +39,6 @@ parameter_types! { pub storage DeliveryRewardInBalance: u64 = 1_000_000; } -/// Add GRANDPA bridge pallet to track Wococo relay chain. -pub type BridgeGrandpaWococoInstance = pallet_bridge_grandpa::Instance1; -impl pallet_bridge_grandpa::Config for Runtime { - type RuntimeEvent = RuntimeEvent; - type BridgedChain = bp_wococo::Wococo; - type MaxFreeMandatoryHeadersPerBlock = ConstU32<4>; - type HeadersToKeep = RelayChainHeadersToKeep; - type WeightInfo = weights::pallet_bridge_grandpa_wococo_finality::WeightInfo; -} - -/// Add parachain bridge pallet to track Wococo BridgeHub parachain -pub type BridgeParachainWococoInstance = pallet_bridge_parachains::Instance1; -impl pallet_bridge_parachains::Config for Runtime { - type RuntimeEvent = RuntimeEvent; - type WeightInfo = weights::pallet_bridge_parachains_within_wococo::WeightInfo; - type BridgesGrandpaPalletInstance = BridgeGrandpaWococoInstance; - type ParasPalletName = WococoBridgeParachainPalletName; - type ParaStoredHeaderDataBuilder = - SingleParaStoredHeaderDataBuilder; - type HeadsToKeep = ParachainHeadsToKeep; - type MaxParaHeadDataSize = MaxWococoParaHeadDataSize; -} - -/// Add GRANDPA bridge pallet to track Rococo relay chain. 
-pub type BridgeGrandpaRococoInstance = pallet_bridge_grandpa::Instance2; -impl pallet_bridge_grandpa::Config for Runtime { - type RuntimeEvent = RuntimeEvent; - type BridgedChain = bp_rococo::Rococo; - type MaxFreeMandatoryHeadersPerBlock = ConstU32<4>; - type HeadersToKeep = RelayChainHeadersToKeep; - type WeightInfo = weights::pallet_bridge_grandpa_rococo_finality::WeightInfo; -} - -/// Add parachain bridge pallet to track Rococo BridgeHub parachain -pub type BridgeParachainRococoInstance = pallet_bridge_parachains::Instance2; -impl pallet_bridge_parachains::Config for Runtime { - type RuntimeEvent = RuntimeEvent; - type WeightInfo = weights::pallet_bridge_parachains_within_rococo::WeightInfo; - type BridgesGrandpaPalletInstance = BridgeGrandpaRococoInstance; - type ParasPalletName = RococoBridgeParachainPalletName; - type ParaStoredHeaderDataBuilder = - SingleParaStoredHeaderDataBuilder; - type HeadsToKeep = ParachainHeadsToKeep; - type MaxParaHeadDataSize = MaxRococoParaHeadDataSize; -} - /// Add GRANDPA bridge pallet to track Westend relay chain. pub type BridgeGrandpaWestendInstance = pallet_bridge_grandpa::Instance3; impl pallet_bridge_grandpa::Config for Runtime { @@ -96,14 +46,14 @@ impl pallet_bridge_grandpa::Config for Runtime { type BridgedChain = bp_westend::Westend; type MaxFreeMandatoryHeadersPerBlock = ConstU32<4>; type HeadersToKeep = RelayChainHeadersToKeep; - type WeightInfo = weights::pallet_bridge_grandpa_westend_finality::WeightInfo; + type WeightInfo = weights::pallet_bridge_grandpa::WeightInfo; } /// Add parachain bridge pallet to track Westend BridgeHub parachain pub type BridgeParachainWestendInstance = pallet_bridge_parachains::Instance3; impl pallet_bridge_parachains::Config for Runtime { type RuntimeEvent = RuntimeEvent; - type WeightInfo = weights::pallet_bridge_parachains_within_westend::WeightInfo; + type WeightInfo = weights::pallet_bridge_parachains::WeightInfo; type BridgesGrandpaPalletInstance = BridgeGrandpaWestendInstance; type ParasPalletName = WestendBridgeParachainPalletName; type ParaStoredHeaderDataBuilder = diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/bridge_to_rococo_config.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/bridge_to_rococo_config.rs deleted file mode 100644 index 35497c84068..00000000000 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/bridge_to_rococo_config.rs +++ /dev/null @@ -1,317 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Cumulus. - -// Cumulus is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Cumulus is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Cumulus. If not, see . - -//! Bridge definitions used on BridgeHub with the Wococo flavor for bridging to BridgeHubRococo. 
- -use crate::{ - bridge_common_config::{BridgeParachainRococoInstance, DeliveryRewardInBalance}, - weights, AccountId, BridgeRococoMessages, ParachainInfo, Runtime, RuntimeEvent, RuntimeOrigin, - XcmRouter, -}; -use bp_messages::LaneId; -use bridge_runtime_common::{ - messages, - messages::{ - source::{FromBridgedChainMessagesDeliveryProof, TargetHeaderChainAdapter}, - target::{FromBridgedChainMessagesProof, SourceHeaderChainAdapter}, - MessageBridge, ThisChainWithMessages, UnderlyingChainProvider, - }, - messages_xcm_extension::{ - SenderAndLane, XcmAsPlainPayload, XcmBlobHauler, XcmBlobHaulerAdapter, - XcmBlobMessageDispatch, - }, - refund_relayer_extension::{ - ActualFeeRefund, RefundBridgedParachainMessages, RefundSignedExtensionAdapter, - RefundableMessagesLane, RefundableParachain, - }, -}; -use codec::Encode; -use frame_support::{parameter_types, traits::PalletInfoAccess}; -use sp_runtime::RuntimeDebug; -use xcm::{ - latest::prelude::*, - prelude::{InteriorMultiLocation, NetworkId}, -}; -use xcm_builder::{BridgeBlobDispatcher, HaulBlobExporter}; - -parameter_types! { - pub const MaxUnrewardedRelayerEntriesAtInboundLane: bp_messages::MessageNonce = - bp_bridge_hub_wococo::MAX_UNREWARDED_RELAYERS_IN_CONFIRMATION_TX; - pub const MaxUnconfirmedMessagesAtInboundLane: bp_messages::MessageNonce = - bp_bridge_hub_wococo::MAX_UNCONFIRMED_MESSAGES_IN_CONFIRMATION_TX; - pub const BridgeHubRococoChainId: bp_runtime::ChainId = bp_runtime::BRIDGE_HUB_ROCOCO_CHAIN_ID; - pub BridgeHubWococoUniversalLocation: InteriorMultiLocation = X2(GlobalConsensus(Wococo), Parachain(ParachainInfo::parachain_id().into())); - pub BridgeWococoToRococoMessagesPalletInstance: InteriorMultiLocation = X1(PalletInstance(::index() as u8)); - pub RococoGlobalConsensusNetwork: NetworkId = NetworkId::Rococo; - pub ActiveOutboundLanesToBridgeHubRococo: &'static [bp_messages::LaneId] = &[XCM_LANE_FOR_ASSET_HUB_WOCOCO_TO_ASSET_HUB_ROCOCO]; - pub const AssetHubWococoToAssetHubRococoMessagesLane: bp_messages::LaneId = XCM_LANE_FOR_ASSET_HUB_WOCOCO_TO_ASSET_HUB_ROCOCO; - // see the `FEE_BOOST_PER_MESSAGE` constant to get the meaning of this value - pub PriorityBoostPerMessage: u64 = 182_044_444_444_444; - - pub AssetHubWococoParaId: cumulus_primitives_core::ParaId = bp_asset_hub_wococo::ASSET_HUB_WOCOCO_PARACHAIN_ID.into(); - pub AssetHubRococoParaId: cumulus_primitives_core::ParaId = bp_asset_hub_rococo::ASSET_HUB_ROCOCO_PARACHAIN_ID.into(); - - pub FromAssetHubWococoToAssetHubRococoRoute: SenderAndLane = SenderAndLane::new( - ParentThen(X1(Parachain(AssetHubWococoParaId::get().into()))).into(), - XCM_LANE_FOR_ASSET_HUB_WOCOCO_TO_ASSET_HUB_ROCOCO, - ); - - pub CongestedMessage: Xcm<()> = build_congestion_message(true).into(); - - pub UncongestedMessage: Xcm<()> = build_congestion_message(false).into(); -} -pub const XCM_LANE_FOR_ASSET_HUB_WOCOCO_TO_ASSET_HUB_ROCOCO: LaneId = LaneId([0, 0, 0, 1]); - -fn build_congestion_message(is_congested: bool) -> sp_std::vec::Vec> { - sp_std::vec![ - UnpaidExecution { weight_limit: Unlimited, check_origin: None }, - Transact { - origin_kind: OriginKind::Xcm, - require_weight_at_most: - bp_asset_hub_wococo::XcmBridgeHubRouterTransactCallMaxWeight::get(), - call: bp_asset_hub_wococo::Call::ToRococoXcmRouter( - bp_asset_hub_wococo::XcmBridgeHubRouterCall::report_bridge_status { - bridge_id: Default::default(), - is_congested, - } - ) - .encode() - .into(), - } - ] -} - -/// Proof of messages, coming from Rococo. 
-pub type FromRococoBridgeHubMessagesProof = - FromBridgedChainMessagesProof; -/// Messages delivery proof for RococoBridge Hub -> Wococo BridgeHub messages. -pub type ToRococoBridgeHubMessagesDeliveryProof = - FromBridgedChainMessagesDeliveryProof; - -/// Dispatches received XCM messages from other bridge -type FromRococoMessageBlobDispatcher = BridgeBlobDispatcher< - XcmRouter, - BridgeHubWococoUniversalLocation, - BridgeWococoToRococoMessagesPalletInstance, ->; - -/// Export XCM messages to be relayed to the other side -pub type ToBridgeHubRococoHaulBlobExporter = HaulBlobExporter< - XcmBlobHaulerAdapter, - RococoGlobalConsensusNetwork, - (), ->; -pub struct ToBridgeHubRococoXcmBlobHauler; -impl XcmBlobHauler for ToBridgeHubRococoXcmBlobHauler { - type Runtime = Runtime; - type MessagesInstance = WithBridgeHubRococoMessagesInstance; - type SenderAndLane = FromAssetHubWococoToAssetHubRococoRoute; - - type ToSourceChainSender = XcmRouter; - type CongestedMessage = CongestedMessage; - type UncongestedMessage = UncongestedMessage; -} - -/// On messages delivered callback. -type OnMessagesDelivered = XcmBlobHaulerAdapter; - -/// Messaging Bridge configuration for BridgeHubWococo -> BridgeHubRococo -pub struct WithBridgeHubRococoMessageBridge; -impl MessageBridge for WithBridgeHubRococoMessageBridge { - const BRIDGED_MESSAGES_PALLET_NAME: &'static str = - bp_bridge_hub_wococo::WITH_BRIDGE_HUB_WOCOCO_MESSAGES_PALLET_NAME; - type ThisChain = BridgeHubWococo; - type BridgedChain = BridgeHubRococo; - type BridgedHeaderChain = pallet_bridge_parachains::ParachainHeaders< - Runtime, - BridgeParachainRococoInstance, - bp_bridge_hub_rococo::BridgeHubRococo, - >; -} - -/// Message verifier for BridgeHubRococo messages sent from BridgeHubWococo -pub type ToBridgeHubRococoMessageVerifier = - messages::source::FromThisChainMessageVerifier; - -/// Maximal outbound payload size of BridgeHubWococo -> BridgeHubRococo messages. -pub type ToBridgeHubRococoMaximalOutboundPayloadSize = - messages::source::FromThisChainMaximalOutboundPayloadSize; - -/// BridgeHubRococo chain from message lane point of view. -#[derive(RuntimeDebug, Clone, Copy)] -pub struct BridgeHubRococo; - -impl UnderlyingChainProvider for BridgeHubRococo { - type Chain = bp_bridge_hub_rococo::BridgeHubRococo; -} - -impl messages::BridgedChainWithMessages for BridgeHubRococo {} - -/// BridgeHubWococo chain from message lane point of view. -#[derive(RuntimeDebug, Clone, Copy)] -pub struct BridgeHubWococo; - -impl UnderlyingChainProvider for BridgeHubWococo { - type Chain = bp_bridge_hub_wococo::BridgeHubWococo; -} - -impl ThisChainWithMessages for BridgeHubWococo { - type RuntimeOrigin = RuntimeOrigin; -} - -/// Signed extension that refunds relayers that are delivering messages from the Rococo parachain. 
-pub type OnBridgeHubWococoRefundBridgeHubRococoMessages = RefundSignedExtensionAdapter< - RefundBridgedParachainMessages< - Runtime, - RefundableParachain, - RefundableMessagesLane< - WithBridgeHubRococoMessagesInstance, - AssetHubWococoToAssetHubRococoMessagesLane, - >, - ActualFeeRefund, - PriorityBoostPerMessage, - StrOnBridgeHubWococoRefundBridgeHubRococoMessages, - >, ->; -bp_runtime::generate_static_str_provider!(OnBridgeHubWococoRefundBridgeHubRococoMessages); - -/// Add XCM messages support for BridgeHubWococo to support Wococo->Rococo XCM messages -pub type WithBridgeHubRococoMessagesInstance = pallet_bridge_messages::Instance2; -impl pallet_bridge_messages::Config for Runtime { - type RuntimeEvent = RuntimeEvent; - type WeightInfo = weights::pallet_bridge_messages_wococo_to_rococo::WeightInfo; - type BridgedChainId = BridgeHubRococoChainId; - type ActiveOutboundLanes = ActiveOutboundLanesToBridgeHubRococo; - type MaxUnrewardedRelayerEntriesAtInboundLane = MaxUnrewardedRelayerEntriesAtInboundLane; - type MaxUnconfirmedMessagesAtInboundLane = MaxUnconfirmedMessagesAtInboundLane; - - type MaximalOutboundPayloadSize = ToBridgeHubRococoMaximalOutboundPayloadSize; - type OutboundPayload = XcmAsPlainPayload; - - type InboundPayload = XcmAsPlainPayload; - type InboundRelayer = AccountId; - type DeliveryPayments = (); - - type TargetHeaderChain = TargetHeaderChainAdapter; - type LaneMessageVerifier = ToBridgeHubRococoMessageVerifier; - type DeliveryConfirmationPayments = pallet_bridge_relayers::DeliveryConfirmationPaymentsAdapter< - Runtime, - WithBridgeHubRococoMessagesInstance, - DeliveryRewardInBalance, - >; - - type SourceHeaderChain = SourceHeaderChainAdapter; - type MessageDispatch = XcmBlobMessageDispatch< - FromRococoMessageBlobDispatcher, - Self::WeightInfo, - cumulus_pallet_xcmp_queue::bridging::OutXcmpChannelStatusProvider< - AssetHubWococoParaId, - Runtime, - >, - >; - type OnMessagesDelivered = OnMessagesDelivered; -} - -#[cfg(test)] -mod tests { - use super::*; - use crate::bridge_common_config::BridgeGrandpaRococoInstance; - use bridge_runtime_common::{ - assert_complete_bridge_types, - integrity::{ - assert_complete_bridge_constants, check_message_lane_weights, - AssertBridgeMessagesPalletConstants, AssertBridgePalletNames, AssertChainConstants, - AssertCompleteBridgeConstants, - }, - }; - use parachains_common::{wococo, Balance}; - - /// Every additional message in the message delivery transaction boosts its priority. - /// So the priority of transaction with `N+1` messages is larger than priority of - /// transaction with `N` messages by the `PriorityBoostPerMessage`. - /// - /// Economically, it is an equivalent of adding tip to the transaction with `N` messages. - /// The `FEE_BOOST_PER_MESSAGE` constant is the value of this tip. - /// - /// We want this tip to be large enough (delivery transactions with more messages = less - /// operational costs and a faster bridge), so this value should be significant. 
- const FEE_BOOST_PER_MESSAGE: Balance = 2 * wococo::currency::UNITS; - - #[test] - fn ensure_bridge_hub_wococo_message_lane_weights_are_correct() { - check_message_lane_weights::< - bp_bridge_hub_wococo::BridgeHubWococo, - Runtime, - WithBridgeHubRococoMessagesInstance, - >( - bp_bridge_hub_rococo::EXTRA_STORAGE_PROOF_SIZE, - bp_bridge_hub_wococo::MAX_UNREWARDED_RELAYERS_IN_CONFIRMATION_TX, - bp_bridge_hub_wococo::MAX_UNCONFIRMED_MESSAGES_IN_CONFIRMATION_TX, - true, - ); - } - - #[test] - fn ensure_bridge_integrity() { - assert_complete_bridge_types!( - runtime: Runtime, - with_bridged_chain_grandpa_instance: BridgeGrandpaRococoInstance, - with_bridged_chain_messages_instance: WithBridgeHubRococoMessagesInstance, - bridge: WithBridgeHubRococoMessageBridge, - this_chain: bp_wococo::Wococo, - bridged_chain: bp_rococo::Rococo, - ); - - assert_complete_bridge_constants::< - Runtime, - BridgeGrandpaRococoInstance, - WithBridgeHubRococoMessagesInstance, - WithBridgeHubRococoMessageBridge, - >(AssertCompleteBridgeConstants { - this_chain_constants: AssertChainConstants { - block_length: bp_bridge_hub_wococo::BlockLength::get(), - block_weights: bp_bridge_hub_wococo::BlockWeights::get(), - }, - messages_pallet_constants: AssertBridgeMessagesPalletConstants { - max_unrewarded_relayers_in_bridged_confirmation_tx: - bp_bridge_hub_rococo::MAX_UNREWARDED_RELAYERS_IN_CONFIRMATION_TX, - max_unconfirmed_messages_in_bridged_confirmation_tx: - bp_bridge_hub_rococo::MAX_UNCONFIRMED_MESSAGES_IN_CONFIRMATION_TX, - bridged_chain_id: bp_runtime::BRIDGE_HUB_ROCOCO_CHAIN_ID, - }, - pallet_names: AssertBridgePalletNames { - with_this_chain_messages_pallet_name: - bp_bridge_hub_wococo::WITH_BRIDGE_HUB_WOCOCO_MESSAGES_PALLET_NAME, - with_bridged_chain_grandpa_pallet_name: bp_rococo::WITH_ROCOCO_GRANDPA_PALLET_NAME, - with_bridged_chain_messages_pallet_name: - bp_bridge_hub_rococo::WITH_BRIDGE_HUB_ROCOCO_MESSAGES_PALLET_NAME, - }, - }); - - bridge_runtime_common::priority_calculator::ensure_priority_boost_is_sane::< - Runtime, - WithBridgeHubRococoMessagesInstance, - PriorityBoostPerMessage, - >(FEE_BOOST_PER_MESSAGE); - - assert_eq!( - BridgeWococoToRococoMessagesPalletInstance::get(), - X1(PalletInstance( - bp_bridge_hub_wococo::WITH_BRIDGE_WOCOCO_TO_ROCOCO_MESSAGES_PALLET_INDEX - )) - ); - } -} diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/bridge_to_westend_config.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/bridge_to_westend_config.rs index 36dcab09dea..f3c1c9597b5 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/bridge_to_westend_config.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/bridge_to_westend_config.rs @@ -196,7 +196,7 @@ bp_runtime::generate_static_str_provider!(OnBridgeHubRococoRefundBridgeHubWesten pub type WithBridgeHubWestendMessagesInstance = pallet_bridge_messages::Instance3; impl pallet_bridge_messages::Config for Runtime { type RuntimeEvent = RuntimeEvent; - type WeightInfo = weights::pallet_bridge_messages_rococo_to_westend::WeightInfo; + type WeightInfo = weights::pallet_bridge_messages::WeightInfo; type BridgedChainId = BridgeHubWestendChainId; type ActiveOutboundLanes = ActiveOutboundLanesToBridgeHubWestend; type MaxUnrewardedRelayerEntriesAtInboundLane = MaxUnrewardedRelayerEntriesAtInboundLane; diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/bridge_to_wococo_config.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/bridge_to_wococo_config.rs 
deleted file mode 100644 index 7780b02632c..00000000000 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/bridge_to_wococo_config.rs +++ /dev/null @@ -1,318 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Cumulus. - -// Cumulus is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Cumulus is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Cumulus. If not, see . - -//! Bridge definitions used on BridgeHub with the Rococo flavor for bridging to BridgeHubWococo. - -use crate::{ - bridge_common_config::{BridgeParachainWococoInstance, DeliveryRewardInBalance}, - weights, AccountId, BridgeWococoMessages, ParachainInfo, Runtime, RuntimeEvent, RuntimeOrigin, - XcmRouter, -}; -use bp_messages::LaneId; -use bridge_runtime_common::{ - messages, - messages::{ - source::{FromBridgedChainMessagesDeliveryProof, TargetHeaderChainAdapter}, - target::{FromBridgedChainMessagesProof, SourceHeaderChainAdapter}, - MessageBridge, ThisChainWithMessages, UnderlyingChainProvider, - }, - messages_xcm_extension::{ - SenderAndLane, XcmAsPlainPayload, XcmBlobHauler, XcmBlobHaulerAdapter, - XcmBlobMessageDispatch, - }, - refund_relayer_extension::{ - ActualFeeRefund, RefundBridgedParachainMessages, RefundSignedExtensionAdapter, - RefundableMessagesLane, RefundableParachain, - }, -}; - -use codec::Encode; -use frame_support::{parameter_types, traits::PalletInfoAccess}; -use sp_runtime::RuntimeDebug; -use xcm::{ - latest::prelude::*, - prelude::{InteriorMultiLocation, NetworkId}, -}; -use xcm_builder::{BridgeBlobDispatcher, HaulBlobExporter}; - -parameter_types! 
{ - pub const MaxUnrewardedRelayerEntriesAtInboundLane: bp_messages::MessageNonce = - bp_bridge_hub_rococo::MAX_UNREWARDED_RELAYERS_IN_CONFIRMATION_TX; - pub const MaxUnconfirmedMessagesAtInboundLane: bp_messages::MessageNonce = - bp_bridge_hub_rococo::MAX_UNCONFIRMED_MESSAGES_IN_CONFIRMATION_TX; - pub const BridgeHubWococoChainId: bp_runtime::ChainId = bp_runtime::BRIDGE_HUB_WOCOCO_CHAIN_ID; - pub BridgeRococoToWococoMessagesPalletInstance: InteriorMultiLocation = X1(PalletInstance(::index() as u8)); - pub BridgeHubRococoUniversalLocation: InteriorMultiLocation = X2(GlobalConsensus(Rococo), Parachain(ParachainInfo::parachain_id().into())); - pub WococoGlobalConsensusNetwork: NetworkId = NetworkId::Wococo; - pub ActiveOutboundLanesToBridgeHubWococo: &'static [bp_messages::LaneId] = &[XCM_LANE_FOR_ASSET_HUB_ROCOCO_TO_ASSET_HUB_WOCOCO]; - pub const AssetHubRococoToAssetHubWococoMessagesLane: bp_messages::LaneId = XCM_LANE_FOR_ASSET_HUB_ROCOCO_TO_ASSET_HUB_WOCOCO; - // see the `FEE_BOOST_PER_MESSAGE` constant to get the meaning of this value - pub PriorityBoostPerMessage: u64 = 182_044_444_444_444; - - pub AssetHubRococoParaId: cumulus_primitives_core::ParaId = bp_asset_hub_rococo::ASSET_HUB_ROCOCO_PARACHAIN_ID.into(); - pub AssetHubWococoParaId: cumulus_primitives_core::ParaId = bp_asset_hub_wococo::ASSET_HUB_WOCOCO_PARACHAIN_ID.into(); - - pub FromAssetHubRococoToAssetHubWococoRoute: SenderAndLane = SenderAndLane::new( - ParentThen(X1(Parachain(AssetHubRococoParaId::get().into()))).into(), - XCM_LANE_FOR_ASSET_HUB_ROCOCO_TO_ASSET_HUB_WOCOCO, - ); - - pub CongestedMessage: Xcm<()> = build_congestion_message(true).into(); - - pub UncongestedMessage: Xcm<()> = build_congestion_message(false).into(); -} -pub const XCM_LANE_FOR_ASSET_HUB_ROCOCO_TO_ASSET_HUB_WOCOCO: LaneId = LaneId([0, 0, 0, 1]); - -fn build_congestion_message(is_congested: bool) -> sp_std::vec::Vec> { - sp_std::vec![ - UnpaidExecution { weight_limit: Unlimited, check_origin: None }, - Transact { - origin_kind: OriginKind::Xcm, - require_weight_at_most: - bp_asset_hub_rococo::XcmBridgeHubRouterTransactCallMaxWeight::get(), - call: bp_asset_hub_rococo::Call::ToWococoXcmRouter( - bp_asset_hub_rococo::XcmBridgeHubRouterCall::report_bridge_status { - bridge_id: Default::default(), - is_congested, - } - ) - .encode() - .into(), - } - ] -} - -/// Proof of messages, coming from Wococo. -pub type FromWococoBridgeHubMessagesProof = - FromBridgedChainMessagesProof; -/// Messages delivery proof for Rococo Bridge Hub -> Wococo Bridge Hub messages. -pub type ToWococoBridgeHubMessagesDeliveryProof = - FromBridgedChainMessagesDeliveryProof; - -/// Dispatches received XCM messages from other bridge -type FromWococoMessageBlobDispatcher = BridgeBlobDispatcher< - XcmRouter, - BridgeHubRococoUniversalLocation, - BridgeRococoToWococoMessagesPalletInstance, ->; - -/// Export XCM messages to be relayed to the other side -pub type ToBridgeHubWococoHaulBlobExporter = HaulBlobExporter< - XcmBlobHaulerAdapter, - WococoGlobalConsensusNetwork, - (), ->; -pub struct ToBridgeHubWococoXcmBlobHauler; -impl XcmBlobHauler for ToBridgeHubWococoXcmBlobHauler { - type Runtime = Runtime; - type MessagesInstance = WithBridgeHubWococoMessagesInstance; - type SenderAndLane = FromAssetHubRococoToAssetHubWococoRoute; - - type ToSourceChainSender = XcmRouter; - type CongestedMessage = CongestedMessage; - type UncongestedMessage = UncongestedMessage; -} - -/// On messages delivered callback. 
-type OnMessagesDeliveredFromWococo = XcmBlobHaulerAdapter; - -/// Messaging Bridge configuration for BridgeHubRococo -> BridgeHubWococo -pub struct WithBridgeHubWococoMessageBridge; -impl MessageBridge for WithBridgeHubWococoMessageBridge { - const BRIDGED_MESSAGES_PALLET_NAME: &'static str = - bp_bridge_hub_rococo::WITH_BRIDGE_HUB_ROCOCO_MESSAGES_PALLET_NAME; - type ThisChain = BridgeHubRococo; - type BridgedChain = BridgeHubWococo; - type BridgedHeaderChain = pallet_bridge_parachains::ParachainHeaders< - Runtime, - BridgeParachainWococoInstance, - bp_bridge_hub_wococo::BridgeHubWococo, - >; -} - -/// Message verifier for BridgeHubWococo messages sent from BridgeHubRococo -pub type ToBridgeHubWococoMessageVerifier = - messages::source::FromThisChainMessageVerifier; - -/// Maximal outbound payload size of BridgeHubRococo -> BridgeHubWococo messages. -pub type ToBridgeHubWococoMaximalOutboundPayloadSize = - messages::source::FromThisChainMaximalOutboundPayloadSize; - -/// BridgeHubWococo chain from message lane point of view. -#[derive(RuntimeDebug, Clone, Copy)] -pub struct BridgeHubWococo; - -impl UnderlyingChainProvider for BridgeHubWococo { - type Chain = bp_bridge_hub_wococo::BridgeHubWococo; -} - -impl messages::BridgedChainWithMessages for BridgeHubWococo {} - -/// BridgeHubRococo chain from message lane point of view. -#[derive(RuntimeDebug, Clone, Copy)] -pub struct BridgeHubRococo; - -impl UnderlyingChainProvider for BridgeHubRococo { - type Chain = bp_bridge_hub_rococo::BridgeHubRococo; -} - -impl ThisChainWithMessages for BridgeHubRococo { - type RuntimeOrigin = RuntimeOrigin; -} - -/// Signed extension that refunds relayers that are delivering messages from the Wococo parachain. -pub type OnBridgeHubRococoRefundBridgeHubWococoMessages = RefundSignedExtensionAdapter< - RefundBridgedParachainMessages< - Runtime, - RefundableParachain, - RefundableMessagesLane< - WithBridgeHubWococoMessagesInstance, - AssetHubRococoToAssetHubWococoMessagesLane, - >, - ActualFeeRefund, - PriorityBoostPerMessage, - StrOnBridgeHubRococoRefundBridgeHubWococoMessages, - >, ->; -bp_runtime::generate_static_str_provider!(OnBridgeHubRococoRefundBridgeHubWococoMessages); - -/// Add XCM messages support for BridgeHubRococo to support Rococo->Wococo XCM messages -pub type WithBridgeHubWococoMessagesInstance = pallet_bridge_messages::Instance1; -impl pallet_bridge_messages::Config for Runtime { - type RuntimeEvent = RuntimeEvent; - type WeightInfo = weights::pallet_bridge_messages_rococo_to_wococo::WeightInfo; - type BridgedChainId = BridgeHubWococoChainId; - type ActiveOutboundLanes = ActiveOutboundLanesToBridgeHubWococo; - type MaxUnrewardedRelayerEntriesAtInboundLane = MaxUnrewardedRelayerEntriesAtInboundLane; - type MaxUnconfirmedMessagesAtInboundLane = MaxUnconfirmedMessagesAtInboundLane; - - type MaximalOutboundPayloadSize = ToBridgeHubWococoMaximalOutboundPayloadSize; - type OutboundPayload = XcmAsPlainPayload; - - type InboundPayload = XcmAsPlainPayload; - type InboundRelayer = AccountId; - type DeliveryPayments = (); - - type TargetHeaderChain = TargetHeaderChainAdapter; - type LaneMessageVerifier = ToBridgeHubWococoMessageVerifier; - type DeliveryConfirmationPayments = pallet_bridge_relayers::DeliveryConfirmationPaymentsAdapter< - Runtime, - WithBridgeHubWococoMessagesInstance, - DeliveryRewardInBalance, - >; - - type SourceHeaderChain = SourceHeaderChainAdapter; - type MessageDispatch = XcmBlobMessageDispatch< - FromWococoMessageBlobDispatcher, - Self::WeightInfo, - 
cumulus_pallet_xcmp_queue::bridging::OutXcmpChannelStatusProvider< - AssetHubRococoParaId, - Runtime, - >, - >; - type OnMessagesDelivered = OnMessagesDeliveredFromWococo; -} - -#[cfg(test)] -mod tests { - use super::*; - use crate::bridge_common_config::BridgeGrandpaWococoInstance; - use bridge_runtime_common::{ - assert_complete_bridge_types, - integrity::{ - assert_complete_bridge_constants, check_message_lane_weights, - AssertBridgeMessagesPalletConstants, AssertBridgePalletNames, AssertChainConstants, - AssertCompleteBridgeConstants, - }, - }; - use parachains_common::{rococo, Balance}; - - /// Every additional message in the message delivery transaction boosts its priority. - /// So the priority of transaction with `N+1` messages is larger than priority of - /// transaction with `N` messages by the `PriorityBoostPerMessage`. - /// - /// Economically, it is an equivalent of adding tip to the transaction with `N` messages. - /// The `FEE_BOOST_PER_MESSAGE` constant is the value of this tip. - /// - /// We want this tip to be large enough (delivery transactions with more messages = less - /// operational costs and a faster bridge), so this value should be significant. - const FEE_BOOST_PER_MESSAGE: Balance = 2 * rococo::currency::UNITS; - - #[test] - fn ensure_bridge_hub_rococo_message_lane_weights_are_correct() { - check_message_lane_weights::< - bp_bridge_hub_rococo::BridgeHubRococo, - Runtime, - WithBridgeHubWococoMessagesInstance, - >( - bp_bridge_hub_wococo::EXTRA_STORAGE_PROOF_SIZE, - bp_bridge_hub_rococo::MAX_UNREWARDED_RELAYERS_IN_CONFIRMATION_TX, - bp_bridge_hub_rococo::MAX_UNCONFIRMED_MESSAGES_IN_CONFIRMATION_TX, - true, - ); - } - - #[test] - fn ensure_bridge_integrity() { - assert_complete_bridge_types!( - runtime: Runtime, - with_bridged_chain_grandpa_instance: BridgeGrandpaWococoInstance, - with_bridged_chain_messages_instance: WithBridgeHubWococoMessagesInstance, - bridge: WithBridgeHubWococoMessageBridge, - this_chain: bp_rococo::Rococo, - bridged_chain: bp_wococo::Wococo, - ); - - assert_complete_bridge_constants::< - Runtime, - BridgeGrandpaWococoInstance, - WithBridgeHubWococoMessagesInstance, - WithBridgeHubWococoMessageBridge, - >(AssertCompleteBridgeConstants { - this_chain_constants: AssertChainConstants { - block_length: bp_bridge_hub_rococo::BlockLength::get(), - block_weights: bp_bridge_hub_rococo::BlockWeights::get(), - }, - messages_pallet_constants: AssertBridgeMessagesPalletConstants { - max_unrewarded_relayers_in_bridged_confirmation_tx: - bp_bridge_hub_wococo::MAX_UNREWARDED_RELAYERS_IN_CONFIRMATION_TX, - max_unconfirmed_messages_in_bridged_confirmation_tx: - bp_bridge_hub_wococo::MAX_UNCONFIRMED_MESSAGES_IN_CONFIRMATION_TX, - bridged_chain_id: bp_runtime::BRIDGE_HUB_WOCOCO_CHAIN_ID, - }, - pallet_names: AssertBridgePalletNames { - with_this_chain_messages_pallet_name: - bp_bridge_hub_rococo::WITH_BRIDGE_HUB_ROCOCO_MESSAGES_PALLET_NAME, - with_bridged_chain_grandpa_pallet_name: bp_wococo::WITH_WOCOCO_GRANDPA_PALLET_NAME, - with_bridged_chain_messages_pallet_name: - bp_bridge_hub_wococo::WITH_BRIDGE_HUB_WOCOCO_MESSAGES_PALLET_NAME, - }, - }); - - bridge_runtime_common::priority_calculator::ensure_priority_boost_is_sane::< - Runtime, - WithBridgeHubWococoMessagesInstance, - PriorityBoostPerMessage, - >(FEE_BOOST_PER_MESSAGE); - - assert_eq!( - BridgeRococoToWococoMessagesPalletInstance::get(), - X1(PalletInstance( - bp_bridge_hub_rococo::WITH_BRIDGE_ROCOCO_TO_WOCOCO_MESSAGES_PALLET_INDEX - )) - ); - } -} diff --git 
a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs index b17d308b891..b8fc2fffc88 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs @@ -16,14 +16,7 @@ //! # Bridge Hub Rococo Runtime //! -//! This runtime is also used for Bridge Hub Wococo. We dont want to create -//! another exact copy of Bridge Hub Rococo, so we injected some tweaks backed by `RuntimeFlavor` -//! and `pub storage Flavor: RuntimeFlavor`. (For example this is needed for successful asset -//! transfer between Asset Hub Rococo and Asset Hub Wococo, where we need to have correct -//! `xcm_config::UniversalLocation` with correct `GlobalConsensus`. -//! //! This runtime currently supports bridging between: -//! - Rococo <> Wococo //! - Rococo <> Westend #![cfg_attr(not(feature = "std"), no_std)] @@ -35,13 +28,10 @@ include!(concat!(env!("OUT_DIR"), "/wasm_binary.rs")); pub mod bridge_common_config; -pub mod bridge_to_rococo_config; pub mod bridge_to_westend_config; -pub mod bridge_to_wococo_config; mod weights; pub mod xcm_config; -use codec::{Decode, Encode}; use cumulus_pallet_parachain_system::RelayNumberStrictlyIncreases; use sp_api::impl_runtime_apis; use sp_core::{crypto::KeyTypeId, OpaqueMetadata}; @@ -93,15 +83,6 @@ use parachains_common::{ HOURS, MAXIMUM_BLOCK_WEIGHT, NORMAL_DISPATCH_RATIO, SLOT_DURATION, }; -/// Enum for handling differences in the runtime configuration for BridgeHubRococo vs -/// BridgeHubWococo. -#[derive(Default, Eq, PartialEq, Debug, Clone, Copy, Decode, Encode)] -pub enum RuntimeFlavor { - #[default] - Rococo, - Wococo, -} - /// The address format for describing accounts. pub type Address = MultiAddress; @@ -125,11 +106,7 @@ pub type SignedExtra = ( frame_system::CheckWeight, pallet_transaction_payment::ChargeTransactionPayment, BridgeRejectObsoleteHeadersAndMessages, - ( - bridge_to_wococo_config::OnBridgeHubRococoRefundBridgeHubWococoMessages, - bridge_to_westend_config::OnBridgeHubRococoRefundBridgeHubWestendMessages, - bridge_to_rococo_config::OnBridgeHubWococoRefundBridgeHubRococoMessages, - ), + (bridge_to_westend_config::OnBridgeHubRococoRefundBridgeHubWestendMessages,), ); /// Unchecked extrinsic type as expected by this runtime. @@ -529,37 +506,19 @@ construct_runtime!( Utility: pallet_utility::{Pallet, Call, Event} = 40, Multisig: pallet_multisig::{Pallet, Call, Storage, Event} = 36, - // Rococo, Wococo and Westend BridgeHubs are sharing the runtime, so this runtime has several sets of - // bridge pallets. - // // BridgeHubRococo uses: - // - BridgeWococoGrandpa // - BridgeWestendGrandpa - // - BridgeWococoParachains // - BridgeWestendParachains - // - BridgeWococoMessages // - BridgeWestendMessages // - BridgeRelayers - // - // BridgeHubWococo uses: - // - BridgeRococoGrandpa - // - BridgeRococoParachains - // - BridgeRococoMessages - // - BridgeRelayers // GRANDPA bridge modules. - BridgeWococoGrandpa: pallet_bridge_grandpa::::{Pallet, Call, Storage, Event, Config} = 41, - BridgeRococoGrandpa: pallet_bridge_grandpa::::{Pallet, Call, Storage, Event, Config} = 43, BridgeWestendGrandpa: pallet_bridge_grandpa::::{Pallet, Call, Storage, Event, Config} = 48, // Parachain bridge modules. 
- BridgeWococoParachains: pallet_bridge_parachains::::{Pallet, Call, Storage, Event} = 42, - BridgeRococoParachains: pallet_bridge_parachains::::{Pallet, Call, Storage, Event} = 44, BridgeWestendParachains: pallet_bridge_parachains::::{Pallet, Call, Storage, Event} = 49, // Messaging bridge modules. - BridgeWococoMessages: pallet_bridge_messages::::{Pallet, Call, Storage, Event, Config} = 46, - BridgeRococoMessages: pallet_bridge_messages::::{Pallet, Call, Storage, Event, Config} = 45, BridgeWestendMessages: pallet_bridge_messages::::{Pallet, Call, Storage, Event, Config} = 51, BridgeRelayers: pallet_bridge_relayers::{Pallet, Call, Storage, Event} = 47, @@ -573,11 +532,11 @@ construct_runtime!( bridge_runtime_common::generate_bridge_reject_obsolete_headers_and_messages! { RuntimeCall, AccountId, // Grandpa - BridgeRococoGrandpa, BridgeWococoGrandpa, BridgeWestendGrandpa, + BridgeWestendGrandpa, // Parachains - BridgeRococoParachains, BridgeWococoParachains, BridgeWestendParachains, + BridgeWestendParachains, // Messages - BridgeRococoMessages, BridgeWococoMessages, BridgeWestendMessages + BridgeWestendMessages } #[cfg(feature = "runtime-benchmarks")] @@ -600,15 +559,9 @@ mod benches { [pallet_xcm_benchmarks::fungible, XcmBalances] [pallet_xcm_benchmarks::generic, XcmGeneric] // Bridge pallets - [pallet_bridge_grandpa, WococoFinality] [pallet_bridge_grandpa, WestendFinality] - [pallet_bridge_grandpa, RococoFinality] - [pallet_bridge_parachains, WithinWococo] [pallet_bridge_parachains, WithinWestend] - [pallet_bridge_parachains, WithinRococo] - [pallet_bridge_messages, RococoToWococo] [pallet_bridge_messages, RococoToWestend] - [pallet_bridge_messages, WococoToRococo] [pallet_bridge_relayers, BridgeRelayersBench::] ); } @@ -757,26 +710,6 @@ impl_runtime_apis! { } } - impl bp_rococo::RococoFinalityApi for Runtime { - fn best_finalized() -> Option> { - BridgeRococoGrandpa::best_finalized() - } - fn synced_headers_grandpa_info( - ) -> Vec> { - BridgeRococoGrandpa::synced_headers_grandpa_info() - } - } - - impl bp_wococo::WococoFinalityApi for Runtime { - fn best_finalized() -> Option> { - BridgeWococoGrandpa::best_finalized() - } - fn synced_headers_grandpa_info( - ) -> Vec> { - BridgeWococoGrandpa::synced_headers_grandpa_info() - } - } - impl bp_westend::WestendFinalityApi for Runtime { fn best_finalized() -> Option> { BridgeWestendGrandpa::best_finalized() @@ -787,22 +720,6 @@ impl_runtime_apis! { } } - impl bp_bridge_hub_rococo::BridgeHubRococoFinalityApi for Runtime { - fn best_finalized() -> Option> { - BridgeRococoParachains::best_parachain_head_id::< - bp_bridge_hub_rococo::BridgeHubRococo - >().unwrap_or(None) - } - } - - impl bp_bridge_hub_wococo::BridgeHubWococoFinalityApi for Runtime { - fn best_finalized() -> Option> { - BridgeWococoParachains::best_parachain_head_id::< - bp_bridge_hub_wococo::BridgeHubWococo - >().unwrap_or(None) - } - } - impl bp_bridge_hub_westend::BridgeHubWestendFinalityApi for Runtime { fn best_finalized() -> Option> { BridgeWestendParachains::best_parachain_head_id::< @@ -811,33 +728,6 @@ impl_runtime_apis! 
{ } } - // This is exposed by BridgeHubRococo - impl bp_bridge_hub_wococo::FromBridgeHubWococoInboundLaneApi for Runtime { - fn message_details( - lane: bp_messages::LaneId, - messages: Vec<(bp_messages::MessagePayload, bp_messages::OutboundMessageDetails)>, - ) -> Vec { - bridge_runtime_common::messages_api::inbound_message_details::< - Runtime, - bridge_to_wococo_config::WithBridgeHubWococoMessagesInstance, - >(lane, messages) - } - } - - // This is exposed by BridgeHubRococo - impl bp_bridge_hub_wococo::ToBridgeHubWococoOutboundLaneApi for Runtime { - fn message_details( - lane: bp_messages::LaneId, - begin: bp_messages::MessageNonce, - end: bp_messages::MessageNonce, - ) -> Vec { - bridge_runtime_common::messages_api::outbound_message_details::< - Runtime, - bridge_to_wococo_config::WithBridgeHubWococoMessagesInstance, - >(lane, begin, end) - } - } - // This is exposed by BridgeHubRococo impl bp_bridge_hub_westend::FromBridgeHubWestendInboundLaneApi for Runtime { fn message_details( @@ -865,45 +755,6 @@ impl_runtime_apis! { } } - // This is exposed by BridgeHubWococo - impl bp_bridge_hub_rococo::FromBridgeHubRococoInboundLaneApi for Runtime { - fn message_details( - lane: bp_messages::LaneId, - messages: Vec<(bp_messages::MessagePayload, bp_messages::OutboundMessageDetails)>, - ) -> Vec { - // use different instance according to flavor - match xcm_config::Flavor::get() { - RuntimeFlavor::Wococo => { - bridge_runtime_common::messages_api::inbound_message_details::< - Runtime, - bridge_to_rococo_config::WithBridgeHubRococoMessagesInstance, - >(lane, messages) - }, - flavor @ _ => unimplemented!("Unsupported `FromBridgeHubRococoInboundLaneApi` for flavor: {:?}", flavor) - } - } - } - - // This is exposed by BridgeHubWococo and BridgeHubWestend - impl bp_bridge_hub_rococo::ToBridgeHubRococoOutboundLaneApi for Runtime { - fn message_details( - lane: bp_messages::LaneId, - begin: bp_messages::MessageNonce, - end: bp_messages::MessageNonce, - ) -> Vec { - // use different instance according to flavor - match xcm_config::Flavor::get() { - RuntimeFlavor::Wococo => { - bridge_runtime_common::messages_api::outbound_message_details::< - Runtime, - bridge_to_rococo_config::WithBridgeHubRococoMessagesInstance, - >(lane, begin, end) - }, - flavor @ _ => unimplemented!("Unsupported `ToBridgeHubRococoOutboundLaneApi` for flavor: {:?}", flavor) - } - } - } - #[cfg(feature = "try-runtime")] impl frame_try_runtime::TryRuntime for Runtime { fn on_runtime_upgrade(checks: frame_try_runtime::UpgradeCheckSelect) -> (Weight, Weight) { @@ -943,15 +794,9 @@ impl_runtime_apis! { use pallet_bridge_relayers::benchmarking::Pallet as BridgeRelayersBench; // Change weight file names. - type WococoFinality = BridgeWococoGrandpa; type WestendFinality = BridgeWestendGrandpa; - type RococoFinality = BridgeRococoGrandpa; - type WithinWococo = pallet_bridge_parachains::benchmarking::Pallet::; type WithinWestend = pallet_bridge_parachains::benchmarking::Pallet::; - type WithinRococo = pallet_bridge_parachains::benchmarking::Pallet::; - type RococoToWococo = pallet_bridge_messages::benchmarking::Pallet ::; type RococoToWestend = pallet_bridge_messages::benchmarking::Pallet ::; - type WococoToRococo = pallet_bridge_messages::benchmarking::Pallet ::; let mut list = Vec::::new(); list_benchmarks!(list, extra); @@ -1098,7 +943,7 @@ impl_runtime_apis! 
{ fn export_message_origin_and_destination( ) -> Result<(MultiLocation, NetworkId, InteriorMultiLocation), BenchmarkError> { - Ok((TokenLocation::get(), NetworkId::Wococo, X1(Parachain(100)))) + Ok((TokenLocation::get(), NetworkId::Westend, X1(Parachain(100)))) } fn alias_origin() -> Result<(MultiLocation, MultiLocation), BenchmarkError> { @@ -1109,15 +954,9 @@ impl_runtime_apis! { type XcmBalances = pallet_xcm_benchmarks::fungible::Pallet::; type XcmGeneric = pallet_xcm_benchmarks::generic::Pallet::; - type WococoFinality = BridgeWococoGrandpa; type WestendFinality = BridgeWestendGrandpa; - type RococoFinality = BridgeRococoGrandpa; - type WithinWococo = pallet_bridge_parachains::benchmarking::Pallet::; type WithinWestend = pallet_bridge_parachains::benchmarking::Pallet::; - type WithinRococo = pallet_bridge_parachains::benchmarking::Pallet::; - type RococoToWococo = pallet_bridge_messages::benchmarking::Pallet ::; type RococoToWestend = pallet_bridge_messages::benchmarking::Pallet ::; - type WococoToRococo = pallet_bridge_messages::benchmarking::Pallet ::; use bridge_runtime_common::messages_benchmarking::{ prepare_message_delivery_proof_from_parachain, @@ -1130,49 +969,6 @@ impl_runtime_apis! { MessageProofParams, }; - impl BridgeMessagesConfig for Runtime { - fn is_relayer_rewarded(relayer: &Self::AccountId) -> bool { - let bench_lane_id = >::bench_lane_id(); - let bridged_chain_id = bp_runtime::BRIDGE_HUB_WOCOCO_CHAIN_ID; - pallet_bridge_relayers::Pallet::::relayer_reward( - relayer, - bp_relayers::RewardsAccountParams::new( - bench_lane_id, - bridged_chain_id, - bp_relayers::RewardsAccountOwner::BridgedChain - ) - ).is_some() - } - - fn prepare_message_proof( - params: MessageProofParams, - ) -> (bridge_to_wococo_config::FromWococoBridgeHubMessagesProof, Weight) { - use cumulus_primitives_core::XcmpMessageSource; - assert!(XcmpQueue::take_outbound_messages(usize::MAX).is_empty()); - ParachainSystem::open_outbound_hrmp_channel_for_benchmarks_or_tests(42.into()); - prepare_message_proof_from_parachain::< - Runtime, - bridge_common_config::BridgeGrandpaWococoInstance, - bridge_to_wococo_config::WithBridgeHubWococoMessageBridge, - >(params, generate_xcm_builder_bridge_message_sample(X2(GlobalConsensus(Rococo), Parachain(42)))) - } - - fn prepare_message_delivery_proof( - params: MessageDeliveryProofParams, - ) -> bridge_to_wococo_config::ToWococoBridgeHubMessagesDeliveryProof { - prepare_message_delivery_proof_from_parachain::< - Runtime, - bridge_common_config::BridgeGrandpaWococoInstance, - bridge_to_wococo_config::WithBridgeHubWococoMessageBridge, - >(params) - } - - fn is_message_successfully_dispatched(_nonce: bp_messages::MessageNonce) -> bool { - use cumulus_primitives_core::XcmpMessageSource; - !XcmpQueue::take_outbound_messages(usize::MAX).is_empty() - } - } - impl BridgeMessagesConfig for Runtime { fn is_relayer_rewarded(relayer: &Self::AccountId) -> bool { let bench_lane_id = >::bench_lane_id(); @@ -1216,49 +1012,6 @@ impl_runtime_apis! 
{ } } - impl BridgeMessagesConfig for Runtime { - fn is_relayer_rewarded(relayer: &Self::AccountId) -> bool { - let bench_lane_id = >::bench_lane_id(); - let bridged_chain_id = bp_runtime::BRIDGE_HUB_ROCOCO_CHAIN_ID; - pallet_bridge_relayers::Pallet::::relayer_reward( - relayer, - bp_relayers::RewardsAccountParams::new( - bench_lane_id, - bridged_chain_id, - bp_relayers::RewardsAccountOwner::BridgedChain - ) - ).is_some() - } - - fn prepare_message_proof( - params: MessageProofParams, - ) -> (bridge_to_rococo_config::FromRococoBridgeHubMessagesProof, Weight) { - use cumulus_primitives_core::XcmpMessageSource; - assert!(XcmpQueue::take_outbound_messages(usize::MAX).is_empty()); - ParachainSystem::open_outbound_hrmp_channel_for_benchmarks_or_tests(42.into()); - prepare_message_proof_from_parachain::< - Runtime, - bridge_common_config::BridgeGrandpaRococoInstance, - bridge_to_rococo_config::WithBridgeHubRococoMessageBridge, - >(params, generate_xcm_builder_bridge_message_sample(X2(GlobalConsensus(Wococo), Parachain(42)))) - } - - fn prepare_message_delivery_proof( - params: MessageDeliveryProofParams, - ) -> bridge_to_rococo_config::ToRococoBridgeHubMessagesDeliveryProof { - prepare_message_delivery_proof_from_parachain::< - Runtime, - bridge_common_config::BridgeGrandpaRococoInstance, - bridge_to_rococo_config::WithBridgeHubRococoMessageBridge, - >(params) - } - - fn is_message_successfully_dispatched(_nonce: bp_messages::MessageNonce) -> bool { - use cumulus_primitives_core::XcmpMessageSource; - !XcmpQueue::take_outbound_messages(usize::MAX).is_empty() - } - } - use bridge_runtime_common::parachains_benchmarking::prepare_parachain_heads_proof; use pallet_bridge_parachains::benchmarking::Config as BridgeParachainsConfig; use pallet_bridge_relayers::benchmarking::{ @@ -1266,30 +1019,6 @@ impl_runtime_apis! { Config as BridgeRelayersConfig, }; - impl BridgeParachainsConfig for Runtime { - fn parachains() -> Vec { - use bp_runtime::Parachain; - vec![bp_polkadot_core::parachains::ParaId(bp_bridge_hub_wococo::BridgeHubWococo::PARACHAIN_ID)] - } - - fn prepare_parachain_heads_proof( - parachains: &[bp_polkadot_core::parachains::ParaId], - parachain_head_size: u32, - proof_size: bp_runtime::StorageProofSize, - ) -> ( - pallet_bridge_parachains::RelayBlockNumber, - pallet_bridge_parachains::RelayBlockHash, - bp_polkadot_core::parachains::ParaHeadsProof, - Vec<(bp_polkadot_core::parachains::ParaId, bp_polkadot_core::parachains::ParaHash)>, - ) { - prepare_parachain_heads_proof::( - parachains, - parachain_head_size, - proof_size, - ) - } - } - impl BridgeParachainsConfig for Runtime { fn parachains() -> Vec { use bp_runtime::Parachain; @@ -1314,30 +1043,6 @@ impl_runtime_apis! 
{ } } - impl BridgeParachainsConfig for Runtime { - fn parachains() -> Vec { - use bp_runtime::Parachain; - vec![bp_polkadot_core::parachains::ParaId(bp_bridge_hub_rococo::BridgeHubRococo::PARACHAIN_ID)] - } - - fn prepare_parachain_heads_proof( - parachains: &[bp_polkadot_core::parachains::ParaId], - parachain_head_size: u32, - proof_size: bp_runtime::StorageProofSize, - ) -> ( - pallet_bridge_parachains::RelayBlockNumber, - pallet_bridge_parachains::RelayBlockHash, - bp_polkadot_core::parachains::ParaHeadsProof, - Vec<(bp_polkadot_core::parachains::ParaId, bp_polkadot_core::parachains::ParaHash)>, - ) { - prepare_parachain_heads_proof::( - parachains, - parachain_head_size, - proof_size, - ) - } - } - impl BridgeRelayersConfig for Runtime { fn prepare_rewards_account( account_params: bp_relayers::RewardsAccountParams, @@ -1418,11 +1123,7 @@ mod tests { frame_system::CheckWeight::new(), pallet_transaction_payment::ChargeTransactionPayment::from(10), BridgeRejectObsoleteHeadersAndMessages, - ( - bridge_to_wococo_config::OnBridgeHubRococoRefundBridgeHubWococoMessages::default(), - bridge_to_westend_config::OnBridgeHubRococoRefundBridgeHubWestendMessages::default(), - bridge_to_rococo_config::OnBridgeHubWococoRefundBridgeHubRococoMessages::default(), - ), + (bridge_to_westend_config::OnBridgeHubRococoRefundBridgeHubWestendMessages::default(),) ); // for BridgeHubRococo @@ -1442,24 +1143,6 @@ mod tests { bhr_indirect_payload.additional_signed().unwrap().encode() ) } - - // for BridgeHubWococo - { - let bhw_indirect_payload = bp_bridge_hub_wococo::SignedExtension::from_params( - VERSION.spec_version, - VERSION.transaction_version, - bp_runtime::TransactionEra::Immortal, - System::block_hash(BlockNumber::zero()), - 10, - 10, - (((), ()), ((), ())), - ); - assert_eq!(payload.encode(), bhw_indirect_payload.encode()); - assert_eq!( - payload.additional_signed().unwrap().encode(), - bhw_indirect_payload.additional_signed().unwrap().encode() - ) - } }); } } diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/mod.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/mod.rs index 66f8f1edf3c..a615f539547 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/mod.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/mod.rs @@ -17,6 +17,9 @@ //! Expose the auto generated weight files. 
+use ::pallet_bridge_messages::WeightInfoExt as MessagesWeightInfoExt; +use ::pallet_bridge_parachains::WeightInfoExt as ParachainsWeightInfoExt; + pub mod block_weights; pub mod cumulus_pallet_dmp_queue; pub mod cumulus_pallet_parachain_system; @@ -24,15 +27,9 @@ pub mod cumulus_pallet_xcmp_queue; pub mod extrinsic_weights; pub mod frame_system; pub mod pallet_balances; -pub mod pallet_bridge_grandpa_rococo_finality; -pub mod pallet_bridge_grandpa_westend_finality; -pub mod pallet_bridge_grandpa_wococo_finality; -pub mod pallet_bridge_messages_rococo_to_westend; -pub mod pallet_bridge_messages_rococo_to_wococo; -pub mod pallet_bridge_messages_wococo_to_rococo; -pub mod pallet_bridge_parachains_within_rococo; -pub mod pallet_bridge_parachains_within_westend; -pub mod pallet_bridge_parachains_within_wococo; +pub mod pallet_bridge_grandpa; +pub mod pallet_bridge_messages; +pub mod pallet_bridge_parachains; pub mod pallet_bridge_relayers; pub mod pallet_collator_selection; pub mod pallet_message_queue; @@ -56,43 +53,7 @@ use frame_support::weights::Weight; // import trait from dependency module use ::pallet_bridge_relayers::WeightInfoExt as _; -impl pallet_bridge_messages::WeightInfoExt - for pallet_bridge_messages_wococo_to_rococo::WeightInfo -{ - fn expected_extra_storage_proof_size() -> u32 { - bp_bridge_hub_rococo::EXTRA_STORAGE_PROOF_SIZE - } - - fn receive_messages_proof_overhead_from_runtime() -> Weight { - pallet_bridge_relayers::WeightInfo::::receive_messages_proof_overhead_from_runtime( - ) - } - - fn receive_messages_delivery_proof_overhead_from_runtime() -> Weight { - pallet_bridge_relayers::WeightInfo::::receive_messages_delivery_proof_overhead_from_runtime() - } -} - -impl pallet_bridge_messages::WeightInfoExt - for pallet_bridge_messages_rococo_to_wococo::WeightInfo -{ - fn expected_extra_storage_proof_size() -> u32 { - bp_bridge_hub_wococo::EXTRA_STORAGE_PROOF_SIZE - } - - fn receive_messages_proof_overhead_from_runtime() -> Weight { - pallet_bridge_relayers::WeightInfo::::receive_messages_proof_overhead_from_runtime( - ) - } - - fn receive_messages_delivery_proof_overhead_from_runtime() -> Weight { - pallet_bridge_relayers::WeightInfo::::receive_messages_delivery_proof_overhead_from_runtime() - } -} - -impl pallet_bridge_messages::WeightInfoExt - for pallet_bridge_messages_rococo_to_westend::WeightInfo -{ +impl MessagesWeightInfoExt for pallet_bridge_messages::WeightInfo { fn expected_extra_storage_proof_size() -> u32 { bp_bridge_hub_westend::EXTRA_STORAGE_PROOF_SIZE } @@ -107,26 +68,8 @@ impl pallet_bridge_messages::WeightInfoExt } } -impl pallet_bridge_parachains::WeightInfoExt - for pallet_bridge_parachains_within_rococo::WeightInfo -{ - fn expected_extra_storage_proof_size() -> u32 { - bp_bridge_hub_rococo::EXTRA_STORAGE_PROOF_SIZE - } -} - -impl pallet_bridge_parachains::WeightInfoExt - for pallet_bridge_parachains_within_westend::WeightInfo -{ +impl ParachainsWeightInfoExt for pallet_bridge_parachains::WeightInfo { fn expected_extra_storage_proof_size() -> u32 { bp_bridge_hub_westend::EXTRA_STORAGE_PROOF_SIZE } } - -impl pallet_bridge_parachains::WeightInfoExt - for pallet_bridge_parachains_within_wococo::WeightInfo -{ - fn expected_extra_storage_proof_size() -> u32 { - bp_bridge_hub_wococo::EXTRA_STORAGE_PROOF_SIZE - } -} diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_bridge_grandpa.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_bridge_grandpa.rs index 8ef05f17856..aaa6a3e0622 100644 --- 
a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_bridge_grandpa.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_bridge_grandpa.rs @@ -1,40 +1,41 @@ // Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 +// This file is part of Cumulus. -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . //! Autogenerated weights for `pallet_bridge_grandpa` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-05-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-11-14, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `bm4`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` -//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("bridge-hub-rococo-dev"), DB CACHE: 1024 +//! HOSTNAME: `runner-yprdrvc7-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("bridge-hub-rococo-dev")`, DB CACHE: 1024 // Executed Command: -// ./artifacts/polkadot-parachain +// target/production/polkadot-parachain // benchmark // pallet -// --chain=bridge-hub-rococo-dev -// --execution=wasm -// --wasm-execution=compiled -// --pallet=pallet_bridge_grandpa -// --extrinsic=* // --steps=50 // --repeat=20 -// --json -// --header=./file_header.txt -// --output=./parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_bridge_grandpa.rs +// --extrinsic=* +// --wasm-execution=compiled +// --heap-pages=4096 +// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json +// --pallet=pallet_bridge_grandpa +// --chain=bridge-hub-rococo-dev +// --header=./cumulus/file_header.txt +// --output=./cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/ #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -47,33 +48,31 @@ use core::marker::PhantomData; /// Weight functions for `pallet_bridge_grandpa`. 
pub struct WeightInfo(PhantomData); impl pallet_bridge_grandpa::WeightInfo for WeightInfo { - /// Storage: BridgeRococoGrandpa PalletOperatingMode (r:1 w:0) - /// Proof: BridgeRococoGrandpa PalletOperatingMode (max_values: Some(1), max_size: Some(1), added: 496, mode: MaxEncodedLen) - /// Storage: BridgeRococoGrandpa BestFinalized (r:1 w:1) - /// Proof: BridgeRococoGrandpa BestFinalized (max_values: Some(1), max_size: Some(36), added: 531, mode: MaxEncodedLen) - /// Storage: BridgeRococoGrandpa CurrentAuthoritySet (r:1 w:0) - /// Proof: BridgeRococoGrandpa CurrentAuthoritySet (max_values: Some(1), max_size: Some(50250), added: 50745, mode: MaxEncodedLen) - /// Storage: BridgeRococoGrandpa ImportedHashesPointer (r:1 w:1) - /// Proof: BridgeRococoGrandpa ImportedHashesPointer (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: BridgeRococoGrandpa ImportedHashes (r:1 w:1) - /// Proof: BridgeRococoGrandpa ImportedHashes (max_values: Some(1024), max_size: Some(36), added: 1521, mode: MaxEncodedLen) - /// Storage: BridgeRococoGrandpa ImportedHeaders (r:0 w:2) - /// Proof: BridgeRococoGrandpa ImportedHeaders (max_values: Some(1024), max_size: Some(68), added: 1553, mode: MaxEncodedLen) - /// The range of component `p` is `[1, 838]`. - /// The range of component `v` is `[50, 100]`. + /// Storage: `BridgeWestendGrandpa::PalletOperatingMode` (r:1 w:0) + /// Proof: `BridgeWestendGrandpa::PalletOperatingMode` (`max_values`: Some(1), `max_size`: Some(1), added: 496, mode: `MaxEncodedLen`) + /// Storage: `BridgeWestendGrandpa::BestFinalized` (r:1 w:1) + /// Proof: `BridgeWestendGrandpa::BestFinalized` (`max_values`: Some(1), `max_size`: Some(36), added: 531, mode: `MaxEncodedLen`) + /// Storage: `BridgeWestendGrandpa::CurrentAuthoritySet` (r:1 w:0) + /// Proof: `BridgeWestendGrandpa::CurrentAuthoritySet` (`max_values`: Some(1), `max_size`: Some(50250), added: 50745, mode: `MaxEncodedLen`) + /// Storage: `BridgeWestendGrandpa::ImportedHashesPointer` (r:1 w:1) + /// Proof: `BridgeWestendGrandpa::ImportedHashesPointer` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `BridgeWestendGrandpa::ImportedHashes` (r:1 w:1) + /// Proof: `BridgeWestendGrandpa::ImportedHashes` (`max_values`: Some(1024), `max_size`: Some(36), added: 1521, mode: `MaxEncodedLen`) + /// Storage: `BridgeWestendGrandpa::ImportedHeaders` (r:0 w:2) + /// Proof: `BridgeWestendGrandpa::ImportedHeaders` (`max_values`: Some(1024), `max_size`: Some(68), added: 1553, mode: `MaxEncodedLen`) /// The range of component `p` is `[1, 838]`. /// The range of component `v` is `[50, 100]`. fn submit_finality_proof(p: u32, v: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `231 + p * (60 ±0)` + // Measured: `335 + p * (60 ±0)` // Estimated: `51735` - // Minimum execution time: 241_332_000 picoseconds. - Weight::from_parts(69_790_821, 0) + // Minimum execution time: 311_267_000 picoseconds. 
+ Weight::from_parts(313_903_000, 0) .saturating_add(Weight::from_parts(0, 51735)) - // Standard Error: 6_013 - .saturating_add(Weight::from_parts(47_580_554, 0).saturating_mul(p.into())) - // Standard Error: 100_298 - .saturating_add(Weight::from_parts(1_213_475, 0).saturating_mul(v.into())) + // Standard Error: 4_779 + .saturating_add(Weight::from_parts(55_265_953, 0).saturating_mul(p.into())) + // Standard Error: 36_883 + .saturating_add(Weight::from_parts(153_660, 0).saturating_mul(v.into())) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(5)) } diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_bridge_grandpa_rococo_finality.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_bridge_grandpa_rococo_finality.rs deleted file mode 100644 index 0bb798bd9ec..00000000000 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_bridge_grandpa_rococo_finality.rs +++ /dev/null @@ -1,82 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! Autogenerated weights for `pallet_bridge_grandpa` -//! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-10-27, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-vmdtonbz-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("bridge-hub-rococo-dev")`, DB CACHE: 1024 - -// Executed Command: -// target/production/polkadot-parachain -// benchmark -// pallet -// --steps=50 -// --repeat=20 -// --extrinsic=* -// --wasm-execution=compiled -// --heap-pages=4096 -// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json -// --pallet=pallet_bridge_grandpa -// --chain=bridge-hub-rococo-dev -// --header=./cumulus/file_header.txt -// --output=./cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/ - -#![cfg_attr(rustfmt, rustfmt_skip)] -#![allow(unused_parens)] -#![allow(unused_imports)] -#![allow(missing_docs)] - -use frame_support::{traits::Get, weights::Weight}; -use core::marker::PhantomData; - -/// Weight functions for `pallet_bridge_grandpa`. 
-pub struct WeightInfo(PhantomData); -impl pallet_bridge_grandpa::WeightInfo for WeightInfo { - /// Storage: `BridgeRococoGrandpa::PalletOperatingMode` (r:1 w:0) - /// Proof: `BridgeRococoGrandpa::PalletOperatingMode` (`max_values`: Some(1), `max_size`: Some(1), added: 496, mode: `MaxEncodedLen`) - /// Storage: `BridgeRococoGrandpa::BestFinalized` (r:1 w:1) - /// Proof: `BridgeRococoGrandpa::BestFinalized` (`max_values`: Some(1), `max_size`: Some(36), added: 531, mode: `MaxEncodedLen`) - /// Storage: `BridgeRococoGrandpa::CurrentAuthoritySet` (r:1 w:0) - /// Proof: `BridgeRococoGrandpa::CurrentAuthoritySet` (`max_values`: Some(1), `max_size`: Some(50250), added: 50745, mode: `MaxEncodedLen`) - /// Storage: `BridgeRococoGrandpa::ImportedHashesPointer` (r:1 w:1) - /// Proof: `BridgeRococoGrandpa::ImportedHashesPointer` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - /// Storage: `BridgeRococoGrandpa::ImportedHashes` (r:1 w:1) - /// Proof: `BridgeRococoGrandpa::ImportedHashes` (`max_values`: Some(1024), `max_size`: Some(36), added: 1521, mode: `MaxEncodedLen`) - /// Storage: `BridgeRococoGrandpa::ImportedHeaders` (r:0 w:2) - /// Proof: `BridgeRococoGrandpa::ImportedHeaders` (`max_values`: Some(1024), `max_size`: Some(68), added: 1553, mode: `MaxEncodedLen`) - /// The range of component `p` is `[1, 838]`. - /// The range of component `v` is `[50, 100]`. - /// The range of component `p` is `[1, 838]`. - /// The range of component `v` is `[50, 100]`. - /// The range of component `p` is `[1, 838]`. - /// The range of component `v` is `[50, 100]`. - fn submit_finality_proof(p: u32, v: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `268 + p * (60 ±0)` - // Estimated: `51735` - // Minimum execution time: 304_726_000 picoseconds. - Weight::from_parts(16_868_060, 0) - .saturating_add(Weight::from_parts(0, 51735)) - // Standard Error: 2_802 - .saturating_add(Weight::from_parts(55_200_017, 0).saturating_mul(p.into())) - // Standard Error: 46_745 - .saturating_add(Weight::from_parts(2_689_151, 0).saturating_mul(v.into())) - .saturating_add(T::DbWeight::get().reads(5)) - .saturating_add(T::DbWeight::get().writes(5)) - } -} diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_bridge_grandpa_westend_finality.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_bridge_grandpa_westend_finality.rs deleted file mode 100644 index 4ed140b7d17..00000000000 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_bridge_grandpa_westend_finality.rs +++ /dev/null @@ -1,83 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Cumulus. - -// Cumulus is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Cumulus is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Cumulus. If not, see . - -//! Autogenerated weights for `pallet_bridge_grandpa` -//! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! 
DATE: 2023-10-27, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-vmdtonbz-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("bridge-hub-rococo-dev")`, DB CACHE: 1024 - -// Executed Command: -// target/production/polkadot-parachain -// benchmark -// pallet -// --steps=50 -// --repeat=20 -// --extrinsic=* -// --wasm-execution=compiled -// --heap-pages=4096 -// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json -// --pallet=pallet_bridge_grandpa -// --chain=bridge-hub-rococo-dev -// --header=./cumulus/file_header.txt -// --output=./cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/ - -#![cfg_attr(rustfmt, rustfmt_skip)] -#![allow(unused_parens)] -#![allow(unused_imports)] -#![allow(missing_docs)] - -use frame_support::{traits::Get, weights::Weight}; -use core::marker::PhantomData; - -/// Weight functions for `pallet_bridge_grandpa`. -pub struct WeightInfo(PhantomData); -impl pallet_bridge_grandpa::WeightInfo for WeightInfo { - /// Storage: `BridgeWestendGrandpa::PalletOperatingMode` (r:1 w:0) - /// Proof: `BridgeWestendGrandpa::PalletOperatingMode` (`max_values`: Some(1), `max_size`: Some(1), added: 496, mode: `MaxEncodedLen`) - /// Storage: `BridgeWestendGrandpa::BestFinalized` (r:1 w:1) - /// Proof: `BridgeWestendGrandpa::BestFinalized` (`max_values`: Some(1), `max_size`: Some(36), added: 531, mode: `MaxEncodedLen`) - /// Storage: `BridgeWestendGrandpa::CurrentAuthoritySet` (r:1 w:0) - /// Proof: `BridgeWestendGrandpa::CurrentAuthoritySet` (`max_values`: Some(1), `max_size`: Some(50250), added: 50745, mode: `MaxEncodedLen`) - /// Storage: `BridgeWestendGrandpa::ImportedHashesPointer` (r:1 w:1) - /// Proof: `BridgeWestendGrandpa::ImportedHashesPointer` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - /// Storage: `BridgeWestendGrandpa::ImportedHashes` (r:1 w:1) - /// Proof: `BridgeWestendGrandpa::ImportedHashes` (`max_values`: Some(1024), `max_size`: Some(36), added: 1521, mode: `MaxEncodedLen`) - /// Storage: `BridgeWestendGrandpa::ImportedHeaders` (r:0 w:2) - /// Proof: `BridgeWestendGrandpa::ImportedHeaders` (`max_values`: Some(1024), `max_size`: Some(68), added: 1553, mode: `MaxEncodedLen`) - /// The range of component `p` is `[1, 838]`. - /// The range of component `v` is `[50, 100]`. - /// The range of component `p` is `[1, 838]`. - /// The range of component `v` is `[50, 100]`. - /// The range of component `p` is `[1, 838]`. - /// The range of component `v` is `[50, 100]`. - fn submit_finality_proof(p: u32, v: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `335 + p * (60 ±0)` - // Estimated: `51735` - // Minimum execution time: 305_905_000 picoseconds. 
- Weight::from_parts(2_636_863, 0) - .saturating_add(Weight::from_parts(0, 51735)) - // Standard Error: 2_724 - .saturating_add(Weight::from_parts(55_199_477, 0).saturating_mul(p.into())) - // Standard Error: 45_444 - .saturating_add(Weight::from_parts(2_835_596, 0).saturating_mul(v.into())) - .saturating_add(T::DbWeight::get().reads(5)) - .saturating_add(T::DbWeight::get().writes(5)) - } -} diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_bridge_grandpa_wococo_finality.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_bridge_grandpa_wococo_finality.rs deleted file mode 100644 index a82854e0c67..00000000000 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_bridge_grandpa_wococo_finality.rs +++ /dev/null @@ -1,82 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! Autogenerated weights for `pallet_bridge_grandpa` -//! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-10-27, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-vmdtonbz-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("bridge-hub-rococo-dev")`, DB CACHE: 1024 - -// Executed Command: -// target/production/polkadot-parachain -// benchmark -// pallet -// --steps=50 -// --repeat=20 -// --extrinsic=* -// --wasm-execution=compiled -// --heap-pages=4096 -// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json -// --pallet=pallet_bridge_grandpa -// --chain=bridge-hub-rococo-dev -// --header=./cumulus/file_header.txt -// --output=./cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/ - -#![cfg_attr(rustfmt, rustfmt_skip)] -#![allow(unused_parens)] -#![allow(unused_imports)] -#![allow(missing_docs)] - -use frame_support::{traits::Get, weights::Weight}; -use core::marker::PhantomData; - -/// Weight functions for `pallet_bridge_grandpa`. 
-pub struct WeightInfo(PhantomData); -impl pallet_bridge_grandpa::WeightInfo for WeightInfo { - /// Storage: `BridgeWococoGrandpa::PalletOperatingMode` (r:1 w:0) - /// Proof: `BridgeWococoGrandpa::PalletOperatingMode` (`max_values`: Some(1), `max_size`: Some(1), added: 496, mode: `MaxEncodedLen`) - /// Storage: `BridgeWococoGrandpa::BestFinalized` (r:1 w:1) - /// Proof: `BridgeWococoGrandpa::BestFinalized` (`max_values`: Some(1), `max_size`: Some(36), added: 531, mode: `MaxEncodedLen`) - /// Storage: `BridgeWococoGrandpa::CurrentAuthoritySet` (r:1 w:0) - /// Proof: `BridgeWococoGrandpa::CurrentAuthoritySet` (`max_values`: Some(1), `max_size`: Some(50250), added: 50745, mode: `MaxEncodedLen`) - /// Storage: `BridgeWococoGrandpa::ImportedHashesPointer` (r:1 w:1) - /// Proof: `BridgeWococoGrandpa::ImportedHashesPointer` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - /// Storage: `BridgeWococoGrandpa::ImportedHashes` (r:1 w:1) - /// Proof: `BridgeWococoGrandpa::ImportedHashes` (`max_values`: Some(1024), `max_size`: Some(36), added: 1521, mode: `MaxEncodedLen`) - /// Storage: `BridgeWococoGrandpa::ImportedHeaders` (r:0 w:2) - /// Proof: `BridgeWococoGrandpa::ImportedHeaders` (`max_values`: Some(1024), `max_size`: Some(68), added: 1553, mode: `MaxEncodedLen`) - /// The range of component `p` is `[1, 838]`. - /// The range of component `v` is `[50, 100]`. - /// The range of component `p` is `[1, 838]`. - /// The range of component `v` is `[50, 100]`. - /// The range of component `p` is `[1, 838]`. - /// The range of component `v` is `[50, 100]`. - fn submit_finality_proof(p: u32, v: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `302 + p * (60 ±0)` - // Estimated: `51735` - // Minimum execution time: 305_146_000 picoseconds. - Weight::from_parts(308_711_000, 0) - .saturating_add(Weight::from_parts(0, 51735)) - // Standard Error: 2_651 - .saturating_add(Weight::from_parts(55_082_480, 0).saturating_mul(p.into())) - // Standard Error: 20_462 - .saturating_add(Weight::from_parts(298_367, 0).saturating_mul(v.into())) - .saturating_add(T::DbWeight::get().reads(5)) - .saturating_add(T::DbWeight::get().writes(5)) - } -} diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_bridge_messages.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_bridge_messages.rs index 319a4de8e96..17a45df5bfb 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_bridge_messages.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_bridge_messages.rs @@ -1,40 +1,41 @@ // Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 +// This file is part of Cumulus. -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
+// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . //! Autogenerated weights for `pallet_bridge_messages` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-05-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-11-14, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `bm4`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` -//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("bridge-hub-rococo-dev"), DB CACHE: 1024 +//! HOSTNAME: `runner-yprdrvc7-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("bridge-hub-rococo-dev")`, DB CACHE: 1024 // Executed Command: -// ./artifacts/polkadot-parachain +// target/production/polkadot-parachain // benchmark // pallet -// --chain=bridge-hub-rococo-dev -// --execution=wasm -// --wasm-execution=compiled -// --pallet=pallet_bridge_messages -// --extrinsic=* // --steps=50 // --repeat=20 -// --json -// --header=./file_header.txt -// --output=./parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_bridge_messages.rs +// --extrinsic=* +// --wasm-execution=compiled +// --heap-pages=4096 +// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json +// --pallet=pallet_bridge_messages +// --chain=bridge-hub-rococo-dev +// --header=./cumulus/file_header.txt +// --output=./cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/ #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -47,184 +48,195 @@ use core::marker::PhantomData; /// Weight functions for `pallet_bridge_messages`. 
pub struct WeightInfo(PhantomData); impl pallet_bridge_messages::WeightInfo for WeightInfo { - /// Storage: BridgeRococoMessages PalletOperatingMode (r:1 w:0) - /// Proof: BridgeRococoMessages PalletOperatingMode (max_values: Some(1), max_size: Some(2), added: 497, mode: MaxEncodedLen) - /// Storage: BridgeRococoParachain ImportedParaHeads (r:1 w:0) - /// Proof: BridgeRococoParachain ImportedParaHeads (max_values: Some(64), max_size: Some(196), added: 1186, mode: MaxEncodedLen) - /// Storage: BridgeRococoMessages InboundLanes (r:1 w:1) - /// Proof: BridgeRococoMessages InboundLanes (max_values: None, max_size: Some(49180), added: 51655, mode: MaxEncodedLen) - /// Storage: ParachainInfo ParachainId (r:1 w:0) - /// Proof: ParachainInfo ParachainId (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) + /// Storage: `BridgeWestendMessages::PalletOperatingMode` (r:1 w:0) + /// Proof: `BridgeWestendMessages::PalletOperatingMode` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`) + /// Storage: `XcmpQueue::OutboundXcmpStatus` (r:1 w:0) + /// Proof: `XcmpQueue::OutboundXcmpStatus` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `BridgeWestendParachains::ImportedParaHeads` (r:1 w:0) + /// Proof: `BridgeWestendParachains::ImportedParaHeads` (`max_values`: Some(64), `max_size`: Some(196), added: 1186, mode: `MaxEncodedLen`) + /// Storage: `BridgeWestendMessages::InboundLanes` (r:1 w:1) + /// Proof: `BridgeWestendMessages::InboundLanes` (`max_values`: None, `max_size`: Some(49180), added: 51655, mode: `MaxEncodedLen`) + /// Storage: `ParachainInfo::ParachainId` (r:1 w:0) + /// Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) fn receive_single_message_proof() -> Weight { // Proof Size summary in bytes: - // Measured: `367` + // Measured: `538` // Estimated: `52645` - // Minimum execution time: 43_187_000 picoseconds. - Weight::from_parts(43_681_000, 0) + // Minimum execution time: 41_577_000 picoseconds. 
+ Weight::from_parts(42_621_000, 0) .saturating_add(Weight::from_parts(0, 52645)) - .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: BridgeRococoMessages PalletOperatingMode (r:1 w:0) - /// Proof: BridgeRococoMessages PalletOperatingMode (max_values: Some(1), max_size: Some(2), added: 497, mode: MaxEncodedLen) - /// Storage: BridgeRococoParachain ImportedParaHeads (r:1 w:0) - /// Proof: BridgeRococoParachain ImportedParaHeads (max_values: Some(64), max_size: Some(196), added: 1186, mode: MaxEncodedLen) - /// Storage: BridgeRococoMessages InboundLanes (r:1 w:1) - /// Proof: BridgeRococoMessages InboundLanes (max_values: None, max_size: Some(49180), added: 51655, mode: MaxEncodedLen) - /// Storage: ParachainInfo ParachainId (r:1 w:0) - /// Proof: ParachainInfo ParachainId (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) + /// Storage: `BridgeWestendMessages::PalletOperatingMode` (r:1 w:0) + /// Proof: `BridgeWestendMessages::PalletOperatingMode` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`) + /// Storage: `XcmpQueue::OutboundXcmpStatus` (r:1 w:0) + /// Proof: `XcmpQueue::OutboundXcmpStatus` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `BridgeWestendParachains::ImportedParaHeads` (r:1 w:0) + /// Proof: `BridgeWestendParachains::ImportedParaHeads` (`max_values`: Some(64), `max_size`: Some(196), added: 1186, mode: `MaxEncodedLen`) + /// Storage: `BridgeWestendMessages::InboundLanes` (r:1 w:1) + /// Proof: `BridgeWestendMessages::InboundLanes` (`max_values`: None, `max_size`: Some(49180), added: 51655, mode: `MaxEncodedLen`) + /// Storage: `ParachainInfo::ParachainId` (r:1 w:0) + /// Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) fn receive_two_messages_proof() -> Weight { // Proof Size summary in bytes: - // Measured: `367` + // Measured: `538` // Estimated: `52645` - // Minimum execution time: 54_131_000 picoseconds. - Weight::from_parts(54_813_000, 0) + // Minimum execution time: 52_880_000 picoseconds. 
+ Weight::from_parts(53_697_000, 0) .saturating_add(Weight::from_parts(0, 52645)) - .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: BridgeRococoMessages PalletOperatingMode (r:1 w:0) - /// Proof: BridgeRococoMessages PalletOperatingMode (max_values: Some(1), max_size: Some(2), added: 497, mode: MaxEncodedLen) - /// Storage: BridgeRococoParachain ImportedParaHeads (r:1 w:0) - /// Proof: BridgeRococoParachain ImportedParaHeads (max_values: Some(64), max_size: Some(196), added: 1186, mode: MaxEncodedLen) - /// Storage: BridgeRococoMessages InboundLanes (r:1 w:1) - /// Proof: BridgeRococoMessages InboundLanes (max_values: None, max_size: Some(49180), added: 51655, mode: MaxEncodedLen) - /// Storage: ParachainInfo ParachainId (r:1 w:0) - /// Proof: ParachainInfo ParachainId (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) + /// Storage: `BridgeWestendMessages::PalletOperatingMode` (r:1 w:0) + /// Proof: `BridgeWestendMessages::PalletOperatingMode` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`) + /// Storage: `XcmpQueue::OutboundXcmpStatus` (r:1 w:0) + /// Proof: `XcmpQueue::OutboundXcmpStatus` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `BridgeWestendParachains::ImportedParaHeads` (r:1 w:0) + /// Proof: `BridgeWestendParachains::ImportedParaHeads` (`max_values`: Some(64), `max_size`: Some(196), added: 1186, mode: `MaxEncodedLen`) + /// Storage: `BridgeWestendMessages::InboundLanes` (r:1 w:1) + /// Proof: `BridgeWestendMessages::InboundLanes` (`max_values`: None, `max_size`: Some(49180), added: 51655, mode: `MaxEncodedLen`) + /// Storage: `ParachainInfo::ParachainId` (r:1 w:0) + /// Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) fn receive_single_message_proof_with_outbound_lane_state() -> Weight { // Proof Size summary in bytes: - // Measured: `367` + // Measured: `538` // Estimated: `52645` - // Minimum execution time: 48_120_000 picoseconds. - Weight::from_parts(48_733_000, 0) + // Minimum execution time: 47_424_000 picoseconds. 
+ Weight::from_parts(48_445_000, 0) .saturating_add(Weight::from_parts(0, 52645)) - .saturating_add(T::DbWeight::get().reads(4)) + .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: BridgeRococoMessages PalletOperatingMode (r:1 w:0) - /// Proof: BridgeRococoMessages PalletOperatingMode (max_values: Some(1), max_size: Some(2), added: 497, mode: MaxEncodedLen) - /// Storage: BridgeRococoParachain ImportedParaHeads (r:1 w:0) - /// Proof: BridgeRococoParachain ImportedParaHeads (max_values: Some(64), max_size: Some(196), added: 1186, mode: MaxEncodedLen) - /// Storage: BridgeRococoMessages InboundLanes (r:1 w:1) - /// Proof: BridgeRococoMessages InboundLanes (max_values: None, max_size: Some(49180), added: 51655, mode: MaxEncodedLen) + /// Storage: `BridgeWestendMessages::PalletOperatingMode` (r:1 w:0) + /// Proof: `BridgeWestendMessages::PalletOperatingMode` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`) + /// Storage: `XcmpQueue::OutboundXcmpStatus` (r:1 w:0) + /// Proof: `XcmpQueue::OutboundXcmpStatus` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `BridgeWestendParachains::ImportedParaHeads` (r:1 w:0) + /// Proof: `BridgeWestendParachains::ImportedParaHeads` (`max_values`: Some(64), `max_size`: Some(196), added: 1186, mode: `MaxEncodedLen`) + /// Storage: `BridgeWestendMessages::InboundLanes` (r:1 w:1) + /// Proof: `BridgeWestendMessages::InboundLanes` (`max_values`: None, `max_size`: Some(49180), added: 51655, mode: `MaxEncodedLen`) fn receive_single_message_proof_1_kb() -> Weight { // Proof Size summary in bytes: - // Measured: `335` + // Measured: `506` // Estimated: `52645` - // Minimum execution time: 41_028_000 picoseconds. - Weight::from_parts(41_635_000, 0) + // Minimum execution time: 40_619_000 picoseconds. 
+ Weight::from_parts(42_262_000, 0) .saturating_add(Weight::from_parts(0, 52645)) - .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: BridgeRococoMessages PalletOperatingMode (r:1 w:0) - /// Proof: BridgeRococoMessages PalletOperatingMode (max_values: Some(1), max_size: Some(2), added: 497, mode: MaxEncodedLen) - /// Storage: BridgeRococoParachain ImportedParaHeads (r:1 w:0) - /// Proof: BridgeRococoParachain ImportedParaHeads (max_values: Some(64), max_size: Some(196), added: 1186, mode: MaxEncodedLen) - /// Storage: BridgeRococoMessages InboundLanes (r:1 w:1) - /// Proof: BridgeRococoMessages InboundLanes (max_values: None, max_size: Some(49180), added: 51655, mode: MaxEncodedLen) + /// Storage: `BridgeWestendMessages::PalletOperatingMode` (r:1 w:0) + /// Proof: `BridgeWestendMessages::PalletOperatingMode` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`) + /// Storage: `XcmpQueue::OutboundXcmpStatus` (r:1 w:0) + /// Proof: `XcmpQueue::OutboundXcmpStatus` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `BridgeWestendParachains::ImportedParaHeads` (r:1 w:0) + /// Proof: `BridgeWestendParachains::ImportedParaHeads` (`max_values`: Some(64), `max_size`: Some(196), added: 1186, mode: `MaxEncodedLen`) + /// Storage: `BridgeWestendMessages::InboundLanes` (r:1 w:1) + /// Proof: `BridgeWestendMessages::InboundLanes` (`max_values`: None, `max_size`: Some(49180), added: 51655, mode: `MaxEncodedLen`) fn receive_single_message_proof_16_kb() -> Weight { // Proof Size summary in bytes: - // Measured: `335` + // Measured: `506` // Estimated: `52645` - // Minimum execution time: 68_499_000 picoseconds. - Weight::from_parts(69_263_000, 0) + // Minimum execution time: 74_603_000 picoseconds. 
+ Weight::from_parts(78_209_000, 0) .saturating_add(Weight::from_parts(0, 52645)) - .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: BridgeRococoMessages PalletOperatingMode (r:1 w:0) - /// Proof: BridgeRococoMessages PalletOperatingMode (max_values: Some(1), max_size: Some(2), added: 497, mode: MaxEncodedLen) - /// Storage: BridgeRococoParachain ImportedParaHeads (r:1 w:0) - /// Proof: BridgeRococoParachain ImportedParaHeads (max_values: Some(64), max_size: Some(196), added: 1186, mode: MaxEncodedLen) - /// Storage: BridgeRococoMessages OutboundLanes (r:1 w:1) - /// Proof: BridgeRococoMessages OutboundLanes (max_values: Some(1), max_size: Some(44), added: 539, mode: MaxEncodedLen) - /// Storage: unknown `0x6e0a18b62a1de81c5f519181cc611e18` (r:1 w:0) - /// Proof Skipped: unknown `0x6e0a18b62a1de81c5f519181cc611e18` (r:1 w:0) - /// Storage: BridgeRelayers RelayerRewards (r:1 w:1) - /// Proof: BridgeRelayers RelayerRewards (max_values: None, max_size: Some(73), added: 2548, mode: MaxEncodedLen) + /// Storage: `BridgeWestendMessages::PalletOperatingMode` (r:1 w:0) + /// Proof: `BridgeWestendMessages::PalletOperatingMode` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`) + /// Storage: `BridgeWestendParachains::ImportedParaHeads` (r:1 w:0) + /// Proof: `BridgeWestendParachains::ImportedParaHeads` (`max_values`: Some(64), `max_size`: Some(196), added: 1186, mode: `MaxEncodedLen`) + /// Storage: `BridgeWestendMessages::OutboundLanes` (r:1 w:1) + /// Proof: `BridgeWestendMessages::OutboundLanes` (`max_values`: Some(1), `max_size`: Some(44), added: 539, mode: `MaxEncodedLen`) + /// Storage: UNKNOWN KEY `0x6e0a18b62a1de81c5f519181cc611e18` (r:1 w:0) + /// Proof: UNKNOWN KEY `0x6e0a18b62a1de81c5f519181cc611e18` (r:1 w:0) + /// Storage: `BridgeRelayers::RelayerRewards` (r:1 w:1) + /// Proof: `BridgeRelayers::RelayerRewards` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) fn receive_delivery_proof_for_single_message() -> Weight { // Proof Size summary in bytes: - // Measured: `339` - // Estimated: `3804` - // Minimum execution time: 32_277_000 picoseconds. - Weight::from_parts(32_880_000, 0) - .saturating_add(Weight::from_parts(0, 3804)) + // Measured: `377` + // Estimated: `3842` + // Minimum execution time: 33_762_000 picoseconds. 
+ Weight::from_parts(34_405_000, 0) + .saturating_add(Weight::from_parts(0, 3842)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: BridgeRococoMessages PalletOperatingMode (r:1 w:0) - /// Proof: BridgeRococoMessages PalletOperatingMode (max_values: Some(1), max_size: Some(2), added: 497, mode: MaxEncodedLen) - /// Storage: BridgeRococoParachain ImportedParaHeads (r:1 w:0) - /// Proof: BridgeRococoParachain ImportedParaHeads (max_values: Some(64), max_size: Some(196), added: 1186, mode: MaxEncodedLen) - /// Storage: BridgeRococoMessages OutboundLanes (r:1 w:1) - /// Proof: BridgeRococoMessages OutboundLanes (max_values: Some(1), max_size: Some(44), added: 539, mode: MaxEncodedLen) - /// Storage: unknown `0x6e0a18b62a1de81c5f519181cc611e18` (r:1 w:0) - /// Proof Skipped: unknown `0x6e0a18b62a1de81c5f519181cc611e18` (r:1 w:0) - /// Storage: BridgeRelayers RelayerRewards (r:1 w:1) - /// Proof: BridgeRelayers RelayerRewards (max_values: None, max_size: Some(73), added: 2548, mode: MaxEncodedLen) + /// Storage: `BridgeWestendMessages::PalletOperatingMode` (r:1 w:0) + /// Proof: `BridgeWestendMessages::PalletOperatingMode` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`) + /// Storage: `BridgeWestendParachains::ImportedParaHeads` (r:1 w:0) + /// Proof: `BridgeWestendParachains::ImportedParaHeads` (`max_values`: Some(64), `max_size`: Some(196), added: 1186, mode: `MaxEncodedLen`) + /// Storage: `BridgeWestendMessages::OutboundLanes` (r:1 w:1) + /// Proof: `BridgeWestendMessages::OutboundLanes` (`max_values`: Some(1), `max_size`: Some(44), added: 539, mode: `MaxEncodedLen`) + /// Storage: UNKNOWN KEY `0x6e0a18b62a1de81c5f519181cc611e18` (r:1 w:0) + /// Proof: UNKNOWN KEY `0x6e0a18b62a1de81c5f519181cc611e18` (r:1 w:0) + /// Storage: `BridgeRelayers::RelayerRewards` (r:1 w:1) + /// Proof: `BridgeRelayers::RelayerRewards` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) fn receive_delivery_proof_for_two_messages_by_single_relayer() -> Weight { // Proof Size summary in bytes: - // Measured: `339` - // Estimated: `3804` - // Minimum execution time: 32_504_000 picoseconds. - Weight::from_parts(33_085_000, 0) - .saturating_add(Weight::from_parts(0, 3804)) + // Measured: `377` + // Estimated: `3842` + // Minimum execution time: 33_805_000 picoseconds. 
+ Weight::from_parts(35_051_000, 0) + .saturating_add(Weight::from_parts(0, 3842)) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: BridgeRococoMessages PalletOperatingMode (r:1 w:0) - /// Proof: BridgeRococoMessages PalletOperatingMode (max_values: Some(1), max_size: Some(2), added: 497, mode: MaxEncodedLen) - /// Storage: BridgeRococoParachain ImportedParaHeads (r:1 w:0) - /// Proof: BridgeRococoParachain ImportedParaHeads (max_values: Some(64), max_size: Some(196), added: 1186, mode: MaxEncodedLen) - /// Storage: BridgeRococoMessages OutboundLanes (r:1 w:1) - /// Proof: BridgeRococoMessages OutboundLanes (max_values: Some(1), max_size: Some(44), added: 539, mode: MaxEncodedLen) - /// Storage: unknown `0x6e0a18b62a1de81c5f519181cc611e18` (r:1 w:0) - /// Proof Skipped: unknown `0x6e0a18b62a1de81c5f519181cc611e18` (r:1 w:0) - /// Storage: BridgeRelayers RelayerRewards (r:2 w:2) - /// Proof: BridgeRelayers RelayerRewards (max_values: None, max_size: Some(73), added: 2548, mode: MaxEncodedLen) + /// Storage: `BridgeWestendMessages::PalletOperatingMode` (r:1 w:0) + /// Proof: `BridgeWestendMessages::PalletOperatingMode` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`) + /// Storage: `BridgeWestendParachains::ImportedParaHeads` (r:1 w:0) + /// Proof: `BridgeWestendParachains::ImportedParaHeads` (`max_values`: Some(64), `max_size`: Some(196), added: 1186, mode: `MaxEncodedLen`) + /// Storage: `BridgeWestendMessages::OutboundLanes` (r:1 w:1) + /// Proof: `BridgeWestendMessages::OutboundLanes` (`max_values`: Some(1), `max_size`: Some(44), added: 539, mode: `MaxEncodedLen`) + /// Storage: UNKNOWN KEY `0x6e0a18b62a1de81c5f519181cc611e18` (r:1 w:0) + /// Proof: UNKNOWN KEY `0x6e0a18b62a1de81c5f519181cc611e18` (r:1 w:0) + /// Storage: `BridgeRelayers::RelayerRewards` (r:2 w:2) + /// Proof: `BridgeRelayers::RelayerRewards` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) fn receive_delivery_proof_for_two_messages_by_two_relayers() -> Weight { // Proof Size summary in bytes: - // Measured: `339` + // Measured: `377` // Estimated: `6086` - // Minimum execution time: 34_963_000 picoseconds. - Weight::from_parts(35_473_000, 0) + // Minimum execution time: 38_612_000 picoseconds. 
+ Weight::from_parts(39_412_000, 0) .saturating_add(Weight::from_parts(0, 6086)) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(3)) } - /// Storage: BridgeRococoMessages PalletOperatingMode (r:1 w:0) - /// Proof: BridgeRococoMessages PalletOperatingMode (max_values: Some(1), max_size: Some(2), added: 497, mode: MaxEncodedLen) - /// Storage: BridgeRococoParachain ImportedParaHeads (r:1 w:0) - /// Proof: BridgeRococoParachain ImportedParaHeads (max_values: Some(64), max_size: Some(196), added: 1186, mode: MaxEncodedLen) - /// Storage: BridgeRococoMessages InboundLanes (r:1 w:1) - /// Proof: BridgeRococoMessages InboundLanes (max_values: None, max_size: Some(49180), added: 51655, mode: MaxEncodedLen) - /// Storage: ParachainInfo ParachainId (r:1 w:0) - /// Proof: ParachainInfo ParachainId (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - /// Storage: PolkadotXcm SupportedVersion (r:1 w:0) - /// Proof Skipped: PolkadotXcm SupportedVersion (max_values: None, max_size: None, mode: Measured) - /// Storage: PolkadotXcm VersionDiscoveryQueue (r:1 w:1) - /// Proof Skipped: PolkadotXcm VersionDiscoveryQueue (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: PolkadotXcm SafeXcmVersion (r:1 w:0) - /// Proof Skipped: PolkadotXcm SafeXcmVersion (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ParachainSystem RelevantMessagingState (r:1 w:0) - /// Proof Skipped: ParachainSystem RelevantMessagingState (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: XcmpQueue OutboundXcmpStatus (r:1 w:1) - /// Proof Skipped: XcmpQueue OutboundXcmpStatus (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: XcmpQueue OutboundXcmpMessages (r:0 w:1) - /// Proof Skipped: XcmpQueue OutboundXcmpMessages (max_values: None, max_size: None, mode: Measured) - /// The range of component `i` is `[128, 2048]`. 
+ /// Storage: `BridgeWestendMessages::PalletOperatingMode` (r:1 w:0) + /// Proof: `BridgeWestendMessages::PalletOperatingMode` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`) + /// Storage: `XcmpQueue::OutboundXcmpStatus` (r:1 w:1) + /// Proof: `XcmpQueue::OutboundXcmpStatus` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `BridgeWestendParachains::ImportedParaHeads` (r:1 w:0) + /// Proof: `BridgeWestendParachains::ImportedParaHeads` (`max_values`: Some(64), `max_size`: Some(196), added: 1186, mode: `MaxEncodedLen`) + /// Storage: `BridgeWestendMessages::InboundLanes` (r:1 w:1) + /// Proof: `BridgeWestendMessages::InboundLanes` (`max_values`: None, `max_size`: Some(49180), added: 51655, mode: `MaxEncodedLen`) + /// Storage: `ParachainInfo::ParachainId` (r:1 w:0) + /// Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `XcmpQueue::DeliveryFeeFactor` (r:1 w:0) + /// Proof: `XcmpQueue::DeliveryFeeFactor` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) + /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) + /// Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) + /// Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParachainSystem::RelevantMessagingState` (r:1 w:0) + /// Proof: `ParachainSystem::RelevantMessagingState` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `XcmpQueue::OutboundXcmpMessages` (r:0 w:1) + /// Proof: `XcmpQueue::OutboundXcmpMessages` (`max_values`: None, `max_size`: None, mode: `Measured`) /// The range of component `i` is `[128, 2048]`. fn receive_single_message_proof_with_dispatch(i: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `635` + // Measured: `669` // Estimated: `52645` - // Minimum execution time: 129_978_000 picoseconds. - Weight::from_parts(98_246_356, 0) + // Minimum execution time: 69_285_000 picoseconds. + Weight::from_parts(70_867_498, 0) .saturating_add(Weight::from_parts(0, 52645)) - // Standard Error: 2_554 - .saturating_add(Weight::from_parts(544_728, 0).saturating_mul(i.into())) - .saturating_add(T::DbWeight::get().reads(9)) + // Standard Error: 111 + .saturating_add(Weight::from_parts(7_489, 0).saturating_mul(i.into())) + .saturating_add(T::DbWeight::get().reads(10)) .saturating_add(T::DbWeight::get().writes(4)) } } diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_bridge_messages_rococo_to_westend.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_bridge_messages_rococo_to_westend.rs deleted file mode 100644 index 6513b63474a..00000000000 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_bridge_messages_rococo_to_westend.rs +++ /dev/null @@ -1,245 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Cumulus. - -// Cumulus is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. 
- -// Cumulus is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Cumulus. If not, see . - -//! Autogenerated weights for `pallet_bridge_messages` -//! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-10-26, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-vmdtonbz-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("bridge-hub-rococo-dev")`, DB CACHE: 1024 - -// Executed Command: -// target/production/polkadot-parachain -// benchmark -// pallet -// --steps=50 -// --repeat=20 -// --extrinsic=* -// --wasm-execution=compiled -// --heap-pages=4096 -// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json -// --pallet=pallet_bridge_messages -// --chain=bridge-hub-rococo-dev -// --header=./cumulus/file_header.txt -// --output=./cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/ - -#![cfg_attr(rustfmt, rustfmt_skip)] -#![allow(unused_parens)] -#![allow(unused_imports)] -#![allow(missing_docs)] - -use frame_support::{traits::Get, weights::Weight}; -use core::marker::PhantomData; - -/// Weight functions for `pallet_bridge_messages`. -pub struct WeightInfo(PhantomData); -impl pallet_bridge_messages::WeightInfo for WeightInfo { - /// Storage: `BridgeRococoToWestendMessages::PalletOperatingMode` (r:1 w:0) - /// Proof: `BridgeRococoToWestendMessages::PalletOperatingMode` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`) - /// Storage: `XcmpQueue::OutboundXcmpStatus` (r:1 w:0) - /// Proof: `XcmpQueue::OutboundXcmpStatus` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `BridgeWestendParachain::ImportedParaHeads` (r:1 w:0) - /// Proof: `BridgeWestendParachain::ImportedParaHeads` (`max_values`: Some(64), `max_size`: Some(196), added: 1186, mode: `MaxEncodedLen`) - /// Storage: `BridgeRococoToWestendMessages::InboundLanes` (r:1 w:1) - /// Proof: `BridgeRococoToWestendMessages::InboundLanes` (`max_values`: None, `max_size`: Some(49180), added: 51655, mode: `MaxEncodedLen`) - /// Storage: `ParachainInfo::ParachainId` (r:1 w:0) - /// Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - fn receive_single_message_proof() -> Weight { - // Proof Size summary in bytes: - // Measured: `643` - // Estimated: `52645` - // Minimum execution time: 41_873_000 picoseconds. 
- Weight::from_parts(43_434_000, 0) - .saturating_add(Weight::from_parts(0, 52645)) - .saturating_add(T::DbWeight::get().reads(5)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `BridgeRococoToWestendMessages::PalletOperatingMode` (r:1 w:0) - /// Proof: `BridgeRococoToWestendMessages::PalletOperatingMode` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`) - /// Storage: `XcmpQueue::OutboundXcmpStatus` (r:1 w:0) - /// Proof: `XcmpQueue::OutboundXcmpStatus` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `BridgeWestendParachain::ImportedParaHeads` (r:1 w:0) - /// Proof: `BridgeWestendParachain::ImportedParaHeads` (`max_values`: Some(64), `max_size`: Some(196), added: 1186, mode: `MaxEncodedLen`) - /// Storage: `BridgeRococoToWestendMessages::InboundLanes` (r:1 w:1) - /// Proof: `BridgeRococoToWestendMessages::InboundLanes` (`max_values`: None, `max_size`: Some(49180), added: 51655, mode: `MaxEncodedLen`) - /// Storage: `ParachainInfo::ParachainId` (r:1 w:0) - /// Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - fn receive_two_messages_proof() -> Weight { - // Proof Size summary in bytes: - // Measured: `643` - // Estimated: `52645` - // Minimum execution time: 53_328_000 picoseconds. - Weight::from_parts(54_592_000, 0) - .saturating_add(Weight::from_parts(0, 52645)) - .saturating_add(T::DbWeight::get().reads(5)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `BridgeRococoToWestendMessages::PalletOperatingMode` (r:1 w:0) - /// Proof: `BridgeRococoToWestendMessages::PalletOperatingMode` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`) - /// Storage: `XcmpQueue::OutboundXcmpStatus` (r:1 w:0) - /// Proof: `XcmpQueue::OutboundXcmpStatus` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `BridgeWestendParachain::ImportedParaHeads` (r:1 w:0) - /// Proof: `BridgeWestendParachain::ImportedParaHeads` (`max_values`: Some(64), `max_size`: Some(196), added: 1186, mode: `MaxEncodedLen`) - /// Storage: `BridgeRococoToWestendMessages::InboundLanes` (r:1 w:1) - /// Proof: `BridgeRococoToWestendMessages::InboundLanes` (`max_values`: None, `max_size`: Some(49180), added: 51655, mode: `MaxEncodedLen`) - /// Storage: `ParachainInfo::ParachainId` (r:1 w:0) - /// Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - fn receive_single_message_proof_with_outbound_lane_state() -> Weight { - // Proof Size summary in bytes: - // Measured: `643` - // Estimated: `52645` - // Minimum execution time: 47_486_000 picoseconds. 
- Weight::from_parts(48_721_000, 0) - .saturating_add(Weight::from_parts(0, 52645)) - .saturating_add(T::DbWeight::get().reads(5)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `BridgeRococoToWestendMessages::PalletOperatingMode` (r:1 w:0) - /// Proof: `BridgeRococoToWestendMessages::PalletOperatingMode` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`) - /// Storage: `XcmpQueue::OutboundXcmpStatus` (r:1 w:0) - /// Proof: `XcmpQueue::OutboundXcmpStatus` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `BridgeWestendParachain::ImportedParaHeads` (r:1 w:0) - /// Proof: `BridgeWestendParachain::ImportedParaHeads` (`max_values`: Some(64), `max_size`: Some(196), added: 1186, mode: `MaxEncodedLen`) - /// Storage: `BridgeRococoToWestendMessages::InboundLanes` (r:1 w:1) - /// Proof: `BridgeRococoToWestendMessages::InboundLanes` (`max_values`: None, `max_size`: Some(49180), added: 51655, mode: `MaxEncodedLen`) - fn receive_single_message_proof_1_kb() -> Weight { - // Proof Size summary in bytes: - // Measured: `611` - // Estimated: `52645` - // Minimum execution time: 41_093_000 picoseconds. - Weight::from_parts(42_050_000, 0) - .saturating_add(Weight::from_parts(0, 52645)) - .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `BridgeRococoToWestendMessages::PalletOperatingMode` (r:1 w:0) - /// Proof: `BridgeRococoToWestendMessages::PalletOperatingMode` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`) - /// Storage: `XcmpQueue::OutboundXcmpStatus` (r:1 w:0) - /// Proof: `XcmpQueue::OutboundXcmpStatus` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `BridgeWestendParachain::ImportedParaHeads` (r:1 w:0) - /// Proof: `BridgeWestendParachain::ImportedParaHeads` (`max_values`: Some(64), `max_size`: Some(196), added: 1186, mode: `MaxEncodedLen`) - /// Storage: `BridgeRococoToWestendMessages::InboundLanes` (r:1 w:1) - /// Proof: `BridgeRococoToWestendMessages::InboundLanes` (`max_values`: None, `max_size`: Some(49180), added: 51655, mode: `MaxEncodedLen`) - fn receive_single_message_proof_16_kb() -> Weight { - // Proof Size summary in bytes: - // Measured: `611` - // Estimated: `52645` - // Minimum execution time: 71_947_000 picoseconds. 
- Weight::from_parts(74_564_000, 0) - .saturating_add(Weight::from_parts(0, 52645)) - .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `BridgeRococoToWestendMessages::PalletOperatingMode` (r:1 w:0) - /// Proof: `BridgeRococoToWestendMessages::PalletOperatingMode` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`) - /// Storage: `BridgeWestendParachain::ImportedParaHeads` (r:1 w:0) - /// Proof: `BridgeWestendParachain::ImportedParaHeads` (`max_values`: Some(64), `max_size`: Some(196), added: 1186, mode: `MaxEncodedLen`) - /// Storage: `BridgeRococoToWestendMessages::OutboundLanes` (r:1 w:1) - /// Proof: `BridgeRococoToWestendMessages::OutboundLanes` (`max_values`: Some(1), `max_size`: Some(44), added: 539, mode: `MaxEncodedLen`) - /// Storage: UNKNOWN KEY `0x6e0a18b62a1de81c5f519181cc611e18` (r:1 w:0) - /// Proof: UNKNOWN KEY `0x6e0a18b62a1de81c5f519181cc611e18` (r:1 w:0) - /// Storage: `BridgeRelayers::RelayerRewards` (r:1 w:1) - /// Proof: `BridgeRelayers::RelayerRewards` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) - fn receive_delivery_proof_for_single_message() -> Weight { - // Proof Size summary in bytes: - // Measured: `482` - // Estimated: `3947` - // Minimum execution time: 31_235_000 picoseconds. - Weight::from_parts(32_051_000, 0) - .saturating_add(Weight::from_parts(0, 3947)) - .saturating_add(T::DbWeight::get().reads(5)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `BridgeRococoToWestendMessages::PalletOperatingMode` (r:1 w:0) - /// Proof: `BridgeRococoToWestendMessages::PalletOperatingMode` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`) - /// Storage: `BridgeWestendParachain::ImportedParaHeads` (r:1 w:0) - /// Proof: `BridgeWestendParachain::ImportedParaHeads` (`max_values`: Some(64), `max_size`: Some(196), added: 1186, mode: `MaxEncodedLen`) - /// Storage: `BridgeRococoToWestendMessages::OutboundLanes` (r:1 w:1) - /// Proof: `BridgeRococoToWestendMessages::OutboundLanes` (`max_values`: Some(1), `max_size`: Some(44), added: 539, mode: `MaxEncodedLen`) - /// Storage: UNKNOWN KEY `0x6e0a18b62a1de81c5f519181cc611e18` (r:1 w:0) - /// Proof: UNKNOWN KEY `0x6e0a18b62a1de81c5f519181cc611e18` (r:1 w:0) - /// Storage: `BridgeRelayers::RelayerRewards` (r:1 w:1) - /// Proof: `BridgeRelayers::RelayerRewards` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) - fn receive_delivery_proof_for_two_messages_by_single_relayer() -> Weight { - // Proof Size summary in bytes: - // Measured: `482` - // Estimated: `3947` - // Minimum execution time: 31_320_000 picoseconds. 
- Weight::from_parts(31_973_000, 0) - .saturating_add(Weight::from_parts(0, 3947)) - .saturating_add(T::DbWeight::get().reads(5)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `BridgeRococoToWestendMessages::PalletOperatingMode` (r:1 w:0) - /// Proof: `BridgeRococoToWestendMessages::PalletOperatingMode` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`) - /// Storage: `BridgeWestendParachain::ImportedParaHeads` (r:1 w:0) - /// Proof: `BridgeWestendParachain::ImportedParaHeads` (`max_values`: Some(64), `max_size`: Some(196), added: 1186, mode: `MaxEncodedLen`) - /// Storage: `BridgeRococoToWestendMessages::OutboundLanes` (r:1 w:1) - /// Proof: `BridgeRococoToWestendMessages::OutboundLanes` (`max_values`: Some(1), `max_size`: Some(44), added: 539, mode: `MaxEncodedLen`) - /// Storage: UNKNOWN KEY `0x6e0a18b62a1de81c5f519181cc611e18` (r:1 w:0) - /// Proof: UNKNOWN KEY `0x6e0a18b62a1de81c5f519181cc611e18` (r:1 w:0) - /// Storage: `BridgeRelayers::RelayerRewards` (r:2 w:2) - /// Proof: `BridgeRelayers::RelayerRewards` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) - fn receive_delivery_proof_for_two_messages_by_two_relayers() -> Weight { - // Proof Size summary in bytes: - // Measured: `482` - // Estimated: `6086` - // Minimum execution time: 33_656_000 picoseconds. - Weight::from_parts(34_779_000, 0) - .saturating_add(Weight::from_parts(0, 6086)) - .saturating_add(T::DbWeight::get().reads(6)) - .saturating_add(T::DbWeight::get().writes(3)) - } - /// Storage: `BridgeRococoToWestendMessages::PalletOperatingMode` (r:1 w:0) - /// Proof: `BridgeRococoToWestendMessages::PalletOperatingMode` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`) - /// Storage: `XcmpQueue::OutboundXcmpStatus` (r:1 w:1) - /// Proof: `XcmpQueue::OutboundXcmpStatus` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `BridgeWestendParachain::ImportedParaHeads` (r:1 w:0) - /// Proof: `BridgeWestendParachain::ImportedParaHeads` (`max_values`: Some(64), `max_size`: Some(196), added: 1186, mode: `MaxEncodedLen`) - /// Storage: `BridgeRococoToWestendMessages::InboundLanes` (r:1 w:1) - /// Proof: `BridgeRococoToWestendMessages::InboundLanes` (`max_values`: None, `max_size`: Some(49180), added: 51655, mode: `MaxEncodedLen`) - /// Storage: `ParachainInfo::ParachainId` (r:1 w:0) - /// Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - /// Storage: `XcmpQueue::DeliveryFeeFactor` (r:1 w:0) - /// Proof: `XcmpQueue::DeliveryFeeFactor` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) - /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) - /// Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) - /// Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `ParachainSystem::RelevantMessagingState` (r:1 w:0) - /// Proof: `ParachainSystem::RelevantMessagingState` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `XcmpQueue::OutboundXcmpMessages` (r:0 w:1) - /// Proof: `XcmpQueue::OutboundXcmpMessages` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// The range of component `i` 
is `[128, 2048]`. - /// The range of component `i` is `[128, 2048]`. - /// The range of component `i` is `[128, 2048]`. - /// The range of component `i` is `[128, 2048]`. - fn receive_single_message_proof_with_dispatch(i: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `772` - // Estimated: `52645` - // Minimum execution time: 61_671_000 picoseconds. - Weight::from_parts(62_656_321, 0) - .saturating_add(Weight::from_parts(0, 52645)) - // Standard Error: 25 - .saturating_add(Weight::from_parts(6_641, 0).saturating_mul(i.into())) - .saturating_add(T::DbWeight::get().reads(10)) - .saturating_add(T::DbWeight::get().writes(4)) - } -} diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_bridge_messages_rococo_to_wococo.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_bridge_messages_rococo_to_wococo.rs deleted file mode 100644 index e2f58cdfad5..00000000000 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_bridge_messages_rococo_to_wococo.rs +++ /dev/null @@ -1,244 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! Autogenerated weights for `pallet_bridge_messages` -//! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-10-26, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-vmdtonbz-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("bridge-hub-rococo-dev")`, DB CACHE: 1024 - -// Executed Command: -// target/production/polkadot-parachain -// benchmark -// pallet -// --steps=50 -// --repeat=20 -// --extrinsic=* -// --wasm-execution=compiled -// --heap-pages=4096 -// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json -// --pallet=pallet_bridge_messages -// --chain=bridge-hub-rococo-dev -// --header=./cumulus/file_header.txt -// --output=./cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/ - -#![cfg_attr(rustfmt, rustfmt_skip)] -#![allow(unused_parens)] -#![allow(unused_imports)] -#![allow(missing_docs)] - -use frame_support::{traits::Get, weights::Weight}; -use core::marker::PhantomData; - -/// Weight functions for `pallet_bridge_messages`. 
-pub struct WeightInfo(PhantomData); -impl pallet_bridge_messages::WeightInfo for WeightInfo { - /// Storage: `BridgeRococoToWococoMessages::PalletOperatingMode` (r:1 w:0) - /// Proof: `BridgeRococoToWococoMessages::PalletOperatingMode` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`) - /// Storage: `XcmpQueue::OutboundXcmpStatus` (r:1 w:0) - /// Proof: `XcmpQueue::OutboundXcmpStatus` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `BridgeWococoParachain::ImportedParaHeads` (r:1 w:0) - /// Proof: `BridgeWococoParachain::ImportedParaHeads` (`max_values`: Some(64), `max_size`: Some(196), added: 1186, mode: `MaxEncodedLen`) - /// Storage: `BridgeRococoToWococoMessages::InboundLanes` (r:1 w:1) - /// Proof: `BridgeRococoToWococoMessages::InboundLanes` (`max_values`: None, `max_size`: Some(49180), added: 51655, mode: `MaxEncodedLen`) - /// Storage: `ParachainInfo::ParachainId` (r:1 w:0) - /// Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - fn receive_single_message_proof() -> Weight { - // Proof Size summary in bytes: - // Measured: `609` - // Estimated: `52645` - // Minimum execution time: 42_407_000 picoseconds. - Weight::from_parts(43_917_000, 0) - .saturating_add(Weight::from_parts(0, 52645)) - .saturating_add(T::DbWeight::get().reads(5)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `BridgeRococoToWococoMessages::PalletOperatingMode` (r:1 w:0) - /// Proof: `BridgeRococoToWococoMessages::PalletOperatingMode` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`) - /// Storage: `XcmpQueue::OutboundXcmpStatus` (r:1 w:0) - /// Proof: `XcmpQueue::OutboundXcmpStatus` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `BridgeWococoParachain::ImportedParaHeads` (r:1 w:0) - /// Proof: `BridgeWococoParachain::ImportedParaHeads` (`max_values`: Some(64), `max_size`: Some(196), added: 1186, mode: `MaxEncodedLen`) - /// Storage: `BridgeRococoToWococoMessages::InboundLanes` (r:1 w:1) - /// Proof: `BridgeRococoToWococoMessages::InboundLanes` (`max_values`: None, `max_size`: Some(49180), added: 51655, mode: `MaxEncodedLen`) - /// Storage: `ParachainInfo::ParachainId` (r:1 w:0) - /// Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - fn receive_two_messages_proof() -> Weight { - // Proof Size summary in bytes: - // Measured: `609` - // Estimated: `52645` - // Minimum execution time: 53_258_000 picoseconds. 
- Weight::from_parts(55_144_000, 0) - .saturating_add(Weight::from_parts(0, 52645)) - .saturating_add(T::DbWeight::get().reads(5)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `BridgeRococoToWococoMessages::PalletOperatingMode` (r:1 w:0) - /// Proof: `BridgeRococoToWococoMessages::PalletOperatingMode` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`) - /// Storage: `XcmpQueue::OutboundXcmpStatus` (r:1 w:0) - /// Proof: `XcmpQueue::OutboundXcmpStatus` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `BridgeWococoParachain::ImportedParaHeads` (r:1 w:0) - /// Proof: `BridgeWococoParachain::ImportedParaHeads` (`max_values`: Some(64), `max_size`: Some(196), added: 1186, mode: `MaxEncodedLen`) - /// Storage: `BridgeRococoToWococoMessages::InboundLanes` (r:1 w:1) - /// Proof: `BridgeRococoToWococoMessages::InboundLanes` (`max_values`: None, `max_size`: Some(49180), added: 51655, mode: `MaxEncodedLen`) - /// Storage: `ParachainInfo::ParachainId` (r:1 w:0) - /// Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - fn receive_single_message_proof_with_outbound_lane_state() -> Weight { - // Proof Size summary in bytes: - // Measured: `609` - // Estimated: `52645` - // Minimum execution time: 47_950_000 picoseconds. - Weight::from_parts(49_315_000, 0) - .saturating_add(Weight::from_parts(0, 52645)) - .saturating_add(T::DbWeight::get().reads(5)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `BridgeRococoToWococoMessages::PalletOperatingMode` (r:1 w:0) - /// Proof: `BridgeRococoToWococoMessages::PalletOperatingMode` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`) - /// Storage: `XcmpQueue::OutboundXcmpStatus` (r:1 w:0) - /// Proof: `XcmpQueue::OutboundXcmpStatus` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `BridgeWococoParachain::ImportedParaHeads` (r:1 w:0) - /// Proof: `BridgeWococoParachain::ImportedParaHeads` (`max_values`: Some(64), `max_size`: Some(196), added: 1186, mode: `MaxEncodedLen`) - /// Storage: `BridgeRococoToWococoMessages::InboundLanes` (r:1 w:1) - /// Proof: `BridgeRococoToWococoMessages::InboundLanes` (`max_values`: None, `max_size`: Some(49180), added: 51655, mode: `MaxEncodedLen`) - fn receive_single_message_proof_1_kb() -> Weight { - // Proof Size summary in bytes: - // Measured: `577` - // Estimated: `52645` - // Minimum execution time: 41_383_000 picoseconds. 
- Weight::from_parts(42_898_000, 0) - .saturating_add(Weight::from_parts(0, 52645)) - .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `BridgeRococoToWococoMessages::PalletOperatingMode` (r:1 w:0) - /// Proof: `BridgeRococoToWococoMessages::PalletOperatingMode` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`) - /// Storage: `XcmpQueue::OutboundXcmpStatus` (r:1 w:0) - /// Proof: `XcmpQueue::OutboundXcmpStatus` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `BridgeWococoParachain::ImportedParaHeads` (r:1 w:0) - /// Proof: `BridgeWococoParachain::ImportedParaHeads` (`max_values`: Some(64), `max_size`: Some(196), added: 1186, mode: `MaxEncodedLen`) - /// Storage: `BridgeRococoToWococoMessages::InboundLanes` (r:1 w:1) - /// Proof: `BridgeRococoToWococoMessages::InboundLanes` (`max_values`: None, `max_size`: Some(49180), added: 51655, mode: `MaxEncodedLen`) - fn receive_single_message_proof_16_kb() -> Weight { - // Proof Size summary in bytes: - // Measured: `577` - // Estimated: `52645` - // Minimum execution time: 72_118_000 picoseconds. - Weight::from_parts(74_643_000, 0) - .saturating_add(Weight::from_parts(0, 52645)) - .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `BridgeRococoToWococoMessages::PalletOperatingMode` (r:1 w:0) - /// Proof: `BridgeRococoToWococoMessages::PalletOperatingMode` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`) - /// Storage: `BridgeWococoParachain::ImportedParaHeads` (r:1 w:0) - /// Proof: `BridgeWococoParachain::ImportedParaHeads` (`max_values`: Some(64), `max_size`: Some(196), added: 1186, mode: `MaxEncodedLen`) - /// Storage: `BridgeRococoToWococoMessages::OutboundLanes` (r:1 w:1) - /// Proof: `BridgeRococoToWococoMessages::OutboundLanes` (`max_values`: Some(1), `max_size`: Some(44), added: 539, mode: `MaxEncodedLen`) - /// Storage: UNKNOWN KEY `0x6e0a18b62a1de81c5f519181cc611e18` (r:1 w:0) - /// Proof: UNKNOWN KEY `0x6e0a18b62a1de81c5f519181cc611e18` (r:1 w:0) - /// Storage: `BridgeRelayers::RelayerRewards` (r:1 w:1) - /// Proof: `BridgeRelayers::RelayerRewards` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) - fn receive_delivery_proof_for_single_message() -> Weight { - // Proof Size summary in bytes: - // Measured: `448` - // Estimated: `3913` - // Minimum execution time: 30_993_000 picoseconds. 
- Weight::from_parts(31_793_000, 0) - .saturating_add(Weight::from_parts(0, 3913)) - .saturating_add(T::DbWeight::get().reads(5)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `BridgeRococoToWococoMessages::PalletOperatingMode` (r:1 w:0) - /// Proof: `BridgeRococoToWococoMessages::PalletOperatingMode` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`) - /// Storage: `BridgeWococoParachain::ImportedParaHeads` (r:1 w:0) - /// Proof: `BridgeWococoParachain::ImportedParaHeads` (`max_values`: Some(64), `max_size`: Some(196), added: 1186, mode: `MaxEncodedLen`) - /// Storage: `BridgeRococoToWococoMessages::OutboundLanes` (r:1 w:1) - /// Proof: `BridgeRococoToWococoMessages::OutboundLanes` (`max_values`: Some(1), `max_size`: Some(44), added: 539, mode: `MaxEncodedLen`) - /// Storage: UNKNOWN KEY `0x6e0a18b62a1de81c5f519181cc611e18` (r:1 w:0) - /// Proof: UNKNOWN KEY `0x6e0a18b62a1de81c5f519181cc611e18` (r:1 w:0) - /// Storage: `BridgeRelayers::RelayerRewards` (r:1 w:1) - /// Proof: `BridgeRelayers::RelayerRewards` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) - fn receive_delivery_proof_for_two_messages_by_single_relayer() -> Weight { - // Proof Size summary in bytes: - // Measured: `448` - // Estimated: `3913` - // Minimum execution time: 30_894_000 picoseconds. - Weight::from_parts(31_925_000, 0) - .saturating_add(Weight::from_parts(0, 3913)) - .saturating_add(T::DbWeight::get().reads(5)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `BridgeRococoToWococoMessages::PalletOperatingMode` (r:1 w:0) - /// Proof: `BridgeRococoToWococoMessages::PalletOperatingMode` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`) - /// Storage: `BridgeWococoParachain::ImportedParaHeads` (r:1 w:0) - /// Proof: `BridgeWococoParachain::ImportedParaHeads` (`max_values`: Some(64), `max_size`: Some(196), added: 1186, mode: `MaxEncodedLen`) - /// Storage: `BridgeRococoToWococoMessages::OutboundLanes` (r:1 w:1) - /// Proof: `BridgeRococoToWococoMessages::OutboundLanes` (`max_values`: Some(1), `max_size`: Some(44), added: 539, mode: `MaxEncodedLen`) - /// Storage: UNKNOWN KEY `0x6e0a18b62a1de81c5f519181cc611e18` (r:1 w:0) - /// Proof: UNKNOWN KEY `0x6e0a18b62a1de81c5f519181cc611e18` (r:1 w:0) - /// Storage: `BridgeRelayers::RelayerRewards` (r:2 w:2) - /// Proof: `BridgeRelayers::RelayerRewards` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) - fn receive_delivery_proof_for_two_messages_by_two_relayers() -> Weight { - // Proof Size summary in bytes: - // Measured: `448` - // Estimated: `6086` - // Minimum execution time: 33_804_000 picoseconds. 
- Weight::from_parts(34_560_000, 0) - .saturating_add(Weight::from_parts(0, 6086)) - .saturating_add(T::DbWeight::get().reads(6)) - .saturating_add(T::DbWeight::get().writes(3)) - } - /// Storage: `BridgeRococoToWococoMessages::PalletOperatingMode` (r:1 w:0) - /// Proof: `BridgeRococoToWococoMessages::PalletOperatingMode` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`) - /// Storage: `XcmpQueue::OutboundXcmpStatus` (r:1 w:1) - /// Proof: `XcmpQueue::OutboundXcmpStatus` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `BridgeWococoParachain::ImportedParaHeads` (r:1 w:0) - /// Proof: `BridgeWococoParachain::ImportedParaHeads` (`max_values`: Some(64), `max_size`: Some(196), added: 1186, mode: `MaxEncodedLen`) - /// Storage: `BridgeRococoToWococoMessages::InboundLanes` (r:1 w:1) - /// Proof: `BridgeRococoToWococoMessages::InboundLanes` (`max_values`: None, `max_size`: Some(49180), added: 51655, mode: `MaxEncodedLen`) - /// Storage: `ParachainInfo::ParachainId` (r:1 w:0) - /// Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - /// Storage: `XcmpQueue::DeliveryFeeFactor` (r:1 w:0) - /// Proof: `XcmpQueue::DeliveryFeeFactor` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) - /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) - /// Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) - /// Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `ParachainSystem::RelevantMessagingState` (r:1 w:0) - /// Proof: `ParachainSystem::RelevantMessagingState` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `XcmpQueue::OutboundXcmpMessages` (r:0 w:1) - /// Proof: `XcmpQueue::OutboundXcmpMessages` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// The range of component `i` is `[128, 2048]`. - /// The range of component `i` is `[128, 2048]`. - /// The range of component `i` is `[128, 2048]`. - /// The range of component `i` is `[128, 2048]`. - fn receive_single_message_proof_with_dispatch(i: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `811` - // Estimated: `52645` - // Minimum execution time: 62_616_000 picoseconds. - Weight::from_parts(64_073_891, 0) - .saturating_add(Weight::from_parts(0, 52645)) - // Standard Error: 43 - .saturating_add(Weight::from_parts(6_525, 0).saturating_mul(i.into())) - .saturating_add(T::DbWeight::get().reads(10)) - .saturating_add(T::DbWeight::get().writes(4)) - } -} diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_bridge_messages_wococo_to_rococo.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_bridge_messages_wococo_to_rococo.rs deleted file mode 100644 index d9c0fd15468..00000000000 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_bridge_messages_wococo_to_rococo.rs +++ /dev/null @@ -1,244 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! Autogenerated weights for `pallet_bridge_messages` -//! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-10-26, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-vmdtonbz-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("bridge-hub-rococo-dev")`, DB CACHE: 1024 - -// Executed Command: -// target/production/polkadot-parachain -// benchmark -// pallet -// --steps=50 -// --repeat=20 -// --extrinsic=* -// --wasm-execution=compiled -// --heap-pages=4096 -// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json -// --pallet=pallet_bridge_messages -// --chain=bridge-hub-rococo-dev -// --header=./cumulus/file_header.txt -// --output=./cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/ - -#![cfg_attr(rustfmt, rustfmt_skip)] -#![allow(unused_parens)] -#![allow(unused_imports)] -#![allow(missing_docs)] - -use frame_support::{traits::Get, weights::Weight}; -use core::marker::PhantomData; - -/// Weight functions for `pallet_bridge_messages`. -pub struct WeightInfo(PhantomData); -impl pallet_bridge_messages::WeightInfo for WeightInfo { - /// Storage: `BridgeWococoToRococoMessages::PalletOperatingMode` (r:1 w:0) - /// Proof: `BridgeWococoToRococoMessages::PalletOperatingMode` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`) - /// Storage: `XcmpQueue::OutboundXcmpStatus` (r:1 w:0) - /// Proof: `XcmpQueue::OutboundXcmpStatus` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `BridgeRococoParachain::ImportedParaHeads` (r:1 w:0) - /// Proof: `BridgeRococoParachain::ImportedParaHeads` (`max_values`: Some(64), `max_size`: Some(196), added: 1186, mode: `MaxEncodedLen`) - /// Storage: `BridgeWococoToRococoMessages::InboundLanes` (r:1 w:1) - /// Proof: `BridgeWococoToRococoMessages::InboundLanes` (`max_values`: None, `max_size`: Some(49180), added: 51655, mode: `MaxEncodedLen`) - /// Storage: `ParachainInfo::ParachainId` (r:1 w:0) - /// Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - fn receive_single_message_proof() -> Weight { - // Proof Size summary in bytes: - // Measured: `537` - // Estimated: `52645` - // Minimum execution time: 42_086_000 picoseconds. 
- Weight::from_parts(42_833_000, 0) - .saturating_add(Weight::from_parts(0, 52645)) - .saturating_add(T::DbWeight::get().reads(5)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `BridgeWococoToRococoMessages::PalletOperatingMode` (r:1 w:0) - /// Proof: `BridgeWococoToRococoMessages::PalletOperatingMode` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`) - /// Storage: `XcmpQueue::OutboundXcmpStatus` (r:1 w:0) - /// Proof: `XcmpQueue::OutboundXcmpStatus` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `BridgeRococoParachain::ImportedParaHeads` (r:1 w:0) - /// Proof: `BridgeRococoParachain::ImportedParaHeads` (`max_values`: Some(64), `max_size`: Some(196), added: 1186, mode: `MaxEncodedLen`) - /// Storage: `BridgeWococoToRococoMessages::InboundLanes` (r:1 w:1) - /// Proof: `BridgeWococoToRococoMessages::InboundLanes` (`max_values`: None, `max_size`: Some(49180), added: 51655, mode: `MaxEncodedLen`) - /// Storage: `ParachainInfo::ParachainId` (r:1 w:0) - /// Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - fn receive_two_messages_proof() -> Weight { - // Proof Size summary in bytes: - // Measured: `537` - // Estimated: `52645` - // Minimum execution time: 51_927_000 picoseconds. - Weight::from_parts(53_847_000, 0) - .saturating_add(Weight::from_parts(0, 52645)) - .saturating_add(T::DbWeight::get().reads(5)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `BridgeWococoToRococoMessages::PalletOperatingMode` (r:1 w:0) - /// Proof: `BridgeWococoToRococoMessages::PalletOperatingMode` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`) - /// Storage: `XcmpQueue::OutboundXcmpStatus` (r:1 w:0) - /// Proof: `XcmpQueue::OutboundXcmpStatus` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `BridgeRococoParachain::ImportedParaHeads` (r:1 w:0) - /// Proof: `BridgeRococoParachain::ImportedParaHeads` (`max_values`: Some(64), `max_size`: Some(196), added: 1186, mode: `MaxEncodedLen`) - /// Storage: `BridgeWococoToRococoMessages::InboundLanes` (r:1 w:1) - /// Proof: `BridgeWococoToRococoMessages::InboundLanes` (`max_values`: None, `max_size`: Some(49180), added: 51655, mode: `MaxEncodedLen`) - /// Storage: `ParachainInfo::ParachainId` (r:1 w:0) - /// Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - fn receive_single_message_proof_with_outbound_lane_state() -> Weight { - // Proof Size summary in bytes: - // Measured: `537` - // Estimated: `52645` - // Minimum execution time: 47_218_000 picoseconds. 
- Weight::from_parts(48_380_000, 0) - .saturating_add(Weight::from_parts(0, 52645)) - .saturating_add(T::DbWeight::get().reads(5)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `BridgeWococoToRococoMessages::PalletOperatingMode` (r:1 w:0) - /// Proof: `BridgeWococoToRococoMessages::PalletOperatingMode` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`) - /// Storage: `XcmpQueue::OutboundXcmpStatus` (r:1 w:0) - /// Proof: `XcmpQueue::OutboundXcmpStatus` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `BridgeRococoParachain::ImportedParaHeads` (r:1 w:0) - /// Proof: `BridgeRococoParachain::ImportedParaHeads` (`max_values`: Some(64), `max_size`: Some(196), added: 1186, mode: `MaxEncodedLen`) - /// Storage: `BridgeWococoToRococoMessages::InboundLanes` (r:1 w:1) - /// Proof: `BridgeWococoToRococoMessages::InboundLanes` (`max_values`: None, `max_size`: Some(49180), added: 51655, mode: `MaxEncodedLen`) - fn receive_single_message_proof_1_kb() -> Weight { - // Proof Size summary in bytes: - // Measured: `505` - // Estimated: `52645` - // Minimum execution time: 40_585_000 picoseconds. - Weight::from_parts(41_714_000, 0) - .saturating_add(Weight::from_parts(0, 52645)) - .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `BridgeWococoToRococoMessages::PalletOperatingMode` (r:1 w:0) - /// Proof: `BridgeWococoToRococoMessages::PalletOperatingMode` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`) - /// Storage: `XcmpQueue::OutboundXcmpStatus` (r:1 w:0) - /// Proof: `XcmpQueue::OutboundXcmpStatus` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `BridgeRococoParachain::ImportedParaHeads` (r:1 w:0) - /// Proof: `BridgeRococoParachain::ImportedParaHeads` (`max_values`: Some(64), `max_size`: Some(196), added: 1186, mode: `MaxEncodedLen`) - /// Storage: `BridgeWococoToRococoMessages::InboundLanes` (r:1 w:1) - /// Proof: `BridgeWococoToRococoMessages::InboundLanes` (`max_values`: None, `max_size`: Some(49180), added: 51655, mode: `MaxEncodedLen`) - fn receive_single_message_proof_16_kb() -> Weight { - // Proof Size summary in bytes: - // Measured: `505` - // Estimated: `52645` - // Minimum execution time: 71_197_000 picoseconds. 
- Weight::from_parts(73_983_000, 0) - .saturating_add(Weight::from_parts(0, 52645)) - .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `BridgeWococoToRococoMessages::PalletOperatingMode` (r:1 w:0) - /// Proof: `BridgeWococoToRococoMessages::PalletOperatingMode` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`) - /// Storage: `BridgeRococoParachain::ImportedParaHeads` (r:1 w:0) - /// Proof: `BridgeRococoParachain::ImportedParaHeads` (`max_values`: Some(64), `max_size`: Some(196), added: 1186, mode: `MaxEncodedLen`) - /// Storage: `BridgeWococoToRococoMessages::OutboundLanes` (r:1 w:1) - /// Proof: `BridgeWococoToRococoMessages::OutboundLanes` (`max_values`: Some(1), `max_size`: Some(44), added: 539, mode: `MaxEncodedLen`) - /// Storage: UNKNOWN KEY `0x6e0a18b62a1de81c5f519181cc611e18` (r:1 w:0) - /// Proof: UNKNOWN KEY `0x6e0a18b62a1de81c5f519181cc611e18` (r:1 w:0) - /// Storage: `BridgeRelayers::RelayerRewards` (r:1 w:1) - /// Proof: `BridgeRelayers::RelayerRewards` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) - fn receive_delivery_proof_for_single_message() -> Weight { - // Proof Size summary in bytes: - // Measured: `376` - // Estimated: `3841` - // Minimum execution time: 30_823_000 picoseconds. - Weight::from_parts(31_501_000, 0) - .saturating_add(Weight::from_parts(0, 3841)) - .saturating_add(T::DbWeight::get().reads(5)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `BridgeWococoToRococoMessages::PalletOperatingMode` (r:1 w:0) - /// Proof: `BridgeWococoToRococoMessages::PalletOperatingMode` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`) - /// Storage: `BridgeRococoParachain::ImportedParaHeads` (r:1 w:0) - /// Proof: `BridgeRococoParachain::ImportedParaHeads` (`max_values`: Some(64), `max_size`: Some(196), added: 1186, mode: `MaxEncodedLen`) - /// Storage: `BridgeWococoToRococoMessages::OutboundLanes` (r:1 w:1) - /// Proof: `BridgeWococoToRococoMessages::OutboundLanes` (`max_values`: Some(1), `max_size`: Some(44), added: 539, mode: `MaxEncodedLen`) - /// Storage: UNKNOWN KEY `0x6e0a18b62a1de81c5f519181cc611e18` (r:1 w:0) - /// Proof: UNKNOWN KEY `0x6e0a18b62a1de81c5f519181cc611e18` (r:1 w:0) - /// Storage: `BridgeRelayers::RelayerRewards` (r:1 w:1) - /// Proof: `BridgeRelayers::RelayerRewards` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) - fn receive_delivery_proof_for_two_messages_by_single_relayer() -> Weight { - // Proof Size summary in bytes: - // Measured: `376` - // Estimated: `3841` - // Minimum execution time: 30_854_000 picoseconds. 
- Weight::from_parts(31_663_000, 0) - .saturating_add(Weight::from_parts(0, 3841)) - .saturating_add(T::DbWeight::get().reads(5)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `BridgeWococoToRococoMessages::PalletOperatingMode` (r:1 w:0) - /// Proof: `BridgeWococoToRococoMessages::PalletOperatingMode` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`) - /// Storage: `BridgeRococoParachain::ImportedParaHeads` (r:1 w:0) - /// Proof: `BridgeRococoParachain::ImportedParaHeads` (`max_values`: Some(64), `max_size`: Some(196), added: 1186, mode: `MaxEncodedLen`) - /// Storage: `BridgeWococoToRococoMessages::OutboundLanes` (r:1 w:1) - /// Proof: `BridgeWococoToRococoMessages::OutboundLanes` (`max_values`: Some(1), `max_size`: Some(44), added: 539, mode: `MaxEncodedLen`) - /// Storage: UNKNOWN KEY `0x6e0a18b62a1de81c5f519181cc611e18` (r:1 w:0) - /// Proof: UNKNOWN KEY `0x6e0a18b62a1de81c5f519181cc611e18` (r:1 w:0) - /// Storage: `BridgeRelayers::RelayerRewards` (r:2 w:2) - /// Proof: `BridgeRelayers::RelayerRewards` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) - fn receive_delivery_proof_for_two_messages_by_two_relayers() -> Weight { - // Proof Size summary in bytes: - // Measured: `376` - // Estimated: `6086` - // Minimum execution time: 33_463_000 picoseconds. - Weight::from_parts(34_290_000, 0) - .saturating_add(Weight::from_parts(0, 6086)) - .saturating_add(T::DbWeight::get().reads(6)) - .saturating_add(T::DbWeight::get().writes(3)) - } - /// Storage: `BridgeWococoToRococoMessages::PalletOperatingMode` (r:1 w:0) - /// Proof: `BridgeWococoToRococoMessages::PalletOperatingMode` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`) - /// Storage: `XcmpQueue::OutboundXcmpStatus` (r:1 w:1) - /// Proof: `XcmpQueue::OutboundXcmpStatus` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `BridgeRococoParachain::ImportedParaHeads` (r:1 w:0) - /// Proof: `BridgeRococoParachain::ImportedParaHeads` (`max_values`: Some(64), `max_size`: Some(196), added: 1186, mode: `MaxEncodedLen`) - /// Storage: `BridgeWococoToRococoMessages::InboundLanes` (r:1 w:1) - /// Proof: `BridgeWococoToRococoMessages::InboundLanes` (`max_values`: None, `max_size`: Some(49180), added: 51655, mode: `MaxEncodedLen`) - /// Storage: `ParachainInfo::ParachainId` (r:1 w:0) - /// Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - /// Storage: `XcmpQueue::DeliveryFeeFactor` (r:1 w:0) - /// Proof: `XcmpQueue::DeliveryFeeFactor` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) - /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) - /// Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) - /// Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `ParachainSystem::RelevantMessagingState` (r:1 w:0) - /// Proof: `ParachainSystem::RelevantMessagingState` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `XcmpQueue::OutboundXcmpMessages` (r:0 w:1) - /// Proof: `XcmpQueue::OutboundXcmpMessages` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// The range of component `i` is `[128, 
2048]`. - /// The range of component `i` is `[128, 2048]`. - /// The range of component `i` is `[128, 2048]`. - /// The range of component `i` is `[128, 2048]`. - fn receive_single_message_proof_with_dispatch(i: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `739` - // Estimated: `52645` - // Minimum execution time: 61_523_000 picoseconds. - Weight::from_parts(62_686_055, 0) - .saturating_add(Weight::from_parts(0, 52645)) - // Standard Error: 26 - .saturating_add(Weight::from_parts(6_563, 0).saturating_mul(i.into())) - .saturating_add(T::DbWeight::get().reads(10)) - .saturating_add(T::DbWeight::get().writes(4)) - } -} diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_bridge_parachains.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_bridge_parachains.rs index bd7384a05fe..5c7c4a63682 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_bridge_parachains.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_bridge_parachains.rs @@ -1,40 +1,41 @@ // Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 +// This file is part of Cumulus. -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . //! Autogenerated weights for `pallet_bridge_parachains` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-05-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-11-14, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `bm4`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` -//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("bridge-hub-rococo-dev"), DB CACHE: 1024 +//! HOSTNAME: `runner-yprdrvc7-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! 
WASM-EXECUTION: `Compiled`, CHAIN: `Some("bridge-hub-rococo-dev")`, DB CACHE: 1024 // Executed Command: -// ./artifacts/polkadot-parachain +// target/production/polkadot-parachain // benchmark // pallet -// --chain=bridge-hub-rococo-dev -// --execution=wasm -// --wasm-execution=compiled -// --pallet=pallet_bridge_parachains -// --extrinsic=* // --steps=50 // --repeat=20 -// --json -// --header=./file_header.txt -// --output=./parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_bridge_parachains.rs +// --extrinsic=* +// --wasm-execution=compiled +// --heap-pages=4096 +// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json +// --pallet=pallet_bridge_parachains +// --chain=bridge-hub-rococo-dev +// --header=./cumulus/file_header.txt +// --output=./cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/ #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -47,64 +48,63 @@ use core::marker::PhantomData; /// Weight functions for `pallet_bridge_parachains`. pub struct WeightInfo(PhantomData); impl pallet_bridge_parachains::WeightInfo for WeightInfo { - /// Storage: BridgeWococoParachain PalletOperatingMode (r:1 w:0) - /// Proof: BridgeWococoParachain PalletOperatingMode (max_values: Some(1), max_size: Some(1), added: 496, mode: MaxEncodedLen) - /// Storage: BridgeWococoGrandpa ImportedHeaders (r:1 w:0) - /// Proof: BridgeWococoGrandpa ImportedHeaders (max_values: Some(1024), max_size: Some(68), added: 1553, mode: MaxEncodedLen) - /// Storage: BridgeWococoParachain ParasInfo (r:1 w:1) - /// Proof: BridgeWococoParachain ParasInfo (max_values: Some(1), max_size: Some(60), added: 555, mode: MaxEncodedLen) - /// Storage: BridgeWococoParachain ImportedParaHashes (r:1 w:1) - /// Proof: BridgeWococoParachain ImportedParaHashes (max_values: Some(64), max_size: Some(64), added: 1054, mode: MaxEncodedLen) - /// Storage: BridgeWococoParachain ImportedParaHeads (r:0 w:1) - /// Proof: BridgeWococoParachain ImportedParaHeads (max_values: Some(64), max_size: Some(196), added: 1186, mode: MaxEncodedLen) - /// The range of component `p` is `[1, 2]`. + /// Storage: `BridgeWestendParachains::PalletOperatingMode` (r:1 w:0) + /// Proof: `BridgeWestendParachains::PalletOperatingMode` (`max_values`: Some(1), `max_size`: Some(1), added: 496, mode: `MaxEncodedLen`) + /// Storage: `BridgeWestendGrandpa::ImportedHeaders` (r:1 w:0) + /// Proof: `BridgeWestendGrandpa::ImportedHeaders` (`max_values`: Some(1024), `max_size`: Some(68), added: 1553, mode: `MaxEncodedLen`) + /// Storage: `BridgeWestendParachains::ParasInfo` (r:1 w:1) + /// Proof: `BridgeWestendParachains::ParasInfo` (`max_values`: Some(1), `max_size`: Some(60), added: 555, mode: `MaxEncodedLen`) + /// Storage: `BridgeWestendParachains::ImportedParaHashes` (r:1 w:1) + /// Proof: `BridgeWestendParachains::ImportedParaHashes` (`max_values`: Some(64), `max_size`: Some(64), added: 1054, mode: `MaxEncodedLen`) + /// Storage: `BridgeWestendParachains::ImportedParaHeads` (r:0 w:1) + /// Proof: `BridgeWestendParachains::ImportedParaHeads` (`max_values`: Some(64), `max_size`: Some(196), added: 1186, mode: `MaxEncodedLen`) /// The range of component `p` is `[1, 2]`. fn submit_parachain_heads_with_n_parachains(_p: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `367` + // Measured: `434` // Estimated: `2543` - // Minimum execution time: 34_759_000 picoseconds. - Weight::from_parts(35_709_034, 0) + // Minimum execution time: 31_987_000 picoseconds. 
+ Weight::from_parts(33_060_534, 0) .saturating_add(Weight::from_parts(0, 2543)) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(3)) } - /// Storage: BridgeWococoParachain PalletOperatingMode (r:1 w:0) - /// Proof: BridgeWococoParachain PalletOperatingMode (max_values: Some(1), max_size: Some(1), added: 496, mode: MaxEncodedLen) - /// Storage: BridgeWococoGrandpa ImportedHeaders (r:1 w:0) - /// Proof: BridgeWococoGrandpa ImportedHeaders (max_values: Some(1024), max_size: Some(68), added: 1553, mode: MaxEncodedLen) - /// Storage: BridgeWococoParachain ParasInfo (r:1 w:1) - /// Proof: BridgeWococoParachain ParasInfo (max_values: Some(1), max_size: Some(60), added: 555, mode: MaxEncodedLen) - /// Storage: BridgeWococoParachain ImportedParaHashes (r:1 w:1) - /// Proof: BridgeWococoParachain ImportedParaHashes (max_values: Some(64), max_size: Some(64), added: 1054, mode: MaxEncodedLen) - /// Storage: BridgeWococoParachain ImportedParaHeads (r:0 w:1) - /// Proof: BridgeWococoParachain ImportedParaHeads (max_values: Some(64), max_size: Some(196), added: 1186, mode: MaxEncodedLen) + /// Storage: `BridgeWestendParachains::PalletOperatingMode` (r:1 w:0) + /// Proof: `BridgeWestendParachains::PalletOperatingMode` (`max_values`: Some(1), `max_size`: Some(1), added: 496, mode: `MaxEncodedLen`) + /// Storage: `BridgeWestendGrandpa::ImportedHeaders` (r:1 w:0) + /// Proof: `BridgeWestendGrandpa::ImportedHeaders` (`max_values`: Some(1024), `max_size`: Some(68), added: 1553, mode: `MaxEncodedLen`) + /// Storage: `BridgeWestendParachains::ParasInfo` (r:1 w:1) + /// Proof: `BridgeWestendParachains::ParasInfo` (`max_values`: Some(1), `max_size`: Some(60), added: 555, mode: `MaxEncodedLen`) + /// Storage: `BridgeWestendParachains::ImportedParaHashes` (r:1 w:1) + /// Proof: `BridgeWestendParachains::ImportedParaHashes` (`max_values`: Some(64), `max_size`: Some(64), added: 1054, mode: `MaxEncodedLen`) + /// Storage: `BridgeWestendParachains::ImportedParaHeads` (r:0 w:1) + /// Proof: `BridgeWestendParachains::ImportedParaHeads` (`max_values`: Some(64), `max_size`: Some(196), added: 1186, mode: `MaxEncodedLen`) fn submit_parachain_heads_with_1kb_proof() -> Weight { // Proof Size summary in bytes: - // Measured: `367` + // Measured: `434` // Estimated: `2543` - // Minimum execution time: 36_005_000 picoseconds. - Weight::from_parts(36_492_000, 0) + // Minimum execution time: 33_360_000 picoseconds. 
+ Weight::from_parts(34_182_000, 0) .saturating_add(Weight::from_parts(0, 2543)) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(3)) } - /// Storage: BridgeWococoParachain PalletOperatingMode (r:1 w:0) - /// Proof: BridgeWococoParachain PalletOperatingMode (max_values: Some(1), max_size: Some(1), added: 496, mode: MaxEncodedLen) - /// Storage: BridgeWococoGrandpa ImportedHeaders (r:1 w:0) - /// Proof: BridgeWococoGrandpa ImportedHeaders (max_values: Some(1024), max_size: Some(68), added: 1553, mode: MaxEncodedLen) - /// Storage: BridgeWococoParachain ParasInfo (r:1 w:1) - /// Proof: BridgeWococoParachain ParasInfo (max_values: Some(1), max_size: Some(60), added: 555, mode: MaxEncodedLen) - /// Storage: BridgeWococoParachain ImportedParaHashes (r:1 w:1) - /// Proof: BridgeWococoParachain ImportedParaHashes (max_values: Some(64), max_size: Some(64), added: 1054, mode: MaxEncodedLen) - /// Storage: BridgeWococoParachain ImportedParaHeads (r:0 w:1) - /// Proof: BridgeWococoParachain ImportedParaHeads (max_values: Some(64), max_size: Some(196), added: 1186, mode: MaxEncodedLen) + /// Storage: `BridgeWestendParachains::PalletOperatingMode` (r:1 w:0) + /// Proof: `BridgeWestendParachains::PalletOperatingMode` (`max_values`: Some(1), `max_size`: Some(1), added: 496, mode: `MaxEncodedLen`) + /// Storage: `BridgeWestendGrandpa::ImportedHeaders` (r:1 w:0) + /// Proof: `BridgeWestendGrandpa::ImportedHeaders` (`max_values`: Some(1024), `max_size`: Some(68), added: 1553, mode: `MaxEncodedLen`) + /// Storage: `BridgeWestendParachains::ParasInfo` (r:1 w:1) + /// Proof: `BridgeWestendParachains::ParasInfo` (`max_values`: Some(1), `max_size`: Some(60), added: 555, mode: `MaxEncodedLen`) + /// Storage: `BridgeWestendParachains::ImportedParaHashes` (r:1 w:1) + /// Proof: `BridgeWestendParachains::ImportedParaHashes` (`max_values`: Some(64), `max_size`: Some(64), added: 1054, mode: `MaxEncodedLen`) + /// Storage: `BridgeWestendParachains::ImportedParaHeads` (r:0 w:1) + /// Proof: `BridgeWestendParachains::ImportedParaHeads` (`max_values`: Some(64), `max_size`: Some(196), added: 1186, mode: `MaxEncodedLen`) fn submit_parachain_heads_with_16kb_proof() -> Weight { // Proof Size summary in bytes: - // Measured: `367` + // Measured: `434` // Estimated: `2543` - // Minimum execution time: 62_374_000 picoseconds. - Weight::from_parts(62_977_000, 0) + // Minimum execution time: 65_246_000 picoseconds. + Weight::from_parts(65_985_000, 0) .saturating_add(Weight::from_parts(0, 2543)) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(3)) diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_bridge_parachains_within_rococo.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_bridge_parachains_within_rococo.rs deleted file mode 100644 index e36bbcca42e..00000000000 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_bridge_parachains_within_rococo.rs +++ /dev/null @@ -1,113 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! Autogenerated weights for `pallet_bridge_parachains` -//! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-10-26, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-vmdtonbz-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("bridge-hub-rococo-dev")`, DB CACHE: 1024 - -// Executed Command: -// target/production/polkadot-parachain -// benchmark -// pallet -// --steps=50 -// --repeat=20 -// --extrinsic=* -// --wasm-execution=compiled -// --heap-pages=4096 -// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json -// --pallet=pallet_bridge_parachains -// --chain=bridge-hub-rococo-dev -// --header=./cumulus/file_header.txt -// --output=./cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/ - -#![cfg_attr(rustfmt, rustfmt_skip)] -#![allow(unused_parens)] -#![allow(unused_imports)] -#![allow(missing_docs)] - -use frame_support::{traits::Get, weights::Weight}; -use core::marker::PhantomData; - -/// Weight functions for `pallet_bridge_parachains`. -pub struct WeightInfo(PhantomData); -impl pallet_bridge_parachains::WeightInfo for WeightInfo { - /// Storage: `BridgeRococoParachain::PalletOperatingMode` (r:1 w:0) - /// Proof: `BridgeRococoParachain::PalletOperatingMode` (`max_values`: Some(1), `max_size`: Some(1), added: 496, mode: `MaxEncodedLen`) - /// Storage: `BridgeRococoGrandpa::ImportedHeaders` (r:1 w:0) - /// Proof: `BridgeRococoGrandpa::ImportedHeaders` (`max_values`: Some(1024), `max_size`: Some(68), added: 1553, mode: `MaxEncodedLen`) - /// Storage: `BridgeRococoParachain::ParasInfo` (r:1 w:1) - /// Proof: `BridgeRococoParachain::ParasInfo` (`max_values`: Some(1), `max_size`: Some(60), added: 555, mode: `MaxEncodedLen`) - /// Storage: `BridgeRococoParachain::ImportedParaHashes` (r:1 w:1) - /// Proof: `BridgeRococoParachain::ImportedParaHashes` (`max_values`: Some(64), `max_size`: Some(64), added: 1054, mode: `MaxEncodedLen`) - /// Storage: `BridgeRococoParachain::ImportedParaHeads` (r:0 w:1) - /// Proof: `BridgeRococoParachain::ImportedParaHeads` (`max_values`: Some(64), `max_size`: Some(196), added: 1186, mode: `MaxEncodedLen`) - /// The range of component `p` is `[1, 2]`. - /// The range of component `p` is `[1, 2]`. - /// The range of component `p` is `[1, 2]`. - fn submit_parachain_heads_with_n_parachains(_p: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `367` - // Estimated: `2543` - // Minimum execution time: 31_241_000 picoseconds. 
- Weight::from_parts(32_488_584, 0) - .saturating_add(Weight::from_parts(0, 2543)) - .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(3)) - } - /// Storage: `BridgeRococoParachain::PalletOperatingMode` (r:1 w:0) - /// Proof: `BridgeRococoParachain::PalletOperatingMode` (`max_values`: Some(1), `max_size`: Some(1), added: 496, mode: `MaxEncodedLen`) - /// Storage: `BridgeRococoGrandpa::ImportedHeaders` (r:1 w:0) - /// Proof: `BridgeRococoGrandpa::ImportedHeaders` (`max_values`: Some(1024), `max_size`: Some(68), added: 1553, mode: `MaxEncodedLen`) - /// Storage: `BridgeRococoParachain::ParasInfo` (r:1 w:1) - /// Proof: `BridgeRococoParachain::ParasInfo` (`max_values`: Some(1), `max_size`: Some(60), added: 555, mode: `MaxEncodedLen`) - /// Storage: `BridgeRococoParachain::ImportedParaHashes` (r:1 w:1) - /// Proof: `BridgeRococoParachain::ImportedParaHashes` (`max_values`: Some(64), `max_size`: Some(64), added: 1054, mode: `MaxEncodedLen`) - /// Storage: `BridgeRococoParachain::ImportedParaHeads` (r:0 w:1) - /// Proof: `BridgeRococoParachain::ImportedParaHeads` (`max_values`: Some(64), `max_size`: Some(196), added: 1186, mode: `MaxEncodedLen`) - fn submit_parachain_heads_with_1kb_proof() -> Weight { - // Proof Size summary in bytes: - // Measured: `367` - // Estimated: `2543` - // Minimum execution time: 32_962_000 picoseconds. - Weight::from_parts(33_658_000, 0) - .saturating_add(Weight::from_parts(0, 2543)) - .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(3)) - } - /// Storage: `BridgeRococoParachain::PalletOperatingMode` (r:1 w:0) - /// Proof: `BridgeRococoParachain::PalletOperatingMode` (`max_values`: Some(1), `max_size`: Some(1), added: 496, mode: `MaxEncodedLen`) - /// Storage: `BridgeRococoGrandpa::ImportedHeaders` (r:1 w:0) - /// Proof: `BridgeRococoGrandpa::ImportedHeaders` (`max_values`: Some(1024), `max_size`: Some(68), added: 1553, mode: `MaxEncodedLen`) - /// Storage: `BridgeRococoParachain::ParasInfo` (r:1 w:1) - /// Proof: `BridgeRococoParachain::ParasInfo` (`max_values`: Some(1), `max_size`: Some(60), added: 555, mode: `MaxEncodedLen`) - /// Storage: `BridgeRococoParachain::ImportedParaHashes` (r:1 w:1) - /// Proof: `BridgeRococoParachain::ImportedParaHashes` (`max_values`: Some(64), `max_size`: Some(64), added: 1054, mode: `MaxEncodedLen`) - /// Storage: `BridgeRococoParachain::ImportedParaHeads` (r:0 w:1) - /// Proof: `BridgeRococoParachain::ImportedParaHeads` (`max_values`: Some(64), `max_size`: Some(196), added: 1186, mode: `MaxEncodedLen`) - fn submit_parachain_heads_with_16kb_proof() -> Weight { - // Proof Size summary in bytes: - // Measured: `367` - // Estimated: `2543` - // Minimum execution time: 62_685_000 picoseconds. - Weight::from_parts(64_589_000, 0) - .saturating_add(Weight::from_parts(0, 2543)) - .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(3)) - } -} diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_bridge_parachains_within_westend.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_bridge_parachains_within_westend.rs deleted file mode 100644 index bfe93b4c36a..00000000000 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_bridge_parachains_within_westend.rs +++ /dev/null @@ -1,116 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Cumulus. 
- -// Cumulus is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Cumulus is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Cumulus. If not, see . - -//! Autogenerated weights for `pallet_bridge_parachains` -//! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-10-26, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-vmdtonbz-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("bridge-hub-rococo-dev")`, DB CACHE: 1024 - -// Executed Command: -// target/production/polkadot-parachain -// benchmark -// pallet -// --steps=50 -// --repeat=20 -// --extrinsic=* -// --wasm-execution=compiled -// --heap-pages=4096 -// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json -// --pallet=pallet_bridge_parachains -// --chain=bridge-hub-rococo-dev -// --header=./cumulus/file_header.txt -// --output=./cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/ - -#![cfg_attr(rustfmt, rustfmt_skip)] -#![allow(unused_parens)] -#![allow(unused_imports)] -#![allow(missing_docs)] - -use frame_support::{traits::Get, weights::Weight}; -use core::marker::PhantomData; - -/// Weight functions for `pallet_bridge_parachains`. -pub struct WeightInfo(PhantomData); -impl pallet_bridge_parachains::WeightInfo for WeightInfo { - /// Storage: `BridgeWestendParachain::PalletOperatingMode` (r:1 w:0) - /// Proof: `BridgeWestendParachain::PalletOperatingMode` (`max_values`: Some(1), `max_size`: Some(1), added: 496, mode: `MaxEncodedLen`) - /// Storage: `BridgeWestendGrandpa::ImportedHeaders` (r:1 w:0) - /// Proof: `BridgeWestendGrandpa::ImportedHeaders` (`max_values`: Some(1024), `max_size`: Some(68), added: 1553, mode: `MaxEncodedLen`) - /// Storage: `BridgeWestendParachain::ParasInfo` (r:1 w:1) - /// Proof: `BridgeWestendParachain::ParasInfo` (`max_values`: Some(1), `max_size`: Some(60), added: 555, mode: `MaxEncodedLen`) - /// Storage: `BridgeWestendParachain::ImportedParaHashes` (r:1 w:1) - /// Proof: `BridgeWestendParachain::ImportedParaHashes` (`max_values`: Some(64), `max_size`: Some(64), added: 1054, mode: `MaxEncodedLen`) - /// Storage: `BridgeWestendParachain::ImportedParaHeads` (r:0 w:1) - /// Proof: `BridgeWestendParachain::ImportedParaHeads` (`max_values`: Some(64), `max_size`: Some(196), added: 1186, mode: `MaxEncodedLen`) - /// The range of component `p` is `[1, 2]`. - /// The range of component `p` is `[1, 2]`. - /// The range of component `p` is `[1, 2]`. - fn submit_parachain_heads_with_n_parachains(p: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `468` - // Estimated: `2543` - // Minimum execution time: 31_493_000 picoseconds. 
- Weight::from_parts(32_511_270, 0) - .saturating_add(Weight::from_parts(0, 2543)) - // Standard Error: 33_650 - .saturating_add(Weight::from_parts(20_764, 0).saturating_mul(p.into())) - .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(3)) - } - /// Storage: `BridgeWestendParachain::PalletOperatingMode` (r:1 w:0) - /// Proof: `BridgeWestendParachain::PalletOperatingMode` (`max_values`: Some(1), `max_size`: Some(1), added: 496, mode: `MaxEncodedLen`) - /// Storage: `BridgeWestendGrandpa::ImportedHeaders` (r:1 w:0) - /// Proof: `BridgeWestendGrandpa::ImportedHeaders` (`max_values`: Some(1024), `max_size`: Some(68), added: 1553, mode: `MaxEncodedLen`) - /// Storage: `BridgeWestendParachain::ParasInfo` (r:1 w:1) - /// Proof: `BridgeWestendParachain::ParasInfo` (`max_values`: Some(1), `max_size`: Some(60), added: 555, mode: `MaxEncodedLen`) - /// Storage: `BridgeWestendParachain::ImportedParaHashes` (r:1 w:1) - /// Proof: `BridgeWestendParachain::ImportedParaHashes` (`max_values`: Some(64), `max_size`: Some(64), added: 1054, mode: `MaxEncodedLen`) - /// Storage: `BridgeWestendParachain::ImportedParaHeads` (r:0 w:1) - /// Proof: `BridgeWestendParachain::ImportedParaHeads` (`max_values`: Some(64), `max_size`: Some(196), added: 1186, mode: `MaxEncodedLen`) - fn submit_parachain_heads_with_1kb_proof() -> Weight { - // Proof Size summary in bytes: - // Measured: `468` - // Estimated: `2543` - // Minimum execution time: 32_976_000 picoseconds. - Weight::from_parts(33_647_000, 0) - .saturating_add(Weight::from_parts(0, 2543)) - .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(3)) - } - /// Storage: `BridgeWestendParachain::PalletOperatingMode` (r:1 w:0) - /// Proof: `BridgeWestendParachain::PalletOperatingMode` (`max_values`: Some(1), `max_size`: Some(1), added: 496, mode: `MaxEncodedLen`) - /// Storage: `BridgeWestendGrandpa::ImportedHeaders` (r:1 w:0) - /// Proof: `BridgeWestendGrandpa::ImportedHeaders` (`max_values`: Some(1024), `max_size`: Some(68), added: 1553, mode: `MaxEncodedLen`) - /// Storage: `BridgeWestendParachain::ParasInfo` (r:1 w:1) - /// Proof: `BridgeWestendParachain::ParasInfo` (`max_values`: Some(1), `max_size`: Some(60), added: 555, mode: `MaxEncodedLen`) - /// Storage: `BridgeWestendParachain::ImportedParaHashes` (r:1 w:1) - /// Proof: `BridgeWestendParachain::ImportedParaHashes` (`max_values`: Some(64), `max_size`: Some(64), added: 1054, mode: `MaxEncodedLen`) - /// Storage: `BridgeWestendParachain::ImportedParaHeads` (r:0 w:1) - /// Proof: `BridgeWestendParachain::ImportedParaHeads` (`max_values`: Some(64), `max_size`: Some(196), added: 1186, mode: `MaxEncodedLen`) - fn submit_parachain_heads_with_16kb_proof() -> Weight { - // Proof Size summary in bytes: - // Measured: `468` - // Estimated: `2543` - // Minimum execution time: 62_898_000 picoseconds. 
- Weight::from_parts(64_463_000, 0) - .saturating_add(Weight::from_parts(0, 2543)) - .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(3)) - } -} diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_bridge_parachains_within_wococo.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_bridge_parachains_within_wococo.rs deleted file mode 100644 index d685daf930f..00000000000 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_bridge_parachains_within_wococo.rs +++ /dev/null @@ -1,115 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! Autogenerated weights for `pallet_bridge_parachains` -//! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-10-26, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-vmdtonbz-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("bridge-hub-rococo-dev")`, DB CACHE: 1024 - -// Executed Command: -// target/production/polkadot-parachain -// benchmark -// pallet -// --steps=50 -// --repeat=20 -// --extrinsic=* -// --wasm-execution=compiled -// --heap-pages=4096 -// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json -// --pallet=pallet_bridge_parachains -// --chain=bridge-hub-rococo-dev -// --header=./cumulus/file_header.txt -// --output=./cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/ - -#![cfg_attr(rustfmt, rustfmt_skip)] -#![allow(unused_parens)] -#![allow(unused_imports)] -#![allow(missing_docs)] - -use frame_support::{traits::Get, weights::Weight}; -use core::marker::PhantomData; - -/// Weight functions for `pallet_bridge_parachains`. 
-pub struct WeightInfo(PhantomData); -impl pallet_bridge_parachains::WeightInfo for WeightInfo { - /// Storage: `BridgeWococoParachain::PalletOperatingMode` (r:1 w:0) - /// Proof: `BridgeWococoParachain::PalletOperatingMode` (`max_values`: Some(1), `max_size`: Some(1), added: 496, mode: `MaxEncodedLen`) - /// Storage: `BridgeWococoGrandpa::ImportedHeaders` (r:1 w:0) - /// Proof: `BridgeWococoGrandpa::ImportedHeaders` (`max_values`: Some(1024), `max_size`: Some(68), added: 1553, mode: `MaxEncodedLen`) - /// Storage: `BridgeWococoParachain::ParasInfo` (r:1 w:1) - /// Proof: `BridgeWococoParachain::ParasInfo` (`max_values`: Some(1), `max_size`: Some(60), added: 555, mode: `MaxEncodedLen`) - /// Storage: `BridgeWococoParachain::ImportedParaHashes` (r:1 w:1) - /// Proof: `BridgeWococoParachain::ImportedParaHashes` (`max_values`: Some(64), `max_size`: Some(64), added: 1054, mode: `MaxEncodedLen`) - /// Storage: `BridgeWococoParachain::ImportedParaHeads` (r:0 w:1) - /// Proof: `BridgeWococoParachain::ImportedParaHeads` (`max_values`: Some(64), `max_size`: Some(196), added: 1186, mode: `MaxEncodedLen`) - /// The range of component `p` is `[1, 2]`. - /// The range of component `p` is `[1, 2]`. - /// The range of component `p` is `[1, 2]`. - fn submit_parachain_heads_with_n_parachains(p: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `435` - // Estimated: `2543` - // Minimum execution time: 31_573_000 picoseconds. - Weight::from_parts(32_739_400, 0) - .saturating_add(Weight::from_parts(0, 2543)) - // Standard Error: 49_518 - .saturating_add(Weight::from_parts(5_166, 0).saturating_mul(p.into())) - .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(3)) - } - /// Storage: `BridgeWococoParachain::PalletOperatingMode` (r:1 w:0) - /// Proof: `BridgeWococoParachain::PalletOperatingMode` (`max_values`: Some(1), `max_size`: Some(1), added: 496, mode: `MaxEncodedLen`) - /// Storage: `BridgeWococoGrandpa::ImportedHeaders` (r:1 w:0) - /// Proof: `BridgeWococoGrandpa::ImportedHeaders` (`max_values`: Some(1024), `max_size`: Some(68), added: 1553, mode: `MaxEncodedLen`) - /// Storage: `BridgeWococoParachain::ParasInfo` (r:1 w:1) - /// Proof: `BridgeWococoParachain::ParasInfo` (`max_values`: Some(1), `max_size`: Some(60), added: 555, mode: `MaxEncodedLen`) - /// Storage: `BridgeWococoParachain::ImportedParaHashes` (r:1 w:1) - /// Proof: `BridgeWococoParachain::ImportedParaHashes` (`max_values`: Some(64), `max_size`: Some(64), added: 1054, mode: `MaxEncodedLen`) - /// Storage: `BridgeWococoParachain::ImportedParaHeads` (r:0 w:1) - /// Proof: `BridgeWococoParachain::ImportedParaHeads` (`max_values`: Some(64), `max_size`: Some(196), added: 1186, mode: `MaxEncodedLen`) - fn submit_parachain_heads_with_1kb_proof() -> Weight { - // Proof Size summary in bytes: - // Measured: `435` - // Estimated: `2543` - // Minimum execution time: 32_780_000 picoseconds. 
- Weight::from_parts(33_797_000, 0) - .saturating_add(Weight::from_parts(0, 2543)) - .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(3)) - } - /// Storage: `BridgeWococoParachain::PalletOperatingMode` (r:1 w:0) - /// Proof: `BridgeWococoParachain::PalletOperatingMode` (`max_values`: Some(1), `max_size`: Some(1), added: 496, mode: `MaxEncodedLen`) - /// Storage: `BridgeWococoGrandpa::ImportedHeaders` (r:1 w:0) - /// Proof: `BridgeWococoGrandpa::ImportedHeaders` (`max_values`: Some(1024), `max_size`: Some(68), added: 1553, mode: `MaxEncodedLen`) - /// Storage: `BridgeWococoParachain::ParasInfo` (r:1 w:1) - /// Proof: `BridgeWococoParachain::ParasInfo` (`max_values`: Some(1), `max_size`: Some(60), added: 555, mode: `MaxEncodedLen`) - /// Storage: `BridgeWococoParachain::ImportedParaHashes` (r:1 w:1) - /// Proof: `BridgeWococoParachain::ImportedParaHashes` (`max_values`: Some(64), `max_size`: Some(64), added: 1054, mode: `MaxEncodedLen`) - /// Storage: `BridgeWococoParachain::ImportedParaHeads` (r:0 w:1) - /// Proof: `BridgeWococoParachain::ImportedParaHeads` (`max_values`: Some(64), `max_size`: Some(196), added: 1186, mode: `MaxEncodedLen`) - fn submit_parachain_heads_with_16kb_proof() -> Weight { - // Proof Size summary in bytes: - // Measured: `435` - // Estimated: `2543` - // Minimum execution time: 62_847_000 picoseconds. - Weight::from_parts(63_991_000, 0) - .saturating_add(Weight::from_parts(0, 2543)) - .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(3)) - } -} diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_bridge_relayers.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_bridge_relayers.rs index 48f0c1f949b..70af694645d 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_bridge_relayers.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_bridge_relayers.rs @@ -1,24 +1,25 @@ // Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 +// This file is part of Cumulus. -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . //! Autogenerated weights for `pallet_bridge_relayers` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! 
DATE: 2023-10-26, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-11-14, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-vmdtonbz-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-yprdrvc7-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("bridge-hub-rococo-dev")`, DB CACHE: 1024 // Executed Command: @@ -55,8 +56,8 @@ impl pallet_bridge_relayers::WeightInfo for WeightInfo< // Proof Size summary in bytes: // Measured: `207` // Estimated: `3593` - // Minimum execution time: 45_338_000 picoseconds. - Weight::from_parts(45_836_000, 0) + // Minimum execution time: 46_579_000 picoseconds. + Weight::from_parts(48_298_000, 0) .saturating_add(Weight::from_parts(0, 3593)) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(2)) @@ -71,8 +72,8 @@ impl pallet_bridge_relayers::WeightInfo for WeightInfo< // Proof Size summary in bytes: // Measured: `61` // Estimated: `4714` - // Minimum execution time: 23_561_000 picoseconds. - Weight::from_parts(24_012_000, 0) + // Minimum execution time: 24_219_000 picoseconds. + Weight::from_parts(24_993_000, 0) .saturating_add(Weight::from_parts(0, 4714)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(2)) @@ -85,8 +86,8 @@ impl pallet_bridge_relayers::WeightInfo for WeightInfo< // Proof Size summary in bytes: // Measured: `160` // Estimated: `4714` - // Minimum execution time: 25_133_000 picoseconds. - Weight::from_parts(25_728_000, 0) + // Minimum execution time: 26_279_000 picoseconds. + Weight::from_parts(26_810_000, 0) .saturating_add(Weight::from_parts(0, 4714)) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(2)) @@ -101,8 +102,8 @@ impl pallet_bridge_relayers::WeightInfo for WeightInfo< // Proof Size summary in bytes: // Measured: `263` // Estimated: `4714` - // Minimum execution time: 27_356_000 picoseconds. - Weight::from_parts(27_828_000, 0) + // Minimum execution time: 27_672_000 picoseconds. + Weight::from_parts(28_946_000, 0) .saturating_add(Weight::from_parts(0, 4714)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(3)) @@ -113,8 +114,8 @@ impl pallet_bridge_relayers::WeightInfo for WeightInfo< // Proof Size summary in bytes: // Measured: `6` // Estimated: `3538` - // Minimum execution time: 2_955_000 picoseconds. - Weight::from_parts(3_084_000, 0) + // Minimum execution time: 5_487_000 picoseconds. + Weight::from_parts(5_725_000, 0) .saturating_add(Weight::from_parts(0, 3538)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/xcm/pallet_xcm_benchmarks_fungible.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/xcm/pallet_xcm_benchmarks_fungible.rs index cb7ad7a7803..d7e8c41ff8a 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/xcm/pallet_xcm_benchmarks_fungible.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/xcm/pallet_xcm_benchmarks_fungible.rs @@ -1,24 +1,25 @@ // Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 +// This file is part of Cumulus. 
-// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . //! Autogenerated weights for `pallet_xcm_benchmarks::fungible` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-10-26, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-11-14, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-vmdtonbz-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-yprdrvc7-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: Compiled, CHAIN: Some("bridge-hub-rococo-dev"), DB CACHE: 1024 // Executed Command: @@ -53,8 +54,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `101` // Estimated: `3593` - // Minimum execution time: 19_037_000 picoseconds. - Weight::from_parts(19_602_000, 3593) + // Minimum execution time: 19_610_000 picoseconds. + Weight::from_parts(19_980_000, 3593) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -64,15 +65,13 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `153` // Estimated: `6196` - // Minimum execution time: 43_115_000 picoseconds. - Weight::from_parts(43_897_000, 6196) + // Minimum execution time: 44_411_000 picoseconds. + Weight::from_parts(45_110_000, 6196) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(2)) } // Storage: `System::Account` (r:3 w:3) // Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - // Storage: UNKNOWN KEY `0x48297505634037ef48c848c99c0b1f1b` (r:1 w:0) - // Proof: UNKNOWN KEY `0x48297505634037ef48c848c99c0b1f1b` (r:1 w:0) // Storage: `ParachainInfo::ParachainId` (r:1 w:0) // Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) // Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) @@ -89,11 +88,11 @@ impl WeightInfo { // Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) pub fn transfer_reserve_asset() -> Weight { // Proof Size summary in bytes: - // Measured: `294` + // Measured: `223` // Estimated: `8799` - // Minimum execution time: 90_267_000 picoseconds. 
- Weight::from_parts(91_460_000, 8799) - .saturating_add(T::DbWeight::get().reads(11)) + // Minimum execution time: 89_739_000 picoseconds. + Weight::from_parts(91_256_000, 8799) + .saturating_add(T::DbWeight::get().reads(10)) .saturating_add(T::DbWeight::get().writes(5)) } // Storage: `Benchmark::Override` (r:0 w:0) @@ -105,8 +104,6 @@ impl WeightInfo { // Minimum execution time: 18_446_744_073_709_551_000 picoseconds. Weight::from_parts(18_446_744_073_709_551_000, 0) } - // Storage: UNKNOWN KEY `0x48297505634037ef48c848c99c0b1f1b` (r:1 w:0) - // Proof: UNKNOWN KEY `0x48297505634037ef48c848c99c0b1f1b` (r:1 w:0) // Storage: `ParachainInfo::ParachainId` (r:1 w:0) // Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) // Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) @@ -125,19 +122,19 @@ impl WeightInfo { // Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) pub fn initiate_reserve_withdraw() -> Weight { // Proof Size summary in bytes: - // Measured: `242` + // Measured: `171` // Estimated: `6196` - // Minimum execution time: 60_477_000 picoseconds. - Weight::from_parts(61_314_000, 6196) - .saturating_add(T::DbWeight::get().reads(10)) + // Minimum execution time: 60_045_000 picoseconds. + Weight::from_parts(60_710_000, 6196) + .saturating_add(T::DbWeight::get().reads(9)) .saturating_add(T::DbWeight::get().writes(4)) } pub fn receive_teleported_asset() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_996_000 picoseconds. - Weight::from_parts(3_107_000, 0) + // Minimum execution time: 3_257_000 picoseconds. + Weight::from_parts(3_392_000, 0) } // Storage: `System::Account` (r:1 w:1) // Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) @@ -145,15 +142,13 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `52` // Estimated: `3593` - // Minimum execution time: 18_907_000 picoseconds. - Weight::from_parts(19_475_000, 3593) + // Minimum execution time: 19_423_000 picoseconds. + Weight::from_parts(19_823_000, 3593) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } // Storage: `System::Account` (r:2 w:2) // Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - // Storage: UNKNOWN KEY `0x48297505634037ef48c848c99c0b1f1b` (r:1 w:0) - // Proof: UNKNOWN KEY `0x48297505634037ef48c848c99c0b1f1b` (r:1 w:0) // Storage: `ParachainInfo::ParachainId` (r:1 w:0) // Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) // Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) @@ -170,15 +165,13 @@ impl WeightInfo { // Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) pub fn deposit_reserve_asset() -> Weight { // Proof Size summary in bytes: - // Measured: `193` + // Measured: `122` // Estimated: `6196` - // Minimum execution time: 59_143_000 picoseconds. - Weight::from_parts(60_316_000, 6196) - .saturating_add(T::DbWeight::get().reads(10)) + // Minimum execution time: 60_484_000 picoseconds. 
+ Weight::from_parts(61_634_000, 6196) + .saturating_add(T::DbWeight::get().reads(9)) .saturating_add(T::DbWeight::get().writes(4)) } - // Storage: UNKNOWN KEY `0x48297505634037ef48c848c99c0b1f1b` (r:1 w:0) - // Proof: UNKNOWN KEY `0x48297505634037ef48c848c99c0b1f1b` (r:1 w:0) // Storage: `ParachainInfo::ParachainId` (r:1 w:0) // Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) // Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) @@ -197,11 +190,11 @@ impl WeightInfo { // Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) pub fn initiate_teleport() -> Weight { // Proof Size summary in bytes: - // Measured: `141` - // Estimated: `3606` - // Minimum execution time: 44_459_000 picoseconds. - Weight::from_parts(45_365_000, 3606) - .saturating_add(T::DbWeight::get().reads(9)) + // Measured: `70` + // Estimated: `3593` + // Minimum execution time: 44_863_000 picoseconds. + Weight::from_parts(45_549_000, 3593) + .saturating_add(T::DbWeight::get().reads(8)) .saturating_add(T::DbWeight::get().writes(3)) } } diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/xcm/pallet_xcm_benchmarks_generic.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/xcm/pallet_xcm_benchmarks_generic.rs index 4eee8f0e613..0ae6d0b5623 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/xcm/pallet_xcm_benchmarks_generic.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/xcm/pallet_xcm_benchmarks_generic.rs @@ -1,24 +1,25 @@ // Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 +// This file is part of Cumulus. -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . //! Autogenerated weights for `pallet_xcm_benchmarks::generic` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-10-26, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-11-14, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-vmdtonbz-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-yprdrvc7-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! 
WASM-EXECUTION: Compiled, CHAIN: Some("bridge-hub-rococo-dev"), DB CACHE: 1024 // Executed Command: @@ -47,8 +48,6 @@ use sp_std::marker::PhantomData; /// Weights for `pallet_xcm_benchmarks::generic`. pub struct WeightInfo(PhantomData); impl WeightInfo { - // Storage: UNKNOWN KEY `0x48297505634037ef48c848c99c0b1f1b` (r:1 w:0) - // Proof: UNKNOWN KEY `0x48297505634037ef48c848c99c0b1f1b` (r:1 w:0) // Storage: `ParachainInfo::ParachainId` (r:1 w:0) // Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) // Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) @@ -67,81 +66,79 @@ impl WeightInfo { // Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) pub fn report_holding() -> Weight { // Proof Size summary in bytes: - // Measured: `242` + // Measured: `171` // Estimated: `6196` - // Minimum execution time: 62_732_000 picoseconds. - Weight::from_parts(64_581_000, 6196) - .saturating_add(T::DbWeight::get().reads(10)) + // Minimum execution time: 63_453_000 picoseconds. + Weight::from_parts(64_220_000, 6196) + .saturating_add(T::DbWeight::get().reads(9)) .saturating_add(T::DbWeight::get().writes(4)) } pub fn buy_execution() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_987_000 picoseconds. - Weight::from_parts(2_107_000, 0) + // Minimum execution time: 2_238_000 picoseconds. + Weight::from_parts(2_351_000, 0) } // Storage: `PolkadotXcm::Queries` (r:1 w:0) // Proof: `PolkadotXcm::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`) pub fn query_response() -> Weight { // Proof Size summary in bytes: - // Measured: `103` - // Estimated: `3568` - // Minimum execution time: 8_098_000 picoseconds. - Weight::from_parts(8_564_000, 3568) + // Measured: `32` + // Estimated: `3497` + // Minimum execution time: 7_953_000 picoseconds. + Weight::from_parts(8_162_000, 3497) .saturating_add(T::DbWeight::get().reads(1)) } pub fn transact() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 8_539_000 picoseconds. - Weight::from_parts(9_085_000, 0) + // Minimum execution time: 9_080_000 picoseconds. + Weight::from_parts(9_333_000, 0) } pub fn refund_surplus() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_205_000 picoseconds. - Weight::from_parts(2_369_000, 0) + // Minimum execution time: 2_415_000 picoseconds. + Weight::from_parts(2_519_000, 0) } pub fn set_error_handler() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_828_000 picoseconds. - Weight::from_parts(1_994_000, 0) + // Minimum execution time: 2_045_000 picoseconds. + Weight::from_parts(2_184_000, 0) } pub fn set_appendix() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_869_000 picoseconds. - Weight::from_parts(1_946_000, 0) + // Minimum execution time: 2_065_000 picoseconds. + Weight::from_parts(2_125_000, 0) } pub fn clear_error() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_842_000 picoseconds. - Weight::from_parts(1_949_000, 0) + // Minimum execution time: 2_077_000 picoseconds. 
+ Weight::from_parts(2_164_000, 0) } pub fn descend_origin() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_460_000 picoseconds. - Weight::from_parts(2_593_000, 0) + // Minimum execution time: 2_868_000 picoseconds. + Weight::from_parts(2_933_000, 0) } pub fn clear_origin() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_868_000 picoseconds. - Weight::from_parts(2_003_000, 0) + // Minimum execution time: 2_058_000 picoseconds. + Weight::from_parts(2_164_000, 0) } - // Storage: UNKNOWN KEY `0x48297505634037ef48c848c99c0b1f1b` (r:1 w:0) - // Proof: UNKNOWN KEY `0x48297505634037ef48c848c99c0b1f1b` (r:1 w:0) // Storage: `ParachainInfo::ParachainId` (r:1 w:0) // Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) // Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) @@ -160,21 +157,21 @@ impl WeightInfo { // Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) pub fn report_error() -> Weight { // Proof Size summary in bytes: - // Measured: `242` + // Measured: `171` // Estimated: `6196` - // Minimum execution time: 56_813_000 picoseconds. - Weight::from_parts(57_728_000, 6196) - .saturating_add(T::DbWeight::get().reads(10)) + // Minimum execution time: 55_971_000 picoseconds. + Weight::from_parts(56_869_000, 6196) + .saturating_add(T::DbWeight::get().reads(9)) .saturating_add(T::DbWeight::get().writes(4)) } // Storage: `PolkadotXcm::AssetTraps` (r:1 w:1) // Proof: `PolkadotXcm::AssetTraps` (`max_values`: None, `max_size`: None, mode: `Measured`) pub fn claim_asset() -> Weight { // Proof Size summary in bytes: - // Measured: `160` - // Estimated: `3625` - // Minimum execution time: 11_364_000 picoseconds. - Weight::from_parts(11_872_000, 3625) + // Measured: `90` + // Estimated: `3555` + // Minimum execution time: 11_382_000 picoseconds. + Weight::from_parts(11_672_000, 3555) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -182,8 +179,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_821_000 picoseconds. - Weight::from_parts(1_936_000, 0) + // Minimum execution time: 2_071_000 picoseconds. + Weight::from_parts(2_193_000, 0) } // Storage: `PolkadotXcm::VersionNotifyTargets` (r:1 w:1) // Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) @@ -201,10 +198,10 @@ impl WeightInfo { // Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) pub fn subscribe_version() -> Weight { // Proof Size summary in bytes: - // Measured: `109` - // Estimated: `3574` - // Minimum execution time: 23_081_000 picoseconds. - Weight::from_parts(23_512_000, 3574) + // Measured: `38` + // Estimated: `3503` + // Minimum execution time: 22_573_000 picoseconds. + Weight::from_parts(23_423_000, 3503) .saturating_add(T::DbWeight::get().reads(7)) .saturating_add(T::DbWeight::get().writes(3)) } @@ -214,47 +211,45 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 3_747_000 picoseconds. - Weight::from_parts(4_068_000, 0) + // Minimum execution time: 3_870_000 picoseconds. 
+ Weight::from_parts(3_993_000, 0) .saturating_add(T::DbWeight::get().writes(1)) } pub fn burn_asset() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 3_045_000 picoseconds. - Weight::from_parts(3_208_000, 0) + // Minimum execution time: 3_483_000 picoseconds. + Weight::from_parts(3_598_000, 0) } pub fn expect_asset() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_962_000 picoseconds. - Weight::from_parts(2_284_000, 0) + // Minimum execution time: 2_241_000 picoseconds. + Weight::from_parts(2_297_000, 0) } pub fn expect_origin() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_951_000 picoseconds. - Weight::from_parts(2_026_000, 0) + // Minimum execution time: 2_230_000 picoseconds. + Weight::from_parts(2_318_000, 0) } pub fn expect_error() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_837_000 picoseconds. - Weight::from_parts(2_084_000, 0) + // Minimum execution time: 2_051_000 picoseconds. + Weight::from_parts(2_153_000, 0) } pub fn expect_transact_status() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_042_000 picoseconds. - Weight::from_parts(2_145_000, 0) + // Minimum execution time: 2_306_000 picoseconds. + Weight::from_parts(2_380_000, 0) } - // Storage: UNKNOWN KEY `0x48297505634037ef48c848c99c0b1f1b` (r:1 w:0) - // Proof: UNKNOWN KEY `0x48297505634037ef48c848c99c0b1f1b` (r:1 w:0) // Storage: `ParachainInfo::ParachainId` (r:1 w:0) // Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) // Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) @@ -273,22 +268,20 @@ impl WeightInfo { // Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) pub fn query_pallet() -> Weight { // Proof Size summary in bytes: - // Measured: `242` + // Measured: `171` // Estimated: `6196` - // Minimum execution time: 61_350_000 picoseconds. - Weight::from_parts(62_440_000, 6196) - .saturating_add(T::DbWeight::get().reads(10)) + // Minimum execution time: 60_201_000 picoseconds. + Weight::from_parts(61_132_000, 6196) + .saturating_add(T::DbWeight::get().reads(9)) .saturating_add(T::DbWeight::get().writes(4)) } pub fn expect_pallet() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 4_993_000 picoseconds. - Weight::from_parts(5_309_000, 0) + // Minimum execution time: 4_554_000 picoseconds. + Weight::from_parts(4_704_000, 0) } - // Storage: UNKNOWN KEY `0x48297505634037ef48c848c99c0b1f1b` (r:1 w:0) - // Proof: UNKNOWN KEY `0x48297505634037ef48c848c99c0b1f1b` (r:1 w:0) // Storage: `ParachainInfo::ParachainId` (r:1 w:0) // Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) // Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) @@ -307,70 +300,68 @@ impl WeightInfo { // Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) pub fn report_transact_status() -> Weight { // Proof Size summary in bytes: - // Measured: `242` + // Measured: `171` // Estimated: `6196` - // Minimum execution time: 57_133_000 picoseconds. 
- Weight::from_parts(58_100_000, 6196) - .saturating_add(T::DbWeight::get().reads(10)) + // Minimum execution time: 56_071_000 picoseconds. + Weight::from_parts(56_889_000, 6196) + .saturating_add(T::DbWeight::get().reads(9)) .saturating_add(T::DbWeight::get().writes(4)) } pub fn clear_transact_status() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_899_000 picoseconds. - Weight::from_parts(2_153_000, 0) + // Minimum execution time: 2_093_000 picoseconds. + Weight::from_parts(2_169_000, 0) } pub fn set_topic() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_880_000 picoseconds. - Weight::from_parts(1_960_000, 0) + // Minimum execution time: 2_027_000 picoseconds. + Weight::from_parts(2_172_000, 0) } pub fn clear_topic() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_825_000 picoseconds. - Weight::from_parts(1_960_000, 0) + // Minimum execution time: 2_035_000 picoseconds. + Weight::from_parts(2_164_000, 0) } - // Storage: UNKNOWN KEY `0x48297505634037ef48c848c99c0b1f1b` (r:1 w:0) - // Proof: UNKNOWN KEY `0x48297505634037ef48c848c99c0b1f1b` (r:1 w:0) // Storage: `ParachainInfo::ParachainId` (r:1 w:0) // Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - // Storage: `BridgeRococoToWococoMessages::PalletOperatingMode` (r:1 w:0) - // Proof: `BridgeRococoToWococoMessages::PalletOperatingMode` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`) - // Storage: `BridgeRococoToWococoMessages::OutboundLanes` (r:1 w:1) - // Proof: `BridgeRococoToWococoMessages::OutboundLanes` (`max_values`: Some(1), `max_size`: Some(44), added: 539, mode: `MaxEncodedLen`) - // Storage: `BridgeRococoToWococoMessages::OutboundLanesCongestedSignals` (r:1 w:0) - // Proof: `BridgeRococoToWococoMessages::OutboundLanesCongestedSignals` (`max_values`: Some(1), `max_size`: Some(21), added: 516, mode: `MaxEncodedLen`) - // Storage: `BridgeRococoToWococoMessages::OutboundMessages` (r:0 w:1) - // Proof: `BridgeRococoToWococoMessages::OutboundMessages` (`max_values`: None, `max_size`: Some(2621472), added: 2623947, mode: `MaxEncodedLen`) + // Storage: `BridgeWestendMessages::PalletOperatingMode` (r:1 w:0) + // Proof: `BridgeWestendMessages::PalletOperatingMode` (`max_values`: Some(1), `max_size`: Some(2), added: 497, mode: `MaxEncodedLen`) + // Storage: `BridgeWestendMessages::OutboundLanes` (r:1 w:1) + // Proof: `BridgeWestendMessages::OutboundLanes` (`max_values`: Some(1), `max_size`: Some(44), added: 539, mode: `MaxEncodedLen`) + // Storage: `BridgeWestendMessages::OutboundLanesCongestedSignals` (r:1 w:0) + // Proof: `BridgeWestendMessages::OutboundLanesCongestedSignals` (`max_values`: Some(1), `max_size`: Some(21), added: 516, mode: `MaxEncodedLen`) + // Storage: `BridgeWestendMessages::OutboundMessages` (r:0 w:1) + // Proof: `BridgeWestendMessages::OutboundMessages` (`max_values`: None, `max_size`: Some(2621472), added: 2623947, mode: `MaxEncodedLen`) /// The range of component `x` is `[1, 1000]`. pub fn export_message(x: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `139` - // Estimated: `3604` - // Minimum execution time: 28_419_000 picoseconds. 
- Weight::from_parts(29_387_791, 3604) - // Standard Error: 552 - .saturating_add(Weight::from_parts(316_277, 0).saturating_mul(x.into())) - .saturating_add(T::DbWeight::get().reads(5)) + // Measured: `96` + // Estimated: `1529` + // Minimum execution time: 25_636_000 picoseconds. + Weight::from_parts(25_405_640, 1529) + // Standard Error: 321 + .saturating_add(Weight::from_parts(365_002, 0).saturating_mul(x.into())) + .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(2)) } pub fn set_fees_mode() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_903_000 picoseconds. - Weight::from_parts(2_023_000, 0) + // Minimum execution time: 2_036_000 picoseconds. + Weight::from_parts(2_136_000, 0) } pub fn unpaid_execution() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_963_000 picoseconds. - Weight::from_parts(2_143_000, 0) + // Minimum execution time: 2_147_000 picoseconds. + Weight::from_parts(2_276_000, 0) } } diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/xcm_config.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/xcm_config.rs index 1b1e6f8ba71..1436c5b96a3 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/xcm_config.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/xcm_config.rs @@ -16,12 +16,11 @@ use super::{ AccountId, AllPalletsWithSystem, Balances, BaseDeliveryFee, FeeAssetId, ParachainInfo, - ParachainSystem, PolkadotXcm, Runtime, RuntimeCall, RuntimeEvent, RuntimeFlavor, RuntimeOrigin, + ParachainSystem, PolkadotXcm, Runtime, RuntimeCall, RuntimeEvent, RuntimeOrigin, TransactionByteFee, WeightToFee, XcmpQueue, }; use crate::bridge_common_config::{ - BridgeGrandpaRococoInstance, BridgeGrandpaWestendInstance, BridgeGrandpaWococoInstance, - DeliveryRewardInBalance, RequiredStakeForStakeAndSlash, + BridgeGrandpaWestendInstance, DeliveryRewardInBalance, RequiredStakeForStakeAndSlash, }; use bp_messages::LaneId; use bp_relayers::{PayRewardFromAccount, RewardsAccountOwner, RewardsAccountParams}; @@ -60,9 +59,9 @@ use xcm_executor::{ }; parameter_types! { - pub storage Flavor: RuntimeFlavor = RuntimeFlavor::default(); pub const TokenLocation: MultiLocation = MultiLocation::parent(); pub RelayChainOrigin: RuntimeOrigin = cumulus_pallet_xcm::Origin::Relay.into(); + pub RelayNetwork: NetworkId = NetworkId::Rococo; pub UniversalLocation: InteriorMultiLocation = X2(GlobalConsensus(RelayNetwork::get()), Parachain(ParachainInfo::parachain_id().into())); pub const MaxInstructions: u32 = 100; @@ -71,22 +70,6 @@ parameter_types! { pub RelayTreasuryLocation: MultiLocation = (Parent, PalletInstance(rococo_runtime_constants::TREASURY_PALLET_ID)).into(); } -/// Adapter for resolving `NetworkId` based on `pub storage Flavor: RuntimeFlavor`. -pub struct RelayNetwork; -impl Get> for RelayNetwork { - fn get() -> Option { - Some(Self::get()) - } -} -impl Get for RelayNetwork { - fn get() -> NetworkId { - match Flavor::get() { - RuntimeFlavor::Rococo => NetworkId::Rococo, - RuntimeFlavor::Wococo => NetworkId::Wococo, - } - } -} - /// Type for specifying how a `MultiLocation` can be converted into an `AccountId`. This is used /// when determining ownership of accounts for asset transacting and when attempting to use XCM /// `Transact` in order to determine the dispatch Origin. 
@@ -170,8 +153,7 @@ impl Contains for SafeCallFilter { RuntimeCall::System(frame_system::Call::set_storage { items }) if items.iter().all(|(k, _)| { k.eq(&DeliveryRewardInBalance::key()) | - k.eq(&RequiredStakeForStakeAndSlash::key()) | - k.eq(&Flavor::key()) + k.eq(&RequiredStakeForStakeAndSlash::key()) }) => return true, _ => (), @@ -199,17 +181,9 @@ impl Contains for SafeCallFilter { ) | RuntimeCall::Session(pallet_session::Call::purge_keys { .. }) | RuntimeCall::XcmpQueue(..) | RuntimeCall::MessageQueue(..) | - RuntimeCall::BridgeRococoGrandpa(pallet_bridge_grandpa::Call::< - Runtime, - BridgeGrandpaRococoInstance, - >::initialize { .. }) | RuntimeCall::BridgeWestendGrandpa(pallet_bridge_grandpa::Call::< Runtime, BridgeGrandpaWestendInstance, - >::initialize { .. }) | - RuntimeCall::BridgeWococoGrandpa(pallet_bridge_grandpa::Call::< - Runtime, - BridgeGrandpaWococoInstance, >::initialize { .. }) ) } @@ -298,13 +272,6 @@ impl xcm_executor::Config for XcmConfig { type FeeManager = XcmFeeManagerFromComponents< WaivedLocations, ( - XcmExportFeeToRelayerRewardAccounts< - Self::AssetTransactor, - crate::bridge_to_wococo_config::WococoGlobalConsensusNetwork, - crate::bridge_to_wococo_config::AssetHubWococoParaId, - crate::bridge_to_wococo_config::BridgeHubWococoChainId, - crate::bridge_to_wococo_config::AssetHubRococoToAssetHubWococoMessagesLane, - >, XcmExportFeeToRelayerRewardAccounts< Self::AssetTransactor, crate::bridge_to_westend_config::WestendGlobalConsensusNetwork, @@ -312,21 +279,10 @@ impl xcm_executor::Config for XcmConfig { crate::bridge_to_westend_config::BridgeHubWestendChainId, crate::bridge_to_westend_config::AssetHubRococoToAssetHubWestendMessagesLane, >, - XcmExportFeeToRelayerRewardAccounts< - Self::AssetTransactor, - crate::bridge_to_rococo_config::RococoGlobalConsensusNetwork, - crate::bridge_to_rococo_config::AssetHubRococoParaId, - crate::bridge_to_rococo_config::BridgeHubRococoChainId, - crate::bridge_to_rococo_config::AssetHubWococoToAssetHubRococoMessagesLane, - >, XcmFeeToAccount, ), >; - type MessageExporter = ( - crate::bridge_to_westend_config::ToBridgeHubWestendHaulBlobExporter, - crate::bridge_to_wococo_config::ToBridgeHubWococoHaulBlobExporter, - crate::bridge_to_rococo_config::ToBridgeHubRococoHaulBlobExporter, - ); + type MessageExporter = (crate::bridge_to_westend_config::ToBridgeHubWestendHaulBlobExporter,); type UniversalAliases = Nothing; type CallDispatcher = WithOriginFilter; type SafeCallFilter = SafeCallFilter; diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/tests/tests.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/tests/tests.rs index 39ee2576f5b..9597d71f6b2 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/tests/tests.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/tests/tests.rs @@ -18,8 +18,7 @@ use bp_polkadot_core::Signature; use bridge_hub_rococo_runtime::{ - bridge_common_config, bridge_to_rococo_config, bridge_to_westend_config, - bridge_to_wococo_config, + bridge_common_config, bridge_to_westend_config, xcm_config::{RelayNetwork, TokenLocation, XcmConfig}, AllPalletsWithoutSystem, BridgeRejectObsoleteHeadersAndMessages, Executive, ExistentialDeposit, ParachainSystem, PolkadotXcm, Runtime, RuntimeCall, RuntimeEvent, SessionKeys, SignedExtra, @@ -57,11 +56,7 @@ fn construct_extrinsic( frame_system::CheckWeight::::new(), pallet_transaction_payment::ChargeTransactionPayment::::from(0), BridgeRejectObsoleteHeadersAndMessages::default(), - ( - 
bridge_to_wococo_config::OnBridgeHubRococoRefundBridgeHubWococoMessages::default(), - bridge_to_westend_config::OnBridgeHubRococoRefundBridgeHubWestendMessages::default(), - bridge_to_rococo_config::OnBridgeHubWococoRefundBridgeHubRococoMessages::default(), - ), + (bridge_to_westend_config::OnBridgeHubRococoRefundBridgeHubWestendMessages::default(),), ); let payload = SignedPayload::new(call.clone(), extra.clone()).unwrap(); let signature = payload.using_encoded(|e| sender.sign(e)); @@ -105,18 +100,13 @@ fn collator_session_keys() -> bridge_hub_test_utils::CollatorSessionKeys( - collator_session_keys(), - bp_bridge_hub_rococo::BRIDGE_HUB_ROCOCO_PARACHAIN_ID, - Box::new(|call| RuntimeCall::BridgeWococoGrandpa(call).encode()), - ); // for Westend finality bridge_hub_test_utils::test_cases::initialize_bridge_by_governance_works::< Runtime, @@ -195,28 +176,6 @@ mod bridge_hub_rococo_tests { #[test] fn handle_export_message_from_system_parachain_add_to_outbound_queue_works() { - // for Wococo - bridge_hub_test_utils::test_cases::handle_export_message_from_system_parachain_to_outbound_queue_works::< - Runtime, - XcmConfig, - WithBridgeHubWococoMessagesInstance, - >( - collator_session_keys(), - bp_bridge_hub_rococo::BRIDGE_HUB_ROCOCO_PARACHAIN_ID, - SIBLING_PARACHAIN_ID, - Box::new(|runtime_event_encoded: Vec| { - match RuntimeEvent::decode(&mut &runtime_event_encoded[..]) { - Ok(RuntimeEvent::BridgeWococoMessages(event)) => Some(event), - _ => None, - } - }), - || ExportMessage { network: Wococo, destination: X1(Parachain(1234)), xcm: Xcm(vec![]) }, - XCM_LANE_FOR_ASSET_HUB_ROCOCO_TO_ASSET_HUB_WOCOCO, - Some((TokenLocation::get(), ExistentialDeposit::get()).into()), - // value should be >= than value generated by `can_calculate_weight_for_paid_export_message_with_reserve_transfer` - Some((TokenLocation::get(), bp_bridge_hub_rococo::BridgeHubRococoBaseXcmFeeInRocs::get()).into()), - || (), - ); // for Westend bridge_hub_test_utils::test_cases::handle_export_message_from_system_parachain_to_outbound_queue_works::< Runtime, @@ -243,41 +202,13 @@ mod bridge_hub_rococo_tests { #[test] fn message_dispatch_routing_works() { - // from Wococo - bridge_hub_test_utils::test_cases::message_dispatch_routing_works::< - Runtime, - AllPalletsWithoutSystem, - XcmConfig, - ParachainSystem, - WithBridgeHubWococoMessagesInstance, - RelayNetwork, - WococoGlobalConsensusNetwork, - >( - collator_session_keys(), - bp_bridge_hub_rococo::BRIDGE_HUB_ROCOCO_PARACHAIN_ID, - SIBLING_PARACHAIN_ID, - Box::new(|runtime_event_encoded: Vec| { - match RuntimeEvent::decode(&mut &runtime_event_encoded[..]) { - Ok(RuntimeEvent::ParachainSystem(event)) => Some(event), - _ => None, - } - }), - Box::new(|runtime_event_encoded: Vec| { - match RuntimeEvent::decode(&mut &runtime_event_encoded[..]) { - Ok(RuntimeEvent::XcmpQueue(event)) => Some(event), - _ => None, - } - }), - XCM_LANE_FOR_ASSET_HUB_ROCOCO_TO_ASSET_HUB_WOCOCO, - || (), - ); // from Westend bridge_hub_test_utils::test_cases::message_dispatch_routing_works::< Runtime, AllPalletsWithoutSystem, XcmConfig, ParachainSystem, - WithBridgeHubWococoMessagesInstance, + WithBridgeHubWestendMessagesInstance, RelayNetwork, WestendGlobalConsensusNetwork, >( @@ -303,25 +234,6 @@ mod bridge_hub_rococo_tests { #[test] fn relayed_incoming_message_works() { - // from Wococo - bridge_hub_test_utils::test_cases::relayed_incoming_message_works::< - Runtime, - AllPalletsWithoutSystem, - XcmConfig, - ParachainSystem, - BridgeGrandpaWococoInstance, - BridgeParachainWococoInstance, - 
WithBridgeHubWococoMessagesInstance, - WithBridgeHubWococoMessageBridge, - >( - collator_session_keys(), - bp_bridge_hub_rococo::BRIDGE_HUB_ROCOCO_PARACHAIN_ID, - bp_bridge_hub_wococo::BRIDGE_HUB_WOCOCO_PARACHAIN_ID, - SIBLING_PARACHAIN_ID, - Rococo, - XCM_LANE_FOR_ASSET_HUB_ROCOCO_TO_ASSET_HUB_WOCOCO, - || (), - ); // from Westend bridge_hub_test_utils::test_cases::relayed_incoming_message_works::< Runtime, @@ -345,29 +257,6 @@ mod bridge_hub_rococo_tests { #[test] pub fn complex_relay_extrinsic_works() { - // for Wococo - bridge_hub_test_utils::test_cases::complex_relay_extrinsic_works::< - Runtime, - AllPalletsWithoutSystem, - XcmConfig, - ParachainSystem, - BridgeGrandpaWococoInstance, - BridgeParachainWococoInstance, - WithBridgeHubWococoMessagesInstance, - WithBridgeHubWococoMessageBridge, - >( - collator_session_keys(), - bp_bridge_hub_rococo::BRIDGE_HUB_ROCOCO_PARACHAIN_ID, - bp_bridge_hub_wococo::BRIDGE_HUB_WOCOCO_PARACHAIN_ID, - SIBLING_PARACHAIN_ID, - BridgeHubWococoChainId::get(), - Rococo, - XCM_LANE_FOR_ASSET_HUB_ROCOCO_TO_ASSET_HUB_WOCOCO, - ExistentialDeposit::get(), - executive_init_block, - construct_and_apply_extrinsic, - || (), - ); // for Westend bridge_hub_test_utils::test_cases::complex_relay_extrinsic_works::< Runtime, @@ -457,275 +346,3 @@ mod bridge_hub_rococo_tests { ); } } - -mod bridge_hub_wococo_tests { - use super::*; - use bridge_common_config::{ - BridgeGrandpaRococoInstance, BridgeParachainRococoInstance, DeliveryRewardInBalance, - RequiredStakeForStakeAndSlash, - }; - use bridge_hub_rococo_runtime::{xcm_config, AllPalletsWithoutSystem, RuntimeFlavor}; - use bridge_to_rococo_config::{ - BridgeHubRococoChainId, RococoGlobalConsensusNetwork, WithBridgeHubRococoMessageBridge, - WithBridgeHubRococoMessagesInstance, XCM_LANE_FOR_ASSET_HUB_WOCOCO_TO_ASSET_HUB_ROCOCO, - }; - use frame_support::assert_ok; - - type RuntimeHelper = bridge_hub_test_utils::RuntimeHelper; - - pub(crate) fn set_wococo_flavor() { - let flavor_key = xcm_config::Flavor::key().to_vec(); - let flavor = RuntimeFlavor::Wococo; - - // encode `set_storage` call - let set_storage_call = RuntimeCall::System(frame_system::Call::::set_storage { - items: vec![(flavor_key, flavor.encode())], - }) - .encode(); - - // estimate - storing just 1 value - use frame_system::WeightInfo; - let require_weight_at_most = - ::SystemWeightInfo::set_storage(1); - - // execute XCM with Transact to `set_storage` as governance does - assert_ok!(RuntimeHelper::execute_as_governance(set_storage_call, require_weight_at_most) - .ensure_complete()); - - // check if stored - assert_eq!(flavor, xcm_config::Flavor::get()); - } - - bridge_hub_test_utils::test_cases::include_teleports_for_native_asset_works!( - Runtime, - AllPalletsWithoutSystem, - XcmConfig, - CheckingAccount, - WeightToFee, - ParachainSystem, - collator_session_keys(), - ExistentialDeposit::get(), - Box::new(|runtime_event_encoded: Vec| { - match RuntimeEvent::decode(&mut &runtime_event_encoded[..]) { - Ok(RuntimeEvent::PolkadotXcm(event)) => Some(event), - _ => None, - } - }), - bp_bridge_hub_wococo::BRIDGE_HUB_WOCOCO_PARACHAIN_ID - ); - - #[test] - fn initialize_bridge_by_governance_works() { - bridge_hub_test_utils::test_cases::initialize_bridge_by_governance_works::< - Runtime, - BridgeGrandpaRococoInstance, - >( - collator_session_keys(), - bp_bridge_hub_wococo::BRIDGE_HUB_WOCOCO_PARACHAIN_ID, - Box::new(|call| RuntimeCall::BridgeRococoGrandpa(call).encode()), - ) - } - - #[test] - fn change_delivery_reward_by_governance_works() { - 
bridge_hub_test_utils::test_cases::change_storage_constant_by_governance_works::< - Runtime, - DeliveryRewardInBalance, - u64, - >( - collator_session_keys(), - bp_bridge_hub_wococo::BRIDGE_HUB_WOCOCO_PARACHAIN_ID, - Box::new(|call| RuntimeCall::System(call).encode()), - || (DeliveryRewardInBalance::key().to_vec(), DeliveryRewardInBalance::get()), - |old_value| old_value.checked_mul(2).unwrap(), - ) - } - - #[test] - fn change_required_stake_by_governance_works() { - bridge_hub_test_utils::test_cases::change_storage_constant_by_governance_works::< - Runtime, - RequiredStakeForStakeAndSlash, - Balance, - >( - collator_session_keys(), - bp_bridge_hub_wococo::BRIDGE_HUB_WOCOCO_PARACHAIN_ID, - Box::new(|call| RuntimeCall::System(call).encode()), - || { - ( - RequiredStakeForStakeAndSlash::key().to_vec(), - RequiredStakeForStakeAndSlash::get(), - ) - }, - |old_value| old_value.checked_mul(2).unwrap(), - ) - } - - #[test] - fn handle_export_message_from_system_parachain_add_to_outbound_queue_works() { - bridge_hub_test_utils::test_cases::handle_export_message_from_system_parachain_to_outbound_queue_works::< - Runtime, - XcmConfig, - WithBridgeHubRococoMessagesInstance, - >( - collator_session_keys(), - bp_bridge_hub_wococo::BRIDGE_HUB_WOCOCO_PARACHAIN_ID, - SIBLING_PARACHAIN_ID, - Box::new(|runtime_event_encoded: Vec| { - match RuntimeEvent::decode(&mut &runtime_event_encoded[..]) { - Ok(RuntimeEvent::BridgeRococoMessages(event)) => Some(event), - _ => None, - } - }), - || ExportMessage { network: Rococo, destination: X1(Parachain(4321)), xcm: Xcm(vec![]) }, - XCM_LANE_FOR_ASSET_HUB_WOCOCO_TO_ASSET_HUB_ROCOCO, - Some((TokenLocation::get(), ExistentialDeposit::get()).into()), - // value should be >= than value generated by `can_calculate_weight_for_paid_export_message_with_reserve_transfer` - Some((TokenLocation::get(), bp_bridge_hub_wococo::BridgeHubWococoBaseXcmFeeInWocs::get()).into()), - set_wococo_flavor, - ) - } - - #[test] - fn message_dispatch_routing_works() { - bridge_hub_test_utils::test_cases::message_dispatch_routing_works::< - Runtime, - AllPalletsWithoutSystem, - XcmConfig, - ParachainSystem, - WithBridgeHubRococoMessagesInstance, - RelayNetwork, - RococoGlobalConsensusNetwork, - >( - collator_session_keys(), - bp_bridge_hub_wococo::BRIDGE_HUB_WOCOCO_PARACHAIN_ID, - SIBLING_PARACHAIN_ID, - Box::new(|runtime_event_encoded: Vec| { - match RuntimeEvent::decode(&mut &runtime_event_encoded[..]) { - Ok(RuntimeEvent::ParachainSystem(event)) => Some(event), - _ => None, - } - }), - Box::new(|runtime_event_encoded: Vec| { - match RuntimeEvent::decode(&mut &runtime_event_encoded[..]) { - Ok(RuntimeEvent::XcmpQueue(event)) => Some(event), - _ => None, - } - }), - XCM_LANE_FOR_ASSET_HUB_WOCOCO_TO_ASSET_HUB_ROCOCO, - set_wococo_flavor, - ) - } - - #[test] - fn relayed_incoming_message_works() { - bridge_hub_test_utils::test_cases::relayed_incoming_message_works::< - Runtime, - AllPalletsWithoutSystem, - XcmConfig, - ParachainSystem, - BridgeGrandpaRococoInstance, - BridgeParachainRococoInstance, - WithBridgeHubRococoMessagesInstance, - WithBridgeHubRococoMessageBridge, - >( - collator_session_keys(), - bp_bridge_hub_wococo::BRIDGE_HUB_WOCOCO_PARACHAIN_ID, - bp_bridge_hub_rococo::BRIDGE_HUB_ROCOCO_PARACHAIN_ID, - SIBLING_PARACHAIN_ID, - Wococo, - XCM_LANE_FOR_ASSET_HUB_WOCOCO_TO_ASSET_HUB_ROCOCO, - set_wococo_flavor, - ) - } - - #[test] - pub fn complex_relay_extrinsic_works() { - bridge_hub_test_utils::test_cases::complex_relay_extrinsic_works::< - Runtime, - AllPalletsWithoutSystem, - 
XcmConfig, - ParachainSystem, - BridgeGrandpaRococoInstance, - BridgeParachainRococoInstance, - WithBridgeHubRococoMessagesInstance, - WithBridgeHubRococoMessageBridge, - >( - collator_session_keys(), - bp_bridge_hub_wococo::BRIDGE_HUB_WOCOCO_PARACHAIN_ID, - bp_bridge_hub_rococo::BRIDGE_HUB_ROCOCO_PARACHAIN_ID, - SIBLING_PARACHAIN_ID, - BridgeHubRococoChainId::get(), - Wococo, - XCM_LANE_FOR_ASSET_HUB_WOCOCO_TO_ASSET_HUB_ROCOCO, - ExistentialDeposit::get(), - executive_init_block, - construct_and_apply_extrinsic, - set_wococo_flavor, - ); - } - - #[test] - pub fn can_calculate_weight_for_paid_export_message_with_reserve_transfer() { - let estimated = bridge_hub_test_utils::test_cases::can_calculate_weight_for_paid_export_message_with_reserve_transfer::< - Runtime, - XcmConfig, - WeightToFee, - >(); - - // check if estimated value is sane - let max_expected = bp_bridge_hub_wococo::BridgeHubWococoBaseXcmFeeInWocs::get(); - assert!( - estimated <= max_expected, - "calculated: {:?}, max_expected: {:?}, please adjust `bp_bridge_hub_wococo::BridgeHubWococoBaseXcmFeeInWocs` value", - estimated, - max_expected - ); - } - - #[test] - pub fn can_calculate_fee_for_complex_message_delivery_transaction() { - let estimated = bridge_hub_test_utils::test_cases::can_calculate_fee_for_complex_message_delivery_transaction::< - Runtime, - BridgeGrandpaRococoInstance, - BridgeParachainRococoInstance, - WithBridgeHubRococoMessagesInstance, - WithBridgeHubRococoMessageBridge, - >( - collator_session_keys(), - construct_and_estimate_extrinsic_fee - ); - - // check if estimated value is sane - let max_expected = bp_bridge_hub_wococo::BridgeHubWococoBaseDeliveryFeeInWocs::get(); - assert!( - estimated <= max_expected, - "calculated: {:?}, max_expected: {:?}, please adjust `bp_bridge_hub_wococo::BridgeHubWococoBaseDeliveryFeeInWocs` value", - estimated, - max_expected - ); - } - - #[test] - pub fn can_calculate_fee_for_complex_message_confirmation_transaction() { - let estimated = bridge_hub_test_utils::test_cases::can_calculate_fee_for_complex_message_confirmation_transaction::< - Runtime, - BridgeGrandpaRococoInstance, - BridgeParachainRococoInstance, - WithBridgeHubRococoMessagesInstance, - WithBridgeHubRococoMessageBridge, - >( - collator_session_keys(), - construct_and_estimate_extrinsic_fee - ); - - // check if estimated value is sane - let max_expected = bp_bridge_hub_wococo::BridgeHubWococoBaseConfirmationFeeInWocs::get(); - assert!( - estimated <= max_expected, - "calculated: {:?}, max_expected: {:?}, please adjust `bp_bridge_hub_wococo::BridgeHubWococoBaseConfirmationFeeInWocs` value", - estimated, - max_expected - ); - } -} diff --git a/cumulus/parachains/runtimes/bridge-hubs/test-utils/Cargo.toml b/cumulus/parachains/runtimes/bridge-hubs/test-utils/Cargo.toml index 18181ed3e05..bd171be53bf 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/test-utils/Cargo.toml +++ b/cumulus/parachains/runtimes/bridge-hubs/test-utils/Cargo.toml @@ -41,8 +41,6 @@ xcm-builder = { package = "staging-xcm-builder", path = "../../../../../polkadot xcm-executor = { package = "staging-xcm-executor", path = "../../../../../polkadot/xcm/xcm-executor", default-features = false} # Bridges -bp-bridge-hub-rococo = { path = "../../../../../bridges/primitives/chain-bridge-hub-rococo", default-features = false } -bp-bridge-hub-wococo = { path = "../../../../../bridges/primitives/chain-bridge-hub-wococo", default-features = false } bp-header-chain = { path = "../../../../../bridges/primitives/header-chain", default-features = false } 
bp-messages = { path = "../../../../../bridges/primitives/messages", default-features = false } bp-parachains = { path = "../../../../../bridges/primitives/parachains", default-features = false } @@ -60,8 +58,6 @@ bridge-runtime-common = { path = "../../../../../bridges/bin/runtime-common", de default = [ "std" ] std = [ "asset-test-utils/std", - "bp-bridge-hub-rococo/std", - "bp-bridge-hub-wococo/std", "bp-header-chain/std", "bp-messages/std", "bp-parachains/std", diff --git a/cumulus/polkadot-parachain/src/chain_spec/asset_hubs.rs b/cumulus/polkadot-parachain/src/chain_spec/asset_hubs.rs index b4a73ff8aaa..a8d3d2975ad 100644 --- a/cumulus/polkadot-parachain/src/chain_spec/asset_hubs.rs +++ b/cumulus/polkadot-parachain/src/chain_spec/asset_hubs.rs @@ -32,7 +32,6 @@ pub type AssetHubWestendChainSpec = sc_service::GenericChainSpec; pub type AssetHubRococoChainSpec = sc_service::GenericChainSpec; -pub type AssetHubWococoChainSpec = AssetHubRococoChainSpec; const ASSET_HUB_POLKADOT_ED: AssetHubBalance = parachains_common::polkadot::currency::EXISTENTIAL_DEPOSIT; @@ -433,6 +432,7 @@ pub fn asset_hub_westend_development_config() -> AssetHubWestendChainSpec { get_account_id_from_seed::("Alice//stash"), get_account_id_from_seed::("Bob//stash"), ], + parachains_common::westend::currency::UNITS * 1_000_000, 1000.into(), )) .with_properties(properties) @@ -478,6 +478,7 @@ pub fn asset_hub_westend_local_config() -> AssetHubWestendChainSpec { get_account_id_from_seed::("Eve//stash"), get_account_id_from_seed::("Ferdie//stash"), ], + parachains_common::westend::currency::UNITS * 1_000_000, 1000.into(), )) .with_properties(properties) @@ -522,6 +523,7 @@ pub fn asset_hub_westend_config() -> AssetHubWestendChainSpec { ), ], Vec::new(), + ASSET_HUB_WESTEND_ED * 4096, 1000.into(), )) .with_properties(properties) @@ -531,6 +533,7 @@ pub fn asset_hub_westend_config() -> AssetHubWestendChainSpec { fn asset_hub_westend_genesis( invulnerables: Vec<(AccountId, AuraId)>, endowed_accounts: Vec, + endowment: AssetHubBalance, id: ParaId, ) -> serde_json::Value { serde_json::json!({ @@ -538,7 +541,7 @@ fn asset_hub_westend_genesis( "balances": endowed_accounts .iter() .cloned() - .map(|k| (k, ASSET_HUB_WESTEND_ED * 4096)) + .map(|k| (k, endowment)) .collect::>(), }, "parachainInfo": { @@ -579,19 +582,6 @@ pub fn asset_hub_rococo_development_config() -> AssetHubRococoChainSpec { ) } -pub fn asset_hub_wococo_development_config() -> AssetHubWococoChainSpec { - let mut properties = sc_chain_spec::Properties::new(); - properties.insert("ss58Format".into(), 42.into()); - properties.insert("tokenSymbol".into(), "WOC".into()); - properties.insert("tokenDecimals".into(), 12.into()); - asset_hub_rococo_like_development_config( - properties, - "Wococo Asset Hub Development", - "asset-hub-wococo-dev", - 1000, - ) -} - fn asset_hub_rococo_like_development_config( properties: sc_chain_spec::Properties, name: &str, @@ -617,6 +607,7 @@ fn asset_hub_rococo_like_development_config( get_account_id_from_seed::("Alice//stash"), get_account_id_from_seed::("Bob//stash"), ], + parachains_common::rococo::currency::UNITS * 1_000_000, para_id.into(), )) .with_properties(properties) @@ -636,19 +627,6 @@ pub fn asset_hub_rococo_local_config() -> AssetHubRococoChainSpec { ) } -pub fn asset_hub_wococo_local_config() -> AssetHubWococoChainSpec { - let mut properties = sc_chain_spec::Properties::new(); - properties.insert("ss58Format".into(), 42.into()); - properties.insert("tokenSymbol".into(), "WOC".into()); - 
properties.insert("tokenDecimals".into(), 12.into()); - asset_hub_rococo_like_local_config( - properties, - "Wococo Asset Hub Local", - "asset-hub-wococo-local", - 1000, - ) -} - fn asset_hub_rococo_like_local_config( properties: sc_chain_spec::Properties, name: &str, @@ -688,6 +666,7 @@ fn asset_hub_rococo_like_local_config( get_account_id_from_seed::("Eve//stash"), get_account_id_from_seed::("Ferdie//stash"), ], + parachains_common::rococo::currency::UNITS * 1_000_000, para_id.into(), )) .with_properties(properties) @@ -735,54 +714,7 @@ pub fn asset_hub_rococo_genesis_config() -> AssetHubRococoChainSpec { ), ], Vec::new(), - para_id.into(), - )) - .with_properties(properties) - .build() -} - -pub fn asset_hub_wococo_genesis_config() -> AssetHubWococoChainSpec { - let mut properties = sc_chain_spec::Properties::new(); - properties.insert("ss58Format".into(), 42.into()); - properties.insert("tokenSymbol".into(), "WOC".into()); - properties.insert("tokenDecimals".into(), 12.into()); - let para_id = 1000; - AssetHubRococoChainSpec::builder( - asset_hub_rococo_runtime::WASM_BINARY.expect("WASM binary was not built, please build it!"), - Extensions { relay_chain: "wococo".into(), para_id }, - ) - .with_name("Wococo Asset Hub") - .with_id("asset-hub-wococo") - .with_chain_type(ChainType::Live) - .with_genesis_config_patch(asset_hub_rococo_genesis( - // initial collators. - vec![ - // 5C8RGkS8t5K93fB2hkgKbvSYs5iG6AknJMuQmbBDeazon9Lj - ( - hex!("02d526f43cf27e94f478f9db785dc86052a77c695e7c855211839d3fde3ce534").into(), - hex!("02d526f43cf27e94f478f9db785dc86052a77c695e7c855211839d3fde3ce534") - .unchecked_into(), - ), - // 5GePeDZQeBagXH7kH5QPKnQKi39Z5hoYFB5FmUtEvc4yxKej - ( - hex!("caa1f623ca183296c4521b56cc29c484ca017830f8cb538f30f2d4664d631814").into(), - hex!("caa1f623ca183296c4521b56cc29c484ca017830f8cb538f30f2d4664d631814") - .unchecked_into(), - ), - // 5CfnTTb9NMJDNKDntA83mHKoedZ7wjDC8ypLCTDd4NwUx3zv - ( - hex!("1ac112d635db2bd34e79ae2b99486cf7c0b71a928668e4feb3dc4633d368f965").into(), - hex!("1ac112d635db2bd34e79ae2b99486cf7c0b71a928668e4feb3dc4633d368f965") - .unchecked_into(), - ), - // 5EqheiwiG22gvGpN7cvrbeaQzhg7rzsYYVkYK4yj5vRrTQRQ - ( - hex!("7ac9d11be07334cd27e9eb849f5fc7677a10ad36b6ab38b377d3c8b2c0b08b66").into(), - hex!("7ac9d11be07334cd27e9eb849f5fc7677a10ad36b6ab38b377d3c8b2c0b08b66") - .unchecked_into(), - ), - ], - Vec::new(), + ASSET_HUB_ROCOCO_ED * 524_288, para_id.into(), )) .with_properties(properties) @@ -792,6 +724,7 @@ pub fn asset_hub_wococo_genesis_config() -> AssetHubWococoChainSpec { fn asset_hub_rococo_genesis( invulnerables: Vec<(AccountId, AuraId)>, endowed_accounts: Vec, + endowment: AssetHubBalance, id: ParaId, ) -> serde_json::Value { serde_json::json!({ @@ -799,7 +732,7 @@ fn asset_hub_rococo_genesis( balances: endowed_accounts .iter() .cloned() - .map(|k| (k, ASSET_HUB_ROCOCO_ED * 524_288)) + .map(|k| (k, endowment)) .collect(), }, "parachainInfo": asset_hub_rococo_runtime::ParachainInfoConfig { diff --git a/cumulus/polkadot-parachain/src/chain_spec/bridge_hubs.rs b/cumulus/polkadot-parachain/src/chain_spec/bridge_hubs.rs index 71fb9e5b140..94cef106001 100644 --- a/cumulus/polkadot-parachain/src/chain_spec/bridge_hubs.rs +++ b/cumulus/polkadot-parachain/src/chain_spec/bridge_hubs.rs @@ -29,9 +29,6 @@ pub enum BridgeHubRuntimeType { // used by benchmarks RococoDevelopment, - Wococo, - WococoLocal, - Kusama, KusamaLocal, // used by benchmarks @@ -66,8 +63,6 @@ impl FromStr for BridgeHubRuntimeType { rococo::BRIDGE_HUB_ROCOCO => 
Ok(BridgeHubRuntimeType::Rococo), rococo::BRIDGE_HUB_ROCOCO_LOCAL => Ok(BridgeHubRuntimeType::RococoLocal), rococo::BRIDGE_HUB_ROCOCO_DEVELOPMENT => Ok(BridgeHubRuntimeType::RococoDevelopment), - wococo::BRIDGE_HUB_WOCOCO => Ok(BridgeHubRuntimeType::Wococo), - wococo::BRIDGE_HUB_WOCOCO_LOCAL => Ok(BridgeHubRuntimeType::WococoLocal), _ => Err(format!("Value '{}' is not configured yet", value)), } } @@ -94,8 +89,6 @@ impl BridgeHubRuntimeType { BridgeHubRuntimeType::RococoLocal | BridgeHubRuntimeType::RococoDevelopment => Ok(Box::new(rococo::BridgeHubChainSpec::from_json_file(path)?)), - BridgeHubRuntimeType::Wococo | BridgeHubRuntimeType::WococoLocal => - Ok(Box::new(wococo::BridgeHubChainSpec::from_json_file(path)?)), } } @@ -171,17 +164,6 @@ impl BridgeHubRuntimeType { Some("Bob".to_string()), |_| (), ))), - BridgeHubRuntimeType::Wococo => - Ok(Box::new(wococo::BridgeHubChainSpec::from_json_bytes( - &include_bytes!("../../chain-specs/bridge-hub-wococo.json")[..], - )?)), - BridgeHubRuntimeType::WococoLocal => Ok(Box::new(wococo::local_config( - wococo::BRIDGE_HUB_WOCOCO_LOCAL, - "Wococo BridgeHub Local", - "wococo-local", - ParaId::new(1014), - Some("Bob".to_string()), - ))), } } } @@ -309,22 +291,9 @@ pub mod rococo { "polkadotXcm": { "safeXcmVersion": Some(SAFE_XCM_VERSION), }, - - "bridgeWococoGrandpa": { - "owner": bridges_pallet_owner.clone(), - }, "bridgeWestendGrandpa": { "owner": bridges_pallet_owner.clone(), }, - "bridgeRococoGrandpa": { - "owner": bridges_pallet_owner.clone(), - }, - "bridgeRococoMessages": { - "owner": bridges_pallet_owner.clone(), - }, - "bridgeWococoMessages": { - "owner": bridges_pallet_owner.clone(), - }, "bridgeWestendMessages": { "owner": bridges_pallet_owner.clone(), }, @@ -332,37 +301,6 @@ pub mod rococo { } } -/// Sub-module for Wococo setup (reuses stuff from Rococo) -pub mod wococo { - use super::ParaId; - use crate::chain_spec::bridge_hubs::rococo; - - pub(crate) const BRIDGE_HUB_WOCOCO: &str = "bridge-hub-wococo"; - pub(crate) const BRIDGE_HUB_WOCOCO_LOCAL: &str = "bridge-hub-wococo-local"; - - pub type BridgeHubChainSpec = rococo::BridgeHubChainSpec; - pub type RuntimeApi = rococo::RuntimeApi; - - pub fn local_config( - id: &str, - chain_name: &str, - relay_chain: &str, - para_id: ParaId, - bridges_pallet_owner_seed: Option, - ) -> BridgeHubChainSpec { - rococo::local_config( - id, - chain_name, - relay_chain, - para_id, - bridges_pallet_owner_seed, - |properties| { - properties.insert("tokenSymbol".into(), "WOOK".into()); - }, - ) - } -} - /// Sub-module for Kusama setup pub mod kusama { use super::{BridgeHubBalance, ParaId}; diff --git a/cumulus/polkadot-parachain/src/command.rs b/cumulus/polkadot-parachain/src/command.rs index 2799175b8ee..7bede22fea7 100644 --- a/cumulus/polkadot-parachain/src/command.rs +++ b/cumulus/polkadot-parachain/src/command.rs @@ -43,7 +43,6 @@ enum Runtime { AssetHubPolkadot, AssetHubKusama, AssetHubRococo, - AssetHubWococo, AssetHubWestend, Penpal(ParaId), ContractsRococo, @@ -95,8 +94,6 @@ fn runtime(id: &str) -> Runtime { Runtime::AssetHubKusama } else if id.starts_with("asset-hub-rococo") { Runtime::AssetHubRococo - } else if id.starts_with("asset-hub-wococo") { - Runtime::AssetHubWococo } else if id.starts_with("asset-hub-westend") | id.starts_with("westmint") { Runtime::AssetHubWestend } else if id.starts_with("penpal") { @@ -186,19 +183,6 @@ fn load_spec(id: &str) -> std::result::Result, String> { &include_bytes!("../chain-specs/asset-hub-rococo.json")[..], )?), - // -- Asset Hub Wococo - "asset-hub-wococo-dev" 
=> - Box::new(chain_spec::asset_hubs::asset_hub_wococo_development_config()), - "asset-hub-wococo-local" => - Box::new(chain_spec::asset_hubs::asset_hub_wococo_local_config()), - // the chain spec as used for generating the upgrade genesis values - "asset-hub-wococo-genesis" => - Box::new(chain_spec::asset_hubs::asset_hub_wococo_genesis_config()), - "asset-hub-wococo" => - Box::new(chain_spec::asset_hubs::AssetHubWococoChainSpec::from_json_bytes( - &include_bytes!("../chain-specs/asset-hub-wococo.json")[..], - )?), - // -- Asset Hub Westend "asset-hub-westend-dev" | "westmint-dev" => Box::new(chain_spec::asset_hubs::asset_hub_westend_development_config()), @@ -302,8 +286,6 @@ fn load_spec(id: &str) -> std::result::Result, String> { Box::new(chain_spec::asset_hubs::AssetHubKusamaChainSpec::from_json_file(path)?), Runtime::AssetHubRococo => Box::new(chain_spec::asset_hubs::AssetHubRococoChainSpec::from_json_file(path)?), - Runtime::AssetHubWococo => - Box::new(chain_spec::asset_hubs::AssetHubWococoChainSpec::from_json_file(path)?), Runtime::AssetHubWestend => Box::new( chain_spec::asset_hubs::AssetHubWestendChainSpec::from_json_file(path)?, ), @@ -464,7 +446,7 @@ macro_rules! construct_partials { )?; $code }, - Runtime::AssetHubRococo | Runtime::AssetHubWococo => { + Runtime::AssetHubRococo => { let $partials = new_partial::( &$config, crate::service::aura_build_import_queue::<_, AuraId>, @@ -522,14 +504,6 @@ macro_rules! construct_partials { )?; $code }, - chain_spec::bridge_hubs::BridgeHubRuntimeType::Wococo | - chain_spec::bridge_hubs::BridgeHubRuntimeType::WococoLocal => { - let $partials = new_partial::( - &$config, - crate::service::aura_build_import_queue::<_, AuraId>, - )?; - $code - }, }, Runtime::CollectivesPolkadot => { let $partials = new_partial::( @@ -605,7 +579,7 @@ macro_rules! construct_async_run { { $( $code )* }.map(|v| (v, task_manager)) }) }, - Runtime::AssetHubRococo | Runtime::AssetHubWococo => { + Runtime::AssetHubRococo => { runner.async_run(|$config| { let $components = new_partial::( &$config, @@ -739,18 +713,6 @@ macro_rules! 
construct_async_run { { $( $code )* }.map(|v| (v, task_manager)) }) }, - chain_spec::bridge_hubs::BridgeHubRuntimeType::Wococo | - chain_spec::bridge_hubs::BridgeHubRuntimeType::WococoLocal => { - runner.async_run(|$config| { - let $components = new_partial::( - &$config, - crate::service::aura_build_import_queue::<_, AuraId>, - )?; - - let task_manager = $components.task_manager; - { $( $code )* }.map(|v| (v, task_manager)) - }) - } } }, Runtime::Penpal(_) | Runtime::Default => { @@ -978,7 +940,7 @@ pub fn run() -> Result<()> { .await .map(|r| r.0) .map_err(Into::into), - Runtime::AssetHubRococo | Runtime::AssetHubWococo => crate::service::start_asset_hub_node::< + Runtime::AssetHubRococo => crate::service::start_asset_hub_node::< asset_hub_rococo_runtime::RuntimeApi, AuraId, >(config, polkadot_config, collator_options, id, hwbench) @@ -1077,14 +1039,6 @@ chain_spec::bridge_hubs::BridgeHubRuntimeType::Polkadot | >(config, polkadot_config, collator_options, id, hwbench) .await .map(|r| r.0), - chain_spec::bridge_hubs::BridgeHubRuntimeType::Wococo | - chain_spec::bridge_hubs::BridgeHubRuntimeType::WococoLocal => - crate::service::start_generic_aura_node::< - chain_spec::bridge_hubs::wococo::RuntimeApi, - AuraId, - >(config, polkadot_config, collator_options, id, hwbench) - .await - .map(|r| r.0), } .map_err(Into::into), Runtime::Penpal(_) | Runtime::Default => diff --git a/cumulus/scripts/bridges_rococo_westend.sh b/cumulus/scripts/bridges_rococo_westend.sh index 82b5f1942b2..9b3bd350276 100755 --- a/cumulus/scripts/bridges_rococo_westend.sh +++ b/cumulus/scripts/bridges_rococo_westend.sh @@ -170,8 +170,6 @@ function run_relay() { --bridge-hub-rococo-port 8943 \ --bridge-hub-rococo-version-mode Auto \ --bridge-hub-rococo-signer //Charlie \ - --westend-headers-to-bridge-hub-rococo-signer //Bob \ - --westend-parachains-to-bridge-hub-rococo-signer //Bob \ --bridge-hub-rococo-transactions-mortality 4 \ --westend-host localhost \ --westend-port 9945 \ @@ -180,8 +178,6 @@ function run_relay() { --bridge-hub-westend-port 8945 \ --bridge-hub-westend-version-mode Auto \ --bridge-hub-westend-signer //Charlie \ - --rococo-headers-to-bridge-hub-westend-signer //Bob \ - --rococo-parachains-to-bridge-hub-westend-signer //Bob \ --bridge-hub-westend-transactions-mortality 4 \ --lane "${LANE_ID}" } @@ -209,7 +205,7 @@ case "$1" in "ws://127.0.0.1:9910" \ "//Alice" \ "$GLOBAL_CONSENSUS_WESTEND_ASSET_HUB_WESTEND_1000_SOVEREIGN_ACCOUNT" \ - $((1000000000 + 50000000000 * 20)) + $((1000000000000 + 50000000000 * 20)) # HRMP open_hrmp_channels \ "ws://127.0.0.1:9942" \ @@ -227,19 +223,19 @@ case "$1" in "ws://127.0.0.1:8943" \ "//Alice" \ "$ASSET_HUB_ROCOCO_SOVEREIGN_ACCOUNT_AT_BRIDGE_HUB_ROCOCO" \ - $((1000000000 + 50000000000 * 20)) + $((1000000000000 + 50000000000 * 20)) # drip SA of lane dedicated to asset hub for paying rewards for delivery transfer_balance \ "ws://127.0.0.1:8943" \ "//Alice" \ "$ON_BRIDGE_HUB_ROCOCO_SOVEREIGN_ACCOUNT_FOR_LANE_00000002_bhwd_ThisChain" \ - $((1000000000 + 2000000000000)) + $((1000000000000 + 2000000000000)) # drip SA of lane dedicated to asset hub for paying rewards for delivery confirmation transfer_balance \ "ws://127.0.0.1:8943" \ "//Alice" \ "$ON_BRIDGE_HUB_ROCOCO_SOVEREIGN_ACCOUNT_FOR_LANE_00000002_bhwd_BridgedChain" \ - $((1000000000 + 2000000000000)) + $((1000000000000 + 2000000000000)) ;; init-asset-hub-westend-local) ensure_polkadot_js_api @@ -258,7 +254,7 @@ case "$1" in "ws://127.0.0.1:9010" \ "//Alice" \ 
"$GLOBAL_CONSENSUS_ROCOCO_ASSET_HUB_ROCOCO_1000_SOVEREIGN_ACCOUNT" \ - $((1000000000 + 50000000000 * 20)) + $((1000000000000000 + 50000000000 * 20)) # HRMP open_hrmp_channels \ "ws://127.0.0.1:9945" \ @@ -275,19 +271,19 @@ case "$1" in "ws://127.0.0.1:8945" \ "//Alice" \ "$ASSET_HUB_WESTEND_SOVEREIGN_ACCOUNT_AT_BRIDGE_HUB_WESTEND" \ - $((1000000000 + 50000000000 * 20)) + $((1000000000000000 + 50000000000 * 20)) # drip SA of lane dedicated to asset hub for paying rewards for delivery transfer_balance \ "ws://127.0.0.1:8945" \ "//Alice" \ "$ON_BRIDGE_HUB_WESTEND_SOVEREIGN_ACCOUNT_FOR_LANE_00000002_bhro_ThisChain" \ - $((1000000000 + 2000000000000)) + $((1000000000000000 + 2000000000000)) # drip SA of lane dedicated to asset hub for paying rewards for delivery confirmation transfer_balance \ "ws://127.0.0.1:8945" \ "//Alice" \ "$ON_BRIDGE_HUB_WESTEND_SOVEREIGN_ACCOUNT_FOR_LANE_00000002_bhro_BridgedChain" \ - $((1000000000 + 2000000000000)) + $((1000000000000000 + 2000000000000)) ;; reserve-transfer-assets-from-asset-hub-rococo-local) ensure_polkadot_js_api @@ -309,7 +305,7 @@ case "$1" in "//Alice" \ "$(jq --null-input '{ "V3": { "parents": 2, "interior": { "X2": [ { "GlobalConsensus": "Westend" }, { "Parachain": 1000 } ] } } }')" \ "$(jq --null-input '{ "V3": { "parents": 0, "interior": { "X1": { "AccountId32": { "id": [212, 53, 147, 199, 21, 253, 211, 28, 97, 20, 26, 189, 4, 169, 159, 214, 130, 44, 133, 88, 133, 76, 205, 227, 154, 86, 132, 231, 165, 109, 162, 125] } } } } }')" \ - "$(jq --null-input '{ "V3": [ { "id": { "Concrete": { "parents": 2, "interior": { "X1": { "GlobalConsensus": "Westend" } } } }, "fun": { "Fungible": 140000000000 } } ] }')" \ + "$(jq --null-input '{ "V3": [ { "id": { "Concrete": { "parents": 2, "interior": { "X1": { "GlobalConsensus": "Westend" } } } }, "fun": { "Fungible": 40000000000 } } ] }')" \ 0 \ "Unlimited" ;; diff --git a/cumulus/scripts/bridges_rococo_wococo.sh b/cumulus/scripts/bridges_rococo_wococo.sh deleted file mode 100755 index dd7c7062a3b..00000000000 --- a/cumulus/scripts/bridges_rococo_wococo.sh +++ /dev/null @@ -1,386 +0,0 @@ -#!/bin/bash - -# import common functions -source "$(dirname "$0")"/bridges_common.sh - -# Expected sovereign accounts. -# -# Generated by: -# -# #[test] -# fn generate_sovereign_accounts() { -# use sp_core::crypto::Ss58Codec; -# use polkadot_parachain_primitives::primitives::Sibling; -# -# parameter_types! 
{ -# pub UniversalLocationAHR: InteriorMultiLocation = X2(GlobalConsensus(Rococo), Parachain(1000)); -# pub UniversalLocationAHW: InteriorMultiLocation = X2(GlobalConsensus(Wococo), Parachain(1000)); -# } -# -# // SS58=42 -# println!("GLOBAL_CONSENSUS_ROCOCO_SOVEREIGN_ACCOUNT=\"{}\"", -# frame_support::sp_runtime::AccountId32::new( -# GlobalConsensusConvertsFor::::convert_location( -# &MultiLocation { parents: 2, interior: X1(GlobalConsensus(Rococo)) }).unwrap() -# ).to_ss58check_with_version(42_u16.into()) -# ); -# println!("GLOBAL_CONSENSUS_ROCOCO_ASSET_HUB_ROCOCO_1000_SOVEREIGN_ACCOUNT=\"{}\"", -# frame_support::sp_runtime::AccountId32::new( -# GlobalConsensusParachainConvertsFor::::convert_location( -# &MultiLocation { parents: 2, interior: X2(GlobalConsensus(Rococo), Parachain(1000)) }).unwrap() -# ).to_ss58check_with_version(42_u16.into()) -# ); -# println!("ASSET_HUB_WOCOCO_SOVEREIGN_ACCOUNT_AT_BRIDGE_HUB_WOCOCO=\"{}\"", -# frame_support::sp_runtime::AccountId32::new( -# SiblingParachainConvertsVia::::convert_location( -# &MultiLocation { parents: 1, interior: X1(Parachain(1000)) }).unwrap() -# ).to_ss58check_with_version(42_u16.into()) -# ); -# -# // SS58=42 -# println!("GLOBAL_CONSENSUS_WOCOCO_SOVEREIGN_ACCOUNT=\"{}\"", -# frame_support::sp_runtime::AccountId32::new( -# GlobalConsensusConvertsFor::::convert_location( -# &MultiLocation { parents: 2, interior: X1(GlobalConsensus(Wococo)) }).unwrap() -# ).to_ss58check_with_version(42_u16.into()) -# ); -# println!("GLOBAL_CONSENSUS_WOCOCO_ASSET_HUB_WOCOCO_1000_SOVEREIGN_ACCOUNT=\"{}\"", -# frame_support::sp_runtime::AccountId32::new( -# GlobalConsensusParachainConvertsFor::::convert_location( -# &MultiLocation { parents: 2, interior: X2(GlobalConsensus(Wococo), Parachain(1000)) }).unwrap() -# ).to_ss58check_with_version(42_u16.into()) -# ); -# println!("ASSET_HUB_ROCOCO_SOVEREIGN_ACCOUNT_AT_BRIDGE_HUB_ROCOCO=\"{}\"", -# frame_support::sp_runtime::AccountId32::new( -# SiblingParachainConvertsVia::::convert_location( -# &MultiLocation { parents: 1, interior: X1(Parachain(1000)) }).unwrap() -# ).to_ss58check_with_version(42_u16.into()) -# ); -# } -GLOBAL_CONSENSUS_ROCOCO_SOVEREIGN_ACCOUNT="5GxRGwT8bU1JeBPTUXc7LEjZMxNrK8MyL2NJnkWFQJTQ4sii" -GLOBAL_CONSENSUS_ROCOCO_ASSET_HUB_ROCOCO_1000_SOVEREIGN_ACCOUNT="5CfNu7eH3SJvqqPt3aJh38T8dcFvhGzEohp9tsd41ANhXDnQ" -ASSET_HUB_WOCOCO_SOVEREIGN_ACCOUNT_AT_BRIDGE_HUB_WOCOCO="5Eg2fntNprdN3FgH4sfEaaZhYtddZQSQUqvYJ1f2mLtinVhV" -GLOBAL_CONSENSUS_WOCOCO_SOVEREIGN_ACCOUNT="5EWw2NzfPr2DCahourp33cya6bGWEJViTnJN6Z2ruFevpJML" -GLOBAL_CONSENSUS_WOCOCO_ASSET_HUB_WOCOCO_1000_SOVEREIGN_ACCOUNT="5EJX8L4dwGyYnCsjZ91LfWAsm3rCN8vY2AYvT4mauMEjsrQz" -ASSET_HUB_ROCOCO_SOVEREIGN_ACCOUNT_AT_BRIDGE_HUB_ROCOCO="5Eg2fntNprdN3FgH4sfEaaZhYtddZQSQUqvYJ1f2mLtinVhV" - -# Expected sovereign accounts for rewards on BridgeHubs. 
-# -# Generated by: -#[test] -#fn generate_sovereign_accounts_for_rewards() { -# use bp_messages::LaneId; -# use bp_relayers::{PayRewardFromAccount, RewardsAccountOwner, RewardsAccountParams}; -# use sp_core::crypto::Ss58Codec; -# -# // SS58=42 -# println!( -# "ON_BRIDGE_HUB_ROCOCO_SOVEREIGN_ACCOUNT_FOR_LANE_00000001_bhwo_ThisChain=\"{}\"", -# frame_support::sp_runtime::AccountId32::new( -# PayRewardFromAccount::<[u8; 32], [u8; 32]>::rewards_account(RewardsAccountParams::new( -# LaneId([0, 0, 0, 1]), -# *b"bhwo", -# RewardsAccountOwner::ThisChain -# )) -# ) -# .to_ss58check_with_version(42_u16.into()) -# ); -# // SS58=42 -# println!( -# "ON_BRIDGE_HUB_ROCOCO_SOVEREIGN_ACCOUNT_FOR_LANE_00000001_bhwo_BridgedChain=\"{}\"", -# frame_support::sp_runtime::AccountId32::new( -# PayRewardFromAccount::<[u8; 32], [u8; 32]>::rewards_account(RewardsAccountParams::new( -# LaneId([0, 0, 0, 1]), -# *b"bhwo", -# RewardsAccountOwner::BridgedChain -# )) -# ) -# .to_ss58check_with_version(42_u16.into()) -# ); -# -# // SS58=42 -# println!( -# "ON_BRIDGE_HUB_WOCOCO_SOVEREIGN_ACCOUNT_FOR_LANE_00000001_bhro_ThisChain=\"{}\"", -# frame_support::sp_runtime::AccountId32::new( -# PayRewardFromAccount::<[u8; 32], [u8; 32]>::rewards_account(RewardsAccountParams::new( -# LaneId([0, 0, 0, 1]), -# *b"bhro", -# RewardsAccountOwner::ThisChain -# )) -# ) -# .to_ss58check_with_version(42_u16.into()) -# ); -# // SS58=42 -# println!( -# "ON_BRIDGE_HUB_WOCOCO_SOVEREIGN_ACCOUNT_FOR_LANE_00000001_bhro_BridgedChain=\"{}\"", -# frame_support::sp_runtime::AccountId32::new( -# PayRewardFromAccount::<[u8; 32], [u8; 32]>::rewards_account(RewardsAccountParams::new( -# LaneId([0, 0, 0, 1]), -# *b"bhro", -# RewardsAccountOwner::BridgedChain -# )) -# ) -# .to_ss58check_with_version(42_u16.into()) -# ); -#} -ON_BRIDGE_HUB_ROCOCO_SOVEREIGN_ACCOUNT_FOR_LANE_00000001_bhwo_ThisChain="5EHnXaT5BhiS8YRPMeHi97YHofTtNx4pLNb8wR8TwjVq1gzU" -ON_BRIDGE_HUB_ROCOCO_SOVEREIGN_ACCOUNT_FOR_LANE_00000001_bhwo_BridgedChain="5EHnXaT5BhiS8YRPMeHyt95svA95qWAh53XeVMpJQZNZHAzj" -ON_BRIDGE_HUB_WOCOCO_SOVEREIGN_ACCOUNT_FOR_LANE_00000001_bhro_ThisChain="5EHnXaT5BhiS8YRNuCukWXTQdAqARjjXmpjehjx1YZNE5keZ" -ON_BRIDGE_HUB_WOCOCO_SOVEREIGN_ACCOUNT_FOR_LANE_00000001_bhro_BridgedChain="5EHnXaT5BhiS8YRNuCv2FYzzjfWMtHqQWVgAFgdr1PExMN94" - -LANE_ID="00000001" - -function init_ro_wo() { - ensure_relayer - - RUST_LOG=runtime=trace,rpc=trace,bridge=trace \ - ~/local_bridge_testing/bin/substrate-relay init-bridge rococo-to-bridge-hub-wococo \ - --source-host localhost \ - --source-port 9942 \ - --source-version-mode Auto \ - --target-host localhost \ - --target-port 8945 \ - --target-version-mode Auto \ - --target-signer //Bob -} - -function init_wo_ro() { - ensure_relayer - - RUST_LOG=runtime=trace,rpc=trace,bridge=trace \ - ~/local_bridge_testing/bin/substrate-relay init-bridge wococo-to-bridge-hub-rococo \ - --source-host localhost \ - --source-port 9945 \ - --source-version-mode Auto \ - --target-host localhost \ - --target-port 8943 \ - --target-version-mode Auto \ - --target-signer //Bob -} - -function run_relay() { - ensure_relayer - - RUST_LOG=runtime=trace,rpc=trace,bridge=trace \ - ~/local_bridge_testing/bin/substrate-relay relay-headers-and-messages bridge-hub-rococo-bridge-hub-wococo \ - --rococo-host localhost \ - --rococo-port 9942 \ - --rococo-version-mode Auto \ - --bridge-hub-rococo-host localhost \ - --bridge-hub-rococo-port 8943 \ - --bridge-hub-rococo-version-mode Auto \ - --bridge-hub-rococo-signer //Charlie \ - --wococo-headers-to-bridge-hub-rococo-signer //Bob 
\ - --wococo-parachains-to-bridge-hub-rococo-signer //Bob \ - --bridge-hub-rococo-transactions-mortality 4 \ - --wococo-host localhost \ - --wococo-port 9945 \ - --wococo-version-mode Auto \ - --bridge-hub-wococo-host localhost \ - --bridge-hub-wococo-port 8945 \ - --bridge-hub-wococo-version-mode Auto \ - --bridge-hub-wococo-signer //Charlie \ - --rococo-headers-to-bridge-hub-wococo-signer //Bob \ - --rococo-parachains-to-bridge-hub-wococo-signer //Bob \ - --bridge-hub-wococo-transactions-mortality 4 \ - --lane "${LANE_ID}" -} - -case "$1" in - run-relay) - init_ro_wo - init_wo_ro - run_relay - ;; - init-asset-hub-rococo-local) - ensure_polkadot_js_api - # create foreign assets for native Wococo token (governance call on Rococo) - force_create_foreign_asset \ - "ws://127.0.0.1:9942" \ - "//Alice" \ - 1000 \ - "ws://127.0.0.1:9910" \ - "$(jq --null-input '{ "parents": 2, "interior": { "X1": { "GlobalConsensus": "Wococo" } } }')" \ - "$GLOBAL_CONSENSUS_WOCOCO_SOVEREIGN_ACCOUNT" \ - 10000000000 \ - true - # drip SA which holds reserves - transfer_balance \ - "ws://127.0.0.1:9910" \ - "//Alice" \ - "$GLOBAL_CONSENSUS_WOCOCO_ASSET_HUB_WOCOCO_1000_SOVEREIGN_ACCOUNT" \ - $((1000000000 + 50000000000 * 20)) - # HRMP - open_hrmp_channels \ - "ws://127.0.0.1:9942" \ - "//Alice" \ - 1000 1013 4 524288 - open_hrmp_channels \ - "ws://127.0.0.1:9942" \ - "//Alice" \ - 1013 1000 4 524288 - ;; - init-bridge-hub-rococo-local) - ensure_polkadot_js_api - # SA of sibling asset hub pays for the execution - transfer_balance \ - "ws://127.0.0.1:8943" \ - "//Alice" \ - "$ASSET_HUB_ROCOCO_SOVEREIGN_ACCOUNT_AT_BRIDGE_HUB_ROCOCO" \ - $((1000000000 + 50000000000 * 20)) - # drip SA of lane dedicated to asset hub for paying rewards for delivery - transfer_balance \ - "ws://127.0.0.1:8943" \ - "//Alice" \ - "$ON_BRIDGE_HUB_ROCOCO_SOVEREIGN_ACCOUNT_FOR_LANE_00000001_bhwo_ThisChain" \ - $((1000000000 + 2000000000000)) - # drip SA of lane dedicated to asset hub for paying rewards for delivery confirmation - transfer_balance \ - "ws://127.0.0.1:8943" \ - "//Alice" \ - "$ON_BRIDGE_HUB_ROCOCO_SOVEREIGN_ACCOUNT_FOR_LANE_00000001_bhwo_BridgedChain" \ - $((1000000000 + 2000000000000)) - ;; - init-asset-hub-wococo-local) - ensure_polkadot_js_api - # set Wococo flavor - set_storage with: - # - `key` is `HexDisplay::from(&asset_hub_rococo_runtime::xcm_config::Flavor::key())` - # - `value` is `HexDisplay::from(&asset_hub_rococo_runtime::RuntimeFlavor::Wococo.encode())` - set_storage \ - "ws://127.0.0.1:9945" \ - "//Alice" \ - 1000 \ - "ws://127.0.0.1:9010" \ - "$(jq --null-input '[["0x48297505634037ef48c848c99c0b1f1b", "0x01"]]')" - # create foreign assets for native Rococo token (governance call on Wococo) - force_create_foreign_asset \ - "ws://127.0.0.1:9945" \ - "//Alice" \ - 1000 \ - "ws://127.0.0.1:9010" \ - "$(jq --null-input '{ "parents": 2, "interior": { "X1": { "GlobalConsensus": "Rococo" } } }')" \ - "$GLOBAL_CONSENSUS_ROCOCO_SOVEREIGN_ACCOUNT" \ - 10000000000 \ - true - # drip SA which holds reserves - transfer_balance \ - "ws://127.0.0.1:9010" \ - "//Alice" \ - "$GLOBAL_CONSENSUS_ROCOCO_ASSET_HUB_ROCOCO_1000_SOVEREIGN_ACCOUNT" \ - $((1000000000 + 50000000000 * 20)) - # HRMP - open_hrmp_channels \ - "ws://127.0.0.1:9945" \ - "//Alice" \ - 1000 1014 4 524288 - open_hrmp_channels \ - "ws://127.0.0.1:9945" \ - "//Alice" \ - 1014 1000 4 524288 - ;; - init-bridge-hub-wococo-local) - # set Wococo flavor - set_storage with: - # - `key` is `HexDisplay::from(&bridge_hub_rococo_runtime::xcm_config::Flavor::key())` - # - `value` is 
`HexDisplay::from(&bridge_hub_rococo_runtime::RuntimeFlavor::Wococo.encode())` - set_storage \ - "ws://127.0.0.1:9945" \ - "//Alice" \ - 1014 \ - "ws://127.0.0.1:8945" \ - "$(jq --null-input '[["0x48297505634037ef48c848c99c0b1f1b", "0x01"]]')" - # SA of sibling asset hub pays for the execution - transfer_balance \ - "ws://127.0.0.1:8945" \ - "//Alice" \ - "$ASSET_HUB_WOCOCO_SOVEREIGN_ACCOUNT_AT_BRIDGE_HUB_WOCOCO" \ - $((1000000000 + 50000000000 * 20)) - # drip SA of lane dedicated to asset hub for paying rewards for delivery - transfer_balance \ - "ws://127.0.0.1:8945" \ - "//Alice" \ - "$ON_BRIDGE_HUB_WOCOCO_SOVEREIGN_ACCOUNT_FOR_LANE_00000001_bhro_ThisChain" \ - $((1000000000 + 2000000000000)) - # drip SA of lane dedicated to asset hub for paying rewards for delivery confirmation - transfer_balance \ - "ws://127.0.0.1:8945" \ - "//Alice" \ - "$ON_BRIDGE_HUB_WOCOCO_SOVEREIGN_ACCOUNT_FOR_LANE_00000001_bhro_BridgedChain" \ - $((1000000000 + 2000000000000)) - ;; - reserve-transfer-assets-from-asset-hub-rococo-local) - ensure_polkadot_js_api - # send ROCs to Alice account on AHW - limited_reserve_transfer_assets \ - "ws://127.0.0.1:9910" \ - "//Alice" \ - "$(jq --null-input '{ "V3": { "parents": 2, "interior": { "X2": [ { "GlobalConsensus": "Wococo" }, { "Parachain": 1000 } ] } } }')" \ - "$(jq --null-input '{ "V3": { "parents": 0, "interior": { "X1": { "AccountId32": { "id": [212, 53, 147, 199, 21, 253, 211, 28, 97, 20, 26, 189, 4, 169, 159, 214, 130, 44, 133, 88, 133, 76, 205, 227, 154, 86, 132, 231, 165, 109, 162, 125] } } } } }')" \ - "$(jq --null-input '{ "V3": [ { "id": { "Concrete": { "parents": 1, "interior": "Here" } }, "fun": { "Fungible": 200000000000 } } ] }')" \ - 0 \ - "Unlimited" - ;; - reserve-transfer-assets-from-asset-hub-wococo-local) - ensure_polkadot_js_api - # send WOCs to Alice account on AHR - limited_reserve_transfer_assets \ - "ws://127.0.0.1:9010" \ - "//Alice" \ - "$(jq --null-input '{ "V3": { "parents": 2, "interior": { "X2": [ { "GlobalConsensus": "Rococo" }, { "Parachain": 1000 } ] } } }')" \ - "$(jq --null-input '{ "V3": { "parents": 0, "interior": { "X1": { "AccountId32": { "id": [212, 53, 147, 199, 21, 253, 211, 28, 97, 20, 26, 189, 4, 169, 159, 214, 130, 44, 133, 88, 133, 76, 205, 227, 154, 86, 132, 231, 165, 109, 162, 125] } } } } }')" \ - "$(jq --null-input '{ "V3": [ { "id": { "Concrete": { "parents": 1, "interior": "Here" } }, "fun": { "Fungible": 150000000000 } } ] }')" \ - 0 \ - "Unlimited" - ;; - claim-rewards-bridge-hub-rococo-local) - ensure_polkadot_js_api - # bhwo -> [62, 68, 77, 6f] -> 0x6268776f - claim_rewards \ - "ws://127.0.0.1:8943" \ - "//Charlie" \ - "0x${LANE_ID}" \ - "0x6268776f" \ - "ThisChain" - claim_rewards \ - "ws://127.0.0.1:8943" \ - "//Charlie" \ - "0x${LANE_ID}" \ - "0x6268776f" \ - "BridgedChain" - ;; - claim-rewards-bridge-hub-wococo-local) - # bhro -> [62, 68, 72, 6f] -> 0x6268726f - claim_rewards \ - "ws://127.0.0.1:8945" \ - "//Charlie" \ - "0x${LANE_ID}" \ - "0x6268726f" \ - "ThisChain" - claim_rewards \ - "ws://127.0.0.1:8945" \ - "//Charlie" \ - "0x${LANE_ID}" \ - "0x6268726f" \ - "BridgedChain" - ;; - stop) - pkill -f polkadot - pkill -f parachain - ;; - import) - # to avoid trigger anything here - ;; - *) - echo "A command is require. 
Supported commands for: - Local (zombienet) run: - - run-relay - - init-asset-hub-rococo-local - - init-bridge-hub-rococo-local - - init-asset-hub-wococo-local - - init-bridge-hub-wococo-local - - reserve-transfer-assets-from-asset-hub-rococo-local - - reserve-transfer-assets-from-asset-hub-wococo-local - - claim-rewards-bridge-hub-rococo-local - - claim-rewards-bridge-hub-wococo-local"; - exit 1 - ;; -esac diff --git a/cumulus/zombienet/bridge-hubs/bridge_hub_rococo_local_network.toml b/cumulus/zombienet/bridge-hubs/bridge_hub_rococo_local_network.toml index a117942858e..99a7d0035b5 100644 --- a/cumulus/zombienet/bridge-hubs/bridge_hub_rococo_local_network.toml +++ b/cumulus/zombienet/bridge-hubs/bridge_hub_rococo_local_network.toml @@ -41,8 +41,7 @@ cumulus_based = true ws_port = 8943 args = [ "-lparachain=debug,runtime::bridge-hub=trace,runtime::bridge=trace,runtime::bridge-dispatch=trace,bridge=trace,runtime::bridge-messages=trace,xcm=trace", - "--force-authoring", - "--", "--rpc-port 48933" + "--force-authoring" ] # run bob as parachain collator @@ -54,8 +53,7 @@ cumulus_based = true ws_port = 8944 args = [ "-lparachain=trace,runtime::bridge-hub=trace,runtime::bridge=trace,runtime::bridge-dispatch=trace,bridge=trace,runtime::bridge-messages=trace,xcm=trace", - "--force-authoring", - "--", "--rpc-port 48934" + "--force-authoring" ] [[parachains]] @@ -69,16 +67,14 @@ cumulus_based = true ws_port = 9910 command = "{{POLKADOT_PARACHAIN_BINARY_PATH_FOR_ASSET_HUB_ROCOCO}}" args = [ - "-lparachain=debug,xcm=trace,runtime::bridge-transfer=trace", - "--", "--rpc-port 58933" + "-lparachain=debug,xcm=trace,runtime::bridge-transfer=trace" ] [[parachains.collators]] name = "asset-hub-rococo-collator2" command = "{{POLKADOT_PARACHAIN_BINARY_PATH_FOR_ASSET_HUB_ROCOCO}}" args = [ - "-lparachain=debug,xcm=trace,runtime::bridge-transfer=trace", - "--", "--rpc-port 58833" + "-lparachain=debug,xcm=trace,runtime::bridge-transfer=trace" ] #[[hrmp_channels]] diff --git a/cumulus/zombienet/bridge-hubs/bridge_hub_westend_local_network.toml b/cumulus/zombienet/bridge-hubs/bridge_hub_westend_local_network.toml index 4c345d3825c..1919d1c63f2 100644 --- a/cumulus/zombienet/bridge-hubs/bridge_hub_westend_local_network.toml +++ b/cumulus/zombienet/bridge-hubs/bridge_hub_westend_local_network.toml @@ -41,8 +41,7 @@ cumulus_based = true ws_port = 8945 args = [ "-lparachain=debug,runtime::mmr=info,substrate=info,runtime=info,runtime::bridge-hub=trace,runtime::bridge=trace,runtime::bridge-dispatch=trace,bridge=trace,runtime::bridge-messages=trace,xcm=trace", - "--force-authoring", - "--", "--rpc-port 48935" + "--force-authoring" ] # run bob as parachain collator @@ -54,8 +53,7 @@ cumulus_based = true ws_port = 8946 args = [ "-lparachain=trace,runtime::mmr=info,substrate=info,runtime=info,runtime::bridge-hub=trace,runtime::bridge=trace,runtime::bridge-dispatch=trace,bridge=trace,runtime::bridge-messages=trace,xcm=trace", - "--force-authoring", - "--", "--rpc-port 48936" + "--force-authoring" ] [[parachains]] @@ -69,16 +67,14 @@ cumulus_based = true ws_port = 9010 command = "{{POLKADOT_PARACHAIN_BINARY_PATH_FOR_ASSET_HUB_WESTEND}}" args = [ - "-lparachain=debug,xcm=trace,runtime::bridge-transfer=trace", - "--", "--rpc-port 38933" + "-lparachain=debug,xcm=trace,runtime::bridge-transfer=trace" ] [[parachains.collators]] name = "asset-hub-westend-collator2" command = "{{POLKADOT_PARACHAIN_BINARY_PATH_FOR_ASSET_HUB_WESTEND}}" args = [ - "-lparachain=debug,xcm=trace,runtime::bridge-transfer=trace", - "--", "--rpc-port 38833" 
+ "-lparachain=debug,xcm=trace,runtime::bridge-transfer=trace" ] #[[hrmp_channels]] diff --git a/cumulus/zombienet/bridge-hubs/bridge_hub_wococo_local_network.toml b/cumulus/zombienet/bridge-hubs/bridge_hub_wococo_local_network.toml deleted file mode 100644 index ae5cf641f66..00000000000 --- a/cumulus/zombienet/bridge-hubs/bridge_hub_wococo_local_network.toml +++ /dev/null @@ -1,94 +0,0 @@ -[settings] -node_spawn_timeout = 240 - -[relaychain] -default_command = "{{POLKADOT_BINARY_PATH}}" -default_args = [ "-lparachain=debug,xcm=trace" ] -chain = "wococo-local" - - [[relaychain.nodes]] - name = "alice-wococo-validator" - validator = true - rpc_port = 9935 - ws_port = 9945 - balance = 2000000000000 - - [[relaychain.nodes]] - name = "bob-wococo-validator" - validator = true - rpc_port = 9936 - ws_port = 9946 - balance = 2000000000000 - - [[relaychain.nodes]] - name = "charlie-wococo-validator" - validator = true - rpc_port = 9937 - ws_port = 9947 - balance = 2000000000000 - -[[parachains]] -id = 1014 -chain = "bridge-hub-wococo-local" -cumulus_based = true - - # run alice as parachain collator - [[parachains.collators]] - name = "bridge-hub-wococo-collator1" - validator = true - command = "{{POLKADOT_PARACHAIN_BINARY_PATH}}" - rpc_port = 8935 - ws_port = 8945 - args = [ - "-lparachain=debug,runtime::mmr=info,substrate=info,runtime=info,runtime::bridge-hub=trace,runtime::bridge=trace,runtime::bridge-dispatch=trace,bridge=trace,runtime::bridge-messages=trace,xcm=trace", - "--force-authoring", - "--", "--port 41335", "--rpc-port 48935" - ] - - # run bob as parachain collator - [[parachains.collators]] - name = "bridge-hub-wococo-collator2" - validator = true - command = "{{POLKADOT_PARACHAIN_BINARY_PATH}}" - rpc_port = 8936 - ws_port = 8946 - args = [ - "-lparachain=trace,runtime::mmr=info,substrate=info,runtime=info,runtime::bridge-hub=trace,runtime::bridge=trace,runtime::bridge-dispatch=trace,bridge=trace,runtime::bridge-messages=trace,xcm=trace", - "--force-authoring", - "--", "--port 41336", "--rpc-port 48936" - ] - -[[parachains]] -id = 1000 -chain = "asset-hub-wococo-local" -cumulus_based = true - - [[parachains.collators]] - name = "asset-hub-wococo-collator1" - rpc_port = 9011 - ws_port = 9010 - command = "{{POLKADOT_PARACHAIN_BINARY_PATH_FOR_ASSET_HUB_WOCOCO}}" - args = [ - "-lparachain=debug,xcm=trace,runtime::bridge-transfer=trace", - "--", "--port 31333", "--rpc-port 38933" - ] - - [[parachains.collators]] - name = "asset-hub-wococo-collator2" - command = "{{POLKADOT_PARACHAIN_BINARY_PATH_FOR_ASSET_HUB_WOCOCO}}" - args = [ - "-lparachain=debug,xcm=trace,runtime::bridge-transfer=trace", - "--", "--port 31433", "--rpc-port 38833" - ] - -#[[hrmp_channels]] -#sender = 1000 -#recipient = 1014 -#max_capacity = 4 -#max_message_size = 524288 -# -#[[hrmp_channels]] -#sender = 1014 -#recipient = 1000 -#max_capacity = 4 -#max_message_size = 524288 -- GitLab From ea4085ab7448bb557a1558a25af164cf364e88d6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Wed, 15 Nov 2023 23:54:08 +0100 Subject: [PATCH 063/199] frame-system: Add `last_runtime_upgrade_spec_version` (#2351) Adds a function for querying the last runtime upgrade spec version. This can be useful for when writing runtime level migrations to ensure that they are not executed multiple times. An example would be a session key migration. 
--------- Co-authored-by: Liam Aharon Co-authored-by: Oliver Tale-Yazdi --- Cargo.lock | 1 + substrate/frame/executive/src/lib.rs | 52 ++++++++++++++-------------- substrate/frame/system/Cargo.toml | 1 + substrate/frame/system/src/lib.rs | 19 ++++++++++ substrate/frame/system/src/tests.rs | 25 ++++++++++++- 5 files changed, 71 insertions(+), 27 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 0c374a90a18..7854042ddbc 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5904,6 +5904,7 @@ version = "4.0.0-dev" dependencies = [ "cfg-if", "criterion 0.4.0", + "docify", "frame-support", "log", "parity-scale-codec", diff --git a/substrate/frame/executive/src/lib.rs b/substrate/frame/executive/src/lib.rs index e2c906c1bf6..dec1fe158bd 100644 --- a/substrate/frame/executive/src/lib.rs +++ b/substrate/frame/executive/src/lib.rs @@ -487,6 +487,12 @@ where let mut weight = Weight::zero(); if Self::runtime_upgraded() { weight = weight.saturating_add(Self::execute_on_runtime_upgrade()); + + frame_system::LastRuntimeUpgrade::::put( + frame_system::LastRuntimeUpgradeInfo::from( + >::get(), + ), + ); } >::initialize(block_number, parent_hash, digest); weight = weight.saturating_add(::note_finished_initialize(); } - /// Returns if the runtime was upgraded since the last time this function was called. + /// Returns if the runtime has been upgraded, based on [`frame_system::LastRuntimeUpgrade`]. fn runtime_upgraded() -> bool { let last = frame_system::LastRuntimeUpgrade::::get(); let current = >::get(); - if last.map(|v| v.was_upgraded(¤t)).unwrap_or(true) { - frame_system::LastRuntimeUpgrade::::put( - frame_system::LastRuntimeUpgradeInfo::from(current), - ); - true - } else { - false - } + last.map(|v| v.was_upgraded(¤t)).unwrap_or(true) } fn initial_checks(block: &Block) { @@ -755,7 +754,7 @@ mod tests { traits::{fungible, ConstU32, ConstU64, ConstU8, Currency}, weights::{ConstantMultiplier, IdentityFee, RuntimeDbWeight, Weight, WeightToFee}, }; - use frame_system::{ChainContext, LastRuntimeUpgradeInfo}; + use frame_system::{ChainContext, LastRuntimeUpgrade, LastRuntimeUpgradeInfo}; use pallet_balances::Call as BalancesCall; use pallet_transaction_payment::CurrencyAdapter; @@ -994,6 +993,9 @@ mod tests { sp_io::storage::set(TEST_KEY, "custom_upgrade".as_bytes()); sp_io::storage::set(CUSTOM_ON_RUNTIME_KEY, &true.encode()); System::deposit_event(frame_system::Event::CodeUpdated); + + assert_eq!(0, System::last_runtime_upgrade_spec_version()); + Weight::from_parts(100, 0) } } @@ -1356,17 +1358,13 @@ mod tests { new_test_ext(1).execute_with(|| { RuntimeVersionTestValues::mutate(|v| *v = Default::default()); // It should be added at genesis - assert!(frame_system::LastRuntimeUpgrade::::exists()); + assert!(LastRuntimeUpgrade::::exists()); assert!(!Executive::runtime_upgraded()); RuntimeVersionTestValues::mutate(|v| { *v = sp_version::RuntimeVersion { spec_version: 1, ..Default::default() } }); assert!(Executive::runtime_upgraded()); - assert_eq!( - Some(LastRuntimeUpgradeInfo { spec_version: 1.into(), spec_name: "".into() }), - frame_system::LastRuntimeUpgrade::::get(), - ); RuntimeVersionTestValues::mutate(|v| { *v = sp_version::RuntimeVersion { @@ -1376,27 +1374,18 @@ mod tests { } }); assert!(Executive::runtime_upgraded()); - assert_eq!( - Some(LastRuntimeUpgradeInfo { spec_version: 1.into(), spec_name: "test".into() }), - frame_system::LastRuntimeUpgrade::::get(), - ); RuntimeVersionTestValues::mutate(|v| { *v = sp_version::RuntimeVersion { - spec_version: 1, - spec_name: "test".into(), + spec_version: 0, 
impl_version: 2, ..Default::default() } }); assert!(!Executive::runtime_upgraded()); - frame_system::LastRuntimeUpgrade::::take(); + LastRuntimeUpgrade::::take(); assert!(Executive::runtime_upgraded()); - assert_eq!( - Some(LastRuntimeUpgradeInfo { spec_version: 1.into(), spec_name: "test".into() }), - frame_system::LastRuntimeUpgrade::::get(), - ); }) } @@ -1444,6 +1433,10 @@ mod tests { assert_eq!(&sp_io::storage::get(TEST_KEY).unwrap()[..], *b"module"); assert_eq!(sp_io::storage::get(CUSTOM_ON_RUNTIME_KEY).unwrap(), true.encode()); + assert_eq!( + Some(RuntimeVersionTestValues::get().into()), + LastRuntimeUpgrade::::get(), + ) }); } @@ -1519,6 +1512,9 @@ mod tests { #[test] fn all_weights_are_recorded_correctly() { + // Reset to get the correct new genesis below. + RuntimeVersionTestValues::take(); + new_test_ext(1).execute_with(|| { // Make sure `on_runtime_upgrade` is called for maximum complexity RuntimeVersionTestValues::mutate(|v| { @@ -1535,6 +1531,10 @@ mod tests { Digest::default(), )); + // Reset the last runtime upgrade info, to make the second call to `on_runtime_upgrade` + // succeed. + LastRuntimeUpgrade::::take(); + // All weights that show up in the `initialize_block_impl` let custom_runtime_upgrade_weight = CustomOnRuntimeUpgrade::on_runtime_upgrade(); let runtime_upgrade_weight = diff --git a/substrate/frame/system/Cargo.toml b/substrate/frame/system/Cargo.toml index f7733e312c3..b61b4d531e2 100644 --- a/substrate/frame/system/Cargo.toml +++ b/substrate/frame/system/Cargo.toml @@ -25,6 +25,7 @@ sp-runtime = { path = "../../primitives/runtime", default-features = false, feat sp-std = { path = "../../primitives/std", default-features = false} sp-version = { path = "../../primitives/version", default-features = false, features = ["serde"] } sp-weights = { path = "../../primitives/weights", default-features = false, features = ["serde"] } +docify = "0.2.0" [dev-dependencies] criterion = "0.4.0" diff --git a/substrate/frame/system/src/lib.rs b/substrate/frame/system/src/lib.rs index 0e394a11041..1b8dd6a9367 100644 --- a/substrate/frame/system/src/lib.rs +++ b/substrate/frame/system/src/lib.rs @@ -1094,6 +1094,25 @@ pub enum DecRefStatus { } impl Pallet { + /// Returns the `spec_version` of the last runtime upgrade. + /// + /// This function is useful for writing guarded runtime migrations in the runtime. A runtime + /// migration can use the `spec_version` to ensure that it isn't applied twice. This works + /// similar as the storage version for pallets. + /// + /// This functions returns the `spec_version` of the last runtime upgrade while executing the + /// runtime migrations + /// [`on_runtime_upgrade`](frame_support::traits::OnRuntimeUpgrade::on_runtime_upgrade) + /// function. After all migrations are executed, this will return the `spec_version` of the + /// current runtime until there is another runtime upgrade. + /// + /// Example: + #[doc = docify::embed!("src/tests.rs", last_runtime_upgrade_spec_version_usage)] + pub fn last_runtime_upgrade_spec_version() -> u32 { + LastRuntimeUpgrade::::get().map_or(0, |l| l.spec_version.0) + } + + /// Returns true if the given account exists. 
pub fn account_exists(who: &T::AccountId) -> bool { Account::::contains_key(who) } diff --git a/substrate/frame/system/src/tests.rs b/substrate/frame/system/src/tests.rs index 165df688b1c..6fbddaaf229 100644 --- a/substrate/frame/system/src/tests.rs +++ b/substrate/frame/system/src/tests.rs @@ -19,7 +19,7 @@ use crate::*; use frame_support::{ assert_noop, assert_ok, dispatch::{Pays, PostDispatchInfo, WithPostDispatchInfo}, - traits::WhitelistedStorageKeys, + traits::{OnRuntimeUpgrade, WhitelistedStorageKeys}, }; use std::collections::BTreeSet; @@ -773,3 +773,26 @@ pub fn from_actual_ref_time(ref_time: Option) -> PostDispatchInfo { pub fn from_post_weight_info(ref_time: Option, pays_fee: Pays) -> PostDispatchInfo { PostDispatchInfo { actual_weight: ref_time.map(|t| Weight::from_all(t)), pays_fee } } + +#[docify::export] +#[test] +fn last_runtime_upgrade_spec_version_usage() { + struct Migration; + + impl OnRuntimeUpgrade for Migration { + fn on_runtime_upgrade() -> Weight { + // Ensure to compare the spec version against some static version to prevent applying + // the same migration multiple times. + // + // `1337` here is the spec version of the runtime running on chain. If there is maybe + // a runtime upgrade in the pipeline of being applied, you should use the spec version + // of this upgrade. + if System::last_runtime_upgrade_spec_version() > 1337 { + return Weight::zero(); + } + + // Do the migration. + Weight::zero() + } + } +} -- GitLab From d4c426afd46f43b81115911657ccc0002a361ddb Mon Sep 17 00:00:00 2001 From: Alexander Samusev <41779041+alvicsam@users.noreply.github.com> Date: Thu, 16 Nov 2023 12:59:06 +0100 Subject: [PATCH 064/199] [ci] Enable zombienet jobs in PRs (#2361) Since preparation for the merge queues needs more time I'm enabling zombienet jobs in PRs CI back. 
--- .gitlab/pipeline/publish.yml | 2 +- .gitlab/pipeline/zombienet/cumulus.yml | 2 +- .gitlab/pipeline/zombienet/polkadot.yml | 8 ++++---- .gitlab/pipeline/zombienet/substrate.yml | 2 +- 4 files changed, 7 insertions(+), 7 deletions(-) diff --git a/.gitlab/pipeline/publish.yml b/.gitlab/pipeline/publish.yml index f2308c334e0..3cc2002cc1c 100644 --- a/.gitlab/pipeline/publish.yml +++ b/.gitlab/pipeline/publish.yml @@ -72,7 +72,7 @@ publish-rustdoc: IMAGE_NAME: "" # docker.io/paritypr/image_name script: # Exit if the job is not running in a merge queue - - if [[ $CI_COMMIT_REF_NAME != *"gh-readonly-queue"* ]]; then echo "I will run only in a merge queue"; exit 0; fi + # - if [[ $CI_COMMIT_REF_NAME != *"gh-readonly-queue"* ]]; then echo "I will run only in a merge queue"; exit 0; fi - $BUILDAH_COMMAND build --format=docker --build-arg VCS_REF="${CI_COMMIT_SHA}" diff --git a/.gitlab/pipeline/zombienet/cumulus.yml b/.gitlab/pipeline/zombienet/cumulus.yml index c8a1df004e3..3cac67c2966 100644 --- a/.gitlab/pipeline/zombienet/cumulus.yml +++ b/.gitlab/pipeline/zombienet/cumulus.yml @@ -4,7 +4,7 @@ .zombienet-before-script: before_script: # Exit if the job is not merge queue - - if [[ $CI_COMMIT_REF_NAME != *"gh-readonly-queue"* ]]; then echo "I will run only in a merge queue"; exit 0; fi + # - if [[ $CI_COMMIT_REF_NAME != *"gh-readonly-queue"* ]]; then echo "I will run only in a merge queue"; exit 0; fi - echo "Zombie-net Tests Config" - echo "${ZOMBIENET_IMAGE}" - echo "${POLKADOT_IMAGE}" diff --git a/.gitlab/pipeline/zombienet/polkadot.yml b/.gitlab/pipeline/zombienet/polkadot.yml index cc960557298..995dd982532 100644 --- a/.gitlab/pipeline/zombienet/polkadot.yml +++ b/.gitlab/pipeline/zombienet/polkadot.yml @@ -5,7 +5,7 @@ .zombienet-polkadot-common: before_script: # Exit if the job is not merge queue - - if [[ $CI_COMMIT_REF_NAME != *"gh-readonly-queue"* ]]; then echo "I will run only in a merge queue"; exit 0; fi + # - if [[ $CI_COMMIT_REF_NAME != *"gh-readonly-queue"* ]]; then echo "I will run only in a merge queue"; exit 0; fi - export BUILD_RELEASE_VERSION="$(cat ./artifacts/BUILD_RELEASE_VERSION)" # from build-linux-stable job - export DEBUG=zombie,zombie::network-node - export ZOMBIENET_INTEGRATION_TEST_IMAGE="${POLKADOT_IMAGE}":${PIPELINE_IMAGE_TAG} @@ -120,7 +120,7 @@ zombienet-polkadot-smoke-0001-parachains-smoke-test: - .zombienet-polkadot-common before_script: # Exit if the job is not merge queue - - if [[ $CI_COMMIT_REF_NAME != *"gh-readonly-queue"* ]]; then echo "I will run only in a merge queue"; exit 0; fi + # - if [[ $CI_COMMIT_REF_NAME != *"gh-readonly-queue"* ]]; then echo "I will run only in a merge queue"; exit 0; fi - export ZOMBIENET_INTEGRATION_TEST_IMAGE="${POLKADOT_IMAGE}":${PIPELINE_IMAGE_TAG} - export COL_IMAGE="${COLANDER_IMAGE}":${PIPELINE_IMAGE_TAG} - echo "Zombienet Tests Config" @@ -139,7 +139,7 @@ zombienet-polkadot-smoke-0002-parachains-parachains-upgrade-smoke: - .zombienet-polkadot-common before_script: # Exit if the job is not merge queue - - if [[ $CI_COMMIT_REF_NAME != *"gh-readonly-queue"* ]]; then echo "I will run only in a merge queue"; exit 0; fi + # - if [[ $CI_COMMIT_REF_NAME != *"gh-readonly-queue"* ]]; then echo "I will run only in a merge queue"; exit 0; fi - export ZOMBIENET_INTEGRATION_TEST_IMAGE="${POLKADOT_IMAGE}":${PIPELINE_IMAGE_TAG} - export CUMULUS_IMAGE="docker.io/paritypr/polkadot-parachain-debug:${DOCKER_IMAGES_VERSION}" - echo "Zombienet Tests Config" @@ -183,7 +183,7 @@ zombienet-polkadot-misc-0002-upgrade-node: artifacts: true 
before_script: # Exit if the job is not merge queue - - if [[ $CI_COMMIT_REF_NAME != *"gh-readonly-queue"* ]]; then echo "I will run only in a merge queue"; exit 0; fi + # - if [[ $CI_COMMIT_REF_NAME != *"gh-readonly-queue"* ]]; then echo "I will run only in a merge queue"; exit 0; fi - export ZOMBIENET_INTEGRATION_TEST_IMAGE="docker.io/parity/polkadot:latest" - echo "Overrided poladot image ${ZOMBIENET_INTEGRATION_TEST_IMAGE}" - export COL_IMAGE="${COLANDER_IMAGE}":${PIPELINE_IMAGE_TAG} diff --git a/.gitlab/pipeline/zombienet/substrate.yml b/.gitlab/pipeline/zombienet/substrate.yml index e627575a31a..6334e7db9a3 100644 --- a/.gitlab/pipeline/zombienet/substrate.yml +++ b/.gitlab/pipeline/zombienet/substrate.yml @@ -5,7 +5,7 @@ .zombienet-substrate-common: before_script: # Exit if the job is not merge queue - - if [[ $CI_COMMIT_REF_NAME != *"gh-readonly-queue"* ]]; then echo "I will run only in a merge queue"; exit 0; fi + # - if [[ $CI_COMMIT_REF_NAME != *"gh-readonly-queue"* ]]; then echo "I will run only in a merge queue"; exit 0; fi - echo "Zombienet Tests Config" - echo "${ZOMBIENET_IMAGE}" - echo "${GH_DIR}" -- GitLab From 02e8061bbadf51bce2f8c419c8645555532c7489 Mon Sep 17 00:00:00 2001 From: Adrian Catangiu Date: Thu, 16 Nov 2023 17:36:37 +0200 Subject: [PATCH 065/199] westend: remove SessionKeys migration already applied on-chain (#2363) Westend now successfully updated to `spec: 103000`, we **have to remove** the session keys migration before the next release as it doesn't gracefully handle reapplying it. --- polkadot/runtime/westend/src/lib.rs | 48 ----------------------------- 1 file changed, 48 deletions(-) diff --git a/polkadot/runtime/westend/src/lib.rs b/polkadot/runtime/westend/src/lib.rs index 2e8394b0ee4..4d537832ae3 100644 --- a/polkadot/runtime/westend/src/lib.rs +++ b/polkadot/runtime/westend/src/lib.rs @@ -425,17 +425,6 @@ parameter_types! { pub const Offset: BlockNumber = 0; } -impl_opaque_keys! { - pub struct OldSessionKeys { - pub grandpa: Grandpa, - pub babe: Babe, - pub im_online: ImOnline, - pub para_validator: Initializer, - pub para_assignment: ParaSessionInfo, - pub authority_discovery: AuthorityDiscovery, - } -} - impl_opaque_keys! { pub struct SessionKeys { pub grandpa: Grandpa, @@ -448,32 +437,6 @@ impl_opaque_keys! { } } -// remove this when removing `OldSessionKeys` -fn transform_session_keys(v: AccountId, old: OldSessionKeys) -> SessionKeys { - SessionKeys { - grandpa: old.grandpa, - babe: old.babe, - im_online: old.im_online, - para_validator: old.para_validator, - para_assignment: old.para_assignment, - authority_discovery: old.authority_discovery, - beefy: { - // From Session::upgrade_keys(): - // - // Care should be taken that the raw versions of the - // added keys are unique for every `ValidatorId, KeyTypeId` combination. - // This is an invariant that the session pallet typically maintains internally. - // - // So, produce a dummy value that's unique for the `ValidatorId, KeyTypeId` combination. - let mut id: BeefyId = sp_application_crypto::ecdsa::Public::from_raw([0u8; 33]).into(); - let id_raw: &mut [u8] = id.as_mut(); - id_raw[1..33].copy_from_slice(v.as_ref()); - id_raw[0..4].copy_from_slice(b"beef"); - id - }, - } -} - impl pallet_session::Config for Runtime { type RuntimeEvent = RuntimeEvent; type ValidatorId = AccountId; @@ -1560,16 +1523,6 @@ pub type Migrations = migrations::Unreleased; pub mod migrations { use super::*; - /// Upgrade Session keys to include BEEFY key. - /// When this is removed, should also remove `OldSessionKeys`. 
- pub struct UpgradeSessionKeys; - impl frame_support::traits::OnRuntimeUpgrade for UpgradeSessionKeys { - fn on_runtime_upgrade() -> Weight { - Session::upgrade_keys::(transform_session_keys); - Perbill::from_percent(50) * BlockWeights::get().max_block - } - } - /// Unreleased migrations. Add new ones here: pub type Unreleased = ( pallet_im_online::migration::v1::Migration, @@ -1578,7 +1531,6 @@ pub mod migrations { assigned_slots::migration::v1::MigrateToV1, parachains_scheduler::migration::v1::MigrateToV1, parachains_configuration::migration::v8::MigrateToV8, - UpgradeSessionKeys, parachains_configuration::migration::v9::MigrateToV9, paras_registrar::migration::MigrateToV1, pallet_nomination_pools::migration::versioned_migrations::V5toV6, -- GitLab From 5e98803f032a7c0ac2d8fae6b0cfc76417917e2a Mon Sep 17 00:00:00 2001 From: ordian Date: Thu, 16 Nov 2023 21:13:04 +0100 Subject: [PATCH 066/199] implementers-guide: update github link (#2368) cc @joepetrowski --- polkadot/roadmap/implementers-guide/book.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/polkadot/roadmap/implementers-guide/book.toml b/polkadot/roadmap/implementers-guide/book.toml index 1e6680f6f4b..f677c0d59c0 100644 --- a/polkadot/roadmap/implementers-guide/book.toml +++ b/polkadot/roadmap/implementers-guide/book.toml @@ -17,6 +17,6 @@ renderer = ["html"] additional-css = ["last-changed.css"] additional-js = ["mermaid.min.js", "mermaid-init.js"] # Repository URL used in the last-changed link. -git-repository-url = "https://github.com/paritytech/polkadot" +git-repository-url = "https://github.com/paritytech/polkadot-sdk" [output.linkcheck] -- GitLab From 4ac2db8095e55a68f9b3d843d4e4689759fcc9d5 Mon Sep 17 00:00:00 2001 From: joe petrowski <25483142+joepetrowski@users.noreply.github.com> Date: Fri, 17 Nov 2023 00:02:56 +0100 Subject: [PATCH 067/199] Fix Typo: `PalletXcmExtrinsicsBenchmark` (#2354) Missed in https://github.com/paritytech/polkadot-sdk/pull/1672 --- .../parachains/runtimes/assets/asset-hub-kusama/src/lib.rs | 6 +++--- .../runtimes/assets/asset-hub-polkadot/src/lib.rs | 6 +++--- .../parachains/runtimes/assets/asset-hub-rococo/src/lib.rs | 6 +++--- .../parachains/runtimes/assets/asset-hub-westend/src/lib.rs | 6 +++--- .../runtimes/bridge-hubs/bridge-hub-kusama/src/lib.rs | 6 +++--- .../runtimes/bridge-hubs/bridge-hub-polkadot/src/lib.rs | 6 +++--- .../runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs | 6 +++--- .../runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs | 6 +++--- .../runtimes/collectives/collectives-polkadot/src/lib.rs | 6 +++--- .../runtimes/contracts/contracts-rococo/src/lib.rs | 6 +++--- polkadot/runtime/rococo/src/lib.rs | 6 +++--- polkadot/runtime/westend/src/lib.rs | 6 +++--- 12 files changed, 36 insertions(+), 36 deletions(-) diff --git a/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/lib.rs b/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/lib.rs index e4ed77884bf..af0116d7014 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/lib.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/lib.rs @@ -962,7 +962,7 @@ mod benches { [cumulus_pallet_xcmp_queue, XcmpQueue] [cumulus_pallet_dmp_queue, DmpQueue] // XCM - [pallet_xcm, PalletXcmExtrinsiscsBenchmark::] + [pallet_xcm, PalletXcmExtrinsicsBenchmark::] // NOTE: Make sure you point to the individual modules below. [pallet_xcm_benchmarks::fungible, XcmBalances] [pallet_xcm_benchmarks::generic, XcmGeneric] @@ -1200,7 +1200,7 @@ impl_runtime_apis! 
{ use frame_support::traits::StorageInfoTrait; use frame_system_benchmarking::Pallet as SystemBench; use cumulus_pallet_session_benchmarking::Pallet as SessionBench; - use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsiscsBenchmark; + use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsicsBenchmark; // This is defined once again in dispatch_benchmark, because list_benchmarks! // and add_benchmarks! are macros exported by define_benchmarks! macros and those types @@ -1244,7 +1244,7 @@ impl_runtime_apis! { use cumulus_pallet_session_benchmarking::Pallet as SessionBench; impl cumulus_pallet_session_benchmarking::Config for Runtime {} - use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsiscsBenchmark; + use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsicsBenchmark; impl pallet_xcm::benchmarking::Config for Runtime { fn reachable_dest() -> Option { Some(Parent.into()) diff --git a/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/lib.rs b/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/lib.rs index 6f853b6f56e..1b7ef10f485 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/lib.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/lib.rs @@ -868,7 +868,7 @@ mod benches { [cumulus_pallet_xcmp_queue, XcmpQueue] [cumulus_pallet_dmp_queue, DmpQueue] // XCM - [pallet_xcm, PalletXcmExtrinsiscsBenchmark::] + [pallet_xcm, PalletXcmExtrinsicsBenchmark::] // NOTE: Make sure you point to the individual modules below. [pallet_xcm_benchmarks::fungible, XcmBalances] [pallet_xcm_benchmarks::generic, XcmGeneric] @@ -1082,7 +1082,7 @@ impl_runtime_apis! { use frame_support::traits::StorageInfoTrait; use frame_system_benchmarking::Pallet as SystemBench; use cumulus_pallet_session_benchmarking::Pallet as SessionBench; - use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsiscsBenchmark; + use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsicsBenchmark; // This is defined once again in dispatch_benchmark, because list_benchmarks! // and add_benchmarks! are macros exported by define_benchmarks! macros and those types @@ -1125,7 +1125,7 @@ impl_runtime_apis! { use cumulus_pallet_session_benchmarking::Pallet as SessionBench; impl cumulus_pallet_session_benchmarking::Config for Runtime {} - use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsiscsBenchmark; + use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsicsBenchmark; impl pallet_xcm::benchmarking::Config for Runtime { fn reachable_dest() -> Option { Some(Parent.into()) diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs index 4492971566b..4b4ae61a3e8 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs @@ -996,7 +996,7 @@ mod benches { [cumulus_pallet_xcmp_queue, XcmpQueue] [pallet_xcm_bridge_hub_router, ToWestend] // XCM - [pallet_xcm, PalletXcmExtrinsiscsBenchmark::] + [pallet_xcm, PalletXcmExtrinsicsBenchmark::] // NOTE: Make sure you point to the individual modules below. [pallet_xcm_benchmarks::fungible, XcmBalances] [pallet_xcm_benchmarks::generic, XcmGeneric] @@ -1234,7 +1234,7 @@ impl_runtime_apis! 
{ use frame_support::traits::StorageInfoTrait; use frame_system_benchmarking::Pallet as SystemBench; use cumulus_pallet_session_benchmarking::Pallet as SessionBench; - use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsiscsBenchmark; + use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsicsBenchmark; use pallet_xcm_bridge_hub_router::benchmarking::Pallet as XcmBridgeHubRouterBench; // This is defined once again in dispatch_benchmark, because list_benchmarks! @@ -1286,7 +1286,7 @@ impl_runtime_apis! { Config as XcmBridgeHubRouterConfig, }; - use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsiscsBenchmark; + use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsicsBenchmark; impl pallet_xcm::benchmarking::Config for Runtime { fn reachable_dest() -> Option { Some(Parent.into()) diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs index cd17b9d86f7..d52edfe479c 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs @@ -1085,7 +1085,7 @@ mod benches { [cumulus_pallet_dmp_queue, DmpQueue] [pallet_xcm_bridge_hub_router, ToRococo] // XCM - [pallet_xcm, PalletXcmExtrinsiscsBenchmark::] + [pallet_xcm, PalletXcmExtrinsicsBenchmark::] // NOTE: Make sure you point to the individual modules below. [pallet_xcm_benchmarks::fungible, XcmBalances] [pallet_xcm_benchmarks::generic, XcmGeneric] @@ -1369,7 +1369,7 @@ impl_runtime_apis! { use frame_support::traits::StorageInfoTrait; use frame_system_benchmarking::Pallet as SystemBench; use cumulus_pallet_session_benchmarking::Pallet as SessionBench; - use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsiscsBenchmark; + use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsicsBenchmark; use pallet_xcm_bridge_hub_router::benchmarking::Pallet as XcmBridgeHubRouterBench; // This is defined once again in dispatch_benchmark, because list_benchmarks! @@ -1416,7 +1416,7 @@ impl_runtime_apis! { use cumulus_pallet_session_benchmarking::Pallet as SessionBench; impl cumulus_pallet_session_benchmarking::Config for Runtime {} - use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsiscsBenchmark; + use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsicsBenchmark; impl pallet_xcm::benchmarking::Config for Runtime { fn reachable_dest() -> Option { Some(Parent.into()) diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/lib.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/lib.rs index b3750700084..d2db0340790 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/lib.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/lib.rs @@ -490,7 +490,7 @@ mod benches { [cumulus_pallet_xcmp_queue, XcmpQueue] [cumulus_pallet_dmp_queue, DmpQueue] // XCM - [pallet_xcm, PalletXcmExtrinsiscsBenchmark::] + [pallet_xcm, PalletXcmExtrinsicsBenchmark::] // NOTE: Make sure you point to the individual modules below. [pallet_xcm_benchmarks::fungible, XcmBalances] [pallet_xcm_benchmarks::generic, XcmGeneric] @@ -670,7 +670,7 @@ impl_runtime_apis! 
{ use frame_support::traits::StorageInfoTrait; use frame_system_benchmarking::Pallet as SystemBench; use cumulus_pallet_session_benchmarking::Pallet as SessionBench; - use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsiscsBenchmark; + use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsicsBenchmark; // This is defined once again in dispatch_benchmark, because list_benchmarks! // and add_benchmarks! are macros exported by define_benchmarks! macros and those types @@ -706,7 +706,7 @@ impl_runtime_apis! { use cumulus_pallet_session_benchmarking::Pallet as SessionBench; impl cumulus_pallet_session_benchmarking::Config for Runtime {} - use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsiscsBenchmark; + use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsicsBenchmark; impl pallet_xcm::benchmarking::Config for Runtime { fn reachable_dest() -> Option { Some(Parent.into()) diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/lib.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/lib.rs index 841bb4ee861..02f05a8bb87 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/lib.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/lib.rs @@ -491,7 +491,7 @@ mod benches { [cumulus_pallet_xcmp_queue, XcmpQueue] [cumulus_pallet_dmp_queue, DmpQueue] // XCM - [pallet_xcm, PalletXcmExtrinsiscsBenchmark::] + [pallet_xcm, PalletXcmExtrinsicsBenchmark::] // NOTE: Make sure you point to the individual modules below. [pallet_xcm_benchmarks::fungible, XcmBalances] [pallet_xcm_benchmarks::generic, XcmGeneric] @@ -671,7 +671,7 @@ impl_runtime_apis! { use frame_support::traits::StorageInfoTrait; use frame_system_benchmarking::Pallet as SystemBench; use cumulus_pallet_session_benchmarking::Pallet as SessionBench; - use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsiscsBenchmark; + use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsicsBenchmark; // This is defined once again in dispatch_benchmark, because list_benchmarks! // and add_benchmarks! are macros exported by define_benchmarks! macros and those types @@ -707,7 +707,7 @@ impl_runtime_apis! { use cumulus_pallet_session_benchmarking::Pallet as SessionBench; impl cumulus_pallet_session_benchmarking::Config for Runtime {} - use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsiscsBenchmark; + use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsicsBenchmark; impl pallet_xcm::benchmarking::Config for Runtime { fn reachable_dest() -> Option { Some(Parent.into()) diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs index b8fc2fffc88..5a44ccbb75a 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs @@ -554,7 +554,7 @@ mod benches { [cumulus_pallet_xcmp_queue, XcmpQueue] [cumulus_pallet_dmp_queue, DmpQueue] // XCM - [pallet_xcm, PalletXcmExtrinsiscsBenchmark::] + [pallet_xcm, PalletXcmExtrinsicsBenchmark::] // NOTE: Make sure you point to the individual modules below. [pallet_xcm_benchmarks::fungible, XcmBalances] [pallet_xcm_benchmarks::generic, XcmGeneric] @@ -784,7 +784,7 @@ impl_runtime_apis! 
{ use frame_support::traits::StorageInfoTrait; use frame_system_benchmarking::Pallet as SystemBench; use cumulus_pallet_session_benchmarking::Pallet as SessionBench; - use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsiscsBenchmark; + use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsicsBenchmark; // This is defined once again in dispatch_benchmark, because list_benchmarks! // and add_benchmarks! are macros exported by define_benchmarks! macros and those types @@ -826,7 +826,7 @@ impl_runtime_apis! { use cumulus_pallet_session_benchmarking::Pallet as SessionBench; impl cumulus_pallet_session_benchmarking::Config for Runtime {} - use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsiscsBenchmark; + use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsicsBenchmark; impl pallet_xcm::benchmarking::Config for Runtime { fn reachable_dest() -> Option { Some(Parent.into()) diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs index 9e8fd84e712..d1d2b4a4159 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs @@ -544,7 +544,7 @@ mod benches { [pallet_collator_selection, CollatorSelection] [cumulus_pallet_xcmp_queue, XcmpQueue] // XCM - [pallet_xcm, PalletXcmExtrinsiscsBenchmark::] + [pallet_xcm, PalletXcmExtrinsicsBenchmark::] // NOTE: Make sure you point to the individual modules below. [pallet_xcm_benchmarks::fungible, XcmBalances] [pallet_xcm_benchmarks::generic, XcmGeneric] @@ -772,7 +772,7 @@ impl_runtime_apis! { use frame_support::traits::StorageInfoTrait; use frame_system_benchmarking::Pallet as SystemBench; use cumulus_pallet_session_benchmarking::Pallet as SessionBench; - use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsiscsBenchmark; + use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsicsBenchmark; // This is defined once again in dispatch_benchmark, because list_benchmarks! // and add_benchmarks! are macros exported by define_benchmarks! macros and those types @@ -814,7 +814,7 @@ impl_runtime_apis! { use cumulus_pallet_session_benchmarking::Pallet as SessionBench; impl cumulus_pallet_session_benchmarking::Config for Runtime {} - use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsiscsBenchmark; + use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsicsBenchmark; impl pallet_xcm::benchmarking::Config for Runtime { fn reachable_dest() -> Option { Some(Parent.into()) diff --git a/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/lib.rs b/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/lib.rs index 206f4614060..c3d671c9085 100644 --- a/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/lib.rs +++ b/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/lib.rs @@ -751,7 +751,7 @@ mod benches { [cumulus_pallet_dmp_queue, DmpQueue] [pallet_alliance, Alliance] [pallet_collective, AllianceMotion] - [pallet_xcm, PalletXcmExtrinsiscsBenchmark::] + [pallet_xcm, PalletXcmExtrinsicsBenchmark::] [pallet_preimage, Preimage] [pallet_scheduler, Scheduler] [pallet_referenda, FellowshipReferenda] @@ -939,7 +939,7 @@ impl_runtime_apis! 
{ use frame_support::traits::StorageInfoTrait; use frame_system_benchmarking::Pallet as SystemBench; use cumulus_pallet_session_benchmarking::Pallet as SessionBench; - use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsiscsBenchmark; + use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsicsBenchmark; let mut list = Vec::::new(); list_benchmarks!(list, extra); @@ -969,7 +969,7 @@ impl_runtime_apis! { use cumulus_pallet_session_benchmarking::Pallet as SessionBench; impl cumulus_pallet_session_benchmarking::Config for Runtime {} - use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsiscsBenchmark; + use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsicsBenchmark; impl pallet_xcm::benchmarking::Config for Runtime { fn reachable_dest() -> Option { Some(Parent.into()) diff --git a/cumulus/parachains/runtimes/contracts/contracts-rococo/src/lib.rs b/cumulus/parachains/runtimes/contracts/contracts-rococo/src/lib.rs index 4c66e780ba9..5b828bad0c7 100644 --- a/cumulus/parachains/runtimes/contracts/contracts-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/contracts/contracts-rococo/src/lib.rs @@ -433,7 +433,7 @@ mod benches { [pallet_timestamp, Timestamp] [pallet_collator_selection, CollatorSelection] [pallet_contracts, Contracts] - [pallet_xcm, PalletXcmExtrinsiscsBenchmark::] + [pallet_xcm, PalletXcmExtrinsicsBenchmark::] ); } @@ -678,7 +678,7 @@ impl_runtime_apis! { use frame_support::traits::StorageInfoTrait; use frame_system_benchmarking::Pallet as SystemBench; use cumulus_pallet_session_benchmarking::Pallet as SessionBench; - use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsiscsBenchmark; + use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsicsBenchmark; let mut list = Vec::::new(); list_benchmarks!(list, extra); @@ -709,7 +709,7 @@ impl_runtime_apis! { impl cumulus_pallet_session_benchmarking::Config for Runtime {} use xcm::latest::prelude::*; - use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsiscsBenchmark; + use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsicsBenchmark; impl pallet_xcm::benchmarking::Config for Runtime { fn reachable_dest() -> Option { Some(Parent.into()) diff --git a/polkadot/runtime/rococo/src/lib.rs b/polkadot/runtime/rococo/src/lib.rs index 277c9981dab..675e0a20b2b 100644 --- a/polkadot/runtime/rococo/src/lib.rs +++ b/polkadot/runtime/rococo/src/lib.rs @@ -1617,7 +1617,7 @@ mod benches { [pallet_asset_rate, AssetRate] [pallet_whitelist, Whitelist] // XCM - [pallet_xcm, PalletXcmExtrinsiscsBenchmark::] + [pallet_xcm, PalletXcmExtrinsicsBenchmark::] [pallet_xcm_benchmarks::fungible, pallet_xcm_benchmarks::fungible::Pallet::] [pallet_xcm_benchmarks::generic, pallet_xcm_benchmarks::generic::Pallet::] ); @@ -2096,7 +2096,7 @@ sp_api::impl_runtime_apis! { use frame_system_benchmarking::Pallet as SystemBench; use frame_benchmarking::baseline::Pallet as Baseline; - use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsiscsBenchmark; + use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsicsBenchmark; let mut list = Vec::::new(); list_benchmarks!(list, extra); @@ -2115,7 +2115,7 @@ sp_api::impl_runtime_apis! 
{ use frame_benchmarking::{Benchmarking, BenchmarkBatch, BenchmarkError}; use frame_system_benchmarking::Pallet as SystemBench; use frame_benchmarking::baseline::Pallet as Baseline; - use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsiscsBenchmark; + use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsicsBenchmark; use sp_storage::TrackedStorageKey; use xcm::latest::prelude::*; use xcm_config::{ diff --git a/polkadot/runtime/westend/src/lib.rs b/polkadot/runtime/westend/src/lib.rs index 4d537832ae3..16a0b112a0b 100644 --- a/polkadot/runtime/westend/src/lib.rs +++ b/polkadot/runtime/westend/src/lib.rs @@ -1606,7 +1606,7 @@ mod benches { [pallet_whitelist, Whitelist] [pallet_asset_rate, AssetRate] // XCM - [pallet_xcm, PalletXcmExtrinsiscsBenchmark::] + [pallet_xcm, PalletXcmExtrinsicsBenchmark::] // NOTE: Make sure you point to the individual modules below. [pallet_xcm_benchmarks::fungible, XcmBalances] [pallet_xcm_benchmarks::generic, XcmGeneric] @@ -2129,7 +2129,7 @@ sp_api::impl_runtime_apis! { use pallet_session_benchmarking::Pallet as SessionBench; use pallet_offences_benchmarking::Pallet as OffencesBench; use pallet_election_provider_support_benchmarking::Pallet as ElectionProviderBench; - use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsiscsBenchmark; + use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsicsBenchmark; use frame_system_benchmarking::Pallet as SystemBench; use pallet_nomination_pools_benchmarking::Pallet as NominationPoolsBench; @@ -2157,7 +2157,7 @@ sp_api::impl_runtime_apis! { use pallet_session_benchmarking::Pallet as SessionBench; use pallet_offences_benchmarking::Pallet as OffencesBench; use pallet_election_provider_support_benchmarking::Pallet as ElectionProviderBench; - use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsiscsBenchmark; + use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsicsBenchmark; use frame_system_benchmarking::Pallet as SystemBench; use pallet_nomination_pools_benchmarking::Pallet as NominationPoolsBench; -- GitLab From 596088a273eff13fbefe6fa20a5fef6507b329cb Mon Sep 17 00:00:00 2001 From: Bruno Galvao Date: Fri, 17 Nov 2023 01:31:31 -0500 Subject: [PATCH 068/199] add pallet nomination-pools versioned migration to kitchensink (#2167) The versioned migrations are already there in pallet nomination-pools: https://github.com/paritytech/polkadot-sdk/blob/f6ee4781f633f0f89598f7b230595afe401da8dc/substrate/frame/nomination-pools/src/migration.rs#L27-L48 Just updating the kitchensink runtime to point to them. This is also nice because it points the dev to an example of how to use `VersionedMigration`. 
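For readers coming from the kitchensink runtime, here is a minimal sketch of what such a wrapper looks like — assuming a hypothetical inner type `MigrateV6ToV7<T>` that implements `OnRuntimeUpgrade` and holds the actual (unversioned) migration logic; the real definitions live in `substrate/frame/nomination-pools/src/migration.rs`:

```rust
// Sketch only: `MigrateV6ToV7` stands in for the pallet's unversioned migration logic.
// `VersionedMigration` runs it only when the pallet's on-chain storage version is `6`
// and bumps the version to `7` afterwards, so re-applying the migration is a no-op.
pub type V6ToV7<T> = frame_support::migrations::VersionedMigration<
	6,                                      // `FROM`: required current on-chain storage version
	7,                                      // `TO`: storage version written after a successful run
	MigrateV6ToV7<T>,                       // inner migration implementing `OnRuntimeUpgrade`
	crate::pallet::Pallet<T>,               // pallet whose storage version is checked and bumped
	<T as frame_system::Config>::DbWeight,  // weights charged for the version read/write
>;
```

The runtime then lists the wrapper in its migrations tuple (as the diff below does for Westend and the kitchensink runtime) rather than referencing the inner migration directly.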
--- polkadot/runtime/westend/src/lib.rs | 4 ++-- substrate/bin/node/runtime/src/lib.rs | 5 +++-- substrate/frame/nomination-pools/src/migration.rs | 2 +- 3 files changed, 6 insertions(+), 5 deletions(-) diff --git a/polkadot/runtime/westend/src/lib.rs b/polkadot/runtime/westend/src/lib.rs index 16a0b112a0b..29183fdfe00 100644 --- a/polkadot/runtime/westend/src/lib.rs +++ b/polkadot/runtime/westend/src/lib.rs @@ -1533,9 +1533,9 @@ pub mod migrations { parachains_configuration::migration::v8::MigrateToV8, parachains_configuration::migration::v9::MigrateToV9, paras_registrar::migration::MigrateToV1, - pallet_nomination_pools::migration::versioned_migrations::V5toV6, + pallet_nomination_pools::migration::versioned::V5toV6, pallet_referenda::migration::v1::MigrateV0ToV1, - pallet_nomination_pools::migration::versioned_migrations::V6ToV7, + pallet_nomination_pools::migration::versioned::V6ToV7, pallet_grandpa::migrations::MigrateV4ToV5, parachains_configuration::migration::v10::MigrateToV10, ); diff --git a/substrate/bin/node/runtime/src/lib.rs b/substrate/bin/node/runtime/src/lib.rs index 90946b71311..d7beb29becf 100644 --- a/substrate/bin/node/runtime/src/lib.rs +++ b/substrate/bin/node/runtime/src/lib.rs @@ -2194,9 +2194,10 @@ pub type Executive = frame_executive::Executive< >; // All migrations executed on runtime upgrade as a nested tuple of types implementing -// `OnRuntimeUpgrade`. +// `OnRuntimeUpgrade`. Note: These are examples and do not need to be run directly +// after the genesis block. type Migrations = ( - pallet_nomination_pools::migration::v2::MigrateToV2, + pallet_nomination_pools::migration::versioned::V6ToV7, pallet_alliance::migration::Migration, pallet_contracts::Migration, ); diff --git a/substrate/frame/nomination-pools/src/migration.rs b/substrate/frame/nomination-pools/src/migration.rs index eef2a976f1a..3d68fee1dca 100644 --- a/substrate/frame/nomination-pools/src/migration.rs +++ b/substrate/frame/nomination-pools/src/migration.rs @@ -24,7 +24,7 @@ use sp_std::{collections::btree_map::BTreeMap, vec::Vec}; use sp_runtime::TryRuntimeError; /// Exports for versioned migration `type`s for this pallet. -pub mod versioned_migrations { +pub mod versioned { use super::*; /// Migration V6 to V7 wrapped in a [`frame_support::migrations::VersionedMigration`], ensuring -- GitLab From b85c64aa5fa945027c7a2bfe1788e09309febc7a Mon Sep 17 00:00:00 2001 From: cuteolaf Date: Thu, 16 Nov 2023 23:20:09 -0800 Subject: [PATCH 069/199] fix typo (#2377) --- substrate/frame/proxy/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/substrate/frame/proxy/README.md b/substrate/frame/proxy/README.md index bfe26d9aefb..c52a881c590 100644 --- a/substrate/frame/proxy/README.md +++ b/substrate/frame/proxy/README.md @@ -2,7 +2,7 @@ A module allowing accounts to give permission to other accounts to dispatch types of calls from their signed origin. -The accounts to which permission is delegated may be requied to announce the action that they +The accounts to which permission is delegated may be required to announce the action that they wish to execute some duration prior to execution happens. In this case, the target account may reject the announcement and in doing so, veto the execution. -- GitLab From 20723ea80e0f442487764a9bcde1d4c989958e56 Mon Sep 17 00:00:00 2001 From: Javier Viola Date: Fri, 17 Nov 2023 08:21:42 -0300 Subject: [PATCH 070/199] bump zombienet version `v1.3.80` (#2376) New release includes logic to move all jobs to `spot instances`. Thx! 
--- .gitlab-ci.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 1dc483004f2..b485ca3317e 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -30,7 +30,7 @@ variables: RUSTY_CACHIER_COMPRESSION_METHOD: zstd NEXTEST_FAILURE_OUTPUT: immediate-final NEXTEST_SUCCESS_OUTPUT: final - ZOMBIENET_IMAGE: "docker.io/paritytech/zombienet:v1.3.79" + ZOMBIENET_IMAGE: "docker.io/paritytech/zombienet:v1.3.80" DOCKER_IMAGES_VERSION: "${CI_COMMIT_SHA}" default: -- GitLab From 2e001de9346257321a8e2447d22e86d7c71a917a Mon Sep 17 00:00:00 2001 From: Ankan <10196091+Ank4n@users.noreply.github.com> Date: Fri, 17 Nov 2023 13:48:31 +0100 Subject: [PATCH 071/199] [NPoS] Check if staker is exposed in paged exposure storage entries (#2369) Addresses a bug caused by https://github.com/paritytech/polkadot-sdk/pull/1189. The changes are still not released yet, so would like to push the fix soon so it can go together with the release of the above PR. `fast_unstake` checks if a staker is exposed in an era. However, this fn is still returning whether the staker is exposed based on the old storage item. This PR fixes that by looking in both old and new exposure storages. Also adds some integrity tests for paged exposures. --- substrate/frame/staking/src/pallet/impls.rs | 74 ++++++++++++++++++++- substrate/frame/staking/src/tests.rs | 16 +++++ 2 files changed, 89 insertions(+), 1 deletion(-) diff --git a/substrate/frame/staking/src/pallet/impls.rs b/substrate/frame/staking/src/pallet/impls.rs index 9c36c94b87b..40f30735258 100644 --- a/substrate/frame/staking/src/pallet/impls.rs +++ b/substrate/frame/staking/src/pallet/impls.rs @@ -794,7 +794,7 @@ impl Pallet { stash: T::AccountId, exposure: Exposure>, ) { - >::insert(¤t_era, &stash, &exposure); + EraInfo::::set_exposure(current_era, &stash, exposure); } #[cfg(feature = "runtime-benchmarks")] @@ -1745,9 +1745,16 @@ impl StakingInterface for Pallet { } fn is_exposed_in_era(who: &Self::AccountId, era: &EraIndex) -> bool { + // look in the non paged exposures + // FIXME: Can be cleaned up once non paged exposures are cleared (https://github.com/paritytech/polkadot-sdk/issues/433) ErasStakers::::iter_prefix(era).any(|(validator, exposures)| { validator == *who || exposures.others.iter().any(|i| i.who == *who) }) + || + // look in the paged exposures + ErasStakersPaged::::iter_prefix((era,)).any(|((validator, _), exposure_page)| { + validator == *who || exposure_page.others.iter().any(|i| i.who == *who) + }) } fn status( who: &Self::AccountId, @@ -1812,6 +1819,7 @@ impl Pallet { Self::check_nominators()?; Self::check_exposures()?; + Self::check_paged_exposures()?; Self::check_ledgers()?; Self::check_count() } @@ -1860,6 +1868,70 @@ impl Pallet { .collect::>() } + fn check_paged_exposures() -> Result<(), TryRuntimeError> { + use sp_staking::PagedExposureMetadata; + use sp_std::collections::btree_map::BTreeMap; + + // Sanity check for the paged exposure of the active era. 
+ let mut exposures: BTreeMap>> = + BTreeMap::new(); + let era = Self::active_era().unwrap().index; + let accumulator_default = PagedExposureMetadata { + total: Zero::zero(), + own: Zero::zero(), + nominator_count: 0, + page_count: 0, + }; + + ErasStakersPaged::::iter_prefix((era,)) + .map(|((validator, _page), expo)| { + ensure!( + expo.page_total == + expo.others.iter().map(|e| e.value).fold(Zero::zero(), |acc, x| acc + x), + "wrong total exposure for the page.", + ); + + let metadata = exposures.get(&validator).unwrap_or(&accumulator_default); + exposures.insert( + validator, + PagedExposureMetadata { + total: metadata.total + expo.page_total, + own: metadata.own, + nominator_count: metadata.nominator_count + expo.others.len() as u32, + page_count: metadata.page_count + 1, + }, + ); + + Ok(()) + }) + .collect::>()?; + + exposures + .iter() + .map(|(validator, metadata)| { + let actual_overview = ErasStakersOverview::::get(era, validator); + + ensure!(actual_overview.is_some(), "No overview found for a paged exposure"); + let actual_overview = actual_overview.unwrap(); + + ensure!( + actual_overview.total == metadata.total + actual_overview.own, + "Exposure metadata does not have correct total exposed stake." + ); + ensure!( + actual_overview.nominator_count == metadata.nominator_count, + "Exposure metadata does not have correct count of nominators." + ); + ensure!( + actual_overview.page_count == metadata.page_count, + "Exposure metadata does not have correct count of pages." + ); + + Ok(()) + }) + .collect::>() + } + fn check_nominators() -> Result<(), TryRuntimeError> { // a check per nominator to ensure their entire stake is correctly distributed. Will only // kick-in if the nomination was submitted before the current era. diff --git a/substrate/frame/staking/src/tests.rs b/substrate/frame/staking/src/tests.rs index ee6f67adf14..bac2530b19b 100644 --- a/substrate/frame/staking/src/tests.rs +++ b/substrate/frame/staking/src/tests.rs @@ -6637,6 +6637,14 @@ fn test_validator_exposure_is_backward_compatible_with_non_paged_rewards_payout( ); assert_eq!(EraInfo::::get_page_count(1, &11), 2); + // validator is exposed + assert!(::is_exposed_in_era(&11, &1)); + // nominators are exposed + for i in 10..15 { + let who: AccountId = 1000 + i; + assert!(::is_exposed_in_era(&who, &1)); + } + // case 2: exposure exist in ErasStakers and ErasStakersClipped (legacy). // delete paged storage and add exposure to clipped storage >::remove((1, 11, 0)); @@ -6672,6 +6680,14 @@ fn test_validator_exposure_is_backward_compatible_with_non_paged_rewards_payout( assert_eq!(actual_exposure_full.own, 1000); assert_eq!(actual_exposure_full.total, total_exposure); + // validator is exposed + assert!(::is_exposed_in_era(&11, &1)); + // nominators are exposed + for i in 10..15 { + let who: AccountId = 1000 + i; + assert!(::is_exposed_in_era(&who, &1)); + } + // for pages other than 0, clipped storage returns empty exposure assert_eq!(EraInfo::::get_paged_exposure(1, &11, 1), None); // page size is 1 for clipped storage -- GitLab From 3ab2bc9ff3c6ecf14694f01e3c7e8f394219585a Mon Sep 17 00:00:00 2001 From: Serban Iorga Date: Fri, 17 Nov 2023 15:24:20 +0200 Subject: [PATCH 072/199] Beefy: small fixes (#2378) Related to #2285 - save the state of the BEEFY gadget after processing a finality proof. We need this in order to avoid skipping blocks. 
- avoid reprocessing the old state when not necessary --- .../client/consensus/beefy/src/worker.rs | 36 ++++++++++++------- 1 file changed, 23 insertions(+), 13 deletions(-) diff --git a/substrate/client/consensus/beefy/src/worker.rs b/substrate/client/consensus/beefy/src/worker.rs index 309d8c5135b..966c7410365 100644 --- a/substrate/client/consensus/beefy/src/worker.rs +++ b/substrate/client/consensus/beefy/src/worker.rs @@ -456,6 +456,7 @@ where .filter(|genesis| *genesis == self.persisted_state.pallet_genesis) .ok_or(Error::ConsensusReset)?; + let mut new_session_added = false; if *header.number() > self.best_grandpa_block() { // update best GRANDPA finalized block we have seen self.persisted_state.set_best_grandpa(header.clone()); @@ -475,9 +476,15 @@ where { if let Some(new_validator_set) = find_authorities_change::(&header) { self.init_session_at(new_validator_set, *header.number()); + new_session_added = true; } } + if new_session_added { + crate::aux_schema::write_voter_state(&*self.backend, &self.persisted_state) + .map_err(|e| Error::Backend(e.to_string()))?; + } + // Update gossip validator votes filter. if let Err(e) = self .persisted_state @@ -848,15 +855,10 @@ where .fuse(), ); + self.process_new_state(); let error = loop { - // Act on changed 'state'. - self.process_new_state(); - // Mutable reference used to drive the gossip engine. let mut gossip_engine = &mut self.comms.gossip_engine; - // Use temp val and report after async section, - // to avoid having to Mutex-wrap `gossip_engine`. - let mut gossip_report: Option = None; // Wait for, and handle external events. // The branches below only change 'state', actual voting happens afterwards, @@ -884,10 +886,15 @@ where if let Err(err) = self.triage_incoming_justif(justif) { debug!(target: LOG_TARGET, "🥩 {}", err); } - gossip_report = Some(peer_report); + self.comms.gossip_engine.report(peer_report.who, peer_report.cost_benefit); + }, + ResponseInfo::PeerReport(peer_report) => { + self.comms.gossip_engine.report(peer_report.who, peer_report.cost_benefit); + continue; + }, + ResponseInfo::Pending => { + continue; }, - ResponseInfo::PeerReport(peer_report) => gossip_report = Some(peer_report), - ResponseInfo::Pending => (), } }, justif = block_import_justif.next() => { @@ -924,12 +931,15 @@ where }, // Process peer reports. report = self.comms.gossip_report_stream.next() => { - gossip_report = report; + if let Some(PeerReport { who, cost_benefit }) = report { + self.comms.gossip_engine.report(who, cost_benefit); + } + continue; }, } - if let Some(PeerReport { who, cost_benefit }) = gossip_report { - self.comms.gossip_engine.report(who, cost_benefit); - } + + // Act on changed 'state'. + self.process_new_state(); }; // return error _and_ `comms` that can be reused -- GitLab From 5007e2dd5cb5514fe11ad1bdb21ea9c7fb0ae5ca Mon Sep 17 00:00:00 2001 From: Michal Kucharczyk <1728078+michalkucharczyk@users.noreply.github.com> Date: Fri, 17 Nov 2023 14:43:37 +0100 Subject: [PATCH 073/199] crypto: `lazy_static` removed, light parser for address URI added (#2250) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The `lazy_static` package does not work well in `no-std`: it requires `spin_no_std` feature, which also will propagate into `std` if enabled. This is not what we want. This PR provides simple address uri parser which allows to get rid of _regex_ which was used to parse the address uri, what in turns allows to remove lazy_static. 
Three regular expressions (`SS58_REGEX`,`SECRET_PHRASE_REGEX`,`JUNCTION_REGEX`) were replaced with the parser which unifies all of them. The new parser does not support Unicode, it is ASCII only. Related to: #2044 --------- Co-authored-by: Bastian Köcher Co-authored-by: Koute Co-authored-by: command-bot <> --- Cargo.lock | 45 +- Cargo.toml | 1 + ...age_ensure_span_are_ok_on_wrong_gen.stderr | 12 +- ...re_span_are_ok_on_wrong_gen_unnamed.stderr | 12 +- substrate/primitives/core/Cargo.toml | 6 +- substrate/primitives/core/fuzz/Cargo.toml | 20 + .../fuzz/fuzz_targets/fuzz_address_uri.rs | 53 +++ substrate/primitives/core/src/address_uri.rs | 432 ++++++++++++++++++ substrate/primitives/core/src/crypto.rs | 85 ++-- substrate/primitives/core/src/lib.rs | 2 + 10 files changed, 601 insertions(+), 67 deletions(-) create mode 100644 substrate/primitives/core/fuzz/Cargo.toml create mode 100644 substrate/primitives/core/fuzz/fuzz_targets/fuzz_address_uri.rs create mode 100644 substrate/primitives/core/src/address_uri.rs diff --git a/Cargo.lock b/Cargo.lock index 7854042ddbc..64da4b8996c 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -7408,6 +7408,17 @@ dependencies = [ "rle-decode-fast", ] +[[package]] +name = "libfuzzer-sys" +version = "0.4.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a96cfd5557eb82f2b83fed4955246c988d331975a002961b07c81584d107e7f7" +dependencies = [ + "arbitrary", + "cc", + "once_cell", +] + [[package]] name = "libloading" version = "0.7.4" @@ -8182,9 +8193,9 @@ dependencies = [ [[package]] name = "memchr" -version = "2.5.0" +version = "2.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2dffe52ecf27772e601905b7522cb4ef790d2cc203488bbd0e2fe85fcb74566d" +checksum = "f665ee40bc4a3c5590afb1e9677db74a508659dfd71e126420da8274909a0167" [[package]] name = "memfd" @@ -14297,14 +14308,14 @@ dependencies = [ [[package]] name = "regex" -version = "1.9.3" +version = "1.10.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "81bc1d4caf89fac26a70747fe603c130093b53c773888797a6329091246d651a" +checksum = "380b951a9c5e80ddfd6136919eef32310721aa4aacd4889a8d39124b026ab343" dependencies = [ "aho-corasick", "memchr", - "regex-automata 0.3.6", - "regex-syntax 0.7.4", + "regex-automata 0.4.3", + "regex-syntax 0.8.2", ] [[package]] @@ -14321,10 +14332,16 @@ name = "regex-automata" version = "0.3.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fed1ceff11a1dddaee50c9dc8e4938bd106e9d89ae372f192311e7da498e3b69" + +[[package]] +name = "regex-automata" +version = "0.4.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5f804c7828047e88b2d32e2d7fe5a105da8ee3264f01902f796c8e067dc2483f" dependencies = [ "aho-corasick", "memchr", - "regex-syntax 0.7.4", + "regex-syntax 0.8.2", ] [[package]] @@ -14335,9 +14352,9 @@ checksum = "f162c6dd7b008981e4d40210aca20b4bd0f9b60ca9271061b07f78537722f2e1" [[package]] name = "regex-syntax" -version = "0.7.4" +version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e5ea92a5b6195c6ef2a0295ea818b312502c6fc94dde986c5553242e18fd4ce2" +checksum = "c08c74e62047bb2de4ff487b251e4a92e24f48745648451635cec7d591162d9f" [[package]] name = "remote-ext-tests-bags-list" @@ -17535,6 +17552,16 @@ dependencies = [ "zeroize", ] +[[package]] +name = "sp-core-fuzz" +version = "0.0.0" +dependencies = [ + "lazy_static", + "libfuzzer-sys", + "regex", + "sp-core", +] + [[package]] name = "sp-core-hashing" version = 
"9.0.0" diff --git a/Cargo.toml b/Cargo.toml index ed252e07053..57079aa4d03 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -405,6 +405,7 @@ members = [ "substrate/primitives/consensus/sassafras", "substrate/primitives/consensus/slots", "substrate/primitives/core", + "substrate/primitives/core/fuzz", "substrate/primitives/core/hashing", "substrate/primitives/core/hashing/proc-macro", "substrate/primitives/crypto/ec-utils", diff --git a/substrate/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen.stderr b/substrate/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen.stderr index 7375bcd2f16..b5d10827524 100644 --- a/substrate/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen.stderr +++ b/substrate/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen.stderr @@ -6,8 +6,8 @@ error[E0277]: the trait bound `Bar: WrapperTypeDecode` is not satisfied | = help: the following other types implement trait `WrapperTypeDecode`: Box - frame_support::sp_runtime::sp_application_crypto::sp_core::Bytes Rc + frame_support::sp_runtime::sp_application_crypto::sp_core::Bytes Arc = note: required for `Bar` to implement `Decode` = note: required for `Bar` to implement `FullCodec` @@ -44,8 +44,8 @@ error[E0277]: the trait bound `Bar: WrapperTypeEncode` is not satisfied bytes::bytes::Bytes Cow<'a, T> parity_scale_codec::Ref<'a, T, U> - frame_support::sp_runtime::sp_application_crypto::sp_core::Bytes Rc + frame_support::sp_runtime::sp_application_crypto::sp_core::Bytes Arc Vec and $N others @@ -81,8 +81,8 @@ error[E0277]: the trait bound `Bar: WrapperTypeDecode` is not satisfied | = help: the following other types implement trait `WrapperTypeDecode`: Box - frame_support::sp_runtime::sp_application_crypto::sp_core::Bytes Rc + frame_support::sp_runtime::sp_application_crypto::sp_core::Bytes Arc = note: required for `Bar` to implement `Decode` = note: required for `Bar` to implement `FullCodec` @@ -119,8 +119,8 @@ error[E0277]: the trait bound `Bar: WrapperTypeEncode` is not satisfied bytes::bytes::Bytes Cow<'a, T> parity_scale_codec::Ref<'a, T, U> - frame_support::sp_runtime::sp_application_crypto::sp_core::Bytes Rc + frame_support::sp_runtime::sp_application_crypto::sp_core::Bytes Arc Vec and $N others @@ -137,8 +137,8 @@ error[E0277]: the trait bound `Bar: WrapperTypeDecode` is not satisfied | = help: the following other types implement trait `WrapperTypeDecode`: Box - frame_support::sp_runtime::sp_application_crypto::sp_core::Bytes Rc + frame_support::sp_runtime::sp_application_crypto::sp_core::Bytes Arc = note: required for `Bar` to implement `Decode` = note: required for `Bar` to implement `FullCodec` @@ -177,8 +177,8 @@ error[E0277]: the trait bound `Bar: WrapperTypeEncode` is not satisfied bytes::bytes::Bytes Cow<'a, T> parity_scale_codec::Ref<'a, T, U> - frame_support::sp_runtime::sp_application_crypto::sp_core::Bytes Rc + frame_support::sp_runtime::sp_application_crypto::sp_core::Bytes Arc Vec and $N others diff --git a/substrate/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen_unnamed.stderr b/substrate/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen_unnamed.stderr index 3a0a25712aa..b58902590b8 100644 --- a/substrate/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen_unnamed.stderr +++ b/substrate/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen_unnamed.stderr @@ -6,8 +6,8 @@ error[E0277]: the trait bound `Bar: 
WrapperTypeDecode` is not satisfied | = help: the following other types implement trait `WrapperTypeDecode`: Box - frame_support::sp_runtime::sp_application_crypto::sp_core::Bytes Rc + frame_support::sp_runtime::sp_application_crypto::sp_core::Bytes Arc = note: required for `Bar` to implement `Decode` = note: required for `Bar` to implement `FullCodec` @@ -44,8 +44,8 @@ error[E0277]: the trait bound `Bar: WrapperTypeEncode` is not satisfied bytes::bytes::Bytes Cow<'a, T> parity_scale_codec::Ref<'a, T, U> - frame_support::sp_runtime::sp_application_crypto::sp_core::Bytes Rc + frame_support::sp_runtime::sp_application_crypto::sp_core::Bytes Arc Vec and $N others @@ -81,8 +81,8 @@ error[E0277]: the trait bound `Bar: WrapperTypeDecode` is not satisfied | = help: the following other types implement trait `WrapperTypeDecode`: Box - frame_support::sp_runtime::sp_application_crypto::sp_core::Bytes Rc + frame_support::sp_runtime::sp_application_crypto::sp_core::Bytes Arc = note: required for `Bar` to implement `Decode` = note: required for `Bar` to implement `FullCodec` @@ -119,8 +119,8 @@ error[E0277]: the trait bound `Bar: WrapperTypeEncode` is not satisfied bytes::bytes::Bytes Cow<'a, T> parity_scale_codec::Ref<'a, T, U> - frame_support::sp_runtime::sp_application_crypto::sp_core::Bytes Rc + frame_support::sp_runtime::sp_application_crypto::sp_core::Bytes Arc Vec and $N others @@ -137,8 +137,8 @@ error[E0277]: the trait bound `Bar: WrapperTypeDecode` is not satisfied | = help: the following other types implement trait `WrapperTypeDecode`: Box - frame_support::sp_runtime::sp_application_crypto::sp_core::Bytes Rc + frame_support::sp_runtime::sp_application_crypto::sp_core::Bytes Arc = note: required for `Bar` to implement `Decode` = note: required for `Bar` to implement `FullCodec` @@ -177,8 +177,8 @@ error[E0277]: the trait bound `Bar: WrapperTypeEncode` is not satisfied bytes::bytes::Bytes Cow<'a, T> parity_scale_codec::Ref<'a, T, U> - frame_support::sp_runtime::sp_application_crypto::sp_core::Bytes Rc + frame_support::sp_runtime::sp_application_crypto::sp_core::Bytes Arc Vec and $N others diff --git a/substrate/primitives/core/Cargo.toml b/substrate/primitives/core/Cargo.toml index 79df81e62c6..9ecce0a22f5 100644 --- a/substrate/primitives/core/Cargo.toml +++ b/substrate/primitives/core/Cargo.toml @@ -26,10 +26,8 @@ bs58 = { version = "0.5.0", default-features = false, optional = true } rand = { version = "0.8.5", features = ["small_rng"], optional = true } substrate-bip39 = { version = "0.4.4", optional = true } bip39 = { version = "2.0.0", default-features = false } -regex = { version = "1.6.0", optional = true } zeroize = { version = "1.4.3", default-features = false } secrecy = { version = "0.8.0", default-features = false } -lazy_static = { version = "1.4.0", default-features = false, optional = true } parking_lot = { version = "0.12.1", optional = true } ss58-registry = { version = "1.34.0", default-features = false } sp-std = { path = "../std", default-features = false} @@ -63,6 +61,8 @@ bandersnatch_vrfs = { git = "https://github.com/w3f/ring-vrf", rev = "cbc342e", [dev-dependencies] criterion = "0.4.0" serde_json = "1.0.108" +lazy_static = "1.4.0" +regex = "1.6.0" sp-core-hashing-proc-macro = { path = "hashing/proc-macro" } [[bench]] @@ -92,7 +92,6 @@ std = [ "hash256-std-hasher/std", "impl-serde/std", "itertools", - "lazy_static", "libsecp256k1/std", "log/std", "merlin/std", @@ -102,7 +101,6 @@ std = [ "primitive-types/serde", "primitive-types/std", "rand", - "regex", 
"scale-info/std", "schnorrkel/std", "secp256k1/global-context", diff --git a/substrate/primitives/core/fuzz/Cargo.toml b/substrate/primitives/core/fuzz/Cargo.toml new file mode 100644 index 00000000000..9a094b07d4a --- /dev/null +++ b/substrate/primitives/core/fuzz/Cargo.toml @@ -0,0 +1,20 @@ +[package] +name = "sp-core-fuzz" +version = "0.0.0" +publish = false + +[package.metadata] +cargo-fuzz = true + +[dependencies] +lazy_static = "1.4.0" +libfuzzer-sys = "0.4" +regex = "1.10.2" + +sp-core = { path = ".." } + +[[bin]] +name = "fuzz_address_uri" +path = "fuzz_targets/fuzz_address_uri.rs" +test = false +doc = false diff --git a/substrate/primitives/core/fuzz/fuzz_targets/fuzz_address_uri.rs b/substrate/primitives/core/fuzz/fuzz_targets/fuzz_address_uri.rs new file mode 100644 index 00000000000..e2d9e2fc8b0 --- /dev/null +++ b/substrate/primitives/core/fuzz/fuzz_targets/fuzz_address_uri.rs @@ -0,0 +1,53 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#![no_main] + +extern crate libfuzzer_sys; +extern crate regex; +extern crate sp_core; + +use libfuzzer_sys::fuzz_target; +use regex::Regex; +use sp_core::crypto::AddressUri; + +lazy_static::lazy_static! { + static ref SECRET_PHRASE_REGEX: Regex = Regex::new(r"^(?P[a-zA-Z0-9 ]+)?(?P(//?[^/]+)*)(///(?P.*))?$") + .expect("constructed from known-good static value; qed"); +} + +fuzz_target!(|input: &str| { + let regex_result = SECRET_PHRASE_REGEX.captures(input); + let manual_result = AddressUri::parse(input); + assert_eq!(regex_result.is_some(), manual_result.is_ok()); + if manual_result.is_err() { + let _ = format!("{}", manual_result.as_ref().err().unwrap()); + } + if let (Some(regex_result), Ok(manual_result)) = (regex_result, manual_result) { + assert_eq!(regex_result.name("phrase").map(|p| p.as_str()), manual_result.phrase); + + let manual_paths = manual_result + .paths + .iter() + .map(|s| "/".to_string() + s) + .collect::>() + .join(""); + + assert_eq!(regex_result.name("path").unwrap().as_str().to_string(), manual_paths); + assert_eq!(regex_result.name("password").map(|pass| pass.as_str()), manual_result.pass); + } +}); diff --git a/substrate/primitives/core/src/address_uri.rs b/substrate/primitives/core/src/address_uri.rs new file mode 100644 index 00000000000..862747c9a4b --- /dev/null +++ b/substrate/primitives/core/src/address_uri.rs @@ -0,0 +1,432 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Little util for parsing an address URI. Replaces regular expressions. + +#[cfg(all(not(feature = "std"), any(feature = "serde", feature = "full_crypto")))] +use sp_std::{ + alloc::string::{String, ToString}, + vec::Vec, +}; + +/// A container for results of parsing the address uri string. +/// +/// Intended to be equivalent of: +/// `Regex::new(r"^(?P[a-zA-Z0-9 ]+)?(?P(//?[^/]+)*)(///(?P.*))?$")` +/// which also handles soft and hard derivation paths: +/// `Regex::new(r"/(/?[^/]+)")` +/// +/// Example: +/// ``` +/// use sp_core::crypto::AddressUri; +/// let manual_result = AddressUri::parse("hello world/s//h///pass"); +/// assert_eq!( +/// manual_result.unwrap(), +/// AddressUri { phrase: Some("hello world"), paths: vec!["s", "/h"], pass: Some("pass") } +/// ); +/// ``` +#[derive(Debug, PartialEq)] +pub struct AddressUri<'a> { + /// Phrase, hexadecimal string, or ss58-compatible string. + pub phrase: Option<&'a str>, + /// Key derivation paths, ordered as in input string, + pub paths: Vec<&'a str>, + /// Password. + pub pass: Option<&'a str>, +} + +/// Errors that are possible during parsing the address URI. +#[allow(missing_docs)] +#[cfg_attr(feature = "std", derive(thiserror::Error))] +#[derive(Debug, PartialEq, Eq, Clone)] +pub enum Error { + #[cfg_attr(feature = "std", error("Invalid character in phrase:\n{0}"))] + InvalidCharacterInPhrase(InvalidCharacterInfo), + #[cfg_attr(feature = "std", error("Invalid character in password:\n{0}"))] + InvalidCharacterInPass(InvalidCharacterInfo), + #[cfg_attr(feature = "std", error("Missing character in hard path:\n{0}"))] + MissingCharacterInHardPath(InvalidCharacterInfo), + #[cfg_attr(feature = "std", error("Missing character in soft path:\n{0}"))] + MissingCharacterInSoftPath(InvalidCharacterInfo), +} + +impl Error { + /// Creates an instance of `Error::InvalidCharacterInPhrase` using given parameters. + pub fn in_phrase(input: &str, pos: usize) -> Self { + Self::InvalidCharacterInPhrase(InvalidCharacterInfo::new(input, pos)) + } + /// Creates an instance of `Error::InvalidCharacterInPass` using given parameters. + pub fn in_pass(input: &str, pos: usize) -> Self { + Self::InvalidCharacterInPass(InvalidCharacterInfo::new(input, pos)) + } + /// Creates an instance of `Error::MissingCharacterInHardPath` using given parameters. + pub fn in_hard_path(input: &str, pos: usize) -> Self { + Self::MissingCharacterInHardPath(InvalidCharacterInfo::new(input, pos)) + } + /// Creates an instance of `Error::MissingCharacterInSoftPath` using given parameters. + pub fn in_soft_path(input: &str, pos: usize) -> Self { + Self::MissingCharacterInSoftPath(InvalidCharacterInfo::new(input, pos)) + } +} + +/// Complementary error information. +/// +/// Strucutre contains complementary information about parsing address URI string. +/// String contains a copy of an original URI string, 0-based integer indicates position of invalid +/// character. 
+#[derive(Debug, PartialEq, Eq, Clone)] +pub struct InvalidCharacterInfo(String, usize); + +impl InvalidCharacterInfo { + fn new(info: &str, pos: usize) -> Self { + Self(info.to_string(), pos) + } +} + +impl sp_std::fmt::Display for InvalidCharacterInfo { + fn fmt(&self, f: &mut sp_std::fmt::Formatter) -> sp_std::fmt::Result { + let (s, pos) = escape_string(&self.0, self.1); + write!(f, "{s}\n{i}^", i = sp_std::iter::repeat(" ").take(pos).collect::()) + } +} + +/// Escapes the control characters in given string, and recomputes the position if some characters +/// were actually escaped. +fn escape_string(input: &str, pos: usize) -> (String, usize) { + let mut out = String::with_capacity(2 * input.len()); + let mut out_pos = 0; + input + .chars() + .enumerate() + .map(|(i, c)| { + let esc = |c| (i, Some('\\'), c, 2); + match c { + '\t' => esc('t'), + '\n' => esc('n'), + '\r' => esc('r'), + '\x07' => esc('a'), + '\x08' => esc('b'), + '\x0b' => esc('v'), + '\x0c' => esc('f'), + _ => (i, None, c, 1), + } + }) + .for_each(|(i, maybe_escape, c, increment)| { + maybe_escape.map(|e| out.push(e)); + out.push(c); + if i < pos { + out_pos += increment; + } + }); + (out, out_pos) +} + +fn extract_prefix<'a>(input: &mut &'a str, is_allowed: &dyn Fn(char) -> bool) -> Option<&'a str> { + let output = input.trim_start_matches(is_allowed); + let prefix_len = input.len() - output.len(); + let prefix = if prefix_len > 0 { Some(&input[..prefix_len]) } else { None }; + *input = output; + prefix +} + +fn strip_prefix(input: &mut &str, prefix: &str) -> bool { + if let Some(stripped_input) = input.strip_prefix(prefix) { + *input = stripped_input; + true + } else { + false + } +} + +impl<'a> AddressUri<'a> { + /// Parses the given string. + pub fn parse(mut input: &'a str) -> Result { + let initial_input = input; + let initial_input_len = input.len(); + let phrase = extract_prefix(&mut input, &|ch: char| { + ch.is_ascii_digit() || ch.is_ascii_alphabetic() || ch == ' ' + }); + + let mut pass = None; + let mut paths = Vec::new(); + while !input.is_empty() { + let unstripped_input = input; + if strip_prefix(&mut input, "///") { + pass = Some(extract_prefix(&mut input, &|ch: char| ch != '\n').unwrap_or("")); + } else if strip_prefix(&mut input, "//") { + let path = extract_prefix(&mut input, &|ch: char| ch != '/') + .ok_or(Error::in_hard_path(initial_input, initial_input_len - input.len()))?; + assert!(path.len() > 0); + // hard path shall contain leading '/', so take it from unstripped input. + paths.push(&unstripped_input[1..path.len() + 2]); + } else if strip_prefix(&mut input, "/") { + paths.push( + extract_prefix(&mut input, &|ch: char| ch != '/').ok_or( + Error::in_soft_path(initial_input, initial_input_len - input.len()), + )?, + ); + } else { + return Err(if pass.is_some() { + Error::in_pass(initial_input, initial_input_len - input.len()) + } else { + Error::in_phrase(initial_input, initial_input_len - input.len()) + }); + } + } + + Ok(Self { phrase, paths, pass }) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use regex::Regex; + + lazy_static::lazy_static! 
{ + static ref SECRET_PHRASE_REGEX: Regex = Regex::new(r"^(?P[a-zA-Z0-9 ]+)?(?P(//?[^/]+)*)(///(?P.*))?$") + .expect("constructed from known-good static value; qed"); + } + + fn check_with_regex(input: &str) { + let regex_result = SECRET_PHRASE_REGEX.captures(input); + let manual_result = AddressUri::parse(input); + assert_eq!(regex_result.is_some(), manual_result.is_ok()); + if let (Some(regex_result), Ok(manual_result)) = (regex_result, manual_result) { + assert_eq!( + regex_result.name("phrase").map(|phrase| phrase.as_str()), + manual_result.phrase + ); + + let manual_paths = manual_result + .paths + .iter() + .map(|s| "/".to_string() + s) + .collect::>() + .join(""); + + assert_eq!(regex_result.name("path").unwrap().as_str().to_string(), manual_paths); + assert_eq!( + regex_result.name("password").map(|phrase| phrase.as_str()), + manual_result.pass + ); + } + } + + fn check(input: &str, result: Result) { + let manual_result = AddressUri::parse(input); + assert_eq!(manual_result, result); + check_with_regex(input); + } + + #[test] + fn test00() { + check("///", Ok(AddressUri { phrase: None, pass: Some(""), paths: vec![] })); + } + + #[test] + fn test01() { + check("////////", Ok(AddressUri { phrase: None, pass: Some("/////"), paths: vec![] })) + } + + #[test] + fn test02() { + check( + "sdasd///asda", + Ok(AddressUri { phrase: Some("sdasd"), pass: Some("asda"), paths: vec![] }), + ); + } + + #[test] + fn test03() { + check( + "sdasd//asda", + Ok(AddressUri { phrase: Some("sdasd"), pass: None, paths: vec!["/asda"] }), + ); + } + + #[test] + fn test04() { + check("sdasd//a", Ok(AddressUri { phrase: Some("sdasd"), pass: None, paths: vec!["/a"] })); + } + + #[test] + fn test05() { + let input = "sdasd//"; + check(input, Err(Error::in_hard_path(input, 7))); + } + + #[test] + fn test06() { + check( + "sdasd/xx//asda", + Ok(AddressUri { phrase: Some("sdasd"), pass: None, paths: vec!["xx", "/asda"] }), + ); + } + + #[test] + fn test07() { + check( + "sdasd/xx//a/b//c///pass", + Ok(AddressUri { + phrase: Some("sdasd"), + pass: Some("pass"), + paths: vec!["xx", "/a", "b", "/c"], + }), + ); + } + + #[test] + fn test08() { + check( + "sdasd/xx//a", + Ok(AddressUri { phrase: Some("sdasd"), pass: None, paths: vec!["xx", "/a"] }), + ); + } + + #[test] + fn test09() { + let input = "sdasd/xx//"; + check(input, Err(Error::in_hard_path(input, 10))); + } + + #[test] + fn test10() { + check( + "sdasd/asda", + Ok(AddressUri { phrase: Some("sdasd"), pass: None, paths: vec!["asda"] }), + ); + } + + #[test] + fn test11() { + check( + "sdasd/asda//x", + Ok(AddressUri { phrase: Some("sdasd"), pass: None, paths: vec!["asda", "/x"] }), + ); + } + + #[test] + fn test12() { + check("sdasd/a", Ok(AddressUri { phrase: Some("sdasd"), pass: None, paths: vec!["a"] })); + } + + #[test] + fn test13() { + let input = "sdasd/"; + check(input, Err(Error::in_soft_path(input, 6))); + } + + #[test] + fn test14() { + check("sdasd", Ok(AddressUri { phrase: Some("sdasd"), pass: None, paths: vec![] })); + } + + #[test] + fn test15() { + let input = "sdasd."; + check(input, Err(Error::in_phrase(input, 5))); + } + + #[test] + fn test16() { + let input = "sd.asd/asd.a"; + check(input, Err(Error::in_phrase(input, 2))); + } + + #[test] + fn test17() { + let input = "sd.asd//asd.a"; + check(input, Err(Error::in_phrase(input, 2))); + } + + #[test] + fn test18() { + check( + "sdasd/asd.a", + Ok(AddressUri { phrase: Some("sdasd"), pass: None, paths: vec!["asd.a"] }), + ); + } + + #[test] + fn test19() { + check( + "sdasd//asd.a", + 
Ok(AddressUri { phrase: Some("sdasd"), pass: None, paths: vec!["/asd.a"] }), + ); + } + + #[test] + fn test20() { + let input = "///\n"; + check(input, Err(Error::in_pass(input, 3))); + } + + #[test] + fn test21() { + let input = "///a\n"; + check(input, Err(Error::in_pass(input, 4))); + } + + #[test] + fn test22() { + let input = "sd asd///asd.a\n"; + check(input, Err(Error::in_pass(input, 14))); + } + + #[test] + fn test_invalid_char_info_1() { + let expected = "01234\n^"; + let f = format!("{}", InvalidCharacterInfo::new("01234", 0)); + assert_eq!(expected, f); + } + + #[test] + fn test_invalid_char_info_2() { + let expected = "01\n ^"; + let f = format!("{}", InvalidCharacterInfo::new("01", 1)); + assert_eq!(expected, f); + } + + #[test] + fn test_invalid_char_info_3() { + let expected = "01234\n ^"; + let f = format!("{}", InvalidCharacterInfo::new("01234", 2)); + assert_eq!(expected, f); + } + + #[test] + fn test_invalid_char_info_4() { + let expected = "012\\n456\n ^"; + let f = format!("{}", InvalidCharacterInfo::new("012\n456", 3)); + assert_eq!(expected, f); + } + + #[test] + fn test_invalid_char_info_5() { + let expected = "012\\n456\n ^"; + let f = format!("{}", InvalidCharacterInfo::new("012\n456", 5)); + assert_eq!(expected, f); + } + + #[test] + fn test_invalid_char_info_6() { + let expected = "012\\f456\\t89\n ^"; + let f = format!("{}", InvalidCharacterInfo::new("012\x0c456\t89", 9)); + assert_eq!(expected, f); + } +} diff --git a/substrate/primitives/core/src/crypto.rs b/substrate/primitives/core/src/crypto.rs index d369de5a1c0..c9719e344d3 100644 --- a/substrate/primitives/core/src/crypto.rs +++ b/substrate/primitives/core/src/crypto.rs @@ -25,8 +25,6 @@ use codec::{Decode, Encode, MaxEncodedLen}; use itertools::Itertools; #[cfg(feature = "std")] use rand::{rngs::OsRng, RngCore}; -#[cfg(feature = "std")] -use regex::Regex; use scale_info::TypeInfo; #[cfg(feature = "std")] pub use secrecy::{ExposeSecret, SecretString}; @@ -43,6 +41,11 @@ pub use ss58_registry::{from_known_address_format, Ss58AddressFormat, Ss58Addres /// Trait to zeroize a memory buffer. pub use zeroize::Zeroize; +#[cfg(feature = "std")] +pub use crate::address_uri::AddressUri; +#[cfg(any(feature = "std", feature = "full_crypto"))] +pub use crate::address_uri::Error as AddressUriError; + /// The root phrase for our publicly known keys. pub const DEV_PHRASE: &str = "bottom drive obey lake curtain smoke basket hold race lonely fit walk"; @@ -82,8 +85,8 @@ impl> UncheckedInto for S { #[cfg(feature = "full_crypto")] pub enum SecretStringError { /// The overall format was invalid (e.g. the seed phrase contained symbols). - #[cfg_attr(feature = "std", error("Invalid format"))] - InvalidFormat, + #[cfg_attr(feature = "std", error("Invalid format {0}"))] + InvalidFormat(AddressUriError), /// The seed phrase provided is not a valid BIP39 phrase. #[cfg_attr(feature = "std", error("Invalid phrase"))] InvalidPhrase, @@ -101,6 +104,13 @@ pub enum SecretStringError { InvalidPath, } +#[cfg(any(feature = "std", feature = "full_crypto"))] +impl From for SecretStringError { + fn from(e: AddressUriError) -> Self { + Self::InvalidFormat(e) + } +} + /// An error when deriving a key. #[cfg_attr(feature = "std", derive(thiserror::Error))] #[derive(Debug, Clone, PartialEq, Eq)] @@ -208,7 +218,7 @@ impl> From for DeriveJunction { /// An error type for SS58 decoding. 
#[cfg_attr(feature = "std", derive(thiserror::Error))] #[cfg_attr(not(feature = "std"), derive(Debug))] -#[derive(Clone, Copy, Eq, PartialEq)] +#[derive(Clone, Eq, PartialEq)] #[allow(missing_docs)] #[cfg(any(feature = "full_crypto", feature = "serde"))] pub enum PublicError { @@ -235,6 +245,11 @@ pub enum PublicError { InvalidPath, #[cfg_attr(feature = "std", error("Disallowed SS58 Address Format for this datatype."))] FormatNotAllowed, + #[cfg_attr(feature = "std", error("Password not allowed."))] + PasswordNotAllowed, + #[cfg(feature = "std")] + #[cfg_attr(feature = "std", error("Incorrect URI syntax {0}."))] + MalformedUri(#[from] AddressUriError), } #[cfg(feature = "std")] @@ -414,47 +429,40 @@ pub fn set_default_ss58_version(new_default: Ss58AddressFormat) { DEFAULT_VERSION.store(new_default.into(), core::sync::atomic::Ordering::Relaxed); } -#[cfg(feature = "std")] -lazy_static::lazy_static! { - static ref SS58_REGEX: Regex = Regex::new(r"^(?P[\w\d ]+)?(?P(//?[^/]+)*)$") - .expect("constructed from known-good static value; qed"); - static ref SECRET_PHRASE_REGEX: Regex = Regex::new(r"^(?P[\d\w ]+)?(?P(//?[^/]+)*)(///(?P.*))?$") - .expect("constructed from known-good static value; qed"); - static ref JUNCTION_REGEX: Regex = Regex::new(r"/(/?[^/]+)") - .expect("constructed from known-good static value; qed"); -} - #[cfg(feature = "std")] impl + AsRef<[u8]> + Public + Derive> Ss58Codec for T { fn from_string(s: &str) -> Result { - let cap = SS58_REGEX.captures(s).ok_or(PublicError::InvalidFormat)?; - let s = cap.name("ss58").map(|r| r.as_str()).unwrap_or(DEV_ADDRESS); + let cap = AddressUri::parse(s)?; + if cap.pass.is_some() { + return Err(PublicError::PasswordNotAllowed); + } + let s = cap.phrase.unwrap_or(DEV_ADDRESS); let addr = if let Some(stripped) = s.strip_prefix("0x") { let d = array_bytes::hex2bytes(stripped).map_err(|_| PublicError::InvalidFormat)?; Self::from_slice(&d).map_err(|()| PublicError::BadLength)? } else { Self::from_ss58check(s)? 
}; - if cap["path"].is_empty() { + if cap.paths.is_empty() { Ok(addr) } else { - let path = - JUNCTION_REGEX.captures_iter(&cap["path"]).map(|f| DeriveJunction::from(&f[1])); - addr.derive(path).ok_or(PublicError::InvalidPath) + addr.derive(cap.paths.iter().map(DeriveJunction::from)) + .ok_or(PublicError::InvalidPath) } } fn from_string_with_version(s: &str) -> Result<(Self, Ss58AddressFormat), PublicError> { - let cap = SS58_REGEX.captures(s).ok_or(PublicError::InvalidFormat)?; - let (addr, v) = Self::from_ss58check_with_version( - cap.name("ss58").map(|r| r.as_str()).unwrap_or(DEV_ADDRESS), - )?; - if cap["path"].is_empty() { + let cap = AddressUri::parse(s)?; + if cap.pass.is_some() { + return Err(PublicError::PasswordNotAllowed); + } + let (addr, v) = Self::from_ss58check_with_version(cap.phrase.unwrap_or(DEV_ADDRESS))?; + if cap.paths.is_empty() { Ok((addr, v)) } else { - let path = - JUNCTION_REGEX.captures_iter(&cap["path"]).map(|f| DeriveJunction::from(&f[1])); - addr.derive(path).ok_or(PublicError::InvalidPath).map(|a| (a, v)) + addr.derive(cap.paths.iter().map(DeriveJunction::from)) + .ok_or(PublicError::InvalidPath) + .map(|a| (a, v)) } } } @@ -817,22 +825,15 @@ impl sp_std::str::FromStr for SecretUri { type Err = SecretStringError; fn from_str(s: &str) -> Result { - let cap = SECRET_PHRASE_REGEX.captures(s).ok_or(SecretStringError::InvalidFormat)?; - - let junctions = JUNCTION_REGEX - .captures_iter(&cap["path"]) - .map(|f| DeriveJunction::from(&f[1])) - .collect::>(); - - let phrase = cap.name("phrase").map(|r| r.as_str()).unwrap_or(DEV_PHRASE); - let password = cap.name("password"); + let cap = AddressUri::parse(s)?; + let phrase = cap.phrase.unwrap_or(DEV_PHRASE); Ok(Self { phrase: SecretString::from_str(phrase).expect("Returns infallible error; qed"), - password: password.map(|v| { - SecretString::from_str(v.as_str()).expect("Returns infallible error; qed") - }), - junctions, + password: cap + .pass + .map(|v| SecretString::from_str(v).expect("Returns infallible error; qed")), + junctions: cap.paths.iter().map(DeriveJunction::from).collect::>(), }) } } diff --git a/substrate/primitives/core/src/lib.rs b/substrate/primitives/core/src/lib.rs index ec0641c5466..4873d1a2112 100644 --- a/substrate/primitives/core/src/lib.rs +++ b/substrate/primitives/core/src/lib.rs @@ -55,6 +55,8 @@ pub mod crypto; pub mod hexdisplay; pub use paste; +#[cfg(any(feature = "full_crypto", feature = "std"))] +mod address_uri; #[cfg(feature = "bandersnatch-experimental")] pub mod bandersnatch; #[cfg(feature = "bls-experimental")] -- GitLab From 490fb66537c60e1a391350b3f0a786efb7c17373 Mon Sep 17 00:00:00 2001 From: Adrian Catangiu Date: Fri, 17 Nov 2023 16:40:49 +0200 Subject: [PATCH 074/199] [trivial] asset-hubs runtimes: fix incorrect doc-comments (#2384) Fix some incorrect doc-comments --- .../runtimes/assets/asset-hub-rococo/src/xcm_config.rs | 4 ++-- .../runtimes/assets/asset-hub-westend/src/xcm_config.rs | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/xcm_config.rs b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/xcm_config.rs index b85cb76642f..6e04924f038 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/xcm_config.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/xcm_config.rs @@ -109,7 +109,7 @@ pub type CurrencyTransactor = CurrencyAdapter< (), >; -/// `AssetId`/`Balance` converter for `PoolAssets`. +/// `AssetId`/`Balance` converter for `TrustBackedAssets`. 
pub type TrustBackedAssetsConvertedConcreteId = assets_common::TrustBackedAssetsConvertedConcreteId; @@ -130,7 +130,7 @@ pub type FungiblesTransactor = FungiblesAdapter< CheckingAccount, >; -/// `AssetId/Balance` converter for `TrustBackedAssets` +/// `AssetId`/`Balance` converter for `ForeignAssets`. pub type ForeignAssetsConvertedConcreteId = assets_common::ForeignAssetsConvertedConcreteId< ( // Ignore `TrustBackedAssets` explicitly diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/xcm_config.rs b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/xcm_config.rs index 17312c0f46e..6942559671b 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/xcm_config.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/xcm_config.rs @@ -109,7 +109,7 @@ pub type CurrencyTransactor = CurrencyAdapter< (), >; -/// `AssetId/Balance` converter for `TrustBackedAssets` +/// `AssetId`/`Balance` converter for `TrustBackedAssets`. pub type TrustBackedAssetsConvertedConcreteId = assets_common::TrustBackedAssetsConvertedConcreteId; @@ -130,7 +130,7 @@ pub type FungiblesTransactor = FungiblesAdapter< CheckingAccount, >; -/// `AssetId/Balance` converter for `TrustBackedAssets` +/// `AssetId`/`Balance` converter for `ForeignAssets`. pub type ForeignAssetsConvertedConcreteId = assets_common::ForeignAssetsConvertedConcreteId< ( // Ignore `TrustBackedAssets` explicitly -- GitLab From 079b14f624bef9dd901f93c29424cba0ebbd325d Mon Sep 17 00:00:00 2001 From: Nazar Mokrynskyi Date: Fri, 17 Nov 2023 18:01:08 +0200 Subject: [PATCH 075/199] Do not panic if the `fdlimit` call to increase the file descriptor limit fails (#2155) # Description Sometimes changing file descriptor limits is not allowed, but there is no need to crash the node if/when this happens. Since `fdlimit`'s author decided to use panics instead of returning `Result`, we need to catch it. 
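For illustration, a minimal sketch of the non-panicking handling this change switches to, assuming the `fdlimit` 0.3 API in which `raise_fd_limit()` returns a `Result` with an `Outcome` instead of panicking; the threshold constant and the plain `eprintln!` logging below are placeholders, not the node's actual code:

```rust
// Best-effort attempt to raise the file descriptor limit; never panics.
fn raise_fd_limit_best_effort() {
    // Illustrative threshold only; the node defines its own recommended constant.
    const RECOMMENDED_LIMIT: u64 = 10_000;

    match fdlimit::raise_fd_limit() {
        // The limit was raised, but is still lower than we would like.
        Ok(fdlimit::Outcome::LimitRaised { to, .. }) if to < RECOMMENDED_LIMIT => {
            eprintln!("Low open file descriptor limit: {to}, recommended: {RECOMMENDED_LIMIT}");
        },
        // Either the limit was raised high enough, or the platform does not
        // support raising it (`Outcome::Unsupported`); nothing to do.
        Ok(_) => {},
        // Raising the limit was not allowed; log and keep the node running.
        Err(error) => eprintln!("Failed to raise the file descriptor limit: {error}"),
    }
}
```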
# Checklist - [x] My PR includes a detailed description as outlined in the "Description" section above - [ ] My PR follows the [labeling requirements](CONTRIBUTING.md#Process) of this project (at minimum one label for `T` required) - [ ] I have made corresponding changes to the documentation (if applicable) - [ ] I have added tests that prove my fix is effective or that my feature works (if applicable) --------- Co-authored-by: Koute --- Cargo.lock | 13 +++++++------ substrate/client/cli/Cargo.toml | 2 +- substrate/client/cli/src/config.rs | 23 +++++++++++++++++------ substrate/client/service/test/Cargo.toml | 2 +- substrate/client/service/test/src/lib.rs | 2 +- 5 files changed, 27 insertions(+), 15 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 64da4b8996c..da266eb5d47 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5342,11 +5342,12 @@ dependencies = [ [[package]] name = "fdlimit" -version = "0.2.1" +version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2c4c9e43643f5a3be4ca5b67d26b98031ff9db6806c3440ae32e02e3ceac3f1b" +checksum = "e182f7dbc2ef73d9ef67351c5fbbea084729c48362d3ce9dd44c28e32e277fe5" dependencies = [ "libc", + "thiserror", ] [[package]] @@ -19321,9 +19322,9 @@ checksum = "222a222a5bfe1bba4a77b45ec488a741b3cb8872e5e499451fd7d0129c9c7c3d" [[package]] name = "thiserror" -version = "1.0.48" +version = "1.0.50" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9d6d7a740b8a666a7e828dd00da9c0dc290dff53154ea77ac109281de90589b7" +checksum = "f9a7210f5c9a7156bb50aa36aed4c95afb51df0df00713949448cf9e97d382d2" dependencies = [ "thiserror-impl", ] @@ -19350,9 +19351,9 @@ dependencies = [ [[package]] name = "thiserror-impl" -version = "1.0.48" +version = "1.0.50" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "49922ecae66cc8a249b77e68d1d0623c1b2c514f0060c27cdc68bd62a1219d35" +checksum = "266b2e40bc00e5a6c09c3584011e08b06f123c00362c92b975ba9843aaaa14b8" dependencies = [ "proc-macro2", "quote", diff --git a/substrate/client/cli/Cargo.toml b/substrate/client/cli/Cargo.toml index c415527c372..c4464c5f787 100644 --- a/substrate/client/cli/Cargo.toml +++ b/substrate/client/cli/Cargo.toml @@ -16,7 +16,7 @@ targets = ["x86_64-unknown-linux-gnu"] array-bytes = "6.1" chrono = "0.4.27" clap = { version = "4.4.6", features = ["derive", "string", "wrap_help"] } -fdlimit = "0.2.1" +fdlimit = "0.3.0" futures = "0.3.21" itertools = "0.10.3" libp2p-identity = { version = "0.1.3", features = ["peerid", "ed25519"]} diff --git a/substrate/client/cli/src/config.rs b/substrate/client/cli/src/config.rs index 4d218da6aa8..b842df5a690 100644 --- a/substrate/client/cli/src/config.rs +++ b/substrate/client/cli/src/config.rs @@ -605,14 +605,25 @@ pub trait CliConfiguration: Sized { logger.init()?; - if let Some(new_limit) = fdlimit::raise_fd_limit() { - if new_limit < RECOMMENDED_OPEN_FILE_DESCRIPTOR_LIMIT { + match fdlimit::raise_fd_limit() { + Ok(fdlimit::Outcome::LimitRaised { to, .. }) => + if to < RECOMMENDED_OPEN_FILE_DESCRIPTOR_LIMIT { + warn!( + "Low open file descriptor limit configured for the process. \ + Current value: {:?}, recommended value: {:?}.", + to, RECOMMENDED_OPEN_FILE_DESCRIPTOR_LIMIT, + ); + }, + Ok(fdlimit::Outcome::Unsupported) => { + // Unsupported platform (non-Linux) + }, + Err(error) => { warn!( - "Low open file descriptor limit configured for the process. 
\ - Current value: {:?}, recommended value: {:?}.", - new_limit, RECOMMENDED_OPEN_FILE_DESCRIPTOR_LIMIT, + "Failed to configure file descriptor limit for the process: \ + {}, recommended value: {:?}.", + error, RECOMMENDED_OPEN_FILE_DESCRIPTOR_LIMIT, ); - } + }, } Ok(()) diff --git a/substrate/client/service/test/Cargo.toml b/substrate/client/service/test/Cargo.toml index 670312e4161..c6091f97d63 100644 --- a/substrate/client/service/test/Cargo.toml +++ b/substrate/client/service/test/Cargo.toml @@ -14,7 +14,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] async-channel = "1.8.0" array-bytes = "6.1" -fdlimit = "0.2.1" +fdlimit = "0.3.0" futures = "0.3.21" log = "0.4.17" parity-scale-codec = "3.6.1" diff --git a/substrate/client/service/test/src/lib.rs b/substrate/client/service/test/src/lib.rs index 9700c7643c4..456df73459a 100644 --- a/substrate/client/service/test/src/lib.rs +++ b/substrate/client/service/test/src/lib.rs @@ -285,7 +285,7 @@ where base_port: u16, ) -> TestNet { sp_tracing::try_init_simple(); - fdlimit::raise_fd_limit(); + fdlimit::raise_fd_limit().unwrap(); let runtime = Runtime::new().expect("Error creating tokio runtime"); let mut net = TestNet { runtime, -- GitLab From 0385902cd05523155832b155fe7ce4d1eaf7b779 Mon Sep 17 00:00:00 2001 From: Branislav Kontur Date: Fri, 17 Nov 2023 17:06:45 +0100 Subject: [PATCH 076/199] Relax `force_default_xcm_version` for testnet system parachains (#2385) This PR fixes two things: - relax `force_default_xcm_version` for testnet system parachains (e.g. BridgeHubWestend has now 2 and there is no way to change it to 3, so we need to call `force_xcm_version(3)` for every parachain that it is connected to, because we send XCMv3 messages) - add `Storage` item to `PolkadotXcm` pallet definition (now we cannot see storage items for `pallet_xcm` in PJS) ## TODO - [ ] when merged open PR to `polkadot-fellows/runtimes` repo --- .../assets/asset-hub-rococo/src/xcm_config.rs | 16 +++++++++------- .../assets/asset-hub-westend/src/xcm_config.rs | 16 +++++++++------- .../bridge-hubs/bridge-hub-rococo/src/lib.rs | 2 +- .../bridge-hub-rococo/src/xcm_config.rs | 16 +++++++++------- .../bridge-hubs/bridge-hub-westend/src/lib.rs | 2 +- .../bridge-hub-westend/src/xcm_config.rs | 16 +++++++++------- .../collectives-westend/src/xcm_config.rs | 6 ++++-- 7 files changed, 42 insertions(+), 32 deletions(-) diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/xcm_config.rs b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/xcm_config.rs index 6e04924f038..4da0a2500a5 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/xcm_config.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/xcm_config.rs @@ -275,13 +275,15 @@ impl Contains for SafeCallFilter { matches!( call, - RuntimeCall::PolkadotXcm(pallet_xcm::Call::force_xcm_version { .. }) | - RuntimeCall::System( - frame_system::Call::set_heap_pages { .. } | - frame_system::Call::set_code { .. } | - frame_system::Call::set_code_without_checks { .. } | - frame_system::Call::kill_prefix { .. }, - ) | RuntimeCall::ParachainSystem(..) | + RuntimeCall::PolkadotXcm( + pallet_xcm::Call::force_xcm_version { .. } | + pallet_xcm::Call::force_default_xcm_version { .. } + ) | RuntimeCall::System( + frame_system::Call::set_heap_pages { .. } | + frame_system::Call::set_code { .. } | + frame_system::Call::set_code_without_checks { .. } | + frame_system::Call::kill_prefix { .. }, + ) | RuntimeCall::ParachainSystem(..) | RuntimeCall::Timestamp(..) 
| RuntimeCall::Balances(..) | RuntimeCall::CollatorSelection( diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/xcm_config.rs b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/xcm_config.rs index 6942559671b..4760e087e24 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/xcm_config.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/xcm_config.rs @@ -272,13 +272,15 @@ impl Contains for SafeCallFilter { matches!( call, - RuntimeCall::PolkadotXcm(pallet_xcm::Call::force_xcm_version { .. }) | - RuntimeCall::System( - frame_system::Call::set_heap_pages { .. } | - frame_system::Call::set_code { .. } | - frame_system::Call::set_code_without_checks { .. } | - frame_system::Call::kill_prefix { .. }, - ) | RuntimeCall::ParachainSystem(..) | + RuntimeCall::PolkadotXcm( + pallet_xcm::Call::force_xcm_version { .. } | + pallet_xcm::Call::force_default_xcm_version { .. } + ) | RuntimeCall::System( + frame_system::Call::set_heap_pages { .. } | + frame_system::Call::set_code { .. } | + frame_system::Call::set_code_without_checks { .. } | + frame_system::Call::kill_prefix { .. }, + ) | RuntimeCall::ParachainSystem(..) | RuntimeCall::Timestamp(..) | RuntimeCall::Balances(..) | RuntimeCall::CollatorSelection( diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs index 5a44ccbb75a..8e138822696 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs @@ -498,7 +498,7 @@ construct_runtime!( // XCM helpers. XcmpQueue: cumulus_pallet_xcmp_queue::{Pallet, Call, Storage, Event} = 30, - PolkadotXcm: pallet_xcm::{Pallet, Call, Event, Origin, Config} = 31, + PolkadotXcm: pallet_xcm::{Pallet, Call, Storage, Event, Origin, Config} = 31, CumulusXcm: cumulus_pallet_xcm::{Pallet, Event, Origin} = 32, DmpQueue: cumulus_pallet_dmp_queue::{Pallet, Call, Storage, Event} = 33, diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/xcm_config.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/xcm_config.rs index 1436c5b96a3..de7b5315c88 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/xcm_config.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/xcm_config.rs @@ -161,13 +161,15 @@ impl Contains for SafeCallFilter { matches!( call, - RuntimeCall::PolkadotXcm(pallet_xcm::Call::force_xcm_version { .. }) | - RuntimeCall::System( - frame_system::Call::set_heap_pages { .. } | - frame_system::Call::set_code { .. } | - frame_system::Call::set_code_without_checks { .. } | - frame_system::Call::kill_prefix { .. }, - ) | RuntimeCall::ParachainSystem(..) | + RuntimeCall::PolkadotXcm( + pallet_xcm::Call::force_xcm_version { .. } | + pallet_xcm::Call::force_default_xcm_version { .. } + ) | RuntimeCall::System( + frame_system::Call::set_heap_pages { .. } | + frame_system::Call::set_code { .. } | + frame_system::Call::set_code_without_checks { .. } | + frame_system::Call::kill_prefix { .. }, + ) | RuntimeCall::ParachainSystem(..) | RuntimeCall::Timestamp(..) | RuntimeCall::Balances(..) 
| RuntimeCall::CollatorSelection( diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs index d1d2b4a4159..9c97728058f 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs @@ -497,7 +497,7 @@ construct_runtime!( // XCM helpers. XcmpQueue: cumulus_pallet_xcmp_queue::{Pallet, Call, Storage, Event} = 30, - PolkadotXcm: pallet_xcm::{Pallet, Call, Event, Origin, Config} = 31, + PolkadotXcm: pallet_xcm::{Pallet, Call, Storage, Event, Origin, Config} = 31, CumulusXcm: cumulus_pallet_xcm::{Pallet, Event, Origin} = 32, DmpQueue: cumulus_pallet_dmp_queue::{Pallet, Call, Storage, Event} = 33, diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/xcm_config.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/xcm_config.rs index 7084882c41f..c89ee91c5e4 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/xcm_config.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/xcm_config.rs @@ -150,13 +150,15 @@ impl Contains for SafeCallFilter { matches!( call, - RuntimeCall::PolkadotXcm(pallet_xcm::Call::force_xcm_version { .. }) | - RuntimeCall::System( - frame_system::Call::set_heap_pages { .. } | - frame_system::Call::set_code { .. } | - frame_system::Call::set_code_without_checks { .. } | - frame_system::Call::kill_prefix { .. }, - ) | RuntimeCall::ParachainSystem(..) | + RuntimeCall::PolkadotXcm( + pallet_xcm::Call::force_xcm_version { .. } | + pallet_xcm::Call::force_default_xcm_version { .. } + ) | RuntimeCall::System( + frame_system::Call::set_heap_pages { .. } | + frame_system::Call::set_code { .. } | + frame_system::Call::set_code_without_checks { .. } | + frame_system::Call::kill_prefix { .. }, + ) | RuntimeCall::ParachainSystem(..) | RuntimeCall::Timestamp(..) | RuntimeCall::Balances(..) | RuntimeCall::CollatorSelection( diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/src/xcm_config.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/xcm_config.rs index d58995827fa..cefc099c96f 100644 --- a/cumulus/parachains/runtimes/collectives/collectives-westend/src/xcm_config.rs +++ b/cumulus/parachains/runtimes/collectives/collectives-westend/src/xcm_config.rs @@ -178,8 +178,10 @@ impl Contains for SafeCallFilter { pallet_collator_selection::Call::add_invulnerable { .. } | pallet_collator_selection::Call::remove_invulnerable { .. }, ) | RuntimeCall::Session(pallet_session::Call::purge_keys { .. }) | - RuntimeCall::PolkadotXcm(pallet_xcm::Call::force_xcm_version { .. }) | - RuntimeCall::XcmpQueue(..) | + RuntimeCall::PolkadotXcm( + pallet_xcm::Call::force_xcm_version { .. } | + pallet_xcm::Call::force_default_xcm_version { .. } + ) | RuntimeCall::XcmpQueue(..) | RuntimeCall::MessageQueue(..) | RuntimeCall::Alliance( // `init_members` accepts unbounded vecs as arguments, -- GitLab From 9e34163aca9d58d7351451833d9cd214b266e7f0 Mon Sep 17 00:00:00 2001 From: Sebastian Kunert Date: Fri, 17 Nov 2023 17:10:46 +0100 Subject: [PATCH 077/199] Make collator RPC mode non-experimental (#2381) The `--relay-chain-rpc-urls` CLI flag has been available for a while now. We have collators with this running and parachain teams are also using it. It should be fine now to remove the experimental status. 
--- cumulus/client/cli/src/lib.rs | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/cumulus/client/cli/src/lib.rs b/cumulus/client/cli/src/lib.rs index 1b18ed06437..a2238b73b2b 100644 --- a/cumulus/client/cli/src/lib.rs +++ b/cumulus/client/cli/src/lib.rs @@ -296,7 +296,14 @@ pub struct RunCmd { #[arg(long, conflicts_with = "validator")] pub collator: bool, - /// EXPERIMENTAL: Specify an URL to a relay chain full node to communicate with. + /// Creates a less resource-hungry node that retrieves relay chain data from an RPC endpoint. + /// + /// The provided URLs should point to RPC endpoints of the relay chain. + /// This node connects to the remote nodes following the order they were specified in. If the + /// connection fails, it attempts to connect to the next endpoint in the list. + /// + /// Note: This option doesn't stop the node from connecting to the relay chain network but + /// reduces bandwidth use. #[arg( long, value_parser = validate_relay_chain_url, -- GitLab From 82912acb33a9030c0ef3bf590a34fca09b72dc5f Mon Sep 17 00:00:00 2001 From: Liam Aharon Date: Fri, 17 Nov 2023 20:14:45 +0400 Subject: [PATCH 078/199] Fix migrations and add CI check for new system chains (#2336) Westend Collectives migration CI check can be fixed once we have https://github.com/paritytech/try-runtime-cli/pull/58, will open another PR once it is available. - [x] Remove deprecated `DmpQueue` pallet from Rococo Contracts, the migration is complete - [x] Fix Asset Hub Rococo storage versions - [x] Add migration check CI for Asset Hub Rococo and Westend Bridge Hub --- .gitlab/pipeline/check.yml | 25 ++++++++ .../assets/asset-hub-rococo/src/lib.rs | 59 ++++++++++++++++++- .../contracts/contracts-rococo/src/lib.rs | 1 - .../contracts-rococo/src/xcm_config.rs | 6 -- 4 files changed, 82 insertions(+), 9 deletions(-) diff --git a/.gitlab/pipeline/check.yml b/.gitlab/pipeline/check.yml index 4071fdf9758..429491fb174 100644 --- a/.gitlab/pipeline/check.yml +++ b/.gitlab/pipeline/check.yml @@ -151,6 +151,31 @@ check-runtime-migration-asset-hub-westend: PACKAGE: "asset-hub-westend-runtime" WASM: "asset_hub_westend_runtime.compact.compressed.wasm" URI: "wss://westend-asset-hub-rpc.polkadot.io:443" + +check-runtime-migration-asset-hub-rococo: + stage: check + extends: + - .docker-env + - .test-pr-refs + - .check-runtime-migration + variables: + NETWORK: "asset-hub-rococo" + PACKAGE: "asset-hub-rococo-runtime" + WASM: "asset_hub_rococo_runtime.compact.compressed.wasm" + URI: "wss://rococo-asset-hub-rpc.polkadot.io:443" + +# Check runtime migrations for Parity managed bridge hub chains +check-runtime-migration-bridge-hub-westend: + stage: check + extends: + - .docker-env + - .test-pr-refs + - .check-runtime-migration + variables: + NETWORK: "bridge-hub-westend" + PACKAGE: "bridge-hub-westend-runtime" + WASM: "bridge_hub_westend_runtime.compact.compressed.wasm" + URI: "wss://westend-bridge-hub-rpc.polkadot.io:443" check-runtime-migration-bridge-hub-rococo: stage: check diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs index 4b4ae61a3e8..b274f45877b 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs @@ -39,7 +39,9 @@ use sp_api::impl_runtime_apis; use sp_core::{crypto::KeyTypeId, OpaqueMetadata}; use sp_runtime::{ create_runtime_str, generic, impl_opaque_keys, - traits::{AccountIdConversion, AccountIdLookup, 
BlakeTwo256, Block as BlockT, Verify}, + traits::{ + AccountIdConversion, AccountIdLookup, BlakeTwo256, Block as BlockT, Saturating, Verify, + }, transaction_validity::{TransactionSource, TransactionValidity}, ApplyExtrinsicResult, Permill, }; @@ -959,7 +961,60 @@ pub type SignedExtra = ( pub type UncheckedExtrinsic = generic::UncheckedExtrinsic; /// Migrations to apply on runtime upgrade. -pub type Migrations = (pallet_collator_selection::migration::v1::MigrateToV1,); +pub type Migrations = + (pallet_collator_selection::migration::v1::MigrateToV1, InitStorageVersions); + +/// Migration to initialize storage versions for pallets added after genesis. +/// +/// This is now done automatically (see ), +/// but some pallets had made it in and had storage set in them for this parachain before it was +/// merged. +pub struct InitStorageVersions; + +impl frame_support::traits::OnRuntimeUpgrade for InitStorageVersions { + fn on_runtime_upgrade() -> Weight { + use frame_support::traits::{GetStorageVersion, StorageVersion}; + + let mut writes = 0; + + if PolkadotXcm::on_chain_storage_version() == StorageVersion::new(0) { + PolkadotXcm::current_storage_version().put::(); + writes.saturating_inc(); + } + + if Multisig::on_chain_storage_version() == StorageVersion::new(0) { + Multisig::current_storage_version().put::(); + writes.saturating_inc(); + } + + if Assets::on_chain_storage_version() == StorageVersion::new(0) { + Assets::current_storage_version().put::(); + writes.saturating_inc(); + } + + if Uniques::on_chain_storage_version() == StorageVersion::new(0) { + Uniques::current_storage_version().put::(); + writes.saturating_inc(); + } + + if Nfts::on_chain_storage_version() == StorageVersion::new(0) { + Nfts::current_storage_version().put::(); + writes.saturating_inc(); + } + + if ForeignAssets::on_chain_storage_version() == StorageVersion::new(0) { + ForeignAssets::current_storage_version().put::(); + writes.saturating_inc(); + } + + if PoolAssets::on_chain_storage_version() == StorageVersion::new(0) { + PoolAssets::current_storage_version().put::(); + writes.saturating_inc(); + } + + ::DbWeight::get().reads_writes(7, writes) + } +} /// Executive: handles dispatch to the various modules. pub type Executive = frame_executive::Executive< diff --git a/cumulus/parachains/runtimes/contracts/contracts-rococo/src/lib.rs b/cumulus/parachains/runtimes/contracts/contracts-rococo/src/lib.rs index 5b828bad0c7..9d6a53c5ed3 100644 --- a/cumulus/parachains/runtimes/contracts/contracts-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/contracts/contracts-rococo/src/lib.rs @@ -405,7 +405,6 @@ construct_runtime!( XcmpQueue: cumulus_pallet_xcmp_queue::{Pallet, Call, Storage, Event} = 30, PolkadotXcm: pallet_xcm::{Pallet, Call, Storage, Event, Origin, Config} = 31, CumulusXcm: cumulus_pallet_xcm::{Pallet, Event, Origin} = 32, - DmpQueue: cumulus_pallet_dmp_queue::{Pallet, Call, Storage, Event} = 33, MessageQueue: pallet_message_queue::{Pallet, Call, Storage, Event} = 34, // Smart Contracts. diff --git a/cumulus/parachains/runtimes/contracts/contracts-rococo/src/xcm_config.rs b/cumulus/parachains/runtimes/contracts/contracts-rococo/src/xcm_config.rs index faee1c68fe6..2ac93aed3f8 100644 --- a/cumulus/parachains/runtimes/contracts/contracts-rococo/src/xcm_config.rs +++ b/cumulus/parachains/runtimes/contracts/contracts-rococo/src/xcm_config.rs @@ -304,9 +304,3 @@ impl cumulus_pallet_xcmp_queue::Config for Runtime { parameter_types! 
{ pub const RelayOrigin: AggregateMessageOrigin = AggregateMessageOrigin::Parent; } - -impl cumulus_pallet_dmp_queue::Config for Runtime { - type WeightInfo = cumulus_pallet_dmp_queue::weights::SubstrateWeight; - type RuntimeEvent = crate::RuntimeEvent; - type DmpSink = frame_support::traits::EnqueueWithOrigin; -} -- GitLab From 1d1c3719487c8b9f9b770e5c33e8e8b237bd8824 Mon Sep 17 00:00:00 2001 From: Davide Galassi Date: Fri, 17 Nov 2023 23:48:34 +0100 Subject: [PATCH 079/199] Bump bandersnatch VRF revision (#2389) Closes https://github.com/paritytech/polkadot-sdk/issues/2327 cc @burdges --- Cargo.lock | 40 ++++++++++++++-------------- substrate/primitives/core/Cargo.toml | 2 +- 2 files changed, 21 insertions(+), 21 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index da266eb5d47..ed9997101fb 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -544,7 +544,7 @@ dependencies = [ [[package]] name = "ark-secret-scalar" version = "0.0.2" -source = "git+https://github.com/w3f/ring-vrf?rev=cbc342e#cbc342e95d3cbcd3c5ba8d45af7200eb58e63502" +source = "git+https://github.com/w3f/ring-vrf?rev=3ddc205#3ddc2051066c4b3f0eadd0ba5700df12500d9754" dependencies = [ "ark-ec", "ark-ff", @@ -593,7 +593,7 @@ dependencies = [ [[package]] name = "ark-transcript" version = "0.0.2" -source = "git+https://github.com/w3f/ring-vrf?rev=cbc342e#cbc342e95d3cbcd3c5ba8d45af7200eb58e63502" +source = "git+https://github.com/w3f/ring-vrf?rev=3ddc205#3ddc2051066c4b3f0eadd0ba5700df12500d9754" dependencies = [ "ark-ff", "ark-serialize", @@ -1369,8 +1369,8 @@ dependencies = [ [[package]] name = "bandersnatch_vrfs" -version = "0.0.3" -source = "git+https://github.com/w3f/ring-vrf?rev=cbc342e#cbc342e95d3cbcd3c5ba8d45af7200eb58e63502" +version = "0.0.4" +source = "git+https://github.com/w3f/ring-vrf?rev=3ddc205#3ddc2051066c4b3f0eadd0ba5700df12500d9754" dependencies = [ "ark-bls12-381", "ark-ec", @@ -1493,8 +1493,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "93f2635620bf0b9d4576eb7bb9a38a55df78bd1205d26fa994b25911a69f212f" dependencies = [ "bitcoin_hashes", - "rand 0.8.5", - "rand_core 0.6.4", + "rand 0.7.3", + "rand_core 0.5.1", "serde", "unicode-normalization", ] @@ -3065,7 +3065,7 @@ dependencies = [ [[package]] name = "common" version = "0.1.0" -source = "git+https://github.com/w3f/ring-proof#edd1e90b847e560bf60fc2e8712235ccfa11a9a9" +source = "git+https://github.com/burdges/ring-proof?branch=patch-1#05a756076cb20f981a52afea3a620168de49f95f" dependencies = [ "ark-ec", "ark-ff", @@ -4814,7 +4814,7 @@ checksum = "86e3bdc80eee6e16b2b6b0f87fbc98c04bee3455e35174c0de1a125d0688c632" [[package]] name = "dleq_vrf" version = "0.0.2" -source = "git+https://github.com/w3f/ring-vrf?rev=cbc342e#cbc342e95d3cbcd3c5ba8d45af7200eb58e63502" +source = "git+https://github.com/w3f/ring-vrf?rev=3ddc205#3ddc2051066c4b3f0eadd0ba5700df12500d9754" dependencies = [ "ark-ec", "ark-ff", @@ -5389,7 +5389,7 @@ dependencies = [ [[package]] name = "fflonk" version = "0.1.0" -source = "git+https://github.com/w3f/fflonk#26a5045b24e169cffc1f9328ca83d71061145c40" +source = "git+https://github.com/w3f/fflonk#1beb0585e1c8488956fac7f05da061f9b41e8948" dependencies = [ "ark-ec", "ark-ff", @@ -14445,7 +14445,7 @@ dependencies = [ [[package]] name = "ring" version = "0.1.0" -source = "git+https://github.com/w3f/ring-proof#edd1e90b847e560bf60fc2e8712235ccfa11a9a9" +source = "git+https://github.com/burdges/ring-proof?branch=patch-1#05a756076cb20f981a52afea3a620168de49f95f" dependencies = [ "ark-ec", "ark-ff", @@ -17607,7 +17607,7 @@ dependencies 
= [ [[package]] name = "sp-crypto-ec-utils" version = "0.4.1" -source = "git+https://github.com/paritytech/polkadot-sdk#fe9435db2fda7c9e2f4e29521564c72cac38f59b" +source = "git+https://github.com/paritytech/polkadot-sdk#82912acb33a9030c0ef3bf590a34fca09b72dc5f" dependencies = [ "ark-bls12-377", "ark-bls12-377-ext", @@ -17645,7 +17645,7 @@ dependencies = [ [[package]] name = "sp-debug-derive" version = "8.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk#fe9435db2fda7c9e2f4e29521564c72cac38f59b" +source = "git+https://github.com/paritytech/polkadot-sdk#82912acb33a9030c0ef3bf590a34fca09b72dc5f" dependencies = [ "proc-macro2", "quote", @@ -17665,7 +17665,7 @@ dependencies = [ [[package]] name = "sp-externalities" version = "0.19.0" -source = "git+https://github.com/paritytech/polkadot-sdk#fe9435db2fda7c9e2f4e29521564c72cac38f59b" +source = "git+https://github.com/paritytech/polkadot-sdk#82912acb33a9030c0ef3bf590a34fca09b72dc5f" dependencies = [ "environmental", "parity-scale-codec", @@ -17897,7 +17897,7 @@ dependencies = [ [[package]] name = "sp-runtime-interface" version = "17.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk#fe9435db2fda7c9e2f4e29521564c72cac38f59b" +source = "git+https://github.com/paritytech/polkadot-sdk#82912acb33a9030c0ef3bf590a34fca09b72dc5f" dependencies = [ "bytes", "impl-trait-for-tuples", @@ -17926,7 +17926,7 @@ dependencies = [ [[package]] name = "sp-runtime-interface-proc-macro" version = "11.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk#fe9435db2fda7c9e2f4e29521564c72cac38f59b" +source = "git+https://github.com/paritytech/polkadot-sdk#82912acb33a9030c0ef3bf590a34fca09b72dc5f" dependencies = [ "Inflector", "proc-macro-crate", @@ -18054,7 +18054,7 @@ version = "8.0.0" [[package]] name = "sp-std" version = "8.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk#fe9435db2fda7c9e2f4e29521564c72cac38f59b" +source = "git+https://github.com/paritytech/polkadot-sdk#82912acb33a9030c0ef3bf590a34fca09b72dc5f" [[package]] name = "sp-storage" @@ -18071,7 +18071,7 @@ dependencies = [ [[package]] name = "sp-storage" version = "13.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk#fe9435db2fda7c9e2f4e29521564c72cac38f59b" +source = "git+https://github.com/paritytech/polkadot-sdk#82912acb33a9030c0ef3bf590a34fca09b72dc5f" dependencies = [ "impl-serde", "parity-scale-codec", @@ -18120,7 +18120,7 @@ dependencies = [ [[package]] name = "sp-tracing" version = "10.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk#fe9435db2fda7c9e2f4e29521564c72cac38f59b" +source = "git+https://github.com/paritytech/polkadot-sdk#82912acb33a9030c0ef3bf590a34fca09b72dc5f" dependencies = [ "parity-scale-codec", "sp-std 8.0.0 (git+https://github.com/paritytech/polkadot-sdk)", @@ -18221,7 +18221,7 @@ dependencies = [ [[package]] name = "sp-wasm-interface" version = "14.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk#fe9435db2fda7c9e2f4e29521564c72cac38f59b" +source = "git+https://github.com/paritytech/polkadot-sdk#82912acb33a9030c0ef3bf590a34fca09b72dc5f" dependencies = [ "anyhow", "impl-trait-for-tuples", @@ -20008,7 +20008,7 @@ checksum = "97fee6b57c6a41524a810daee9286c02d7752c4253064d0b05472833a438f675" dependencies = [ "cfg-if", "digest 0.10.7", - "rand 0.8.5", + "rand 0.7.3", "static_assertions", ] diff --git a/substrate/primitives/core/Cargo.toml b/substrate/primitives/core/Cargo.toml index 9ecce0a22f5..25478bed2d9 100644 --- a/substrate/primitives/core/Cargo.toml +++ b/substrate/primitives/core/Cargo.toml @@ 
-56,7 +56,7 @@ sp-runtime-interface = { path = "../runtime-interface", default-features = false # bls crypto w3f-bls = { version = "0.1.3", default-features = false, optional = true} # bandersnatch crypto -bandersnatch_vrfs = { git = "https://github.com/w3f/ring-vrf", rev = "cbc342e", default-features = false, optional = true } +bandersnatch_vrfs = { git = "https://github.com/w3f/ring-vrf", rev = "3ddc205", default-features = false, optional = true } [dev-dependencies] criterion = "0.4.0" -- GitLab From 794ee98049f546d39923b7f34ffcb7e719ae349d Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Sat, 18 Nov 2023 13:58:16 +0200 Subject: [PATCH 080/199] Bump secp256k1 from 0.24.3 to 0.28.0 (#2357) Bumps [secp256k1](https://github.com/rust-bitcoin/rust-secp256k1) from 0.24.3 to 0.28.0.
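The bump is not purely mechanical: secp256k1 0.28 renames the digest constructor, which the substrate call sites below adapt to, and `SecretKey` now exposes `non_secure_erase()` for zeroizing (used in `Pair::drop`). A rough sketch of the call-site difference, assuming the crate's `recovery` and `global-context` features; the function name and error handling here are illustrative:

```rust
use secp256k1::{ecdsa::RecoverableSignature, Message, SecretKey, SECP256K1};

// secp256k1 0.28: a 32-byte digest is wrapped with `Message::from_digest_slice`
// (0.24 used `Message::from_slice`); the signing call itself is unchanged.
fn sign_prehashed(secret: &SecretKey, digest: &[u8; 32]) -> RecoverableSignature {
    let message = Message::from_digest_slice(digest).expect("digest is 32 bytes; qed");
    SECP256K1.sign_ecdsa_recoverable(&message, secret)
}
```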
[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=secp256k1&package-manager=cargo&previous-version=0.24.3&new-version=0.28.0)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores)

Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`.

[//]: # (dependabot-automerge-start)
[//]: # (dependabot-automerge-end)

---

Dependabot commands and options:

You can trigger Dependabot actions by commenting on this PR:
- `@dependabot rebase` will rebase this PR
- `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it
- `@dependabot merge` will merge this PR after your CI passes on it
- `@dependabot squash and merge` will squash and merge this PR after your CI passes on it
- `@dependabot cancel merge` will cancel a previously requested merge and block automerging
- `@dependabot reopen` will reopen this PR if it is closed
- `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually
- `@dependabot show <dependency name> ignore conditions` will show all of the ignore conditions of the specified dependency
- `@dependabot ignore <dependency name> major version` will close this group update PR and stop Dependabot creating any more for the specific dependency's major version (unless you unignore this specific dependency's major version or upgrade to it yourself)
- `@dependabot ignore <dependency name> minor version` will close this group update PR and stop Dependabot creating any more for the specific dependency's minor version (unless you unignore this specific dependency's minor version or upgrade to it yourself)
- `@dependabot ignore <dependency name>` will close this group update PR and stop Dependabot creating any more for the specific dependency (unless you unignore this specific dependency or upgrade to it yourself)
- `@dependabot unignore <dependency name>` will remove all of the ignore conditions of the specified dependency
- `@dependabot unignore <dependency name> <ignore condition>` will remove the ignore condition of the specified dependency and ignore conditions
--------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: Liam Aharon --- Cargo.lock | 8 ++++---- substrate/primitives/core/Cargo.toml | 2 +- substrate/primitives/core/src/ecdsa.rs | 13 ++++--------- substrate/primitives/io/Cargo.toml | 2 +- substrate/primitives/io/src/lib.rs | 4 ++-- 5 files changed, 12 insertions(+), 17 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index ed9997101fb..ac64e65ee0e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -16602,18 +16602,18 @@ dependencies = [ [[package]] name = "secp256k1" -version = "0.24.3" +version = "0.28.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6b1629c9c557ef9b293568b338dddfc8208c98a18c59d722a9d53f859d9c9b62" +checksum = "2acea373acb8c21ecb5a23741452acd2593ed44ee3d343e72baaa143bc89d0d5" dependencies = [ "secp256k1-sys", ] [[package]] name = "secp256k1-sys" -version = "0.6.1" +version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "83080e2c2fc1006e625be82e5d1eb6a43b7fd9578b617fcc55814daf286bba4b" +checksum = "09e67c467c38fd24bd5499dc9a18183b31575c12ee549197e3e20d57aa4fe3b7" dependencies = [ "cc", ] diff --git a/substrate/primitives/core/Cargo.toml b/substrate/primitives/core/Cargo.toml index 25478bed2d9..34485c72ab0 100644 --- a/substrate/primitives/core/Cargo.toml +++ b/substrate/primitives/core/Cargo.toml @@ -49,7 +49,7 @@ blake2 = { version = "0.10.4", default-features = false, optional = true } libsecp256k1 = { version = "0.7", default-features = false, features = ["static-context"], optional = true } schnorrkel = { version = "0.9.1", features = ["preaudit_deprecated", "u64_backend"], default-features = false } merlin = { version = "2.0", default-features = false } -secp256k1 = { version = "0.24.0", default-features = false, features = ["recovery", "alloc"], optional = true } +secp256k1 = { version = "0.28.0", default-features = false, features = ["recovery", "alloc"], optional = true } sp-core-hashing = { path = "hashing", default-features = false, optional = true } sp-runtime-interface = { path = "../runtime-interface", default-features = false} diff --git a/substrate/primitives/core/src/ecdsa.rs b/substrate/primitives/core/src/ecdsa.rs index 603fa515a30..471714582a6 100644 --- a/substrate/primitives/core/src/ecdsa.rs +++ b/substrate/primitives/core/src/ecdsa.rs @@ -336,7 +336,7 @@ impl Signature { pub fn recover_prehashed(&self, message: &[u8; 32]) -> Option { let rid = RecoveryId::from_i32(self.0[64] as i32).ok()?; let sig = RecoverableSignature::from_compact(&self.0[..64], rid).ok()?; - let message = Message::from_slice(message).expect("Message is 32 bytes; qed"); + let message = Message::from_digest_slice(message).expect("Message is 32 bytes; qed"); #[cfg(feature = "std")] let context = SECP256K1; @@ -458,7 +458,7 @@ impl Pair { /// Sign a pre-hashed message pub fn sign_prehashed(&self, message: &[u8; 32]) -> Signature { - let message = Message::from_slice(message).expect("Message is 32 bytes; qed"); + let message = Message::from_digest_slice(message).expect("Message is 32 bytes; qed"); #[cfg(feature = "std")] let context = SECP256K1; @@ -508,12 +508,7 @@ impl Pair { #[cfg(feature = "full_crypto")] impl Drop for Pair { fn drop(&mut self) { - let ptr = self.secret.as_mut_ptr(); - for off in 0..self.secret.len() { - unsafe { - core::ptr::write_volatile(ptr.add(off), 0); - } - } + self.secret.non_secure_erase() } } @@ -760,7 +755,7 @@ mod test { let msg = [0u8; 32]; let sig1 = 
pair.sign_prehashed(&msg); let sig2: Signature = { - let message = Message::from_slice(&msg).unwrap(); + let message = Message::from_digest_slice(&msg).unwrap(); SECP256K1.sign_ecdsa_recoverable(&message, &pair.secret).into() }; assert_eq!(sig1, sig2); diff --git a/substrate/primitives/io/Cargo.toml b/substrate/primitives/io/Cargo.toml index 445104b736e..59df8895bb7 100644 --- a/substrate/primitives/io/Cargo.toml +++ b/substrate/primitives/io/Cargo.toml @@ -28,7 +28,7 @@ sp-trie = { path = "../trie", default-features = false, optional = true} sp-externalities = { path = "../externalities", default-features = false} sp-tracing = { path = "../tracing", default-features = false} log = { version = "0.4.17", optional = true } -secp256k1 = { version = "0.24.0", features = ["recovery", "global-context"], optional = true } +secp256k1 = { version = "0.28.0", features = ["recovery", "global-context"], optional = true } tracing = { version = "0.1.29", default-features = false } tracing-core = { version = "0.1.28", default-features = false} diff --git a/substrate/primitives/io/src/lib.rs b/substrate/primitives/io/src/lib.rs index c4182d6ab3a..a300152ee66 100644 --- a/substrate/primitives/io/src/lib.rs +++ b/substrate/primitives/io/src/lib.rs @@ -1139,7 +1139,7 @@ pub trait Crypto { .map_err(|_| EcdsaVerifyError::BadV)?; let sig = RecoverableSignature::from_compact(&sig[..64], rid) .map_err(|_| EcdsaVerifyError::BadRS)?; - let msg = Message::from_slice(msg).expect("Message is 32 bytes; qed"); + let msg = Message::from_digest_slice(msg).expect("Message is 32 bytes; qed"); let pubkey = SECP256K1 .recover_ecdsa(&msg, &sig) .map_err(|_| EcdsaVerifyError::BadSignature)?; @@ -1185,7 +1185,7 @@ pub trait Crypto { .map_err(|_| EcdsaVerifyError::BadV)?; let sig = RecoverableSignature::from_compact(&sig[..64], rid) .map_err(|_| EcdsaVerifyError::BadRS)?; - let msg = Message::from_slice(msg).expect("Message is 32 bytes; qed"); + let msg = Message::from_digest_slice(msg).expect("Message is 32 bytes; qed"); let pubkey = SECP256K1 .recover_ecdsa(&msg, &sig) .map_err(|_| EcdsaVerifyError::BadSignature)?; -- GitLab From b5858936e13a8adc800655478c965daf54dbdc71 Mon Sep 17 00:00:00 2001 From: Julian Eager Date: Mon, 20 Nov 2023 02:04:22 +0800 Subject: [PATCH 081/199] Preserve artifact cache unless stale (#1918) Co-authored-by: Marcin S --- Cargo.lock | 8 +- .../node/core/candidate-validation/src/lib.rs | 6 +- .../core/candidate-validation/src/tests.rs | 12 +- polkadot/node/core/pvf/Cargo.toml | 1 + .../benches/host_prepare_rococo_runtime.rs | 2 +- polkadot/node/core/pvf/common/Cargo.toml | 3 + polkadot/node/core/pvf/common/build.rs | 19 + polkadot/node/core/pvf/common/src/error.rs | 18 +- polkadot/node/core/pvf/common/src/lib.rs | 2 + polkadot/node/core/pvf/common/src/prepare.rs | 19 + polkadot/node/core/pvf/common/src/pvf.rs | 2 +- .../node/core/pvf/prepare-worker/Cargo.toml | 1 + .../node/core/pvf/prepare-worker/src/lib.rs | 37 +- polkadot/node/core/pvf/src/artifacts.rs | 383 +++++++++++++----- polkadot/node/core/pvf/src/host.rs | 125 +++--- polkadot/node/core/pvf/src/lib.rs | 2 +- polkadot/node/core/pvf/src/prepare/pool.rs | 10 +- polkadot/node/core/pvf/src/prepare/queue.rs | 28 +- .../node/core/pvf/src/prepare/worker_intf.rs | 61 +-- polkadot/node/core/pvf/src/worker_intf.rs | 4 +- polkadot/node/core/pvf/tests/it/main.rs | 5 +- .../utils/build-script-utils/src/version.rs | 31 ++ 22 files changed, 535 insertions(+), 244 deletions(-) create mode 100644 polkadot/node/core/pvf/common/build.rs diff --git a/Cargo.lock 
b/Cargo.lock index ac64e65ee0e..b2d655e6f41 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1585,16 +1585,15 @@ dependencies = [ [[package]] name = "blake3" -version = "1.4.1" +version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "199c42ab6972d92c9f8995f086273d25c42fc0f7b2a1fcefba465c1352d25ba5" +checksum = "0231f06152bf547e9c2b5194f247cd97aacf6dcd8b15d8e5ec0663f64580da87" dependencies = [ "arrayref", "arrayvec 0.7.4", "cc", "cfg-if", "constant_time_eq 0.3.0", - "digest 0.10.7", ] [[package]] @@ -12568,6 +12567,7 @@ version = "1.0.0" dependencies = [ "always-assert", "assert_matches", + "blake3", "cfg-if", "criterion 0.4.0", "futures", @@ -12646,6 +12646,7 @@ dependencies = [ "sp-externalities 0.19.0", "sp-io", "sp-tracing 10.0.0", + "substrate-build-script-utils", "tempfile", "thiserror", "tracing-gum", @@ -12670,6 +12671,7 @@ dependencies = [ name = "polkadot-node-core-pvf-prepare-worker" version = "1.0.0" dependencies = [ + "blake3", "cfg-if", "criterion 0.4.0", "libc", diff --git a/polkadot/node/core/candidate-validation/src/lib.rs b/polkadot/node/core/candidate-validation/src/lib.rs index a3d6f047313..96ab07d3b82 100644 --- a/polkadot/node/core/candidate-validation/src/lib.rs +++ b/polkadot/node/core/candidate-validation/src/lib.rs @@ -25,7 +25,7 @@ use polkadot_node_core_pvf::{ InternalValidationError, InvalidCandidate as WasmInvalidCandidate, PrepareError, - PrepareJobKind, PrepareStats, PvfPrepData, ValidationError, ValidationHost, + PrepareJobKind, PvfPrepData, ValidationError, ValidationHost, }; use polkadot_node_primitives::{ BlockData, InvalidCandidate, PoV, ValidationResult, POV_BOMB_LIMIT, VALIDATION_CODE_BOMB_LIMIT, @@ -794,7 +794,7 @@ trait ValidationBackend { validation_result } - async fn precheck_pvf(&mut self, pvf: PvfPrepData) -> Result; + async fn precheck_pvf(&mut self, pvf: PvfPrepData) -> Result<(), PrepareError>; } #[async_trait] @@ -824,7 +824,7 @@ impl ValidationBackend for ValidationHost { })? } - async fn precheck_pvf(&mut self, pvf: PvfPrepData) -> Result { + async fn precheck_pvf(&mut self, pvf: PvfPrepData) -> Result<(), PrepareError> { let (tx, rx) = oneshot::channel(); if let Err(err) = self.precheck_pvf(pvf, tx).await { // Return an IO error if there was an error communicating with the host. 
diff --git a/polkadot/node/core/candidate-validation/src/tests.rs b/polkadot/node/core/candidate-validation/src/tests.rs index cab823e1e63..801c581938a 100644 --- a/polkadot/node/core/candidate-validation/src/tests.rs +++ b/polkadot/node/core/candidate-validation/src/tests.rs @@ -377,7 +377,7 @@ impl ValidationBackend for MockValidateCandidateBackend { result } - async fn precheck_pvf(&mut self, _pvf: PvfPrepData) -> Result { + async fn precheck_pvf(&mut self, _pvf: PvfPrepData) -> Result<(), PrepareError> { unreachable!() } } @@ -1014,11 +1014,11 @@ fn pov_decompression_failure_is_invalid() { } struct MockPreCheckBackend { - result: Result, + result: Result<(), PrepareError>, } impl MockPreCheckBackend { - fn with_hardcoded_result(result: Result) -> Self { + fn with_hardcoded_result(result: Result<(), PrepareError>) -> Self { Self { result } } } @@ -1034,7 +1034,7 @@ impl ValidationBackend for MockPreCheckBackend { unreachable!() } - async fn precheck_pvf(&mut self, _pvf: PvfPrepData) -> Result { + async fn precheck_pvf(&mut self, _pvf: PvfPrepData) -> Result<(), PrepareError> { self.result.clone() } } @@ -1051,7 +1051,7 @@ fn precheck_works() { let (check_fut, check_result) = precheck_pvf( ctx.sender(), - MockPreCheckBackend::with_hardcoded_result(Ok(PrepareStats::default())), + MockPreCheckBackend::with_hardcoded_result(Ok(())), relay_parent, validation_code_hash, ) @@ -1113,7 +1113,7 @@ fn precheck_invalid_pvf_blob_compression() { let (check_fut, check_result) = precheck_pvf( ctx.sender(), - MockPreCheckBackend::with_hardcoded_result(Ok(PrepareStats::default())), + MockPreCheckBackend::with_hardcoded_result(Ok(())), relay_parent, validation_code_hash, ) diff --git a/polkadot/node/core/pvf/Cargo.toml b/polkadot/node/core/pvf/Cargo.toml index 3e72ca9e532..27da484fe4f 100644 --- a/polkadot/node/core/pvf/Cargo.toml +++ b/polkadot/node/core/pvf/Cargo.toml @@ -8,6 +8,7 @@ license.workspace = true [dependencies] always-assert = "0.1" +blake3 = "1.5" cfg-if = "1.0" futures = "0.3.21" futures-timer = "3.0.2" diff --git a/polkadot/node/core/pvf/benches/host_prepare_rococo_runtime.rs b/polkadot/node/core/pvf/benches/host_prepare_rococo_runtime.rs index 378374a10b3..c02a0b595da 100644 --- a/polkadot/node/core/pvf/benches/host_prepare_rococo_runtime.rs +++ b/polkadot/node/core/pvf/benches/host_prepare_rococo_runtime.rs @@ -56,7 +56,7 @@ impl TestHost { &self, code: &[u8], executor_params: ExecutorParams, - ) -> Result { + ) -> Result<(), PrepareError> { let (result_tx, result_rx) = futures::channel::oneshot::channel(); let code = sp_maybe_compressed_blob::decompress(code, 16 * 1024 * 1024) diff --git a/polkadot/node/core/pvf/common/Cargo.toml b/polkadot/node/core/pvf/common/Cargo.toml index e3fda06963e..bfe1be9156f 100644 --- a/polkadot/node/core/pvf/common/Cargo.toml +++ b/polkadot/node/core/pvf/common/Cargo.toml @@ -36,6 +36,9 @@ seccompiler = "0.4.0" assert_matches = "1.4.0" tempfile = "3.3.0" +[build-dependencies] +substrate-build-script-utils = { path = "../../../../../substrate/utils/build-script-utils" } + [features] # This feature is used to export test code to other crates without putting it in the production build. test-utils = [] diff --git a/polkadot/node/core/pvf/common/build.rs b/polkadot/node/core/pvf/common/build.rs new file mode 100644 index 00000000000..5531ad411da --- /dev/null +++ b/polkadot/node/core/pvf/common/build.rs @@ -0,0 +1,19 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. 
+ +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +fn main() { + substrate_build_script_utils::generate_wasmtime_version(); +} diff --git a/polkadot/node/core/pvf/common/src/error.rs b/polkadot/node/core/pvf/common/src/error.rs index 34475c481f7..6bf05ece78e 100644 --- a/polkadot/node/core/pvf/common/src/error.rs +++ b/polkadot/node/core/pvf/common/src/error.rs @@ -14,16 +14,24 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . -use crate::prepare::PrepareStats; +use crate::prepare::{PrepareSuccess, PrepareWorkerSuccess}; use parity_scale_codec::{Decode, Encode}; use std::fmt; -/// Result of PVF preparation performed by the validation host. Contains stats about the preparation -/// if successful -pub type PrepareResult = Result; +/// Result of PVF preparation from a worker, with checksum of the compiled PVF and stats of the +/// preparation if successful. +pub type PrepareWorkerResult = Result; + +/// Result of PVF preparation propagated all the way back to the host, with path to the concluded +/// artifact and stats of the preparation if successful. +pub type PrepareResult = Result; + +/// Result of prechecking PVF performed by the validation host. Contains stats about the preparation +/// if successful. +pub type PrecheckResult = Result<(), PrepareError>; /// An error that occurred during the prepare part of the PVF pipeline. -// Codec indexes are intended to stabilize pre-encoded payloads (see `OOM_PAYLOAD` below) +// Codec indexes are intended to stabilize pre-encoded payloads (see `OOM_PAYLOAD`) #[derive(Debug, Clone, Encode, Decode)] pub enum PrepareError { /// During the prevalidation stage of preparation an issue was found with the PVF. diff --git a/polkadot/node/core/pvf/common/src/lib.rs b/polkadot/node/core/pvf/common/src/lib.rs index e2211b97d87..282d2f7c41d 100644 --- a/polkadot/node/core/pvf/common/src/lib.rs +++ b/polkadot/node/core/pvf/common/src/lib.rs @@ -31,6 +31,8 @@ pub use sp_tracing; const LOG_TARGET: &str = "parachain::pvf-common"; +pub const RUNTIME_VERSION: &str = env!("SUBSTRATE_WASMTIME_VERSION"); + use std::{ io::{self, Read, Write}, mem, diff --git a/polkadot/node/core/pvf/common/src/prepare.rs b/polkadot/node/core/pvf/common/src/prepare.rs index 4436ebe4861..28ab682ec13 100644 --- a/polkadot/node/core/pvf/common/src/prepare.rs +++ b/polkadot/node/core/pvf/common/src/prepare.rs @@ -15,6 +15,25 @@ // along with Polkadot. If not, see . use parity_scale_codec::{Decode, Encode}; +use std::path::PathBuf; + +/// Result from prepare worker if successful. +#[derive(Debug, Clone, Default, Encode, Decode)] +pub struct PrepareWorkerSuccess { + /// Checksum of the compiled PVF. + pub checksum: String, + /// Stats of the current preparation run. + pub stats: PrepareStats, +} + +/// Result of PVF preparation if successful. +#[derive(Debug, Clone, Default)] +pub struct PrepareSuccess { + /// Canonical path to the compiled artifact. 
+ pub path: PathBuf, + /// Stats of the current preparation run. + pub stats: PrepareStats, +} /// Preparation statistics, including the CPU time and memory taken. #[derive(Debug, Clone, Default, Encode, Decode)] diff --git a/polkadot/node/core/pvf/common/src/pvf.rs b/polkadot/node/core/pvf/common/src/pvf.rs index 0cc86434c19..2d8f6430187 100644 --- a/polkadot/node/core/pvf/common/src/pvf.rs +++ b/polkadot/node/core/pvf/common/src/pvf.rs @@ -115,7 +115,7 @@ impl fmt::Debug for PvfPrepData { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!( f, - "Pvf {{ code, code_hash: {:?}, executor_params: {:?}, prep_timeout: {:?} }}", + "Pvf {{ code: [...], code_hash: {:?}, executor_params: {:?}, prep_timeout: {:?} }}", self.code_hash, self.executor_params, self.prep_timeout ) } diff --git a/polkadot/node/core/pvf/prepare-worker/Cargo.toml b/polkadot/node/core/pvf/prepare-worker/Cargo.toml index 1cd221533f4..005f2e93511 100644 --- a/polkadot/node/core/pvf/prepare-worker/Cargo.toml +++ b/polkadot/node/core/pvf/prepare-worker/Cargo.toml @@ -7,6 +7,7 @@ edition.workspace = true license.workspace = true [dependencies] +blake3 = "1.5" cfg-if = "1.0" gum = { package = "tracing-gum", path = "../../../gum" } libc = "0.2.139" diff --git a/polkadot/node/core/pvf/prepare-worker/src/lib.rs b/polkadot/node/core/pvf/prepare-worker/src/lib.rs index 151b54efc2d..34e6a78c26a 100644 --- a/polkadot/node/core/pvf/prepare-worker/src/lib.rs +++ b/polkadot/node/core/pvf/prepare-worker/src/lib.rs @@ -40,10 +40,10 @@ use nix::{ use os_pipe::{self, PipeReader, PipeWriter}; use parity_scale_codec::{Decode, Encode}; use polkadot_node_core_pvf_common::{ - error::{PrepareError, PrepareResult}, + error::{PrepareError, PrepareWorkerResult}, executor_intf::create_runtime_from_artifact_bytes, framed_recv_blocking, framed_send_blocking, - prepare::{MemoryStats, PrepareJobKind, PrepareStats}, + prepare::{MemoryStats, PrepareJobKind, PrepareStats, PrepareWorkerSuccess}, pvf::PvfPrepData, worker::{ cpu_time_monitor_loop, run_worker, stringify_panic_payload, @@ -106,7 +106,7 @@ fn recv_request(stream: &mut UnixStream) -> io::Result { } /// Send a worker response. -fn send_response(stream: &mut UnixStream, result: PrepareResult) -> io::Result<()> { +fn send_response(stream: &mut UnixStream, result: PrepareWorkerResult) -> io::Result<()> { framed_send_blocking(stream, &result.encode()) } @@ -186,8 +186,8 @@ fn end_memory_tracking() -> isize { /// /// 7. If compilation succeeded, write the compiled artifact into a temporary file. /// -/// 8. Send the result of preparation back to the host. If any error occurred in the above steps, we -/// send that in the `PrepareResult`. +/// 8. Send the result of preparation back to the host, including the checksum of the artifact. If +/// any error occurred in the above steps, we send that in the `PrepareWorkerResult`. pub fn worker_entrypoint( socket_path: PathBuf, worker_dir_path: PathBuf, @@ -439,11 +439,11 @@ fn handle_child_process( Err(err) => Err(err), Ok(ok) => { cfg_if::cfg_if! { - if #[cfg(target_os = "linux")] { - let (artifact, max_rss) = ok; - } else { - let artifact = ok; - } + if #[cfg(target_os = "linux")] { + let (artifact, max_rss) = ok; + } else { + let artifact = ok; + } } // Stop the memory stats worker and get its observed memory stats. @@ -511,7 +511,7 @@ fn handle_parent_process( worker_pid: u32, usage_before: Usage, timeout: Duration, -) -> Result { +) -> Result { // Read from the child. Don't decode unless the process exited normally, which we check later. 
let mut received_data = Vec::new(); pipe_read @@ -554,7 +554,7 @@ fn handle_parent_process( match result { Err(err) => Err(err), - Ok(response) => { + Ok(JobResponse { artifact, memory_stats }) => { // The exit status should have been zero if no error occurred. if exit_status != 0 { return Err(PrepareError::JobError(format!( @@ -577,13 +577,14 @@ fn handle_parent_process( temp_artifact_dest.display(), ); // Write to the temp file created by the host. - if let Err(err) = fs::write(&temp_artifact_dest, &response.artifact) { + if let Err(err) = fs::write(&temp_artifact_dest, &artifact) { return Err(PrepareError::IoErr(err.to_string())) }; - Ok(PrepareStats { - memory_stats: response.memory_stats, - cpu_time_elapsed: cpu_tv, + let checksum = blake3::hash(&artifact.as_ref()).to_hex().to_string(); + Ok(PrepareWorkerSuccess { + checksum, + stats: PrepareStats { memory_stats, cpu_time_elapsed: cpu_tv }, }) }, } @@ -657,13 +658,13 @@ fn error_from_errno(context: &'static str, errno: Errno) -> PrepareError { type JobResult = Result; -/// Pre-encoded length-prefixed `Result::Err(PrepareError::OutOfMemory)` +/// Pre-encoded length-prefixed `JobResult::Err(PrepareError::OutOfMemory)` const OOM_PAYLOAD: &[u8] = b"\x02\x00\x00\x00\x00\x00\x00\x00\x01\x08"; #[test] fn pre_encoded_payloads() { // NOTE: This must match the type of `response` in `send_child_response`. - let oom_unencoded: JobResult = Result::Err(PrepareError::OutOfMemory); + let oom_unencoded: JobResult = JobResult::Err(PrepareError::OutOfMemory); let oom_encoded = oom_unencoded.encode(); // The payload is prefixed with its length in `framed_send`. let mut oom_payload = oom_encoded.len().to_le_bytes().to_vec(); diff --git a/polkadot/node/core/pvf/src/artifacts.rs b/polkadot/node/core/pvf/src/artifacts.rs index dd83f76494e..53085eade3c 100644 --- a/polkadot/node/core/pvf/src/artifacts.rs +++ b/polkadot/node/core/pvf/src/artifacts.rs @@ -16,10 +16,10 @@ //! PVF artifacts (final compiled code blobs). //! -//! # Lifecycle of an artifact +//! # Lifecycle of an artifact //! -//! 1. During node start-up, the artifacts cache is cleaned up. This means that all local artifacts -//! stored on-disk are cleared, and we start with an empty [`Artifacts`] table. +//! 1. During node start-up, we will check the cached artifacts, if any. The stale and corrupted +//! ones are pruned. The valid ones are registered in the [`Artifacts`] table. //! //! 2. In order to be executed, a PVF should be prepared first. This means that artifacts should //! have an [`ArtifactState::Prepared`] entry for that artifact in the table. If not, the @@ -55,18 +55,29 @@ //! older by a predefined parameter. This process is run very rarely (say, once a day). Once the //! artifact is expired it is removed from disk eagerly atomically. 
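The startup check described above hinges on the artifact file name carrying a blake3 checksum of the file contents. As a rough sketch of the idea (illustrative only; the helper name `file_matches_embedded_checksum` is not from this patch, and `blake3` is the same crate the patch adds as a dependency):

```rust
use std::path::Path;

// Recompute the blake3 hash of the artifact on disk and compare it against the
// hex checksum embedded as the last component of the file name. A mismatch (or
// an unreadable file) means the cached artifact should be discarded.
fn file_matches_embedded_checksum(path: &Path) -> std::io::Result<bool> {
    let bytes = std::fs::read(path)?;
    let checksum = blake3::hash(&bytes).to_hex();
    Ok(path
        .file_name()
        .and_then(|name| name.to_str())
        .map(|name| name.ends_with(checksum.as_str()))
        .unwrap_or(false))
}
```

The `is_corrupted` helper further down in this diff applies the same check using the async `tokio::fs` API.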
-use crate::host::PrepareResultSender; +use crate::{host::PrecheckResultSender, LOG_TARGET}; use always_assert::always; -use polkadot_node_core_pvf_common::{error::PrepareError, prepare::PrepareStats, pvf::PvfPrepData}; +use polkadot_core_primitives::Hash; +use polkadot_node_core_pvf_common::{ + error::PrepareError, prepare::PrepareStats, pvf::PvfPrepData, RUNTIME_VERSION, +}; use polkadot_node_primitives::NODE_VERSION; use polkadot_parachain_primitives::primitives::ValidationCodeHash; use polkadot_primitives::ExecutorParamsHash; use std::{ collections::HashMap, path::{Path, PathBuf}, + str::FromStr as _, time::{Duration, SystemTime}, }; +const RUNTIME_PREFIX: &str = "wasmtime_v"; +const NODE_PREFIX: &str = "polkadot_v"; + +fn artifact_prefix() -> String { + format!("{}{}_{}{}", RUNTIME_PREFIX, RUNTIME_VERSION, NODE_PREFIX, NODE_VERSION) +} + /// Identifier of an artifact. Encodes a code hash of the PVF and a hash of executor parameter set. #[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)] pub struct ArtifactId { @@ -75,9 +86,6 @@ pub struct ArtifactId { } impl ArtifactId { - const PREFIX: &'static str = "wasmtime_"; - const NODE_VERSION_PREFIX: &'static str = "polkadot_v"; - /// Creates a new artifact ID with the given hash. pub fn new(code_hash: ValidationCodeHash, executor_params_hash: ExecutorParamsHash) -> Self { Self { code_hash, executor_params_hash } @@ -88,38 +96,34 @@ impl ArtifactId { Self::new(pvf.code_hash(), pvf.executor_params().hash()) } - /// Tries to recover the artifact id from the given file name. - #[cfg(test)] - pub fn from_file_name(file_name: &str) -> Option { - use polkadot_core_primitives::Hash; - use std::str::FromStr as _; - - let file_name = - file_name.strip_prefix(Self::PREFIX)?.strip_prefix(Self::NODE_VERSION_PREFIX)?; - - // [ node version | code hash | param hash ] - let parts: Vec<&str> = file_name.split('_').collect(); - let (_node_ver, code_hash_str, executor_params_hash_str) = (parts[0], parts[1], parts[2]); - - let code_hash = Hash::from_str(code_hash_str).ok()?.into(); - let executor_params_hash = - ExecutorParamsHash::from_hash(Hash::from_str(executor_params_hash_str).ok()?); - - Some(Self { code_hash, executor_params_hash }) - } - - /// Returns the expected path to this artifact given the root of the cache. - pub fn path(&self, cache_path: &Path) -> PathBuf { + /// Returns the canonical path to the concluded artifact. + pub(crate) fn path(&self, cache_path: &Path, checksum: &str) -> PathBuf { let file_name = format!( - "{}{}{}_{:#x}_{:#x}", - Self::PREFIX, - Self::NODE_VERSION_PREFIX, - NODE_VERSION, + "{}_{:#x}_{:#x}_0x{}", + artifact_prefix(), self.code_hash, - self.executor_params_hash + self.executor_params_hash, + checksum ); cache_path.join(file_name) } + + /// Tries to recover the artifact id from the given file name. + /// Return `None` if the given file name is invalid. + /// VALID_NAME := _ _ _ + fn from_file_name(file_name: &str) -> Option { + let file_name = file_name.strip_prefix(&artifact_prefix())?.strip_prefix('_')?; + let parts: Vec<&str> = file_name.split('_').collect(); + + if let [code_hash, param_hash, _checksum] = parts[..] { + let code_hash = Hash::from_str(code_hash).ok()?.into(); + let executor_params_hash = + ExecutorParamsHash::from_hash(Hash::from_str(param_hash).ok()?); + return Some(Self { code_hash, executor_params_hash }) + } + + None + } } /// A bundle of the artifact ID and the path. 
@@ -136,8 +140,8 @@ pub struct ArtifactPathId { } impl ArtifactPathId { - pub(crate) fn new(artifact_id: ArtifactId, cache_path: &Path) -> Self { - Self { path: artifact_id.path(cache_path), id: artifact_id } + pub(crate) fn new(artifact_id: ArtifactId, path: &Path) -> Self { + Self { id: artifact_id, path: path.to_owned() } } } @@ -148,6 +152,8 @@ pub enum ArtifactState { /// That means that the artifact should be accessible through the path obtained by the artifact /// id (unless, it was removed externally). Prepared { + /// The path of the compiled artifact. + path: PathBuf, /// The time when the artifact was last needed. /// /// This is updated when we get the heads up for this artifact or when we just discover @@ -159,7 +165,7 @@ pub enum ArtifactState { /// A task to prepare this artifact is scheduled. Preparing { /// List of result senders that are waiting for a response. - waiting_for_response: Vec, + waiting_for_response: Vec, /// The number of times this artifact has failed to prepare. num_failures: u32, }, @@ -177,32 +183,148 @@ pub enum ArtifactState { /// A container of all known artifact ids and their states. pub struct Artifacts { - artifacts: HashMap, + inner: HashMap, } impl Artifacts { - /// Initialize a blank cache at the given path. This will clear everything present at the - /// given path, to be populated over time. - /// - /// The recognized artifacts will be filled in the table and unrecognized will be removed. - pub async fn new(cache_path: &Path) -> Self { - // First delete the entire cache. This includes artifacts and any leftover worker dirs (see - // [`WorkerDir`]). Nodes are long-running so this should populate shortly. - let _ = tokio::fs::remove_dir_all(cache_path).await; + #[cfg(test)] + pub(crate) fn empty() -> Self { + Self { inner: HashMap::new() } + } + + #[cfg(test)] + pub(crate) fn len(&self) -> usize { + self.inner.len() + } + + /// Create an empty table and populate it with valid artifacts as [`ArtifactState::Prepared`], + /// if any. The existing caches will be checked by their file name to determine whether they are + /// valid, e.g., matching the current node version. The ones deemed invalid will be pruned. + pub async fn new_and_prune(cache_path: &Path) -> Self { + let mut artifacts = Self { inner: HashMap::new() }; + artifacts.insert_and_prune(cache_path).await; + artifacts + } + + async fn insert_and_prune(&mut self, cache_path: &Path) { + async fn is_corrupted(path: &Path) -> bool { + let checksum = match tokio::fs::read(path).await { + Ok(bytes) => blake3::hash(&bytes), + Err(err) => { + // just remove the file if we cannot read it + gum::warn!( + target: LOG_TARGET, + ?err, + "unable to read artifact {:?} when checking integrity, removing...", + path, + ); + return true + }, + }; + + if let Some(file_name) = path.file_name() { + if let Some(file_name) = file_name.to_str() { + return !file_name.ends_with(checksum.to_hex().as_str()) + } + } + true + } + + // Insert the entry into the artifacts table if it is valid. + // Otherwise, prune it. 
+ async fn insert_or_prune( + artifacts: &mut Artifacts, + entry: &tokio::fs::DirEntry, + cache_path: &Path, + ) { + let file_type = entry.file_type().await; + let file_name = entry.file_name(); + + match file_type { + Ok(file_type) => + if !file_type.is_file() { + return + }, + Err(err) => { + gum::warn!( + target: LOG_TARGET, + ?err, + "unable to get file type for {:?}", + file_name, + ); + return + }, + } + + if let Some(file_name) = file_name.to_str() { + let id = ArtifactId::from_file_name(file_name); + let path = cache_path.join(file_name); + + if id.is_none() || is_corrupted(&path).await { + gum::warn!( + target: LOG_TARGET, + "discarding invalid artifact {:?}", + &path, + ); + let _ = tokio::fs::remove_file(&path).await; + return + } + + if let Some(id) = id { + gum::debug!( + target: LOG_TARGET, + "reusing existing {:?} for node version v{}", + &path, + NODE_VERSION, + ); + artifacts.insert_prepared(id, path, SystemTime::now(), Default::default()); + } + } else { + gum::warn!( + target: LOG_TARGET, + "non-Unicode file name {:?} found in {:?}", + file_name, + cache_path, + ); + } + } + // Make sure that the cache path directory and all its parents are created. let _ = tokio::fs::create_dir_all(cache_path).await; - Self { artifacts: HashMap::new() } - } + let mut dir = match tokio::fs::read_dir(cache_path).await { + Ok(dir) => dir, + Err(err) => { + gum::error!( + target: LOG_TARGET, + ?err, + "failed to read dir {:?}", + cache_path, + ); + return + }, + }; - #[cfg(test)] - pub(crate) fn empty() -> Self { - Self { artifacts: HashMap::new() } + loop { + match dir.next_entry().await { + Ok(Some(entry)) => insert_or_prune(self, &entry, cache_path).await, + Ok(None) => break, + Err(err) => { + gum::warn!( + target: LOG_TARGET, + ?err, + "error processing artifacts in {:?}", + cache_path, + ); + break + }, + } + } } /// Returns the state of the given artifact by its ID. pub fn artifact_state_mut(&mut self, artifact_id: &ArtifactId) -> Option<&mut ArtifactState> { - self.artifacts.get_mut(artifact_id) + self.inner.get_mut(artifact_id) } /// Inform the table about the artifact with the given ID. The state will be set to "preparing". @@ -212,53 +334,52 @@ impl Artifacts { pub fn insert_preparing( &mut self, artifact_id: ArtifactId, - waiting_for_response: Vec, + waiting_for_response: Vec, ) { // See the precondition. always!(self - .artifacts + .inner .insert(artifact_id, ArtifactState::Preparing { waiting_for_response, num_failures: 0 }) .is_none()); } /// Insert an artifact with the given ID as "prepared". /// - /// This function must be used only for brand-new artifacts and should never be used for - /// replacing existing ones. - #[cfg(test)] - pub fn insert_prepared( + /// This function should only be used to build the artifact table at startup with valid + /// artifact caches. + pub(crate) fn insert_prepared( &mut self, artifact_id: ArtifactId, + path: PathBuf, last_time_needed: SystemTime, prepare_stats: PrepareStats, ) { // See the precondition. always!(self - .artifacts - .insert(artifact_id, ArtifactState::Prepared { last_time_needed, prepare_stats }) + .inner + .insert(artifact_id, ArtifactState::Prepared { path, last_time_needed, prepare_stats }) .is_none()); } - /// Remove and retrieve the artifacts from the table that are older than the supplied - /// Time-To-Live. - pub fn prune(&mut self, artifact_ttl: Duration) -> Vec { + /// Remove artifacts older than the given TTL and return id and path of the removed ones. 
+ pub fn prune(&mut self, artifact_ttl: Duration) -> Vec<(ArtifactId, PathBuf)> { let now = SystemTime::now(); let mut to_remove = vec![]; - for (k, v) in self.artifacts.iter() { - if let ArtifactState::Prepared { last_time_needed, .. } = *v { + for (k, v) in self.inner.iter() { + if let ArtifactState::Prepared { last_time_needed, ref path, .. } = *v { if now .duration_since(last_time_needed) .map(|age| age > artifact_ttl) .unwrap_or(false) { - to_remove.push(k.clone()); + to_remove.push((k.clone(), path.clone())); } } } for artifact in &to_remove { - self.artifacts.remove(artifact); + self.inner.remove(&artifact.0); } to_remove @@ -267,13 +388,72 @@ impl Artifacts { #[cfg(test)] mod tests { - use super::{ArtifactId, Artifacts, NODE_VERSION}; + use super::{artifact_prefix as prefix, ArtifactId, Artifacts, NODE_VERSION, RUNTIME_VERSION}; use polkadot_primitives::ExecutorParamsHash; + use rand::Rng; use sp_core::H256; - use std::{path::Path, str::FromStr}; + use std::{ + fs, + io::Write, + path::{Path, PathBuf}, + str::FromStr, + }; + + fn rand_hash(len: usize) -> String { + let mut rng = rand::thread_rng(); + let hex: Vec<_> = "0123456789abcdef".chars().collect(); + (0..len).map(|_| hex[rng.gen_range(0..hex.len())]).collect() + } + + fn file_name(code_hash: &str, param_hash: &str, checksum: &str) -> String { + format!("{}_0x{}_0x{}_0x{}", prefix(), code_hash, param_hash, checksum) + } - fn file_name(code_hash: &str, param_hash: &str) -> String { - format!("wasmtime_polkadot_v{}_0x{}_0x{}", NODE_VERSION, code_hash, param_hash) + fn create_artifact( + dir: impl AsRef, + prefix: &str, + code_hash: impl AsRef, + params_hash: impl AsRef, + ) -> (PathBuf, String) { + fn artifact_path_without_checksum( + dir: impl AsRef, + prefix: &str, + code_hash: impl AsRef, + params_hash: impl AsRef, + ) -> PathBuf { + let mut path = dir.as_ref().to_path_buf(); + let file_name = + format!("{}_0x{}_0x{}", prefix, code_hash.as_ref(), params_hash.as_ref(),); + path.push(file_name); + path + } + + let (code_hash, params_hash) = (code_hash.as_ref(), params_hash.as_ref()); + let path = artifact_path_without_checksum(dir, prefix, code_hash, params_hash); + let mut file = fs::File::create(&path).unwrap(); + + let content = format!("{}{}", code_hash, params_hash).into_bytes(); + file.write_all(&content).unwrap(); + let checksum = blake3::hash(&content).to_hex().to_string(); + + (path, checksum) + } + + fn create_rand_artifact(dir: impl AsRef, prefix: &str) -> (PathBuf, String) { + create_artifact(dir, prefix, rand_hash(64), rand_hash(64)) + } + + fn concluded_path(path: impl AsRef, checksum: &str) -> PathBuf { + let path = path.as_ref(); + let mut file_name = path.file_name().unwrap().to_os_string(); + file_name.push("_0x"); + file_name.push(checksum); + path.with_file_name(file_name) + } + + #[test] + fn artifact_prefix() { + assert_eq!(prefix(), format!("wasmtime_v{}_polkadot_v{}", RUNTIME_VERSION, NODE_VERSION)); } #[test] @@ -284,6 +464,7 @@ mod tests { let file_name = file_name( "0022800000000000000000000000000000000000000000000000000000000000", "0033900000000000000000000000000000000000000000000000000000000000", + "00000000000000000000000000000000", ); assert_eq!( @@ -305,40 +486,54 @@ mod tests { let dir = Path::new("/test"); let code_hash = "1234567890123456789012345678901234567890123456789012345678901234"; let params_hash = "4321098765432109876543210987654321098765432109876543210987654321"; - let file_name = file_name(code_hash, params_hash); + let checksum = "34567890123456789012345678901234"; + let file_name 
= file_name(code_hash, params_hash, checksum); let code_hash = H256::from_str(code_hash).unwrap(); let params_hash = H256::from_str(params_hash).unwrap(); + let path = ArtifactId::new(code_hash.into(), ExecutorParamsHash::from_hash(params_hash)) + .path(dir, checksum); - assert_eq!( - ArtifactId::new(code_hash.into(), ExecutorParamsHash::from_hash(params_hash)) - .path(dir) - .to_str(), - Some(format!("/test/{}", file_name).as_str()), - ); + assert_eq!(path.to_str().unwrap(), format!("/test/{}", file_name)); } #[tokio::test] - async fn artifacts_removes_cache_on_startup() { - let fake_cache_path = crate::worker_intf::tmppath("test-cache").await.unwrap(); - let fake_artifact_path = { - let mut p = fake_cache_path.clone(); - p.push("wasmtime_0x1234567890123456789012345678901234567890123456789012345678901234"); - p - }; + async fn remove_stale_cache_on_startup() { + let cache_dir = crate::worker_intf::tmppath("test-cache").await.unwrap(); + fs::create_dir_all(&cache_dir).unwrap(); + + // invalid prefix + create_rand_artifact(&cache_dir, ""); + create_rand_artifact(&cache_dir, "wasmtime_polkadot_v"); + create_rand_artifact(&cache_dir, "wasmtime_v8.0.0_polkadot_v1.0.0"); + + let prefix = prefix(); + + // no checksum + create_rand_artifact(&cache_dir, &prefix); + + // invalid hashes + let (path, checksum) = create_artifact(&cache_dir, &prefix, "000", "000001"); + let new_path = concluded_path(&path, &checksum); + fs::rename(&path, &new_path).unwrap(); - // create a tmp cache with 1 artifact. + // checksum tampered + let (path, checksum) = create_rand_artifact(&cache_dir, &prefix); + let new_path = concluded_path(&path, checksum.chars().rev().collect::().as_str()); + fs::rename(&path, &new_path).unwrap(); - std::fs::create_dir_all(&fake_cache_path).unwrap(); - std::fs::File::create(fake_artifact_path).unwrap(); + // valid + let (path, checksum) = create_rand_artifact(&cache_dir, &prefix); + let new_path = concluded_path(&path, &checksum); + fs::rename(&path, &new_path).unwrap(); - // this should remove it and re-create. + assert_eq!(fs::read_dir(&cache_dir).unwrap().count(), 7); - let p = &fake_cache_path; - Artifacts::new(p).await; + let artifacts = Artifacts::new_and_prune(&cache_dir).await; - assert_eq!(std::fs::read_dir(&fake_cache_path).unwrap().count(), 0); + assert_eq!(fs::read_dir(&cache_dir).unwrap().count(), 1); + assert_eq!(artifacts.len(), 1); - std::fs::remove_dir_all(fake_cache_path).unwrap(); + fs::remove_dir_all(cache_dir).unwrap(); } } diff --git a/polkadot/node/core/pvf/src/host.rs b/polkadot/node/core/pvf/src/host.rs index 5919b9ba32c..f67934e4171 100644 --- a/polkadot/node/core/pvf/src/host.rs +++ b/polkadot/node/core/pvf/src/host.rs @@ -32,14 +32,15 @@ use futures::{ Future, FutureExt, SinkExt, StreamExt, }; use polkadot_node_core_pvf_common::{ - error::{PrepareError, PrepareResult}, + error::{PrecheckResult, PrepareError}, + prepare::PrepareSuccess, pvf::PvfPrepData, }; use polkadot_node_subsystem::SubsystemResult; use polkadot_parachain_primitives::primitives::ValidationResult; use std::{ collections::HashMap, - path::{Path, PathBuf}, + path::PathBuf, time::{Duration, SystemTime}, }; @@ -63,7 +64,7 @@ pub const EXECUTE_BINARY_NAME: &str = "polkadot-execute-worker"; pub(crate) type ResultSender = oneshot::Sender>; /// Transmission end used for sending the PVF preparation result. -pub(crate) type PrepareResultSender = oneshot::Sender; +pub(crate) type PrecheckResultSender = oneshot::Sender; /// A handle to the async process serving the validation host requests. 
#[derive(Clone)] @@ -83,7 +84,7 @@ impl ValidationHost { pub async fn precheck_pvf( &mut self, pvf: PvfPrepData, - result_tx: PrepareResultSender, + result_tx: PrecheckResultSender, ) -> Result<(), String> { self.to_host_tx .send(ToHost::PrecheckPvf { pvf, result_tx }) @@ -133,7 +134,7 @@ impl ValidationHost { } enum ToHost { - PrecheckPvf { pvf: PvfPrepData, result_tx: PrepareResultSender }, + PrecheckPvf { pvf: PvfPrepData, result_tx: PrecheckResultSender }, ExecutePvf(ExecutePvfInputs), HeadsUp { active_pvfs: Vec }, } @@ -249,10 +250,9 @@ pub async fn start( let run_sweeper = sweeper_task(to_sweeper_rx); let run_host = async move { - let artifacts = Artifacts::new(&config.cache_path).await; + let artifacts = Artifacts::new_and_prune(&config.cache_path).await; run(Inner { - cache_path: config.cache_path, cleanup_pulse_interval: Duration::from_secs(3600), artifact_ttl: Duration::from_secs(3600 * 24), artifacts, @@ -296,7 +296,6 @@ impl AwaitingPrepare { } struct Inner { - cache_path: PathBuf, cleanup_pulse_interval: Duration, artifact_ttl: Duration, artifacts: Artifacts, @@ -317,7 +316,6 @@ struct Fatal; async fn run( Inner { - cache_path, cleanup_pulse_interval, artifact_ttl, mut artifacts, @@ -361,7 +359,6 @@ async fn run( // will notice it. break_if_fatal!(handle_cleanup_pulse( - &cache_path, &mut to_sweeper_tx, &mut artifacts, artifact_ttl, @@ -380,7 +377,6 @@ async fn run( // If the artifact failed before, it could be re-scheduled for preparation here if // the preparation failure cooldown has elapsed. break_if_fatal!(handle_to_host( - &cache_path, &mut artifacts, &mut to_prepare_queue_tx, &mut to_execute_queue_tx, @@ -402,7 +398,6 @@ async fn run( // We could be eager in terms of reporting and plumb the result from the preparation // worker but we don't for the sake of simplicity. break_if_fatal!(handle_prepare_done( - &cache_path, &mut artifacts, &mut to_execute_queue_tx, &mut awaiting_prepare, @@ -414,7 +409,6 @@ async fn run( } async fn handle_to_host( - cache_path: &Path, artifacts: &mut Artifacts, prepare_queue: &mut mpsc::Sender, execute_queue: &mut mpsc::Sender, @@ -426,15 +420,8 @@ async fn handle_to_host( handle_precheck_pvf(artifacts, prepare_queue, pvf, result_tx).await?; }, ToHost::ExecutePvf(inputs) => { - handle_execute_pvf( - cache_path, - artifacts, - prepare_queue, - execute_queue, - awaiting_prepare, - inputs, - ) - .await?; + handle_execute_pvf(artifacts, prepare_queue, execute_queue, awaiting_prepare, inputs) + .await?; }, ToHost::HeadsUp { active_pvfs } => handle_heads_up(artifacts, prepare_queue, active_pvfs).await?, @@ -454,21 +441,21 @@ async fn handle_precheck_pvf( artifacts: &mut Artifacts, prepare_queue: &mut mpsc::Sender, pvf: PvfPrepData, - result_sender: PrepareResultSender, + result_sender: PrecheckResultSender, ) -> Result<(), Fatal> { let artifact_id = ArtifactId::from_pvf_prep_data(&pvf); if let Some(state) = artifacts.artifact_state_mut(&artifact_id) { match state { - ArtifactState::Prepared { last_time_needed, prepare_stats } => { + ArtifactState::Prepared { last_time_needed, .. } => { *last_time_needed = SystemTime::now(); - let _ = result_sender.send(Ok(prepare_stats.clone())); + let _ = result_sender.send(Ok(())); }, ArtifactState::Preparing { waiting_for_response, num_failures: _ } => waiting_for_response.push(result_sender), ArtifactState::FailedToProcess { error, .. } => { // Do not retry an artifact that previously failed preparation. 
- let _ = result_sender.send(PrepareResult::Err(error.clone())); + let _ = result_sender.send(PrecheckResult::Err(error.clone())); }, } } else { @@ -491,7 +478,6 @@ async fn handle_precheck_pvf( /// When preparing for execution, we use a more lenient timeout ([`LENIENT_PREPARATION_TIMEOUT`]) /// than when prechecking. async fn handle_execute_pvf( - cache_path: &Path, artifacts: &mut Artifacts, prepare_queue: &mut mpsc::Sender, execute_queue: &mut mpsc::Sender, @@ -504,8 +490,8 @@ async fn handle_execute_pvf( if let Some(state) = artifacts.artifact_state_mut(&artifact_id) { match state { - ArtifactState::Prepared { last_time_needed, .. } => { - let file_metadata = std::fs::metadata(artifact_id.path(cache_path)); + ArtifactState::Prepared { ref path, last_time_needed, .. } => { + let file_metadata = std::fs::metadata(path); if file_metadata.is_ok() { *last_time_needed = SystemTime::now(); @@ -514,7 +500,7 @@ async fn handle_execute_pvf( send_execute( execute_queue, execute::ToQueue::Enqueue { - artifact: ArtifactPathId::new(artifact_id, cache_path), + artifact: ArtifactPathId::new(artifact_id, path), pending_execution_request: PendingExecutionRequest { exec_timeout, params, @@ -677,7 +663,6 @@ async fn handle_heads_up( } async fn handle_prepare_done( - cache_path: &Path, artifacts: &mut Artifacts, execute_queue: &mut mpsc::Sender, awaiting_prepare: &mut AwaitingPrepare, @@ -718,7 +703,8 @@ async fn handle_prepare_done( state { for result_sender in waiting_for_response.drain(..) { - let _ = result_sender.send(result.clone()); + let result = result.clone().map(|_| ()); + let _ = result_sender.send(result); } num_failures } else { @@ -738,16 +724,18 @@ async fn handle_prepare_done( continue } - // Don't send failed artifacts to the execution's queue. - if let Err(ref error) = result { - let _ = result_tx.send(Err(ValidationError::from(error.clone()))); - continue - } + let path = match &result { + Ok(success) => success.path.clone(), + Err(error) => { + let _ = result_tx.send(Err(ValidationError::from(error.clone()))); + continue + }, + }; send_execute( execute_queue, execute::ToQueue::Enqueue { - artifact: ArtifactPathId::new(artifact_id.clone(), cache_path), + artifact: ArtifactPathId::new(artifact_id.clone(), &path), pending_execution_request: PendingExecutionRequest { exec_timeout, params, @@ -760,8 +748,8 @@ async fn handle_prepare_done( } *state = match result { - Ok(prepare_stats) => - ArtifactState::Prepared { last_time_needed: SystemTime::now(), prepare_stats }, + Ok(PrepareSuccess { path, stats: prepare_stats }) => + ArtifactState::Prepared { path, last_time_needed: SystemTime::now(), prepare_stats }, Err(error) => { let last_time_failed = SystemTime::now(); let num_failures = *num_failures + 1; @@ -814,7 +802,6 @@ async fn enqueue_prepare_for_execute( } async fn handle_cleanup_pulse( - cache_path: &Path, sweeper_tx: &mut mpsc::Sender, artifacts: &mut Artifacts, artifact_ttl: Duration, @@ -825,14 +812,13 @@ async fn handle_cleanup_pulse( "PVF pruning: {} artifacts reached their end of life", to_remove.len(), ); - for artifact_id in to_remove { + for (artifact_id, path) in to_remove { gum::debug!( target: LOG_TARGET, validation_code_hash = ?artifact_id.code_hash, "pruning artifact", ); - let artifact_path = artifact_id.path(cache_path); - sweeper_tx.send(artifact_path).await.map_err(|_| Fatal)?; + sweeper_tx.send(path).await.map_err(|_| Fatal)?; } Ok(()) @@ -890,7 +876,11 @@ pub(crate) mod tests { use crate::InvalidCandidate; use assert_matches::assert_matches; use 
futures::future::BoxFuture; - use polkadot_node_core_pvf_common::{error::PrepareError, prepare::PrepareStats}; + use polkadot_node_core_pvf_common::{ + error::PrepareError, + prepare::{PrepareStats, PrepareSuccess}, + }; + use sp_core::hexdisplay::AsBytesRef; const TEST_EXECUTION_TIMEOUT: Duration = Duration::from_secs(3); pub(crate) const TEST_PREPARATION_TIMEOUT: Duration = Duration::from_secs(30); @@ -910,12 +900,16 @@ pub(crate) mod tests { } /// Creates a new PVF which artifact id can be uniquely identified by the given number. - fn artifact_id(descriminator: u32) -> ArtifactId { - ArtifactId::from_pvf_prep_data(&PvfPrepData::from_discriminator(descriminator)) + fn artifact_id(discriminator: u32) -> ArtifactId { + ArtifactId::from_pvf_prep_data(&PvfPrepData::from_discriminator(discriminator)) } - fn artifact_path(descriminator: u32) -> PathBuf { - artifact_id(descriminator).path(&PathBuf::from(std::env::temp_dir())).to_owned() + fn artifact_path(discriminator: u32) -> PathBuf { + let pvf = PvfPrepData::from_discriminator(discriminator); + let checksum = blake3::hash(pvf.code().as_bytes_ref()); + artifact_id(discriminator) + .path(&PathBuf::from(std::env::temp_dir()), checksum.to_hex().as_str()) + .to_owned() } struct Builder { @@ -953,8 +947,6 @@ pub(crate) mod tests { impl Test { fn new(Builder { cleanup_pulse_interval, artifact_ttl, artifacts }: Builder) -> Self { - let cache_path = PathBuf::from(std::env::temp_dir()); - let (to_host_tx, to_host_rx) = mpsc::channel(10); let (to_prepare_queue_tx, to_prepare_queue_rx) = mpsc::channel(10); let (from_prepare_queue_tx, from_prepare_queue_rx) = mpsc::unbounded(); @@ -962,7 +954,6 @@ pub(crate) mod tests { let (to_sweeper_tx, to_sweeper_rx) = mpsc::channel(10); let run = run(Inner { - cache_path, cleanup_pulse_interval, artifact_ttl, artifacts, @@ -1111,12 +1102,18 @@ pub(crate) mod tests { let mut builder = Builder::default(); builder.cleanup_pulse_interval = Duration::from_millis(100); builder.artifact_ttl = Duration::from_millis(500); - builder - .artifacts - .insert_prepared(artifact_id(1), mock_now, PrepareStats::default()); - builder - .artifacts - .insert_prepared(artifact_id(2), mock_now, PrepareStats::default()); + builder.artifacts.insert_prepared( + artifact_id(1), + artifact_path(1), + mock_now, + PrepareStats::default(), + ); + builder.artifacts.insert_prepared( + artifact_id(2), + artifact_path(2), + mock_now, + PrepareStats::default(), + ); let mut test = builder.build(); let mut host = test.host_handle(); @@ -1188,7 +1185,7 @@ pub(crate) mod tests { test.from_prepare_queue_tx .send(prepare::FromQueue { artifact_id: artifact_id(1), - result: Ok(PrepareStats::default()), + result: Ok(PrepareSuccess::default()), }) .await .unwrap(); @@ -1204,7 +1201,7 @@ pub(crate) mod tests { test.from_prepare_queue_tx .send(prepare::FromQueue { artifact_id: artifact_id(2), - result: Ok(PrepareStats::default()), + result: Ok(PrepareSuccess::default()), }) .await .unwrap(); @@ -1258,7 +1255,7 @@ pub(crate) mod tests { test.from_prepare_queue_tx .send(prepare::FromQueue { artifact_id: artifact_id(1), - result: Ok(PrepareStats::default()), + result: Ok(PrepareSuccess::default()), }) .await .unwrap(); @@ -1371,7 +1368,7 @@ pub(crate) mod tests { test.from_prepare_queue_tx .send(prepare::FromQueue { artifact_id: artifact_id(2), - result: Ok(PrepareStats::default()), + result: Ok(PrepareSuccess::default()), }) .await .unwrap(); @@ -1527,7 +1524,7 @@ pub(crate) mod tests { test.from_prepare_queue_tx .send(prepare::FromQueue { artifact_id: 
artifact_id(1), - result: Ok(PrepareStats::default()), + result: Ok(PrepareSuccess::default()), }) .await .unwrap(); @@ -1703,7 +1700,7 @@ pub(crate) mod tests { test.from_prepare_queue_tx .send(prepare::FromQueue { artifact_id: artifact_id(1), - result: Ok(PrepareStats::default()), + result: Ok(PrepareSuccess::default()), }) .await .unwrap(); diff --git a/polkadot/node/core/pvf/src/lib.rs b/polkadot/node/core/pvf/src/lib.rs index 102a91dbdad..7e7a1325254 100644 --- a/polkadot/node/core/pvf/src/lib.rs +++ b/polkadot/node/core/pvf/src/lib.rs @@ -84,7 +84,7 @@ //! A pruning task will run at a fixed interval of time. This task will remove all artifacts that //! weren't used or received a heads up signal for a while. //! -//! ## Execution +//! ## Execution //! //! The execute workers will be fed by the requests from the execution queue, which is basically a //! combination of a path to the compiled artifact and the diff --git a/polkadot/node/core/pvf/src/prepare/pool.rs b/polkadot/node/core/pvf/src/prepare/pool.rs index 8e02f540d32..21af21e5b02 100644 --- a/polkadot/node/core/pvf/src/prepare/pool.rs +++ b/polkadot/node/core/pvf/src/prepare/pool.rs @@ -68,7 +68,7 @@ pub enum ToPool { /// /// In either case, the worker is considered busy and no further `StartWork` messages should be /// sent until either `Concluded` or `Rip` message is received. - StartWork { worker: Worker, pvf: PvfPrepData, artifact_path: PathBuf }, + StartWork { worker: Worker, pvf: PvfPrepData, cache_path: PathBuf }, } /// A message sent from pool to its client. @@ -232,7 +232,7 @@ fn handle_to_pool( .boxed(), ); }, - ToPool::StartWork { worker, pvf, artifact_path } => { + ToPool::StartWork { worker, pvf, cache_path } => { if let Some(data) = spawned.get_mut(worker) { if let Some(idle) = data.idle.take() { let preparation_timer = metrics.time_preparation(); @@ -242,7 +242,7 @@ fn handle_to_pool( worker, idle, pvf, - artifact_path, + cache_path, preparation_timer, ) .boxed(), @@ -303,10 +303,10 @@ async fn start_work_task( worker: Worker, idle: IdleWorker, pvf: PvfPrepData, - artifact_path: PathBuf, + cache_path: PathBuf, _preparation_timer: Option, ) -> PoolEvent { - let outcome = worker_intf::start_work(&metrics, idle, pvf, artifact_path).await; + let outcome = worker_intf::start_work(&metrics, idle, pvf, cache_path).await; PoolEvent::StartWork(worker, outcome) } diff --git a/polkadot/node/core/pvf/src/prepare/queue.rs b/polkadot/node/core/pvf/src/prepare/queue.rs index c38012d7454..c140a6cafda 100644 --- a/polkadot/node/core/pvf/src/prepare/queue.rs +++ b/polkadot/node/core/pvf/src/prepare/queue.rs @@ -268,12 +268,12 @@ fn find_idle_worker(queue: &mut Queue) -> Option { } async fn handle_from_pool(queue: &mut Queue, from_pool: pool::FromPool) -> Result<(), Fatal> { - use pool::FromPool::*; + use pool::FromPool; match from_pool { - Spawned(worker) => handle_worker_spawned(queue, worker).await?, - Concluded { worker, rip, result } => + FromPool::Spawned(worker) => handle_worker_spawned(queue, worker).await?, + FromPool::Concluded { worker, rip, result } => handle_worker_concluded(queue, worker, rip, result).await?, - Rip(worker) => handle_worker_rip(queue, worker).await?, + FromPool::Rip(worker) => handle_worker_rip(queue, worker).await?, } Ok(()) } @@ -424,17 +424,17 @@ async fn spawn_extra_worker(queue: &mut Queue, critical: bool) -> Result<(), Fat /// Attaches the work to the given worker telling the poll about the job. 
async fn assign(queue: &mut Queue, worker: Worker, job: Job) -> Result<(), Fatal> { let job_data = &mut queue.jobs[job]; - - let artifact_id = ArtifactId::from_pvf_prep_data(&job_data.pvf); - let artifact_path = artifact_id.path(&queue.cache_path); - job_data.worker = Some(worker); queue.workers[worker].job = Some(job); send_pool( &mut queue.to_pool_tx, - pool::ToPool::StartWork { worker, pvf: job_data.pvf.clone(), artifact_path }, + pool::ToPool::StartWork { + worker, + pvf: job_data.pvf.clone(), + cache_path: queue.cache_path.clone(), + }, ) .await?; @@ -491,7 +491,7 @@ mod tests { use crate::host::tests::TEST_PREPARATION_TIMEOUT; use assert_matches::assert_matches; use futures::{future::BoxFuture, FutureExt}; - use polkadot_node_core_pvf_common::{error::PrepareError, prepare::PrepareStats}; + use polkadot_node_core_pvf_common::{error::PrepareError, prepare::PrepareSuccess}; use slotmap::SlotMap; use std::task::Poll; @@ -612,7 +612,7 @@ mod tests { test.send_from_pool(pool::FromPool::Concluded { worker: w, rip: false, - result: Ok(PrepareStats::default()), + result: Ok(PrepareSuccess::default()), }); assert_eq!( @@ -651,7 +651,7 @@ mod tests { test.send_from_pool(pool::FromPool::Concluded { worker: w1, rip: false, - result: Ok(PrepareStats::default()), + result: Ok(PrepareSuccess::default()), }); assert_matches!(test.poll_and_recv_to_pool().await, pool::ToPool::StartWork { .. }); @@ -697,7 +697,7 @@ mod tests { test.send_from_pool(pool::FromPool::Concluded { worker: w1, rip: false, - result: Ok(PrepareStats::default()), + result: Ok(PrepareSuccess::default()), }); assert_eq!(test.poll_and_recv_to_pool().await, pool::ToPool::Kill(w1)); } @@ -731,7 +731,7 @@ mod tests { test.send_from_pool(pool::FromPool::Concluded { worker: w1, rip: true, - result: Ok(PrepareStats::default()), + result: Ok(PrepareSuccess::default()), }); // Since there is still work, the queue requested one extra worker to spawn to handle the diff --git a/polkadot/node/core/pvf/src/prepare/worker_intf.rs b/polkadot/node/core/pvf/src/prepare/worker_intf.rs index a22fa74b2fe..e7f142a46bb 100644 --- a/polkadot/node/core/pvf/src/prepare/worker_intf.rs +++ b/polkadot/node/core/pvf/src/prepare/worker_intf.rs @@ -17,6 +17,7 @@ //! Host interface to the prepare worker. use crate::{ + artifacts::ArtifactId, metrics::Metrics, security, worker_intf::{ @@ -27,8 +28,8 @@ use crate::{ }; use parity_scale_codec::{Decode, Encode}; use polkadot_node_core_pvf_common::{ - error::{PrepareError, PrepareResult}, - prepare::PrepareStats, + error::{PrepareError, PrepareResult, PrepareWorkerResult}, + prepare::{PrepareStats, PrepareSuccess, PrepareWorkerSuccess}, pvf::PvfPrepData, worker_dir, SecurityStatus, }; @@ -81,7 +82,7 @@ pub enum Outcome { /// final destination location. RenameTmpFile { worker: IdleWorker, - result: PrepareResult, + result: PrepareWorkerResult, err: String, // Unfortunately `PathBuf` doesn't implement `Encode`/`Decode`, so we do a fallible // conversion to `Option`. 
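The comment above refers to a small workaround: `PathBuf` has no SCALE `Encode`/`Decode` implementation, so the path is carried across the worker boundary as an optional UTF-8 string instead. A minimal sketch of that conversion, assuming an `Option<String>` field (the function names here are illustrative, not from this patch):

```rust
use std::path::{Path, PathBuf};

// Fallible conversion: only paths that are valid UTF-8 survive the round trip;
// anything else becomes `None` rather than failing the whole message.
fn encodable_path(path: &Path) -> Option<String> {
    path.to_str().map(ToOwned::to_owned)
}

fn decoded_path(encoded: Option<String>) -> Option<PathBuf> {
    encoded.map(PathBuf::from)
}
```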
@@ -115,7 +116,7 @@ pub async fn start_work( metrics: &Metrics, worker: IdleWorker, pvf: PvfPrepData, - artifact_path: PathBuf, + cache_path: PathBuf, ) -> Outcome { let IdleWorker { stream, pid, worker_dir } = worker; @@ -123,8 +124,8 @@ pub async fn start_work( target: LOG_TARGET, worker_pid = %pid, ?worker_dir, - "starting prepare for {}", - artifact_path.display(), + "starting prepare for {:?}", + pvf, ); with_worker_dir_setup( @@ -135,7 +136,7 @@ pub async fn start_work( let preparation_timeout = pvf.prep_timeout(); let audit_log_file = security::AuditLogFile::try_open_and_seek_to_end().await; - if let Err(err) = send_request(&mut stream, pvf.clone()).await { + if let Err(err) = send_request(&mut stream, &pvf).await { gum::warn!( target: LOG_TARGET, worker_pid = %pid, @@ -159,7 +160,7 @@ pub async fn start_work( match result { // Received bytes from worker within the time limit. - Ok(Ok(prepare_result)) => { + Ok(Ok(prepare_worker_result)) => { // Check if any syscall violations occurred during the job. For now this is only // informative, as we are not enforcing the seccomp policy yet. for syscall in security::check_seccomp_violations_for_worker(audit_log_file, pid).await { @@ -175,10 +176,11 @@ pub async fn start_work( handle_response( metrics, IdleWorker { stream, pid, worker_dir }, - prepare_result, + prepare_worker_result, pid, tmp_artifact_file, - artifact_path, + &pvf, + &cache_path, preparation_timeout, ) .await @@ -215,20 +217,22 @@ pub async fn start_work( async fn handle_response( metrics: &Metrics, worker: IdleWorker, - result: PrepareResult, + result: PrepareWorkerResult, worker_pid: u32, tmp_file: PathBuf, - artifact_path: PathBuf, + pvf: &PvfPrepData, + cache_path: &PathBuf, preparation_timeout: Duration, ) -> Outcome { - let PrepareStats { cpu_time_elapsed, memory_stats } = match result.clone() { - Ok(result) => result, - // Timed out on the child. This should already be logged by the child. - Err(PrepareError::TimedOut) => return Outcome::TimedOut, - Err(PrepareError::JobDied(err)) => return Outcome::JobDied(err), - Err(PrepareError::OutOfMemory) => return Outcome::OutOfMemory, - Err(_) => return Outcome::Concluded { worker, result }, - }; + let PrepareWorkerSuccess { checksum, stats: PrepareStats { cpu_time_elapsed, memory_stats } } = + match result.clone() { + Ok(result) => result, + // Timed out on the child. This should already be logged by the child. + Err(PrepareError::TimedOut) => return Outcome::TimedOut, + Err(PrepareError::JobDied(err)) => return Outcome::JobDied(err), + Err(PrepareError::OutOfMemory) => return Outcome::OutOfMemory, + Err(err) => return Outcome::Concluded { worker, result: Err(err) }, + }; if cpu_time_elapsed > preparation_timeout { // The job didn't complete within the timeout. 
@@ -243,6 +247,9 @@ async fn handle_response( return Outcome::TimedOut } + let artifact_id = ArtifactId::from_pvf_prep_data(pvf); + let artifact_path = artifact_id.path(cache_path, &checksum); + gum::debug!( target: LOG_TARGET, %worker_pid, @@ -252,7 +259,13 @@ async fn handle_response( ); let outcome = match tokio::fs::rename(&tmp_file, &artifact_path).await { - Ok(()) => Outcome::Concluded { worker, result }, + Ok(()) => Outcome::Concluded { + worker, + result: Ok(PrepareSuccess { + path: artifact_path, + stats: PrepareStats { cpu_time_elapsed, memory_stats: memory_stats.clone() }, + }), + }, Err(err) => { gum::warn!( target: LOG_TARGET, @@ -329,14 +342,14 @@ where outcome } -async fn send_request(stream: &mut UnixStream, pvf: PvfPrepData) -> io::Result<()> { +async fn send_request(stream: &mut UnixStream, pvf: &PvfPrepData) -> io::Result<()> { framed_send(stream, &pvf.encode()).await?; Ok(()) } -async fn recv_response(stream: &mut UnixStream, pid: u32) -> io::Result { +async fn recv_response(stream: &mut UnixStream, pid: u32) -> io::Result { let result = framed_recv(stream).await?; - let result = PrepareResult::decode(&mut &result[..]).map_err(|e| { + let result = PrepareWorkerResult::decode(&mut &result[..]).map_err(|e| { // We received invalid bytes from the worker. let bound_bytes = &result[..result.len().min(4)]; gum::warn!( diff --git a/polkadot/node/core/pvf/src/worker_intf.rs b/polkadot/node/core/pvf/src/worker_intf.rs index 8f9a7de354b..5e589b9abce 100644 --- a/polkadot/node/core/pvf/src/worker_intf.rs +++ b/polkadot/node/core/pvf/src/worker_intf.rs @@ -198,7 +198,7 @@ pub async fn tmppath_in(prefix: &str, dir: &Path) -> io::Result { /// The same as [`tmppath_in`], but uses [`std::env::temp_dir`] as the directory. pub async fn tmppath(prefix: &str) -> io::Result { - let temp_dir = PathBuf::from(std::env::temp_dir()); + let temp_dir = std::env::temp_dir(); tmppath_in(prefix, &temp_dir).await } @@ -453,7 +453,7 @@ impl Drop for WorkerDir { /// artifacts from previous jobs. pub fn clear_worker_dir_path(worker_dir_path: &Path) -> io::Result<()> { fn remove_dir_contents(path: &Path) -> io::Result<()> { - for entry in std::fs::read_dir(&path)? { + for entry in std::fs::read_dir(path)? 
{ let entry = entry?; let path = entry.path(); diff --git a/polkadot/node/core/pvf/tests/it/main.rs b/polkadot/node/core/pvf/tests/it/main.rs index d2d842cf84a..5bdf49cc719 100644 --- a/polkadot/node/core/pvf/tests/it/main.rs +++ b/polkadot/node/core/pvf/tests/it/main.rs @@ -20,8 +20,7 @@ use assert_matches::assert_matches; use parity_scale_codec::Encode as _; use polkadot_node_core_pvf::{ start, testing::build_workers_and_get_paths, Config, InvalidCandidate, Metrics, PrepareError, - PrepareJobKind, PrepareStats, PvfPrepData, ValidationError, ValidationHost, - JOB_TIMEOUT_WALL_CLOCK_FACTOR, + PrepareJobKind, PvfPrepData, ValidationError, ValidationHost, JOB_TIMEOUT_WALL_CLOCK_FACTOR, }; use polkadot_parachain_primitives::primitives::{BlockData, ValidationParams, ValidationResult}; use polkadot_primitives::{ExecutorParam, ExecutorParams}; @@ -70,7 +69,7 @@ impl TestHost { &self, code: &[u8], executor_params: ExecutorParams, - ) -> Result { + ) -> Result<(), PrepareError> { let (result_tx, result_rx) = futures::channel::oneshot::channel(); let code = sp_maybe_compressed_blob::decompress(code, 16 * 1024 * 1024) diff --git a/substrate/utils/build-script-utils/src/version.rs b/substrate/utils/build-script-utils/src/version.rs index f6a9ff9554a..549e499b110 100644 --- a/substrate/utils/build-script-utils/src/version.rs +++ b/substrate/utils/build-script-utils/src/version.rs @@ -59,3 +59,34 @@ fn get_version(impl_commit: &str) -> String { impl_commit ) } + +/// Generate `SUBSTRATE_WASMTIME_VERSION` +pub fn generate_wasmtime_version() { + generate_dependency_version("wasmtime", "SUBSTRATE_WASMTIME_VERSION"); +} + +fn generate_dependency_version(dep: &str, env_var: &str) { + // we only care about the root + match std::process::Command::new("cargo") + .args(["tree", "--depth=0", "--locked", "--package", dep]) + .output() + { + Ok(output) if output.status.success() => { + let version = String::from_utf8_lossy(&output.stdout); + + // vX.X.X + if let Some(ver) = version.strip_prefix(&format!("{} v", dep)) { + println!("cargo:rustc-env={}={}", env_var, ver); + } else { + println!("cargo:warning=Unexpected result {}", version); + } + }, + + // command errors out when it could not find the given dependency + // or when having multiple versions of it + Ok(output) => + println!("cargo:warning=`cargo tree` {}", String::from_utf8_lossy(&output.stderr)), + + Err(err) => println!("cargo:warning=Could not run `cargo tree`: {}", err), + } +} -- GitLab From aa5705bff973522a7bd4e39f2ec1a8da56f59f95 Mon Sep 17 00:00:00 2001 From: Ross Bulat Date: Mon, 20 Nov 2023 09:19:34 +0000 Subject: [PATCH 082/199] Pools: Add `MaxUnbonding` to metadata (#2397) --- substrate/frame/nomination-pools/src/lib.rs | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/substrate/frame/nomination-pools/src/lib.rs b/substrate/frame/nomination-pools/src/lib.rs index 909a930e382..ab4bd51ffc0 100644 --- a/substrate/frame/nomination-pools/src/lib.rs +++ b/substrate/frame/nomination-pools/src/lib.rs @@ -1626,6 +1626,10 @@ pub mod pallet { #[pallet::constant] type MaxPointsToBalance: Get; + /// The maximum number of simultaneous unbonding chunks that can exist per member. + #[pallet::constant] + type MaxUnbonding: Get; + /// Infallible method for converting `Currency::Balance` to `U256`. type BalanceToU256: Convert, U256>; @@ -1644,9 +1648,6 @@ pub mod pallet { /// The maximum length, in bytes, that a pools metadata maybe. 
type MaxMetadataLen: Get<u32>; - - /// The maximum number of simultaneous unbonding chunks that can exist per member. - type MaxUnbonding: Get<u32>; } /// The sum of funds across all pools. -- GitLab From b35300c377d502e75e9e3fb6f15934bc0f31ef53 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 20 Nov 2023 11:25:58 +0200 Subject: [PATCH 083/199] Bump docker/build-push-action from 5.0.0 to 5.1.0 (#2404) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Bumps [docker/build-push-action](https://github.com/docker/build-push-action) from 5.0.0 to 5.1.0.
Release notes

Sourced from docker/build-push-action's releases.

v5.1.0

Full Changelog: https://github.com/docker/build-push-action/compare/v5.0.0...v5.1.0

Commits
  • 4a13e50 Merge pull request #1006 from docker/dependabot/npm_and_yarn/docker/actions-t...
  • 7416668 chore: update generated content
  • b4f76a5 chore(deps): Bump @docker/actions-toolkit from 0.13.0 to 0.14.0
  • b7feb76 Merge pull request #1005 from crazy-max/ci-inspect
  • fae8018 ci: inspect sbom and provenance
  • b625868 Merge pull request #1004 from crazy-max/ci-update-buildx
  • 5193ef1 ci: update buildx to latest
  • d3afd77 Merge pull request #991 from docker/dependabot/npm_and_yarn/babel/traverse-7....
  • 7a786bb Merge pull request #992 from crazy-max/annotations
  • c66ae3a chore: update generated content
  • Additional commits viewable in compare view

Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/release-50_publish-docker.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/release-50_publish-docker.yml b/.github/workflows/release-50_publish-docker.yml index 891f43e605c..567e996b8fd 100644 --- a/.github/workflows/release-50_publish-docker.yml +++ b/.github/workflows/release-50_publish-docker.yml @@ -268,7 +268,7 @@ jobs: - name: Build and push id: docker_build - uses: docker/build-push-action@0565240e2d4ab88bba5387d719585280857ece09 # v5.0.0 + uses: docker/build-push-action@4a13e500e55cf31b7a5d59a38ab2040ab0f42f56 # v5.1.0 with: push: true file: docker/dockerfiles/polkadot/polkadot_injected_debian.Dockerfile -- GitLab From ede4a362622dfa69eb167eaa876246b1289f4b41 Mon Sep 17 00:00:00 2001 From: jserrat <35823283+Jpserrat@users.noreply.github.com> Date: Mon, 20 Nov 2023 11:00:19 +0000 Subject: [PATCH 084/199] remove retry from backers on failed candidate validation (#2182) Hey guys, as discussed I've changed the name to a more general one `PvfExecKind`, is this good or too general? Creating this as a draft, I still have to fix the tests. Closes #1585 Kusama address: FkB6QEo8VnV3oifugNj5NeVG3Mvq1zFbrUu4P5YwRoe5mQN --------- Co-authored-by: command-bot <> Co-authored-by: Marcin S --- polkadot/node/core/approval-voting/src/lib.rs | 4 +- .../node/core/approval-voting/src/tests.rs | 4 +- polkadot/node/core/backing/src/lib.rs | 4 +- polkadot/node/core/backing/src/tests/mod.rs | 38 ++-- .../src/tests/prospective_parachains.rs | 4 +- .../node/core/candidate-validation/src/lib.rs | 105 ++++++---- .../core/candidate-validation/src/tests.rs | 190 ++++++++++-------- .../src/participation/mod.rs | 4 +- .../src/participation/tests.rs | 16 +- polkadot/node/malus/src/variants/common.rs | 36 ++-- .../node/overseer/examples/minimal-example.rs | 4 +- polkadot/node/overseer/src/tests.rs | 6 +- polkadot/node/subsystem-types/src/messages.rs | 10 +- polkadot/primitives/src/lib.rs | 10 +- polkadot/primitives/src/v6/executor_params.rs | 39 ++-- polkadot/primitives/src/v6/mod.rs | 26 +-- .../src/node/utility/pvf-host-and-workers.md | 4 + .../src/configuration/benchmarking.rs | 10 +- 18 files changed, 271 insertions(+), 243 deletions(-) diff --git a/polkadot/node/core/approval-voting/src/lib.rs b/polkadot/node/core/approval-voting/src/lib.rs index 94f7fcaf941..ef01727b7eb 100644 --- a/polkadot/node/core/approval-voting/src/lib.rs +++ b/polkadot/node/core/approval-voting/src/lib.rs @@ -54,7 +54,7 @@ use polkadot_node_subsystem_util::{ }; use polkadot_primitives::{ ApprovalVote, BlockNumber, CandidateHash, CandidateIndex, CandidateReceipt, DisputeStatement, - ExecutorParams, GroupIndex, Hash, PvfExecTimeoutKind, SessionIndex, SessionInfo, + ExecutorParams, GroupIndex, Hash, PvfExecKind, SessionIndex, SessionInfo, ValidDisputeStatementKind, ValidatorId, ValidatorIndex, ValidatorPair, ValidatorSignature, }; use sc_keystore::LocalKeystore; @@ -2867,7 +2867,7 @@ async fn launch_approval( candidate_receipt: candidate.clone(), pov: available_data.pov, executor_params, - exec_timeout_kind: PvfExecTimeoutKind::Approval, + exec_kind: PvfExecKind::Approval, response_sender: val_tx, }) .await; diff --git a/polkadot/node/core/approval-voting/src/tests.rs b/polkadot/node/core/approval-voting/src/tests.rs index 0c0dcfde9b6..11bcba9c388 100644 --- a/polkadot/node/core/approval-voting/src/tests.rs +++ b/polkadot/node/core/approval-voting/src/tests.rs @@ 
-2705,10 +2705,10 @@ async fn handle_double_assignment_import( assert_matches!( overseer_recv(virtual_overseer).await, AllMessages::CandidateValidation(CandidateValidationMessage::ValidateFromExhaustive { - exec_timeout_kind, + exec_kind, response_sender, .. - }) if exec_timeout_kind == PvfExecTimeoutKind::Approval => { + }) if exec_kind == PvfExecKind::Approval => { response_sender.send(Ok(ValidationResult::Valid(Default::default(), Default::default()))) .unwrap(); } diff --git a/polkadot/node/core/backing/src/lib.rs b/polkadot/node/core/backing/src/lib.rs index a91eefe5e04..434051f1b00 100644 --- a/polkadot/node/core/backing/src/lib.rs +++ b/polkadot/node/core/backing/src/lib.rs @@ -106,7 +106,7 @@ use polkadot_node_subsystem_util::{ use polkadot_primitives::{ BackedCandidate, CandidateCommitments, CandidateHash, CandidateReceipt, CommittedCandidateReceipt, CoreIndex, CoreState, ExecutorParams, Hash, Id as ParaId, - PersistedValidationData, PvfExecTimeoutKind, SigningContext, ValidationCode, ValidatorId, + PersistedValidationData, PvfExecKind, SigningContext, ValidationCode, ValidatorId, ValidatorIndex, ValidatorSignature, ValidityAttestation, }; use sp_keystore::KeystorePtr; @@ -566,7 +566,7 @@ async fn request_candidate_validation( candidate_receipt, pov, executor_params, - exec_timeout_kind: PvfExecTimeoutKind::Backing, + exec_kind: PvfExecKind::Backing, response_sender: tx, }) .await; diff --git a/polkadot/node/core/backing/src/tests/mod.rs b/polkadot/node/core/backing/src/tests/mod.rs index caa85c12989..c12be72556e 100644 --- a/polkadot/node/core/backing/src/tests/mod.rs +++ b/polkadot/node/core/backing/src/tests/mod.rs @@ -33,7 +33,7 @@ use polkadot_node_subsystem::{ }; use polkadot_node_subsystem_test_helpers as test_helpers; use polkadot_primitives::{ - CandidateDescriptor, GroupRotationInfo, HeadData, PersistedValidationData, PvfExecTimeoutKind, + CandidateDescriptor, GroupRotationInfo, HeadData, PersistedValidationData, PvfExecKind, ScheduledCore, SessionIndex, LEGACY_MIN_BACKING_VOTES, }; use sp_application_crypto::AppCrypto; @@ -344,14 +344,14 @@ async fn assert_validate_from_exhaustive( validation_data, validation_code, candidate_receipt, - exec_timeout_kind, + exec_kind, response_sender, .. }, ) if validation_data == *assert_pvd && validation_code == *assert_validation_code && *pov == *assert_pov && &candidate_receipt.descriptor == assert_candidate.descriptor() && - exec_timeout_kind == PvfExecTimeoutKind::Backing && + exec_kind == PvfExecKind::Backing && candidate_receipt.commitments_hash == assert_candidate.commitments.hash() => { response_sender.send(Ok(ValidationResult::Valid( @@ -550,14 +550,14 @@ fn backing_works() { validation_code, candidate_receipt, pov, - exec_timeout_kind, + exec_kind, response_sender, .. }, ) if validation_data == pvd_ab && validation_code == validation_code_ab && *pov == pov_ab && &candidate_receipt.descriptor == candidate_a.descriptor() && - exec_timeout_kind == PvfExecTimeoutKind::Backing && + exec_kind == PvfExecKind::Backing && candidate_receipt.commitments_hash == candidate_a_commitments_hash => { response_sender.send(Ok( @@ -729,14 +729,14 @@ fn backing_works_while_validation_ongoing() { validation_code, candidate_receipt, pov, - exec_timeout_kind, + exec_kind, response_sender, .. 
}, ) if validation_data == pvd_abc && validation_code == validation_code_abc && *pov == pov_abc && &candidate_receipt.descriptor == candidate_a.descriptor() && - exec_timeout_kind == PvfExecTimeoutKind::Backing && + exec_kind == PvfExecKind::Backing && candidate_a_commitments_hash == candidate_receipt.commitments_hash => { // we never validate the candidate. our local node @@ -890,14 +890,14 @@ fn backing_misbehavior_works() { validation_code, candidate_receipt, pov, - exec_timeout_kind, + exec_kind, response_sender, .. }, ) if validation_data == pvd_a && validation_code == validation_code_a && *pov == pov_a && &candidate_receipt.descriptor == candidate_a.descriptor() && - exec_timeout_kind == PvfExecTimeoutKind::Backing && + exec_kind == PvfExecKind::Backing && candidate_a_commitments_hash == candidate_receipt.commitments_hash => { response_sender.send(Ok( @@ -1057,14 +1057,14 @@ fn backing_dont_second_invalid() { validation_code, candidate_receipt, pov, - exec_timeout_kind, + exec_kind, response_sender, .. }, ) if validation_data == pvd_a && validation_code == validation_code_a && *pov == pov_block_a && &candidate_receipt.descriptor == candidate_a.descriptor() && - exec_timeout_kind == PvfExecTimeoutKind::Backing && + exec_kind == PvfExecKind::Backing && candidate_a.commitments.hash() == candidate_receipt.commitments_hash => { response_sender.send(Ok(ValidationResult::Invalid(InvalidCandidate::BadReturn))).unwrap(); @@ -1097,14 +1097,14 @@ fn backing_dont_second_invalid() { validation_code, candidate_receipt, pov, - exec_timeout_kind, + exec_kind, response_sender, .. }, ) if validation_data == pvd_b && validation_code == validation_code_b && *pov == pov_block_b && &candidate_receipt.descriptor == candidate_b.descriptor() && - exec_timeout_kind == PvfExecTimeoutKind::Backing && + exec_kind == PvfExecKind::Backing && candidate_b.commitments.hash() == candidate_receipt.commitments_hash => { response_sender.send(Ok( @@ -1224,14 +1224,14 @@ fn backing_second_after_first_fails_works() { validation_code, candidate_receipt, pov, - exec_timeout_kind, + exec_kind, response_sender, .. }, ) if validation_data == pvd_a && validation_code == validation_code_a && *pov == pov_a && &candidate_receipt.descriptor == candidate.descriptor() && - exec_timeout_kind == PvfExecTimeoutKind::Backing && + exec_kind == PvfExecKind::Backing && candidate.commitments.hash() == candidate_receipt.commitments_hash => { response_sender.send(Ok(ValidationResult::Invalid(InvalidCandidate::BadReturn))).unwrap(); @@ -1368,14 +1368,14 @@ fn backing_works_after_failed_validation() { validation_code, candidate_receipt, pov, - exec_timeout_kind, + exec_kind, response_sender, .. }, ) if validation_data == pvd_a && validation_code == validation_code_a && *pov == pov_a && &candidate_receipt.descriptor == candidate.descriptor() && - exec_timeout_kind == PvfExecTimeoutKind::Backing && + exec_kind == PvfExecKind::Backing && candidate.commitments.hash() == candidate_receipt.commitments_hash => { response_sender.send(Err(ValidationFailed("Internal test error".into()))).unwrap(); @@ -1634,13 +1634,13 @@ fn retry_works() { validation_code, candidate_receipt, pov, - exec_timeout_kind, + exec_kind, .. 
}, ) if validation_data == pvd_a && validation_code == validation_code_a && *pov == pov_a && &candidate_receipt.descriptor == candidate.descriptor() && - exec_timeout_kind == PvfExecTimeoutKind::Backing && + exec_kind == PvfExecKind::Backing && candidate.commitments.hash() == candidate_receipt.commitments_hash ); virtual_overseer diff --git a/polkadot/node/core/backing/src/tests/prospective_parachains.rs b/polkadot/node/core/backing/src/tests/prospective_parachains.rs index fc4bd7d98e7..e7c29e11bb4 100644 --- a/polkadot/node/core/backing/src/tests/prospective_parachains.rs +++ b/polkadot/node/core/backing/src/tests/prospective_parachains.rs @@ -232,14 +232,14 @@ async fn assert_validate_seconded_candidate( validation_code, candidate_receipt, pov, - exec_timeout_kind, + exec_kind, response_sender, .. }) if &validation_data == assert_pvd && &validation_code == assert_validation_code && &*pov == assert_pov && &candidate_receipt.descriptor == candidate.descriptor() && - exec_timeout_kind == PvfExecTimeoutKind::Backing && + exec_kind == PvfExecKind::Backing && candidate.commitments.hash() == candidate_receipt.commitments_hash => { response_sender.send(Ok(ValidationResult::Valid( diff --git a/polkadot/node/core/candidate-validation/src/lib.rs b/polkadot/node/core/candidate-validation/src/lib.rs index 96ab07d3b82..f5d17af6c68 100644 --- a/polkadot/node/core/candidate-validation/src/lib.rs +++ b/polkadot/node/core/candidate-validation/src/lib.rs @@ -49,8 +49,8 @@ use polkadot_primitives::{ DEFAULT_LENIENT_PREPARATION_TIMEOUT, DEFAULT_PRECHECK_PREPARATION_TIMEOUT, }, CandidateCommitments, CandidateDescriptor, CandidateReceipt, ExecutorParams, Hash, - OccupiedCoreAssumption, PersistedValidationData, PvfExecTimeoutKind, PvfPrepTimeoutKind, - ValidationCode, ValidationCodeHash, + OccupiedCoreAssumption, PersistedValidationData, PvfExecKind, PvfPrepKind, ValidationCode, + ValidationCodeHash, }; use parity_scale_codec::Encode; @@ -73,12 +73,6 @@ mod tests; const LOG_TARGET: &'static str = "parachain::candidate-validation"; -/// The amount of time to wait before retrying after a retry-able backing validation error. We use a -/// lower value for the backing case, to fit within the lower backing timeout. -#[cfg(not(test))] -const PVF_BACKING_EXECUTION_RETRY_DELAY: Duration = Duration::from_millis(500); -#[cfg(test)] -const PVF_BACKING_EXECUTION_RETRY_DELAY: Duration = Duration::from_millis(200); /// The amount of time to wait before retrying after a retry-able approval validation error. We use /// a higher value for the approval case since we have more time, and if we wait longer it is more /// likely that transient conditions will resolve. @@ -163,7 +157,7 @@ async fn run( candidate_receipt, pov, executor_params, - exec_timeout_kind, + exec_kind, response_sender, .. } => { @@ -180,7 +174,7 @@ async fn run( candidate_receipt, pov, executor_params, - exec_timeout_kind, + exec_kind, &metrics, ) .await; @@ -198,7 +192,7 @@ async fn run( candidate_receipt, pov, executor_params, - exec_timeout_kind, + exec_kind, response_sender, .. 
} => { @@ -215,7 +209,7 @@ async fn run( candidate_receipt, pov, executor_params, - exec_timeout_kind, + exec_kind, &metrics, ) .await; @@ -357,7 +351,7 @@ where return PreCheckOutcome::Invalid }; - let timeout = pvf_prep_timeout(&executor_params, PvfPrepTimeoutKind::Precheck); + let timeout = pvf_prep_timeout(&executor_params, PvfPrepKind::Precheck); let pvf = match sp_maybe_compressed_blob::decompress( &validation_code.0, @@ -501,7 +495,7 @@ async fn validate_from_chain_state( candidate_receipt: CandidateReceipt, pov: Arc, executor_params: ExecutorParams, - exec_timeout_kind: PvfExecTimeoutKind, + exec_kind: PvfExecKind, metrics: &Metrics, ) -> Result where @@ -521,7 +515,7 @@ where candidate_receipt.clone(), pov, executor_params, - exec_timeout_kind, + exec_kind, metrics, ) .await; @@ -557,7 +551,7 @@ async fn validate_candidate_exhaustive( candidate_receipt: CandidateReceipt, pov: Arc, executor_params: ExecutorParams, - exec_timeout_kind: PvfExecTimeoutKind, + exec_kind: PvfExecKind, metrics: &Metrics, ) -> Result { let _timer = metrics.time_validate_candidate_exhaustive(); @@ -616,15 +610,32 @@ async fn validate_candidate_exhaustive( relay_parent_storage_root: persisted_validation_data.relay_parent_storage_root, }; - let result = validation_backend - .validate_candidate_with_retry( - raw_validation_code.to_vec(), - pvf_exec_timeout(&executor_params, exec_timeout_kind), - exec_timeout_kind, - params, - executor_params, - ) - .await; + let result = match exec_kind { + // Retry is disabled to reduce the chance of nondeterministic blocks getting backed and + // honest backers getting slashed. + PvfExecKind::Backing => { + let prep_timeout = pvf_prep_timeout(&executor_params, PvfPrepKind::Prepare); + let exec_timeout = pvf_exec_timeout(&executor_params, exec_kind); + let pvf = PvfPrepData::from_code( + raw_validation_code.to_vec(), + executor_params, + prep_timeout, + PrepareJobKind::Compilation, + ); + + validation_backend.validate_candidate(pvf, exec_timeout, params.encode()).await + }, + PvfExecKind::Approval => + validation_backend + .validate_candidate_with_retry( + raw_validation_code.to_vec(), + pvf_exec_timeout(&executor_params, exec_kind), + params, + executor_params, + PVF_APPROVAL_EXECUTION_RETRY_DELAY, + ) + .await, + }; if let Err(ref error) = result { gum::info!(target: LOG_TARGET, ?para_id, ?error, "Failed to validate candidate"); @@ -709,8 +720,8 @@ trait ValidationBackend { encoded_params: Vec, ) -> Result; - /// Tries executing a PVF. Will retry once if an error is encountered that may have been - /// transient. + /// Tries executing a PVF for the approval subsystem. Will retry once if an error is encountered + /// that may have been transient. /// /// NOTE: Should retry only on errors that are a result of execution itself, and not of /// preparation. @@ -718,11 +729,11 @@ trait ValidationBackend { &mut self, raw_validation_code: Vec, exec_timeout: Duration, - exec_timeout_kind: PvfExecTimeoutKind, params: ValidationParams, executor_params: ExecutorParams, + retry_delay: Duration, ) -> Result { - let prep_timeout = pvf_prep_timeout(&executor_params, PvfPrepTimeoutKind::Lenient); + let prep_timeout = pvf_prep_timeout(&executor_params, PvfPrepKind::Prepare); // Construct the PVF a single time, since it is an expensive operation. Cloning it is cheap. 
let pvf = PvfPrepData::from_code( raw_validation_code, @@ -740,11 +751,6 @@ trait ValidationBackend { return validation_result } - let retry_delay = match exec_timeout_kind { - PvfExecTimeoutKind::Backing => PVF_BACKING_EXECUTION_RETRY_DELAY, - PvfExecTimeoutKind::Approval => PVF_APPROVAL_EXECUTION_RETRY_DELAY, - }; - // Allow limited retries for each kind of error. let mut num_death_retries_left = 1; let mut num_job_error_retries_left = 1; @@ -867,22 +873,41 @@ fn perform_basic_checks( Ok(()) } -fn pvf_prep_timeout(executor_params: &ExecutorParams, kind: PvfPrepTimeoutKind) -> Duration { +/// To determine the amount of timeout time for the pvf execution. +/// +/// Precheck +/// The time period after which the preparation worker is considered +/// unresponsive and will be killed. +/// +/// Prepare +///The time period after which the preparation worker is considered +/// unresponsive and will be killed. +fn pvf_prep_timeout(executor_params: &ExecutorParams, kind: PvfPrepKind) -> Duration { if let Some(timeout) = executor_params.pvf_prep_timeout(kind) { return timeout } match kind { - PvfPrepTimeoutKind::Precheck => DEFAULT_PRECHECK_PREPARATION_TIMEOUT, - PvfPrepTimeoutKind::Lenient => DEFAULT_LENIENT_PREPARATION_TIMEOUT, + PvfPrepKind::Precheck => DEFAULT_PRECHECK_PREPARATION_TIMEOUT, + PvfPrepKind::Prepare => DEFAULT_LENIENT_PREPARATION_TIMEOUT, } } -fn pvf_exec_timeout(executor_params: &ExecutorParams, kind: PvfExecTimeoutKind) -> Duration { +/// To determine the amount of timeout time for the pvf execution. +/// +/// Backing subsystem +/// The amount of time to spend on execution during backing. +/// +/// Approval subsystem +/// The amount of time to spend on execution during approval or disputes. +/// This should be much longer than the backing execution timeout to ensure that in the +/// absence of extremely large disparities between hardware, blocks that pass backing are +/// considered executable by approval checkers or dispute participants. +fn pvf_exec_timeout(executor_params: &ExecutorParams, kind: PvfExecKind) -> Duration { if let Some(timeout) = executor_params.pvf_exec_timeout(kind) { return timeout } match kind { - PvfExecTimeoutKind::Backing => DEFAULT_BACKING_EXECUTION_TIMEOUT, - PvfExecTimeoutKind::Approval => DEFAULT_APPROVAL_EXECUTION_TIMEOUT, + PvfExecKind::Backing => DEFAULT_BACKING_EXECUTION_TIMEOUT, + PvfExecKind::Approval => DEFAULT_APPROVAL_EXECUTION_TIMEOUT, } } diff --git a/polkadot/node/core/candidate-validation/src/tests.rs b/polkadot/node/core/candidate-validation/src/tests.rs index 801c581938a..5e2585d6873 100644 --- a/polkadot/node/core/candidate-validation/src/tests.rs +++ b/polkadot/node/core/candidate-validation/src/tests.rs @@ -436,7 +436,7 @@ fn candidate_validation_ok_is_ok() { candidate_receipt, Arc::new(pov), ExecutorParams::default(), - PvfExecTimeoutKind::Backing, + PvfExecKind::Backing, &Default::default(), )) .unwrap(); @@ -488,7 +488,7 @@ fn candidate_validation_bad_return_is_invalid() { candidate_receipt, Arc::new(pov), ExecutorParams::default(), - PvfExecTimeoutKind::Backing, + PvfExecKind::Backing, &Default::default(), )) .unwrap(); @@ -496,23 +496,20 @@ fn candidate_validation_bad_return_is_invalid() { assert_matches!(v, ValidationResult::Invalid(InvalidCandidate::Timeout)); } -// Test that we vote valid if we get `AmbiguousWorkerDeath`, retry, and then succeed. 
-#[test] -fn candidate_validation_one_ambiguous_error_is_valid() { - let validation_data = PersistedValidationData { max_pov_size: 1024, ..Default::default() }; - - let pov = PoV { block_data: BlockData(vec![1; 32]) }; - let head_data = HeadData(vec![1, 1, 1]); - let validation_code = ValidationCode(vec![2; 16]); - +fn perform_basic_checks_on_valid_candidate( + pov: &PoV, + validation_code: &ValidationCode, + validation_data: &PersistedValidationData, + head_data_hash: Hash, +) -> CandidateDescriptor { let descriptor = make_valid_candidate_descriptor( ParaId::from(1_u32), dummy_hash(), validation_data.hash(), pov.hash(), validation_code.hash(), - head_data.hash(), - dummy_hash(), + head_data_hash, + head_data_hash, Sr25519Keyring::Alice, ); @@ -523,6 +520,24 @@ fn candidate_validation_one_ambiguous_error_is_valid() { &validation_code.hash(), ); assert!(check.is_ok()); + descriptor +} + +// Test that we vote valid if we get `AmbiguousWorkerDeath`, retry, and then succeed. +#[test] +fn candidate_validation_one_ambiguous_error_is_valid() { + let validation_data = PersistedValidationData { max_pov_size: 1024, ..Default::default() }; + + let pov = PoV { block_data: BlockData(vec![1; 32]) }; + let head_data = HeadData(vec![1, 1, 1]); + let validation_code = ValidationCode(vec![2; 16]); + + let descriptor = perform_basic_checks_on_valid_candidate( + &pov, + &validation_code, + &validation_data, + head_data.hash(), + ); let validation_result = WasmValidationResult { head_data, @@ -554,7 +569,7 @@ fn candidate_validation_one_ambiguous_error_is_valid() { candidate_receipt, Arc::new(pov), ExecutorParams::default(), - PvfExecTimeoutKind::Backing, + PvfExecKind::Approval, &Default::default(), )) .unwrap(); @@ -576,24 +591,12 @@ fn candidate_validation_multiple_ambiguous_errors_is_invalid() { let pov = PoV { block_data: BlockData(vec![1; 32]) }; let validation_code = ValidationCode(vec![2; 16]); - let descriptor = make_valid_candidate_descriptor( - ParaId::from(1_u32), - dummy_hash(), - validation_data.hash(), - pov.hash(), - validation_code.hash(), - dummy_hash(), - dummy_hash(), - Sr25519Keyring::Alice, - ); - - let check = perform_basic_checks( - &descriptor, - validation_data.max_pov_size, + let descriptor = perform_basic_checks_on_valid_candidate( &pov, - &validation_code.hash(), + &validation_code, + &validation_data, + dummy_hash(), ); - assert!(check.is_ok()); let candidate_receipt = CandidateReceipt { descriptor, commitments_hash: Hash::zero() }; @@ -607,7 +610,7 @@ fn candidate_validation_multiple_ambiguous_errors_is_invalid() { candidate_receipt, Arc::new(pov), ExecutorParams::default(), - PvfExecTimeoutKind::Backing, + PvfExecKind::Approval, &Default::default(), )) .unwrap(); @@ -615,58 +618,79 @@ fn candidate_validation_multiple_ambiguous_errors_is_invalid() { assert_matches!(v, ValidationResult::Invalid(InvalidCandidate::ExecutionError(_))); } -// Test that we retry on internal errors. +// Test that we retry for approval on internal errors. 
#[test] fn candidate_validation_retry_internal_errors() { - let validation_data = PersistedValidationData { max_pov_size: 1024, ..Default::default() }; - - let pov = PoV { block_data: BlockData(vec![1; 32]) }; - let validation_code = ValidationCode(vec![2; 16]); - - let descriptor = make_valid_candidate_descriptor( - ParaId::from(1_u32), - dummy_hash(), - validation_data.hash(), - pov.hash(), - validation_code.hash(), - dummy_hash(), - dummy_hash(), - Sr25519Keyring::Alice, - ); - - let check = perform_basic_checks( - &descriptor, - validation_data.max_pov_size, - &pov, - &validation_code.hash(), + let v = candidate_validation_retry_on_error_helper( + PvfExecKind::Approval, + vec![ + Err(InternalValidationError::HostCommunication("foo".into()).into()), + // Throw an AJD error, we should still retry again. + Err(ValidationError::InvalidCandidate(WasmInvalidCandidate::AmbiguousJobDeath( + "baz".into(), + ))), + // Throw another internal error. + Err(InternalValidationError::HostCommunication("bar".into()).into()), + ], ); - assert!(check.is_ok()); - - let candidate_receipt = CandidateReceipt { descriptor, commitments_hash: Hash::zero() }; + assert_matches!(v, Err(ValidationFailed(s)) if s.contains("bar")); +} - let v = executor::block_on(validate_candidate_exhaustive( - MockValidateCandidateBackend::with_hardcoded_result_list(vec![ +// Test that we don't retry for backing on internal errors. +#[test] +fn candidate_validation_dont_retry_internal_errors() { + let v = candidate_validation_retry_on_error_helper( + PvfExecKind::Backing, + vec![ Err(InternalValidationError::HostCommunication("foo".into()).into()), // Throw an AWD error, we should still retry again. Err(ValidationError::InvalidCandidate(WasmInvalidCandidate::AmbiguousWorkerDeath)), // Throw another internal error. Err(InternalValidationError::HostCommunication("bar".into()).into()), - ]), - validation_data, - validation_code, - candidate_receipt, - Arc::new(pov), - ExecutorParams::default(), - PvfExecTimeoutKind::Backing, - &Default::default(), - )); + ], + ); - assert_matches!(v, Err(ValidationFailed(s)) if s.contains("bar")); + assert_matches!(v, Err(ValidationFailed(s)) if s.contains("foo")); } -// Test that we retry on panic errors. +// Test that we retry for approval on panic errors. #[test] fn candidate_validation_retry_panic_errors() { + let v = candidate_validation_retry_on_error_helper( + PvfExecKind::Approval, + vec![ + Err(ValidationError::InvalidCandidate(WasmInvalidCandidate::JobError("foo".into()))), + // Throw an AWD error, we should still retry again. + Err(ValidationError::InvalidCandidate(WasmInvalidCandidate::AmbiguousWorkerDeath)), + // Throw another panic error. + Err(ValidationError::InvalidCandidate(WasmInvalidCandidate::JobError("bar".into()))), + ], + ); + + assert_matches!(v, Ok(ValidationResult::Invalid(InvalidCandidate::ExecutionError(s))) if s == "bar".to_string()); +} + +// Test that we don't retry for backing on panic errors. +#[test] +fn candidate_validation_dont_retry_panic_errors() { + let v = candidate_validation_retry_on_error_helper( + PvfExecKind::Backing, + vec![ + Err(ValidationError::InvalidCandidate(WasmInvalidCandidate::JobError("foo".into()))), + // Throw an AWD error, we should still retry again. + Err(ValidationError::InvalidCandidate(WasmInvalidCandidate::AmbiguousWorkerDeath)), + // Throw another panic error. 
+ Err(ValidationError::InvalidCandidate(WasmInvalidCandidate::JobError("bar".into()))), + ], + ); + + assert_matches!(v, Ok(ValidationResult::Invalid(InvalidCandidate::ExecutionError(s))) if s == "foo".to_string()); +} + +fn candidate_validation_retry_on_error_helper( + exec_kind: PvfExecKind, + mock_errors: Vec>, +) -> Result { let validation_data = PersistedValidationData { max_pov_size: 1024, ..Default::default() }; let pov = PoV { block_data: BlockData(vec![1; 32]) }; @@ -693,26 +717,16 @@ fn candidate_validation_retry_panic_errors() { let candidate_receipt = CandidateReceipt { descriptor, commitments_hash: Hash::zero() }; - let v = executor::block_on(validate_candidate_exhaustive( - MockValidateCandidateBackend::with_hardcoded_result_list(vec![ - Err(ValidationError::InvalidCandidate(WasmInvalidCandidate::JobError("foo".into()))), - // Throw an AJD error, we should still retry again. - Err(ValidationError::InvalidCandidate(WasmInvalidCandidate::AmbiguousJobDeath( - "baz".into(), - ))), - // Throw another panic error. - Err(ValidationError::InvalidCandidate(WasmInvalidCandidate::JobError("bar".into()))), - ]), + return executor::block_on(validate_candidate_exhaustive( + MockValidateCandidateBackend::with_hardcoded_result_list(mock_errors), validation_data, validation_code, candidate_receipt, Arc::new(pov), ExecutorParams::default(), - PvfExecTimeoutKind::Backing, + exec_kind, &Default::default(), )); - - assert_matches!(v, Ok(ValidationResult::Invalid(InvalidCandidate::ExecutionError(s))) if s == "bar".to_string()); } #[test] @@ -752,7 +766,7 @@ fn candidate_validation_timeout_is_internal_error() { candidate_receipt, Arc::new(pov), ExecutorParams::default(), - PvfExecTimeoutKind::Backing, + PvfExecKind::Backing, &Default::default(), )); @@ -797,7 +811,7 @@ fn candidate_validation_commitment_hash_mismatch_is_invalid() { candidate_receipt, Arc::new(pov), ExecutorParams::default(), - PvfExecTimeoutKind::Backing, + PvfExecKind::Backing, &Default::default(), )) .unwrap(); @@ -846,7 +860,7 @@ fn candidate_validation_code_mismatch_is_invalid() { candidate_receipt, Arc::new(pov), ExecutorParams::default(), - PvfExecTimeoutKind::Backing, + PvfExecKind::Backing, &Default::default(), )) .unwrap(); @@ -903,7 +917,7 @@ fn compressed_code_works() { candidate_receipt, Arc::new(pov), ExecutorParams::default(), - PvfExecTimeoutKind::Backing, + PvfExecKind::Backing, &Default::default(), )); @@ -954,7 +968,7 @@ fn code_decompression_failure_is_error() { candidate_receipt, Arc::new(pov), ExecutorParams::default(), - PvfExecTimeoutKind::Backing, + PvfExecKind::Backing, &Default::default(), )); @@ -1006,7 +1020,7 @@ fn pov_decompression_failure_is_invalid() { candidate_receipt, Arc::new(pov), ExecutorParams::default(), - PvfExecTimeoutKind::Backing, + PvfExecKind::Backing, &Default::default(), )); diff --git a/polkadot/node/core/dispute-coordinator/src/participation/mod.rs b/polkadot/node/core/dispute-coordinator/src/participation/mod.rs index 90268516e9d..05ea7323af1 100644 --- a/polkadot/node/core/dispute-coordinator/src/participation/mod.rs +++ b/polkadot/node/core/dispute-coordinator/src/participation/mod.rs @@ -32,7 +32,7 @@ use polkadot_node_subsystem::{ }; use polkadot_node_subsystem_util::runtime::get_validation_code_by_hash; use polkadot_primitives::{ - BlockNumber, CandidateHash, CandidateReceipt, Hash, PvfExecTimeoutKind, SessionIndex, + BlockNumber, CandidateHash, CandidateReceipt, Hash, PvfExecKind, SessionIndex, }; use crate::LOG_TARGET; @@ -386,7 +386,7 @@ async fn participate( 
candidate_receipt: req.candidate_receipt().clone(), pov: available_data.pov, executor_params: req.executor_params(), - exec_timeout_kind: PvfExecTimeoutKind::Approval, + exec_kind: PvfExecKind::Approval, response_sender: validation_tx, }) .await; diff --git a/polkadot/node/core/dispute-coordinator/src/participation/tests.rs b/polkadot/node/core/dispute-coordinator/src/participation/tests.rs index 0aa0d772005..012df51d0cd 100644 --- a/polkadot/node/core/dispute-coordinator/src/participation/tests.rs +++ b/polkadot/node/core/dispute-coordinator/src/participation/tests.rs @@ -115,8 +115,8 @@ pub async fn participation_full_happy_path( assert_matches!( ctx_handle.recv().await, AllMessages::CandidateValidation( - CandidateValidationMessage::ValidateFromExhaustive { candidate_receipt, exec_timeout_kind, response_sender, .. } - ) if exec_timeout_kind == PvfExecTimeoutKind::Approval => { + CandidateValidationMessage::ValidateFromExhaustive { candidate_receipt, exec_kind, response_sender, .. } + ) if exec_kind == PvfExecKind::Approval => { if expected_commitments_hash != candidate_receipt.commitments_hash { response_sender.send(Ok(ValidationResult::Invalid(InvalidCandidate::CommitmentsHashMismatch))).unwrap(); } else { @@ -450,8 +450,8 @@ fn cast_invalid_vote_if_validation_fails_or_is_invalid() { assert_matches!( ctx_handle.recv().await, AllMessages::CandidateValidation( - CandidateValidationMessage::ValidateFromExhaustive { exec_timeout_kind, response_sender, .. } - ) if exec_timeout_kind == PvfExecTimeoutKind::Approval => { + CandidateValidationMessage::ValidateFromExhaustive { exec_kind, response_sender, .. } + ) if exec_kind == PvfExecKind::Approval => { response_sender.send(Ok(ValidationResult::Invalid(InvalidCandidate::Timeout))).unwrap(); }, "overseer did not receive candidate validation message", @@ -487,8 +487,8 @@ fn cast_invalid_vote_if_commitments_dont_match() { assert_matches!( ctx_handle.recv().await, AllMessages::CandidateValidation( - CandidateValidationMessage::ValidateFromExhaustive { exec_timeout_kind, response_sender, .. } - ) if exec_timeout_kind == PvfExecTimeoutKind::Approval => { + CandidateValidationMessage::ValidateFromExhaustive { exec_kind, response_sender, .. } + ) if exec_kind == PvfExecKind::Approval => { response_sender.send(Ok(ValidationResult::Invalid(InvalidCandidate::CommitmentsHashMismatch))).unwrap(); }, "overseer did not receive candidate validation message", @@ -524,8 +524,8 @@ fn cast_valid_vote_if_validation_passes() { assert_matches!( ctx_handle.recv().await, AllMessages::CandidateValidation( - CandidateValidationMessage::ValidateFromExhaustive { exec_timeout_kind, response_sender, .. } - ) if exec_timeout_kind == PvfExecTimeoutKind::Approval => { + CandidateValidationMessage::ValidateFromExhaustive { exec_kind, response_sender, .. 
} + ) if exec_kind == PvfExecKind::Approval => { response_sender.send(Ok(ValidationResult::Valid(dummy_candidate_commitments(None), PersistedValidationData::default()))).unwrap(); }, "overseer did not receive candidate validation message", diff --git a/polkadot/node/malus/src/variants/common.rs b/polkadot/node/malus/src/variants/common.rs index 474887ee8df..20b6654638e 100644 --- a/polkadot/node/malus/src/variants/common.rs +++ b/polkadot/node/malus/src/variants/common.rs @@ -30,7 +30,7 @@ use polkadot_node_subsystem::{ use polkadot_primitives::{ CandidateCommitments, CandidateDescriptor, CandidateReceipt, PersistedValidationData, - PvfExecTimeoutKind, + PvfExecKind, }; use futures::channel::oneshot; @@ -90,10 +90,10 @@ impl FakeCandidateValidation { } } - fn should_misbehave(&self, timeout: PvfExecTimeoutKind) -> bool { + fn should_misbehave(&self, timeout: PvfExecKind) -> bool { match timeout { - PvfExecTimeoutKind::Backing => self.includes_backing(), - PvfExecTimeoutKind::Approval => self.includes_approval(), + PvfExecKind::Backing => self.includes_backing(), + PvfExecKind::Approval => self.includes_approval(), } } } @@ -279,13 +279,13 @@ where candidate_receipt, pov, executor_params, - exec_timeout_kind, + exec_kind, response_sender, .. }, } => { match self.fake_validation { - x if x.misbehaves_valid() && x.should_misbehave(exec_timeout_kind) => { + x if x.misbehaves_valid() && x.should_misbehave(exec_kind) => { // Behave normally if the `PoV` is not known to be malicious. if pov.block_data.0.as_slice() != MALICIOUS_POV { return Some(FromOrchestra::Communication { @@ -295,7 +295,7 @@ where candidate_receipt, pov, executor_params, - exec_timeout_kind, + exec_kind, response_sender, }, }) @@ -333,14 +333,14 @@ where candidate_receipt, pov, executor_params, - exec_timeout_kind, + exec_kind, response_sender, }, }) }, } }, - x if x.misbehaves_invalid() && x.should_misbehave(exec_timeout_kind) => { + x if x.misbehaves_invalid() && x.should_misbehave(exec_kind) => { // Set the validation result to invalid with probability `p` and trigger a // dispute let behave_maliciously = self.distribution.sample(&mut rand::thread_rng()); @@ -373,7 +373,7 @@ where candidate_receipt, pov, executor_params, - exec_timeout_kind, + exec_kind, response_sender, }, }) @@ -388,7 +388,7 @@ where candidate_receipt, pov, executor_params, - exec_timeout_kind, + exec_kind, response_sender, }, }), @@ -401,13 +401,13 @@ where candidate_receipt, pov, executor_params, - exec_timeout_kind, + exec_kind, response_sender, .. }, } => { match self.fake_validation { - x if x.misbehaves_valid() && x.should_misbehave(exec_timeout_kind) => { + x if x.misbehaves_valid() && x.should_misbehave(exec_kind) => { // Behave normally if the `PoV` is not known to be malicious. 
if pov.block_data.0.as_slice() != MALICIOUS_POV { return Some(FromOrchestra::Communication { @@ -415,7 +415,7 @@ where candidate_receipt, pov, executor_params, - exec_timeout_kind, + exec_kind, response_sender, }, }) @@ -445,13 +445,13 @@ where candidate_receipt, pov, executor_params, - exec_timeout_kind, + exec_kind, response_sender, }, }), } }, - x if x.misbehaves_invalid() && x.should_misbehave(exec_timeout_kind) => { + x if x.misbehaves_invalid() && x.should_misbehave(exec_kind) => { // Maliciously set the validation result to invalid for a valid candidate // with probability `p` let behave_maliciously = self.distribution.sample(&mut rand::thread_rng()); @@ -479,7 +479,7 @@ where candidate_receipt, pov, executor_params, - exec_timeout_kind, + exec_kind, response_sender, }, }) @@ -491,7 +491,7 @@ where candidate_receipt, pov, executor_params, - exec_timeout_kind, + exec_kind, response_sender, }, }), diff --git a/polkadot/node/overseer/examples/minimal-example.rs b/polkadot/node/overseer/examples/minimal-example.rs index b2c0ea2f75a..857cdba673d 100644 --- a/polkadot/node/overseer/examples/minimal-example.rs +++ b/polkadot/node/overseer/examples/minimal-example.rs @@ -32,7 +32,7 @@ use polkadot_overseer::{ gen::{FromOrchestra, SpawnedSubsystem}, HeadSupportsParachains, SubsystemError, }; -use polkadot_primitives::{CandidateReceipt, Hash, PvfExecTimeoutKind}; +use polkadot_primitives::{CandidateReceipt, Hash, PvfExecKind}; struct AlwaysSupportsParachains; @@ -77,7 +77,7 @@ impl Subsystem1 { candidate_receipt, pov: PoV { block_data: BlockData(Vec::new()) }.into(), executor_params: Default::default(), - exec_timeout_kind: PvfExecTimeoutKind::Backing, + exec_kind: PvfExecKind::Backing, response_sender: tx, }; ctx.send_message(msg).await; diff --git a/polkadot/node/overseer/src/tests.rs b/polkadot/node/overseer/src/tests.rs index 254f5fe4512..0494274367d 100644 --- a/polkadot/node/overseer/src/tests.rs +++ b/polkadot/node/overseer/src/tests.rs @@ -30,7 +30,7 @@ use polkadot_node_subsystem_types::messages::{ }; use polkadot_primitives::{ CandidateHash, CandidateReceipt, CollatorPair, Id as ParaId, InvalidDisputeStatementKind, - PvfExecTimeoutKind, SessionIndex, ValidDisputeStatementKind, ValidatorIndex, + PvfExecKind, SessionIndex, ValidDisputeStatementKind, ValidatorIndex, }; use crate::{ @@ -106,7 +106,7 @@ where candidate_receipt, pov: PoV { block_data: BlockData(Vec::new()) }.into(), executor_params: Default::default(), - exec_timeout_kind: PvfExecTimeoutKind::Backing, + exec_kind: PvfExecKind::Backing, response_sender: tx, }) .await; @@ -804,7 +804,7 @@ fn test_candidate_validation_msg() -> CandidateValidationMessage { candidate_receipt, pov, executor_params: Default::default(), - exec_timeout_kind: PvfExecTimeoutKind::Backing, + exec_kind: PvfExecKind::Backing, response_sender, } } diff --git a/polkadot/node/subsystem-types/src/messages.rs b/polkadot/node/subsystem-types/src/messages.rs index 43456daec30..44c6f27b17c 100644 --- a/polkadot/node/subsystem-types/src/messages.rs +++ b/polkadot/node/subsystem-types/src/messages.rs @@ -47,7 +47,7 @@ use polkadot_primitives::{ CommittedCandidateReceipt, CoreState, DisputeState, ExecutorParams, GroupIndex, GroupRotationInfo, Hash, Header as BlockHeader, Id as ParaId, InboundDownwardMessage, InboundHrmpMessage, MultiDisputeStatementSet, OccupiedCoreAssumption, PersistedValidationData, - PvfCheckStatement, PvfExecTimeoutKind, SessionIndex, SessionInfo, SignedAvailabilityBitfield, + PvfCheckStatement, PvfExecKind, SessionIndex, SessionInfo, 
SignedAvailabilityBitfield, SignedAvailabilityBitfields, ValidationCode, ValidationCodeHash, ValidatorId, ValidatorIndex, ValidatorSignature, }; @@ -150,8 +150,8 @@ pub enum CandidateValidationMessage { pov: Arc, /// Session's executor parameters executor_params: ExecutorParams, - /// Execution timeout kind (backing/approvals) - exec_timeout_kind: PvfExecTimeoutKind, + /// Execution kind, used for timeouts and retries (backing/approvals) + exec_kind: PvfExecKind, /// The sending side of the response channel response_sender: oneshot::Sender>, }, @@ -175,8 +175,8 @@ pub enum CandidateValidationMessage { pov: Arc, /// Session's executor parameters executor_params: ExecutorParams, - /// Execution timeout kind (backing/approvals) - exec_timeout_kind: PvfExecTimeoutKind, + /// Execution kind, used for timeouts and retries (backing/approvals) + exec_kind: PvfExecKind, /// The sending side of the response channel response_sender: oneshot::Sender>, }, diff --git a/polkadot/primitives/src/lib.rs b/polkadot/primitives/src/lib.rs index 4ba8b8b031f..2570bcadf60 100644 --- a/polkadot/primitives/src/lib.rs +++ b/polkadot/primitives/src/lib.rs @@ -48,11 +48,11 @@ pub use v6::{ HorizontalMessages, HrmpChannelId, Id, InboundDownwardMessage, InboundHrmpMessage, IndexedVec, InherentData, InvalidDisputeStatementKind, Moment, MultiDisputeStatementSet, Nonce, OccupiedCore, OccupiedCoreAssumption, OutboundHrmpMessage, ParathreadClaim, ParathreadEntry, - PersistedValidationData, PvfCheckStatement, PvfExecTimeoutKind, PvfPrepTimeoutKind, - RuntimeMetricLabel, RuntimeMetricLabelValue, RuntimeMetricLabelValues, RuntimeMetricLabels, - RuntimeMetricOp, RuntimeMetricUpdate, ScheduledCore, ScrapedOnChainVotes, SessionIndex, - SessionInfo, Signature, Signed, SignedAvailabilityBitfield, SignedAvailabilityBitfields, - SignedStatement, SigningContext, Slot, UncheckedSigned, UncheckedSignedAvailabilityBitfield, + PersistedValidationData, PvfCheckStatement, PvfExecKind, PvfPrepKind, RuntimeMetricLabel, + RuntimeMetricLabelValue, RuntimeMetricLabelValues, RuntimeMetricLabels, RuntimeMetricOp, + RuntimeMetricUpdate, ScheduledCore, ScrapedOnChainVotes, SessionIndex, SessionInfo, Signature, + Signed, SignedAvailabilityBitfield, SignedAvailabilityBitfields, SignedStatement, + SigningContext, Slot, UncheckedSigned, UncheckedSignedAvailabilityBitfield, UncheckedSignedAvailabilityBitfields, UncheckedSignedStatement, UpgradeGoAhead, UpgradeRestriction, UpwardMessage, ValidDisputeStatementKind, ValidationCode, ValidationCodeHash, ValidatorId, ValidatorIndex, ValidatorSignature, ValidityAttestation, diff --git a/polkadot/primitives/src/v6/executor_params.rs b/polkadot/primitives/src/v6/executor_params.rs index bb9980f6879..112a529f62b 100644 --- a/polkadot/primitives/src/v6/executor_params.rs +++ b/polkadot/primitives/src/v6/executor_params.rs @@ -21,7 +21,7 @@ //! by the first element of the vector). Decoding to a usable semantics structure is //! done in `polkadot-node-core-pvf`. -use crate::{BlakeTwo256, HashT as _, PvfExecTimeoutKind, PvfPrepTimeoutKind}; +use crate::{BlakeTwo256, HashT as _, PvfExecKind, PvfPrepKind}; use parity_scale_codec::{Decode, Encode}; use polkadot_core_primitives::Hash; use scale_info::TypeInfo; @@ -45,7 +45,7 @@ pub const PRECHECK_MEM_MAX_LO: u64 = 256 * 1024 * 1024; pub const PRECHECK_MEM_MAX_HI: u64 = 16 * 1024 * 1024 * 1024; // Default PVF timeouts. Must never be changed! Use executor environment parameters to adjust them. -// See also `PvfPrepTimeoutKind` and `PvfExecTimeoutKind` docs. 
+// See also `PvfPrepKind` and `PvfExecKind` docs. /// Default PVF preparation timeout for prechecking requests. pub const DEFAULT_PRECHECK_PREPARATION_TIMEOUT: Duration = Duration::from_secs(60); @@ -99,12 +99,12 @@ pub enum ExecutorParam { /// Always ensure that `precheck_timeout` < `lenient_timeout`. /// When absent, the default values will be used. #[codec(index = 5)] - PvfPrepTimeout(PvfPrepTimeoutKind, u64), + PvfPrepTimeout(PvfPrepKind, u64), /// PVF execution timeouts, in millisecond. /// Always ensure that `backing_timeout` < `approval_timeout`. /// When absent, the default values will be used. #[codec(index = 6)] - PvfExecTimeout(PvfExecTimeoutKind, u64), + PvfExecTimeout(PvfExecKind, u64), /// Enables WASM bulk memory proposal #[codec(index = 7)] WasmExtBulkMemory, @@ -174,7 +174,7 @@ impl ExecutorParams { } /// Returns a PVF preparation timeout, if any - pub fn pvf_prep_timeout(&self, kind: PvfPrepTimeoutKind) -> Option { + pub fn pvf_prep_timeout(&self, kind: PvfPrepKind) -> Option { for param in &self.0 { if let ExecutorParam::PvfPrepTimeout(k, timeout) = param { if kind == *k { @@ -186,7 +186,7 @@ impl ExecutorParams { } /// Returns a PVF execution timeout, if any - pub fn pvf_exec_timeout(&self, kind: PvfExecTimeoutKind) -> Option { + pub fn pvf_exec_timeout(&self, kind: PvfExecKind) -> Option { for param in &self.0 { if let ExecutorParam::PvfExecTimeout(k, timeout) = param { if kind == *k { @@ -242,12 +242,12 @@ impl ExecutorParams { StackNativeMax(_) => "StackNativeMax", PrecheckingMaxMemory(_) => "PrecheckingMaxMemory", PvfPrepTimeout(kind, _) => match kind { - PvfPrepTimeoutKind::Precheck => "PvfPrepTimeoutKind::Precheck", - PvfPrepTimeoutKind::Lenient => "PvfPrepTimeoutKind::Lenient", + PvfPrepKind::Precheck => "PvfPrepKind::Precheck", + PvfPrepKind::Prepare => "PvfPrepKind::Prepare", }, PvfExecTimeout(kind, _) => match kind { - PvfExecTimeoutKind::Backing => "PvfExecTimeoutKind::Backing", - PvfExecTimeoutKind::Approval => "PvfExecTimeoutKind::Approval", + PvfExecKind::Backing => "PvfExecKind::Backing", + PvfExecKind::Approval => "PvfExecKind::Approval", }, WasmExtBulkMemory => "WasmExtBulkMemory", }; @@ -297,30 +297,23 @@ impl ExecutorParams { } if let (Some(precheck), Some(lenient)) = ( - seen.get("PvfPrepTimeoutKind::Precheck") + seen.get("PvfPrepKind::Precheck") .or(Some(&DEFAULT_PRECHECK_PREPARATION_TIMEOUT_MS)), - seen.get("PvfPrepTimeoutKind::Lenient") + seen.get("PvfPrepKind::Prepare") .or(Some(&DEFAULT_LENIENT_PREPARATION_TIMEOUT_MS)), ) { if *precheck >= *lenient { - return Err(IncompatibleValues( - "PvfPrepTimeoutKind::Precheck", - "PvfPrepTimeoutKind::Lenient", - )) + return Err(IncompatibleValues("PvfPrepKind::Precheck", "PvfPrepKind::Prepare")) } } if let (Some(backing), Some(approval)) = ( - seen.get("PvfExecTimeoutKind::Backing") - .or(Some(&DEFAULT_BACKING_EXECUTION_TIMEOUT_MS)), - seen.get("PvfExecTimeoutKind::Approval") + seen.get("PvfExecKind::Backing").or(Some(&DEFAULT_BACKING_EXECUTION_TIMEOUT_MS)), + seen.get("PvfExecKind::Approval") .or(Some(&DEFAULT_APPROVAL_EXECUTION_TIMEOUT_MS)), ) { if *backing >= *approval { - return Err(IncompatibleValues( - "PvfExecTimeoutKind::Backing", - "PvfExecTimeoutKind::Approval", - )) + return Err(IncompatibleValues("PvfExecKind::Backing", "PvfExecKind::Approval")) } } diff --git a/polkadot/primitives/src/v6/mod.rs b/polkadot/primitives/src/v6/mod.rs index 9371b3db406..83b590dc320 100644 --- a/polkadot/primitives/src/v6/mod.rs +++ b/polkadot/primitives/src/v6/mod.rs @@ -1781,30 +1781,22 @@ impl WellKnownKey { } } 
-/// Type discriminator for PVF preparation timeouts +/// Type discriminator for PVF preparation. #[derive(Encode, Decode, TypeInfo, Clone, Copy, Debug, PartialEq, Eq, Serialize, Deserialize)] -pub enum PvfPrepTimeoutKind { - /// For prechecking requests, the time period after which the preparation worker is considered - /// unresponsive and will be killed. +pub enum PvfPrepKind { + /// For prechecking requests. Precheck, - /// For execution and heads-up requests, the time period after which the preparation worker is - /// considered unresponsive and will be killed. More lenient than the timeout for prechecking - /// to prevent honest validators from timing out on valid PVFs. - Lenient, + /// For execution and heads-up requests. + Prepare, } -/// Type discriminator for PVF execution timeouts +/// Type discriminator for PVF execution. #[derive(Encode, Decode, TypeInfo, Clone, Copy, Debug, PartialEq, Eq, Serialize, Deserialize)] -pub enum PvfExecTimeoutKind { - /// The amount of time to spend on execution during backing. +pub enum PvfExecKind { + /// For backing requests. Backing, - - /// The amount of time to spend on execution during approval or disputes. - /// - /// This should be much longer than the backing execution timeout to ensure that in the - /// absence of extremely large disparities between hardware, blocks that pass backing are - /// considered executable by approval checkers or dispute participants. + /// For approval and dispute request. Approval, } diff --git a/polkadot/roadmap/implementers-guide/src/node/utility/pvf-host-and-workers.md b/polkadot/roadmap/implementers-guide/src/node/utility/pvf-host-and-workers.md index 4dbb7980c1b..74d88ba3ad9 100644 --- a/polkadot/roadmap/implementers-guide/src/node/utility/pvf-host-and-workers.md +++ b/polkadot/roadmap/implementers-guide/src/node/utility/pvf-host-and-workers.md @@ -31,6 +31,10 @@ hopefully resolve. We use a more brief delay here (1 second as opposed to 15 minutes for preparation (see above)), because a successful execution must happen in a short amount of time. +If the execution fails during the backing phase, we won't retry to reduce the chance of +supporting nondeterministic candidates. This reduces the chance of nondeterministic blocks +getting backed and honest backers getting slashed. + We currently know of the following specific cases that will lead to a retried execution request: diff --git a/polkadot/runtime/parachains/src/configuration/benchmarking.rs b/polkadot/runtime/parachains/src/configuration/benchmarking.rs index 508e0579a09..67daf1c4598 100644 --- a/polkadot/runtime/parachains/src/configuration/benchmarking.rs +++ b/polkadot/runtime/parachains/src/configuration/benchmarking.rs @@ -17,7 +17,7 @@ use crate::configuration::*; use frame_benchmarking::{benchmarks, BenchmarkError, BenchmarkResult}; use frame_system::RawOrigin; -use primitives::{ExecutorParam, ExecutorParams, PvfExecTimeoutKind, PvfPrepTimeoutKind}; +use primitives::{ExecutorParam, ExecutorParams, PvfExecKind, PvfPrepKind}; use sp_runtime::traits::One; benchmarks! { @@ -41,10 +41,10 @@ benchmarks! 
{ ExecutorParam::StackNativeMax(256 * 1024 * 1024), ExecutorParam::WasmExtBulkMemory, ExecutorParam::PrecheckingMaxMemory(2 * 1024 * 1024 * 1024), - ExecutorParam::PvfPrepTimeout(PvfPrepTimeoutKind::Precheck, 60_000), - ExecutorParam::PvfPrepTimeout(PvfPrepTimeoutKind::Lenient, 360_000), - ExecutorParam::PvfExecTimeout(PvfExecTimeoutKind::Backing, 2_000), - ExecutorParam::PvfExecTimeout(PvfExecTimeoutKind::Approval, 12_000), + ExecutorParam::PvfPrepTimeout(PvfPrepKind::Precheck, 60_000), + ExecutorParam::PvfPrepTimeout(PvfPrepKind::Prepare, 360_000), + ExecutorParam::PvfExecTimeout(PvfExecKind::Backing, 2_000), + ExecutorParam::PvfExecTimeout(PvfExecKind::Approval, 12_000), ][..])) set_config_with_perbill {}: set_on_demand_fee_variability(RawOrigin::Root, Perbill::from_percent(100)) -- GitLab From 126f64a91ee0c0333f368fe67bb00d56d68ecb34 Mon Sep 17 00:00:00 2001 From: Javier Viola Date: Tue, 21 Nov 2023 07:38:04 -0300 Subject: [PATCH 085/199] bump zombienet version `v1.3.82` (#2396) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Includes: - bump `pjs` modules versions - re-enable test disable in https://github.com/paritytech/polkadot-sdk/pull/2294 --------- Co-authored-by: Bastian Köcher --- .gitlab-ci.yml | 4 ---- .gitlab/pipeline/zombienet.yml | 5 +++++ .gitlab/pipeline/zombienet/substrate.yml | 15 +++++++-------- .../skip-feeless-payment/src/lib.rs | 15 +++++++++------ 4 files changed, 21 insertions(+), 18 deletions(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index b485ca3317e..e698aba81d5 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -30,7 +30,6 @@ variables: RUSTY_CACHIER_COMPRESSION_METHOD: zstd NEXTEST_FAILURE_OUTPUT: immediate-final NEXTEST_SUCCESS_OUTPUT: final - ZOMBIENET_IMAGE: "docker.io/paritytech/zombienet:v1.3.80" DOCKER_IMAGES_VERSION: "${CI_COMMIT_SHA}" default: @@ -199,9 +198,6 @@ default: - if: $CI_COMMIT_REF_NAME =~ /^[0-9]+$/ # PRs - if: $CI_COMMIT_REF_NAME =~ /^gh-readonly-queue.*$/ # merge queues -.zombienet-refs: - extends: .build-refs - include: # check jobs - .gitlab/pipeline/check.yml diff --git a/.gitlab/pipeline/zombienet.yml b/.gitlab/pipeline/zombienet.yml index 64210d6a00a..fc3fd125619 100644 --- a/.gitlab/pipeline/zombienet.yml +++ b/.gitlab/pipeline/zombienet.yml @@ -1,3 +1,8 @@ +.zombienet-refs: + extends: .build-refs + variables: + ZOMBIENET_IMAGE: "docker.io/paritytech/zombienet:v1.3.82" + include: # substrate tests - .gitlab/pipeline/zombienet/substrate.yml diff --git a/.gitlab/pipeline/zombienet/substrate.yml b/.gitlab/pipeline/zombienet/substrate.yml index 6334e7db9a3..b687576267d 100644 --- a/.gitlab/pipeline/zombienet/substrate.yml +++ b/.gitlab/pipeline/zombienet/substrate.yml @@ -40,14 +40,13 @@ tags: - zombienet-polkadot-integration-test -# Skip this one until PolkadotJS includes `SkipCheckIfFeeless` extension -# zombienet-substrate-0000-block-building: -# extends: -# - .zombienet-substrate-common -# script: -# - /home/nonroot/zombie-net/scripts/ci/run-test-local-env-manager.sh -# --local-dir="${LOCAL_DIR}/0000-block-building" -# --test="block-building.zndsl" +zombienet-substrate-0000-block-building: + extends: + - .zombienet-substrate-common + script: + - /home/nonroot/zombie-net/scripts/ci/run-test-local-env-manager.sh + --local-dir="${LOCAL_DIR}/0000-block-building" + --test="block-building.zndsl" zombienet-substrate-0001-basic-warp-sync: extends: diff --git a/substrate/frame/transaction-payment/skip-feeless-payment/src/lib.rs 
b/substrate/frame/transaction-payment/skip-feeless-payment/src/lib.rs index 923c7e7ebc2..55afaaf93d7 100644 --- a/substrate/frame/transaction-payment/skip-feeless-payment/src/lib.rs +++ b/substrate/frame/transaction-payment/skip-feeless-payment/src/lib.rs @@ -77,9 +77,9 @@ pub mod pallet { /// A [`SignedExtension`] that skips the wrapped extension if the dispatchable is feeless. #[derive(Encode, Decode, Clone, Eq, PartialEq, TypeInfo)] #[scale_info(skip_type_params(T))] -pub struct SkipCheckIfFeeless(pub S, sp_std::marker::PhantomData); +pub struct SkipCheckIfFeeless(pub S, sp_std::marker::PhantomData); -impl sp_std::fmt::Debug for SkipCheckIfFeeless { +impl sp_std::fmt::Debug for SkipCheckIfFeeless { #[cfg(feature = "std")] fn fmt(&self, f: &mut sp_std::fmt::Formatter) -> sp_std::fmt::Result { write!(f, "SkipCheckIfFeeless<{:?}>", self.0.encode()) @@ -90,9 +90,8 @@ impl sp_std::fmt::Debug for SkipCheckIfFeeless SkipCheckIfFeeless { - /// utility constructor. Used only in client/factory code. - pub fn from(s: S) -> Self { +impl From for SkipCheckIfFeeless { + fn from(s: S) -> Self { Self(s, sp_std::marker::PhantomData) } } @@ -106,7 +105,11 @@ where type Call = S::Call; type AdditionalSigned = S::AdditionalSigned; type Pre = (Self::AccountId, Option<::Pre>); - const IDENTIFIER: &'static str = "SkipCheckIfFeeless"; + // From the outside this extension should be "invisible", because it just extends the wrapped + // extension with an extra check in `pre_dispatch` and `post_dispatch`. Thus, we should forward + // the identifier of the wrapped extension to let wallets see this extension as it would only be + // the wrapped extension itself. + const IDENTIFIER: &'static str = S::IDENTIFIER; fn additional_signed(&self) -> Result { self.0.additional_signed() -- GitLab From 2fd8c51ebcfcf7d10cab2a2827211583418a90a0 Mon Sep 17 00:00:00 2001 From: Michal Kucharczyk <1728078+michalkucharczyk@users.noreply.github.com> Date: Tue, 21 Nov 2023 11:51:23 +0100 Subject: [PATCH 086/199] `chain-spec-builder`: cleanup (#2174) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This PR removes: - `New`, `Generate`, `Edit` commands, - `kitchensink` dependency from the `chain-spec-builder` util. New `convert-to-raw`, `update-code` commands were added. Additionally renames the `runtime` command (which was added in #1256) to `create`. 
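As a quick illustration of the reworked CLI, the invocations below are taken from the doc comments added to `lib.rs` later in this patch (`runtime.wasm` and `patch.json` are placeholder file names; `-s` requests the raw-storage form of the chain spec):

```text
# Build a chain spec from the runtime's default genesis config,
# optionally writing that default config out (here to stdout).
chain-spec-builder create -r runtime.wasm default /dev/stdout

# Patch the runtime's default genesis config and emit the raw storage version.
chain-spec-builder create -s -r runtime.wasm patch patch.json
```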
--------- Co-authored-by: Sebastian Kunert Co-authored-by: Bastian Köcher Co-authored-by: command-bot <> --- Cargo.lock | 7 - .../bin/utils/chain-spec-builder/Cargo.toml | 7 - .../bin/utils/chain-spec-builder/bin/main.rs | 127 ++------ .../bin/utils/chain-spec-builder/src/lib.rs | 295 +++++------------- 4 files changed, 116 insertions(+), 320 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index b2d655e6f41..697f232f971 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -18315,18 +18315,11 @@ checksum = "a8f112729512f8e442d81f95a8a7ddf2b7c6b8a1a6f509a95864142b30cab2d3" name = "staging-chain-spec-builder" version = "2.0.0" dependencies = [ - "ansi_term", "clap 4.4.6", - "kitchensink-runtime", "log", - "rand 0.8.5", "sc-chain-spec", - "sc-keystore", "serde_json", - "sp-core", - "sp-keystore", "sp-tracing 10.0.0", - "staging-node-cli", ] [[package]] diff --git a/substrate/bin/utils/chain-spec-builder/Cargo.toml b/substrate/bin/utils/chain-spec-builder/Cargo.toml index 0b373b8e924..f587989e003 100644 --- a/substrate/bin/utils/chain-spec-builder/Cargo.toml +++ b/substrate/bin/utils/chain-spec-builder/Cargo.toml @@ -20,15 +20,8 @@ name = "chain-spec-builder" crate-type = ["rlib"] [dependencies] -ansi_term = "0.12.1" clap = { version = "4.4.6", features = ["derive"] } -rand = "0.8" -kitchensink-runtime = { version = "3.0.0-dev", path = "../../node/runtime" } log = "0.4.17" -node-cli = { package = "staging-node-cli", path = "../../node/cli" } sc-chain-spec = { path = "../../../client/chain-spec" } -sc-keystore = { path = "../../../client/keystore" } serde_json = "1.0.108" -sp-core = { path = "../../../primitives/core" } -sp-keystore = { path = "../../../primitives/keystore" } sp-tracing = { version = "10.0.0", path = "../../../primitives/tracing" } diff --git a/substrate/bin/utils/chain-spec-builder/bin/main.rs b/substrate/bin/utils/chain-spec-builder/bin/main.rs index 83892afd6ac..986293179a9 100644 --- a/substrate/bin/utils/chain-spec-builder/bin/main.rs +++ b/substrate/bin/utils/chain-spec-builder/bin/main.rs @@ -17,14 +17,11 @@ // along with this program. If not, see . use chain_spec_builder::{ - generate_authority_keys_and_store, generate_chain_spec, generate_chain_spec_for_runtime, - print_seeds, ChainSpecBuilder, ChainSpecBuilderCmd, EditCmd, GenerateCmd, NewCmd, VerifyCmd, + generate_chain_spec_for_runtime, ChainSpecBuilder, ChainSpecBuilderCmd, ConvertToRawCmd, + UpdateCodeCmd, VerifyCmd, }; use clap::Parser; -use node_cli::chain_spec; -use rand::{distributions::Alphanumeric, rngs::OsRng, Rng}; use sc_chain_spec::{update_code_in_json_chain_spec, GenericChainSpec}; -use sp_core::{crypto::Ss58Codec, sr25519}; use staging_chain_spec_builder as chain_spec_builder; use std::fs; @@ -32,110 +29,48 @@ fn main() -> Result<(), String> { sp_tracing::try_init_simple(); let builder = ChainSpecBuilder::parse(); - #[cfg(build_type = "debug")] - if matches!(builder.command, ChainSpecBuilderCmd::Generate(_) | ChainSpecBuilderCmd::New(_)) { - println!( - "The chain spec builder builds a chain specification that includes a Substrate runtime \ - compiled as WASM. 
To ensure proper functioning of the included runtime compile (or run) \ - the chain spec builder binary in `--release` mode.\n", - ); - } - let chain_spec_path = builder.chain_spec_path.to_path_buf(); - let mut write_chain_spec = true; - - let chain_spec_json = match builder.command { - ChainSpecBuilderCmd::Generate(GenerateCmd { - authorities, - nominators, - endowed, - keystore_path, - }) => { - let authorities = authorities.max(1); - let rand_str = || -> String { - OsRng.sample_iter(&Alphanumeric).take(32).map(char::from).collect() - }; - - let authority_seeds = (0..authorities).map(|_| rand_str()).collect::>(); - let nominator_seeds = (0..nominators).map(|_| rand_str()).collect::>(); - let endowed_seeds = (0..endowed).map(|_| rand_str()).collect::>(); - let sudo_seed = rand_str(); - - print_seeds(&authority_seeds, &nominator_seeds, &endowed_seeds, &sudo_seed); - - if let Some(keystore_path) = keystore_path { - generate_authority_keys_and_store(&authority_seeds, &keystore_path)?; - } - - let nominator_accounts = nominator_seeds - .into_iter() - .map(|seed| { - chain_spec::get_account_id_from_seed::(&seed).to_ss58check() - }) - .collect(); - - let endowed_accounts = endowed_seeds - .into_iter() - .map(|seed| { - chain_spec::get_account_id_from_seed::(&seed).to_ss58check() - }) - .collect(); - let sudo_account = - chain_spec::get_account_id_from_seed::(&sudo_seed).to_ss58check(); - - generate_chain_spec(authority_seeds, nominator_accounts, endowed_accounts, sudo_account) + match builder.command { + ChainSpecBuilderCmd::Create(cmd) => { + let chain_spec_json = generate_chain_spec_for_runtime(&cmd)?; + fs::write(chain_spec_path, chain_spec_json).map_err(|err| err.to_string())?; }, - ChainSpecBuilderCmd::New(NewCmd { - authority_seeds, - nominator_accounts, - endowed_accounts, - sudo_account, - }) => - generate_chain_spec(authority_seeds, nominator_accounts, endowed_accounts, sudo_account), - ChainSpecBuilderCmd::Runtime(cmd) => generate_chain_spec_for_runtime(&cmd), - ChainSpecBuilderCmd::Edit(EditCmd { + ChainSpecBuilderCmd::UpdateCode(UpdateCodeCmd { ref input_chain_spec, ref runtime_wasm_path, - convert_to_raw, }) => { let chain_spec = GenericChainSpec::<()>::from_json_file(input_chain_spec.clone())?; let mut chain_spec_json = - serde_json::from_str::(&chain_spec.as_json(convert_to_raw)?) + serde_json::from_str::(&chain_spec.as_json(false)?) .map_err(|e| format!("Conversion to json failed: {e}"))?; - if let Some(path) = runtime_wasm_path { - update_code_in_json_chain_spec( - &mut chain_spec_json, - &fs::read(path.as_path()) - .map_err(|e| format!("Wasm blob file could not be read: {e}"))?[..], - ); - } - - serde_json::to_string_pretty(&chain_spec_json) - .map_err(|e| format!("to pretty failed: {e}")) + update_code_in_json_chain_spec( + &mut chain_spec_json, + &fs::read(runtime_wasm_path.as_path()) + .map_err(|e| format!("Wasm blob file could not be read: {e}"))?[..], + ); + + let chain_spec_json = serde_json::to_string_pretty(&chain_spec_json) + .map_err(|e| format!("to pretty failed: {e}"))?; + fs::write(chain_spec_path, chain_spec_json).map_err(|err| err.to_string())?; }, - ChainSpecBuilderCmd::Verify(VerifyCmd { ref input_chain_spec, ref runtime_wasm_path }) => { - write_chain_spec = false; + ChainSpecBuilderCmd::ConvertToRaw(ConvertToRawCmd { ref input_chain_spec }) => { let chain_spec = GenericChainSpec::<()>::from_json_file(input_chain_spec.clone())?; - let mut chain_spec_json = + + let chain_spec_json = serde_json::from_str::(&chain_spec.as_json(true)?) 
.map_err(|e| format!("Conversion to json failed: {e}"))?; - if let Some(path) = runtime_wasm_path { - update_code_in_json_chain_spec( - &mut chain_spec_json, - &fs::read(path.as_path()) - .map_err(|e| format!("Wasm blob file could not be read: {e}"))?[..], - ); - }; - serde_json::to_string_pretty(&chain_spec_json) - .map_err(|e| format!("to pretty failed: {e}")) - }, - }?; - if write_chain_spec { - fs::write(chain_spec_path, chain_spec_json).map_err(|err| err.to_string()) - } else { - Ok(()) - } + let chain_spec_json = serde_json::to_string_pretty(&chain_spec_json) + .map_err(|e| format!("Conversion to pretty failed: {e}"))?; + fs::write(chain_spec_path, chain_spec_json).map_err(|err| err.to_string())?; + }, + ChainSpecBuilderCmd::Verify(VerifyCmd { ref input_chain_spec }) => { + let chain_spec = GenericChainSpec::<()>::from_json_file(input_chain_spec.clone())?; + let _ = serde_json::from_str::(&chain_spec.as_json(true)?) + .map_err(|e| format!("Conversion to json failed: {e}"))?; + }, + }; + Ok(()) } diff --git a/substrate/bin/utils/chain-spec-builder/src/lib.rs b/substrate/bin/utils/chain-spec-builder/src/lib.rs index 6f21b68c368..60ec0f1a656 100644 --- a/substrate/bin/utils/chain-spec-builder/src/lib.rs +++ b/substrate/bin/utils/chain-spec-builder/src/lib.rs @@ -21,31 +21,69 @@ //! A chain-spec is short for `chain-configuration`. See the [`sc-chain-spec`] for more information. //! //! Note that this binary is analogous to the `build-spec` subcommand, contained in typical -//! substrate-based nodes. This particular binary is capable of building a more sophisticated chain -//! specification that can be used with the substrate-node, ie. [`node-cli`]. +//! substrate-based nodes. This particular binary is capable of interacting with +//! [`sp-genesis-builder`] implementation of any provided runtime allowing to build chain-spec JSON +//! files. //! -//! See [`ChainSpecBuilder`] for a list of available commands. +//! See [`ChainSpecBuilderCmd`] for a list of available commands. +//! +//! ## Typical use-cases. +//! ##### Get default config from runtime. +//! +//! Query the default genesis config from the provided `runtime.wasm` and use it in the chain +//! spec. Tool can also store runtime's default genesis config in given file: +//! ```text +//! chain-spec-builder create -r runtime.wasm default /dev/stdout +//! ``` +//! +//! _Note:_ [`GenesisBuilder::create_default_config`][sp-genesis-builder-create] runtime function is called. +//! +//! +//! ##### Generate raw storage chain spec using genesis config patch. +//! +//! Patch the runtime's default genesis config with provided `patch.json` and generate raw +//! storage (`-s`) version of chain spec: +//! ```text +//! chain-spec-builder create -s -r runtime.wasm patch patch.json +//! ``` +//! +//! _Note:_ [`GenesisBuilder::build_config`][sp-genesis-builder-build] runtime function is called. +//! +//! ##### Generate raw storage chain spec using full genesis config. +//! +//! Build the chain spec using provided full genesis config json file. No defaults will be used: +//! ```text +//! chain-spec-builder create -s -r runtime.wasm full full-genesis-config.json +//! ``` +//! +//! _Note_: [`GenesisBuilder::build_config`][sp-genesis-builder-build] runtime function is called. +//! +//! ##### Generate human readable chain spec using provided genesis config patch. +//! ```text +//! chain-spec-builder create -r runtime.wasm patch patch.json +//! ``` +//! +//! ##### Generate human readable chain spec using provided full genesis config. +//! ```text +//! 
chain-spec-builder create -r runtime.wasm full full-genesis-config.json +//! ``` +//! +//! ##### Extra tools. +//! The `chain-spec-builder` provides also some extra utilities: [`VerifyCmd`], [`ConvertToRawCmd`], [`UpdateCodeCmd`]. //! //! [`sc-chain-spec`]: ../sc_chain_spec/index.html //! [`node-cli`]: ../node_cli/index.html +//! [`sp-genesis-builder`]: ../sp_genesis_builder/index.html +//! [sp-genesis-builder-create]: ../sp_genesis_builder/trait.GenesisBuilder.html#method.create_default_config +//! [sp-genesis-builder-build]: ../sp_genesis_builder/trait.GenesisBuilder.html#method.build_config -use std::{ - fs, - path::{Path, PathBuf}, -}; +use std::{fs, path::PathBuf}; -use ansi_term::Style; use clap::{Parser, Subcommand}; -use sc_chain_spec::GenesisConfigBuilderRuntimeCaller; - -use node_cli::chain_spec::{self, AccountId}; -use sc_keystore::LocalKeystore; +use sc_chain_spec::{GenericChainSpec, GenesisConfigBuilderRuntimeCaller}; use serde_json::Value; -use sp_core::crypto::{ByteArray, Ss58Codec}; -use sp_keystore::KeystorePtr; -/// A utility to easily create a testnet chain spec definition with a given set -/// of authorities and endowed accounts and/or generate random accounts. +/// A utility to easily create a chain spec definition. #[derive(Debug, Parser)] #[command(rename_all = "kebab-case")] pub struct ChainSpecBuilder { @@ -59,70 +97,25 @@ pub struct ChainSpecBuilder { #[derive(Debug, Subcommand)] #[command(rename_all = "kebab-case")] pub enum ChainSpecBuilderCmd { - New(NewCmd), - Generate(GenerateCmd), - Runtime(RuntimeCmd), - Edit(EditCmd), + Create(CreateCmd), Verify(VerifyCmd), -} - -/// Create a new chain spec with the given authorities, endowed and sudo -/// accounts. Only works for kitchen-sink runtime -#[derive(Parser, Debug)] -#[command(rename_all = "kebab-case")] -pub struct NewCmd { - /// Authority key seed. - #[arg(long, short, required = true)] - pub authority_seeds: Vec, - /// Active nominators (SS58 format), each backing a random subset of the aforementioned - /// authorities. - #[arg(long, short, default_value = "0")] - pub nominator_accounts: Vec, - /// Endowed account address (SS58 format). - #[arg(long, short)] - pub endowed_accounts: Vec, - /// Sudo account address (SS58 format). - #[arg(long, short)] - pub sudo_account: String, -} - -/// Create a new chain spec with the given number of authorities and endowed -/// accounts. Random keys will be generated as required. -#[derive(Parser, Debug)] -pub struct GenerateCmd { - /// The number of authorities. - #[arg(long, short)] - pub authorities: usize, - /// The number of nominators backing the aforementioned authorities. - /// - /// Will nominate a random subset of `authorities`. - #[arg(long, short, default_value_t = 0)] - pub nominators: usize, - /// The number of endowed accounts. - #[arg(long, short, default_value_t = 0)] - pub endowed: usize, - /// Path to use when saving generated keystores for each authority. - /// - /// At this path, a new folder will be created for each authority's - /// keystore named `auth-$i` where `i` is the authority index, i.e. - /// `auth-0`, `auth-1`, etc. - #[arg(long, short)] - pub keystore_path: Option, + UpdateCode(UpdateCodeCmd), + ConvertToRaw(ConvertToRawCmd), } /// Create a new chain spec by interacting with the provided runtime wasm blob. #[derive(Parser, Debug)] -pub struct RuntimeCmd { - /// The name of chain +pub struct CreateCmd { + /// The name of chain. #[arg(long, short = 'n', default_value = "Custom")] chain_name: String, - /// The chain id + /// The chain id. 
#[arg(long, short = 'i', default_value = "custom")] chain_id: String, - /// The path to runtime wasm blob + /// The path to runtime wasm blob. #[arg(long, short)] runtime_wasm_path: PathBuf, - /// Export chainspec as raw storage + /// Export chainspec as raw storage. #[arg(long, short = 's')] raw_storage: bool, /// Verify the genesis config. This silently generates the raw storage from genesis config. Any @@ -144,7 +137,6 @@ enum GenesisBuildAction { #[derive(Parser, Debug, Clone)] struct PatchCmd { /// The path to the runtime genesis config patch. - #[arg(long, short)] patch_path: PathBuf, } @@ -152,7 +144,6 @@ struct PatchCmd { #[derive(Parser, Debug, Clone)] struct FullCmd { /// The path to the full runtime genesis config json file. - #[arg(long, short)] config_path: PathBuf, } @@ -163,161 +154,45 @@ struct FullCmd { struct DefaultCmd { /// If provided stores the default genesis config json file at given path (in addition to /// chain-spec). - #[arg(long, short)] default_config_path: Option, } -/// Edits provided input chain spec. Input can be converted into raw storage chain-spec. The code -/// can be updated with the runtime provided in the command line. +/// Updates the code in the provided input chain spec. +/// +/// The code field of the chain spec will be updated with the runtime provided in the +/// command line. This operation supports both plain and raw formats. #[derive(Parser, Debug, Clone)] -pub struct EditCmd { - /// Chain spec to be edited - #[arg(long, short)] +pub struct UpdateCodeCmd { + /// Chain spec to be updated. pub input_chain_spec: PathBuf, - /// The path to new runtime wasm blob to be stored into chain-spec - #[arg(long, short = 'r')] - pub runtime_wasm_path: Option, - /// Convert genesis spec to raw format - #[arg(long, short = 's')] - pub convert_to_raw: bool, + /// The path to new runtime wasm blob to be stored into chain-spec. + pub runtime_wasm_path: PathBuf, } -/// Verifies provided input chain spec. If the runtime is provided verification is performed against -/// new runtime. +/// Converts the given chain spec into the raw format. #[derive(Parser, Debug, Clone)] -pub struct VerifyCmd { - /// Chain spec to be edited - #[arg(long, short)] +pub struct ConvertToRawCmd { + /// Chain spec to be converted. pub input_chain_spec: PathBuf, - /// The path to new runtime wasm blob to be stored into chain-spec - #[arg(long, short = 'r')] - pub runtime_wasm_path: Option, -} - -/// Generate the chain spec using the given seeds and accounts. 
-pub fn generate_chain_spec( - authority_seeds: Vec, - nominator_accounts: Vec, - endowed_accounts: Vec, - sudo_account: String, -) -> Result { - let parse_account = |address: String| { - AccountId::from_string(&address) - .map_err(|err| format!("Failed to parse account address: {:?}", err)) - }; - - let nominator_accounts = nominator_accounts - .into_iter() - .map(parse_account) - .collect::, String>>()?; - - let endowed_accounts = endowed_accounts - .into_iter() - .map(parse_account) - .collect::, String>>()?; - - let sudo_account = parse_account(sudo_account)?; - - let authorities = authority_seeds - .iter() - .map(AsRef::as_ref) - .map(chain_spec::authority_keys_from_seed) - .collect::>(); - - chain_spec::ChainSpec::builder(kitchensink_runtime::wasm_binary_unwrap(), Default::default()) - .with_name("Custom") - .with_id("custom") - .with_chain_type(sc_chain_spec::ChainType::Live) - .with_genesis_config_patch(chain_spec::testnet_genesis( - authorities, - nominator_accounts, - sudo_account, - Some(endowed_accounts), - )) - .build() - .as_json(false) -} - -/// Generate the authority keys and store them in the given `keystore_path`. -pub fn generate_authority_keys_and_store( - seeds: &[String], - keystore_path: &Path, -) -> Result<(), String> { - for (n, seed) in seeds.iter().enumerate() { - let keystore: KeystorePtr = - LocalKeystore::open(keystore_path.join(format!("auth-{}", n)), None) - .map_err(|err| err.to_string())? - .into(); - - let (_, _, grandpa, babe, im_online, authority_discovery, mixnet) = - chain_spec::authority_keys_from_seed(seed); - - let insert_key = |key_type, public| { - keystore - .insert(key_type, &format!("//{}", seed), public) - .map_err(|_| format!("Failed to insert key: {}", grandpa)) - }; - - insert_key(sp_core::crypto::key_types::BABE, babe.as_slice())?; - - insert_key(sp_core::crypto::key_types::GRANDPA, grandpa.as_slice())?; - - insert_key(sp_core::crypto::key_types::IM_ONLINE, im_online.as_slice())?; - - insert_key( - sp_core::crypto::key_types::AUTHORITY_DISCOVERY, - authority_discovery.as_slice(), - )?; - - insert_key(sp_core::crypto::key_types::MIXNET, mixnet.as_slice())?; - } - - Ok(()) } -/// Print the given seeds -pub fn print_seeds( - authority_seeds: &[String], - nominator_seeds: &[String], - endowed_seeds: &[String], - sudo_seed: &str, -) { - let header = Style::new().bold().underline(); - let entry = Style::new().bold(); - - println!("{}", header.paint("Authority seeds")); - - for (n, seed) in authority_seeds.iter().enumerate() { - println!("{} //{}", entry.paint(format!("auth-{}:", n)), seed); - } - - println!("{}", header.paint("Nominator seeds")); - - for (n, seed) in nominator_seeds.iter().enumerate() { - println!("{} //{}", entry.paint(format!("nom-{}:", n)), seed); - } - - println!(); - - if !endowed_seeds.is_empty() { - println!("{}", header.paint("Endowed seeds")); - for (n, seed) in endowed_seeds.iter().enumerate() { - println!("{} //{}", entry.paint(format!("endowed-{}:", n)), seed); - } - - println!(); - } - - println!("{}", header.paint("Sudo seed")); - println!("//{}", sudo_seed); +/// Verifies the provided input chain spec. +/// +/// Silently checks if given input chain spec can be converted to raw. It allows to check if all +/// RuntimeGenesisConfig fiels are properly initialized and if the json does not contain invalid +/// fields. +#[derive(Parser, Debug, Clone)] +pub struct VerifyCmd { + /// Chain spec to be verified. 
+ pub input_chain_spec: PathBuf, } -/// Processes `RuntimeCmd` and returns JSON version of `ChainSpec` -pub fn generate_chain_spec_for_runtime(cmd: &RuntimeCmd) -> Result { +/// Processes `CreateCmd` and returns JSON version of `ChainSpec`. +pub fn generate_chain_spec_for_runtime(cmd: &CreateCmd) -> Result { let code = fs::read(cmd.runtime_wasm_path.as_path()) .map_err(|e| format!("wasm blob shall be readable {e}"))?; - let builder = chain_spec::ChainSpec::builder(&code[..], Default::default()) + let builder = GenericChainSpec::<()>::builder(&code[..], Default::default()) .with_name(&cmd.chain_name[..]) .with_id(&cmd.chain_id[..]) .with_chain_type(sc_chain_spec::ChainType::Live); -- GitLab From 06190498a00ff913975fa8853c3c6a94783c4124 Mon Sep 17 00:00:00 2001 From: Julian Eager Date: Tue, 21 Nov 2023 19:10:59 +0800 Subject: [PATCH 087/199] Refactor `ValidationError` (#2406) --- .../node/core/candidate-validation/src/lib.rs | 60 +++++++++++-------- .../core/candidate-validation/src/tests.rs | 40 ++++++------- .../benches/host_prepare_rococo_runtime.rs | 3 +- .../node/core/pvf/common/src/executor_intf.rs | 19 +++--- polkadot/node/core/pvf/src/error.rs | 37 ++++++++---- polkadot/node/core/pvf/src/execute/queue.rs | 14 ++--- polkadot/node/core/pvf/src/host.rs | 39 +++++------- polkadot/node/core/pvf/src/lib.rs | 2 +- polkadot/node/core/pvf/tests/it/main.rs | 8 +-- polkadot/node/core/pvf/tests/it/process.rs | 10 ++-- 10 files changed, 122 insertions(+), 110 deletions(-) diff --git a/polkadot/node/core/candidate-validation/src/lib.rs b/polkadot/node/core/candidate-validation/src/lib.rs index f5d17af6c68..9f7b17f6129 100644 --- a/polkadot/node/core/candidate-validation/src/lib.rs +++ b/polkadot/node/core/candidate-validation/src/lib.rs @@ -24,8 +24,8 @@ #![warn(missing_docs)] use polkadot_node_core_pvf::{ - InternalValidationError, InvalidCandidate as WasmInvalidCandidate, PrepareError, - PrepareJobKind, PvfPrepData, ValidationError, ValidationHost, + InternalValidationError, InvalidCandidate as WasmInvalidCandidate, PossiblyInvalidError, + PrepareError, PrepareJobKind, PvfPrepData, ValidationError, ValidationHost, }; use polkadot_node_primitives::{ BlockData, InvalidCandidate, PoV, ValidationResult, POV_BOMB_LIMIT, VALIDATION_CODE_BOMB_LIMIT, @@ -642,7 +642,7 @@ async fn validate_candidate_exhaustive( } match result { - Err(ValidationError::InternalError(e)) => { + Err(ValidationError::Internal(e)) => { gum::warn!( target: LOG_TARGET, ?para_id, @@ -651,34 +651,29 @@ async fn validate_candidate_exhaustive( ); Err(ValidationFailed(e.to_string())) }, - Err(ValidationError::InvalidCandidate(WasmInvalidCandidate::HardTimeout)) => + Err(ValidationError::Invalid(WasmInvalidCandidate::HardTimeout)) => Ok(ValidationResult::Invalid(InvalidCandidate::Timeout)), - Err(ValidationError::InvalidCandidate(WasmInvalidCandidate::WorkerReportedInvalid(e))) => + Err(ValidationError::Invalid(WasmInvalidCandidate::WorkerReportedInvalid(e))) => Ok(ValidationResult::Invalid(InvalidCandidate::ExecutionError(e))), - Err(ValidationError::InvalidCandidate(WasmInvalidCandidate::AmbiguousWorkerDeath)) => + Err(ValidationError::PossiblyInvalid(PossiblyInvalidError::AmbiguousWorkerDeath)) => Ok(ValidationResult::Invalid(InvalidCandidate::ExecutionError( "ambiguous worker death".to_string(), ))), - Err(ValidationError::InvalidCandidate(WasmInvalidCandidate::JobError(err))) => + Err(ValidationError::PossiblyInvalid(PossiblyInvalidError::JobError(err))) => Ok(ValidationResult::Invalid(InvalidCandidate::ExecutionError(err))), - 
Err(ValidationError::InvalidCandidate(WasmInvalidCandidate::AmbiguousJobDeath(err))) => + Err(ValidationError::PossiblyInvalid(PossiblyInvalidError::AmbiguousJobDeath(err))) => Ok(ValidationResult::Invalid(InvalidCandidate::ExecutionError(format!( "ambiguous job death: {err}" )))), - Err(ValidationError::InvalidCandidate(WasmInvalidCandidate::PrepareError(e))) => { - // In principle if preparation of the `WASM` fails, the current candidate can not be the - // reason for that. So we can't say whether it is invalid or not. In addition, with - // pre-checking enabled only valid runtimes should ever get enacted, so we can be - // reasonably sure that this is some local problem on the current node. However, as this - // particular error *seems* to indicate a deterministic error, we raise a warning. + Err(ValidationError::Preparation(e)) => { gum::warn!( target: LOG_TARGET, ?para_id, ?e, "Deterministic error occurred during preparation (should have been ruled out by pre-checking phase)", ); - Err(ValidationFailed(e)) + Err(ValidationFailed(e.to_string())) }, Ok(res) => if res.head_data.hash() != candidate_receipt.descriptor.para_head { @@ -762,16 +757,31 @@ trait ValidationBackend { } match validation_result { - Err(ValidationError::InvalidCandidate( - WasmInvalidCandidate::AmbiguousWorkerDeath | - WasmInvalidCandidate::AmbiguousJobDeath(_), - )) if num_death_retries_left > 0 => num_death_retries_left -= 1, - Err(ValidationError::InvalidCandidate(WasmInvalidCandidate::JobError(_))) - if num_job_error_retries_left > 0 => - num_job_error_retries_left -= 1, - Err(ValidationError::InternalError(_)) if num_internal_retries_left > 0 => - num_internal_retries_left -= 1, - _ => break, + Err(ValidationError::PossiblyInvalid( + PossiblyInvalidError::AmbiguousWorkerDeath | + PossiblyInvalidError::AmbiguousJobDeath(_), + )) => + if num_death_retries_left > 0 { + num_death_retries_left -= 1; + } else { + break; + }, + + Err(ValidationError::PossiblyInvalid(PossiblyInvalidError::JobError(_))) => + if num_job_error_retries_left > 0 { + num_job_error_retries_left -= 1; + } else { + break; + }, + + Err(ValidationError::Internal(_)) => + if num_internal_retries_left > 0 { + num_internal_retries_left -= 1; + } else { + break; + }, + + Ok(_) | Err(ValidationError::Invalid(_) | ValidationError::Preparation(_)) => break, } // If we got a possibly transient error, retry once after a brief delay, on the diff --git a/polkadot/node/core/candidate-validation/src/tests.rs b/polkadot/node/core/candidate-validation/src/tests.rs index 5e2585d6873..11078580465 100644 --- a/polkadot/node/core/candidate-validation/src/tests.rs +++ b/polkadot/node/core/candidate-validation/src/tests.rs @@ -480,9 +480,9 @@ fn candidate_validation_bad_return_is_invalid() { let candidate_receipt = CandidateReceipt { descriptor, commitments_hash: Hash::zero() }; let v = executor::block_on(validate_candidate_exhaustive( - MockValidateCandidateBackend::with_hardcoded_result(Err( - ValidationError::InvalidCandidate(WasmInvalidCandidate::HardTimeout), - )), + MockValidateCandidateBackend::with_hardcoded_result(Err(ValidationError::Invalid( + WasmInvalidCandidate::HardTimeout, + ))), validation_data, validation_code, candidate_receipt, @@ -561,7 +561,7 @@ fn candidate_validation_one_ambiguous_error_is_valid() { let v = executor::block_on(validate_candidate_exhaustive( MockValidateCandidateBackend::with_hardcoded_result_list(vec![ - Err(ValidationError::InvalidCandidate(WasmInvalidCandidate::AmbiguousWorkerDeath)), + 
Err(ValidationError::PossiblyInvalid(PossiblyInvalidError::AmbiguousWorkerDeath)), Ok(validation_result), ]), validation_data.clone(), @@ -602,8 +602,8 @@ fn candidate_validation_multiple_ambiguous_errors_is_invalid() { let v = executor::block_on(validate_candidate_exhaustive( MockValidateCandidateBackend::with_hardcoded_result_list(vec![ - Err(ValidationError::InvalidCandidate(WasmInvalidCandidate::AmbiguousWorkerDeath)), - Err(ValidationError::InvalidCandidate(WasmInvalidCandidate::AmbiguousWorkerDeath)), + Err(ValidationError::PossiblyInvalid(PossiblyInvalidError::AmbiguousWorkerDeath)), + Err(ValidationError::PossiblyInvalid(PossiblyInvalidError::AmbiguousWorkerDeath)), ]), validation_data, validation_code, @@ -626,7 +626,7 @@ fn candidate_validation_retry_internal_errors() { vec![ Err(InternalValidationError::HostCommunication("foo".into()).into()), // Throw an AJD error, we should still retry again. - Err(ValidationError::InvalidCandidate(WasmInvalidCandidate::AmbiguousJobDeath( + Err(ValidationError::PossiblyInvalid(PossiblyInvalidError::AmbiguousJobDeath( "baz".into(), ))), // Throw another internal error. @@ -644,7 +644,7 @@ fn candidate_validation_dont_retry_internal_errors() { vec![ Err(InternalValidationError::HostCommunication("foo".into()).into()), // Throw an AWD error, we should still retry again. - Err(ValidationError::InvalidCandidate(WasmInvalidCandidate::AmbiguousWorkerDeath)), + Err(ValidationError::PossiblyInvalid(PossiblyInvalidError::AmbiguousWorkerDeath)), // Throw another internal error. Err(InternalValidationError::HostCommunication("bar".into()).into()), ], @@ -659,11 +659,11 @@ fn candidate_validation_retry_panic_errors() { let v = candidate_validation_retry_on_error_helper( PvfExecKind::Approval, vec![ - Err(ValidationError::InvalidCandidate(WasmInvalidCandidate::JobError("foo".into()))), + Err(ValidationError::PossiblyInvalid(PossiblyInvalidError::JobError("foo".into()))), // Throw an AWD error, we should still retry again. - Err(ValidationError::InvalidCandidate(WasmInvalidCandidate::AmbiguousWorkerDeath)), + Err(ValidationError::PossiblyInvalid(PossiblyInvalidError::AmbiguousWorkerDeath)), // Throw another panic error. - Err(ValidationError::InvalidCandidate(WasmInvalidCandidate::JobError("bar".into()))), + Err(ValidationError::PossiblyInvalid(PossiblyInvalidError::JobError("bar".into()))), ], ); @@ -676,11 +676,11 @@ fn candidate_validation_dont_retry_panic_errors() { let v = candidate_validation_retry_on_error_helper( PvfExecKind::Backing, vec![ - Err(ValidationError::InvalidCandidate(WasmInvalidCandidate::JobError("foo".into()))), + Err(ValidationError::PossiblyInvalid(PossiblyInvalidError::JobError("foo".into()))), // Throw an AWD error, we should still retry again. - Err(ValidationError::InvalidCandidate(WasmInvalidCandidate::AmbiguousWorkerDeath)), + Err(ValidationError::PossiblyInvalid(PossiblyInvalidError::AmbiguousWorkerDeath)), // Throw another panic error. 
- Err(ValidationError::InvalidCandidate(WasmInvalidCandidate::JobError("bar".into()))), + Err(ValidationError::PossiblyInvalid(PossiblyInvalidError::JobError("bar".into()))), ], ); @@ -758,9 +758,9 @@ fn candidate_validation_timeout_is_internal_error() { let candidate_receipt = CandidateReceipt { descriptor, commitments_hash: Hash::zero() }; let v = executor::block_on(validate_candidate_exhaustive( - MockValidateCandidateBackend::with_hardcoded_result(Err( - ValidationError::InvalidCandidate(WasmInvalidCandidate::HardTimeout), - )), + MockValidateCandidateBackend::with_hardcoded_result(Err(ValidationError::Invalid( + WasmInvalidCandidate::HardTimeout, + ))), validation_data, validation_code, candidate_receipt, @@ -852,9 +852,9 @@ fn candidate_validation_code_mismatch_is_invalid() { let (_ctx, _ctx_handle) = test_helpers::make_subsystem_context::(pool.clone()); let v = executor::block_on(validate_candidate_exhaustive( - MockValidateCandidateBackend::with_hardcoded_result(Err( - ValidationError::InvalidCandidate(WasmInvalidCandidate::HardTimeout), - )), + MockValidateCandidateBackend::with_hardcoded_result(Err(ValidationError::Invalid( + WasmInvalidCandidate::HardTimeout, + ))), validation_data, validation_code, candidate_receipt, diff --git a/polkadot/node/core/pvf/benches/host_prepare_rococo_runtime.rs b/polkadot/node/core/pvf/benches/host_prepare_rococo_runtime.rs index c02a0b595da..f2551b701c2 100644 --- a/polkadot/node/core/pvf/benches/host_prepare_rococo_runtime.rs +++ b/polkadot/node/core/pvf/benches/host_prepare_rococo_runtime.rs @@ -18,8 +18,7 @@ use criterion::{criterion_group, criterion_main, BatchSize, Criterion, SamplingMode}; use polkadot_node_core_pvf::{ - start, testing, Config, Metrics, PrepareError, PrepareJobKind, PrepareStats, PvfPrepData, - ValidationHost, + start, testing, Config, Metrics, PrepareError, PrepareJobKind, PvfPrepData, ValidationHost, }; use polkadot_primitives::ExecutorParams; use rococo_runtime::WASM_BINARY; diff --git a/polkadot/node/core/pvf/common/src/executor_intf.rs b/polkadot/node/core/pvf/common/src/executor_intf.rs index 3a1d3ac1ba0..4bafc3f4291 100644 --- a/polkadot/node/core/pvf/common/src/executor_intf.rs +++ b/polkadot/node/core/pvf/common/src/executor_intf.rs @@ -140,8 +140,7 @@ pub unsafe fn create_runtime_from_artifact_bytes( executor_params: &ExecutorParams, ) -> Result { let mut config = DEFAULT_CONFIG.clone(); - config.semantics = - params_to_wasmtime_semantics(executor_params).map_err(|err| WasmError::Other(err))?; + config.semantics = params_to_wasmtime_semantics(executor_params); sc_executor_wasmtime::create_runtime_from_artifact_bytes::( compiled_artifact_blob, @@ -149,13 +148,12 @@ pub unsafe fn create_runtime_from_artifact_bytes( ) } -pub fn params_to_wasmtime_semantics(par: &ExecutorParams) -> Result { +pub fn params_to_wasmtime_semantics(par: &ExecutorParams) -> Semantics { let mut sem = DEFAULT_CONFIG.semantics.clone(); - let mut stack_limit = if let Some(stack_limit) = sem.deterministic_stack_limit.clone() { - stack_limit - } else { - return Err("No default stack limit set".to_owned()) - }; + let mut stack_limit = sem + .deterministic_stack_limit + .expect("There is a comment to not change the default stack limit; it should always be available; qed") + .clone(); for p in par.iter() { match p { @@ -172,7 +170,7 @@ pub fn params_to_wasmtime_semantics(par: &ExecutorParams) -> Result Result, sc_executor_common::error::WasmError> { - let semantics = params_to_wasmtime_semantics(executor_params) - .map_err(|e| 
sc_executor_common::error::WasmError::Other(e))?; + let semantics = params_to_wasmtime_semantics(executor_params); sc_executor_wasmtime::prepare_runtime_artifact(blob, &semantics) } diff --git a/polkadot/node/core/pvf/src/error.rs b/polkadot/node/core/pvf/src/error.rs index 7fdb8c56ec9..442443f326e 100644 --- a/polkadot/node/core/pvf/src/error.rs +++ b/polkadot/node/core/pvf/src/error.rs @@ -19,31 +19,44 @@ use polkadot_node_core_pvf_common::error::{InternalValidationError, PrepareError /// A error raised during validation of the candidate. #[derive(Debug, Clone)] pub enum ValidationError { - /// The error was raised because the candidate is invalid. + /// Deterministic preparation issue. In practice, most of the problems should be caught by + /// prechecking, so this may be a sign of internal conditions. /// - /// Whenever we are unsure if the error was due to the candidate or not, we must vote invalid. - InvalidCandidate(InvalidCandidate), - /// Some internal error occurred. - InternalError(InternalValidationError), + /// In principle if preparation of the `WASM` fails, the current candidate cannot be the + /// reason for that. So we can't say whether it is invalid or not. In addition, with + /// pre-checking enabled only valid runtimes should ever get enacted, so we can be + /// reasonably sure that this is some local problem on the current node. However, as this + /// particular error *seems* to indicate a deterministic error, we raise a warning. + Preparation(PrepareError), + /// The error was raised because the candidate is invalid. Should vote against. + Invalid(InvalidCandidate), + /// Possibly transient issue that may resolve after retries. Should vote against when retries + /// fail. + PossiblyInvalid(PossiblyInvalidError), + /// Preparation or execution issue caused by an internal condition. Should not vote against. + Internal(InternalValidationError), } /// A description of an error raised during executing a PVF and can be attributed to the combination /// of the candidate [`polkadot_parachain_primitives::primitives::ValidationParams`] and the PVF. #[derive(Debug, Clone)] pub enum InvalidCandidate { - /// PVF preparation ended up with a deterministic error. - PrepareError(String), /// The candidate is reported to be invalid by the execution worker. The string contains the /// error message. WorkerReportedInvalid(String), + /// PVF execution (compilation is not included) took more time than was allotted. + HardTimeout, +} + +/// Possibly transient issue that may resolve after retries. +#[derive(Debug, Clone)] +pub enum PossiblyInvalidError { /// The worker process (not the job) has died during validation of a candidate. /// /// It's unlikely that this is caused by malicious code since workers spawn separate job /// processes, and those job processes are sandboxed. But, it is possible. We retry in this /// case, and if the error persists, we assume it's caused by the candidate and vote against. AmbiguousWorkerDeath, - /// PVF execution (compilation is not included) took more time than was allotted. 
- HardTimeout, /// The job process (not the worker) has died for one of the following reasons: /// /// (a) A seccomp violation occurred, most likely due to an attempt by malicious code to @@ -68,7 +81,7 @@ pub enum InvalidCandidate { impl From for ValidationError { fn from(error: InternalValidationError) -> Self { - Self::InternalError(error) + Self::Internal(error) } } @@ -77,9 +90,9 @@ impl From for ValidationError { // Here we need to classify the errors into two errors: deterministic and non-deterministic. // See [`PrepareError::is_deterministic`]. if error.is_deterministic() { - Self::InvalidCandidate(InvalidCandidate::PrepareError(error.to_string())) + Self::Preparation(error) } else { - Self::InternalError(InternalValidationError::NonDeterministicPrepareError(error)) + Self::Internal(InternalValidationError::NonDeterministicPrepareError(error)) } } } diff --git a/polkadot/node/core/pvf/src/execute/queue.rs b/polkadot/node/core/pvf/src/execute/queue.rs index 257377df3f4..a0c24fd4432 100644 --- a/polkadot/node/core/pvf/src/execute/queue.rs +++ b/polkadot/node/core/pvf/src/execute/queue.rs @@ -22,7 +22,7 @@ use crate::{ host::ResultSender, metrics::Metrics, worker_intf::{IdleWorker, WorkerHandle}, - InvalidCandidate, ValidationError, LOG_TARGET, + InvalidCandidate, PossiblyInvalidError, ValidationError, LOG_TARGET, }; use futures::{ channel::mpsc, @@ -342,27 +342,27 @@ fn handle_job_finish( }, Outcome::InvalidCandidate { err, idle_worker } => ( Some(idle_worker), - Err(ValidationError::InvalidCandidate(InvalidCandidate::WorkerReportedInvalid(err))), + Err(ValidationError::Invalid(InvalidCandidate::WorkerReportedInvalid(err))), None, ), - Outcome::InternalError { err } => (None, Err(ValidationError::InternalError(err)), None), + Outcome::InternalError { err } => (None, Err(ValidationError::Internal(err)), None), // Either the worker or the job timed out. Kill the worker in either case. Treated as // definitely-invalid, because if we timed out, there's no time left for a retry. Outcome::HardTimeout => - (None, Err(ValidationError::InvalidCandidate(InvalidCandidate::HardTimeout)), None), + (None, Err(ValidationError::Invalid(InvalidCandidate::HardTimeout)), None), // "Maybe invalid" errors (will retry). 
Outcome::WorkerIntfErr => ( None, - Err(ValidationError::InvalidCandidate(InvalidCandidate::AmbiguousWorkerDeath)), + Err(ValidationError::PossiblyInvalid(PossiblyInvalidError::AmbiguousWorkerDeath)), None, ), Outcome::JobDied { err } => ( None, - Err(ValidationError::InvalidCandidate(InvalidCandidate::AmbiguousJobDeath(err))), + Err(ValidationError::PossiblyInvalid(PossiblyInvalidError::AmbiguousJobDeath(err))), None, ), Outcome::JobError { err } => - (None, Err(ValidationError::InvalidCandidate(InvalidCandidate::JobError(err))), None), + (None, Err(ValidationError::PossiblyInvalid(PossiblyInvalidError::JobError(err))), None), }; queue.metrics.execute_finished(); diff --git a/polkadot/node/core/pvf/src/host.rs b/polkadot/node/core/pvf/src/host.rs index f67934e4171..6049f51834e 100644 --- a/polkadot/node/core/pvf/src/host.rs +++ b/polkadot/node/core/pvf/src/host.rs @@ -873,7 +873,7 @@ fn pulse_every(interval: std::time::Duration) -> impl futures::Stream #[cfg(test)] pub(crate) mod tests { use super::*; - use crate::InvalidCandidate; + use crate::PossiblyInvalidError; use assert_matches::assert_matches; use futures::future::BoxFuture; use polkadot_node_core_pvf_common::{ @@ -1211,27 +1211,27 @@ pub(crate) mod tests { ); result_tx_pvf_1_1 - .send(Err(ValidationError::InvalidCandidate(InvalidCandidate::AmbiguousWorkerDeath))) + .send(Err(ValidationError::PossiblyInvalid(PossiblyInvalidError::AmbiguousWorkerDeath))) .unwrap(); assert_matches!( result_rx_pvf_1_1.now_or_never().unwrap().unwrap(), - Err(ValidationError::InvalidCandidate(InvalidCandidate::AmbiguousWorkerDeath)) + Err(ValidationError::PossiblyInvalid(PossiblyInvalidError::AmbiguousWorkerDeath)) ); result_tx_pvf_1_2 - .send(Err(ValidationError::InvalidCandidate(InvalidCandidate::AmbiguousWorkerDeath))) + .send(Err(ValidationError::PossiblyInvalid(PossiblyInvalidError::AmbiguousWorkerDeath))) .unwrap(); assert_matches!( result_rx_pvf_1_2.now_or_never().unwrap().unwrap(), - Err(ValidationError::InvalidCandidate(InvalidCandidate::AmbiguousWorkerDeath)) + Err(ValidationError::PossiblyInvalid(PossiblyInvalidError::AmbiguousWorkerDeath)) ); result_tx_pvf_2 - .send(Err(ValidationError::InvalidCandidate(InvalidCandidate::AmbiguousWorkerDeath))) + .send(Err(ValidationError::PossiblyInvalid(PossiblyInvalidError::AmbiguousWorkerDeath))) .unwrap(); assert_matches!( result_rx_pvf_2.now_or_never().unwrap().unwrap(), - Err(ValidationError::InvalidCandidate(InvalidCandidate::AmbiguousWorkerDeath)) + Err(ValidationError::PossiblyInvalid(PossiblyInvalidError::AmbiguousWorkerDeath)) ); } @@ -1337,7 +1337,7 @@ pub(crate) mod tests { assert_matches!(result_rx.now_or_never().unwrap().unwrap(), Err(PrepareError::TimedOut)); assert_matches!( result_rx_execute.now_or_never().unwrap().unwrap(), - Err(ValidationError::InternalError(_)) + Err(ValidationError::Internal(_)) ); // Reversed case: first send multiple precheck requests, then ask for an execution. @@ -1479,7 +1479,7 @@ pub(crate) mod tests { // The result should contain the error. let result = test.poll_and_recv_result(result_rx).await; - assert_matches!(result, Err(ValidationError::InternalError(_))); + assert_matches!(result, Err(ValidationError::Internal(_))); // Submit another execute request. We shouldn't try to prepare again, yet. let (result_tx_2, result_rx_2) = oneshot::channel(); @@ -1498,7 +1498,7 @@ pub(crate) mod tests { // The result should contain the original error. 
let result = test.poll_and_recv_result(result_rx_2).await; - assert_matches!(result, Err(ValidationError::InternalError(_))); + assert_matches!(result, Err(ValidationError::Internal(_))); // Pause for enough time to reset the cooldown for this failed prepare request. futures_timer::Delay::new(PREPARE_FAILURE_COOLDOWN).await; @@ -1538,11 +1538,11 @@ pub(crate) mod tests { // Send an error for the execution here, just so we can check the result receiver is still // alive. result_tx_3 - .send(Err(ValidationError::InvalidCandidate(InvalidCandidate::AmbiguousWorkerDeath))) + .send(Err(ValidationError::PossiblyInvalid(PossiblyInvalidError::AmbiguousWorkerDeath))) .unwrap(); assert_matches!( result_rx_3.now_or_never().unwrap().unwrap(), - Err(ValidationError::InvalidCandidate(InvalidCandidate::AmbiguousWorkerDeath)) + Err(ValidationError::PossiblyInvalid(PossiblyInvalidError::AmbiguousWorkerDeath)) ); } @@ -1581,10 +1581,7 @@ pub(crate) mod tests { // The result should contain the error. let result = test.poll_and_recv_result(result_rx).await; - assert_matches!( - result, - Err(ValidationError::InvalidCandidate(InvalidCandidate::PrepareError(_))) - ); + assert_matches!(result, Err(ValidationError::Preparation(_))); // Submit another execute request. let (result_tx_2, result_rx_2) = oneshot::channel(); @@ -1603,10 +1600,7 @@ pub(crate) mod tests { // The result should contain the original error. let result = test.poll_and_recv_result(result_rx_2).await; - assert_matches!( - result, - Err(ValidationError::InvalidCandidate(InvalidCandidate::PrepareError(_))) - ); + assert_matches!(result, Err(ValidationError::Preparation(_))); // Pause for enough time to reset the cooldown for this failed prepare request. futures_timer::Delay::new(PREPARE_FAILURE_COOLDOWN).await; @@ -1628,10 +1622,7 @@ pub(crate) mod tests { // The result should still contain the original error. let result = test.poll_and_recv_result(result_rx_3).await; - assert_matches!( - result, - Err(ValidationError::InvalidCandidate(InvalidCandidate::PrepareError(_))) - ); + assert_matches!(result, Err(ValidationError::Preparation(_))); } // Test that multiple heads-up requests trigger preparation retries if the first one failed. 
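For illustration only (not part of this patch): a minimal, self-contained Rust sketch of the retry policy that the refactored error taxonomy above enables. The `SketchValidationError` enum and `should_retry` helper are hypothetical stand-ins that mirror the `Preparation` / `Invalid` / `PossiblyInvalid` / `Internal` split introduced in this diff; the real types live in `polkadot-node-core-pvf` and are not named this way.

```rust
/// Stand-in mirror of the refactored error taxonomy (hypothetical, for illustration).
#[derive(Debug)]
enum SketchValidationError {
    /// Deterministic preparation failure: vote against, never retry.
    Preparation(String),
    /// Definitely invalid candidate (e.g. hard timeout): vote against.
    Invalid(String),
    /// Possibly transient failure (ambiguous worker/job death, job error): retry,
    /// and only vote against once retries are exhausted.
    PossiblyInvalid(String),
    /// Local/internal problem: abstain rather than vote against.
    Internal(String),
}

/// Hypothetical helper: only the possibly-transient categories are worth retrying.
fn should_retry(err: &SketchValidationError, retries_left: u32) -> bool {
    matches!(
        err,
        SketchValidationError::PossiblyInvalid(_) | SketchValidationError::Internal(_)
    ) && retries_left > 0
}

fn main() {
    let transient = SketchValidationError::PossiblyInvalid("ambiguous worker death".into());
    assert!(should_retry(&transient, 1));
    assert!(!should_retry(&SketchValidationError::Invalid("hard timeout".into()), 1));
    assert!(!should_retry(&SketchValidationError::Preparation("bad wasm".into()), 3));
}
```

The point of the split, as reflected in the candidate-validation changes above, is that retry decisions can branch on the top-level variant alone: deterministic preparation errors and definitely-invalid results are never retried, while possibly-invalid and internal errors get a bounded number of retries before a final verdict.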
diff --git a/polkadot/node/core/pvf/src/lib.rs b/polkadot/node/core/pvf/src/lib.rs index 7e7a1325254..3306a2461c1 100644 --- a/polkadot/node/core/pvf/src/lib.rs +++ b/polkadot/node/core/pvf/src/lib.rs @@ -103,7 +103,7 @@ mod worker_intf; #[cfg(feature = "test-utils")] pub mod testing; -pub use error::{InvalidCandidate, ValidationError}; +pub use error::{InvalidCandidate, PossiblyInvalidError, ValidationError}; pub use host::{start, Config, ValidationHost, EXECUTE_BINARY_NAME, PREPARE_BINARY_NAME}; pub use metrics::Metrics; pub use priority::Priority; diff --git a/polkadot/node/core/pvf/tests/it/main.rs b/polkadot/node/core/pvf/tests/it/main.rs index 5bdf49cc719..4c81ac502dd 100644 --- a/polkadot/node/core/pvf/tests/it/main.rs +++ b/polkadot/node/core/pvf/tests/it/main.rs @@ -162,7 +162,7 @@ async fn execute_job_terminates_on_timeout() { .await; match result { - Err(ValidationError::InvalidCandidate(InvalidCandidate::HardTimeout)) => {}, + Err(ValidationError::Invalid(InvalidCandidate::HardTimeout)) => {}, r => panic!("{:?}", r), } @@ -202,8 +202,8 @@ async fn ensure_parallel_execution() { assert_matches!( (res1, res2), ( - Err(ValidationError::InvalidCandidate(InvalidCandidate::HardTimeout)), - Err(ValidationError::InvalidCandidate(InvalidCandidate::HardTimeout)) + Err(ValidationError::Invalid(InvalidCandidate::HardTimeout)), + Err(ValidationError::Invalid(InvalidCandidate::HardTimeout)) ) ); @@ -344,7 +344,7 @@ async fn deleting_prepared_artifact_does_not_dispute() { .await; match result { - Err(ValidationError::InvalidCandidate(InvalidCandidate::HardTimeout)) => {}, + Err(ValidationError::Invalid(InvalidCandidate::HardTimeout)) => {}, r => panic!("{:?}", r), } } diff --git a/polkadot/node/core/pvf/tests/it/process.rs b/polkadot/node/core/pvf/tests/it/process.rs index 725d060ab91..075d94373df 100644 --- a/polkadot/node/core/pvf/tests/it/process.rs +++ b/polkadot/node/core/pvf/tests/it/process.rs @@ -19,7 +19,9 @@ use super::TestHost; use assert_matches::assert_matches; -use polkadot_node_core_pvf::{InvalidCandidate, PrepareError, ValidationError}; +use polkadot_node_core_pvf::{ + InvalidCandidate, PossiblyInvalidError, PrepareError, ValidationError, +}; use polkadot_parachain_primitives::primitives::{BlockData, ValidationParams}; use procfs::process; use rusty_fork::rusty_fork_test; @@ -151,7 +153,7 @@ rusty_fork_test! { assert_matches!( result, - Err(ValidationError::InvalidCandidate(InvalidCandidate::HardTimeout)) + Err(ValidationError::Invalid(InvalidCandidate::HardTimeout)) ); }) } @@ -217,7 +219,7 @@ rusty_fork_test! { assert_matches!( result, - Err(ValidationError::InvalidCandidate(InvalidCandidate::AmbiguousWorkerDeath)) + Err(ValidationError::PossiblyInvalid(PossiblyInvalidError::AmbiguousWorkerDeath)) ); }) } @@ -288,7 +290,7 @@ rusty_fork_test! { // Note that we get a more specific error if the job died than if the whole worker died. 
assert_matches!( result, - Err(ValidationError::InvalidCandidate(InvalidCandidate::AmbiguousJobDeath(err))) + Err(ValidationError::PossiblyInvalid(PossiblyInvalidError::AmbiguousJobDeath(err))) if err == "received signal: SIGKILL" ); }) -- GitLab From 40afc77c4e6efff56649174a35384499f601f8d4 Mon Sep 17 00:00:00 2001 From: Javier Viola Date: Tue, 21 Nov 2023 08:40:55 -0300 Subject: [PATCH 088/199] Add output positional arg to undying-collator cli (#2375) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Follow up from https://github.com/paritytech/polkadot-sdk/pull/2370 to add `output` positional argument to undying-collator. cc @JoshOrndorff --------- Co-authored-by: Bastian Köcher --- .../undying/collator/src/cli.rs | 11 +++++++- .../undying/collator/src/main.rs | 26 ++++++++++++++++--- 2 files changed, 33 insertions(+), 4 deletions(-) diff --git a/polkadot/parachain/test-parachains/undying/collator/src/cli.rs b/polkadot/parachain/test-parachains/undying/collator/src/cli.rs index cd16133dbf1..d04122f2f68 100644 --- a/polkadot/parachain/test-parachains/undying/collator/src/cli.rs +++ b/polkadot/parachain/test-parachains/undying/collator/src/cli.rs @@ -18,6 +18,7 @@ use clap::Parser; use sc_cli::SubstrateCli; +use std::path::PathBuf; /// Sub-commands supported by the collator. #[derive(Debug, Parser)] @@ -34,6 +35,10 @@ pub enum Subcommand { /// Command for exporting the genesis state of the parachain #[derive(Debug, Parser)] pub struct ExportGenesisStateCommand { + /// Output file name or stdout if unspecified. + #[arg()] + pub output: Option, + /// Id of the parachain this collator collates for. #[arg(long, default_value_t = 100)] pub parachain_id: u32, @@ -50,7 +55,11 @@ pub struct ExportGenesisStateCommand { /// Command for exporting the genesis wasm file. #[derive(Debug, Parser)] -pub struct ExportGenesisWasmCommand {} +pub struct ExportGenesisWasmCommand { + /// Output file name or stdout if unspecified. + #[arg()] + pub output: Option, +} #[allow(missing_docs)] #[derive(Debug, Parser)] diff --git a/polkadot/parachain/test-parachains/undying/collator/src/main.rs b/polkadot/parachain/test-parachains/undying/collator/src/main.rs index e564e221f01..c6b338a20dc 100644 --- a/polkadot/parachain/test-parachains/undying/collator/src/main.rs +++ b/polkadot/parachain/test-parachains/undying/collator/src/main.rs @@ -22,6 +22,10 @@ use polkadot_node_subsystem::messages::{CollationGenerationMessage, CollatorProt use polkadot_primitives::Id as ParaId; use sc_cli::{Error as SubstrateCliError, SubstrateCli}; use sp_core::hexdisplay::HexDisplay; +use std::{ + fs, + io::{self, Write}, +}; use test_parachain_undying_collator::Collator; mod cli; @@ -35,14 +39,30 @@ fn main() -> Result<()> { // `pov_size` and `pvf_complexity` need to match the ones that we start the collator // with. let collator = Collator::new(params.pov_size, params.pvf_complexity); - println!("0x{:?}", HexDisplay::from(&collator.genesis_head())); + + let output_buf = + format!("0x{:?}", HexDisplay::from(&collator.genesis_head())).into_bytes(); + + if let Some(output) = params.output { + std::fs::write(output, output_buf)?; + } else { + std::io::stdout().write_all(&output_buf)?; + } Ok::<_, Error>(()) }, - Some(cli::Subcommand::ExportGenesisWasm(_params)) => { + Some(cli::Subcommand::ExportGenesisWasm(params)) => { // We pass some dummy values for `pov_size` and `pvf_complexity` as these don't // matter for `wasm` export. 
- println!("0x{:?}", HexDisplay::from(&Collator::default().validation_code())); + let output_buf = + format!("0x{:?}", HexDisplay::from(&Collator::default().validation_code())) + .into_bytes(); + + if let Some(output) = params.output { + fs::write(output, output_buf)?; + } else { + io::stdout().write_all(&output_buf)?; + } Ok(()) }, -- GitLab From 552be4800d9e4b480f79a300fc531783e04be099 Mon Sep 17 00:00:00 2001 From: Marcin S Date: Tue, 21 Nov 2023 12:52:46 +0100 Subject: [PATCH 089/199] PVF worker: switch on seccomp networking restrictions (#2221) --- polkadot/node/core/pvf/common/src/error.rs | 7 +- polkadot/node/core/pvf/common/src/execute.rs | 2 +- polkadot/node/core/pvf/common/src/lib.rs | 2 +- .../node/core/pvf/common/src/worker/mod.rs | 2 +- .../pvf/common/src/worker/security/seccomp.rs | 10 +- .../node/core/pvf/execute-worker/src/lib.rs | 26 ++-- .../node/core/pvf/prepare-worker/src/lib.rs | 24 ++-- .../node/core/pvf/src/execute/worker_intf.rs | 111 ++++++++++-------- polkadot/node/core/pvf/src/prepare/pool.rs | 4 +- .../node/core/pvf/src/prepare/worker_intf.rs | 45 ++++--- polkadot/node/core/pvf/src/security.rs | 75 ++++++++---- polkadot/node/core/pvf/tests/it/process.rs | 2 +- .../src/node/utility/pvf-host-and-workers.md | 38 +++--- 13 files changed, 202 insertions(+), 146 deletions(-) diff --git a/polkadot/node/core/pvf/common/src/error.rs b/polkadot/node/core/pvf/common/src/error.rs index 6bf05ece78e..7db7f9a5945 100644 --- a/polkadot/node/core/pvf/common/src/error.rs +++ b/polkadot/node/core/pvf/common/src/error.rs @@ -77,7 +77,7 @@ pub enum PrepareError { #[codec(index = 9)] ClearWorkerDir(String), /// The preparation job process died, due to OOM, a seccomp violation, or some other factor. - JobDied(String), + JobDied { err: String, job_pid: i32 }, #[codec(index = 10)] /// Some error occurred when interfacing with the kernel. #[codec(index = 11)] @@ -96,7 +96,7 @@ impl PrepareError { match self { Prevalidation(_) | Preparation(_) | JobError(_) | OutOfMemory => true, IoErr(_) | - JobDied(_) | + JobDied { .. } | CreateTmpFile(_) | RenameTmpFile { .. } | ClearWorkerDir(_) | @@ -119,7 +119,8 @@ impl fmt::Display for PrepareError { JobError(err) => write!(f, "panic: {}", err), TimedOut => write!(f, "prepare: timeout"), IoErr(err) => write!(f, "prepare: io error while receiving response: {}", err), - JobDied(err) => write!(f, "prepare: prepare job died: {}", err), + JobDied { err, job_pid } => + write!(f, "prepare: prepare job with pid {job_pid} died: {err}"), CreateTmpFile(err) => write!(f, "prepare: error creating tmp file: {}", err), RenameTmpFile { err, src, dest } => write!(f, "prepare: error renaming tmp file ({:?} -> {:?}): {}", src, dest, err), diff --git a/polkadot/node/core/pvf/common/src/execute.rs b/polkadot/node/core/pvf/common/src/execute.rs index 89e7c8e471a..aa1c1c53968 100644 --- a/polkadot/node/core/pvf/common/src/execute.rs +++ b/polkadot/node/core/pvf/common/src/execute.rs @@ -46,7 +46,7 @@ pub enum WorkerResponse { /// /// We cannot treat this as an internal error because malicious code may have killed the job. /// We still retry it, because in the non-malicious case it is likely spurious. - JobDied(String), + JobDied { err: String, job_pid: i32 }, /// An unexpected error occurred in the job process, e.g. failing to spawn a thread, panic, /// etc. 
/// diff --git a/polkadot/node/core/pvf/common/src/lib.rs b/polkadot/node/core/pvf/common/src/lib.rs index 282d2f7c41d..278fa3fe821 100644 --- a/polkadot/node/core/pvf/common/src/lib.rs +++ b/polkadot/node/core/pvf/common/src/lib.rs @@ -53,7 +53,7 @@ pub struct SecurityStatus { pub can_enable_landlock: bool, /// Whether the seccomp features we use are fully available on this system. pub can_enable_seccomp: bool, - // Whether we are able to unshare the user namespace and change the filesystem root. + /// Whether we are able to unshare the user namespace and change the filesystem root. pub can_unshare_user_namespace_and_change_root: bool, } diff --git a/polkadot/node/core/pvf/common/src/worker/mod.rs b/polkadot/node/core/pvf/common/src/worker/mod.rs index 86f47acccac..d4f9bbc27ea 100644 --- a/polkadot/node/core/pvf/common/src/worker/mod.rs +++ b/polkadot/node/core/pvf/common/src/worker/mod.rs @@ -219,7 +219,7 @@ pub fn run_worker( #[cfg_attr(not(target_os = "linux"), allow(unused_mut))] mut worker_dir_path: PathBuf, node_version: Option<&str>, worker_version: Option<&str>, - #[cfg_attr(not(target_os = "linux"), allow(unused_variables))] security_status: &SecurityStatus, + security_status: &SecurityStatus, mut event_loop: F, ) where F: FnMut(UnixStream, PathBuf) -> io::Result, diff --git a/polkadot/node/core/pvf/common/src/worker/security/seccomp.rs b/polkadot/node/core/pvf/common/src/worker/security/seccomp.rs index 5539ad28440..c3822d3c4c6 100644 --- a/polkadot/node/core/pvf/common/src/worker/security/seccomp.rs +++ b/polkadot/node/core/pvf/common/src/worker/security/seccomp.rs @@ -67,11 +67,9 @@ //! //! # Action on syscall violations //! -//! On syscall violations we currently only log, to make sure this works correctly before enforcing. -//! -//! In the future, when a forbidden syscall is attempted we immediately kill the process in order to -//! prevent the attacker from doing anything else. In execution, this will result in voting against -//! the candidate. +//! When a forbidden syscall is attempted we immediately kill the process in order to prevent the +//! attacker from doing anything else. In execution, this will result in voting against the +//! candidate. use crate::{ worker::{stringify_panic_payload, WorkerKind}, @@ -82,7 +80,7 @@ use std::{collections::BTreeMap, path::Path}; /// The action to take on caught syscalls. #[cfg(not(test))] -const CAUGHT_ACTION: SeccompAction = SeccompAction::Log; +const CAUGHT_ACTION: SeccompAction = SeccompAction::KillProcess; /// Don't kill the process when testing. #[cfg(test)] const CAUGHT_ACTION: SeccompAction = SeccompAction::Errno(libc::EACCES as u32); diff --git a/polkadot/node/core/pvf/execute-worker/src/lib.rs b/polkadot/node/core/pvf/execute-worker/src/lib.rs index 9ec811686b8..d82a8fca65d 100644 --- a/polkadot/node/core/pvf/execute-worker/src/lib.rs +++ b/polkadot/node/core/pvf/execute-worker/src/lib.rs @@ -222,12 +222,6 @@ pub fn worker_entrypoint( }, }; - gum::trace!( - target: LOG_TARGET, - %worker_pid, - "worker: sending response to host: {:?}", - response - ); send_response(&mut stream, response)?; } }, @@ -360,7 +354,7 @@ fn handle_child_process( /// - The response, either `Ok` or some error state. fn handle_parent_process( mut pipe_read: PipeReader, - child: Pid, + job_pid: Pid, worker_pid: u32, usage_before: Usage, timeout: Duration, @@ -373,10 +367,11 @@ fn handle_parent_process( // Should retry at any rate. 
.map_err(|err| io::Error::new(io::ErrorKind::Other, err.to_string()))?; - let status = nix::sys::wait::waitpid(child, None); + let status = nix::sys::wait::waitpid(job_pid, None); gum::trace!( target: LOG_TARGET, %worker_pid, + %job_pid, "execute worker received wait status from job: {:?}", status, ); @@ -396,6 +391,7 @@ fn handle_parent_process( gum::warn!( target: LOG_TARGET, %worker_pid, + %job_pid, "execute job took {}ms cpu time, exceeded execute timeout {}ms", cpu_tv.as_millis(), timeout.as_millis(), @@ -428,6 +424,7 @@ fn handle_parent_process( gum::warn!( target: LOG_TARGET, %worker_pid, + %job_pid, "execute job error: {}", job_error, ); @@ -443,15 +440,18 @@ fn handle_parent_process( // // The job gets SIGSYS on seccomp violations, but this signal may have been sent for some // other reason, so we still need to check for seccomp violations elsewhere. - Ok(WaitStatus::Signaled(_pid, signal, _core_dump)) => - Ok(WorkerResponse::JobDied(format!("received signal: {signal:?}"))), + Ok(WaitStatus::Signaled(_pid, signal, _core_dump)) => Ok(WorkerResponse::JobDied { + err: format!("received signal: {signal:?}"), + job_pid: job_pid.as_raw(), + }), Err(errno) => Ok(internal_error_from_errno("waitpid", errno)), // It is within an attacker's power to send an unexpected exit status. So we cannot treat // this as an internal error (which would make us abstain), but must vote against. - Ok(unexpected_wait_status) => Ok(WorkerResponse::JobDied(format!( - "unexpected status from wait: {unexpected_wait_status:?}" - ))), + Ok(unexpected_wait_status) => Ok(WorkerResponse::JobDied { + err: format!("unexpected status from wait: {unexpected_wait_status:?}"), + job_pid: job_pid.as_raw(), + }), } } diff --git a/polkadot/node/core/pvf/prepare-worker/src/lib.rs b/polkadot/node/core/pvf/prepare-worker/src/lib.rs index 34e6a78c26a..499e87c6e20 100644 --- a/polkadot/node/core/pvf/prepare-worker/src/lib.rs +++ b/polkadot/node/core/pvf/prepare-worker/src/lib.rs @@ -257,9 +257,9 @@ pub fn worker_entrypoint( handle_parent_process( pipe_reader, + worker_pid, child, temp_artifact_dest.clone(), - worker_pid, usage_before, preparation_timeout, ) @@ -506,9 +506,9 @@ fn handle_child_process( /// - If the child process timeout, it returns `PrepareError::TimedOut`. fn handle_parent_process( mut pipe_read: PipeReader, - child: Pid, - temp_artifact_dest: PathBuf, worker_pid: u32, + job_pid: Pid, + temp_artifact_dest: PathBuf, usage_before: Usage, timeout: Duration, ) -> Result { @@ -518,10 +518,11 @@ fn handle_parent_process( .read_to_end(&mut received_data) .map_err(|err| PrepareError::IoErr(err.to_string()))?; - let status = nix::sys::wait::waitpid(child, None); + let status = nix::sys::wait::waitpid(job_pid, None); gum::trace!( target: LOG_TARGET, %worker_pid, + %job_pid, "prepare worker received wait status from job: {:?}", status, ); @@ -539,6 +540,7 @@ fn handle_parent_process( gum::warn!( target: LOG_TARGET, %worker_pid, + %job_pid, "prepare job took {}ms cpu time, exceeded prepare timeout {}ms", cpu_tv.as_millis(), timeout.as_millis(), @@ -573,6 +575,7 @@ fn handle_parent_process( gum::debug!( target: LOG_TARGET, %worker_pid, + %job_pid, "worker: writing artifact to {}", temp_artifact_dest.display(), ); @@ -593,15 +596,18 @@ fn handle_parent_process( // // The job gets SIGSYS on seccomp violations, but this signal may have been sent for some // other reason, so we still need to check for seccomp violations elsewhere. 
- Ok(WaitStatus::Signaled(_pid, signal, _core_dump)) => - Err(PrepareError::JobDied(format!("received signal: {signal:?}"))), + Ok(WaitStatus::Signaled(_pid, signal, _core_dump)) => Err(PrepareError::JobDied { + err: format!("received signal: {signal:?}"), + job_pid: job_pid.as_raw(), + }), Err(errno) => Err(error_from_errno("waitpid", errno)), // An attacker can make the child process return any exit status it wants. So we can treat // all unexpected cases the same way. - Ok(unexpected_wait_status) => Err(PrepareError::JobDied(format!( - "unexpected status from wait: {unexpected_wait_status:?}" - ))), + Ok(unexpected_wait_status) => Err(PrepareError::JobDied { + err: format!("unexpected status from wait: {unexpected_wait_status:?}"), + job_pid: job_pid.as_raw(), + }), } } diff --git a/polkadot/node/core/pvf/src/execute/worker_intf.rs b/polkadot/node/core/pvf/src/execute/worker_intf.rs index bf44ba01725..4faaf13e62f 100644 --- a/polkadot/node/core/pvf/src/execute/worker_intf.rs +++ b/polkadot/node/core/pvf/src/execute/worker_intf.rs @@ -33,7 +33,7 @@ use polkadot_node_core_pvf_common::{ execute::{Handshake, WorkerResponse}, worker_dir, SecurityStatus, }; -use polkadot_parachain_primitives::primitives::ValidationResult; +use polkadot_parachain_primitives::primitives::{ValidationCodeHash, ValidationResult}; use polkadot_primitives::ExecutorParams; use std::{path::Path, time::Duration}; use tokio::{io, net::UnixStream}; @@ -156,6 +156,16 @@ pub async fn start_work( let response = futures::select! { response = recv_response(&mut stream).fuse() => { match response { + Ok(response) => + handle_response( + response, + pid, + &artifact.id.code_hash, + &artifact_path, + execution_timeout, + audit_log_file + ) + .await, Err(error) => { gum::warn!( target: LOG_TARGET, @@ -164,56 +174,9 @@ pub async fn start_work( ?error, "failed to recv an execute response", ); - // The worker died. Check if it was due to a seccomp violation. - // - // NOTE: Log, but don't change the outcome. Not all validators may have - // auditing enabled, so we don't want attackers to abuse a non-deterministic - // outcome. - for syscall in security::check_seccomp_violations_for_worker(audit_log_file, pid).await { - gum::error!( - target: LOG_TARGET, - worker_pid = %pid, - %syscall, - validation_code_hash = ?artifact.id.code_hash, - ?artifact_path, - "A forbidden syscall was attempted! This is a violation of our seccomp security policy. Report an issue ASAP!" - ); - } return Outcome::WorkerIntfErr }, - Ok(response) => { - // Check if any syscall violations occurred during the job. For now this is - // only informative, as we are not enforcing the seccomp policy yet. - for syscall in security::check_seccomp_violations_for_worker(audit_log_file, pid).await { - gum::error!( - target: LOG_TARGET, - worker_pid = %pid, - %syscall, - validation_code_hash = ?artifact.id.code_hash, - ?artifact_path, - "A forbidden syscall was attempted! This is a violation of our seccomp security policy. Report an issue ASAP!" - ); - } - - if let WorkerResponse::Ok{duration, ..} = response { - if duration > execution_timeout { - // The job didn't complete within the timeout. - gum::warn!( - target: LOG_TARGET, - worker_pid = %pid, - "execute job took {}ms cpu time, exceeded execution timeout {}ms.", - duration.as_millis(), - execution_timeout.as_millis(), - ); - - // Return a timeout error. 
- return Outcome::HardTimeout - } - } - - response - }, } }, _ = Delay::new(timeout).fuse() => { @@ -238,7 +201,7 @@ pub async fn start_work( idle_worker: IdleWorker { stream, pid, worker_dir }, }, WorkerResponse::JobTimedOut => Outcome::HardTimeout, - WorkerResponse::JobDied(err) => Outcome::JobDied { err }, + WorkerResponse::JobDied { err, job_pid: _ } => Outcome::JobDied { err }, WorkerResponse::JobError(err) => Outcome::JobError { err }, WorkerResponse::InternalError(err) => Outcome::InternalError { err }, @@ -247,6 +210,56 @@ pub async fn start_work( .await } +/// Handles the case where we successfully received response bytes on the host from the child. +/// +/// Here we know the artifact exists, but is still located in a temporary file which will be cleared +/// by [`with_worker_dir_setup`]. +async fn handle_response( + response: WorkerResponse, + worker_pid: u32, + validation_code_hash: &ValidationCodeHash, + artifact_path: &Path, + execution_timeout: Duration, + audit_log_file: Option, +) -> WorkerResponse { + if let WorkerResponse::Ok { duration, .. } = response { + if duration > execution_timeout { + // The job didn't complete within the timeout. + gum::warn!( + target: LOG_TARGET, + worker_pid, + "execute job took {}ms cpu time, exceeded execution timeout {}ms.", + duration.as_millis(), + execution_timeout.as_millis(), + ); + + // Return a timeout error. + return WorkerResponse::JobTimedOut + } + } + + if let WorkerResponse::JobDied { err: _, job_pid } = response { + // The job died. Check if it was due to a seccomp violation. + // + // NOTE: Log, but don't change the outcome. Not all validators may have + // auditing enabled, so we don't want attackers to abuse a non-deterministic + // outcome. + for syscall in security::check_seccomp_violations_for_job(audit_log_file, job_pid).await { + gum::error!( + target: LOG_TARGET, + %worker_pid, + %job_pid, + %syscall, + ?validation_code_hash, + ?artifact_path, + "A forbidden syscall was attempted! This is a violation of our seccomp security policy. Report an issue ASAP!" + ); + } + } + + response +} + /// Create a temporary file for an artifact in the worker cache, execute the given future/closure /// passing the file path in, and clean up the worker cache. /// diff --git a/polkadot/node/core/pvf/src/prepare/pool.rs b/polkadot/node/core/pvf/src/prepare/pool.rs index 21af21e5b02..4901be9fe1b 100644 --- a/polkadot/node/core/pvf/src/prepare/pool.rs +++ b/polkadot/node/core/pvf/src/prepare/pool.rs @@ -388,14 +388,14 @@ fn handle_mux( Ok(()) }, // The worker might still be usable, but we kill it just in case. - Outcome::JobDied(err) => { + Outcome::JobDied { err, job_pid } => { if attempt_retire(metrics, spawned, worker) { reply( from_pool, FromPool::Concluded { worker, rip: true, - result: Err(PrepareError::JobDied(err)), + result: Err(PrepareError::JobDied { err, job_pid }), }, )?; } diff --git a/polkadot/node/core/pvf/src/prepare/worker_intf.rs b/polkadot/node/core/pvf/src/prepare/worker_intf.rs index e7f142a46bb..318aea7295d 100644 --- a/polkadot/node/core/pvf/src/prepare/worker_intf.rs +++ b/polkadot/node/core/pvf/src/prepare/worker_intf.rs @@ -104,7 +104,7 @@ pub enum Outcome { /// The preparation job process died, due to OOM, a seccomp violation, or some other factor. /// /// The worker might still be usable, but we kill it just in case. 
- JobDied(String), + JobDied { err: String, job_pid: i32 }, } /// Given the idle token of a worker and parameters of work, communicates with the worker and @@ -160,19 +160,7 @@ pub async fn start_work( match result { // Received bytes from worker within the time limit. - Ok(Ok(prepare_worker_result)) => { - // Check if any syscall violations occurred during the job. For now this is only - // informative, as we are not enforcing the seccomp policy yet. - for syscall in security::check_seccomp_violations_for_worker(audit_log_file, pid).await { - gum::error!( - target: LOG_TARGET, - worker_pid = %pid, - %syscall, - ?pvf, - "A forbidden syscall was attempted! This is a violation of our seccomp security policy. Report an issue ASAP!" - ); - } - + Ok(Ok(prepare_worker_result)) => handle_response( metrics, IdleWorker { stream, pid, worker_dir }, @@ -182,9 +170,9 @@ pub async fn start_work( &pvf, &cache_path, preparation_timeout, + audit_log_file, ) - .await - }, + .await, Ok(Err(err)) => { // Communication error within the time limit. gum::warn!( @@ -221,15 +209,36 @@ async fn handle_response( worker_pid: u32, tmp_file: PathBuf, pvf: &PvfPrepData, - cache_path: &PathBuf, + cache_path: &Path, preparation_timeout: Duration, + audit_log_file: Option, ) -> Outcome { let PrepareWorkerSuccess { checksum, stats: PrepareStats { cpu_time_elapsed, memory_stats } } = match result.clone() { Ok(result) => result, // Timed out on the child. This should already be logged by the child. Err(PrepareError::TimedOut) => return Outcome::TimedOut, - Err(PrepareError::JobDied(err)) => return Outcome::JobDied(err), + Err(PrepareError::JobDied { err, job_pid }) => { + // The job died. Check if it was due to a seccomp violation. + // + // NOTE: Log, but don't change the outcome. Not all validators may have + // auditing enabled, so we don't want attackers to abuse a non-deterministic + // outcome. + for syscall in + security::check_seccomp_violations_for_job(audit_log_file, job_pid).await + { + gum::error!( + target: LOG_TARGET, + %worker_pid, + %job_pid, + %syscall, + ?pvf, + "A forbidden syscall was attempted! This is a violation of our seccomp security policy. Report an issue ASAP!" 
+ ); + } + + return Outcome::JobDied { err, job_pid } + }, Err(PrepareError::OutOfMemory) => return Outcome::OutOfMemory, Err(err) => return Outcome::Concluded { worker, result: Err(err) }, }; diff --git a/polkadot/node/core/pvf/src/security.rs b/polkadot/node/core/pvf/src/security.rs index 0c0c5f40166..8c06c68392f 100644 --- a/polkadot/node/core/pvf/src/security.rs +++ b/polkadot/node/core/pvf/src/security.rs @@ -178,9 +178,15 @@ async fn check_can_unshare_user_namespace_and_change_root( let stderr = std::str::from_utf8(&output.stderr) .expect("child process writes a UTF-8 string to stderr; qed") .trim(); - Err(SecureModeError::CannotUnshareUserNamespaceAndChangeRoot( - format!("not available: {}", stderr) - )) + if stderr.is_empty() { + Err(SecureModeError::CannotUnshareUserNamespaceAndChangeRoot( + "not available".into() + )) + } else { + Err(SecureModeError::CannotUnshareUserNamespaceAndChangeRoot( + format!("not available: {}", stderr) + )) + } }, Err(err) => Err(SecureModeError::CannotUnshareUserNamespaceAndChangeRoot( @@ -208,16 +214,25 @@ async fn check_landlock( if #[cfg(target_os = "linux")] { match tokio::process::Command::new(prepare_worker_program_path) .arg("--check-can-enable-landlock") - .status() + .output() .await { - Ok(status) if status.success() => Ok(()), - Ok(_status) => { + Ok(output) if output.status.success() => Ok(()), + Ok(output) => { let abi = polkadot_node_core_pvf_common::worker::security::landlock::LANDLOCK_ABI as u8; - Err(SecureModeError::CannotEnableLandlock( - format!("landlock ABI {} not available", abi) - )) + let stderr = std::str::from_utf8(&output.stderr) + .expect("child process writes a UTF-8 string to stderr; qed") + .trim(); + if stderr.is_empty() { + Err(SecureModeError::CannotEnableLandlock( + format!("landlock ABI {} not available", abi) + )) + } else { + Err(SecureModeError::CannotEnableLandlock( + format!("not available: {}", stderr) + )) + } }, Err(err) => Err(SecureModeError::CannotEnableLandlock( @@ -238,7 +253,7 @@ async fn check_landlock( /// to running the check in a worker, we try it... in a worker. The expected return status is 0 on /// success and -1 on failure. async fn check_seccomp( - #[cfg_attr(not(target_os = "linux"), allow(unused_variables))] + #[cfg_attr(not(all(target_os = "linux", target_arch = "x86_64")), allow(unused_variables))] prepare_worker_program_path: &Path, ) -> SecureModeResult { cfg_if::cfg_if! { @@ -247,14 +262,24 @@ async fn check_seccomp( if #[cfg(target_arch = "x86_64")] { match tokio::process::Command::new(prepare_worker_program_path) .arg("--check-can-enable-seccomp") - .status() + .output() .await { - Ok(status) if status.success() => Ok(()), - Ok(_status) => - Err(SecureModeError::CannotEnableSeccomp( - "not available".into() - )), + Ok(output) if output.status.success() => Ok(()), + Ok(output) => { + let stderr = std::str::from_utf8(&output.stderr) + .expect("child process writes a UTF-8 string to stderr; qed") + .trim(); + if stderr.is_empty() { + Err(SecureModeError::CannotEnableSeccomp( + "not available".into() + )) + } else { + Err(SecureModeError::CannotEnableSeccomp( + format!("not available: {}", stderr) + )) + } + }, Err(err) => Err(SecureModeError::CannotEnableSeccomp( format!("could not start child process: {}", err) @@ -320,25 +345,25 @@ impl AuditLogFile { } } -/// Check if a seccomp violation occurred for the given worker. As the syslog may be in a different -/// location, or seccomp auditing may be disabled, this function provides a best-effort attempt -/// only. 
+/// Check if a seccomp violation occurred for the given job process. As the syslog may be in a +/// different location, or seccomp auditing may be disabled, this function provides a best-effort +/// attempt only. /// /// The `audit_log_file` must have been obtained before the job started. It only allows reading /// entries that were written since it was obtained, so that we do not consider events from previous /// processes with the same pid. This can still be racy, but it's unlikely and fine for a /// best-effort attempt. -pub async fn check_seccomp_violations_for_worker( +pub async fn check_seccomp_violations_for_job( audit_log_file: Option, - worker_pid: u32, + job_pid: i32, ) -> Vec { - let audit_event_pid_field = format!("pid={worker_pid}"); + let audit_event_pid_field = format!("pid={job_pid}"); let audit_log_file = match audit_log_file { Some(file) => { - gum::debug!( + gum::trace!( target: LOG_TARGET, - %worker_pid, + %job_pid, audit_log_path = ?file.path, "checking audit log for seccomp violations", ); @@ -347,7 +372,7 @@ pub async fn check_seccomp_violations_for_worker( None => { gum::warn!( target: LOG_TARGET, - %worker_pid, + %job_pid, "could not open either {AUDIT_LOG_PATH} or {SYSLOG_PATH} for reading audit logs" ); return vec![] diff --git a/polkadot/node/core/pvf/tests/it/process.rs b/polkadot/node/core/pvf/tests/it/process.rs index 075d94373df..b742acb15d0 100644 --- a/polkadot/node/core/pvf/tests/it/process.rs +++ b/polkadot/node/core/pvf/tests/it/process.rs @@ -248,7 +248,7 @@ rusty_fork_test! { // Note that we get a more specific error if the job died than if the whole worker died. assert_matches!( result, - Err(PrepareError::JobDied(err)) if err == "received signal: SIGKILL" + Err(PrepareError::JobDied{ err, job_pid: _ }) if err == "received signal: SIGKILL" ); }) } diff --git a/polkadot/roadmap/implementers-guide/src/node/utility/pvf-host-and-workers.md b/polkadot/roadmap/implementers-guide/src/node/utility/pvf-host-and-workers.md index 74d88ba3ad9..56bdd48bc0c 100644 --- a/polkadot/roadmap/implementers-guide/src/node/utility/pvf-host-and-workers.md +++ b/polkadot/roadmap/implementers-guide/src/node/utility/pvf-host-and-workers.md @@ -38,16 +38,22 @@ getting backed and honest backers getting slashed. We currently know of the following specific cases that will lead to a retried execution request: -1. **OOM:** The host might have been temporarily low on memory due to other - processes running on the same machine. **NOTE:** This case will lead to - voting against the candidate (and possibly a dispute) if the retry is still - not successful. -2. **Artifact missing:** The prepared artifact might have been deleted due to +1. **OOM:** We have memory limits to try to prevent attackers from exhausting + host memory. If the memory limit is hit, we kill the job process and retry + the job. Alternatively, the host might have been temporarily low on memory + due to other processes running on the same machine. **NOTE:** This case will + lead to voting against the candidate (and possibly a dispute) if the retry is + still not successful. +2. **Syscall violations:** If the job attempts a system call that is blocked by + the sandbox's security policy, the job process is immediately killed and we + retry. **NOTE:** In the future, if we have a proper way to detect that the + job died due to a security violation, it might make sense not to retry in + this case. +3. **Artifact missing:** The prepared artifact might have been deleted due to operator error or some bug in the system. -3. 
**Job errors:** For example, the worker thread panicked for some - indeterminate reason, which may or may not be independent of the candidate or - PVF. -4. **Internal errors:** See "Internal Errors" section. In this case, after the +4. **Job errors:** For example, the job process panicked for some indeterminate + reason, which may or may not be independent of the candidate or PVF. +5. **Internal errors:** See "Internal Errors" section. In this case, after the retry we abstain from voting. ### Preparation timeouts @@ -159,16 +165,14 @@ data on the host machine. *Currently this is only supported on Linux.* - +### Restricting networking - +We also disable networking on PVF threads by disabling certain syscalls, such as +the creation of sockets. This prevents attackers from either downloading +payloads or communicating sensitive data from the validator's machine to the +outside world. - - - - - - +*Currently this is only supported on Linux.* ### Clearing env vars -- GitLab From b25d29a50247f284cde5b24e270b6db7b4a81e7e Mon Sep 17 00:00:00 2001 From: Michal Kucharczyk <1728078+michalkucharczyk@users.noreply.github.com> Date: Tue, 21 Nov 2023 15:22:42 +0100 Subject: [PATCH 090/199] cumulus-consensus-common: block import: `delayed_best_block` flag added (#2001) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This PR adds the `delayed_best_block` flag to `ParachainBlockImport`. If not set, the `params.fork_choice` is not updated (to `ForkChoiceStrategy::Custom`) during the block import. When `delayed_best_block` is set to `false` all parachain blocks on the [longest fork](https://github.com/paritytech/polkadot-sdk/blob/552be4800d9e4b480f79a300fc531783e04be099/substrate/client/service/src/client/client.rs#L708-L709) will be notified as the best block, allowing transaction pool to be updated with every imported block. Otherwise imported blocks will not be notified as best blocks (`fork_choice=ForkChoiceStrategy::Custom(false)`), and transaction pool would be updated only with best block received from relay-chain. Improvement for: #1202 --------- Co-authored-by: Bastian Köcher --- cumulus/client/consensus/common/src/lib.rs | 39 +++++++++++++++----- cumulus/client/consensus/common/src/tests.rs | 6 ++- 2 files changed, 33 insertions(+), 12 deletions(-) diff --git a/cumulus/client/consensus/common/src/lib.rs b/cumulus/client/consensus/common/src/lib.rs index 08bceabb2bd..cebe34e7ea5 100644 --- a/cumulus/client/consensus/common/src/lib.rs +++ b/cumulus/client/consensus/common/src/lib.rs @@ -111,12 +111,15 @@ impl ParachainConsensus for Box + Send + /// Parachain specific block import. /// -/// This is used to set `block_import_params.fork_choice` to `false` as long as the block origin is -/// not `NetworkInitialSync`. The best block for parachains is determined by the relay chain. -/// Meaning we will update the best block, as it is included by the relay-chain. +/// Specialized block import for parachains. It supports to delay setting the best block until the +/// relay chain has included a candidate in its best block. By default the delayed best block +/// setting is disabled. The block import also monitors the imported blocks and prunes by default if +/// there are too many blocks at the same height. Too many blocks at the same height can for example +/// happen if the relay chain is rejecting the parachain blocks in the validation. 
pub struct ParachainBlockImport { inner: BI, monitor: Option>>, + delayed_best_block: bool, } impl> ParachainBlockImport { @@ -141,13 +144,27 @@ impl> ParachainBlockImport let monitor = level_limit.map(|level_limit| SharedData::new(LevelMonitor::new(level_limit, backend))); - Self { inner, monitor } + Self { inner, monitor, delayed_best_block: false } + } + + /// Create a new instance which delays setting the best block. + /// + /// The number of leaves per level limit is set to `LevelLimit::Default`. + pub fn new_with_delayed_best_block(inner: BI, backend: Arc) -> Self { + Self { + delayed_best_block: true, + ..Self::new_with_limit(inner, backend, LevelLimit::Default) + } } } impl Clone for ParachainBlockImport { fn clone(&self) -> Self { - ParachainBlockImport { inner: self.inner.clone(), monitor: self.monitor.clone() } + ParachainBlockImport { + inner: self.inner.clone(), + monitor: self.monitor.clone(), + delayed_best_block: self.delayed_best_block, + } } } @@ -182,11 +199,13 @@ where params.finalized = true; } - // Best block is determined by the relay chain, or if we are doing the initial sync - // we import all blocks as new best. - params.fork_choice = Some(sc_consensus::ForkChoiceStrategy::Custom( - params.origin == sp_consensus::BlockOrigin::NetworkInitialSync, - )); + if self.delayed_best_block { + // Best block is determined by the relay chain, or if we are doing the initial sync + // we import all blocks as new best. + params.fork_choice = Some(sc_consensus::ForkChoiceStrategy::Custom( + params.origin == sp_consensus::BlockOrigin::NetworkInitialSync, + )); + } let maybe_lock = self.monitor.as_ref().map(|monitor_lock| { let mut monitor = monitor_lock.shared_data_locked(); diff --git a/cumulus/client/consensus/common/src/tests.rs b/cumulus/client/consensus/common/src/tests.rs index 9658a0add79..597d1ab2acc 100644 --- a/cumulus/client/consensus/common/src/tests.rs +++ b/cumulus/client/consensus/common/src/tests.rs @@ -1124,7 +1124,8 @@ fn find_potential_parents_aligned_with_pending() { let backend = Arc::new(Backend::new_test(1000, 1)); let client = Arc::new(TestClientBuilder::with_backend(backend.clone()).build()); - let mut para_import = ParachainBlockImport::new(client.clone(), backend.clone()); + let mut para_import = + ParachainBlockImport::new_with_delayed_best_block(client.clone(), backend.clone()); let relay_parent = relay_hash_from_block_num(10); // Choose different relay parent for alternative chain to get new hashes. @@ -1279,7 +1280,8 @@ fn find_potential_parents_aligned_no_pending() { let backend = Arc::new(Backend::new_test(1000, 1)); let client = Arc::new(TestClientBuilder::with_backend(backend.clone()).build()); - let mut para_import = ParachainBlockImport::new(client.clone(), backend.clone()); + let mut para_import = + ParachainBlockImport::new_with_delayed_best_block(client.clone(), backend.clone()); let relay_parent = relay_hash_from_block_num(10); // Choose different relay parent for alternative chain to get new hashes. 
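A minimal usage sketch for the constructor added in this patch (editorial example, not part of the diff above; `client` and `backend` are assumed to be the usual Substrate client and backend handles a node service already holds):

```rust
// Default behaviour after this patch: the fork choice is left untouched, so every
// imported block on the longest fork is announced as the new best block, keeping
// the transaction pool updated with each imported block.
let block_import = ParachainBlockImport::new(client.clone(), backend.clone());

// Opt-in delayed behaviour: blocks are imported with a custom fork choice and only
// become best once the relay chain includes the corresponding candidate.
let delayed_block_import =
    ParachainBlockImport::new_with_delayed_best_block(client.clone(), backend.clone());
```

Which constructor a collator wires up depends on whether it wants the transaction pool refreshed on every imported block (the new default) or only on the best block confirmed by the relay chain (the previous behaviour, now opt-in via `delayed_best_block`).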
-- GitLab From b3841b6b71cd3707d80e90492059d1a2e3bd33a6 Mon Sep 17 00:00:00 2001 From: Francisco Aguirre Date: Tue, 21 Nov 2023 16:09:40 +0100 Subject: [PATCH 091/199] Different XCM builders, default one requires fee payment (#2253) Adding on top of the new builder pattern for creating XCM programs, I'm adding some more APIs: ```rust let paying_fees: Xcm<()> = Xcm::builder() // Only allow paying for fees .withdraw_asset() // First instruction has to load the holding register .buy_execution() // Second instruction has to be `buy_execution` .build(); let paying_fees_invalid: Xcm<()> = Xcm::builder() .withdraw_asset() .build(); // Invalid, need to pay for fees let not_paying_fees: Xcm<()> = Xcm::builder_unpaid() .unpaid_execution() // Needed .withdraw_asset() .deposit_asset() .build(); let all_goes: Xcm<()> = Xcm::builder_unsafe() // You can do anything .withdraw_asset() .deposit_asset() .build(); ``` The invalid bits are because the methods don't even exist on the types that you'd want to call them on. --------- Co-authored-by: command-bot <> --- Cargo.lock | 1 + polkadot/xcm/procedural/Cargo.toml | 1 + .../xcm/procedural/src/builder_pattern.rs | 287 ++++++++++++++++-- polkadot/xcm/procedural/src/lib.rs | 6 +- .../xcm/procedural/tests/builder_pattern.rs | 81 +++++ polkadot/xcm/procedural/tests/ui.rs | 2 +- .../badly_formatted_attribute.rs | 32 ++ .../badly_formatted_attribute.stderr | 5 + .../buy_execution_named_fields.rs | 30 ++ .../buy_execution_named_fields.stderr | 5 + .../loads_holding_no_operands.rs | 32 ++ .../loads_holding_no_operands.stderr | 6 + .../ui/builder_pattern/no_buy_execution.rs | 29 ++ .../builder_pattern/no_buy_execution.stderr | 6 + .../ui/builder_pattern/no_unpaid_execution.rs | 29 ++ .../no_unpaid_execution.stderr | 6 + .../builder_pattern/unexpected_attribute.rs | 32 ++ .../unexpected_attribute.stderr | 5 + .../unpaid_execution_named_fields.rs | 30 ++ .../unpaid_execution_named_fields.stderr | 5 + .../wrong_target.rs} | 0 .../wrong_target.stderr} | 2 +- polkadot/xcm/src/v3/mod.rs | 4 + polkadot/xcm/xcm-simulator/example/src/lib.rs | 19 -- prdoc/pr_2253.prdoc | 24 ++ 25 files changed, 625 insertions(+), 54 deletions(-) create mode 100644 polkadot/xcm/procedural/tests/builder_pattern.rs create mode 100644 polkadot/xcm/procedural/tests/ui/builder_pattern/badly_formatted_attribute.rs create mode 100644 polkadot/xcm/procedural/tests/ui/builder_pattern/badly_formatted_attribute.stderr create mode 100644 polkadot/xcm/procedural/tests/ui/builder_pattern/buy_execution_named_fields.rs create mode 100644 polkadot/xcm/procedural/tests/ui/builder_pattern/buy_execution_named_fields.stderr create mode 100644 polkadot/xcm/procedural/tests/ui/builder_pattern/loads_holding_no_operands.rs create mode 100644 polkadot/xcm/procedural/tests/ui/builder_pattern/loads_holding_no_operands.stderr create mode 100644 polkadot/xcm/procedural/tests/ui/builder_pattern/no_buy_execution.rs create mode 100644 polkadot/xcm/procedural/tests/ui/builder_pattern/no_buy_execution.stderr create mode 100644 polkadot/xcm/procedural/tests/ui/builder_pattern/no_unpaid_execution.rs create mode 100644 polkadot/xcm/procedural/tests/ui/builder_pattern/no_unpaid_execution.stderr create mode 100644 polkadot/xcm/procedural/tests/ui/builder_pattern/unexpected_attribute.rs create mode 100644 polkadot/xcm/procedural/tests/ui/builder_pattern/unexpected_attribute.stderr create mode 100644 polkadot/xcm/procedural/tests/ui/builder_pattern/unpaid_execution_named_fields.rs create mode 100644 
polkadot/xcm/procedural/tests/ui/builder_pattern/unpaid_execution_named_fields.stderr rename polkadot/xcm/procedural/tests/ui/{builder_pattern.rs => builder_pattern/wrong_target.rs} (100%) rename polkadot/xcm/procedural/tests/ui/{builder_pattern.stderr => builder_pattern/wrong_target.stderr} (63%) create mode 100644 prdoc/pr_2253.prdoc diff --git a/Cargo.lock b/Cargo.lock index 697f232f971..88cefe7bbef 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -21512,6 +21512,7 @@ dependencies = [ "Inflector", "proc-macro2", "quote", + "staging-xcm", "syn 2.0.38", "trybuild", ] diff --git a/polkadot/xcm/procedural/Cargo.toml b/polkadot/xcm/procedural/Cargo.toml index 33c2a94be0e..8ab27c91dae 100644 --- a/polkadot/xcm/procedural/Cargo.toml +++ b/polkadot/xcm/procedural/Cargo.toml @@ -18,3 +18,4 @@ Inflector = "0.11.4" [dev-dependencies] trybuild = { version = "1.0.74", features = ["diff"] } +xcm = { package = "staging-xcm", path = ".." } diff --git a/polkadot/xcm/procedural/src/builder_pattern.rs b/polkadot/xcm/procedural/src/builder_pattern.rs index ebad54e972b..1cb795ea9b2 100644 --- a/polkadot/xcm/procedural/src/builder_pattern.rs +++ b/polkadot/xcm/procedural/src/builder_pattern.rs @@ -17,56 +17,83 @@ //! Derive macro for creating XCMs with a builder pattern use inflector::Inflector; -use proc_macro::TokenStream; use proc_macro2::TokenStream as TokenStream2; use quote::{format_ident, quote}; use syn::{ - parse_macro_input, Data, DeriveInput, Error, Expr, ExprLit, Fields, Lit, Meta, MetaNameValue, + Data, DataEnum, DeriveInput, Error, Expr, ExprLit, Fields, Ident, Lit, Meta, MetaNameValue, + Result, Variant, }; -pub fn derive(input: TokenStream) -> TokenStream { - let input = parse_macro_input!(input as DeriveInput); - let builder_impl = match &input.data { - Data::Enum(data_enum) => generate_methods_for_enum(input.ident, data_enum), - _ => - return Error::new_spanned(&input, "Expected the `Instruction` enum") - .to_compile_error() - .into(), +pub fn derive(input: DeriveInput) -> Result { + let data_enum = match &input.data { + Data::Enum(data_enum) => data_enum, + _ => return Err(Error::new_spanned(&input, "Expected the `Instruction` enum")), }; + let builder_raw_impl = generate_builder_raw_impl(&input.ident, data_enum); + let builder_impl = generate_builder_impl(&input.ident, data_enum)?; + let builder_unpaid_impl = generate_builder_unpaid_impl(&input.ident, data_enum)?; let output = quote! 
{ - pub struct XcmBuilder(Vec>); + /// A trait for types that track state inside the XcmBuilder + pub trait XcmBuilderState {} + + /// Access to all the instructions + pub enum AnythingGoes {} + /// You need to pay for execution + pub enum PaymentRequired {} + /// The holding register was loaded, now to buy execution + pub enum LoadedHolding {} + /// Need to explicitly state it won't pay for fees + pub enum ExplicitUnpaidRequired {} + + impl XcmBuilderState for AnythingGoes {} + impl XcmBuilderState for PaymentRequired {} + impl XcmBuilderState for LoadedHolding {} + impl XcmBuilderState for ExplicitUnpaidRequired {} + + /// Type used to build XCM programs + pub struct XcmBuilder { + pub(crate) instructions: Vec>, + pub state: core::marker::PhantomData, + } + impl Xcm { - pub fn builder() -> XcmBuilder { - XcmBuilder::(Vec::new()) + pub fn builder() -> XcmBuilder { + XcmBuilder:: { + instructions: Vec::new(), + state: core::marker::PhantomData, + } + } + pub fn builder_unpaid() -> XcmBuilder { + XcmBuilder:: { + instructions: Vec::new(), + state: core::marker::PhantomData, + } + } + pub fn builder_unsafe() -> XcmBuilder { + XcmBuilder:: { + instructions: Vec::new(), + state: core::marker::PhantomData, + } } } #builder_impl + #builder_unpaid_impl + #builder_raw_impl }; - output.into() + Ok(output) } -fn generate_methods_for_enum(name: syn::Ident, data_enum: &syn::DataEnum) -> TokenStream2 { +fn generate_builder_raw_impl(name: &Ident, data_enum: &DataEnum) -> TokenStream2 { let methods = data_enum.variants.iter().map(|variant| { let variant_name = &variant.ident; let method_name_string = &variant_name.to_string().to_snake_case(); let method_name = syn::Ident::new(&method_name_string, variant_name.span()); - let docs: Vec<_> = variant - .attrs - .iter() - .filter_map(|attr| match &attr.meta { - Meta::NameValue(MetaNameValue { - value: Expr::Lit(ExprLit { lit: Lit::Str(literal), .. }), - .. - }) if attr.path().is_ident("doc") => Some(literal.value()), - _ => None, - }) - .map(|doc| syn::parse_str::(&format!("/// {}", doc)).unwrap()) - .collect(); + let docs = get_doc_comments(&variant); let method = match &variant.fields { Fields::Unit => { quote! { pub fn #method_name(mut self) -> Self { - self.0.push(#name::::#variant_name); + self.instructions.push(#name::::#variant_name); self } } @@ -81,7 +108,7 @@ fn generate_methods_for_enum(name: syn::Ident, data_enum: &syn::DataEnum) -> Tok let arg_types: Vec<_> = fields.unnamed.iter().map(|field| &field.ty).collect(); quote! { pub fn #method_name(mut self, #(#arg_names: #arg_types),*) -> Self { - self.0.push(#name::::#variant_name(#(#arg_names),*)); + self.instructions.push(#name::::#variant_name(#(#arg_names),*)); self } } @@ -91,7 +118,7 @@ fn generate_methods_for_enum(name: syn::Ident, data_enum: &syn::DataEnum) -> Tok let arg_types: Vec<_> = fields.named.iter().map(|field| &field.ty).collect(); quote! { pub fn #method_name(mut self, #(#arg_names: #arg_types),*) -> Self { - self.0.push(#name::::#variant_name { #(#arg_names),* }); + self.instructions.push(#name::::#variant_name { #(#arg_names),* }); self } } @@ -103,13 +130,209 @@ fn generate_methods_for_enum(name: syn::Ident, data_enum: &syn::DataEnum) -> Tok } }); let output = quote! 
{ - impl XcmBuilder { + impl XcmBuilder { #(#methods)* pub fn build(self) -> Xcm { - Xcm(self.0) + Xcm(self.instructions) } } }; output } + +fn generate_builder_impl(name: &Ident, data_enum: &DataEnum) -> Result { + // We first require an instruction that load the holding register + let load_holding_variants = data_enum + .variants + .iter() + .map(|variant| { + let maybe_builder_attr = variant.attrs.iter().find(|attr| match attr.meta { + Meta::List(ref list) => { + return list.path.is_ident("builder"); + }, + _ => false, + }); + let builder_attr = match maybe_builder_attr { + Some(builder) => builder.clone(), + None => return Ok(None), /* It's not going to be an instruction that loads the + * holding register */ + }; + let Meta::List(ref list) = builder_attr.meta else { unreachable!("We checked before") }; + let inner_ident: Ident = syn::parse2(list.tokens.clone().into()).map_err(|_| { + Error::new_spanned(&builder_attr, "Expected `builder(loads_holding)`") + })?; + let ident_to_match: Ident = syn::parse_quote!(loads_holding); + if inner_ident == ident_to_match { + Ok(Some(variant)) + } else { + Err(Error::new_spanned(&builder_attr, "Expected `builder(loads_holding)`")) + } + }) + .collect::>>()?; + + let load_holding_methods = load_holding_variants + .into_iter() + .flatten() + .map(|variant| { + let variant_name = &variant.ident; + let method_name_string = &variant_name.to_string().to_snake_case(); + let method_name = syn::Ident::new(&method_name_string, variant_name.span()); + let docs = get_doc_comments(&variant); + let method = match &variant.fields { + Fields::Unnamed(fields) => { + let arg_names: Vec<_> = fields + .unnamed + .iter() + .enumerate() + .map(|(index, _)| format_ident!("arg{}", index)) + .collect(); + let arg_types: Vec<_> = fields.unnamed.iter().map(|field| &field.ty).collect(); + quote! { + #(#docs)* + pub fn #method_name(self, #(#arg_names: #arg_types),*) -> XcmBuilder { + let mut new_instructions = self.instructions; + new_instructions.push(#name::::#variant_name(#(#arg_names),*)); + XcmBuilder { + instructions: new_instructions, + state: core::marker::PhantomData, + } + } + } + }, + Fields::Named(fields) => { + let arg_names: Vec<_> = fields.named.iter().map(|field| &field.ident).collect(); + let arg_types: Vec<_> = fields.named.iter().map(|field| &field.ty).collect(); + quote! { + #(#docs)* + pub fn #method_name(self, #(#arg_names: #arg_types),*) -> XcmBuilder { + let mut new_instructions = self.instructions; + new_instructions.push(#name::::#variant_name { #(#arg_names),* }); + XcmBuilder { + instructions: new_instructions, + state: core::marker::PhantomData, + } + } + } + }, + _ => + return Err(Error::new_spanned( + &variant, + "Instructions that load the holding register should take operands", + )), + }; + Ok(method) + }) + .collect::, _>>()?; + + let first_impl = quote! 
{ + impl XcmBuilder { + #(#load_holding_methods)* + } + }; + + // Then we require fees to be paid + let buy_execution_method = data_enum + .variants + .iter() + .find(|variant| variant.ident.to_string() == "BuyExecution") + .map_or( + Err(Error::new_spanned(&data_enum.variants, "No BuyExecution instruction")), + |variant| { + let variant_name = &variant.ident; + let method_name_string = &variant_name.to_string().to_snake_case(); + let method_name = syn::Ident::new(&method_name_string, variant_name.span()); + let docs = get_doc_comments(&variant); + let fields = match &variant.fields { + Fields::Named(fields) => { + let arg_names: Vec<_> = + fields.named.iter().map(|field| &field.ident).collect(); + let arg_types: Vec<_> = + fields.named.iter().map(|field| &field.ty).collect(); + quote! { + #(#docs)* + pub fn #method_name(self, #(#arg_names: #arg_types),*) -> XcmBuilder { + let mut new_instructions = self.instructions; + new_instructions.push(#name::::#variant_name { #(#arg_names),* }); + XcmBuilder { + instructions: new_instructions, + state: core::marker::PhantomData, + } + } + } + }, + _ => + return Err(Error::new_spanned( + &variant, + "BuyExecution should have named fields", + )), + }; + Ok(fields) + }, + )?; + + let second_impl = quote! { + impl XcmBuilder { + #buy_execution_method + } + }; + + let output = quote! { + #first_impl + #second_impl + }; + + Ok(output) +} + +fn generate_builder_unpaid_impl(name: &Ident, data_enum: &DataEnum) -> Result { + let unpaid_execution_variant = data_enum + .variants + .iter() + .find(|variant| variant.ident.to_string() == "UnpaidExecution") + .ok_or(Error::new_spanned(&data_enum.variants, "No UnpaidExecution instruction"))?; + let unpaid_execution_ident = &unpaid_execution_variant.ident; + let unpaid_execution_method_name = Ident::new( + &unpaid_execution_ident.to_string().to_snake_case(), + unpaid_execution_ident.span(), + ); + let docs = get_doc_comments(&unpaid_execution_variant); + let fields = match &unpaid_execution_variant.fields { + Fields::Named(fields) => fields, + _ => + return Err(Error::new_spanned( + &unpaid_execution_variant, + "UnpaidExecution should have named fields", + )), + }; + let arg_names: Vec<_> = fields.named.iter().map(|field| &field.ident).collect(); + let arg_types: Vec<_> = fields.named.iter().map(|field| &field.ty).collect(); + Ok(quote! { + impl XcmBuilder { + #(#docs)* + pub fn #unpaid_execution_method_name(self, #(#arg_names: #arg_types),*) -> XcmBuilder { + let mut new_instructions = self.instructions; + new_instructions.push(#name::::#unpaid_execution_ident { #(#arg_names),* }); + XcmBuilder { + instructions: new_instructions, + state: core::marker::PhantomData, + } + } + } + }) +} + +fn get_doc_comments(variant: &Variant) -> Vec { + variant + .attrs + .iter() + .filter_map(|attr| match &attr.meta { + Meta::NameValue(MetaNameValue { + value: Expr::Lit(ExprLit { lit: Lit::Str(literal), .. }), + .. + }) if attr.path().is_ident("doc") => Some(literal.value()), + _ => None, + }) + .map(|doc| syn::parse_str::(&format!("/// {}", doc)).unwrap()) + .collect() +} diff --git a/polkadot/xcm/procedural/src/lib.rs b/polkadot/xcm/procedural/src/lib.rs index 83cc6cdf98f..7600e817d0e 100644 --- a/polkadot/xcm/procedural/src/lib.rs +++ b/polkadot/xcm/procedural/src/lib.rs @@ -17,6 +17,7 @@ //! Procedural macros used in XCM. 
use proc_macro::TokenStream; +use syn::{parse_macro_input, DeriveInput}; mod builder_pattern; mod v2; @@ -56,7 +57,10 @@ pub fn impl_conversion_functions_for_junctions_v3(input: TokenStream) -> TokenSt /// .buy_execution(fees, weight_limit) /// .deposit_asset(assets, beneficiary) /// .build(); -#[proc_macro_derive(Builder)] +#[proc_macro_derive(Builder, attributes(builder))] pub fn derive_builder(input: TokenStream) -> TokenStream { + let input = parse_macro_input!(input as DeriveInput); builder_pattern::derive(input) + .unwrap_or_else(syn::Error::into_compile_error) + .into() } diff --git a/polkadot/xcm/procedural/tests/builder_pattern.rs b/polkadot/xcm/procedural/tests/builder_pattern.rs new file mode 100644 index 00000000000..eab9d67121f --- /dev/null +++ b/polkadot/xcm/procedural/tests/builder_pattern.rs @@ -0,0 +1,81 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! Test the methods generated by the Builder derive macro. +//! Tests directly on the actual Xcm struct and Instruction enum. + +use xcm::latest::prelude::*; + +#[test] +fn builder_pattern_works() { + let asset: MultiAsset = (Here, 100u128).into(); + let beneficiary: MultiLocation = AccountId32 { id: [0u8; 32], network: None }.into(); + let message: Xcm<()> = Xcm::builder() + .receive_teleported_asset(asset.clone().into()) + .buy_execution(asset.clone(), Unlimited) + .deposit_asset(asset.clone().into(), beneficiary) + .build(); + assert_eq!( + message, + Xcm(vec![ + ReceiveTeleportedAsset(asset.clone().into()), + BuyExecution { fees: asset.clone(), weight_limit: Unlimited }, + DepositAsset { assets: asset.into(), beneficiary }, + ]) + ); +} + +#[test] +fn default_builder_requires_buy_execution() { + let asset: MultiAsset = (Here, 100u128).into(); + let beneficiary: MultiLocation = AccountId32 { id: [0u8; 32], network: None }.into(); + // This is invalid, since it doesn't pay for fees. + // This is enforced by the runtime, because the build() method doesn't exist + // on the resulting type. + // let message: Xcm<()> = Xcm::builder() + // .withdraw_asset(asset.clone().into()) + // .deposit_asset(asset.into(), beneficiary) + // .build(); + + // To be able to do that, we need to use the explicitly unpaid variant + let message: Xcm<()> = Xcm::builder_unpaid() + .unpaid_execution(Unlimited, None) + .withdraw_asset(asset.clone().into()) + .deposit_asset(asset.clone().into(), beneficiary) + .build(); // This works + assert_eq!( + message, + Xcm(vec![ + UnpaidExecution { weight_limit: Unlimited, check_origin: None }, + WithdrawAsset(asset.clone().into()), + DepositAsset { assets: asset.clone().into(), beneficiary }, + ]) + ); + + // The other option doesn't have any limits whatsoever, so it should + // only be used when you really know what you're doing. 
+ let message: Xcm<()> = Xcm::builder_unsafe() + .withdraw_asset(asset.clone().into()) + .deposit_asset(asset.clone().into(), beneficiary) + .build(); + assert_eq!( + message, + Xcm(vec![ + WithdrawAsset(asset.clone().into()), + DepositAsset { assets: asset.clone().into(), beneficiary }, + ]) + ); +} diff --git a/polkadot/xcm/procedural/tests/ui.rs b/polkadot/xcm/procedural/tests/ui.rs index a6ec35d0862..a30f4d7dee5 100644 --- a/polkadot/xcm/procedural/tests/ui.rs +++ b/polkadot/xcm/procedural/tests/ui.rs @@ -28,5 +28,5 @@ fn ui() { std::env::set_var("SKIP_WASM_BUILD", "1"); let t = trybuild::TestCases::new(); - t.compile_fail("tests/ui/*.rs"); + t.compile_fail("tests/ui/**/*.rs"); } diff --git a/polkadot/xcm/procedural/tests/ui/builder_pattern/badly_formatted_attribute.rs b/polkadot/xcm/procedural/tests/ui/builder_pattern/badly_formatted_attribute.rs new file mode 100644 index 00000000000..3a103f3ddc4 --- /dev/null +++ b/polkadot/xcm/procedural/tests/ui/builder_pattern/badly_formatted_attribute.rs @@ -0,0 +1,32 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! Test error when using a badly formatted attribute. + +use xcm_procedural::Builder; + +struct Xcm(pub Vec>); + +#[derive(Builder)] +enum Instruction { + #[builder(funds_holding = 2)] + WithdrawAsset(u128), + BuyExecution { fees: u128 }, + UnpaidExecution { weight_limit: (u32, u32) }, + Transact { call: Call }, +} + +fn main() {} diff --git a/polkadot/xcm/procedural/tests/ui/builder_pattern/badly_formatted_attribute.stderr b/polkadot/xcm/procedural/tests/ui/builder_pattern/badly_formatted_attribute.stderr new file mode 100644 index 00000000000..978faf2e868 --- /dev/null +++ b/polkadot/xcm/procedural/tests/ui/builder_pattern/badly_formatted_attribute.stderr @@ -0,0 +1,5 @@ +error: Expected `builder(loads_holding)` + --> tests/ui/builder_pattern/badly_formatted_attribute.rs:25:5 + | +25 | #[builder(funds_holding = 2)] + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ diff --git a/polkadot/xcm/procedural/tests/ui/builder_pattern/buy_execution_named_fields.rs b/polkadot/xcm/procedural/tests/ui/builder_pattern/buy_execution_named_fields.rs new file mode 100644 index 00000000000..dc5c679a96e --- /dev/null +++ b/polkadot/xcm/procedural/tests/ui/builder_pattern/buy_execution_named_fields.rs @@ -0,0 +1,30 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. 
+ +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! Test error when the `BuyExecution` instruction doesn't take named fields. + +use xcm_procedural::Builder; + +struct Xcm(pub Vec>); + +#[derive(Builder)] +enum Instruction { + BuyExecution(u128), + UnpaidExecution { weight_limit: (u32, u32) }, + Transact { call: Call }, +} + +fn main() {} diff --git a/polkadot/xcm/procedural/tests/ui/builder_pattern/buy_execution_named_fields.stderr b/polkadot/xcm/procedural/tests/ui/builder_pattern/buy_execution_named_fields.stderr new file mode 100644 index 00000000000..dc8246770ba --- /dev/null +++ b/polkadot/xcm/procedural/tests/ui/builder_pattern/buy_execution_named_fields.stderr @@ -0,0 +1,5 @@ +error: BuyExecution should have named fields + --> tests/ui/builder_pattern/buy_execution_named_fields.rs:25:5 + | +25 | BuyExecution(u128), + | ^^^^^^^^^^^^^^^^^^ diff --git a/polkadot/xcm/procedural/tests/ui/builder_pattern/loads_holding_no_operands.rs b/polkadot/xcm/procedural/tests/ui/builder_pattern/loads_holding_no_operands.rs new file mode 100644 index 00000000000..070f0be6bac --- /dev/null +++ b/polkadot/xcm/procedural/tests/ui/builder_pattern/loads_holding_no_operands.rs @@ -0,0 +1,32 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! Test error when an instruction that loads the holding register doesn't take operands. + +use xcm_procedural::Builder; + +struct Xcm(pub Vec>); + +#[derive(Builder)] +enum Instruction { + #[builder(loads_holding)] + WithdrawAsset, + BuyExecution { fees: u128 }, + UnpaidExecution { weight_limit: (u32, u32) }, + Transact { call: Call }, +} + +fn main() {} diff --git a/polkadot/xcm/procedural/tests/ui/builder_pattern/loads_holding_no_operands.stderr b/polkadot/xcm/procedural/tests/ui/builder_pattern/loads_holding_no_operands.stderr new file mode 100644 index 00000000000..0358a35ad3d --- /dev/null +++ b/polkadot/xcm/procedural/tests/ui/builder_pattern/loads_holding_no_operands.stderr @@ -0,0 +1,6 @@ +error: Instructions that load the holding register should take operands + --> tests/ui/builder_pattern/loads_holding_no_operands.rs:25:5 + | +25 | / #[builder(loads_holding)] +26 | | WithdrawAsset, + | |_________________^ diff --git a/polkadot/xcm/procedural/tests/ui/builder_pattern/no_buy_execution.rs b/polkadot/xcm/procedural/tests/ui/builder_pattern/no_buy_execution.rs new file mode 100644 index 00000000000..1ed8dd38cba --- /dev/null +++ b/polkadot/xcm/procedural/tests/ui/builder_pattern/no_buy_execution.rs @@ -0,0 +1,29 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. 
+ +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! Test error when there's no `BuyExecution` instruction. + +use xcm_procedural::Builder; + +struct Xcm(pub Vec>); + +#[derive(Builder)] +enum Instruction { + UnpaidExecution { weight_limit: (u32, u32) }, + Transact { call: Call }, +} + +fn main() {} diff --git a/polkadot/xcm/procedural/tests/ui/builder_pattern/no_buy_execution.stderr b/polkadot/xcm/procedural/tests/ui/builder_pattern/no_buy_execution.stderr new file mode 100644 index 00000000000..d8798c8223f --- /dev/null +++ b/polkadot/xcm/procedural/tests/ui/builder_pattern/no_buy_execution.stderr @@ -0,0 +1,6 @@ +error: No BuyExecution instruction + --> tests/ui/builder_pattern/no_buy_execution.rs:25:5 + | +25 | / UnpaidExecution { weight_limit: (u32, u32) }, +26 | | Transact { call: Call }, + | |____________________________^ diff --git a/polkadot/xcm/procedural/tests/ui/builder_pattern/no_unpaid_execution.rs b/polkadot/xcm/procedural/tests/ui/builder_pattern/no_unpaid_execution.rs new file mode 100644 index 00000000000..d542102d2d3 --- /dev/null +++ b/polkadot/xcm/procedural/tests/ui/builder_pattern/no_unpaid_execution.rs @@ -0,0 +1,29 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! Test error when there's no `UnpaidExecution` instruction. + +use xcm_procedural::Builder; + +struct Xcm(pub Vec>); + +#[derive(Builder)] +enum Instruction { + BuyExecution { fees: u128 }, + Transact { call: Call }, +} + +fn main() {} diff --git a/polkadot/xcm/procedural/tests/ui/builder_pattern/no_unpaid_execution.stderr b/polkadot/xcm/procedural/tests/ui/builder_pattern/no_unpaid_execution.stderr new file mode 100644 index 00000000000..c8c0748da72 --- /dev/null +++ b/polkadot/xcm/procedural/tests/ui/builder_pattern/no_unpaid_execution.stderr @@ -0,0 +1,6 @@ +error: No UnpaidExecution instruction + --> tests/ui/builder_pattern/no_unpaid_execution.rs:25:5 + | +25 | / BuyExecution { fees: u128 }, +26 | | Transact { call: Call }, + | |____________________________^ diff --git a/polkadot/xcm/procedural/tests/ui/builder_pattern/unexpected_attribute.rs b/polkadot/xcm/procedural/tests/ui/builder_pattern/unexpected_attribute.rs new file mode 100644 index 00000000000..5808ec571ce --- /dev/null +++ b/polkadot/xcm/procedural/tests/ui/builder_pattern/unexpected_attribute.rs @@ -0,0 +1,32 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. 
+ +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! Test error when using wrong attribute. + +use xcm_procedural::Builder; + +struct Xcm(pub Vec>); + +#[derive(Builder)] +enum Instruction { + #[builder(funds_holding)] + WithdrawAsset(u128), + BuyExecution { fees: u128 }, + UnpaidExecution { weight_limit: (u32, u32) }, + Transact { call: Call }, +} + +fn main() {} diff --git a/polkadot/xcm/procedural/tests/ui/builder_pattern/unexpected_attribute.stderr b/polkadot/xcm/procedural/tests/ui/builder_pattern/unexpected_attribute.stderr new file mode 100644 index 00000000000..1ff9d185136 --- /dev/null +++ b/polkadot/xcm/procedural/tests/ui/builder_pattern/unexpected_attribute.stderr @@ -0,0 +1,5 @@ +error: Expected `builder(loads_holding)` + --> tests/ui/builder_pattern/unexpected_attribute.rs:25:5 + | +25 | #[builder(funds_holding)] + | ^^^^^^^^^^^^^^^^^^^^^^^^^ diff --git a/polkadot/xcm/procedural/tests/ui/builder_pattern/unpaid_execution_named_fields.rs b/polkadot/xcm/procedural/tests/ui/builder_pattern/unpaid_execution_named_fields.rs new file mode 100644 index 00000000000..bb98d603fd9 --- /dev/null +++ b/polkadot/xcm/procedural/tests/ui/builder_pattern/unpaid_execution_named_fields.rs @@ -0,0 +1,30 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! Test error when the `BuyExecution` instruction doesn't take named fields. 
+ +use xcm_procedural::Builder; + +struct Xcm(pub Vec>); + +#[derive(Builder)] +enum Instruction { + BuyExecution { fees: u128 }, + UnpaidExecution(u32, u32), + Transact { call: Call }, +} + +fn main() {} diff --git a/polkadot/xcm/procedural/tests/ui/builder_pattern/unpaid_execution_named_fields.stderr b/polkadot/xcm/procedural/tests/ui/builder_pattern/unpaid_execution_named_fields.stderr new file mode 100644 index 00000000000..0a3c0a40a33 --- /dev/null +++ b/polkadot/xcm/procedural/tests/ui/builder_pattern/unpaid_execution_named_fields.stderr @@ -0,0 +1,5 @@ +error: UnpaidExecution should have named fields + --> tests/ui/builder_pattern/unpaid_execution_named_fields.rs:26:5 + | +26 | UnpaidExecution(u32, u32), + | ^^^^^^^^^^^^^^^^^^^^^^^^^ diff --git a/polkadot/xcm/procedural/tests/ui/builder_pattern.rs b/polkadot/xcm/procedural/tests/ui/builder_pattern/wrong_target.rs similarity index 100% rename from polkadot/xcm/procedural/tests/ui/builder_pattern.rs rename to polkadot/xcm/procedural/tests/ui/builder_pattern/wrong_target.rs diff --git a/polkadot/xcm/procedural/tests/ui/builder_pattern.stderr b/polkadot/xcm/procedural/tests/ui/builder_pattern/wrong_target.stderr similarity index 63% rename from polkadot/xcm/procedural/tests/ui/builder_pattern.stderr rename to polkadot/xcm/procedural/tests/ui/builder_pattern/wrong_target.stderr index 439b40f31ca..007aa0b5ff3 100644 --- a/polkadot/xcm/procedural/tests/ui/builder_pattern.stderr +++ b/polkadot/xcm/procedural/tests/ui/builder_pattern/wrong_target.stderr @@ -1,5 +1,5 @@ error: Expected the `Instruction` enum - --> tests/ui/builder_pattern.rs:23:1 + --> tests/ui/builder_pattern/wrong_target.rs:23:1 | 23 | struct SomeStruct; | ^^^^^^^^^^^^^^^^^^ diff --git a/polkadot/xcm/src/v3/mod.rs b/polkadot/xcm/src/v3/mod.rs index 4217528f227..bbdd504ceb0 100644 --- a/polkadot/xcm/src/v3/mod.rs +++ b/polkadot/xcm/src/v3/mod.rs @@ -426,6 +426,7 @@ pub enum Instruction { /// Kind: *Command*. /// /// Errors: + #[builder(loads_holding)] WithdrawAsset(MultiAssets), /// Asset(s) (`assets`) have been received into the ownership of this system on the `origin` @@ -439,6 +440,7 @@ pub enum Instruction { /// Kind: *Trusted Indication*. /// /// Errors: + #[builder(loads_holding)] ReserveAssetDeposited(MultiAssets), /// Asset(s) (`assets`) have been destroyed on the `origin` system and equivalent assets should @@ -452,6 +454,7 @@ pub enum Instruction { /// Kind: *Trusted Indication*. /// /// Errors: + #[builder(loads_holding)] ReceiveTeleportedAsset(MultiAssets), /// Respond with information that the local system is expecting. @@ -776,6 +779,7 @@ pub enum Instruction { /// Kind: *Command* /// /// Errors: + #[builder(loads_holding)] ClaimAsset { assets: MultiAssets, ticket: MultiLocation }, /// Always throws an error of type `Trap`. 
diff --git a/polkadot/xcm/xcm-simulator/example/src/lib.rs b/polkadot/xcm/xcm-simulator/example/src/lib.rs index 03e7c19a914..85b8ad1c5cb 100644 --- a/polkadot/xcm/xcm-simulator/example/src/lib.rs +++ b/polkadot/xcm/xcm-simulator/example/src/lib.rs @@ -649,23 +649,4 @@ mod tests { ); }); } - - #[test] - fn builder_pattern_works() { - let asset: MultiAsset = (Here, 100u128).into(); - let beneficiary: MultiLocation = AccountId32 { id: [0u8; 32], network: None }.into(); - let message: Xcm<()> = Xcm::builder() - .withdraw_asset(asset.clone().into()) - .buy_execution(asset.clone(), Unlimited) - .deposit_asset(asset.clone().into(), beneficiary) - .build(); - assert_eq!( - message, - Xcm(vec![ - WithdrawAsset(asset.clone().into()), - BuyExecution { fees: asset.clone(), weight_limit: Unlimited }, - DepositAsset { assets: asset.into(), beneficiary }, - ]) - ); - } } diff --git a/prdoc/pr_2253.prdoc b/prdoc/pr_2253.prdoc new file mode 100644 index 00000000000..398b0a29066 --- /dev/null +++ b/prdoc/pr_2253.prdoc @@ -0,0 +1,24 @@ +# Schema: Parity PR Documentation Schema (prdoc) +# See doc at https://github.com/paritytech/prdoc + +title: Different builder pattern constructors for XCM + +doc: + - audience: Core Dev + description: | + The `builder()` constructor for XCM programs now only allows building messages that pay for fees, + i.e. messages that would pass the `AllowTopLevelPaidExecutionFrom` barrier. + Another constructor, `builder_unpaid()` requires an explicit `UnpaidExecution` instruction before + anything else. + For building messages without any restriction, `builder_unsafe` can be used. + This has been named like that since in general the other two should be used instead, but it's okay + to use it for teaching purposes or for experimenting. + +migrations: + db: [] + + runtime: [] + +crates: [] + +host_functions: [] -- GitLab From f5ad32e406e5108ea87f05a0b99406b80ed3226c Mon Sep 17 00:00:00 2001 From: Javier Bullrich Date: Tue, 21 Nov 2023 16:49:47 +0100 Subject: [PATCH 092/199] added action to pass review bot on merge queue (#2414) This action will trigger when a merge queue is created and will add a positive status check under the `review-bot` account, allowing the PR to pass the required check for the merge. 
--------- Co-authored-by: Chevdor --- .github/workflows/merge-queue.yml | 24 ++++++++++++++++++++++++ 1 file changed, 24 insertions(+) create mode 100644 .github/workflows/merge-queue.yml diff --git a/.github/workflows/merge-queue.yml b/.github/workflows/merge-queue.yml new file mode 100644 index 00000000000..f3fb7765ca6 --- /dev/null +++ b/.github/workflows/merge-queue.yml @@ -0,0 +1,24 @@ +name: Merge-Queue + +on: + merge_group: + +jobs: + trigger-merge-queue-action: + runs-on: ubuntu-latest + environment: master + steps: + - name: Generate token + id: app_token + uses: tibdex/github-app-token@3beb63f4bd073e61482598c45c71c1019b59b73a # v2.1.0 + with: + app_id: ${{ secrets.REVIEW_APP_ID }} + private_key: ${{ secrets.REVIEW_APP_KEY }} + - name: Add Merge Queue status check + uses: billyjbryant/create-status-check@3e6fa0ac599d10d9588cf9516ca4330ef669b858 # v2 + with: + authToken: ${{ steps.app_token.outputs.token }} + context: 'review-bot' + description: 'PRs for merge queue gets approved' + state: 'success' + sha: ${{ github.event.merge_group.head_commit.id }} -- GitLab From 50811d6b424185e1e6c0762a8b1c95db893c6750 Mon Sep 17 00:00:00 2001 From: Sophia Gold Date: Tue, 21 Nov 2023 11:34:05 -0500 Subject: [PATCH 093/199] Update tick collator for async backing (#1497) This updates the tick runtime and polkadot-parachain collator to use async backing. --- Cargo.lock | 1 + .../testing/rococo-parachain/Cargo.toml | 2 + .../testing/rococo-parachain/src/lib.rs | 47 ++++++++++++------- cumulus/polkadot-parachain/src/service.rs | 34 +++++++++----- 4 files changed, 55 insertions(+), 29 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 88cefe7bbef..903e9463cf6 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -14530,6 +14530,7 @@ dependencies = [ "cumulus-pallet-xcm", "cumulus-pallet-xcmp-queue", "cumulus-ping", + "cumulus-primitives-aura", "cumulus-primitives-core", "cumulus-primitives-utility", "frame-benchmarking", diff --git a/cumulus/parachains/runtimes/testing/rococo-parachain/Cargo.toml b/cumulus/parachains/runtimes/testing/rococo-parachain/Cargo.toml index 5e9d347a25d..08fa8b69127 100644 --- a/cumulus/parachains/runtimes/testing/rococo-parachain/Cargo.toml +++ b/cumulus/parachains/runtimes/testing/rococo-parachain/Cargo.toml @@ -52,6 +52,7 @@ cumulus-pallet-parachain-system = { path = "../../../../pallets/parachain-system cumulus-pallet-xcm = { path = "../../../../pallets/xcm", default-features = false } cumulus-pallet-xcmp-queue = { path = "../../../../pallets/xcmp-queue", default-features = false } cumulus-ping = { path = "../../../pallets/ping", default-features = false } +cumulus-primitives-aura = { path = "../../../../primitives/aura", default-features = false } cumulus-primitives-core = { path = "../../../../primitives/core", default-features = false } cumulus-primitives-utility = { path = "../../../../primitives/utility", default-features = false } parachains-common = { path = "../../../common", default-features = false } @@ -70,6 +71,7 @@ std = [ "cumulus-pallet-xcm/std", "cumulus-pallet-xcmp-queue/std", "cumulus-ping/std", + "cumulus-primitives-aura/std", "cumulus-primitives-core/std", "cumulus-primitives-utility/std", "frame-benchmarking?/std", diff --git a/cumulus/parachains/runtimes/testing/rococo-parachain/src/lib.rs b/cumulus/parachains/runtimes/testing/rococo-parachain/src/lib.rs index 6df00d43e8d..965fb0d6adc 100644 --- a/cumulus/parachains/runtimes/testing/rococo-parachain/src/lib.rs +++ b/cumulus/parachains/runtimes/testing/rococo-parachain/src/lib.rs @@ -22,13 +22,13 @@ 
#[cfg(feature = "std")] include!(concat!(env!("OUT_DIR"), "/wasm_binary.rs")); -use cumulus_pallet_parachain_system::RelayNumberStrictlyIncreases; +use cumulus_pallet_parachain_system::RelayNumberMonotonicallyIncreases; use polkadot_runtime_common::xcm_sender::NoPriceForMessageDelivery; use sp_api::impl_runtime_apis; use sp_core::OpaqueMetadata; use sp_runtime::{ create_runtime_str, generic, impl_opaque_keys, - traits::{AccountIdLookup, BlakeTwo256, Block as BlockT}, + traits::{AccountIdLookup, BlakeTwo256, Block as BlockT, Hash as HashT}, transaction_validity::{TransactionSource, TransactionValidity}, ApplyExtrinsicResult, }; @@ -113,7 +113,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { state_version: 0, }; -pub const MILLISECS_PER_BLOCK: u64 = 12000; +pub const MILLISECS_PER_BLOCK: u64 = 6000; pub const SLOT_DURATION: u64 = MILLISECS_PER_BLOCK; @@ -143,18 +143,18 @@ const AVERAGE_ON_INITIALIZE_RATIO: Perbill = Perbill::from_percent(10); /// We allow `Normal` extrinsics to fill up the block up to 75%, the rest can be used /// by Operational extrinsics. const NORMAL_DISPATCH_RATIO: Perbill = Perbill::from_percent(75); -/// We allow for .5 seconds of compute with a 12 second average block time. +/// We allow for 2 seconds of compute with a 6 second average block time. const MAXIMUM_BLOCK_WEIGHT: Weight = Weight::from_parts( - WEIGHT_REF_TIME_PER_SECOND.saturating_div(2), + WEIGHT_REF_TIME_PER_SECOND.saturating_mul(2), cumulus_primitives_core::relay_chain::MAX_POV_SIZE as u64, ); /// Maximum number of blocks simultaneously accepted by the Runtime, not yet included /// into the relay chain. -const UNINCLUDED_SEGMENT_CAPACITY: u32 = 1; +const UNINCLUDED_SEGMENT_CAPACITY: u32 = 3; /// How many parachain blocks are processed by the relay chain per parent. Limits the /// number of blocks authored per slot. -const BLOCK_PROCESSING_VELOCITY: u32 = 1; +const BLOCK_PROCESSING_VELOCITY: u32 = 2; /// Relay chain slot duration, in milliseconds. const RELAY_CHAIN_SLOT_DURATION_MILLIS: u32 = 6000; @@ -277,6 +277,13 @@ parameter_types! { pub const RelayOrigin: AggregateMessageOrigin = AggregateMessageOrigin::Parent; } +type ConsensusHook = cumulus_pallet_aura_ext::FixedVelocityConsensusHook< + Runtime, + RELAY_CHAIN_SLOT_DURATION_MILLIS, + BLOCK_PROCESSING_VELOCITY, + UNINCLUDED_SEGMENT_CAPACITY, +>; + impl cumulus_pallet_parachain_system::Config for Runtime { type WeightInfo = (); type RuntimeEvent = RuntimeEvent; @@ -287,13 +294,8 @@ impl cumulus_pallet_parachain_system::Config for Runtime { type ReservedDmpWeight = ReservedDmpWeight; type XcmpMessageHandler = XcmpQueue; type ReservedXcmpWeight = ReservedXcmpWeight; - type CheckAssociatedRelayNumber = RelayNumberStrictlyIncreases; - type ConsensusHook = cumulus_pallet_aura_ext::FixedVelocityConsensusHook< - Runtime, - RELAY_CHAIN_SLOT_DURATION_MILLIS, - BLOCK_PROCESSING_VELOCITY, - UNINCLUDED_SEGMENT_CAPACITY, - >; + type CheckAssociatedRelayNumber = RelayNumberMonotonicallyIncreases; + type ConsensusHook = ConsensusHook; } impl parachain_info::Config for Runtime {} @@ -584,9 +586,9 @@ impl pallet_aura::Config for Runtime { type AuthorityId = AuraId; type DisabledValidators = (); type MaxAuthorities = ConstU32<100_000>; - type AllowMultipleBlocksPerSlot = ConstBool; + type AllowMultipleBlocksPerSlot = ConstBool; #[cfg(feature = "experimental")] - type SlotDuration = pallet_aura::MinimumPeriodTimesTwo; + type SlotDuration = ConstU64; } construct_runtime! { @@ -624,7 +626,7 @@ pub type Balance = u128; /// Index of a transaction in the chain. 
pub type Nonce = u32; /// A hash of some data used by the chain. -pub type Hash = sp_core::H256; +pub type Hash = ::Output; /// An index to a block. pub type BlockNumber = u32; /// The address format for describing accounts. @@ -751,7 +753,7 @@ impl_runtime_apis! { impl sp_consensus_aura::AuraApi for Runtime { fn slot_duration() -> sp_consensus_aura::SlotDuration { - sp_consensus_aura::SlotDuration::from_millis(Aura::slot_duration()) + sp_consensus_aura::SlotDuration::from_millis(SLOT_DURATION) } fn authorities() -> Vec { @@ -824,6 +826,15 @@ impl_runtime_apis! { build_config::(config) } } + + impl cumulus_primitives_aura::AuraUnincludedSegmentApi for Runtime { + fn can_build_upon( + included_hash: ::Hash, + slot: cumulus_primitives_aura::Slot, + ) -> bool { + ConsensusHook::can_build_upon(included_hash, slot) + } + } } cumulus_pallet_parachain_system::register_validate_block! { diff --git a/cumulus/polkadot-parachain/src/service.rs b/cumulus/polkadot-parachain/src/service.rs index 3884fce246c..f580835db55 100644 --- a/cumulus/polkadot-parachain/src/service.rs +++ b/cumulus/polkadot-parachain/src/service.rs @@ -590,6 +590,7 @@ where CollatorPair, OverseerHandle, Arc>) + Send + Sync>, + Arc, ) -> Result<(), sc_service::Error>, { let parachain_config = prepare_node_config(parachain_config); @@ -723,6 +724,7 @@ where collator_key.expect("Command line arguments do not allow this. qed"), overseer_handle, announce_block, + backend.clone(), )?; } @@ -983,7 +985,8 @@ pub async fn start_rococo_parachain_node( para_id, collator_key, overseer_handle, - announce_block| { + announce_block, + backend| { let slot_duration = cumulus_client_consensus_aura::slot_duration(&*client)?; let proposer_factory = sc_basic_authorship::ProposerFactory::with_proof_recording( @@ -1002,11 +1005,15 @@ pub async fn start_rococo_parachain_node( client.clone(), ); - let params = BasicAuraParams { + let params = AuraParams { create_inherent_data_providers: move |_, ()| async move { Ok(()) }, block_import, - para_client: client, + para_client: client.clone(), + para_backend: backend.clone(), relay_client: relay_chain_interface, + code_hash_provider: move |block_hash| { + client.code_at(block_hash).ok().map(|c| ValidationCode::from(c).hash()) + }, sync_oracle, keystore, collator_key, @@ -1016,12 +1023,10 @@ pub async fn start_rococo_parachain_node( relay_chain_slot_duration, proposer, collator_service, - // Very limited proposal time. 
- authoring_duration: Duration::from_millis(500), - collation_request_receiver: None, + authoring_duration: Duration::from_millis(1500), }; - let fut = basic_aura::run::< + let fut = aura::run::< Block, sp_consensus_aura::sr25519::AuthorityPair, _, @@ -1031,6 +1036,8 @@ pub async fn start_rococo_parachain_node( _, _, _, + _, + _, >(params); task_manager.spawn_essential_handle().spawn("aura", None, fut); @@ -1376,7 +1383,8 @@ where para_id, collator_key, overseer_handle, - announce_block| { + announce_block, + _backend| { let slot_duration = cumulus_client_consensus_aura::slot_duration(&*client)?; let proposer_factory = sc_basic_authorship::ProposerFactory::with_proof_recording( @@ -1471,7 +1479,8 @@ where para_id, collator_key, overseer_handle, - announce_block| { + announce_block, + _backend| { let relay_chain_interface2 = relay_chain_interface.clone(); let collator_service = CollatorService::new( @@ -1642,7 +1651,7 @@ where para_backend: backend.clone(), relay_client: relay_chain_interface, code_hash_provider: move |block_hash| { - client.code_at(block_hash).ok().map(ValidationCode).map(|c| c.hash()) + client.code_at(block_hash).ok().map(|c| ValidationCode::from(c).hash()) }, sync_oracle, keystore, @@ -1713,6 +1722,7 @@ where CollatorPair, OverseerHandle, Arc>) + Send + Sync>, + Arc, ) -> Result<(), sc_service::Error>, { let parachain_config = prepare_node_config(parachain_config); @@ -1845,6 +1855,7 @@ where collator_key.expect("Command line arguments do not allow this. qed"), overseer_handle, announce_block, + backend.clone(), )?; } @@ -1923,7 +1934,8 @@ pub async fn start_contracts_rococo_node( para_id, collator_key, overseer_handle, - announce_block| { + announce_block, + _backend| { let slot_duration = cumulus_client_consensus_aura::slot_duration(&*client)?; let proposer_factory = sc_basic_authorship::ProposerFactory::with_proof_recording( -- GitLab From 2183669d05f9b510f979a0cc3c7847707bacba2e Mon Sep 17 00:00:00 2001 From: Michal Kucharczyk <1728078+michalkucharczyk@users.noreply.github.com> Date: Tue, 21 Nov 2023 20:03:55 +0100 Subject: [PATCH 094/199] cumulus-test-service: block import fix (#2430) This is a follow-up to: https://github.com/paritytech/polkadot-sdk/pull/2001 The block import queue for `test-parachain` (`cumulus-test-service`) shall use the delayed best block feature. This should fix the broken zombienet tests. --- cumulus/test/service/src/lib.rs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/cumulus/test/service/src/lib.rs b/cumulus/test/service/src/lib.rs index 6fd3e4d43d7..4d4c60d1bda 100644 --- a/cumulus/test/service/src/lib.rs +++ b/cumulus/test/service/src/lib.rs @@ -211,7 +211,8 @@ pub fn new_partial( sc_service::new_full_parts::(config, None, executor)?; let client = Arc::new(client); - let block_import = ParachainBlockImport::new(client.clone(), backend.clone()); + let block_import = + ParachainBlockImport::new_with_delayed_best_block(client.clone(), backend.clone()); let registry = config.prometheus_registry(); -- GitLab From 7a32f4be48e514eccfbe5f8d2c2366c049cc4e3d Mon Sep 17 00:00:00 2001 From: Ross Bulat Date: Wed, 22 Nov 2023 16:22:28 +0700 Subject: [PATCH 095/199] Deprecate `RewardDestination::Controller` (#2380) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Deprecates the `RewardDestination::Controller` variant. - [x] `RewardDestination::Controller` annotated with `#[deprecated]`. - [x] `Controller` variant is now handled the same way as `Stash` in `payout_stakers`.
- [x] `set_payee` errors if `RewardDestination::Controller` is provided. - [x] Added `update_payee` call to lazily migrate `RewardDestination::Controller` `Payee` storage entries to `RewardDestination::Account(controller)` . - [x] `payout_stakers_dead_controller` has been removed from benches & weights - was not used. - [x] Tests no longer use `RewardDestination::Controller`. --------- Co-authored-by: command-bot <> Co-authored-by: Gonçalo Pestana Co-authored-by: georgepisaltu <52418509+georgepisaltu@users.noreply.github.com> --- .../westend/src/weights/pallet_staking.rs | 238 ++++----- substrate/frame/staking/src/benchmarking.rs | 54 +- substrate/frame/staking/src/lib.rs | 6 +- substrate/frame/staking/src/mock.rs | 2 +- substrate/frame/staking/src/pallet/impls.rs | 13 +- substrate/frame/staking/src/pallet/mod.rs | 45 +- substrate/frame/staking/src/tests.rs | 203 ++++---- substrate/frame/staking/src/weights.rs | 468 ++++++++---------- 8 files changed, 475 insertions(+), 554 deletions(-) diff --git a/polkadot/runtime/westend/src/weights/pallet_staking.rs b/polkadot/runtime/westend/src/weights/pallet_staking.rs index 3c4542c6d6f..87b603621e8 100644 --- a/polkadot/runtime/westend/src/weights/pallet_staking.rs +++ b/polkadot/runtime/westend/src/weights/pallet_staking.rs @@ -17,7 +17,7 @@ //! Autogenerated weights for `pallet_staking` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-10-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-11-21, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` //! HOSTNAME: `runner-yprdrvc7-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("westend-dev")`, DB CACHE: 1024 @@ -62,8 +62,8 @@ impl pallet_staking::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `894` // Estimated: `4764` - // Minimum execution time: 39_950_000 picoseconds. - Weight::from_parts(41_107_000, 0) + // Minimum execution time: 38_052_000 picoseconds. + Weight::from_parts(39_303_000, 0) .saturating_add(Weight::from_parts(0, 4764)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(4)) @@ -84,8 +84,8 @@ impl pallet_staking::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `1921` // Estimated: `8877` - // Minimum execution time: 83_828_000 picoseconds. - Weight::from_parts(85_733_000, 0) + // Minimum execution time: 81_690_000 picoseconds. + Weight::from_parts(83_889_000, 0) .saturating_add(Weight::from_parts(0, 8877)) .saturating_add(T::DbWeight::get().reads(9)) .saturating_add(T::DbWeight::get().writes(7)) @@ -112,8 +112,8 @@ impl pallet_staking::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `2128` // Estimated: `8877` - // Minimum execution time: 89_002_000 picoseconds. - Weight::from_parts(91_556_000, 0) + // Minimum execution time: 84_409_000 picoseconds. + Weight::from_parts(87_330_000, 0) .saturating_add(Weight::from_parts(0, 8877)) .saturating_add(T::DbWeight::get().reads(12)) .saturating_add(T::DbWeight::get().writes(7)) @@ -133,11 +133,11 @@ impl pallet_staking::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `1075` // Estimated: `4764` - // Minimum execution time: 40_839_000 picoseconds. - Weight::from_parts(42_122_428, 0) + // Minimum execution time: 39_770_000 picoseconds. 
+ Weight::from_parts(40_828_632, 0) .saturating_add(Weight::from_parts(0, 4764)) - // Standard Error: 884 - .saturating_add(Weight::from_parts(46_036, 0).saturating_mul(s.into())) + // Standard Error: 824 + .saturating_add(Weight::from_parts(51_107, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(2)) } @@ -174,11 +174,11 @@ impl pallet_staking::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `2127 + s * (4 ±0)` // Estimated: `6248 + s * (4 ±0)` - // Minimum execution time: 84_244_000 picoseconds. - Weight::from_parts(91_199_964, 0) + // Minimum execution time: 82_500_000 picoseconds. + Weight::from_parts(90_099_121, 0) .saturating_add(Weight::from_parts(0, 6248)) - // Standard Error: 3_381 - .saturating_add(Weight::from_parts(1_327_289, 0).saturating_mul(s.into())) + // Standard Error: 3_280 + .saturating_add(Weight::from_parts(1_273_212, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(13)) .saturating_add(T::DbWeight::get().writes(11)) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(s.into()))) @@ -210,8 +210,8 @@ impl pallet_staking::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `1301` // Estimated: `4556` - // Minimum execution time: 49_693_000 picoseconds. - Weight::from_parts(50_814_000, 0) + // Minimum execution time: 48_236_000 picoseconds. + Weight::from_parts(49_518_000, 0) .saturating_add(Weight::from_parts(0, 4556)) .saturating_add(T::DbWeight::get().reads(11)) .saturating_add(T::DbWeight::get().writes(5)) @@ -225,11 +225,11 @@ impl pallet_staking::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `1243 + k * (569 ±0)` // Estimated: `4556 + k * (3033 ±0)` - // Minimum execution time: 29_140_000 picoseconds. - Weight::from_parts(28_309_627, 0) + // Minimum execution time: 28_280_000 picoseconds. + Weight::from_parts(29_182_740, 0) .saturating_add(Weight::from_parts(0, 4556)) - // Standard Error: 5_780 - .saturating_add(Weight::from_parts(6_509_869, 0).saturating_mul(k.into())) + // Standard Error: 6_102 + .saturating_add(Weight::from_parts(6_412_107, 0).saturating_mul(k.into())) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(k.into()))) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(k.into()))) @@ -262,11 +262,11 @@ impl pallet_staking::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `1797 + n * (102 ±0)` // Estimated: `6248 + n * (2520 ±0)` - // Minimum execution time: 61_377_000 picoseconds. - Weight::from_parts(58_805_232, 0) + // Minimum execution time: 59_846_000 picoseconds. + Weight::from_parts(58_029_857, 0) .saturating_add(Weight::from_parts(0, 6248)) - // Standard Error: 14_197 - .saturating_add(Weight::from_parts(4_090_197, 0).saturating_mul(n.into())) + // Standard Error: 15_967 + .saturating_add(Weight::from_parts(3_898_764, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(12)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(n.into()))) .saturating_add(T::DbWeight::get().writes(6)) @@ -290,8 +290,8 @@ impl pallet_staking::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `1581` // Estimated: `6248` - // Minimum execution time: 52_736_000 picoseconds. - Weight::from_parts(54_573_000, 0) + // Minimum execution time: 51_223_000 picoseconds. 
+ Weight::from_parts(52_310_000, 0) .saturating_add(Weight::from_parts(0, 6248)) .saturating_add(T::DbWeight::get().reads(8)) .saturating_add(T::DbWeight::get().writes(6)) @@ -306,12 +306,28 @@ impl pallet_staking::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `865` // Estimated: `4556` - // Minimum execution time: 16_496_000 picoseconds. - Weight::from_parts(17_045_000, 0) + // Minimum execution time: 15_762_000 picoseconds. + Weight::from_parts(16_381_000, 0) .saturating_add(Weight::from_parts(0, 4556)) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(1)) } + /// Storage: `Staking::Ledger` (r:1 w:0) + /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) + /// Storage: `Staking::Payee` (r:1 w:1) + /// Proof: `Staking::Payee` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) + /// Storage: `Staking::Bonded` (r:1 w:0) + /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) + fn update_payee() -> Weight { + // Proof Size summary in bytes: + // Measured: `932` + // Estimated: `4556` + // Minimum execution time: 21_904_000 picoseconds. + Weight::from_parts(22_373_000, 0) + .saturating_add(Weight::from_parts(0, 4556)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(1)) + } /// Storage: `Staking::Bonded` (r:1 w:1) /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) /// Storage: `Staking::Ledger` (r:1 w:2) @@ -320,8 +336,8 @@ impl pallet_staking::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `865` // Estimated: `4556` - // Minimum execution time: 19_339_000 picoseconds. - Weight::from_parts(20_187_000, 0) + // Minimum execution time: 18_869_000 picoseconds. + Weight::from_parts(19_422_000, 0) .saturating_add(Weight::from_parts(0, 4556)) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(3)) @@ -332,8 +348,8 @@ impl pallet_staking::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_340_000 picoseconds. - Weight::from_parts(2_551_000, 0) + // Minimum execution time: 2_205_000 picoseconds. + Weight::from_parts(2_320_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -343,8 +359,8 @@ impl pallet_staking::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 7_483_000 picoseconds. - Weight::from_parts(8_101_000, 0) + // Minimum execution time: 7_179_000 picoseconds. + Weight::from_parts(7_843_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -354,8 +370,8 @@ impl pallet_staking::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 7_773_000 picoseconds. - Weight::from_parts(8_610_000, 0) + // Minimum execution time: 7_206_000 picoseconds. + Weight::from_parts(7_829_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -365,8 +381,8 @@ impl pallet_staking::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 7_577_000 picoseconds. - Weight::from_parts(7_937_000, 0) + // Minimum execution time: 7_414_000 picoseconds. 
+ Weight::from_parts(7_770_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -377,11 +393,11 @@ impl pallet_staking::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_522_000 picoseconds. - Weight::from_parts(2_735_307, 0) + // Minimum execution time: 2_256_000 picoseconds. + Weight::from_parts(2_645_840, 0) .saturating_add(Weight::from_parts(0, 0)) - // Standard Error: 38 - .saturating_add(Weight::from_parts(10_553, 0).saturating_mul(v.into())) + // Standard Error: 37 + .saturating_add(Weight::from_parts(10_207, 0).saturating_mul(v.into())) .saturating_add(T::DbWeight::get().writes(1)) } /// Storage: `Staking::SlashingSpans` (r:1 w:1) @@ -417,11 +433,11 @@ impl pallet_staking::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `2127 + s * (4 ±0)` // Estimated: `6248 + s * (4 ±0)` - // Minimum execution time: 82_547_000 picoseconds. - Weight::from_parts(89_373_781, 0) + // Minimum execution time: 81_032_000 picoseconds. + Weight::from_parts(88_297_596, 0) .saturating_add(Weight::from_parts(0, 6248)) - // Standard Error: 3_589 - .saturating_add(Weight::from_parts(1_258_878, 0).saturating_mul(s.into())) + // Standard Error: 3_070 + .saturating_add(Weight::from_parts(1_207_207, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(13)) .saturating_add(T::DbWeight::get().writes(12)) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(s.into()))) @@ -434,56 +450,14 @@ impl pallet_staking::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `66639` // Estimated: `70104` - // Minimum execution time: 134_619_000 picoseconds. - Weight::from_parts(1_194_949_665, 0) + // Minimum execution time: 131_456_000 picoseconds. 
+ Weight::from_parts(935_254_517, 0) .saturating_add(Weight::from_parts(0, 70104)) - // Standard Error: 76_719 - .saturating_add(Weight::from_parts(6_455_953, 0).saturating_mul(s.into())) + // Standard Error: 57_806 + .saturating_add(Weight::from_parts(4_823_189, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: `Staking::CurrentEra` (r:1 w:0) - /// Proof: `Staking::CurrentEra` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - /// Storage: `Staking::ErasStakersOverview` (r:1 w:0) - /// Proof: `Staking::ErasStakersOverview` (`max_values`: None, `max_size`: Some(92), added: 2567, mode: `MaxEncodedLen`) - /// Storage: `Staking::ErasValidatorReward` (r:1 w:0) - /// Proof: `Staking::ErasValidatorReward` (`max_values`: None, `max_size`: Some(28), added: 2503, mode: `MaxEncodedLen`) - /// Storage: `Staking::Bonded` (r:65 w:0) - /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) - /// Storage: `Staking::Ledger` (r:1 w:1) - /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) - /// Storage: `Balances::Locks` (r:1 w:1) - /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) - /// Storage: `Balances::Freezes` (r:1 w:0) - /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`) - /// Storage: `System::Account` (r:66 w:66) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - /// Storage: `Staking::ClaimedRewards` (r:1 w:1) - /// Proof: `Staking::ClaimedRewards` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// Storage: `Staking::ErasStakersPaged` (r:1 w:0) - /// Proof: `Staking::ErasStakersPaged` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// Storage: `Staking::ErasRewardPoints` (r:1 w:0) - /// Proof: `Staking::ErasRewardPoints` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// Storage: `Staking::ErasValidatorPrefs` (r:1 w:0) - /// Proof: `Staking::ErasValidatorPrefs` (`max_values`: None, `max_size`: Some(57), added: 2532, mode: `MaxEncodedLen`) - /// Storage: `Staking::Payee` (r:65 w:0) - /// Proof: `Staking::Payee` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) - /// The range of component `n` is `[0, 64]`. - fn payout_stakers_dead_controller(n: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `6895 + n * (156 ±0)` - // Estimated: `9802 + n * (2603 ±0)` - // Minimum execution time: 114_338_000 picoseconds. 
- Weight::from_parts(138_518_124, 0) - .saturating_add(Weight::from_parts(0, 9802)) - // Standard Error: 53_621 - .saturating_add(Weight::from_parts(25_676_781, 0).saturating_mul(n.into())) - .saturating_add(T::DbWeight::get().reads(14)) - .saturating_add(T::DbWeight::get().reads((3_u64).saturating_mul(n.into()))) - .saturating_add(T::DbWeight::get().writes(5)) - .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(n.into()))) - .saturating_add(Weight::from_parts(0, 2603).saturating_mul(n.into())) - } /// Storage: `Staking::Bonded` (r:65 w:0) /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) /// Storage: `Staking::Ledger` (r:65 w:65) @@ -516,12 +490,12 @@ impl pallet_staking::WeightInfo for WeightInfo { fn payout_stakers_alive_staked(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `8249 + n * (396 ±0)` - // Estimated: `10779 + n * (3774 ±3)` - // Minimum execution time: 132_719_000 picoseconds. - Weight::from_parts(170_505_880, 0) + // Estimated: `10779 + n * (3774 ±0)` + // Minimum execution time: 129_233_000 picoseconds. + Weight::from_parts(165_096_042, 0) .saturating_add(Weight::from_parts(0, 10779)) - // Standard Error: 32_527 - .saturating_add(Weight::from_parts(42_453_136, 0).saturating_mul(n.into())) + // Standard Error: 29_598 + .saturating_add(Weight::from_parts(40_716_425, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(14)) .saturating_add(T::DbWeight::get().reads((6_u64).saturating_mul(n.into()))) .saturating_add(T::DbWeight::get().writes(4)) @@ -545,11 +519,11 @@ impl pallet_staking::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `1922 + l * (5 ±0)` // Estimated: `8877` - // Minimum execution time: 78_438_000 picoseconds. - Weight::from_parts(81_774_734, 0) + // Minimum execution time: 77_223_000 picoseconds. + Weight::from_parts(80_026_259, 0) .saturating_add(Weight::from_parts(0, 8877)) - // Standard Error: 3_706 - .saturating_add(Weight::from_parts(51_358, 0).saturating_mul(l.into())) + // Standard Error: 4_493 + .saturating_add(Weight::from_parts(52_909, 0).saturating_mul(l.into())) .saturating_add(T::DbWeight::get().reads(9)) .saturating_add(T::DbWeight::get().writes(7)) } @@ -584,11 +558,11 @@ impl pallet_staking::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `2127 + s * (4 ±0)` // Estimated: `6248 + s * (4 ±0)` - // Minimum execution time: 92_129_000 picoseconds. - Weight::from_parts(94_137_611, 0) + // Minimum execution time: 89_871_000 picoseconds. + Weight::from_parts(92_313_331, 0) .saturating_add(Weight::from_parts(0, 6248)) - // Standard Error: 4_141 - .saturating_add(Weight::from_parts(1_283_823, 0).saturating_mul(s.into())) + // Standard Error: 3_321 + .saturating_add(Weight::from_parts(1_243_347, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(12)) .saturating_add(T::DbWeight::get().writes(11)) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(s.into()))) @@ -634,13 +608,13 @@ impl pallet_staking::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0 + n * (716 ±0) + v * (3594 ±0)` // Estimated: `456136 + n * (3566 ±0) + v * (3566 ±0)` - // Minimum execution time: 527_896_000 picoseconds. - Weight::from_parts(533_325_000, 0) + // Minimum execution time: 518_819_000 picoseconds. 
+ Weight::from_parts(522_108_000, 0) .saturating_add(Weight::from_parts(0, 456136)) - // Standard Error: 2_064_813 - .saturating_add(Weight::from_parts(68_484_503, 0).saturating_mul(v.into())) - // Standard Error: 205_747 - .saturating_add(Weight::from_parts(18_833_735, 0).saturating_mul(n.into())) + // Standard Error: 1_987_848 + .saturating_add(Weight::from_parts(64_855_377, 0).saturating_mul(v.into())) + // Standard Error: 198_078 + .saturating_add(Weight::from_parts(18_343_485, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(184)) .saturating_add(T::DbWeight::get().reads((5_u64).saturating_mul(v.into()))) .saturating_add(T::DbWeight::get().reads((4_u64).saturating_mul(n.into()))) @@ -671,13 +645,13 @@ impl pallet_staking::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `3108 + n * (907 ±0) + v * (391 ±0)` // Estimated: `456136 + n * (3566 ±0) + v * (3566 ±0)` - // Minimum execution time: 35_302_472_000 picoseconds. - Weight::from_parts(35_651_169_000, 0) + // Minimum execution time: 34_976_277_000 picoseconds. + Weight::from_parts(35_245_501_000, 0) .saturating_add(Weight::from_parts(0, 456136)) - // Standard Error: 412_098 - .saturating_add(Weight::from_parts(5_172_265, 0).saturating_mul(v.into())) - // Standard Error: 412_098 - .saturating_add(Weight::from_parts(4_142_772, 0).saturating_mul(n.into())) + // Standard Error: 386_461 + .saturating_add(Weight::from_parts(5_145_210, 0).saturating_mul(v.into())) + // Standard Error: 386_461 + .saturating_add(Weight::from_parts(3_762_623, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(179)) .saturating_add(T::DbWeight::get().reads((5_u64).saturating_mul(v.into()))) .saturating_add(T::DbWeight::get().reads((4_u64).saturating_mul(n.into()))) @@ -694,11 +668,11 @@ impl pallet_staking::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `946 + v * (50 ±0)` // Estimated: `3510 + v * (2520 ±0)` - // Minimum execution time: 2_522_650_000 picoseconds. - Weight::from_parts(97_022_833, 0) + // Minimum execution time: 2_577_411_000 picoseconds. + Weight::from_parts(86_073_486, 0) .saturating_add(Weight::from_parts(0, 3510)) - // Standard Error: 6_751 - .saturating_add(Weight::from_parts(4_990_018, 0).saturating_mul(v.into())) + // Standard Error: 8_363 + .saturating_add(Weight::from_parts(5_074_828, 0).saturating_mul(v.into())) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(v.into()))) .saturating_add(Weight::from_parts(0, 2520).saturating_mul(v.into())) @@ -719,8 +693,8 @@ impl pallet_staking::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 3_833_000 picoseconds. - Weight::from_parts(4_108_000, 0) + // Minimum execution time: 3_539_000 picoseconds. + Weight::from_parts(3_903_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(6)) } @@ -740,8 +714,8 @@ impl pallet_staking::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 3_520_000 picoseconds. - Weight::from_parts(3_686_000, 0) + // Minimum execution time: 3_244_000 picoseconds. 
+ Weight::from_parts(3_450_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(6)) } @@ -769,8 +743,8 @@ impl pallet_staking::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `1704` // Estimated: `6248` - // Minimum execution time: 63_983_000 picoseconds. - Weight::from_parts(66_140_000, 0) + // Minimum execution time: 62_606_000 picoseconds. + Weight::from_parts(64_678_000, 0) .saturating_add(Weight::from_parts(0, 6248)) .saturating_add(T::DbWeight::get().reads(11)) .saturating_add(T::DbWeight::get().writes(6)) @@ -783,8 +757,8 @@ impl pallet_staking::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `658` // Estimated: `3510` - // Minimum execution time: 11_830_000 picoseconds. - Weight::from_parts(12_210_000, 0) + // Minimum execution time: 11_490_000 picoseconds. + Weight::from_parts(11_867_000, 0) .saturating_add(Weight::from_parts(0, 3510)) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(1)) @@ -795,8 +769,8 @@ impl pallet_staking::WeightInfo for WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_364_000 picoseconds. - Weight::from_parts(2_555_000, 0) + // Minimum execution time: 2_125_000 picoseconds. + Weight::from_parts(2_337_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(1)) } diff --git a/substrate/frame/staking/src/benchmarking.rs b/substrate/frame/staking/src/benchmarking.rs index 05c6bc39709..6f60c4909f4 100644 --- a/substrate/frame/staking/src/benchmarking.rs +++ b/substrate/frame/staking/src/benchmarking.rs @@ -464,16 +464,28 @@ benchmarks! { } set_payee { - let (stash, controller) = create_stash_controller::(USER_SEED, 100, Default::default())?; + let (stash, controller) = create_stash_controller::(USER_SEED, 100, RewardDestination::Staked)?; assert_eq!(Payee::::get(&stash), RewardDestination::Staked); whitelist_account!(controller); - }: _(RawOrigin::Signed(controller), RewardDestination::Controller) + }: _(RawOrigin::Signed(controller.clone()), RewardDestination::Account(controller.clone())) verify { - assert_eq!(Payee::::get(&stash), RewardDestination::Controller); + assert_eq!(Payee::::get(&stash), RewardDestination::Account(controller)); + } + + update_payee { + let (stash, controller) = create_stash_controller::(USER_SEED, 100, RewardDestination::Staked)?; + Payee::::insert(&stash, { + #[allow(deprecated)] + RewardDestination::Controller + }); + whitelist_account!(controller); + }: _(RawOrigin::Signed(controller.clone()), controller.clone()) + verify { + assert_eq!(Payee::::get(&stash), RewardDestination::Account(controller)); } set_controller { - let (stash, ctlr) = create_unique_stash_controller::(9000, 100, Default::default(), false)?; + let (stash, ctlr) = create_unique_stash_controller::(9000, 100, RewardDestination::Staked, false)?; // ensure `ctlr` is the currently stored controller. assert!(!Ledger::::contains_key(&stash)); assert!(Ledger::::contains_key(&ctlr)); @@ -551,40 +563,6 @@ benchmarks! { assert_eq!(UnappliedSlashes::::get(&era).len(), (MAX_SLASHES - s) as usize); } - payout_stakers_dead_controller { - let n in 0 .. 
T::MaxExposurePageSize::get() as u32; - let (validator, nominators) = create_validator_with_nominators::( - n, - T::MaxExposurePageSize::get() as u32, - true, - true, - RewardDestination::Controller, - )?; - - let current_era = CurrentEra::::get().unwrap(); - // set the commission for this particular era as well. - >::insert(current_era, validator.clone(), >::validators(&validator)); - - let caller = whitelisted_caller(); - let validator_controller = >::get(&validator).unwrap(); - let balance_before = T::Currency::free_balance(&validator_controller); - for (_, controller) in &nominators { - let balance = T::Currency::free_balance(controller); - ensure!(balance.is_zero(), "Controller has balance, but should be dead."); - } - }: payout_stakers_by_page(RawOrigin::Signed(caller), validator, current_era, 0) - verify { - let balance_after = T::Currency::free_balance(&validator_controller); - ensure!( - balance_before < balance_after, - "Balance of validator controller should have increased after payout.", - ); - for (_, controller) in &nominators { - let balance = T::Currency::free_balance(controller); - ensure!(!balance.is_zero(), "Payout not given to controller."); - } - } - payout_stakers_alive_staked { let n in 0 .. T::MaxExposurePageSize::get() as u32; let (validator, nominators) = create_validator_with_nominators::( diff --git a/substrate/frame/staking/src/lib.rs b/substrate/frame/staking/src/lib.rs index 9e4697e845b..2cfee38ab4f 100644 --- a/substrate/frame/staking/src/lib.rs +++ b/substrate/frame/staking/src/lib.rs @@ -240,9 +240,9 @@ //! [`Payee`] storage item (see //! [`set_payee`](Call::set_payee)), to be one of the following: //! -//! - Controller account, (obviously) not increasing the staked value. //! - Stash account, not increasing the staked value. //! - Stash account, also increasing the staked value. +//! - Any other account, sent as free balance. //! //! ### Additional Fund Management Operations //! @@ -404,7 +404,9 @@ pub enum RewardDestination { Staked, /// Pay into the stash account, not increasing the amount at stake. Stash, - /// Pay into the controller account. + #[deprecated( + note = "`Controller` will be removed after January 2024. Use `Account(controller)` instead. This variant now behaves the same as `Stash` variant." + )] Controller, /// Pay into a specified account. 
Account(AccountId), diff --git a/substrate/frame/staking/src/mock.rs b/substrate/frame/staking/src/mock.rs index d2afd8f26e2..364d125792b 100644 --- a/substrate/frame/staking/src/mock.rs +++ b/substrate/frame/staking/src/mock.rs @@ -594,7 +594,7 @@ pub(crate) fn current_era() -> EraIndex { pub(crate) fn bond(who: AccountId, val: Balance) { let _ = Balances::make_free_balance_be(&who, val); - assert_ok!(Staking::bond(RuntimeOrigin::signed(who), val, RewardDestination::Controller)); + assert_ok!(Staking::bond(RuntimeOrigin::signed(who), val, RewardDestination::Stash)); } pub(crate) fn bond_validator(who: AccountId, val: Balance) { diff --git a/substrate/frame/staking/src/pallet/impls.rs b/substrate/frame/staking/src/pallet/impls.rs index 40f30735258..6ea8e4c9d3b 100644 --- a/substrate/frame/staking/src/pallet/impls.rs +++ b/substrate/frame/staking/src/pallet/impls.rs @@ -338,9 +338,8 @@ impl Pallet { let dest = Self::payee(StakingAccount::Stash(stash.clone())); let maybe_imbalance = match dest { - RewardDestination::Controller => Self::bonded(stash) - .map(|controller| T::Currency::deposit_creating(&controller, amount)), - RewardDestination::Stash => T::Currency::deposit_into_existing(stash, amount).ok(), + RewardDestination::Stash => + T::Currency::deposit_into_existing(stash, amount).ok(), RewardDestination::Staked => Self::ledger(Stash(stash.clone())) .and_then(|mut ledger| { ledger.active += amount; @@ -357,6 +356,14 @@ impl Pallet { RewardDestination::Account(dest_account) => Some(T::Currency::deposit_creating(&dest_account, amount)), RewardDestination::None => None, + #[allow(deprecated)] + RewardDestination::Controller => Self::bonded(stash) + .map(|controller| { + defensive!("Paying out controller as reward destination which is deprecated and should be migrated"); + // This should never happen once payees with a `Controller` variant have been migrated. + // But if it does, just pay the controller account. + T::Currency::deposit_creating(&controller, amount) + }), }; maybe_imbalance .map(|imbalance| (imbalance, Self::payee(StakingAccount::Stash(stash.clone())))) diff --git a/substrate/frame/staking/src/pallet/mod.rs b/substrate/frame/staking/src/pallet/mod.rs index 18ad3e4a6cf..a68e9c90da9 100644 --- a/substrate/frame/staking/src/pallet/mod.rs +++ b/substrate/frame/staking/src/pallet/mod.rs @@ -842,6 +842,8 @@ pub mod pallet { CommissionTooLow, /// Some bound is not met. BoundNotMet, + /// Used when attempting to use deprecated controller account logic. + ControllerDeprecated, } #[pallet::hooks] @@ -1283,10 +1285,19 @@ pub mod pallet { payee: RewardDestination, ) -> DispatchResult { let controller = ensure_signed(origin)?; - let ledger = Self::ledger(Controller(controller))?; + let ledger = Self::ledger(Controller(controller.clone()))?; + + ensure!( + (payee != { + #[allow(deprecated)] + RewardDestination::Controller + }), + Error::::ControllerDeprecated + ); + let _ = ledger .set_payee(payee) - .defensive_proof("ledger was retrieved from storage, thus its bonded; qed."); + .defensive_proof("ledger was retrieved from storage, thus its bonded; qed.")?; Ok(()) } @@ -1872,6 +1883,36 @@ pub mod pallet { ensure_signed(origin)?; Self::do_payout_stakers_by_page(validator_stash, era, page) } + + /// Migrates an account's `RewardDestination::Controller` to + /// `RewardDestination::Account(controller)`. + /// + /// Effects will be felt instantly (as soon as this function is completed successfully). + /// + /// This will waive the transaction fee if the `payee` is successfully migrated. 
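+ ///
+ /// Any signed origin may dispatch this for a given `controller`. It succeeds only while the
+ /// stash bonded to that controller still uses the deprecated `RewardDestination::Controller`
+ /// payee (otherwise it fails with `Error::<T>::NotController`), in which case the entry is
+ /// rewritten to `RewardDestination::Account(controller)`.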
+ #[pallet::call_index(27)] + #[pallet::weight(T::WeightInfo::update_payee())] + pub fn update_payee( + origin: OriginFor, + controller: T::AccountId, + ) -> DispatchResultWithPostInfo { + let _ = ensure_signed(origin)?; + let ledger = Self::ledger(StakingAccount::Controller(controller.clone()))?; + + ensure!( + (Payee::::get(&ledger.stash) == { + #[allow(deprecated)] + RewardDestination::Controller + }), + Error::::NotController + ); + + let _ = ledger + .set_payee(RewardDestination::Account(controller)) + .defensive_proof("ledger should have been previously retrieved from storage.")?; + + Ok(Pays::No.into()) + } } } diff --git a/substrate/frame/staking/src/tests.rs b/substrate/frame/staking/src/tests.rs index bac2530b19b..7d967609f52 100644 --- a/substrate/frame/staking/src/tests.rs +++ b/substrate/frame/staking/src/tests.rs @@ -298,9 +298,9 @@ fn rewards_should_work() { let init_balance_101 = Balances::total_balance(&101); // Set payees - Payee::::insert(11, RewardDestination::Controller); - Payee::::insert(21, RewardDestination::Controller); - Payee::::insert(101, RewardDestination::Controller); + Payee::::insert(11, RewardDestination::Account(11)); + Payee::::insert(21, RewardDestination::Account(21)); + Payee::::insert(101, RewardDestination::Account(101)); Pallet::::reward_by_ids(vec![(11, 50)]); Pallet::::reward_by_ids(vec![(11, 50)]); @@ -417,7 +417,7 @@ fn staking_should_work() { // --- Block 2: start_session(2); // add a new candidate for being a validator. account 3 controlled by 4. - assert_ok!(Staking::bond(RuntimeOrigin::signed(3), 1500, RewardDestination::Controller)); + assert_ok!(Staking::bond(RuntimeOrigin::signed(3), 1500, RewardDestination::Account(3))); assert_ok!(Staking::validate(RuntimeOrigin::signed(3), ValidatorPrefs::default())); assert_ok!(Session::set_keys( RuntimeOrigin::signed(3), @@ -585,22 +585,10 @@ fn nominating_and_rewards_should_work() { assert_ok!(Staking::validate(RuntimeOrigin::signed(31), Default::default())); // Set payee to controller. 
- assert_ok!(Staking::set_payee( - RuntimeOrigin::signed(11), - RewardDestination::Controller - )); - assert_ok!(Staking::set_payee( - RuntimeOrigin::signed(21), - RewardDestination::Controller - )); - assert_ok!(Staking::set_payee( - RuntimeOrigin::signed(31), - RewardDestination::Controller - )); - assert_ok!(Staking::set_payee( - RuntimeOrigin::signed(41), - RewardDestination::Controller - )); + assert_ok!(Staking::set_payee(RuntimeOrigin::signed(11), RewardDestination::Stash)); + assert_ok!(Staking::set_payee(RuntimeOrigin::signed(21), RewardDestination::Stash)); + assert_ok!(Staking::set_payee(RuntimeOrigin::signed(31), RewardDestination::Stash)); + assert_ok!(Staking::set_payee(RuntimeOrigin::signed(41), RewardDestination::Stash)); // give the man some money let initial_balance = 1000; @@ -612,14 +600,14 @@ fn nominating_and_rewards_should_work() { assert_ok!(Staking::bond( RuntimeOrigin::signed(1), 1000, - RewardDestination::Controller + RewardDestination::Account(1) )); assert_ok!(Staking::nominate(RuntimeOrigin::signed(1), vec![11, 21, 31])); assert_ok!(Staking::bond( RuntimeOrigin::signed(3), 1000, - RewardDestination::Controller + RewardDestination::Account(3) )); assert_ok!(Staking::nominate(RuntimeOrigin::signed(3), vec![11, 21, 41])); @@ -1116,8 +1104,8 @@ fn reward_destination_works() { // (era 1, page 0) is claimed assert_eq!(Staking::claimed_rewards(1, &11), vec![0]); - // Change RewardDestination to Controller - >::insert(&11, RewardDestination::Controller); + // Change RewardDestination to Account + >::insert(&11, RewardDestination::Account(11)); // Check controller balance assert_eq!(Balances::free_balance(11), 23150); @@ -1129,8 +1117,8 @@ fn reward_destination_works() { mock::start_active_era(3); mock::make_all_reward_payment(2); - // Check that RewardDestination is Controller - assert_eq!(Staking::payee(11.into()), RewardDestination::Controller); + // Check that RewardDestination is Account(11) + assert_eq!(Staking::payee(11.into()), RewardDestination::Account(11)); // Check that reward went to the controller account assert_eq!(Balances::free_balance(11), recorded_stash_balance + total_payout_2); // Check that amount at stake is NOT increased @@ -1159,9 +1147,9 @@ fn validator_payment_prefs_work() { let commission = Perbill::from_percent(40); >::insert(&11, ValidatorPrefs { commission, ..Default::default() }); - // Reward controller so staked ratio doesn't change. - >::insert(&11, RewardDestination::Controller); - >::insert(&101, RewardDestination::Controller); + // Reward stash so staked ratio doesn't change. + >::insert(&11, RewardDestination::Stash); + >::insert(&101, RewardDestination::Stash); mock::start_active_era(1); mock::make_all_reward_payment(0); @@ -1250,8 +1238,8 @@ fn bond_extra_and_withdraw_unbonded_works() { // * it can unbond a portion of its funds from the stash account. // * Once the unbonding period is done, it can actually take the funds out of the stash. ExtBuilder::default().nominate(false).build_and_execute(|| { - // Set payee to controller. avoids confusion - assert_ok!(Staking::set_payee(RuntimeOrigin::signed(11), RewardDestination::Controller)); + // Set payee to stash. + assert_ok!(Staking::set_payee(RuntimeOrigin::signed(11), RewardDestination::Stash)); // Give account 11 some large free balance greater than total let _ = Balances::make_free_balance_be(&11, 1000000); @@ -1461,8 +1449,8 @@ fn rebond_works() { // * it can unbond a portion of its funds from the stash account. // * it can re-bond a portion of the funds scheduled to unlock. 
ExtBuilder::default().nominate(false).build_and_execute(|| { - // Set payee to controller. avoids confusion - assert_ok!(Staking::set_payee(RuntimeOrigin::signed(11), RewardDestination::Controller)); + // Set payee to stash. + assert_ok!(Staking::set_payee(RuntimeOrigin::signed(11), RewardDestination::Stash)); // Give account 11 some large free balance greater than total let _ = Balances::make_free_balance_be(&11, 1000000); @@ -1587,8 +1575,8 @@ fn rebond_works() { fn rebond_is_fifo() { // Rebond should proceed by reversing the most recent bond operations. ExtBuilder::default().nominate(false).build_and_execute(|| { - // Set payee to controller. avoids confusion - assert_ok!(Staking::set_payee(RuntimeOrigin::signed(11), RewardDestination::Controller)); + // Set payee to stash. + assert_ok!(Staking::set_payee(RuntimeOrigin::signed(11), RewardDestination::Stash)); // Give account 11 some large free balance greater than total let _ = Balances::make_free_balance_be(&11, 1000000); @@ -1683,8 +1671,8 @@ fn rebond_emits_right_value_in_event() { // When a user calls rebond with more than can be rebonded, things succeed, // and the rebond event emits the actual value rebonded. ExtBuilder::default().nominate(false).build_and_execute(|| { - // Set payee to controller. avoids confusion - assert_ok!(Staking::set_payee(RuntimeOrigin::signed(11), RewardDestination::Controller)); + // Set payee to stash. + assert_ok!(Staking::set_payee(RuntimeOrigin::signed(11), RewardDestination::Stash)); // Give account 11 some large free balance greater than total let _ = Balances::make_free_balance_be(&11, 1000000); @@ -1836,10 +1824,7 @@ fn switching_roles() { ExtBuilder::default().nominate(false).build_and_execute(|| { // Reset reward destination for i in &[11, 21] { - assert_ok!(Staking::set_payee( - RuntimeOrigin::signed(*i), - RewardDestination::Controller - )); + assert_ok!(Staking::set_payee(RuntimeOrigin::signed(*i), RewardDestination::Stash)); } assert_eq_uvec!(validator_controllers(), vec![21, 11]); @@ -1850,14 +1835,14 @@ fn switching_roles() { } // add 2 nominators - assert_ok!(Staking::bond(RuntimeOrigin::signed(1), 2000, RewardDestination::Controller)); + assert_ok!(Staking::bond(RuntimeOrigin::signed(1), 2000, RewardDestination::Account(1))); assert_ok!(Staking::nominate(RuntimeOrigin::signed(1), vec![11, 5])); - assert_ok!(Staking::bond(RuntimeOrigin::signed(3), 500, RewardDestination::Controller)); + assert_ok!(Staking::bond(RuntimeOrigin::signed(3), 500, RewardDestination::Account(3))); assert_ok!(Staking::nominate(RuntimeOrigin::signed(3), vec![21, 1])); // add a new validator candidate - assert_ok!(Staking::bond(RuntimeOrigin::signed(5), 1000, RewardDestination::Controller)); + assert_ok!(Staking::bond(RuntimeOrigin::signed(5), 1000, RewardDestination::Account(5))); assert_ok!(Staking::validate(RuntimeOrigin::signed(5), ValidatorPrefs::default())); assert_ok!(Session::set_keys( RuntimeOrigin::signed(5), @@ -1928,11 +1913,11 @@ fn bond_with_no_staked_value() { .build_and_execute(|| { // Can't bond with 1 assert_noop!( - Staking::bond(RuntimeOrigin::signed(1), 1, RewardDestination::Controller), + Staking::bond(RuntimeOrigin::signed(1), 1, RewardDestination::Account(1)), Error::::InsufficientBond, ); // bonded with absolute minimum value possible. 
- assert_ok!(Staking::bond(RuntimeOrigin::signed(1), 5, RewardDestination::Controller)); + assert_ok!(Staking::bond(RuntimeOrigin::signed(1), 5, RewardDestination::Account(1))); assert_eq!(Balances::locks(&1)[0].amount, 5); // unbonding even 1 will cause all to be unbonded. @@ -1974,15 +1959,12 @@ fn bond_with_little_staked_value_bounded() { .build_and_execute(|| { // setup assert_ok!(Staking::chill(RuntimeOrigin::signed(31))); - assert_ok!(Staking::set_payee( - RuntimeOrigin::signed(11), - RewardDestination::Controller - )); + assert_ok!(Staking::set_payee(RuntimeOrigin::signed(11), RewardDestination::Stash)); let init_balance_1 = Balances::free_balance(&1); let init_balance_11 = Balances::free_balance(&11); // Stingy validator. - assert_ok!(Staking::bond(RuntimeOrigin::signed(1), 1, RewardDestination::Controller)); + assert_ok!(Staking::bond(RuntimeOrigin::signed(1), 1, RewardDestination::Account(1))); assert_ok!(Staking::validate(RuntimeOrigin::signed(1), ValidatorPrefs::default())); assert_ok!(Session::set_keys( RuntimeOrigin::signed(1), @@ -2061,14 +2043,14 @@ fn bond_with_duplicate_vote_should_be_ignored_by_election_provider() { assert_ok!(Staking::bond( RuntimeOrigin::signed(1), 1000, - RewardDestination::Controller + RewardDestination::Account(1) )); assert_ok!(Staking::nominate(RuntimeOrigin::signed(1), vec![11, 11, 11, 21, 31])); assert_ok!(Staking::bond( RuntimeOrigin::signed(3), 1000, - RewardDestination::Controller + RewardDestination::Account(3) )); assert_ok!(Staking::nominate(RuntimeOrigin::signed(3), vec![21, 31])); @@ -2114,14 +2096,14 @@ fn bond_with_duplicate_vote_should_be_ignored_by_election_provider_elected() { assert_ok!(Staking::bond( RuntimeOrigin::signed(1), 1000, - RewardDestination::Controller + RewardDestination::Account(1) )); assert_ok!(Staking::nominate(RuntimeOrigin::signed(1), vec![11, 11, 11, 21])); assert_ok!(Staking::bond( RuntimeOrigin::signed(3), 1000, - RewardDestination::Controller + RewardDestination::Account(3) )); assert_ok!(Staking::nominate(RuntimeOrigin::signed(3), vec![21])); @@ -3530,8 +3512,8 @@ fn claim_reward_at_the_last_era_and_no_double_claim_and_invalid_claim() { let part_for_101 = Perbill::from_rational::(125, 1125); // Check state - Payee::::insert(11, RewardDestination::Controller); - Payee::::insert(101, RewardDestination::Controller); + Payee::::insert(11, RewardDestination::Account(11)); + Payee::::insert(101, RewardDestination::Account(101)); Pallet::::reward_by_ids(vec![(11, 1)]); // Compute total payout now for whole duration as other parameter won't change @@ -3820,8 +3802,8 @@ fn test_multi_page_payout_stakers_by_page() { staking_events_since_last_call().as_slice(), &[ .., - Event::Rewarded { stash: 1063, dest: RewardDestination::Controller, amount: 111 }, - Event::Rewarded { stash: 1064, dest: RewardDestination::Controller, amount: 111 }, + Event::Rewarded { stash: 1063, dest: RewardDestination::Stash, amount: 111 }, + Event::Rewarded { stash: 1064, dest: RewardDestination::Stash, amount: 111 }, ] )); @@ -3843,8 +3825,8 @@ fn test_multi_page_payout_stakers_by_page() { events.as_slice(), &[ Event::PayoutStarted { era_index: 1, validator_stash: 11 }, - Event::Rewarded { stash: 1065, dest: RewardDestination::Controller, amount: 111 }, - Event::Rewarded { stash: 1066, dest: RewardDestination::Controller, amount: 111 }, + Event::Rewarded { stash: 1065, dest: RewardDestination::Stash, amount: 111 }, + Event::Rewarded { stash: 1066, dest: RewardDestination::Stash, amount: 111 }, .. 
] )); @@ -4685,40 +4667,6 @@ fn offences_weight_calculated_correctly() { }); } -#[test] -fn payout_creates_controller() { - ExtBuilder::default().has_stakers(false).build_and_execute(|| { - let balance = 1000; - // Create a validator: - bond_validator(11, balance); - - // create a stash/controller pair and nominate - let (stash, controller) = testing_utils::create_unique_stash_controller::( - 0, - 100, - RewardDestination::Controller, - false, - ) - .unwrap(); - - assert_ok!(Staking::nominate(RuntimeOrigin::signed(controller), vec![11])); - - // kill controller - assert_ok!(Balances::transfer_allow_death(RuntimeOrigin::signed(controller), stash, 100)); - assert_eq!(Balances::free_balance(controller), 0); - - mock::start_active_era(1); - Staking::reward_by_ids(vec![(11, 1)]); - // compute and ensure the reward amount is greater than zero. - let _ = current_total_payout_for_duration(reward_time_per_era()); - mock::start_active_era(2); - assert_ok!(Staking::payout_stakers_by_page(RuntimeOrigin::signed(1337), 11, 1, 0)); - - // Controller is created - assert!(Balances::free_balance(controller) > 0); - }) -} - #[test] fn payout_to_any_account_works() { ExtBuilder::default().has_stakers(false).build_and_execute(|| { @@ -5462,7 +5410,7 @@ fn min_bond_checks_work() { .min_validator_bond(1_500) .build_and_execute(|| { // 500 is not enough for any role - assert_ok!(Staking::bond(RuntimeOrigin::signed(3), 500, RewardDestination::Controller)); + assert_ok!(Staking::bond(RuntimeOrigin::signed(3), 500, RewardDestination::Stash)); assert_noop!( Staking::nominate(RuntimeOrigin::signed(3), vec![1]), Error::::InsufficientBond @@ -5524,19 +5472,11 @@ fn chill_other_works() { Balances::make_free_balance_be(&c, 100_000); // Nominator - assert_ok!(Staking::bond( - RuntimeOrigin::signed(a), - 1000, - RewardDestination::Controller - )); + assert_ok!(Staking::bond(RuntimeOrigin::signed(a), 1000, RewardDestination::Stash)); assert_ok!(Staking::nominate(RuntimeOrigin::signed(a), vec![1])); // Validator - assert_ok!(Staking::bond( - RuntimeOrigin::signed(b), - 1500, - RewardDestination::Controller - )); + assert_ok!(Staking::bond(RuntimeOrigin::signed(b), 1500, RewardDestination::Stash)); assert_ok!(Staking::validate(RuntimeOrigin::signed(b), ValidatorPrefs::default())); } @@ -5683,7 +5623,7 @@ fn capped_stakers_works() { let (_, controller) = testing_utils::create_stash_controller::( i + 10_000_000, 100, - RewardDestination::Controller, + RewardDestination::Stash, ) .unwrap(); assert_ok!(Staking::validate( @@ -5694,12 +5634,9 @@ fn capped_stakers_works() { } // but no more - let (_, last_validator) = testing_utils::create_stash_controller::( - 1337, - 100, - RewardDestination::Controller, - ) - .unwrap(); + let (_, last_validator) = + testing_utils::create_stash_controller::(1337, 100, RewardDestination::Stash) + .unwrap(); assert_noop!( Staking::validate(RuntimeOrigin::signed(last_validator), ValidatorPrefs::default()), @@ -5712,7 +5649,7 @@ fn capped_stakers_works() { let (_, controller) = testing_utils::create_stash_controller::( i + 20_000_000, 100, - RewardDestination::Controller, + RewardDestination::Stash, ) .unwrap(); assert_ok!(Staking::nominate(RuntimeOrigin::signed(controller), vec![1])); @@ -5723,7 +5660,7 @@ fn capped_stakers_works() { let (_, last_nominator) = testing_utils::create_stash_controller::( 30_000_000, 100, - RewardDestination::Controller, + RewardDestination::Stash, ) .unwrap(); assert_noop!( @@ -6787,7 +6724,7 @@ mod ledger { assert_ok!(Staking::bond( RuntimeOrigin::signed(10), 100, - 
RewardDestination::Controller + RewardDestination::Account(10) )); assert_eq!(>::get(&10), Some(10)); @@ -6896,4 +6833,38 @@ mod ledger { >::remove(42); // ensures try-state checks pass. }) } + + #[test] + #[allow(deprecated)] + fn set_payee_errors_on_controller_destination() { + ExtBuilder::default().build_and_execute(|| { + Payee::::insert(11, RewardDestination::Staked); + assert_noop!( + Staking::set_payee(RuntimeOrigin::signed(11), RewardDestination::Controller), + Error::::ControllerDeprecated + ); + assert_eq!(Payee::::get(&11), RewardDestination::Staked); + }) + } + + #[test] + #[allow(deprecated)] + fn update_payee_migration_works() { + ExtBuilder::default().build_and_execute(|| { + // migrate a `Controller` variant to `Account` variant. + Payee::::insert(11, RewardDestination::Controller); + assert_eq!(Payee::::get(&11), RewardDestination::Controller); + assert_ok!(Staking::update_payee(RuntimeOrigin::signed(11), 11)); + assert_eq!(Payee::::get(&11), RewardDestination::Account(11)); + + // Do not migrate a variant if not `Controller`. + Payee::::insert(21, RewardDestination::Stash); + assert_eq!(Payee::::get(&21), RewardDestination::Stash); + assert_noop!( + Staking::update_payee(RuntimeOrigin::signed(11), 21), + Error::::NotController + ); + assert_eq!(Payee::::get(&21), RewardDestination::Stash); + }) + } } diff --git a/substrate/frame/staking/src/weights.rs b/substrate/frame/staking/src/weights.rs index ad6dbccde9f..ad9ca75a032 100644 --- a/substrate/frame/staking/src/weights.rs +++ b/substrate/frame/staking/src/weights.rs @@ -18,7 +18,7 @@ //! Autogenerated weights for `pallet_staking` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-10-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-11-17, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` //! HOSTNAME: `runner-yprdrvc7-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` @@ -59,6 +59,7 @@ pub trait WeightInfo { fn nominate(n: u32, ) -> Weight; fn chill() -> Weight; fn set_payee() -> Weight; + fn update_payee() -> Weight; fn set_controller() -> Weight; fn set_validator_count() -> Weight; fn force_no_eras() -> Weight; @@ -67,7 +68,6 @@ pub trait WeightInfo { fn set_invulnerables(v: u32, ) -> Weight; fn force_unstake(s: u32, ) -> Weight; fn cancel_deferred_slash(s: u32, ) -> Weight; - fn payout_stakers_dead_controller(n: u32, ) -> Weight; fn payout_stakers_alive_staked(n: u32, ) -> Weight; fn rebond(l: u32, ) -> Weight; fn reap_stash(s: u32, ) -> Weight; @@ -98,8 +98,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `927` // Estimated: `4764` - // Minimum execution time: 42_811_000 picoseconds. - Weight::from_parts(44_465_000, 4764) + // Minimum execution time: 43_427_000 picoseconds. + Weight::from_parts(45_221_000, 4764) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) } @@ -119,8 +119,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `1990` // Estimated: `8877` - // Minimum execution time: 87_628_000 picoseconds. - Weight::from_parts(90_020_000, 8877) + // Minimum execution time: 87_100_000 picoseconds. 
+ Weight::from_parts(90_599_000, 8877) .saturating_add(T::DbWeight::get().reads(9_u64)) .saturating_add(T::DbWeight::get().writes(7_u64)) } @@ -146,8 +146,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `2195` // Estimated: `8877` - // Minimum execution time: 91_655_000 picoseconds. - Weight::from_parts(94_146_000, 8877) + // Minimum execution time: 91_488_000 picoseconds. + Weight::from_parts(94_126_000, 8877) .saturating_add(T::DbWeight::get().reads(12_u64)) .saturating_add(T::DbWeight::get().writes(7_u64)) } @@ -166,10 +166,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `1115` // Estimated: `4764` - // Minimum execution time: 42_953_000 picoseconds. - Weight::from_parts(44_648_505, 4764) - // Standard Error: 937 - .saturating_add(Weight::from_parts(51_090, 0).saturating_mul(s.into())) + // Minimum execution time: 42_713_000 picoseconds. + Weight::from_parts(44_530_499, 4764) + // Standard Error: 1_067 + .saturating_add(Weight::from_parts(51_411, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -206,10 +206,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `2196 + s * (4 ±0)` // Estimated: `6248 + s * (4 ±0)` - // Minimum execution time: 89_218_000 picoseconds. - Weight::from_parts(97_761_884, 6248) - // Standard Error: 3_888 - .saturating_add(Weight::from_parts(1_346_441, 0).saturating_mul(s.into())) + // Minimum execution time: 88_700_000 picoseconds. + Weight::from_parts(98_329_624, 6248) + // Standard Error: 4_477 + .saturating_add(Weight::from_parts(1_347_380, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(13_u64)) .saturating_add(T::DbWeight::get().writes(11_u64)) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(s.into()))) @@ -241,8 +241,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `1372` // Estimated: `4556` - // Minimum execution time: 51_200_000 picoseconds. - Weight::from_parts(53_403_000, 4556) + // Minimum execution time: 51_877_000 picoseconds. + Weight::from_parts(53_503_000, 4556) .saturating_add(T::DbWeight::get().reads(11_u64)) .saturating_add(T::DbWeight::get().writes(5_u64)) } @@ -255,10 +255,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `1280 + k * (569 ±0)` // Estimated: `4556 + k * (3033 ±0)` - // Minimum execution time: 28_963_000 picoseconds. - Weight::from_parts(29_884_371, 4556) - // Standard Error: 9_063 - .saturating_add(Weight::from_parts(6_532_967, 0).saturating_mul(k.into())) + // Minimum execution time: 29_604_000 picoseconds. + Weight::from_parts(31_726_296, 4556) + // Standard Error: 6_350 + .saturating_add(Weight::from_parts(6_416_846, 0).saturating_mul(k.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(k.into()))) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(k.into()))) @@ -291,10 +291,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `1866 + n * (102 ±0)` // Estimated: `6248 + n * (2520 ±0)` - // Minimum execution time: 64_644_000 picoseconds. - Weight::from_parts(62_855_016, 6248) - // Standard Error: 17_528 - .saturating_add(Weight::from_parts(3_993_850, 0).saturating_mul(n.into())) + // Minimum execution time: 64_276_000 picoseconds. 
+ Weight::from_parts(62_615_844, 6248) + // Standard Error: 14_914 + .saturating_add(Weight::from_parts(4_097_019, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(12_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(n.into()))) .saturating_add(T::DbWeight::get().writes(6_u64)) @@ -318,8 +318,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `1650` // Estimated: `6248` - // Minimum execution time: 54_505_000 picoseconds. - Weight::from_parts(56_026_000, 6248) + // Minimum execution time: 55_064_000 picoseconds. + Weight::from_parts(56_566_000, 6248) .saturating_add(T::DbWeight::get().reads(8_u64)) .saturating_add(T::DbWeight::get().writes(6_u64)) } @@ -333,11 +333,26 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `902` // Estimated: `4556` - // Minimum execution time: 16_639_000 picoseconds. - Weight::from_parts(17_342_000, 4556) + // Minimum execution time: 16_626_000 picoseconds. + Weight::from_parts(17_519_000, 4556) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } + /// Storage: `Staking::Ledger` (r:1 w:0) + /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) + /// Storage: `Staking::Payee` (r:1 w:1) + /// Proof: `Staking::Payee` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) + /// Storage: `Staking::Bonded` (r:1 w:0) + /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) + fn update_payee() -> Weight { + // Proof Size summary in bytes: + // Measured: `969` + // Estimated: `4556` + // Minimum execution time: 20_545_000 picoseconds. + Weight::from_parts(21_395_000, 4556) + .saturating_add(T::DbWeight::get().reads(3_u64)) + .saturating_add(T::DbWeight::get().writes(1_u64)) + } /// Storage: `Staking::Bonded` (r:1 w:1) /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) /// Storage: `Staking::Ledger` (r:1 w:2) @@ -346,8 +361,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `902` // Estimated: `4556` - // Minimum execution time: 20_334_000 picoseconds. - Weight::from_parts(21_067_000, 4556) + // Minimum execution time: 20_179_000 picoseconds. + Weight::from_parts(20_728_000, 4556) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } @@ -357,8 +372,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_680_000 picoseconds. - Weight::from_parts(2_774_000, 0) + // Minimum execution time: 2_594_000 picoseconds. + Weight::from_parts(2_777_000, 0) .saturating_add(T::DbWeight::get().writes(1_u64)) } /// Storage: `Staking::ForceEra` (r:0 w:1) @@ -367,8 +382,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 8_613_000 picoseconds. - Weight::from_parts(8_922_000, 0) + // Minimum execution time: 8_302_000 picoseconds. + Weight::from_parts(8_741_000, 0) .saturating_add(T::DbWeight::get().writes(1_u64)) } /// Storage: `Staking::ForceEra` (r:0 w:1) @@ -377,8 +392,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 8_657_000 picoseconds. 
- Weight::from_parts(9_020_000, 0) + // Minimum execution time: 8_504_000 picoseconds. + Weight::from_parts(8_774_000, 0) .saturating_add(T::DbWeight::get().writes(1_u64)) } /// Storage: `Staking::ForceEra` (r:0 w:1) @@ -387,8 +402,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 8_600_000 picoseconds. - Weight::from_parts(9_157_000, 0) + // Minimum execution time: 8_474_000 picoseconds. + Weight::from_parts(8_740_000, 0) .saturating_add(T::DbWeight::get().writes(1_u64)) } /// Storage: `Staking::Invulnerables` (r:0 w:1) @@ -398,10 +413,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_792_000 picoseconds. - Weight::from_parts(3_293_694, 0) - // Standard Error: 31 - .saturating_add(Weight::from_parts(10_668, 0).saturating_mul(v.into())) + // Minimum execution time: 2_732_000 picoseconds. + Weight::from_parts(3_360_048, 0) + // Standard Error: 34 + .saturating_add(Weight::from_parts(9_964, 0).saturating_mul(v.into())) .saturating_add(T::DbWeight::get().writes(1_u64)) } /// Storage: `Staking::SlashingSpans` (r:1 w:1) @@ -437,10 +452,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `2196 + s * (4 ±0)` // Estimated: `6248 + s * (4 ±0)` - // Minimum execution time: 86_537_000 picoseconds. - Weight::from_parts(95_127_637, 6248) - // Standard Error: 3_902 - .saturating_add(Weight::from_parts(1_336_182, 0).saturating_mul(s.into())) + // Minimum execution time: 87_495_000 picoseconds. + Weight::from_parts(95_710_388, 6248) + // Standard Error: 5_849 + .saturating_add(Weight::from_parts(1_339_335, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(13_u64)) .saturating_add(T::DbWeight::get().writes(12_u64)) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(s.into()))) @@ -453,54 +468,13 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `66672` // Estimated: `70137` - // Minimum execution time: 100_777_000 picoseconds. - Weight::from_parts(896_540_406, 70137) - // Standard Error: 57_788 - .saturating_add(Weight::from_parts(4_870_910, 0).saturating_mul(s.into())) + // Minimum execution time: 98_255_000 picoseconds. 
+ Weight::from_parts(892_807_378, 70137) + // Standard Error: 57_735 + .saturating_add(Weight::from_parts(4_876_449, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } - /// Storage: `Staking::CurrentEra` (r:1 w:0) - /// Proof: `Staking::CurrentEra` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - /// Storage: `Staking::ErasStakersOverview` (r:1 w:0) - /// Proof: `Staking::ErasStakersOverview` (`max_values`: None, `max_size`: Some(92), added: 2567, mode: `MaxEncodedLen`) - /// Storage: `Staking::ErasValidatorReward` (r:1 w:0) - /// Proof: `Staking::ErasValidatorReward` (`max_values`: None, `max_size`: Some(28), added: 2503, mode: `MaxEncodedLen`) - /// Storage: `Staking::Bonded` (r:257 w:0) - /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) - /// Storage: `Staking::Ledger` (r:1 w:1) - /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) - /// Storage: `Balances::Locks` (r:1 w:1) - /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) - /// Storage: `Balances::Freezes` (r:1 w:0) - /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`) - /// Storage: `System::Account` (r:258 w:258) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - /// Storage: `Staking::ClaimedRewards` (r:1 w:1) - /// Proof: `Staking::ClaimedRewards` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// Storage: `Staking::ErasStakersPaged` (r:1 w:0) - /// Proof: `Staking::ErasStakersPaged` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// Storage: `Staking::ErasRewardPoints` (r:1 w:0) - /// Proof: `Staking::ErasRewardPoints` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// Storage: `Staking::ErasValidatorPrefs` (r:1 w:0) - /// Proof: `Staking::ErasValidatorPrefs` (`max_values`: None, `max_size`: Some(57), added: 2532, mode: `MaxEncodedLen`) - /// Storage: `Staking::Payee` (r:257 w:0) - /// Proof: `Staking::Payee` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) - /// The range of component `n` is `[0, 256]`. - fn payout_stakers_dead_controller(n: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `21644 + n * (155 ±0)` - // Estimated: `21412 + n * (2603 ±0)` - // Minimum execution time: 133_129_000 picoseconds. - Weight::from_parts(190_983_630, 21412) - // Standard Error: 17_497 - .saturating_add(Weight::from_parts(24_723_153, 0).saturating_mul(n.into())) - .saturating_add(T::DbWeight::get().reads(14_u64)) - .saturating_add(T::DbWeight::get().reads((3_u64).saturating_mul(n.into()))) - .saturating_add(T::DbWeight::get().writes(5_u64)) - .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(n.into()))) - .saturating_add(Weight::from_parts(0, 2603).saturating_mul(n.into())) - } /// Storage: `Staking::Bonded` (r:257 w:0) /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) /// Storage: `Staking::Ledger` (r:257 w:257) @@ -534,10 +508,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `33297 + n * (377 ±0)` // Estimated: `30944 + n * (3774 ±3)` - // Minimum execution time: 149_773_000 picoseconds. 
- Weight::from_parts(151_527_124, 30944) - // Standard Error: 24_152 - .saturating_add(Weight::from_parts(46_124_074, 0).saturating_mul(n.into())) + // Minimum execution time: 144_258_000 picoseconds. + Weight::from_parts(175_256_796, 30944) + // Standard Error: 24_339 + .saturating_add(Weight::from_parts(46_011_700, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(14_u64)) .saturating_add(T::DbWeight::get().reads((6_u64).saturating_mul(n.into()))) .saturating_add(T::DbWeight::get().writes(4_u64)) @@ -561,10 +535,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `1991 + l * (7 ±0)` // Estimated: `8877` - // Minimum execution time: 81_618_000 picoseconds. - Weight::from_parts(85_245_630, 8877) - // Standard Error: 5_049 - .saturating_add(Weight::from_parts(39_811, 0).saturating_mul(l.into())) + // Minimum execution time: 83_532_000 picoseconds. + Weight::from_parts(86_757_943, 8877) + // Standard Error: 6_379 + .saturating_add(Weight::from_parts(57_566, 0).saturating_mul(l.into())) .saturating_add(T::DbWeight::get().reads(9_u64)) .saturating_add(T::DbWeight::get().writes(7_u64)) } @@ -599,10 +573,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `2196 + s * (4 ±0)` // Estimated: `6248 + s * (4 ±0)` - // Minimum execution time: 95_395_000 picoseconds. - Weight::from_parts(100_459_234, 6248) - // Standard Error: 3_781 - .saturating_add(Weight::from_parts(1_333_607, 0).saturating_mul(s.into())) + // Minimum execution time: 96_776_000 picoseconds. + Weight::from_parts(100_950_027, 6248) + // Standard Error: 4_719 + .saturating_add(Weight::from_parts(1_432_091, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(12_u64)) .saturating_add(T::DbWeight::get().writes(11_u64)) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(s.into()))) @@ -648,12 +622,12 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0 + n * (720 ±0) + v * (3598 ±0)` // Estimated: `512390 + n * (3566 ±0) + v * (3566 ±0)` - // Minimum execution time: 571_337_000 picoseconds. - Weight::from_parts(578_857_000, 512390) - // Standard Error: 2_090_511 - .saturating_add(Weight::from_parts(68_626_083, 0).saturating_mul(v.into())) - // Standard Error: 208_307 - .saturating_add(Weight::from_parts(18_645_374, 0).saturating_mul(n.into())) + // Minimum execution time: 577_699_000 picoseconds. + Weight::from_parts(582_827_000, 512390) + // Standard Error: 2_000_851 + .saturating_add(Weight::from_parts(67_316_744, 0).saturating_mul(v.into())) + // Standard Error: 199_373 + .saturating_add(Weight::from_parts(18_503_387, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(206_u64)) .saturating_add(T::DbWeight::get().reads((5_u64).saturating_mul(v.into()))) .saturating_add(T::DbWeight::get().reads((4_u64).saturating_mul(n.into()))) @@ -684,12 +658,12 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `3175 + n * (911 ±0) + v * (395 ±0)` // Estimated: `512390 + n * (3566 ±0) + v * (3566 ±0)` - // Minimum execution time: 34_590_734_000 picoseconds. - Weight::from_parts(35_238_091_000, 512390) - // Standard Error: 427_974 - .saturating_add(Weight::from_parts(5_084_196, 0).saturating_mul(v.into())) - // Standard Error: 427_974 - .saturating_add(Weight::from_parts(4_503_420, 0).saturating_mul(n.into())) + // Minimum execution time: 34_048_778_000 picoseconds. 
+ Weight::from_parts(34_397_777_000, 512390) + // Standard Error: 346_115 + .saturating_add(Weight::from_parts(3_704_941, 0).saturating_mul(v.into())) + // Standard Error: 346_115 + .saturating_add(Weight::from_parts(4_064_819, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(201_u64)) .saturating_add(T::DbWeight::get().reads((5_u64).saturating_mul(v.into()))) .saturating_add(T::DbWeight::get().reads((4_u64).saturating_mul(n.into()))) @@ -706,10 +680,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `979 + v * (50 ±0)` // Estimated: `3510 + v * (2520 ±0)` - // Minimum execution time: 2_509_588_000 picoseconds. - Weight::from_parts(89_050_539, 3510) - // Standard Error: 11_803 - .saturating_add(Weight::from_parts(5_031_416, 0).saturating_mul(v.into())) + // Minimum execution time: 2_473_149_000 picoseconds. + Weight::from_parts(84_721_859, 3510) + // Standard Error: 8_690 + .saturating_add(Weight::from_parts(4_870_439, 0).saturating_mul(v.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(v.into()))) .saturating_add(Weight::from_parts(0, 2520).saturating_mul(v.into())) @@ -730,8 +704,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 5_347_000 picoseconds. - Weight::from_parts(5_562_000, 0) + // Minimum execution time: 5_312_000 picoseconds. + Weight::from_parts(5_897_000, 0) .saturating_add(T::DbWeight::get().writes(6_u64)) } /// Storage: `Staking::MinCommission` (r:0 w:1) @@ -750,8 +724,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 4_725_000 picoseconds. - Weight::from_parts(5_075_000, 0) + // Minimum execution time: 4_676_000 picoseconds. + Weight::from_parts(4_913_000, 0) .saturating_add(T::DbWeight::get().writes(6_u64)) } /// Storage: `Staking::Ledger` (r:1 w:0) @@ -778,8 +752,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `1773` // Estimated: `6248` - // Minimum execution time: 67_204_000 picoseconds. - Weight::from_parts(69_197_000, 6248) + // Minimum execution time: 67_286_000 picoseconds. + Weight::from_parts(69_081_000, 6248) .saturating_add(T::DbWeight::get().reads(11_u64)) .saturating_add(T::DbWeight::get().writes(6_u64)) } @@ -791,8 +765,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `691` // Estimated: `3510` - // Minimum execution time: 12_497_000 picoseconds. - Weight::from_parts(12_943_000, 3510) + // Minimum execution time: 12_749_000 picoseconds. + Weight::from_parts(13_275_000, 3510) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -802,8 +776,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 3_245_000 picoseconds. - Weight::from_parts(3_352_000, 0) + // Minimum execution time: 3_155_000 picoseconds. + Weight::from_parts(3_319_000, 0) .saturating_add(T::DbWeight::get().writes(1_u64)) } } @@ -824,8 +798,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `927` // Estimated: `4764` - // Minimum execution time: 42_811_000 picoseconds. - Weight::from_parts(44_465_000, 4764) + // Minimum execution time: 43_427_000 picoseconds. 
+ Weight::from_parts(45_221_000, 4764) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) } @@ -845,8 +819,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `1990` // Estimated: `8877` - // Minimum execution time: 87_628_000 picoseconds. - Weight::from_parts(90_020_000, 8877) + // Minimum execution time: 87_100_000 picoseconds. + Weight::from_parts(90_599_000, 8877) .saturating_add(RocksDbWeight::get().reads(9_u64)) .saturating_add(RocksDbWeight::get().writes(7_u64)) } @@ -872,8 +846,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `2195` // Estimated: `8877` - // Minimum execution time: 91_655_000 picoseconds. - Weight::from_parts(94_146_000, 8877) + // Minimum execution time: 91_488_000 picoseconds. + Weight::from_parts(94_126_000, 8877) .saturating_add(RocksDbWeight::get().reads(12_u64)) .saturating_add(RocksDbWeight::get().writes(7_u64)) } @@ -892,10 +866,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `1115` // Estimated: `4764` - // Minimum execution time: 42_953_000 picoseconds. - Weight::from_parts(44_648_505, 4764) - // Standard Error: 937 - .saturating_add(Weight::from_parts(51_090, 0).saturating_mul(s.into())) + // Minimum execution time: 42_713_000 picoseconds. + Weight::from_parts(44_530_499, 4764) + // Standard Error: 1_067 + .saturating_add(Weight::from_parts(51_411, 0).saturating_mul(s.into())) .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -932,10 +906,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `2196 + s * (4 ±0)` // Estimated: `6248 + s * (4 ±0)` - // Minimum execution time: 89_218_000 picoseconds. - Weight::from_parts(97_761_884, 6248) - // Standard Error: 3_888 - .saturating_add(Weight::from_parts(1_346_441, 0).saturating_mul(s.into())) + // Minimum execution time: 88_700_000 picoseconds. + Weight::from_parts(98_329_624, 6248) + // Standard Error: 4_477 + .saturating_add(Weight::from_parts(1_347_380, 0).saturating_mul(s.into())) .saturating_add(RocksDbWeight::get().reads(13_u64)) .saturating_add(RocksDbWeight::get().writes(11_u64)) .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(s.into()))) @@ -967,8 +941,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `1372` // Estimated: `4556` - // Minimum execution time: 51_200_000 picoseconds. - Weight::from_parts(53_403_000, 4556) + // Minimum execution time: 51_877_000 picoseconds. + Weight::from_parts(53_503_000, 4556) .saturating_add(RocksDbWeight::get().reads(11_u64)) .saturating_add(RocksDbWeight::get().writes(5_u64)) } @@ -981,10 +955,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `1280 + k * (569 ±0)` // Estimated: `4556 + k * (3033 ±0)` - // Minimum execution time: 28_963_000 picoseconds. - Weight::from_parts(29_884_371, 4556) - // Standard Error: 9_063 - .saturating_add(Weight::from_parts(6_532_967, 0).saturating_mul(k.into())) + // Minimum execution time: 29_604_000 picoseconds. 
+ Weight::from_parts(31_726_296, 4556) + // Standard Error: 6_350 + .saturating_add(Weight::from_parts(6_416_846, 0).saturating_mul(k.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(k.into()))) .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(k.into()))) @@ -1017,10 +991,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `1866 + n * (102 ±0)` // Estimated: `6248 + n * (2520 ±0)` - // Minimum execution time: 64_644_000 picoseconds. - Weight::from_parts(62_855_016, 6248) - // Standard Error: 17_528 - .saturating_add(Weight::from_parts(3_993_850, 0).saturating_mul(n.into())) + // Minimum execution time: 64_276_000 picoseconds. + Weight::from_parts(62_615_844, 6248) + // Standard Error: 14_914 + .saturating_add(Weight::from_parts(4_097_019, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads(12_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(n.into()))) .saturating_add(RocksDbWeight::get().writes(6_u64)) @@ -1044,8 +1018,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `1650` // Estimated: `6248` - // Minimum execution time: 54_505_000 picoseconds. - Weight::from_parts(56_026_000, 6248) + // Minimum execution time: 55_064_000 picoseconds. + Weight::from_parts(56_566_000, 6248) .saturating_add(RocksDbWeight::get().reads(8_u64)) .saturating_add(RocksDbWeight::get().writes(6_u64)) } @@ -1059,11 +1033,26 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `902` // Estimated: `4556` - // Minimum execution time: 16_639_000 picoseconds. - Weight::from_parts(17_342_000, 4556) + // Minimum execution time: 16_626_000 picoseconds. + Weight::from_parts(17_519_000, 4556) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } + /// Storage: `Staking::Ledger` (r:1 w:0) + /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) + /// Storage: `Staking::Payee` (r:1 w:1) + /// Proof: `Staking::Payee` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) + /// Storage: `Staking::Bonded` (r:1 w:0) + /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) + fn update_payee() -> Weight { + // Proof Size summary in bytes: + // Measured: `969` + // Estimated: `4556` + // Minimum execution time: 20_545_000 picoseconds. + Weight::from_parts(21_395_000, 4556) + .saturating_add(RocksDbWeight::get().reads(3_u64)) + .saturating_add(RocksDbWeight::get().writes(1_u64)) + } /// Storage: `Staking::Bonded` (r:1 w:1) /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) /// Storage: `Staking::Ledger` (r:1 w:2) @@ -1072,8 +1061,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `902` // Estimated: `4556` - // Minimum execution time: 20_334_000 picoseconds. - Weight::from_parts(21_067_000, 4556) + // Minimum execution time: 20_179_000 picoseconds. + Weight::from_parts(20_728_000, 4556) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } @@ -1083,8 +1072,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_680_000 picoseconds. - Weight::from_parts(2_774_000, 0) + // Minimum execution time: 2_594_000 picoseconds. 
+ Weight::from_parts(2_777_000, 0) .saturating_add(RocksDbWeight::get().writes(1_u64)) } /// Storage: `Staking::ForceEra` (r:0 w:1) @@ -1093,8 +1082,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 8_613_000 picoseconds. - Weight::from_parts(8_922_000, 0) + // Minimum execution time: 8_302_000 picoseconds. + Weight::from_parts(8_741_000, 0) .saturating_add(RocksDbWeight::get().writes(1_u64)) } /// Storage: `Staking::ForceEra` (r:0 w:1) @@ -1103,8 +1092,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 8_657_000 picoseconds. - Weight::from_parts(9_020_000, 0) + // Minimum execution time: 8_504_000 picoseconds. + Weight::from_parts(8_774_000, 0) .saturating_add(RocksDbWeight::get().writes(1_u64)) } /// Storage: `Staking::ForceEra` (r:0 w:1) @@ -1113,8 +1102,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 8_600_000 picoseconds. - Weight::from_parts(9_157_000, 0) + // Minimum execution time: 8_474_000 picoseconds. + Weight::from_parts(8_740_000, 0) .saturating_add(RocksDbWeight::get().writes(1_u64)) } /// Storage: `Staking::Invulnerables` (r:0 w:1) @@ -1124,10 +1113,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_792_000 picoseconds. - Weight::from_parts(3_293_694, 0) - // Standard Error: 31 - .saturating_add(Weight::from_parts(10_668, 0).saturating_mul(v.into())) + // Minimum execution time: 2_732_000 picoseconds. + Weight::from_parts(3_360_048, 0) + // Standard Error: 34 + .saturating_add(Weight::from_parts(9_964, 0).saturating_mul(v.into())) .saturating_add(RocksDbWeight::get().writes(1_u64)) } /// Storage: `Staking::SlashingSpans` (r:1 w:1) @@ -1163,10 +1152,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `2196 + s * (4 ±0)` // Estimated: `6248 + s * (4 ±0)` - // Minimum execution time: 86_537_000 picoseconds. - Weight::from_parts(95_127_637, 6248) - // Standard Error: 3_902 - .saturating_add(Weight::from_parts(1_336_182, 0).saturating_mul(s.into())) + // Minimum execution time: 87_495_000 picoseconds. + Weight::from_parts(95_710_388, 6248) + // Standard Error: 5_849 + .saturating_add(Weight::from_parts(1_339_335, 0).saturating_mul(s.into())) .saturating_add(RocksDbWeight::get().reads(13_u64)) .saturating_add(RocksDbWeight::get().writes(12_u64)) .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(s.into()))) @@ -1179,54 +1168,13 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `66672` // Estimated: `70137` - // Minimum execution time: 100_777_000 picoseconds. - Weight::from_parts(896_540_406, 70137) - // Standard Error: 57_788 - .saturating_add(Weight::from_parts(4_870_910, 0).saturating_mul(s.into())) + // Minimum execution time: 98_255_000 picoseconds. 
+ Weight::from_parts(892_807_378, 70137) + // Standard Error: 57_735 + .saturating_add(Weight::from_parts(4_876_449, 0).saturating_mul(s.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } - /// Storage: `Staking::CurrentEra` (r:1 w:0) - /// Proof: `Staking::CurrentEra` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - /// Storage: `Staking::ErasStakersOverview` (r:1 w:0) - /// Proof: `Staking::ErasStakersOverview` (`max_values`: None, `max_size`: Some(92), added: 2567, mode: `MaxEncodedLen`) - /// Storage: `Staking::ErasValidatorReward` (r:1 w:0) - /// Proof: `Staking::ErasValidatorReward` (`max_values`: None, `max_size`: Some(28), added: 2503, mode: `MaxEncodedLen`) - /// Storage: `Staking::Bonded` (r:257 w:0) - /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) - /// Storage: `Staking::Ledger` (r:1 w:1) - /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) - /// Storage: `Balances::Locks` (r:1 w:1) - /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) - /// Storage: `Balances::Freezes` (r:1 w:0) - /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`) - /// Storage: `System::Account` (r:258 w:258) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - /// Storage: `Staking::ClaimedRewards` (r:1 w:1) - /// Proof: `Staking::ClaimedRewards` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// Storage: `Staking::ErasStakersPaged` (r:1 w:0) - /// Proof: `Staking::ErasStakersPaged` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// Storage: `Staking::ErasRewardPoints` (r:1 w:0) - /// Proof: `Staking::ErasRewardPoints` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// Storage: `Staking::ErasValidatorPrefs` (r:1 w:0) - /// Proof: `Staking::ErasValidatorPrefs` (`max_values`: None, `max_size`: Some(57), added: 2532, mode: `MaxEncodedLen`) - /// Storage: `Staking::Payee` (r:257 w:0) - /// Proof: `Staking::Payee` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) - /// The range of component `n` is `[0, 256]`. - fn payout_stakers_dead_controller(n: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `21644 + n * (155 ±0)` - // Estimated: `21412 + n * (2603 ±0)` - // Minimum execution time: 133_129_000 picoseconds. - Weight::from_parts(190_983_630, 21412) - // Standard Error: 17_497 - .saturating_add(Weight::from_parts(24_723_153, 0).saturating_mul(n.into())) - .saturating_add(RocksDbWeight::get().reads(14_u64)) - .saturating_add(RocksDbWeight::get().reads((3_u64).saturating_mul(n.into()))) - .saturating_add(RocksDbWeight::get().writes(5_u64)) - .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(n.into()))) - .saturating_add(Weight::from_parts(0, 2603).saturating_mul(n.into())) - } /// Storage: `Staking::Bonded` (r:257 w:0) /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) /// Storage: `Staking::Ledger` (r:257 w:257) @@ -1260,10 +1208,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `33297 + n * (377 ±0)` // Estimated: `30944 + n * (3774 ±3)` - // Minimum execution time: 149_773_000 picoseconds. 
- Weight::from_parts(151_527_124, 30944) - // Standard Error: 24_152 - .saturating_add(Weight::from_parts(46_124_074, 0).saturating_mul(n.into())) + // Minimum execution time: 144_258_000 picoseconds. + Weight::from_parts(175_256_796, 30944) + // Standard Error: 24_339 + .saturating_add(Weight::from_parts(46_011_700, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads(14_u64)) .saturating_add(RocksDbWeight::get().reads((6_u64).saturating_mul(n.into()))) .saturating_add(RocksDbWeight::get().writes(4_u64)) @@ -1287,10 +1235,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `1991 + l * (7 ±0)` // Estimated: `8877` - // Minimum execution time: 81_618_000 picoseconds. - Weight::from_parts(85_245_630, 8877) - // Standard Error: 5_049 - .saturating_add(Weight::from_parts(39_811, 0).saturating_mul(l.into())) + // Minimum execution time: 83_532_000 picoseconds. + Weight::from_parts(86_757_943, 8877) + // Standard Error: 6_379 + .saturating_add(Weight::from_parts(57_566, 0).saturating_mul(l.into())) .saturating_add(RocksDbWeight::get().reads(9_u64)) .saturating_add(RocksDbWeight::get().writes(7_u64)) } @@ -1325,10 +1273,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `2196 + s * (4 ±0)` // Estimated: `6248 + s * (4 ±0)` - // Minimum execution time: 95_395_000 picoseconds. - Weight::from_parts(100_459_234, 6248) - // Standard Error: 3_781 - .saturating_add(Weight::from_parts(1_333_607, 0).saturating_mul(s.into())) + // Minimum execution time: 96_776_000 picoseconds. + Weight::from_parts(100_950_027, 6248) + // Standard Error: 4_719 + .saturating_add(Weight::from_parts(1_432_091, 0).saturating_mul(s.into())) .saturating_add(RocksDbWeight::get().reads(12_u64)) .saturating_add(RocksDbWeight::get().writes(11_u64)) .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(s.into()))) @@ -1374,12 +1322,12 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0 + n * (720 ±0) + v * (3598 ±0)` // Estimated: `512390 + n * (3566 ±0) + v * (3566 ±0)` - // Minimum execution time: 571_337_000 picoseconds. - Weight::from_parts(578_857_000, 512390) - // Standard Error: 2_090_511 - .saturating_add(Weight::from_parts(68_626_083, 0).saturating_mul(v.into())) - // Standard Error: 208_307 - .saturating_add(Weight::from_parts(18_645_374, 0).saturating_mul(n.into())) + // Minimum execution time: 577_699_000 picoseconds. + Weight::from_parts(582_827_000, 512390) + // Standard Error: 2_000_851 + .saturating_add(Weight::from_parts(67_316_744, 0).saturating_mul(v.into())) + // Standard Error: 199_373 + .saturating_add(Weight::from_parts(18_503_387, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads(206_u64)) .saturating_add(RocksDbWeight::get().reads((5_u64).saturating_mul(v.into()))) .saturating_add(RocksDbWeight::get().reads((4_u64).saturating_mul(n.into()))) @@ -1410,12 +1358,12 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `3175 + n * (911 ±0) + v * (395 ±0)` // Estimated: `512390 + n * (3566 ±0) + v * (3566 ±0)` - // Minimum execution time: 34_590_734_000 picoseconds. - Weight::from_parts(35_238_091_000, 512390) - // Standard Error: 427_974 - .saturating_add(Weight::from_parts(5_084_196, 0).saturating_mul(v.into())) - // Standard Error: 427_974 - .saturating_add(Weight::from_parts(4_503_420, 0).saturating_mul(n.into())) + // Minimum execution time: 34_048_778_000 picoseconds. 
+ Weight::from_parts(34_397_777_000, 512390) + // Standard Error: 346_115 + .saturating_add(Weight::from_parts(3_704_941, 0).saturating_mul(v.into())) + // Standard Error: 346_115 + .saturating_add(Weight::from_parts(4_064_819, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads(201_u64)) .saturating_add(RocksDbWeight::get().reads((5_u64).saturating_mul(v.into()))) .saturating_add(RocksDbWeight::get().reads((4_u64).saturating_mul(n.into()))) @@ -1432,10 +1380,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `979 + v * (50 ±0)` // Estimated: `3510 + v * (2520 ±0)` - // Minimum execution time: 2_509_588_000 picoseconds. - Weight::from_parts(89_050_539, 3510) - // Standard Error: 11_803 - .saturating_add(Weight::from_parts(5_031_416, 0).saturating_mul(v.into())) + // Minimum execution time: 2_473_149_000 picoseconds. + Weight::from_parts(84_721_859, 3510) + // Standard Error: 8_690 + .saturating_add(Weight::from_parts(4_870_439, 0).saturating_mul(v.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(v.into()))) .saturating_add(Weight::from_parts(0, 2520).saturating_mul(v.into())) @@ -1456,8 +1404,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 5_347_000 picoseconds. - Weight::from_parts(5_562_000, 0) + // Minimum execution time: 5_312_000 picoseconds. + Weight::from_parts(5_897_000, 0) .saturating_add(RocksDbWeight::get().writes(6_u64)) } /// Storage: `Staking::MinCommission` (r:0 w:1) @@ -1476,8 +1424,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 4_725_000 picoseconds. - Weight::from_parts(5_075_000, 0) + // Minimum execution time: 4_676_000 picoseconds. + Weight::from_parts(4_913_000, 0) .saturating_add(RocksDbWeight::get().writes(6_u64)) } /// Storage: `Staking::Ledger` (r:1 w:0) @@ -1504,8 +1452,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `1773` // Estimated: `6248` - // Minimum execution time: 67_204_000 picoseconds. - Weight::from_parts(69_197_000, 6248) + // Minimum execution time: 67_286_000 picoseconds. + Weight::from_parts(69_081_000, 6248) .saturating_add(RocksDbWeight::get().reads(11_u64)) .saturating_add(RocksDbWeight::get().writes(6_u64)) } @@ -1517,8 +1465,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `691` // Estimated: `3510` - // Minimum execution time: 12_497_000 picoseconds. - Weight::from_parts(12_943_000, 3510) + // Minimum execution time: 12_749_000 picoseconds. + Weight::from_parts(13_275_000, 3510) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -1528,8 +1476,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 3_245_000 picoseconds. - Weight::from_parts(3_352_000, 0) + // Minimum execution time: 3_155_000 picoseconds. + Weight::from_parts(3_319_000, 0) .saturating_add(RocksDbWeight::get().writes(1_u64)) } } -- GitLab From a9ba1e540ff524dda0dec8bb0b7da204e043adae Mon Sep 17 00:00:00 2001 From: Alexander Samusev <41779041+alvicsam@users.noreply.github.com> Date: Wed, 22 Nov 2023 10:29:56 +0100 Subject: [PATCH 096/199] Revert docker images tag naming to (#2434) Docker images from paritypr are also used in testnets. PR reverts docker tags naming to a more convenient. 
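For illustration only (the image name and commit values below are made up, not taken from a real pipeline), the effect of the `DOCKER_IMAGES_VERSION` change shown in the diff is roughly:

```
# previous scheme: DOCKER_IMAGES_VERSION = ${CI_COMMIT_SHA}
docker.io/paritypr/polkadot-debug:4a3078693b1b2ec887a1e35a42d86dca84e87299

# reverted scheme: DOCKER_IMAGES_VERSION = ${CI_COMMIT_REF_NAME}-${CI_COMMIT_SHORT_SHA}
# (CI_COMMIT_SHORT_SHA is typically the first eight characters of the commit SHA)
docker.io/paritypr/polkadot-debug:master-4a307869
```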
cc @PierreBesson --- .gitlab-ci.yml | 2 +- .gitlab/pipeline/publish.yml | 201 +---------------------------------- 2 files changed, 5 insertions(+), 198 deletions(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index e698aba81d5..f7e54961fa9 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -30,7 +30,7 @@ variables: RUSTY_CACHIER_COMPRESSION_METHOD: zstd NEXTEST_FAILURE_OUTPUT: immediate-final NEXTEST_SUCCESS_OUTPUT: final - DOCKER_IMAGES_VERSION: "${CI_COMMIT_SHA}" + DOCKER_IMAGES_VERSION: "${CI_COMMIT_REF_NAME}-${CI_COMMIT_SHORT_SHA}" default: retry: diff --git a/.gitlab/pipeline/publish.yml b/.gitlab/pipeline/publish.yml index 3cc2002cc1c..92ebc9eea1f 100644 --- a/.gitlab/pipeline/publish.yml +++ b/.gitlab/pipeline/publish.yml @@ -63,16 +63,16 @@ publish-rustdoc: after_script: - rm -rf .git/ ./* -# cumulus - +# note: images are used not only in zombienet but also in rococo, wococo and versi .build-push-image: image: $BUILDAH_IMAGE variables: DOCKERFILE: "" # docker/path-to.Dockerfile IMAGE_NAME: "" # docker.io/paritypr/image_name script: - # Exit if the job is not running in a merge queue - # - if [[ $CI_COMMIT_REF_NAME != *"gh-readonly-queue"* ]]; then echo "I will run only in a merge queue"; exit 0; fi + # Dockertag should differ in a merge queue + # TODO: test this + # - if [[ $CI_COMMIT_REF_NAME == *"gh-readonly-queue"* ]]; export DOCKER_IMAGES_VERSION="${CI_COMMIT_SHORT_SHA}"; fi - $BUILDAH_COMMAND build --format=docker --build-arg VCS_REF="${CI_COMMIT_SHA}" @@ -112,59 +112,6 @@ build-push-image-test-parachain: variables: DOCKERFILE: "docker/dockerfiles/test-parachain_injected.Dockerfile" IMAGE_NAME: "docker.io/paritypr/test-parachain" -# publish-s3: -# stage: publish -# extends: -# - .kubernetes-env -# - .publish-refs -# image: paritytech/awscli:latest -# needs: -# - job: build-linux-stable-cumulus -# artifacts: true -# variables: -# GIT_STRATEGY: none -# BUCKET: "releases.parity.io" -# PREFIX: "cumulus/${ARCH}-${DOCKER_OS}" -# script: -# - echo "___Publishing a binary with debug assertions!___" -# - echo "___VERSION = $(cat ./artifacts/VERSION) ___" -# - aws s3 sync ./artifacts/ s3://${BUCKET}/${PREFIX}/$(cat ./artifacts/VERSION)/ -# - echo "___Updating objects in latest path___" -# - aws s3 sync s3://${BUCKET}/${PREFIX}/$(cat ./artifacts/VERSION)/ s3://${BUCKET}/${PREFIX}/latest/ -# after_script: -# - aws s3 ls s3://${BUCKET}/${PREFIX}/latest/ -# --recursive --human-readable --summarize - -# publish-benchmarks-assets-s3: &publish-benchmarks -# stage: publish -# extends: -# - .kubernetes-env -# - .benchmarks-refs -# image: paritytech/awscli:latest -# needs: -# - job: benchmarks-assets -# artifacts: true -# variables: -# GIT_STRATEGY: none -# BUCKET: "releases.parity.io" -# PREFIX: "cumulus/$CI_COMMIT_REF_NAME/benchmarks-assets" -# script: -# - echo "___Publishing benchmark results___" -# - aws s3 sync ./artifacts/ s3://${BUCKET}/${PREFIX}/ -# after_script: -# - aws s3 ls s3://${BUCKET}/${PREFIX}/ --recursive --human-readable --summarize - -# publish-benchmarks-collectives-s3: -# <<: *publish-benchmarks -# variables: -# GIT_STRATEGY: none -# BUCKET: "releases.parity.io" -# PREFIX: "cumulus/$CI_COMMIT_REF_NAME/benchmarks-collectives" -# needs: -# - job: benchmarks-collectives -# artifacts: true - -### Polkadot build-push-image-polkadot-debug: stage: publish @@ -217,143 +164,3 @@ build-push-image-substrate-pr: variables: DOCKERFILE: "docker/dockerfiles/substrate_injected.Dockerfile" IMAGE_NAME: "docker.io/paritypr/substrate" -# old way - -# .build-push-image-polkadot: -# 
before_script: -# # - test -s ./artifacts/VERSION || exit 1 -# # - test -s ./artifacts/EXTRATAG || exit 1 -# - VERSION="$(cat ./artifacts/VERSION)" -# - EXTRATAG="$(cat ./artifacts/EXTRATAG)" -# - echo "Polkadot version = ${VERSION} (EXTRATAG = ${EXTRATAG})" -# script: -# # - test "$DOCKER_USER" -a "$DOCKER_PASS" || -# # ( echo "no docker credentials provided"; exit 1 ) -# - cd ./artifacts -# - $BUILDAH_COMMAND build -# --format=docker -# --build-arg VCS_REF="${CI_COMMIT_SHA}" -# --build-arg BUILD_DATE="$(date -u '+%Y-%m-%dT%H:%M:%SZ')" -# --build-arg IMAGE_NAME="${IMAGE_NAME}" -# --tag "$IMAGE_NAME:$VERSION" -# --tag "$IMAGE_NAME:$EXTRATAG" -# --file ${DOCKERFILE} . -# # The job will success only on the protected branch -# # - echo "$DOCKER_PASS" | -# # buildah login --username "$DOCKER_USER" --password-stdin docker.io -# # - $BUILDAH_COMMAND info -# # - $BUILDAH_COMMAND push --format=v2s2 "$IMAGE_NAME:$VERSION" -# # - $BUILDAH_COMMAND push --format=v2s2 "$IMAGE_NAME:$EXTRATAG" -# after_script: -# - buildah logout --all - -# publish-polkadot-debug-image: -# stage: publish -# image: ${BUILDAH_IMAGE} -# extends: -# - .kubernetes-env -# - .build-push-image-polkadot -# rules: -# - if: $CI_PIPELINE_SOURCE == "web" -# - if: $CI_PIPELINE_SOURCE == "schedule" -# - if: $CI_COMMIT_REF_NAME == "master" -# - if: $CI_COMMIT_REF_NAME =~ /^[0-9]+$/ # PRs -# - if: $CI_COMMIT_REF_NAME =~ /^v[0-9]+\.[0-9]+.*$/ # i.e. v1.0, v2.1rc1 -# variables: -# GIT_STRATEGY: none -# DOCKER_USER: ${PARITYPR_USER} -# DOCKER_PASS: ${PARITYPR_PASS} -# # docker/dockerfiles/polkadot/polkadot_injected_debug.Dockerfile -# DOCKERFILE: polkadot_injected_debug.Dockerfile -# IMAGE_NAME: docker.io/paritypr/polkadot-debug -# needs: -# - job: build-linux-stable -# artifacts: true -# after_script: -# # pass artifacts to the zombienet-tests job -# # https://docs.gitlab.com/ee/ci/multi_project_pipelines.html#with-variable-inheritance -# - echo "PARACHAINS_IMAGE_NAME=${IMAGE_NAME}" > ./artifacts/parachains.env -# - echo "PARACHAINS_IMAGE_TAG=$(cat ./artifacts/EXTRATAG)" >> ./artifacts/parachains.env -# artifacts: -# reports: -# # this artifact is used in zombienet-tests job -# dotenv: ./artifacts/parachains.env -# expire_in: 1 days - -# publish-test-collators-image: -# # service image for zombienet -# stage: publish -# extends: -# - .kubernetes-env -# - .build-push-image-polkadot -# - .zombienet-refs -# variables: -# CI_IMAGE: ${BUILDAH_IMAGE} -# GIT_STRATEGY: none -# DOCKER_USER: ${PARITYPR_USER} -# DOCKER_PASS: ${PARITYPR_PASS} -# # docker/dockerfiles/collator_injected.Dockerfile -# DOCKERFILE: collator_injected.Dockerfile -# IMAGE_NAME: docker.io/paritypr/colander -# needs: -# - job: build-test-collators -# artifacts: true -# after_script: -# - buildah logout --all -# # pass artifacts to the zombienet-tests job -# - echo "COLLATOR_IMAGE_NAME=${IMAGE_NAME}" > ./artifacts/collator.env -# - echo "COLLATOR_IMAGE_TAG=$(cat ./artifacts/EXTRATAG)" >> ./artifacts/collator.env -# artifacts: -# reports: -# # this artifact is used in zombienet-tests job -# dotenv: ./artifacts/collator.env - -# publish-malus-image: -# # service image for Simnet -# stage: publish -# extends: -# - .kubernetes-env -# - .build-push-image-polkadot -# - .zombienet-refs -# variables: -# CI_IMAGE: ${BUILDAH_IMAGE} -# GIT_STRATEGY: none -# DOCKER_USER: ${PARITYPR_USER} -# DOCKER_PASS: ${PARITYPR_PASS} -# # docker/dockerfiles/malus_injected.Dockerfile -# DOCKERFILE: malus_injected.Dockerfile -# IMAGE_NAME: docker.io/paritypr/malus -# needs: -# - job: build-malus -# 
artifacts: true -# after_script: -# - buildah logout "$IMAGE_NAME" -# # pass artifacts to the zombienet-tests job -# - echo "MALUS_IMAGE_NAME=${IMAGE_NAME}" > ./artifacts/malus.env -# - echo "MALUS_IMAGE_TAG=$(cat ./artifacts/EXTRATAG)" >> ./artifacts/malus.env -# artifacts: -# reports: -# # this artifact is used in zombienet-tests job -# dotenv: ./artifacts/malus.env - -# substrate - -# publish-substrate-image-pr: -# # service image for zombienet -# stage: publish -# extends: -# - .kubernetes-env -# - .build-push-image-polkadot -# - .zombienet-refs -# variables: -# CI_IMAGE: ${BUILDAH_IMAGE} -# GIT_STRATEGY: none -# DOCKER_USER: ${PARITYPR_USER} -# DOCKER_PASS: ${PARITYPR_PASS} -# DOCKERFILE: substrate_injected.Dockerfile -# IMAGE_NAME: docker.io/paritypr/substrate -# needs: -# - job: build-linux-substrate -# artifacts: true -# after_script: -# - buildah logout "$IMAGE_NAME" -- GitLab From 98f9e2ea9d5d034e85de7a87f6ba941827bc1a13 Mon Sep 17 00:00:00 2001 From: gupnik <17176722+gupnik@users.noreply.github.com> Date: Wed, 22 Nov 2023 17:32:49 +0530 Subject: [PATCH 097/199] Fixes import path in benchmark macro (#2437) Fully-qualified path was not being used in benchmark macro for one of the cases. This PR fixes this and removes the unnecessary import in a couple of files, which I believe was done to fix this issue. --- .../bin/node-template/pallets/template/src/benchmarking.rs | 1 - substrate/frame/asset-rate/src/benchmarking.rs | 1 - substrate/frame/support/procedural/src/benchmark.rs | 2 +- 3 files changed, 1 insertion(+), 3 deletions(-) diff --git a/substrate/bin/node-template/pallets/template/src/benchmarking.rs b/substrate/bin/node-template/pallets/template/src/benchmarking.rs index 6c3cae6066b..5a262417629 100644 --- a/substrate/bin/node-template/pallets/template/src/benchmarking.rs +++ b/substrate/bin/node-template/pallets/template/src/benchmarking.rs @@ -1,7 +1,6 @@ //! Benchmarking setup for pallet-template #![cfg(feature = "runtime-benchmarks")] use super::*; -use sp_std::vec; #[allow(unused)] use crate::Pallet as Template; diff --git a/substrate/frame/asset-rate/src/benchmarking.rs b/substrate/frame/asset-rate/src/benchmarking.rs index 21d53a89e39..6fcc7c7fadb 100644 --- a/substrate/frame/asset-rate/src/benchmarking.rs +++ b/substrate/frame/asset-rate/src/benchmarking.rs @@ -25,7 +25,6 @@ use frame_benchmarking::v2::*; use frame_support::assert_ok; use frame_system::RawOrigin; use sp_core::crypto::FromEntropy; -use sp_std::vec; /// Trait describing the factory function for the `AssetKind` parameter. pub trait AssetKindFactory { diff --git a/substrate/frame/support/procedural/src/benchmark.rs b/substrate/frame/support/procedural/src/benchmark.rs index fb55e8c9f66..6ded82d91aa 100644 --- a/substrate/frame/support/procedural/src/benchmark.rs +++ b/substrate/frame/support/procedural/src/benchmark.rs @@ -517,7 +517,7 @@ pub fn benchmarks( components, // TODO: Not supported by V2 syntax as of yet. 
// https://github.com/paritytech/substrate/issues/13132 - pov_modes: vec![], + pov_modes: #krate::__private::vec![], } }).collect::<#krate::__private::Vec<_>>() } -- GitLab From 408af9b32d95acbbac5e18bee66fd1b74230a699 Mon Sep 17 00:00:00 2001 From: Marcin S Date: Wed, 22 Nov 2023 15:45:52 +0100 Subject: [PATCH 098/199] PVF: Fix unshare "no such file or directory" error (#2426) --- .gitlab/pipeline/test.yml | 2 +- Cargo.lock | 1 + polkadot/node/core/pvf/Cargo.toml | 1 + polkadot/node/core/pvf/common/src/lib.rs | 2 +- polkadot/node/core/pvf/src/artifacts.rs | 5 +- .../node/core/pvf/src/execute/worker_intf.rs | 4 +- polkadot/node/core/pvf/src/host.rs | 9 +- .../node/core/pvf/src/prepare/worker_intf.rs | 4 +- polkadot/node/core/pvf/src/security.rs | 19 ++-- polkadot/node/core/pvf/src/worker_intf.rs | 105 ++++++++---------- polkadot/node/core/pvf/tests/it/main.rs | 32 ++++++ .../functional/0001-parachains-pvf.zndsl | 4 +- 12 files changed, 105 insertions(+), 83 deletions(-) diff --git a/.gitlab/pipeline/test.yml b/.gitlab/pipeline/test.yml index 4ed3ec19c48..79ef9070dba 100644 --- a/.gitlab/pipeline/test.yml +++ b/.gitlab/pipeline/test.yml @@ -29,7 +29,7 @@ test-linux-stable: --locked \ --release \ --no-fail-fast \ - --features try-runtime,experimental \ + --features try-runtime,experimental,ci-only-tests \ --partition count:${CI_NODE_INDEX}/${CI_NODE_TOTAL} # Upload tests results to Elasticsearch - echo "Upload test results to Elasticsearch" diff --git a/Cargo.lock b/Cargo.lock index 903e9463cf6..bfe24e528eb 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -12591,6 +12591,7 @@ dependencies = [ "rand 0.8.5", "rococo-runtime", "rusty-fork", + "sc-sysinfo", "slotmap", "sp-core", "sp-maybe-compressed-blob", diff --git a/polkadot/node/core/pvf/Cargo.toml b/polkadot/node/core/pvf/Cargo.toml index 27da484fe4f..05509a5f853 100644 --- a/polkadot/node/core/pvf/Cargo.toml +++ b/polkadot/node/core/pvf/Cargo.toml @@ -54,6 +54,7 @@ halt = { package = "test-parachain-halt", path = "../../../parachain/test-parach [target.'cfg(target_os = "linux")'.dev-dependencies] procfs = "0.16.0" rusty-fork = "0.3.0" +sc-sysinfo = { path = "../../../../substrate/client/sysinfo" } [[bench]] name = "host_prepare_rococo_runtime" diff --git a/polkadot/node/core/pvf/common/src/lib.rs b/polkadot/node/core/pvf/common/src/lib.rs index 278fa3fe821..c041c60aaf9 100644 --- a/polkadot/node/core/pvf/common/src/lib.rs +++ b/polkadot/node/core/pvf/common/src/lib.rs @@ -47,7 +47,7 @@ pub mod tests { } /// Status of security features on the current system. -#[derive(Debug, Clone, Default)] +#[derive(Debug, Clone, Default, PartialEq, Eq)] pub struct SecurityStatus { /// Whether the landlock features we use are fully available on this system. 
pub can_enable_landlock: bool, diff --git a/polkadot/node/core/pvf/src/artifacts.rs b/polkadot/node/core/pvf/src/artifacts.rs index 53085eade3c..79b53467b4e 100644 --- a/polkadot/node/core/pvf/src/artifacts.rs +++ b/polkadot/node/core/pvf/src/artifacts.rs @@ -499,8 +499,7 @@ mod tests { #[tokio::test] async fn remove_stale_cache_on_startup() { - let cache_dir = crate::worker_intf::tmppath("test-cache").await.unwrap(); - fs::create_dir_all(&cache_dir).unwrap(); + let cache_dir = tempfile::Builder::new().prefix("test-cache-").tempdir().unwrap(); // invalid prefix create_rand_artifact(&cache_dir, ""); @@ -529,7 +528,7 @@ mod tests { assert_eq!(fs::read_dir(&cache_dir).unwrap().count(), 7); - let artifacts = Artifacts::new_and_prune(&cache_dir).await; + let artifacts = Artifacts::new_and_prune(cache_dir.path()).await; assert_eq!(fs::read_dir(&cache_dir).unwrap().count(), 1); assert_eq!(artifacts.len(), 1); diff --git a/polkadot/node/core/pvf/src/execute/worker_intf.rs b/polkadot/node/core/pvf/src/execute/worker_intf.rs index 4faaf13e62f..fc6cd8fc725 100644 --- a/polkadot/node/core/pvf/src/execute/worker_intf.rs +++ b/polkadot/node/core/pvf/src/execute/worker_intf.rs @@ -278,7 +278,7 @@ where // Cheaply create a hard link to the artifact. The artifact is always at a known location in the // worker cache, and the child can't access any other artifacts or gain any information from the // original filename. - let link_path = worker_dir::execute_artifact(&worker_dir.path); + let link_path = worker_dir::execute_artifact(worker_dir.path()); if let Err(err) = tokio::fs::hard_link(artifact_path, link_path).await { gum::warn!( target: LOG_TARGET, @@ -292,7 +292,7 @@ where } } - let worker_dir_path = worker_dir.path.clone(); + let worker_dir_path = worker_dir.path().to_owned(); let outcome = f(worker_dir).await; // Try to clear the worker dir. diff --git a/polkadot/node/core/pvf/src/host.rs b/polkadot/node/core/pvf/src/host.rs index 6049f51834e..be8f7aee778 100644 --- a/polkadot/node/core/pvf/src/host.rs +++ b/polkadot/node/core/pvf/src/host.rs @@ -24,7 +24,7 @@ use crate::{ artifacts::{ArtifactId, ArtifactPathId, ArtifactState, Artifacts}, execute::{self, PendingExecutionRequest}, metrics::Metrics, - prepare, security, Priority, ValidationError, LOG_TARGET, + prepare, security, Priority, SecurityStatus, ValidationError, LOG_TARGET, }; use always_assert::never; use futures::{ @@ -70,6 +70,8 @@ pub(crate) type PrecheckResultSender = oneshot::Sender; #[derive(Clone)] pub struct ValidationHost { to_host_tx: mpsc::Sender, + /// Available security features, detected by the host during startup. 
+ pub security_status: SecurityStatus, } impl ValidationHost { @@ -216,7 +218,7 @@ pub async fn start( let (to_host_tx, to_host_rx) = mpsc::channel(10); - let validation_host = ValidationHost { to_host_tx }; + let validation_host = ValidationHost { to_host_tx, security_status: security_status.clone() }; let (to_prepare_pool, from_prepare_pool, run_prepare_pool) = prepare::start_pool( metrics.clone(), @@ -978,7 +980,8 @@ pub(crate) mod tests { fn host_handle(&mut self) -> ValidationHost { let to_host_tx = self.to_host_tx.take().unwrap(); - ValidationHost { to_host_tx } + let security_status = Default::default(); + ValidationHost { to_host_tx, security_status } } async fn poll_and_recv_result(&mut self, result_rx: oneshot::Receiver) -> T diff --git a/polkadot/node/core/pvf/src/prepare/worker_intf.rs b/polkadot/node/core/pvf/src/prepare/worker_intf.rs index 318aea7295d..f978005068c 100644 --- a/polkadot/node/core/pvf/src/prepare/worker_intf.rs +++ b/polkadot/node/core/pvf/src/prepare/worker_intf.rs @@ -318,7 +318,7 @@ where { // Create the tmp file here so that the child doesn't need any file creation rights. This will // be cleared at the end of this function. - let tmp_file = worker_dir::prepare_tmp_artifact(&worker_dir.path); + let tmp_file = worker_dir::prepare_tmp_artifact(worker_dir.path()); if let Err(err) = tokio::fs::File::create(&tmp_file).await { gum::warn!( target: LOG_TARGET, @@ -333,7 +333,7 @@ where } }; - let worker_dir_path = worker_dir.path.clone(); + let worker_dir_path = worker_dir.path().to_owned(); let outcome = f(tmp_file, stream, worker_dir).await; // Try to clear the worker dir. diff --git a/polkadot/node/core/pvf/src/security.rs b/polkadot/node/core/pvf/src/security.rs index 8c06c68392f..ef8714f58c8 100644 --- a/polkadot/node/core/pvf/src/security.rs +++ b/polkadot/node/core/pvf/src/security.rs @@ -28,7 +28,7 @@ const SECURE_MODE_ANNOUNCEMENT: &'static str = /// Run checks for supported security features. /// -/// # Return +/// # Returns /// /// Returns the set of security features that we were able to enable. If an error occurs while /// enabling a security feature we set the corresponding status to `false`. @@ -158,18 +158,15 @@ async fn check_can_unshare_user_namespace_and_change_root( ) -> SecureModeResult { cfg_if::cfg_if! 
{ if #[cfg(target_os = "linux")] { - let cache_dir_tempdir = - crate::worker_intf::tmppath_in("check-can-unshare", cache_path) - .await - .map_err( - |err| - SecureModeError::CannotUnshareUserNamespaceAndChangeRoot( - format!("could not create a temporary directory in {:?}: {}", cache_path, err) - ) - )?; + let cache_dir_tempdir = tempfile::Builder::new() + .prefix("check-can-unshare-") + .tempdir_in(cache_path) + .map_err(|err| SecureModeError::CannotUnshareUserNamespaceAndChangeRoot( + format!("could not create a temporary directory in {:?}: {}", cache_path, err) + ))?; match tokio::process::Command::new(prepare_worker_program_path) .arg("--check-can-unshare-user-namespace-and-change-root") - .arg(cache_dir_tempdir) + .arg(cache_dir_tempdir.path()) .output() .await { diff --git a/polkadot/node/core/pvf/src/worker_intf.rs b/polkadot/node/core/pvf/src/worker_intf.rs index 5e589b9abce..9d6907c1092 100644 --- a/polkadot/node/core/pvf/src/worker_intf.rs +++ b/polkadot/node/core/pvf/src/worker_intf.rs @@ -71,6 +71,7 @@ pub async fn spawn_with_program_path( with_transient_socket_path(debug_id, |socket_path| { let socket_path = socket_path.to_owned(); + let worker_dir_path = worker_dir.path().to_owned(); async move { let listener = UnixListener::bind(&socket_path).map_err(|err| { @@ -91,7 +92,7 @@ pub async fn spawn_with_program_path( &program_path, &extra_args, &socket_path, - &worker_dir.path, + &worker_dir_path, security_status, ) .map_err(|err| { @@ -100,7 +101,7 @@ pub async fn spawn_with_program_path( %debug_id, ?program_path, ?extra_args, - ?worker_dir.path, + ?worker_dir_path, ?socket_path, "cannot spawn a worker: {:?}", err, @@ -108,7 +109,6 @@ pub async fn spawn_with_program_path( SpawnErr::ProcessSpawn })?; - let worker_dir_path = worker_dir.path.clone(); futures::select! { accept_result = listener.accept().fuse() => { let (stream, _) = accept_result.map_err(|err| { @@ -150,7 +150,42 @@ where F: FnOnce(&Path) -> Fut, Fut: futures::Future> + 'static, { - let socket_path = tmppath(&format!("pvf-host-{}", debug_id)) + /// Returns a path under [`std::env::temp_dir`]. The path name will start with the given prefix. + /// + /// There is only a certain number of retries. If exceeded this function will give up and return + /// an error. + pub async fn tmppath(prefix: &str) -> io::Result { + fn make_tmppath(prefix: &str, dir: &Path) -> PathBuf { + use rand::distributions::Alphanumeric; + + const DESCRIMINATOR_LEN: usize = 10; + + let mut buf = Vec::with_capacity(prefix.len() + DESCRIMINATOR_LEN); + buf.extend(prefix.as_bytes()); + buf.extend(rand::thread_rng().sample_iter(&Alphanumeric).take(DESCRIMINATOR_LEN)); + + let s = std::str::from_utf8(&buf) + .expect("the string is collected from a valid utf-8 sequence; qed"); + + let mut path = dir.to_owned(); + path.push(s); + path + } + + const NUM_RETRIES: usize = 50; + + let dir = std::env::temp_dir(); + for _ in 0..NUM_RETRIES { + let tmp_path = make_tmppath(prefix, &dir); + if !tmp_path.exists() { + return Ok(tmp_path) + } + } + + Err(io::Error::new(io::ErrorKind::Other, "failed to create a temporary path")) + } + + let socket_path = tmppath(&format!("pvf-host-{}-", debug_id)) .await .map_err(|_| SpawnErr::TmpPath)?; let result = f(&socket_path).await; @@ -162,46 +197,6 @@ where result } -/// Returns a path under the given `dir`. The path name will start with the given prefix. -/// -/// There is only a certain number of retries. If exceeded this function will give up and return an -/// error. 
-pub async fn tmppath_in(prefix: &str, dir: &Path) -> io::Result { - fn make_tmppath(prefix: &str, dir: &Path) -> PathBuf { - use rand::distributions::Alphanumeric; - - const DESCRIMINATOR_LEN: usize = 10; - - let mut buf = Vec::with_capacity(prefix.len() + DESCRIMINATOR_LEN); - buf.extend(prefix.as_bytes()); - buf.extend(rand::thread_rng().sample_iter(&Alphanumeric).take(DESCRIMINATOR_LEN)); - - let s = std::str::from_utf8(&buf) - .expect("the string is collected from a valid utf-8 sequence; qed"); - - let mut path = dir.to_owned(); - path.push(s); - path - } - - const NUM_RETRIES: usize = 50; - - for _ in 0..NUM_RETRIES { - let tmp_path = make_tmppath(prefix, dir); - if !tmp_path.exists() { - return Ok(tmp_path) - } - } - - Err(io::Error::new(io::ErrorKind::Other, "failed to create a temporary path")) -} - -/// The same as [`tmppath_in`], but uses [`std::env::temp_dir`] as the directory. -pub async fn tmppath(prefix: &str) -> io::Result { - let temp_dir = std::env::temp_dir(); - tmppath_in(prefix, &temp_dir).await -} - /// A struct that represents an idle worker. /// /// This struct is supposed to be used as a token that is passed by move into a subroutine that @@ -224,8 +219,6 @@ pub struct IdleWorker { pub enum SpawnErr { /// Cannot obtain a temporary path location. TmpPath, - /// An FS error occurred. - Fs(String), /// Cannot bind the socket to the given path. Bind, /// An error happened during accepting a connection to the socket. @@ -419,26 +412,22 @@ pub async fn framed_recv(r: &mut (impl AsyncRead + Unpin)) -> io::Result /// ``` #[derive(Debug)] pub struct WorkerDir { - pub path: PathBuf, + tempdir: tempfile::TempDir, } impl WorkerDir { /// Creates a new, empty worker dir with a random name in the given cache dir. pub async fn new(debug_id: &'static str, cache_dir: &Path) -> Result { let prefix = format!("worker-dir-{}-", debug_id); - let path = tmppath_in(&prefix, cache_dir).await.map_err(|_| SpawnErr::TmpPath)?; - tokio::fs::create_dir(&path) - .await - .map_err(|err| SpawnErr::Fs(err.to_string()))?; - Ok(Self { path }) + let tempdir = tempfile::Builder::new() + .prefix(&prefix) + .tempdir_in(cache_dir) + .map_err(|_| SpawnErr::TmpPath)?; + Ok(Self { tempdir }) } -} -// Try to clean up the temporary worker dir at the end of the worker's lifetime. It should be wiped -// on startup, but we make a best effort not to leave it around. 
-impl Drop for WorkerDir { - fn drop(&mut self) { - let _ = std::fs::remove_dir_all(&self.path); + pub fn path(&self) -> &Path { + self.tempdir.path() } } diff --git a/polkadot/node/core/pvf/tests/it/main.rs b/polkadot/node/core/pvf/tests/it/main.rs index 4c81ac502dd..b53d598cd0f 100644 --- a/polkadot/node/core/pvf/tests/it/main.rs +++ b/polkadot/node/core/pvf/tests/it/main.rs @@ -18,6 +18,8 @@ use assert_matches::assert_matches; use parity_scale_codec::Encode as _; +#[cfg(all(feature = "ci-only-tests", target_os = "linux"))] +use polkadot_node_core_pvf::SecurityStatus; use polkadot_node_core_pvf::{ start, testing::build_workers_and_get_paths, Config, InvalidCandidate, Metrics, PrepareError, PrepareJobKind, PvfPrepData, ValidationError, ValidationHost, JOB_TIMEOUT_WALL_CLOCK_FACTOR, @@ -122,6 +124,11 @@ impl TestHost { .unwrap(); result_rx.await.unwrap() } + + #[cfg(all(feature = "ci-only-tests", target_os = "linux"))] + async fn security_status(&self) -> SecurityStatus { + self.host.lock().await.security_status.clone() + } } #[tokio::test] @@ -402,3 +409,28 @@ async fn prepare_can_run_serially() { // Prepare a different wasm blob to prevent skipping work. let _stats = host.precheck_pvf(halt::wasm_binary_unwrap(), Default::default()).await.unwrap(); } + +// CI machines should be able to enable all the security features. +#[cfg(all(feature = "ci-only-tests", target_os = "linux"))] +#[tokio::test] +async fn all_security_features_work() { + // Landlock is only available starting Linux 5.13, and we may be testing on an old kernel. + let sysinfo = sc_sysinfo::gather_sysinfo(); + // The version will look something like "5.15.0-87-generic". + let version = sysinfo.linux_kernel.unwrap(); + let version_split: Vec<&str> = version.split(".").collect(); + let major: u32 = version_split[0].parse().unwrap(); + let minor: u32 = version_split[1].parse().unwrap(); + let can_enable_landlock = if major >= 6 { true } else { minor >= 13 }; + + let host = TestHost::new().await; + + assert_eq!( + host.security_status().await, + SecurityStatus { + can_enable_landlock, + can_enable_seccomp: true, + can_unshare_user_namespace_and_change_root: true, + } + ); +} diff --git a/polkadot/zombienet_tests/functional/0001-parachains-pvf.zndsl b/polkadot/zombienet_tests/functional/0001-parachains-pvf.zndsl index 46bb8bcdf72..135999a092a 100644 --- a/polkadot/zombienet_tests/functional/0001-parachains-pvf.zndsl +++ b/polkadot/zombienet_tests/functional/0001-parachains-pvf.zndsl @@ -54,8 +54,8 @@ one: reports histogram polkadot_pvf_preparation_time has 0 samples in buckets [" two: reports histogram polkadot_pvf_preparation_time has 0 samples in buckets ["20", "30", "60", "120", "+Inf"] within 10 seconds # Check execution time. -# There are two different timeout conditions: BACKING_EXECUTION_TIMEOUT(2s) and -# APPROVAL_EXECUTION_TIMEOUT(6s). Currently these are not differentiated by metrics +# There are two different timeout conditions: DEFAULT_BACKING_EXECUTION_TIMEOUT(2s) and +# DEFAULT_APPROVAL_EXECUTION_TIMEOUT(12s). Currently these are not differentiated by metrics # because the metrics are defined in `polkadot-node-core-pvf` which is a level below # the relevant subsystems. 
# That being said, we will take the simplifying assumption of testing only the -- GitLab From 0d6dcb3f48559eff24b3a4e2233fc31e145717d2 Mon Sep 17 00:00:00 2001 From: James Wilson Date: Wed, 22 Nov 2023 17:27:53 +0000 Subject: [PATCH 099/199] Make TypeInfo for SkipCheckIfFeeless transparent, too (#2449) The `SkipCheckIfFeeless::IDENTIFIER` became transparent (i.e. it was whatever the inner signed extension's was). This PR just makes the `TypeInfo` transparent too, so that libraries that use said info to decode the data (e.g. subxt) can behave identically whether or not the `SkipCheckIfFeeless` wrapper is used. --------- Co-authored-by: Oliver Tale-Yazdi --- .../skip-feeless-payment/src/lib.rs | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) diff --git a/substrate/frame/transaction-payment/skip-feeless-payment/src/lib.rs b/substrate/frame/transaction-payment/skip-feeless-payment/src/lib.rs index 55afaaf93d7..6c34c26ce92 100644 --- a/substrate/frame/transaction-payment/skip-feeless-payment/src/lib.rs +++ b/substrate/frame/transaction-payment/skip-feeless-payment/src/lib.rs @@ -40,7 +40,7 @@ use frame_support::{ dispatch::{CheckIfFeeless, DispatchResult}, traits::{IsType, OriginTrait}, }; -use scale_info::TypeInfo; +use scale_info::{StaticTypeInfo, TypeInfo}; use sp_runtime::{ traits::{DispatchInfoOf, PostDispatchInfoOf, SignedExtension}, transaction_validity::TransactionValidityError, @@ -75,10 +75,17 @@ pub mod pallet { } /// A [`SignedExtension`] that skips the wrapped extension if the dispatchable is feeless. -#[derive(Encode, Decode, Clone, Eq, PartialEq, TypeInfo)] -#[scale_info(skip_type_params(T))] +#[derive(Encode, Decode, Clone, Eq, PartialEq)] pub struct SkipCheckIfFeeless(pub S, sp_std::marker::PhantomData); +// Make this extension "invisible" from the outside (ie metadata type information) +impl TypeInfo for SkipCheckIfFeeless { + type Identity = S; + fn type_info() -> scale_info::Type { + S::type_info() + } +} + impl sp_std::fmt::Debug for SkipCheckIfFeeless { #[cfg(feature = "std")] fn fmt(&self, f: &mut sp_std::fmt::Formatter) -> sp_std::fmt::Result { -- GitLab From 0956357b602af1bb10f9bd6fb3998b856bd07592 Mon Sep 17 00:00:00 2001 From: girazoki Date: Wed, 22 Nov 2023 18:29:06 +0100 Subject: [PATCH 100/199] work with additional key values (#1757) Add the possibility to inject additional key-values into the sproof-builder that generates the relay root that gets stored in parachain-system. Rationale: pallets that verify additional storage items (not those verified by parachain-system) from the relay should be able to prove against the relay root that gets stored in parachain-system. This PR allows providing additional key-value pairs that can later be used for verification in other pallets --- cumulus/primitives/parachain-inherent/src/mock.rs | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/cumulus/primitives/parachain-inherent/src/mock.rs b/cumulus/primitives/parachain-inherent/src/mock.rs index 5168b46a14d..e40cb49acdd 100644 --- a/cumulus/primitives/parachain-inherent/src/mock.rs +++ b/cumulus/primitives/parachain-inherent/src/mock.rs @@ -61,6 +61,8 @@ pub struct MockValidationDataInherentDataProvider { pub raw_downward_messages: Vec>, // Inbound Horizontal messages sorted by channel pub raw_horizontal_messages: Vec<(ParaId, Vec)>, + // Additional key-value pairs that should be injected.
+ pub additional_key_values: Option, Vec)>>, } pub trait GenerateRandomness { @@ -210,6 +212,10 @@ impl> InherentDataProvider sproof_builder.randomness = self.relay_randomness_config.generate_randomness(self.current_para_block.into()); + if let Some(key_values) = &self.additional_key_values { + sproof_builder.additional_key_values = key_values.clone() + } + let (relay_parent_storage_root, proof) = sproof_builder.into_state_root_and_proof(); inherent_data.put_data( -- GitLab From 49874882cf3eadde01951ee48916c9811d6a8c7e Mon Sep 17 00:00:00 2001 From: Michal Kucharczyk <1728078+michalkucharczyk@users.noreply.github.com> Date: Wed, 22 Nov 2023 19:24:11 +0100 Subject: [PATCH 101/199] polkadot-node-subsystems: `ChainApiBackend` added + `polkadot-debug` image version fixed (#2411) The out-dated version (bad tag) of [polkadot image](https://github.com/paritytech/polkadot-sdk/blob/ede4a362622dfa69eb167eaa876246b1289f4b41/.gitlab/pipeline/zombienet/cumulus.yml#L31) ([docker info](https://hub.docker.com/layers/paritypr/polkadot-debug/master/images/sha256:adb1658052cf671b50c90d5cece5c7a131efa1a95978249bd5cb85a5ad654f7a?context=explore)) was used. This PR fixes this. See also: https://github.com/paritytech/polkadot-sdk/pull/2411#issuecomment-1822632724 Also adds an abstraction that allows asynchronous backends to be passed to `ChainApiSubsystem` --------- Co-authored-by: Sebastian Kunert --- .gitlab/pipeline/zombienet/cumulus.yml | 2 +- Cargo.lock | 6 ++ .../relay-chain-minimal-node/Cargo.toml | 4 + .../src/blockchain_rpc_client.rs | 86 ++++++++++++++++++- .../src/collator_overseer.rs | 3 +- polkadot/node/core/chain-api/Cargo.toml | 5 +- polkadot/node/core/chain-api/src/lib.rs | 79 +++++++++-------- polkadot/node/core/chain-api/src/tests.rs | 21 +++-- polkadot/node/overseer/src/lib.rs | 4 +- polkadot/node/subsystem-types/Cargo.toml | 1 + polkadot/node/subsystem-types/src/lib.rs | 2 +- .../subsystem-types/src/runtime_client.rs | 57 +++++++++++- 12 files changed, 214 insertions(+), 56 deletions(-) diff --git a/.gitlab/pipeline/zombienet/cumulus.yml b/.gitlab/pipeline/zombienet/cumulus.yml index 3cac67c2966..3e4df000b7f 100644 --- a/.gitlab/pipeline/zombienet/cumulus.yml +++ b/.gitlab/pipeline/zombienet/cumulus.yml @@ -28,7 +28,7 @@ - job: build-push-image-test-parachain artifacts: true variables: - POLKADOT_IMAGE: "docker.io/paritypr/polkadot-debug:master" + POLKADOT_IMAGE: "docker.io/paritypr/polkadot-debug:${DOCKER_IMAGES_VERSION}" GH_DIR: "https://github.com/paritytech/cumulus/tree/${CI_COMMIT_SHORT_SHA}/zombienet/tests" LOCAL_DIR: "/builds/parity/mirrors/polkadot-sdk/cumulus/zombienet/tests" COL_IMAGE: "docker.io/paritypr/test-parachain:${DOCKER_IMAGES_VERSION}" diff --git a/Cargo.lock b/Cargo.lock index bfe24e528eb..26da3b4f825 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4183,6 +4183,7 @@ dependencies = [ "polkadot-core-primitives", "polkadot-network-bridge", "polkadot-node-collation-generation", + "polkadot-node-core-chain-api", "polkadot-node-core-prospective-parachains", "polkadot-node-core-runtime-api", "polkadot-node-network-protocol", @@ -4190,16 +4191,19 @@ dependencies = [ "polkadot-overseer", "polkadot-primitives", "sc-authority-discovery", + "sc-client-api", "sc-network", "sc-network-common", "sc-service", "sc-tracing", "sc-utils", "sp-api", + "sp-blockchain", "sp-consensus", "sp-consensus-babe", "sp-runtime", "substrate-prometheus-endpoint", + "tokio", "tracing", ] @@ -12443,6 +12447,7 @@ dependencies = [ "polkadot-node-primitives", "polkadot-node-subsystem", 
"polkadot-node-subsystem-test-helpers", + "polkadot-node-subsystem-types", "polkadot-primitives", "sc-client-api", "sc-consensus-babe", @@ -12851,6 +12856,7 @@ dependencies = [ "smallvec", "sp-api", "sp-authority-discovery", + "sp-blockchain", "sp-consensus-babe", "substrate-prometheus-endpoint", "thiserror", diff --git a/cumulus/client/relay-chain-minimal-node/Cargo.toml b/cumulus/client/relay-chain-minimal-node/Cargo.toml index 53173fb4118..ce76fc5cd6d 100644 --- a/cumulus/client/relay-chain-minimal-node/Cargo.toml +++ b/cumulus/client/relay-chain-minimal-node/Cargo.toml @@ -19,6 +19,7 @@ polkadot-collator-protocol = { path = "../../../polkadot/node/network/collator-p polkadot-network-bridge = { path = "../../../polkadot/node/network/bridge" } polkadot-node-collation-generation = { path = "../../../polkadot/node/collation-generation" } polkadot-node-core-runtime-api = { path = "../../../polkadot/node/core/runtime-api" } +polkadot-node-core-chain-api = { path = "../../../polkadot/node/core/chain-api" } polkadot-node-core-prospective-parachains = { path = "../../../polkadot/node/core/prospective-parachains" } # substrate deps @@ -26,6 +27,7 @@ sc-authority-discovery = { path = "../../../substrate/client/authority-discovery sc-network = { path = "../../../substrate/client/network" } sc-network-common = { path = "../../../substrate/client/network/common" } sc-service = { path = "../../../substrate/client/service" } +sc-client-api = { path = "../../../substrate/client/api" } substrate-prometheus-endpoint = { path = "../../../substrate/utils/prometheus" } sc-tracing = { path = "../../../substrate/client/tracing" } sc-utils = { path = "../../../substrate/client/utils" } @@ -33,6 +35,8 @@ sp-api = { path = "../../../substrate/primitives/api" } sp-consensus-babe = { path = "../../../substrate/primitives/consensus/babe" } sp-consensus = { path = "../../../substrate/primitives/consensus/common" } sp-runtime = { path = "../../../substrate/primitives/runtime" } +sp-blockchain = { path = "../../../substrate/primitives/blockchain" } +tokio = { version = "1.32.0", features = ["macros"] } # cumulus deps cumulus-relay-chain-interface = { path = "../relay-chain-interface" } diff --git a/cumulus/client/relay-chain-minimal-node/src/blockchain_rpc_client.rs b/cumulus/client/relay-chain-minimal-node/src/blockchain_rpc_client.rs index a473b3bced0..c40ca5c858b 100644 --- a/cumulus/client/relay-chain-minimal-node/src/blockchain_rpc_client.rs +++ b/cumulus/client/relay-chain-minimal-node/src/blockchain_rpc_client.rs @@ -20,14 +20,16 @@ use cumulus_relay_chain_interface::{RelayChainError, RelayChainResult}; use cumulus_relay_chain_rpc_interface::RelayChainRpcClient; use futures::{Stream, StreamExt}; use polkadot_core_primitives::{Block, BlockNumber, Hash, Header}; -use polkadot_overseer::RuntimeApiSubsystemClient; +use polkadot_overseer::{ChainApiBackend, RuntimeApiSubsystemClient}; use polkadot_primitives::{ async_backing::{AsyncBackingParams, BackingState}, slashing, vstaging::NodeFeatures, }; use sc_authority_discovery::{AuthorityDiscovery, Error as AuthorityDiscoveryError}; -use sp_api::{ApiError, RuntimeApiInfo}; +use sc_client_api::AuxStore; +use sp_api::{ApiError, BlockT, HeaderT, NumberFor, RuntimeApiInfo}; +use sp_blockchain::Info; #[derive(Clone)] pub struct BlockChainRpcClient { @@ -54,6 +56,64 @@ impl BlockChainRpcClient { } } +#[async_trait::async_trait] +impl ChainApiBackend for BlockChainRpcClient { + async fn header( + &self, + hash: ::Hash, + ) -> sp_blockchain::Result::Header>> { + 
Ok(self.rpc_client.chain_get_header(Some(hash)).await?) + } + + async fn info(&self) -> sp_blockchain::Result> { + let (best_header_opt, genesis_hash, finalized_head) = futures::try_join!( + self.rpc_client.chain_get_header(None), + self.rpc_client.chain_get_head(Some(0)), + self.rpc_client.chain_get_finalized_head() + )?; + let best_header = best_header_opt.ok_or_else(|| { + RelayChainError::GenericError( + "Unable to retrieve best header from relay chain.".to_string(), + ) + })?; + + let finalized_header = + self.rpc_client.chain_get_header(Some(finalized_head)).await?.ok_or_else(|| { + RelayChainError::GenericError( + "Unable to retrieve finalized header from relay chain.".to_string(), + ) + })?; + Ok(Info { + best_hash: best_header.hash(), + best_number: best_header.number, + genesis_hash, + finalized_hash: finalized_head, + finalized_number: finalized_header.number, + finalized_state: Some((finalized_header.hash(), finalized_header.number)), + number_leaves: 1, + block_gap: None, + }) + } + + async fn number( + &self, + hash: ::Hash, + ) -> sp_blockchain::Result::Header as HeaderT>::Number>> { + Ok(self + .rpc_client + .chain_get_header(Some(hash)) + .await? + .map(|maybe_header| maybe_header.number)) + } + + async fn hash( + &self, + number: NumberFor, + ) -> sp_blockchain::Result::Hash>> { + Ok(self.rpc_client.chain_get_block_hash(number.into()).await?) + } +} + #[async_trait::async_trait] impl RuntimeApiSubsystemClient for BlockChainRpcClient { async fn validators( @@ -403,3 +463,25 @@ impl BlockChainRpcClient { Ok(self.rpc_client.get_finalized_heads_stream()?.boxed()) } } + +// Implementation required by ChainApiSubsystem +// but never called in our case. +impl AuxStore for BlockChainRpcClient { + fn insert_aux< + 'a, + 'b: 'a, + 'c: 'a, + I: IntoIterator, + D: IntoIterator, + >( + &self, + _insert: I, + _delete: D, + ) -> sp_blockchain::Result<()> { + unimplemented!("Not supported on the RPC collator") + } + + fn get_aux(&self, _key: &[u8]) -> sp_blockchain::Result>> { + unimplemented!("Not supported on the RPC collator") + } +} diff --git a/cumulus/client/relay-chain-minimal-node/src/collator_overseer.rs b/cumulus/client/relay-chain-minimal-node/src/collator_overseer.rs index 945344f85e9..a785a9f6f79 100644 --- a/cumulus/client/relay-chain-minimal-node/src/collator_overseer.rs +++ b/cumulus/client/relay-chain-minimal-node/src/collator_overseer.rs @@ -24,6 +24,7 @@ use polkadot_network_bridge::{ NetworkBridgeTx as NetworkBridgeTxSubsystem, }; use polkadot_node_collation_generation::CollationGenerationSubsystem; +use polkadot_node_core_chain_api::ChainApiSubsystem; use polkadot_node_core_prospective_parachains::ProspectiveParachainsSubsystem; use polkadot_node_core_runtime_api::RuntimeApiSubsystem; use polkadot_node_network_protocol::{ @@ -112,7 +113,7 @@ fn build_overseer( .candidate_backing(DummySubsystem) .candidate_validation(DummySubsystem) .pvf_checker(DummySubsystem) - .chain_api(DummySubsystem) + .chain_api(ChainApiSubsystem::new(runtime_client.clone(), Metrics::register(registry)?)) .collation_generation(CollationGenerationSubsystem::new(Metrics::register(registry)?)) .collator_protocol({ let side = ProtocolSide::Collator { diff --git a/polkadot/node/core/chain-api/Cargo.toml b/polkadot/node/core/chain-api/Cargo.toml index 154fa20e75d..fa824e78ffe 100644 --- a/polkadot/node/core/chain-api/Cargo.toml +++ b/polkadot/node/core/chain-api/Cargo.toml @@ -9,10 +9,9 @@ description = "The Chain API subsystem provides access to chain related utility [dependencies] futures = 
"0.3.21" gum = { package = "tracing-gum", path = "../../gum" } -sp-blockchain = { path = "../../../../substrate/primitives/blockchain" } -polkadot-primitives = { path = "../../../primitives" } polkadot-node-metrics = { path = "../../metrics" } polkadot-node-subsystem = { path = "../../subsystem" } +polkadot-node-subsystem-types = { path = "../../subsystem-types" } sc-client-api = { path = "../../../../substrate/client/api" } sc-consensus-babe = { path = "../../../../substrate/client/consensus/babe" } @@ -21,5 +20,7 @@ futures = { version = "0.3.21", features = ["thread-pool"] } maplit = "1.0.2" parity-scale-codec = "3.6.1" polkadot-node-primitives = { path = "../../primitives" } +polkadot-primitives = { path = "../../../primitives" } polkadot-node-subsystem-test-helpers = { path = "../../subsystem-test-helpers" } sp-core = { path = "../../../../substrate/primitives/core" } +sp-blockchain = { path = "../../../../substrate/primitives/blockchain" } diff --git a/polkadot/node/core/chain-api/src/lib.rs b/polkadot/node/core/chain-api/src/lib.rs index 9b25481d718..7fd5166310f 100644 --- a/polkadot/node/core/chain-api/src/lib.rs +++ b/polkadot/node/core/chain-api/src/lib.rs @@ -35,13 +35,13 @@ use std::sync::Arc; use futures::prelude::*; use sc_client_api::AuxStore; -use sp_blockchain::HeaderBackend; +use futures::stream::StreamExt; use polkadot_node_subsystem::{ messages::ChainApiMessage, overseer, FromOrchestra, OverseerSignal, SpawnedSubsystem, SubsystemError, SubsystemResult, }; -use polkadot_primitives::Block; +use polkadot_node_subsystem_types::ChainApiBackend; mod metrics; use self::metrics::Metrics; @@ -67,7 +67,7 @@ impl ChainApiSubsystem { #[overseer::subsystem(ChainApi, error = SubsystemError, prefix = self::overseer)] impl ChainApiSubsystem where - Client: HeaderBackend + AuxStore + 'static, + Client: ChainApiBackend + AuxStore + 'static, { fn start(self, ctx: Context) -> SpawnedSubsystem { let future = run::(ctx, self) @@ -83,7 +83,7 @@ async fn run( subsystem: ChainApiSubsystem, ) -> SubsystemResult<()> where - Client: HeaderBackend + AuxStore, + Client: ChainApiBackend + AuxStore, { loop { match ctx.recv().await? 
{ @@ -93,13 +93,15 @@ where FromOrchestra::Communication { msg } => match msg { ChainApiMessage::BlockNumber(hash, response_channel) => { let _timer = subsystem.metrics.time_block_number(); - let result = subsystem.client.number(hash).map_err(|e| e.to_string().into()); + let result = + subsystem.client.number(hash).await.map_err(|e| e.to_string().into()); subsystem.metrics.on_request(result.is_ok()); let _ = response_channel.send(result); }, ChainApiMessage::BlockHeader(hash, response_channel) => { let _timer = subsystem.metrics.time_block_header(); - let result = subsystem.client.header(hash).map_err(|e| e.to_string().into()); + let result = + subsystem.client.header(hash).await.map_err(|e| e.to_string().into()); subsystem.metrics.on_request(result.is_ok()); let _ = response_channel.send(result); }, @@ -113,46 +115,51 @@ where ChainApiMessage::FinalizedBlockHash(number, response_channel) => { let _timer = subsystem.metrics.time_finalized_block_hash(); // Note: we don't verify it's finalized - let result = subsystem.client.hash(number).map_err(|e| e.to_string().into()); + let result = + subsystem.client.hash(number).await.map_err(|e| e.to_string().into()); subsystem.metrics.on_request(result.is_ok()); let _ = response_channel.send(result); }, ChainApiMessage::FinalizedBlockNumber(response_channel) => { let _timer = subsystem.metrics.time_finalized_block_number(); - let result = subsystem.client.info().finalized_number; - // always succeeds - subsystem.metrics.on_request(true); - let _ = response_channel.send(Ok(result)); + let result = subsystem + .client + .info() + .await + .map_err(|e| e.to_string().into()) + .map(|info| info.finalized_number); + subsystem.metrics.on_request(result.is_ok()); + let _ = response_channel.send(result); }, ChainApiMessage::Ancestors { hash, k, response_channel } => { let _timer = subsystem.metrics.time_ancestors(); gum::trace!(target: LOG_TARGET, hash=%hash, k=k, "ChainApiMessage::Ancestors"); - let mut hash = hash; - - let next_parent = core::iter::from_fn(|| { - let maybe_header = subsystem.client.header(hash); - match maybe_header { - // propagate the error - Err(e) => { - let e = e.to_string().into(); - Some(Err(e)) - }, - // fewer than `k` ancestors are available - Ok(None) => None, - Ok(Some(header)) => { - // stop at the genesis header. - if header.number == 0 { - None - } else { - hash = header.parent_hash; - Some(Ok(hash)) - } - }, - } - }); - - let result = next_parent.take(k).collect::, _>>(); + let next_parent_stream = futures::stream::unfold( + (hash, subsystem.client.clone()), + |(hash, client)| async move { + let maybe_header = client.header(hash).await; + match maybe_header { + // propagate the error + Err(e) => { + let e = e.to_string().into(); + Some((Err(e), (hash, client))) + }, + // fewer than `k` ancestors are available + Ok(None) => None, + Ok(Some(header)) => { + // stop at the genesis header. 
+ if header.number == 0 { + None + } else { + Some((Ok(header.parent_hash), (header.parent_hash, client))) + } + }, + } + }, + ); + + let result = next_parent_stream.take(k).try_collect().await; subsystem.metrics.on_request(result.is_ok()); let _ = response_channel.send(result); }, diff --git a/polkadot/node/core/chain-api/src/tests.rs b/polkadot/node/core/chain-api/src/tests.rs index 331a4f9ba82..eae8f6fa4ac 100644 --- a/polkadot/node/core/chain-api/src/tests.rs +++ b/polkadot/node/core/chain-api/src/tests.rs @@ -22,7 +22,8 @@ use std::collections::BTreeMap; use polkadot_node_primitives::BlockWeight; use polkadot_node_subsystem_test_helpers::{make_subsystem_context, TestSubsystemContextHandle}; -use polkadot_primitives::{BlockNumber, Hash, Header}; +use polkadot_node_subsystem_types::ChainApiBackend; +use polkadot_primitives::{Block, BlockNumber, Hash, Header}; use sp_blockchain::Info as BlockInfo; use sp_core::testing::TaskExecutor; @@ -110,7 +111,7 @@ fn last_key_value(map: &BTreeMap) -> (K, V) { map.iter().last().map(|(k, v)| (k.clone(), v.clone())).unwrap() } -impl HeaderBackend for TestClient { +impl sp_blockchain::HeaderBackend for TestClient { fn info(&self) -> BlockInfo { let genesis_hash = self.blocks.iter().next().map(|(h, _)| *h).unwrap(); let (best_hash, best_number) = last_key_value(&self.blocks); @@ -191,8 +192,8 @@ fn request_block_number() { async move { let zero = Hash::zero(); let test_cases = [ - (TWO, client.number(TWO).unwrap()), - (zero, client.number(zero).unwrap()), // not here + (TWO, client.number(TWO).await.unwrap()), + (zero, client.number(zero).await.unwrap()), // not here ]; for (hash, expected) in &test_cases { let (tx, rx) = oneshot::channel(); @@ -217,8 +218,10 @@ fn request_block_header() { test_harness(|client, mut sender| { async move { const NOT_HERE: Hash = Hash::repeat_byte(0x5); - let test_cases = - [(TWO, client.header(TWO).unwrap()), (NOT_HERE, client.header(NOT_HERE).unwrap())]; + let test_cases = [ + (TWO, client.header(TWO).await.unwrap()), + (NOT_HERE, client.header(NOT_HERE).await.unwrap()), + ]; for (hash, expected) in &test_cases { let (tx, rx) = oneshot::channel(); @@ -270,8 +273,8 @@ fn request_finalized_hash() { test_harness(|client, mut sender| { async move { let test_cases = [ - (1, client.hash(1).unwrap()), // not here - (2, client.hash(2).unwrap()), + (1, client.hash(1).await.unwrap()), // not here + (2, client.hash(2).await.unwrap()), ]; for (number, expected) in &test_cases { let (tx, rx) = oneshot::channel(); @@ -297,7 +300,7 @@ fn request_last_finalized_number() { async move { let (tx, rx) = oneshot::channel(); - let expected = client.info().finalized_number; + let expected = client.info().await.unwrap().finalized_number; sender .send(FromOrchestra::Communication { msg: ChainApiMessage::FinalizedBlockNumber(tx), diff --git a/polkadot/node/overseer/src/lib.rs b/polkadot/node/overseer/src/lib.rs index 5207bb830d8..da99546a44f 100644 --- a/polkadot/node/overseer/src/lib.rs +++ b/polkadot/node/overseer/src/lib.rs @@ -87,8 +87,8 @@ use polkadot_node_subsystem_types::messages::{ pub use polkadot_node_subsystem_types::{ errors::{SubsystemError, SubsystemResult}, - jaeger, ActivatedLeaf, ActiveLeavesUpdate, OverseerSignal, RuntimeApiSubsystemClient, - UnpinHandle, + jaeger, ActivatedLeaf, ActiveLeavesUpdate, ChainApiBackend, OverseerSignal, + RuntimeApiSubsystemClient, UnpinHandle, }; pub mod metrics; diff --git a/polkadot/node/subsystem-types/Cargo.toml b/polkadot/node/subsystem-types/Cargo.toml index 9fd3775da59..8e345cf222c 100644 --- 
a/polkadot/node/subsystem-types/Cargo.toml +++ b/polkadot/node/subsystem-types/Cargo.toml @@ -17,6 +17,7 @@ polkadot-node-jaeger = { path = "../jaeger" } orchestra = { version = "0.3.3", default-features = false, features=["futures_channel"] } sc-network = { path = "../../../substrate/client/network" } sp-api = { path = "../../../substrate/primitives/api" } +sp-blockchain = { path = "../../../substrate/primitives/blockchain" } sp-consensus-babe = { path = "../../../substrate/primitives/consensus/babe" } sp-authority-discovery = { path = "../../../substrate/primitives/authority-discovery" } sc-client-api = { path = "../../../substrate/client/api" } diff --git a/polkadot/node/subsystem-types/src/lib.rs b/polkadot/node/subsystem-types/src/lib.rs index e3d6e4decf2..cd39aa03e56 100644 --- a/polkadot/node/subsystem-types/src/lib.rs +++ b/polkadot/node/subsystem-types/src/lib.rs @@ -40,7 +40,7 @@ pub mod errors; pub mod messages; mod runtime_client; -pub use runtime_client::{DefaultSubsystemClient, RuntimeApiSubsystemClient}; +pub use runtime_client::{ChainApiBackend, DefaultSubsystemClient, RuntimeApiSubsystemClient}; pub use jaeger::*; pub use polkadot_node_jaeger as jaeger; diff --git a/polkadot/node/subsystem-types/src/runtime_client.rs b/polkadot/node/subsystem-types/src/runtime_client.rs index 8369fd215f4..36e3365cf08 100644 --- a/polkadot/node/subsystem-types/src/runtime_client.rs +++ b/polkadot/node/subsystem-types/src/runtime_client.rs @@ -18,17 +18,70 @@ use async_trait::async_trait; use polkadot_primitives::{ async_backing, runtime_api::ParachainHost, slashing, vstaging, Block, BlockNumber, CandidateCommitments, CandidateEvent, CandidateHash, CommittedCandidateReceipt, CoreState, - DisputeState, ExecutorParams, GroupRotationInfo, Hash, Id, InboundDownwardMessage, + DisputeState, ExecutorParams, GroupRotationInfo, Hash, Header, Id, InboundDownwardMessage, InboundHrmpMessage, OccupiedCoreAssumption, PersistedValidationData, PvfCheckStatement, ScrapedOnChainVotes, SessionIndex, SessionInfo, ValidationCode, ValidationCodeHash, ValidatorId, ValidatorIndex, ValidatorSignature, }; +use sc_client_api::HeaderBackend; use sc_transaction_pool_api::OffchainTransactionPoolFactory; -use sp_api::{ApiError, ApiExt, ProvideRuntimeApi}; +use sp_api::{ApiError, ApiExt, HeaderT, NumberFor, ProvideRuntimeApi}; use sp_authority_discovery::AuthorityDiscoveryApi; +use sp_blockchain::Info; use sp_consensus_babe::{BabeApi, Epoch}; use std::{collections::BTreeMap, sync::Arc}; +/// Offers header utilities. +/// +/// This is a async wrapper trait for ['HeaderBackend'] to be used with the +/// `ChainApiSubsystem`. +// This trait was introduced to suit the needs of collators. Depending on their operating mode, they +// might not have a client of the relay chain that can supply a synchronous HeaderBackend +// implementation. +#[async_trait] +pub trait ChainApiBackend: Send + Sync { + /// Get block header. Returns `None` if block is not found. + async fn header(&self, hash: Hash) -> sp_blockchain::Result>; + /// Get blockchain info. + async fn info(&self) -> sp_blockchain::Result>; + /// Get block number by hash. Returns `None` if the header is not in the chain. + async fn number( + &self, + hash: Hash, + ) -> sp_blockchain::Result::Number>>; + /// Get block hash by number. Returns `None` if the header is not in the chain. + async fn hash(&self, number: NumberFor) -> sp_blockchain::Result>; +} + +#[async_trait] +impl ChainApiBackend for T +where + T: HeaderBackend, +{ + /// Get block header. 
Returns `None` if block is not found. + async fn header(&self, hash: Hash) -> sp_blockchain::Result> { + HeaderBackend::header(self, hash) + } + + /// Get blockchain info. + async fn info(&self) -> sp_blockchain::Result> { + Ok(HeaderBackend::info(self)) + } + + /// Get block number by hash. Returns `None` if the header is not in the chain. + async fn number( + &self, + hash: Hash, + ) -> sp_blockchain::Result::Number>> { + HeaderBackend::number(self, hash) + } + + /// Get block hash by number. Returns `None` if the header is not in the chain. + async fn hash(&self, number: NumberFor) -> sp_blockchain::Result> { + HeaderBackend::hash(self, number) + } +} + /// Exposes all runtime calls that are used by the runtime API subsystem. #[async_trait] pub trait RuntimeApiSubsystemClient { -- GitLab From ec189333845fffebd45bf023b0797d6a4f121d8d Mon Sep 17 00:00:00 2001 From: Joshy Orndorff Date: Wed, 22 Nov 2023 17:21:20 -0500 Subject: [PATCH 102/199] Remove `#[macro_use]` annotation from `mod service` in all nodes. (#2456) This PR removes `#[macro_use]` from the service module in each of the Substrate nodes in the repo. * Parachain Template * Polkadot Parachain * Minimal Node * Node Template * Kitchen Sink Node IDK why this annotation was present, maybe from when we had the `new_partial!` macro? --------- Co-authored-by: Joshy Orndorff --- cumulus/parachain-template/node/src/main.rs | 3 +-- cumulus/polkadot-parachain/src/main.rs | 3 +-- substrate/bin/minimal/node/src/main.rs | 3 +-- substrate/bin/node-template/node/src/main.rs | 5 ++--- substrate/bin/node/cli/src/lib.rs | 6 ++---- 5 files changed, 7 insertions(+), 13 deletions(-) diff --git a/cumulus/parachain-template/node/src/main.rs b/cumulus/parachain-template/node/src/main.rs index ba9f28b354f..12738a6793c 100644 --- a/cumulus/parachain-template/node/src/main.rs +++ b/cumulus/parachain-template/node/src/main.rs @@ -3,11 +3,10 @@ #![warn(missing_docs)] mod chain_spec; -#[macro_use] -mod service; mod cli; mod command; mod rpc; +mod service; fn main() -> sc_cli::Result<()> { command::run() diff --git a/cumulus/polkadot-parachain/src/main.rs b/cumulus/polkadot-parachain/src/main.rs index e40af8128f7..26d7dae4b8a 100644 --- a/cumulus/polkadot-parachain/src/main.rs +++ b/cumulus/polkadot-parachain/src/main.rs @@ -20,11 +20,10 @@ #![warn(unused_extern_crates)] mod chain_spec; -#[macro_use] -mod service; mod cli; mod command; mod rpc; +mod service; fn main() -> sc_cli::Result<()> { command::run() diff --git a/substrate/bin/minimal/node/src/main.rs b/substrate/bin/minimal/node/src/main.rs index 900651fd1fd..3cf7d98311e 100644 --- a/substrate/bin/minimal/node/src/main.rs +++ b/substrate/bin/minimal/node/src/main.rs @@ -19,11 +19,10 @@ #![warn(missing_docs)] mod chain_spec; -#[macro_use] -mod service; mod cli; mod command; mod rpc; +mod service; fn main() -> sc_cli::Result<()> { command::run() diff --git a/substrate/bin/node-template/node/src/main.rs b/substrate/bin/node-template/node/src/main.rs index 426cbabb6fb..8918dd43a01 100644 --- a/substrate/bin/node-template/node/src/main.rs +++ b/substrate/bin/node-template/node/src/main.rs @@ -1,13 +1,12 @@ //! Substrate Node Template CLI library. 
#![warn(missing_docs)] -mod chain_spec; -#[macro_use] -mod service; mod benchmarking; +mod chain_spec; mod cli; mod command; mod rpc; +mod service; fn main() -> sc_cli::Result<()> { command::run() diff --git a/substrate/bin/node/cli/src/lib.rs b/substrate/bin/node/cli/src/lib.rs index 2fe238ef316..0ff544932a9 100644 --- a/substrate/bin/node/cli/src/lib.rs +++ b/substrate/bin/node/cli/src/lib.rs @@ -30,16 +30,14 @@ #![warn(missing_docs)] -pub mod chain_spec; - -#[macro_use] -pub mod service; #[cfg(feature = "cli")] mod benchmarking; +pub mod chain_spec; #[cfg(feature = "cli")] mod cli; #[cfg(feature = "cli")] mod command; +pub mod service; #[cfg(feature = "cli")] pub use cli::*; -- GitLab From 2d09e83d0703ca6bf6aba773e80ea14576887ac7 Mon Sep 17 00:00:00 2001 From: Liam Aharon Date: Thu, 23 Nov 2023 15:18:24 +0400 Subject: [PATCH 103/199] Add `on-chain-release-build` feature for Collectives Westend (#2463) Collectives Westend is missing an `on-chain-release-build` feature flag. --- .../collectives-westend/Cargo.toml | 108 ++++++++++-------- 1 file changed, 60 insertions(+), 48 deletions(-) diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/Cargo.toml b/cumulus/parachains/runtimes/collectives/collectives-westend/Cargo.toml index 94f2de33e90..79d6c697b5f 100644 --- a/cumulus/parachains/runtimes/collectives/collectives-westend/Cargo.toml +++ b/cumulus/parachains/runtimes/collectives/collectives-westend/Cargo.toml @@ -7,69 +7,76 @@ license = "Apache-2.0" description = "Westend Collectives Parachain Runtime" [dependencies] -codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive", "max-encoded-len"] } +codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = [ + "derive", + "max-encoded-len", +] } hex-literal = { version = "0.4.1" } log = { version = "0.4.20", default-features = false } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.10.0", default-features = false, features = [ + "derive", +] } smallvec = "1.11.0" # Substrate -frame-benchmarking = { path = "../../../../../substrate/frame/benchmarking", default-features = false, optional = true} -frame-executive = { path = "../../../../../substrate/frame/executive", default-features = false} -frame-support = { path = "../../../../../substrate/frame/support", default-features = false} -frame-system = { path = "../../../../../substrate/frame/system", default-features = false} -frame-system-benchmarking = { path = "../../../../../substrate/frame/system/benchmarking", default-features = false, optional = true} -frame-system-rpc-runtime-api = { path = "../../../../../substrate/frame/system/rpc/runtime-api", default-features = false} -frame-try-runtime = { path = "../../../../../substrate/frame/try-runtime", default-features = false, optional = true} -pallet-alliance = { path = "../../../../../substrate/frame/alliance", default-features = false} -pallet-aura = { path = "../../../../../substrate/frame/aura", default-features = false} -pallet-authorship = { path = "../../../../../substrate/frame/authorship", default-features = false} -pallet-balances = { path = "../../../../../substrate/frame/balances", default-features = false} -pallet-collective = { path = "../../../../../substrate/frame/collective", default-features = false} -pallet-multisig = { path = "../../../../../substrate/frame/multisig", default-features = false} +frame-benchmarking = { path = 
"../../../../../substrate/frame/benchmarking", default-features = false, optional = true } +frame-executive = { path = "../../../../../substrate/frame/executive", default-features = false } +frame-support = { path = "../../../../../substrate/frame/support", default-features = false } +frame-system = { path = "../../../../../substrate/frame/system", default-features = false } +frame-system-benchmarking = { path = "../../../../../substrate/frame/system/benchmarking", default-features = false, optional = true } +frame-system-rpc-runtime-api = { path = "../../../../../substrate/frame/system/rpc/runtime-api", default-features = false } +frame-try-runtime = { path = "../../../../../substrate/frame/try-runtime", default-features = false, optional = true } +pallet-alliance = { path = "../../../../../substrate/frame/alliance", default-features = false } +pallet-aura = { path = "../../../../../substrate/frame/aura", default-features = false } +pallet-authorship = { path = "../../../../../substrate/frame/authorship", default-features = false } +pallet-balances = { path = "../../../../../substrate/frame/balances", default-features = false } +pallet-collective = { path = "../../../../../substrate/frame/collective", default-features = false } +pallet-multisig = { path = "../../../../../substrate/frame/multisig", default-features = false } pallet-preimage = { path = "../../../../../substrate/frame/preimage", default-features = false } -pallet-proxy = { path = "../../../../../substrate/frame/proxy", default-features = false} +pallet-proxy = { path = "../../../../../substrate/frame/proxy", default-features = false } pallet-scheduler = { path = "../../../../../substrate/frame/scheduler", default-features = false } -pallet-session = { path = "../../../../../substrate/frame/session", default-features = false} -pallet-timestamp = { path = "../../../../../substrate/frame/timestamp", default-features = false} -pallet-transaction-payment = { path = "../../../../../substrate/frame/transaction-payment", default-features = false} -pallet-transaction-payment-rpc-runtime-api = { path = "../../../../../substrate/frame/transaction-payment/rpc/runtime-api", default-features = false} -pallet-utility = { path = "../../../../../substrate/frame/utility", default-features = false} -pallet-referenda = { path = "../../../../../substrate/frame/referenda", default-features = false} -pallet-ranked-collective = { path = "../../../../../substrate/frame/ranked-collective", default-features = false} -pallet-core-fellowship = { path = "../../../../../substrate/frame/core-fellowship", default-features = false} -pallet-salary = { path = "../../../../../substrate/frame/salary", default-features = false} -sp-api = { path = "../../../../../substrate/primitives/api", default-features = false} +pallet-session = { path = "../../../../../substrate/frame/session", default-features = false } +pallet-timestamp = { path = "../../../../../substrate/frame/timestamp", default-features = false } +pallet-transaction-payment = { path = "../../../../../substrate/frame/transaction-payment", default-features = false } +pallet-transaction-payment-rpc-runtime-api = { path = "../../../../../substrate/frame/transaction-payment/rpc/runtime-api", default-features = false } +pallet-utility = { path = "../../../../../substrate/frame/utility", default-features = false } +pallet-referenda = { path = "../../../../../substrate/frame/referenda", default-features = false } +pallet-ranked-collective = { path = "../../../../../substrate/frame/ranked-collective", 
default-features = false } +pallet-core-fellowship = { path = "../../../../../substrate/frame/core-fellowship", default-features = false } +pallet-salary = { path = "../../../../../substrate/frame/salary", default-features = false } +sp-api = { path = "../../../../../substrate/primitives/api", default-features = false } sp-arithmetic = { path = "../../../../../substrate/primitives/arithmetic", default-features = false } -sp-block-builder = { path = "../../../../../substrate/primitives/block-builder", default-features = false} -sp-consensus-aura = { path = "../../../../../substrate/primitives/consensus/aura", default-features = false} -sp-core = { path = "../../../../../substrate/primitives/core", default-features = false} +sp-block-builder = { path = "../../../../../substrate/primitives/block-builder", default-features = false } +sp-consensus-aura = { path = "../../../../../substrate/primitives/consensus/aura", default-features = false } +sp-core = { path = "../../../../../substrate/primitives/core", default-features = false } sp-genesis-builder = { path = "../../../../../substrate/primitives/genesis-builder", default-features = false } -sp-inherents = { path = "../../../../../substrate/primitives/inherents", default-features = false} -sp-offchain = { path = "../../../../../substrate/primitives/offchain", default-features = false} -sp-runtime = { path = "../../../../../substrate/primitives/runtime", default-features = false} -sp-session = { path = "../../../../../substrate/primitives/session", default-features = false} -sp-std = { path = "../../../../../substrate/primitives/std", default-features = false} -sp-storage = { path = "../../../../../substrate/primitives/storage", default-features = false} -sp-transaction-pool = { path = "../../../../../substrate/primitives/transaction-pool", default-features = false} -sp-version = { path = "../../../../../substrate/primitives/version", default-features = false} +sp-inherents = { path = "../../../../../substrate/primitives/inherents", default-features = false } +sp-offchain = { path = "../../../../../substrate/primitives/offchain", default-features = false } +sp-runtime = { path = "../../../../../substrate/primitives/runtime", default-features = false } +sp-session = { path = "../../../../../substrate/primitives/session", default-features = false } +sp-std = { path = "../../../../../substrate/primitives/std", default-features = false } +sp-storage = { path = "../../../../../substrate/primitives/storage", default-features = false } +sp-transaction-pool = { path = "../../../../../substrate/primitives/transaction-pool", default-features = false } +sp-version = { path = "../../../../../substrate/primitives/version", default-features = false } # Polkadot -pallet-xcm = { path = "../../../../../polkadot/xcm/pallet-xcm", default-features = false} -polkadot-core-primitives = { path = "../../../../../polkadot/core-primitives", default-features = false} -polkadot-parachain-primitives = { path = "../../../../../polkadot/parachain", default-features = false} -polkadot-runtime-common = { path = "../../../../../polkadot/runtime/common", default-features = false} -xcm = { package = "staging-xcm", path = "../../../../../polkadot/xcm", default-features = false} -xcm-builder = { package = "staging-xcm-builder", path = "../../../../../polkadot/xcm/xcm-builder", default-features = false} -xcm-executor = { package = "staging-xcm-executor", path = "../../../../../polkadot/xcm/xcm-executor", default-features = false} -westend-runtime-constants = { path = 
"../../../../../polkadot/runtime/westend/constants", default-features = false} +pallet-xcm = { path = "../../../../../polkadot/xcm/pallet-xcm", default-features = false } +polkadot-core-primitives = { path = "../../../../../polkadot/core-primitives", default-features = false } +polkadot-parachain-primitives = { path = "../../../../../polkadot/parachain", default-features = false } +polkadot-runtime-common = { path = "../../../../../polkadot/runtime/common", default-features = false } +xcm = { package = "staging-xcm", path = "../../../../../polkadot/xcm", default-features = false } +xcm-builder = { package = "staging-xcm-builder", path = "../../../../../polkadot/xcm/xcm-builder", default-features = false } +xcm-executor = { package = "staging-xcm-executor", path = "../../../../../polkadot/xcm/xcm-executor", default-features = false } +westend-runtime-constants = { path = "../../../../../polkadot/runtime/westend/constants", default-features = false } # Cumulus cumulus-pallet-aura-ext = { path = "../../../../pallets/aura-ext", default-features = false } pallet-message-queue = { path = "../../../../../substrate/frame/message-queue", default-features = false } cumulus-pallet-dmp-queue = { path = "../../../../pallets/dmp-queue", default-features = false } -cumulus-pallet-parachain-system = { path = "../../../../pallets/parachain-system", default-features = false, features = ["parameterized-consensus-hook",] } -cumulus-pallet-session-benchmarking = { path = "../../../../pallets/session-benchmarking", default-features = false} +cumulus-pallet-parachain-system = { path = "../../../../pallets/parachain-system", default-features = false, features = [ + "parameterized-consensus-hook", +] } +cumulus-pallet-session-benchmarking = { path = "../../../../pallets/session-benchmarking", default-features = false } cumulus-pallet-xcm = { path = "../../../../pallets/xcm", default-features = false } cumulus-pallet-xcmp-queue = { path = "../../../../pallets/xcmp-queue", default-features = false } cumulus-primitives-core = { path = "../../../../primitives/core", default-features = false } @@ -84,7 +91,7 @@ testnets-common = { path = "../../../testnets-common", default-features = false substrate-wasm-builder = { path = "../../../../../substrate/utils/wasm-builder", optional = true } [dev-dependencies] -sp-io = { path = "../../../../../substrate/primitives/io", features = [ "std" ]} +sp-io = { path = "../../../../../substrate/primitives/io", features = ["std"] } [features] default = [ "std" ] @@ -228,3 +235,8 @@ std = [ ] experimental = [ "pallet-aura/experimental" ] + +# A feature that should be enabled when the runtime should be built for on-chain +# deployment. This will disable stuff that shouldn't be part of the on-chain wasm +# to make it smaller like logging for example. +on-chain-release-build = [ "sp-api/disable-logging" ] -- GitLab From 12062f6a3a75a36e6e61836ab15c5926d9773583 Mon Sep 17 00:00:00 2001 From: Liam Aharon Date: Thu, 23 Nov 2023 17:52:06 +0400 Subject: [PATCH 104/199] CI: Disable runtime upgrade spec name check on Westend Asset Hub and fix Staking pallet migration (#2447) Westend Asset Hub currently failing because the spec name is being changed next runtime upgrade (https://gitlab.parity.io/parity/mirrors/polkadot-sdk/-/jobs/4413125). This also fixes an idempotency issue with a staking pallet migration. Similar issues will be caught automatically now that we've also updated to try-runtime-cli v0.5.0 which checks for idempotency issues. This also enables try-state checks running in the CI. 
--- .gitlab/pipeline/check.yml | 13 +++++++------ substrate/frame/staking/src/migrations.rs | 12 +----------- 2 files changed, 8 insertions(+), 17 deletions(-) diff --git a/.gitlab/pipeline/check.yml b/.gitlab/pipeline/check.yml index 429491fb174..b2f2efc5e66 100644 --- a/.gitlab/pipeline/check.yml +++ b/.gitlab/pipeline/check.yml @@ -101,16 +101,16 @@ check-rust-feature-propagation: export RUST_LOG=remote-ext=debug,runtime=debug echo "---------- Downloading try-runtime CLI ----------" - curl -sL https://github.com/paritytech/try-runtime-cli/releases/download/v0.3.3/try-runtime-x86_64-unknown-linux-musl -o try-runtime + curl -sL https://github.com/paritytech/try-runtime-cli/releases/download/v0.5.0/try-runtime-x86_64-unknown-linux-musl -o try-runtime chmod +x ./try-runtime echo "---------- Building ${PACKAGE} runtime ----------" time cargo build --release --locked -p "$PACKAGE" --features try-runtime echo "---------- Executing on-runtime-upgrade for ${NETWORK} ----------" - time ./try-runtime \ + time ./try-runtime ${COMMAND_EXTRA_ARGS} \ --runtime ./target/release/wbuild/"$PACKAGE"/"$WASM" \ - on-runtime-upgrade --checks=pre-and-post ${EXTRA_ARGS} live --uri ${URI} + on-runtime-upgrade --disable-spec-version-check --checks=all ${SUBCOMMAND_EXTRA_ARGS} live --uri ${URI} # Check runtime migrations for Parity managed relay chains check-runtime-migration-westend: @@ -124,7 +124,7 @@ check-runtime-migration-westend: PACKAGE: "westend-runtime" WASM: "westend_runtime.compact.compressed.wasm" URI: "wss://westend-try-runtime-node.parity-chains.parity.io:443" - EXTRA_ARGS: "--no-weight-warnings" + SUBCOMMAND_EXTRA_ARGS: "--no-weight-warnings" check-runtime-migration-rococo: stage: check @@ -137,7 +137,7 @@ check-runtime-migration-rococo: PACKAGE: "rococo-runtime" WASM: "rococo_runtime.compact.compressed.wasm" URI: "wss://rococo-try-runtime-node.parity-chains.parity.io:443" - EXTRA_ARGS: "--no-weight-warnings" + SUBCOMMAND_EXTRA_ARGS: "--no-weight-warnings" # Check runtime migrations for Parity managed asset hub chains check-runtime-migration-asset-hub-westend: @@ -151,7 +151,7 @@ check-runtime-migration-asset-hub-westend: PACKAGE: "asset-hub-westend-runtime" WASM: "asset_hub_westend_runtime.compact.compressed.wasm" URI: "wss://westend-asset-hub-rpc.polkadot.io:443" - + check-runtime-migration-asset-hub-rococo: stage: check extends: @@ -214,6 +214,7 @@ check-runtime-migration-collectives-westend: PACKAGE: "collectives-westend-runtime" WASM: "collectives_westend_runtime.compact.compressed.wasm" URI: "wss://westend-collectives-rpc.polkadot.io:443" + COMMAND_EXTRA_ARGS: "--disable-spec-name-check" find-fail-ci-phrase: stage: check diff --git a/substrate/frame/staking/src/migrations.rs b/substrate/frame/staking/src/migrations.rs index 84b00254126..311e9667ceb 100644 --- a/substrate/frame/staking/src/migrations.rs +++ b/substrate/frame/staking/src/migrations.rs @@ -81,20 +81,10 @@ pub mod v14 { } } - #[cfg(feature = "try-runtime")] - fn pre_upgrade() -> Result, TryRuntimeError> { - frame_support::ensure!( - Pallet::::on_chain_storage_version() == 13, - "Required v13 before upgrading to v14." 
- ); - - Ok(Default::default()) - } - #[cfg(feature = "try-runtime")] fn post_upgrade(_state: Vec) -> Result<(), TryRuntimeError> { frame_support::ensure!( - Pallet::::on_chain_storage_version() == 14, + Pallet::::on_chain_storage_version() >= 14, "v14 not applied" ); Ok(()) -- GitLab From 21f1811c6600d8a7fe043592ff34dcb79284d583 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Thu, 23 Nov 2023 14:52:46 +0100 Subject: [PATCH 105/199] sp-api: Move macro related re-exports to `__private` (#2446) This moves the macro related re-exports to `__private` to make it more obvious for downstream users that they are using an internal api. --------- Co-authored-by: command-bot <> --- Cargo.lock | 10 ++- .../relay-chain-rpc-interface/Cargo.toml | 1 + .../src/rpc_client.rs | 2 +- cumulus/pallets/xcmp-queue/src/tests.rs | 2 +- polkadot/node/service/src/lib.rs | 2 +- substrate/client/api/src/call_executor.rs | 4 +- substrate/client/api/src/in_mem.rs | 3 +- substrate/client/consensus/beefy/src/lib.rs | 4 +- .../client/consensus/beefy/src/worker.rs | 7 +- substrate/client/executor/src/wasm_runtime.rs | 15 ++-- .../merkle-mountain-range/rpc/src/lib.rs | 4 +- substrate/client/rpc-spec-v2/Cargo.toml | 2 + .../client/rpc-spec-v2/src/archive/archive.rs | 4 +- .../src/chain_head/chain_head_storage.rs | 2 +- .../rpc-spec-v2/src/chain_head/test_utils.rs | 7 +- .../rpc-spec-v2/src/chain_head/tests.rs | 2 +- substrate/client/tracing/Cargo.toml | 1 + substrate/client/tracing/src/block/mod.rs | 3 +- substrate/frame/beefy-mmr/src/lib.rs | 3 +- .../frame/contracts/src/storage/meter.rs | 3 +- substrate/frame/nfts/runtime-api/Cargo.toml | 7 +- substrate/frame/nfts/runtime-api/src/lib.rs | 2 +- .../support/test/tests/construct_runtime.rs | 2 +- substrate/primitives/api/Cargo.toml | 16 ++-- .../primitives/api/proc-macro/src/utils.rs | 6 +- substrate/primitives/api/src/lib.rs | 80 +++++++++++-------- .../primitives/consensus/beefy/Cargo.toml | 16 ++-- .../primitives/consensus/beefy/src/mmr.rs | 3 +- .../frame/benchmarking-cli/src/block/bench.rs | 8 +- .../benchmarking-cli/src/storage/write.rs | 2 +- 30 files changed, 124 insertions(+), 99 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 26da3b4f825..1fe9d1b8192 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1493,8 +1493,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "93f2635620bf0b9d4576eb7bb9a38a55df78bd1205d26fa994b25911a69f212f" dependencies = [ "bitcoin_hashes", - "rand 0.7.3", - "rand_core 0.5.1", + "rand 0.8.5", + "rand_core 0.6.4", "serde", "unicode-normalization", ] @@ -4237,6 +4237,7 @@ dependencies = [ "sp-runtime", "sp-state-machine", "sp-storage 13.0.0", + "sp-version", "thiserror", "tokio", "tokio-util", @@ -10423,6 +10424,7 @@ dependencies = [ "pallet-nfts", "parity-scale-codec", "sp-api", + "sp-std 8.0.0", ] [[package]] @@ -16124,6 +16126,7 @@ dependencies = [ "sp-blockchain", "sp-consensus", "sp-core", + "sp-externalities 0.19.0", "sp-maybe-compressed-blob", "sp-rpc", "sp-runtime", @@ -16357,6 +16360,7 @@ dependencies = [ "lazy_static", "libc", "log", + "parity-scale-codec", "parking_lot 0.12.1", "regex", "rustc-hash", @@ -20011,7 +20015,7 @@ checksum = "97fee6b57c6a41524a810daee9286c02d7752c4253064d0b05472833a438f675" dependencies = [ "cfg-if", "digest 0.10.7", - "rand 0.7.3", + "rand 0.8.5", "static_assertions", ] diff --git a/cumulus/client/relay-chain-rpc-interface/Cargo.toml b/cumulus/client/relay-chain-rpc-interface/Cargo.toml index 93fe57145b0..0159cade23f 100644 --- 
a/cumulus/client/relay-chain-rpc-interface/Cargo.toml +++ b/cumulus/client/relay-chain-rpc-interface/Cargo.toml @@ -20,6 +20,7 @@ sp-authority-discovery = { path = "../../../substrate/primitives/authority-disco sp-state-machine = { path = "../../../substrate/primitives/state-machine" } sp-storage = { path = "../../../substrate/primitives/storage" } sp-runtime = { path = "../../../substrate/primitives/runtime" } +sp-version = { path = "../../../substrate/primitives/version" } sc-client-api = { path = "../../../substrate/client/api" } sc-rpc-api = { path = "../../../substrate/client/rpc-api" } sc-service = { path = "../../../substrate/client/service" } diff --git a/cumulus/client/relay-chain-rpc-interface/src/rpc_client.rs b/cumulus/client/relay-chain-rpc-interface/src/rpc_client.rs index cc993c6ff9f..8e0d5fae677 100644 --- a/cumulus/client/relay-chain-rpc-interface/src/rpc_client.rs +++ b/cumulus/client/relay-chain-rpc-interface/src/rpc_client.rs @@ -46,10 +46,10 @@ use cumulus_relay_chain_interface::{RelayChainError, RelayChainResult}; use sc_client_api::StorageData; use sc_rpc_api::{state::ReadProof, system::Health}; use sc_service::TaskManager; -use sp_api::RuntimeVersion; use sp_consensus_babe::Epoch; use sp_core::sp_std::collections::btree_map::BTreeMap; use sp_storage::StorageKey; +use sp_version::RuntimeVersion; use crate::{ light_client_worker::{build_smoldot_client, LightClientRpcWorker}, diff --git a/cumulus/pallets/xcmp-queue/src/tests.rs b/cumulus/pallets/xcmp-queue/src/tests.rs index 30dba6ead34..f88dedc1ece 100644 --- a/cumulus/pallets/xcmp-queue/src/tests.rs +++ b/cumulus/pallets/xcmp-queue/src/tests.rs @@ -155,7 +155,7 @@ fn xcm_enqueueing_broken_xcm_works() { .take(20) .collect::>(), ); - EnqueuedMessages::set(&vec![]); + EnqueuedMessages::take(); // But if we do it all in one page, then it only uses the first 10: XcmpQueue::handle_xcmp_messages( diff --git a/polkadot/node/service/src/lib.rs b/polkadot/node/service/src/lib.rs index 0ed7940b3e8..c03835b2a4b 100644 --- a/polkadot/node/service/src/lib.rs +++ b/polkadot/node/service/src/lib.rs @@ -98,7 +98,7 @@ pub use service::{ ChainSpec, Configuration, Error as SubstrateServiceError, PruningMode, Role, RuntimeGenesis, TFullBackend, TFullCallExecutor, TFullClient, TaskManager, TransactionPoolOptions, }; -pub use sp_api::{ApiRef, ConstructRuntimeApi, Core as CoreApi, ProvideRuntimeApi, StateBackend}; +pub use sp_api::{ApiRef, ConstructRuntimeApi, Core as CoreApi, ProvideRuntimeApi}; pub use sp_runtime::{ generic, traits::{self as runtime_traits, BlakeTwo256, Block as BlockT, Header as HeaderT, NumberFor}, diff --git a/substrate/client/api/src/call_executor.rs b/substrate/client/api/src/call_executor.rs index 49b51ccc943..d131cbcec00 100644 --- a/substrate/client/api/src/call_executor.rs +++ b/substrate/client/api/src/call_executor.rs @@ -21,12 +21,12 @@ use sc_executor::{RuntimeVersion, RuntimeVersionOf}; use sp_core::traits::CallContext; use sp_externalities::Extensions; -use sp_runtime::traits::Block as BlockT; +use sp_runtime::traits::{Block as BlockT, HashingFor}; use sp_state_machine::{OverlayedChanges, StorageProof}; use std::cell::RefCell; use crate::execution_extensions::ExecutionExtensions; -use sp_api::{HashingFor, ProofRecorder}; +use sp_api::ProofRecorder; /// Executor Provider pub trait ExecutorProvider { diff --git a/substrate/client/api/src/in_mem.rs b/substrate/client/api/src/in_mem.rs index 807bdf0e334..b933ed1f17e 100644 --- a/substrate/client/api/src/in_mem.rs +++ b/substrate/client/api/src/in_mem.rs @@ 
-812,9 +812,8 @@ pub fn check_genesis_storage(storage: &Storage) -> sp_blockchain::Result<()> { #[cfg(test)] mod tests { use crate::{in_mem::Blockchain, NewBlockState}; - use sp_api::HeaderT; use sp_blockchain::Backend; - use sp_runtime::{ConsensusEngineId, Justifications}; + use sp_runtime::{traits::Header as HeaderT, ConsensusEngineId, Justifications}; use substrate_test_runtime::{Block, Header, H256}; pub const ID1: ConsensusEngineId = *b"TST1"; diff --git a/substrate/client/consensus/beefy/src/lib.rs b/substrate/client/consensus/beefy/src/lib.rs index 89a5d51c887..3d104f13250 100644 --- a/substrate/client/consensus/beefy/src/lib.rs +++ b/substrate/client/consensus/beefy/src/lib.rs @@ -40,7 +40,7 @@ use sc_client_api::{Backend, BlockBackend, BlockchainEvents, FinalityNotificatio use sc_consensus::BlockImport; use sc_network::{NetworkRequest, ProtocolName}; use sc_network_gossip::{GossipEngine, Network as GossipNetwork, Syncing as GossipSyncing}; -use sp_api::{HeaderT, NumberFor, ProvideRuntimeApi}; +use sp_api::ProvideRuntimeApi; use sp_blockchain::{ Backend as BlockchainBackend, Error as ClientError, HeaderBackend, Result as ClientResult, }; @@ -51,7 +51,7 @@ use sp_consensus_beefy::{ }; use sp_keystore::KeystorePtr; use sp_mmr_primitives::MmrApi; -use sp_runtime::traits::{Block, Zero}; +use sp_runtime::traits::{Block, Header as HeaderT, NumberFor, Zero}; use std::{ collections::{BTreeMap, VecDeque}, marker::PhantomData, diff --git a/substrate/client/consensus/beefy/src/worker.rs b/substrate/client/consensus/beefy/src/worker.rs index 966c7410365..0eea5647e51 100644 --- a/substrate/client/consensus/beefy/src/worker.rs +++ b/substrate/client/consensus/beefy/src/worker.rs @@ -36,7 +36,7 @@ use log::{debug, error, info, log_enabled, trace, warn}; use sc_client_api::{Backend, FinalityNotification, FinalityNotifications, HeaderBackend}; use sc_network_gossip::GossipEngine; use sc_utils::{mpsc::TracingUnboundedReceiver, notification::NotificationReceiver}; -use sp_api::{BlockId, ProvideRuntimeApi}; +use sp_api::ProvideRuntimeApi; use sp_arithmetic::traits::{AtLeast32Bit, Saturating}; use sp_consensus::SyncOracle; use sp_consensus_beefy::{ @@ -46,7 +46,7 @@ use sp_consensus_beefy::{ VersionedFinalityProof, VoteMessage, BEEFY_ENGINE_ID, }; use sp_runtime::{ - generic::OpaqueDigestItemId, + generic::{BlockId, OpaqueDigestItemId}, traits::{Block, Header, NumberFor, Zero}, SaturatedConversion, }; @@ -1074,13 +1074,12 @@ pub(crate) mod tests { use sc_client_api::{Backend as BackendT, HeaderBackend}; use sc_network_sync::SyncingService; use sc_network_test::TestNetFactory; - use sp_api::HeaderT; use sp_blockchain::Backend as BlockchainBackendT; use sp_consensus_beefy::{ generate_equivocation_proof, known_payloads, known_payloads::MMR_ROOT_ID, mmr::MmrRootProvider, Keyring, Payload, SignedCommitment, }; - use sp_runtime::traits::One; + use sp_runtime::traits::{Header as HeaderT, One}; use substrate_test_runtime_client::{ runtime::{Block, Digest, DigestItem, Header}, Backend, diff --git a/substrate/client/executor/src/wasm_runtime.rs b/substrate/client/executor/src/wasm_runtime.rs index 6dec3abdb20..501279a312c 100644 --- a/substrate/client/executor/src/wasm_runtime.rs +++ b/substrate/client/executor/src/wasm_runtime.rs @@ -441,6 +441,7 @@ mod tests { use codec::Encode; use sp_api::{Core, RuntimeApiInfo}; use sp_runtime::RuntimeString; + use sp_version::{create_apis_vec, RuntimeVersion}; use sp_wasm_interface::HostFunctions; use substrate_test_runtime::Block; @@ -470,7 +471,7 @@ mod tests { 
authoring_version: 1, spec_version: 1, impl_version: 1, - apis: sp_api::create_apis_vec!([(>::ID, 1)]), + apis: create_apis_vec!([(>::ID, 1)]), }; let version = decode_version(&old_runtime_version.encode()).unwrap(); @@ -486,7 +487,7 @@ mod tests { authoring_version: 1, spec_version: 1, impl_version: 1, - apis: sp_api::create_apis_vec!([(>::ID, 3)]), + apis: create_apis_vec!([(>::ID, 3)]), }; decode_version(&old_runtime_version.encode()).unwrap_err(); @@ -494,13 +495,13 @@ mod tests { #[test] fn new_runtime_version_decodes() { - let old_runtime_version = sp_api::RuntimeVersion { + let old_runtime_version = RuntimeVersion { spec_name: "test".into(), impl_name: "test".into(), authoring_version: 1, spec_version: 1, impl_version: 1, - apis: sp_api::create_apis_vec!([(>::ID, 3)]), + apis: create_apis_vec!([(>::ID, 3)]), transaction_version: 3, state_version: 4, }; @@ -509,13 +510,13 @@ mod tests { assert_eq!(3, version.transaction_version); assert_eq!(0, version.state_version); - let old_runtime_version = sp_api::RuntimeVersion { + let old_runtime_version = RuntimeVersion { spec_name: "test".into(), impl_name: "test".into(), authoring_version: 1, spec_version: 1, impl_version: 1, - apis: sp_api::create_apis_vec!([(>::ID, 4)]), + apis: create_apis_vec!([(>::ID, 4)]), transaction_version: 3, state_version: 4, }; @@ -538,7 +539,7 @@ mod tests { authoring_version: 100, spec_version: 100, impl_version: 100, - apis: sp_api::create_apis_vec!([(>::ID, 4)]), + apis: create_apis_vec!([(>::ID, 4)]), transaction_version: 100, state_version: 1, }; diff --git a/substrate/client/merkle-mountain-range/rpc/src/lib.rs b/substrate/client/merkle-mountain-range/rpc/src/lib.rs index 5be82b600d9..1653749ffab 100644 --- a/substrate/client/merkle-mountain-range/rpc/src/lib.rs +++ b/substrate/client/merkle-mountain-range/rpc/src/lib.rs @@ -30,14 +30,14 @@ use jsonrpsee::{ }; use serde::{Deserialize, Serialize}; -use sp_api::{ApiExt, NumberFor, ProvideRuntimeApi}; +use sp_api::{ApiExt, ProvideRuntimeApi}; use sp_blockchain::HeaderBackend; use sp_core::{ offchain::{storage::OffchainDb, OffchainDbExt, OffchainStorage}, Bytes, }; use sp_mmr_primitives::{Error as MmrError, Proof}; -use sp_runtime::traits::Block as BlockT; +use sp_runtime::traits::{Block as BlockT, NumberFor}; pub use sp_mmr_primitives::MmrApi as MmrRuntimeApi; diff --git a/substrate/client/rpc-spec-v2/Cargo.toml b/substrate/client/rpc-spec-v2/Cargo.toml index ca61286ddfa..8ca4f321f71 100644 --- a/substrate/client/rpc-spec-v2/Cargo.toml +++ b/substrate/client/rpc-spec-v2/Cargo.toml @@ -37,12 +37,14 @@ tokio = { version = "1.22.0", features = ["sync"] } array-bytes = "6.1" log = "0.4.17" futures-util = { version = "0.3.19", default-features = false } + [dev-dependencies] serde_json = "1.0.108" tokio = { version = "1.22.0", features = ["macros"] } substrate-test-runtime-client = { path = "../../test-utils/runtime/client" } substrate-test-runtime = { path = "../../test-utils/runtime" } sp-consensus = { path = "../../primitives/consensus/common" } +sp-externalities = { path = "../../primitives/externalities" } sp-maybe-compressed-blob = { path = "../../primitives/maybe-compressed-blob" } sc-block-builder = { path = "../block-builder" } sc-service = { path = "../service", features = ["test-helpers"]} diff --git a/substrate/client/rpc-spec-v2/src/archive/archive.rs b/substrate/client/rpc-spec-v2/src/archive/archive.rs index bded842d8fd..b2bcc62ce31 100644 --- a/substrate/client/rpc-spec-v2/src/archive/archive.rs +++ 
b/substrate/client/rpc-spec-v2/src/archive/archive.rs @@ -29,13 +29,13 @@ use jsonrpsee::core::{async_trait, RpcResult}; use sc_client_api::{ Backend, BlockBackend, BlockchainEvents, CallExecutor, ExecutorProvider, StorageProvider, }; -use sp_api::{CallApiAt, CallContext, NumberFor}; +use sp_api::{CallApiAt, CallContext}; use sp_blockchain::{ Backend as BlockChainBackend, Error as BlockChainError, HeaderBackend, HeaderMetadata, }; use sp_core::Bytes; use sp_runtime::{ - traits::{Block as BlockT, Header as HeaderT}, + traits::{Block as BlockT, Header as HeaderT, NumberFor}, SaturatedConversion, }; use std::{collections::HashSet, marker::PhantomData, sync::Arc}; diff --git a/substrate/client/rpc-spec-v2/src/chain_head/chain_head_storage.rs b/substrate/client/rpc-spec-v2/src/chain_head/chain_head_storage.rs index 6e19f59a5d6..c23489a050e 100644 --- a/substrate/client/rpc-spec-v2/src/chain_head/chain_head_storage.rs +++ b/substrate/client/rpc-spec-v2/src/chain_head/chain_head_storage.rs @@ -22,8 +22,8 @@ use std::{collections::VecDeque, marker::PhantomData, sync::Arc}; use sc_client_api::{Backend, ChildInfo, StorageKey, StorageProvider}; use sc_utils::mpsc::TracingUnboundedSender; -use sp_api::BlockT; use sp_core::storage::well_known_keys; +use sp_runtime::traits::Block as BlockT; use crate::chain_head::event::OperationStorageItems; diff --git a/substrate/client/rpc-spec-v2/src/chain_head/test_utils.rs b/substrate/client/rpc-spec-v2/src/chain_head/test_utils.rs index a901f3039ff..d63a98a5cb0 100644 --- a/substrate/client/rpc-spec-v2/src/chain_head/test_utils.rs +++ b/substrate/client/rpc-spec-v2/src/chain_head/test_utils.rs @@ -24,14 +24,15 @@ use sc_client_api::{ StorageData, StorageEventStream, StorageKey, StorageProvider, }; use sc_utils::mpsc::{tracing_unbounded, TracingUnboundedSender}; -use sp_api::{CallApiAt, CallApiAtParams, NumberFor, RuntimeVersion}; +use sp_api::{CallApiAt, CallApiAtParams}; use sp_blockchain::{BlockStatus, CachedHeaderMetadata, HeaderBackend, HeaderMetadata, Info}; use sp_consensus::BlockOrigin; use sp_runtime::{ generic::SignedBlock, - traits::{Block as BlockT, Header as HeaderT}, + traits::{Block as BlockT, Header as HeaderT, NumberFor}, Justifications, }; +use sp_version::RuntimeVersion; use std::sync::Arc; use substrate_test_runtime::{Block, Hash, Header}; @@ -235,7 +236,7 @@ impl> CallApiAt for ChainHeadMock fn initialize_extensions( &self, at: ::Hash, - extensions: &mut sp_api::Extensions, + extensions: &mut sp_externalities::Extensions, ) -> Result<(), sp_api::ApiError> { self.client.initialize_extensions(at, extensions) } diff --git a/substrate/client/rpc-spec-v2/src/chain_head/tests.rs b/substrate/client/rpc-spec-v2/src/chain_head/tests.rs index 4e6775fe280..518b7da432b 100644 --- a/substrate/client/rpc-spec-v2/src/chain_head/tests.rs +++ b/substrate/client/rpc-spec-v2/src/chain_head/tests.rs @@ -34,7 +34,6 @@ use jsonrpsee::{ use sc_block_builder::BlockBuilderBuilder; use sc_client_api::ChildInfo; use sc_service::client::new_in_mem; -use sp_api::BlockT; use sp_blockchain::HeaderBackend; use sp_consensus::BlockOrigin; use sp_core::{ @@ -42,6 +41,7 @@ use sp_core::{ testing::TaskExecutor, Blake2Hasher, Hasher, }; +use sp_runtime::traits::Block as BlockT; use sp_version::RuntimeVersion; use std::{ collections::{HashMap, HashSet}, diff --git a/substrate/client/tracing/Cargo.toml b/substrate/client/tracing/Cargo.toml index ffcbf074908..796d4f1d826 100644 --- a/substrate/client/tracing/Cargo.toml +++ b/substrate/client/tracing/Cargo.toml @@ -16,6 +16,7 @@ 
targets = ["x86_64-unknown-linux-gnu"] ansi_term = "0.12.1" atty = "0.2.13" chrono = "0.4.27" +codec = { package = "parity-scale-codec", version = "3.6.1" } lazy_static = "1.4.0" libc = "0.2.121" log = { version = "0.4.17" } diff --git a/substrate/client/tracing/src/block/mod.rs b/substrate/client/tracing/src/block/mod.rs index 9ebf8e55c94..01744cd5563 100644 --- a/substrate/client/tracing/src/block/mod.rs +++ b/substrate/client/tracing/src/block/mod.rs @@ -25,6 +25,7 @@ use std::{ time::Instant, }; +use codec::Encode; use parking_lot::Mutex; use tracing::{ dispatcher, @@ -34,7 +35,7 @@ use tracing::{ use crate::{SpanDatum, TraceEvent, Values}; use sc_client_api::BlockBackend; -use sp_api::{Core, Encode, Metadata, ProvideRuntimeApi}; +use sp_api::{Core, Metadata, ProvideRuntimeApi}; use sp_blockchain::HeaderBackend; use sp_core::hexdisplay::HexDisplay; use sp_rpc::tracing::{BlockTrace, Span, TraceBlockResponse}; diff --git a/substrate/frame/beefy-mmr/src/lib.rs b/substrate/frame/beefy-mmr/src/lib.rs index a0bf7cdcf86..fa3caba7977 100644 --- a/substrate/frame/beefy-mmr/src/lib.rs +++ b/substrate/frame/beefy-mmr/src/lib.rs @@ -36,6 +36,7 @@ use sp_runtime::traits::{Convert, Member}; use sp_std::prelude::*; +use codec::Decode; use pallet_mmr::{LeafDataProvider, ParentNumberAndHash}; use sp_consensus_beefy::{ mmr::{BeefyAuthoritySet, BeefyDataProvider, BeefyNextAuthoritySet, MmrLeaf, MmrLeafVersion}, @@ -226,7 +227,7 @@ sp_api::decl_runtime_apis! { /// API useful for BEEFY light clients. pub trait BeefyMmrApi where - BeefyAuthoritySet: sp_api::Decode, + BeefyAuthoritySet: Decode, { /// Return the currently active BEEFY authority set proof. fn authority_set_proof() -> BeefyAuthoritySet; diff --git a/substrate/frame/contracts/src/storage/meter.rs b/substrate/frame/contracts/src/storage/meter.rs index 9f098090bc8..495cbd90db5 100644 --- a/substrate/frame/contracts/src/storage/meter.rs +++ b/substrate/frame/contracts/src/storage/meter.rs @@ -33,9 +33,8 @@ use frame_support::{ }, DefaultNoBound, RuntimeDebugNoBound, }; -use sp_api::HashT; use sp_runtime::{ - traits::{Saturating, Zero}, + traits::{Hash as HashT, Saturating, Zero}, DispatchError, FixedPointNumber, FixedU128, }; use sp_std::{fmt::Debug, marker::PhantomData, vec, vec::Vec}; diff --git a/substrate/frame/nfts/runtime-api/Cargo.toml b/substrate/frame/nfts/runtime-api/Cargo.toml index 483c4bd3234..092edaaaa89 100644 --- a/substrate/frame/nfts/runtime-api/Cargo.toml +++ b/substrate/frame/nfts/runtime-api/Cargo.toml @@ -14,9 +14,10 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } -pallet-nfts = { path = "..", default-features = false} -sp-api = { path = "../../../primitives/api", default-features = false} +pallet-nfts = { path = "..", default-features = false } +sp-api = { path = "../../../primitives/api", default-features = false } +sp-std = { path = "../../../primitives/std", default-features = false } [features] default = [ "std" ] -std = [ "codec/std", "pallet-nfts/std", "sp-api/std" ] +std = [ "codec/std", "pallet-nfts/std", "sp-api/std", "sp-std/std" ] diff --git a/substrate/frame/nfts/runtime-api/src/lib.rs b/substrate/frame/nfts/runtime-api/src/lib.rs index 77535c64069..816088f1b71 100644 --- a/substrate/frame/nfts/runtime-api/src/lib.rs +++ b/substrate/frame/nfts/runtime-api/src/lib.rs @@ -20,7 +20,7 @@ #![cfg_attr(not(feature = "std"), no_std)] use codec::{Decode, Encode}; -use sp_api::vec::Vec; +use 
sp_std::vec::Vec; sp_api::decl_runtime_apis! { pub trait NftsApi diff --git a/substrate/frame/support/test/tests/construct_runtime.rs b/substrate/frame/support/test/tests/construct_runtime.rs index 9ad51ad530e..b8341b25cb0 100644 --- a/substrate/frame/support/test/tests/construct_runtime.rs +++ b/substrate/frame/support/test/tests/construct_runtime.rs @@ -27,13 +27,13 @@ use frame_support::{ }; use frame_system::limits::{BlockLength, BlockWeights}; use scale_info::TypeInfo; -use sp_api::RuntimeVersion; use sp_core::{sr25519, ConstU64}; use sp_runtime::{ generic, traits::{BlakeTwo256, Verify}, DispatchError, ModuleError, }; +use sp_version::RuntimeVersion; parameter_types! { pub static IntegrityTestExec: u32 = 0; diff --git a/substrate/primitives/api/Cargo.toml b/substrate/primitives/api/Cargo.toml index c3a68af097b..e95333c9eeb 100644 --- a/substrate/primitives/api/Cargo.toml +++ b/substrate/primitives/api/Cargo.toml @@ -15,17 +15,17 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } sp-api-proc-macro = { path = "proc-macro" } -sp-core = { path = "../core", default-features = false} -sp-std = { path = "../std", default-features = false} -sp-runtime = { path = "../runtime", default-features = false} -sp-externalities = { path = "../externalities", default-features = false, optional = true} -sp-version = { path = "../version", default-features = false} -sp-state-machine = { path = "../state-machine", default-features = false, optional = true} -sp-trie = { path = "../trie", default-features = false, optional = true} +sp-core = { path = "../core", default-features = false } +sp-std = { path = "../std", default-features = false } +sp-runtime = { path = "../runtime", default-features = false } +sp-externalities = { path = "../externalities", default-features = false, optional = true } +sp-version = { path = "../version", default-features = false } +sp-state-machine = { path = "../state-machine", default-features = false, optional = true } +sp-trie = { path = "../trie", default-features = false, optional = true } hash-db = { version = "0.16.0", optional = true } thiserror = { version = "1.0.48", optional = true } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -sp-metadata-ir = { path = "../metadata-ir", default-features = false, optional = true} +sp-metadata-ir = { path = "../metadata-ir", default-features = false, optional = true } log = { version = "0.4.17", default-features = false } [dev-dependencies] diff --git a/substrate/primitives/api/proc-macro/src/utils.rs b/substrate/primitives/api/proc-macro/src/utils.rs index e261b162b5a..68f0a77a399 100644 --- a/substrate/primitives/api/proc-macro/src/utils.rs +++ b/substrate/primitives/api/proc-macro/src/utils.rs @@ -28,14 +28,14 @@ use syn::{ /// Generates the access to the `sc_client` crate. 
pub fn generate_crate_access() -> TokenStream { match crate_name("sp-api") { - Ok(FoundCrate::Itself) => quote!(sp_api), + Ok(FoundCrate::Itself) => quote!(sp_api::__private), Ok(FoundCrate::Name(renamed_name)) => { let renamed_name = Ident::new(&renamed_name, Span::call_site()); - quote!(#renamed_name) + quote!(#renamed_name::__private) }, Err(e) => if let Ok(FoundCrate::Name(name)) = crate_name(&"frame") { - let path = format!("{}::deps::{}", name, "sp_api"); + let path = format!("{}::deps::sp_api::__private", name); let path = syn::parse_str::(&path).expect("is a valid path; qed"); quote!( #path ) } else { diff --git a/substrate/primitives/api/src/lib.rs b/substrate/primitives/api/src/lib.rs index c122a2348b8..3b00e6b418b 100644 --- a/substrate/primitives/api/src/lib.rs +++ b/substrate/primitives/api/src/lib.rs @@ -70,48 +70,58 @@ // Make doc tests happy extern crate self as sp_api; +/// Private exports used by the macros. +/// +/// This is seen as internal API and can change at any point. #[doc(hidden)] -pub use codec::{self, Decode, DecodeLimit, Encode}; -#[doc(hidden)] -#[cfg(feature = "std")] -pub use hash_db::Hasher; -#[doc(hidden)] -pub use scale_info; -#[doc(hidden)] -pub use sp_core::offchain; -#[doc(hidden)] -#[cfg(not(feature = "std"))] -pub use sp_core::to_substrate_wasm_fn_return_value; -#[doc(hidden)] +pub mod __private { + #[cfg(feature = "std")] + mod std_imports { + pub use hash_db::Hasher; + pub use sp_core::traits::CallContext; + pub use sp_externalities::{Extension, Extensions}; + pub use sp_runtime::StateVersion; + pub use sp_state_machine::{ + Backend as StateBackend, InMemoryBackend, OverlayedChanges, StorageProof, TrieBackend, + TrieBackendBuilder, + }; + } + #[cfg(feature = "std")] + pub use std_imports::*; + + pub use crate::*; + pub use codec::{self, Decode, DecodeLimit, Encode}; + pub use scale_info; + pub use sp_core::offchain; + #[cfg(not(feature = "std"))] + pub use sp_core::to_substrate_wasm_fn_return_value; + #[cfg(feature = "frame-metadata")] + pub use sp_metadata_ir::{self as metadata_ir, frame_metadata as metadata}; + pub use sp_runtime::{ + generic::BlockId, + traits::{Block as BlockT, Hash as HashT, HashingFor, Header as HeaderT, NumberFor}, + transaction_validity::TransactionValidity, + RuntimeString, TransactionOutcome, + }; + pub use sp_std::{mem, slice, vec}; + pub use sp_version::{create_apis_vec, ApiId, ApisVec, RuntimeVersion}; +} + #[cfg(feature = "std")] pub use sp_core::traits::CallContext; use sp_core::OpaqueMetadata; -#[doc(hidden)] #[cfg(feature = "std")] -pub use sp_externalities::{Extension, Extensions}; -#[doc(hidden)] -#[cfg(feature = "frame-metadata")] -pub use sp_metadata_ir::{self as metadata_ir, frame_metadata as metadata}; -#[doc(hidden)] +use sp_externalities::{Extension, Extensions}; +use sp_runtime::traits::Block as BlockT; #[cfg(feature = "std")] -pub use sp_runtime::StateVersion; -#[doc(hidden)] -pub use sp_runtime::{ - generic::BlockId, - traits::{Block as BlockT, Hash as HashT, HashingFor, Header as HeaderT, NumberFor}, - transaction_validity::TransactionValidity, - RuntimeString, TransactionOutcome, -}; -#[doc(hidden)] +use sp_runtime::traits::HashingFor; #[cfg(feature = "std")] -pub use sp_state_machine::{ - backend::AsTrieBackend, Backend as StateBackend, InMemoryBackend, OverlayedChanges, - StorageProof, TrieBackend, TrieBackendBuilder, -}; -#[doc(hidden)] -pub use sp_std::{mem, slice, vec}; -#[doc(hidden)] -pub use sp_version::{create_apis_vec, ApiId, ApisVec, RuntimeVersion}; +pub use sp_runtime::TransactionOutcome; 
+#[cfg(feature = "std")] +pub use sp_state_machine::StorageProof; +#[cfg(feature = "std")] +use sp_state_machine::{backend::AsTrieBackend, Backend as StateBackend, OverlayedChanges}; +use sp_version::RuntimeVersion; #[cfg(feature = "std")] use std::cell::RefCell; diff --git a/substrate/primitives/consensus/beefy/Cargo.toml b/substrate/primitives/consensus/beefy/Cargo.toml index 5ff0a2ebc70..e78323c8980 100644 --- a/substrate/primitives/consensus/beefy/Cargo.toml +++ b/substrate/primitives/consensus/beefy/Cargo.toml @@ -15,19 +15,19 @@ targets = ["x86_64-unknown-linux-gnu"] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } serde = { version = "1.0.188", default-features = false, optional = true, features = ["derive", "alloc"] } -sp-api = { path = "../../api", default-features = false} -sp-application-crypto = { path = "../../application-crypto", default-features = false} -sp-core = { path = "../../core", default-features = false} -sp-io = { path = "../../io", default-features = false} -sp-mmr-primitives = { path = "../../merkle-mountain-range", default-features = false} -sp-runtime = { path = "../../runtime", default-features = false} -sp-std = { path = "../../std", default-features = false} +sp-api = { path = "../../api", default-features = false } +sp-application-crypto = { path = "../../application-crypto", default-features = false } +sp-core = { path = "../../core", default-features = false } +sp-io = { path = "../../io", default-features = false } +sp-mmr-primitives = { path = "../../merkle-mountain-range", default-features = false } +sp-runtime = { path = "../../runtime", default-features = false } +sp-std = { path = "../../std", default-features = false } strum = { version = "0.24.1", features = ["derive"], default-features = false } lazy_static = "1.4.0" [dev-dependencies] array-bytes = "6.1" -w3f-bls = { version = "0.1.3", features = ["std"]} +w3f-bls = { version = "0.1.3", features = ["std"] } [features] default = [ "std" ] diff --git a/substrate/primitives/consensus/beefy/src/mmr.rs b/substrate/primitives/consensus/beefy/src/mmr.rs index 660506b8763..9ac1624ca75 100644 --- a/substrate/primitives/consensus/beefy/src/mmr.rs +++ b/substrate/primitives/consensus/beefy/src/mmr.rs @@ -150,8 +150,9 @@ pub use mmr_root_provider::MmrRootProvider; mod mmr_root_provider { use super::*; use crate::{known_payloads, payload::PayloadProvider, Payload}; - use sp_api::{NumberFor, ProvideRuntimeApi}; + use sp_api::ProvideRuntimeApi; use sp_mmr_primitives::MmrApi; + use sp_runtime::traits::NumberFor; use sp_std::{marker::PhantomData, sync::Arc}; /// A [`crate::Payload`] provider where payload is Merkle Mountain Range root hash. 
diff --git a/substrate/utils/frame/benchmarking-cli/src/block/bench.rs b/substrate/utils/frame/benchmarking-cli/src/block/bench.rs index a028c7d438e..ef8dc29dde8 100644 --- a/substrate/utils/frame/benchmarking-cli/src/block/bench.rs +++ b/substrate/utils/frame/benchmarking-cli/src/block/bench.rs @@ -25,9 +25,13 @@ use sc_cli::{Error, Result}; use sc_client_api::{ Backend as ClientBackend, BlockBackend, HeaderBackend, StorageProvider, UsageProvider, }; -use sp_api::{ApiExt, Core, HeaderT, ProvideRuntimeApi}; +use sp_api::{ApiExt, Core, ProvideRuntimeApi}; use sp_blockchain::Error::RuntimeApiError; -use sp_runtime::{generic::BlockId, traits::Block as BlockT, DigestItem, OpaqueExtrinsic}; +use sp_runtime::{ + generic::BlockId, + traits::{Block as BlockT, Header as HeaderT}, + DigestItem, OpaqueExtrinsic, +}; use sp_storage::StorageKey; use clap::Args; diff --git a/substrate/utils/frame/benchmarking-cli/src/storage/write.rs b/substrate/utils/frame/benchmarking-cli/src/storage/write.rs index 4def1909ead..65941497bda 100644 --- a/substrate/utils/frame/benchmarking-cli/src/storage/write.rs +++ b/substrate/utils/frame/benchmarking-cli/src/storage/write.rs @@ -18,10 +18,10 @@ use sc_cli::Result; use sc_client_api::{Backend as ClientBackend, StorageProvider, UsageProvider}; use sc_client_db::{DbHash, DbState, DbStateBuilder}; -use sp_api::StateBackend; use sp_blockchain::HeaderBackend; use sp_database::{ColumnId, Transaction}; use sp_runtime::traits::{Block as BlockT, HashingFor, Header as HeaderT}; +use sp_state_machine::Backend as StateBackend; use sp_trie::PrefixedMemoryDB; use log::{info, trace}; -- GitLab From 19c05e8d43fc4aef12668ecc9144bb9669cc7bbc Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Andr=C3=A9=20Silva?= <123550+andresilva@users.noreply.github.com> Date: Thu, 23 Nov 2023 17:00:54 +0000 Subject: [PATCH 106/199] Fix trait imports from sp-api (#2472) Broken after #2446. 
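For other downstream code broken in the same way, the fix follows this pattern (an illustrative sketch based on the diff below; exactly which traits need moving depends on the call site):

```rust
// Before #2446: block/header traits were re-exported from sp-api.
use sp_api::{ApiError, BlockT, HeaderT, NumberFor, RuntimeApiInfo};

// After: keep only the API-specific items in sp_api and import the
// block/header traits from sp_runtime directly.
use sp_api::{ApiError, RuntimeApiInfo};
use sp_runtime::traits::{Block as BlockT, Header as HeaderT, NumberFor};
```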
--- Cargo.lock | 1 + .../relay-chain-minimal-node/src/blockchain_rpc_client.rs | 3 ++- polkadot/node/subsystem-types/Cargo.toml | 1 + polkadot/node/subsystem-types/src/runtime_client.rs | 3 ++- 4 files changed, 6 insertions(+), 2 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 1fe9d1b8192..abd539ea799 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -12860,6 +12860,7 @@ dependencies = [ "sp-authority-discovery", "sp-blockchain", "sp-consensus-babe", + "sp-runtime", "substrate-prometheus-endpoint", "thiserror", ] diff --git a/cumulus/client/relay-chain-minimal-node/src/blockchain_rpc_client.rs b/cumulus/client/relay-chain-minimal-node/src/blockchain_rpc_client.rs index c40ca5c858b..d9e4155d9c5 100644 --- a/cumulus/client/relay-chain-minimal-node/src/blockchain_rpc_client.rs +++ b/cumulus/client/relay-chain-minimal-node/src/blockchain_rpc_client.rs @@ -28,8 +28,9 @@ use polkadot_primitives::{ }; use sc_authority_discovery::{AuthorityDiscovery, Error as AuthorityDiscoveryError}; use sc_client_api::AuxStore; -use sp_api::{ApiError, BlockT, HeaderT, NumberFor, RuntimeApiInfo}; +use sp_api::{ApiError, RuntimeApiInfo}; use sp_blockchain::Info; +use sp_runtime::traits::{Block as BlockT, Header as HeaderT, NumberFor}; #[derive(Clone)] pub struct BlockChainRpcClient { diff --git a/polkadot/node/subsystem-types/Cargo.toml b/polkadot/node/subsystem-types/Cargo.toml index 8e345cf222c..71e5257cab9 100644 --- a/polkadot/node/subsystem-types/Cargo.toml +++ b/polkadot/node/subsystem-types/Cargo.toml @@ -19,6 +19,7 @@ sc-network = { path = "../../../substrate/client/network" } sp-api = { path = "../../../substrate/primitives/api" } sp-blockchain = { path = "../../../substrate/primitives/blockchain" } sp-consensus-babe = { path = "../../../substrate/primitives/consensus/babe" } +sp-runtime = { path = "../../../substrate/primitives/runtime" } sp-authority-discovery = { path = "../../../substrate/primitives/authority-discovery" } sc-client-api = { path = "../../../substrate/client/api" } sc-transaction-pool-api = { path = "../../../substrate/client/transaction-pool/api" } diff --git a/polkadot/node/subsystem-types/src/runtime_client.rs b/polkadot/node/subsystem-types/src/runtime_client.rs index 36e3365cf08..21df1483b9e 100644 --- a/polkadot/node/subsystem-types/src/runtime_client.rs +++ b/polkadot/node/subsystem-types/src/runtime_client.rs @@ -25,10 +25,11 @@ use polkadot_primitives::{ }; use sc_client_api::HeaderBackend; use sc_transaction_pool_api::OffchainTransactionPoolFactory; -use sp_api::{ApiError, ApiExt, HeaderT, NumberFor, ProvideRuntimeApi}; +use sp_api::{ApiError, ApiExt, ProvideRuntimeApi}; use sp_authority_discovery::AuthorityDiscoveryApi; use sp_blockchain::Info; use sp_consensus_babe::{BabeApi, Epoch}; +use sp_runtime::traits::{Header as HeaderT, NumberFor}; use std::{collections::BTreeMap, sync::Arc}; /// Offers header utilities. -- GitLab From c29b74dc36fc20a76cee4a029a70e7ad42fa0b6e Mon Sep 17 00:00:00 2001 From: Ross Bulat Date: Fri, 24 Nov 2023 00:14:59 +0700 Subject: [PATCH 107/199] Amend staking docs to account for state of controller deprecation (#2451) Amends some staking pallet docs, and deprecation comment, to adjust to the latest controller deprecation state. Note, do we need the `README.md` file, which is a duplicate of the pallet docs? Docs would be easier to maintain, and less ambiguity for devs to refer to, if we had one source of truth in the generated pallet docs. 
--- substrate/frame/staking/README.md | 12 +-- substrate/frame/staking/src/lib.rs | 93 ++++++++++----------- substrate/frame/staking/src/pallet/impls.rs | 2 +- 3 files changed, 50 insertions(+), 57 deletions(-) diff --git a/substrate/frame/staking/README.md b/substrate/frame/staking/README.md index 8c91cfcaa7f..2938e2fe770 100644 --- a/substrate/frame/staking/README.md +++ b/substrate/frame/staking/README.md @@ -24,7 +24,7 @@ be found not to be discharging its duties properly. - Nominating: The process of placing staked funds behind one or more validators in order to share in any reward, and punishment, they take. - Stash account: The account holding an owner's funds used for staking. -- Controller account: The account that controls an owner's funds for staking. +- Controller account (being deprecated): The account that controls an owner's funds for staking. - Era: A (whole) number of sessions, which is the period that the validator set (and each validator's active nominator set) is recalculated and where rewards are paid out. - Slash: The punishment of a staker by reducing its funds. @@ -45,10 +45,10 @@ The staking system in Substrate NPoS is designed to make the following possible: Almost any interaction with the Staking module requires a process of _**bonding**_ (also known as being a _staker_). To become *bonded*, a fund-holding account known as the _stash account_, which holds some or all of the funds that become -frozen in place as part of the staking process, is paired with an active **controller** account, which issues -instructions on how they shall be used. +frozen in place as part of the staking process. The controller account, which this pallet now assigns the stash account to, +issues instructions on how funds shall be used. -An account pair can become bonded using the +An account can become a bonded stash account using the [`bond`](https://docs.rs/pallet-staking/latest/pallet_staking/enum.Call.html#variant.bond) call. Stash accounts can update their associated controller back to their stash account using the @@ -231,8 +231,8 @@ following: Any funds already placed into stash can be the target of the following operations: The controller account can free a portion (or all) of the funds using the -[`unbond`](https://docs.rs/pallet-staking/latest/pallet_staking/enum.Call.html#variant.unbond) call. Note that the funds -are not immediately accessible. Instead, a duration denoted by +[`unbond`](https://docs.rs/pallet-staking/latest/pallet_staking/enum.Call.html#variant.unbond) call. Note that the +funds are not immediately accessible. Instead, a duration denoted by [`BondingDuration`](https://docs.rs/pallet-staking/latest/pallet_staking/trait.Config.html#associatedtype.BondingDuration) (in number of eras) must pass until the funds can actually be removed. Once the `BondingDuration` is over, the [`withdraw_unbonded`](https://docs.rs/pallet-staking/latest/pallet_staking/enum.Call.html#variant.withdraw_unbonded) diff --git a/substrate/frame/staking/src/lib.rs b/substrate/frame/staking/src/lib.rs index 2cfee38ab4f..de989e8943d 100644 --- a/substrate/frame/staking/src/lib.rs +++ b/substrate/frame/staking/src/lib.rs @@ -41,7 +41,7 @@ //! - Nominating: The process of placing staked funds behind one or more validators in order to //! share in any reward, and punishment, they take. //! - Stash account: The account holding an owner's funds used for staking. -//! - Controller account: The account that controls an owner's funds for staking. +//! 
- Controller account (being deprecated): The account that controls an owner's funds for staking. //! - Era: A (whole) number of sessions, which is the period that the validator set (and each //! validator's active nominator set) is recalculated and where rewards are paid out. //! - Slash: The punishment of a staker by reducing its funds. @@ -61,20 +61,20 @@ //! //! Almost any interaction with the Staking pallet requires a process of _**bonding**_ (also known //! as being a _staker_). To become *bonded*, a fund-holding register known as the _stash account_, -//! which holds some or all of the funds that become frozen in place as part of the staking process, -//! is paired with an active **controller** account, which issues instructions on how they shall be -//! used. +//! which holds some or all of the funds that become frozen in place as part of the staking process. +//! The controller account, which this pallet now assigns the stash account to, issues instructions +//! on how funds shall be used. //! -//! An account pair can become bonded using the [`bond`](Call::bond) call. +//! An account can become a bonded stash account using the [`bond`](Call::bond) call. //! -//! Stash accounts can update their associated controller back to the stash account using the +//! In the event stash accounts registered a unique controller account before the controller account +//! deprecation, they can update their associated controller back to the stash account using the //! [`set_controller`](Call::set_controller) call. //! //! There are three possible roles that any staked account pair can be in: `Validator`, `Nominator` -//! and `Idle` (defined in [`StakerStatus`]). There are three -//! corresponding instructions to change between roles, namely: -//! [`validate`](Call::validate), -//! [`nominate`](Call::nominate), and [`chill`](Call::chill). +//! and `Idle` (defined in [`StakerStatus`]). There are three corresponding instructions to change +//! between roles, namely: [`validate`](Call::validate), [`nominate`](Call::nominate), and +//! [`chill`](Call::chill). //! //! #### Validating //! @@ -85,14 +85,13 @@ //! _might_ get elected at the _next era_ as a validator. The result of the election is determined //! by nominators and their votes. //! -//! An account can become a validator candidate via the -//! [`validate`](Call::validate) call. +//! An account can become a validator candidate via the [`validate`](Call::validate) call. //! //! #### Nomination //! //! A **nominator** does not take any _direct_ role in maintaining the network, instead, it votes on -//! a set of validators to be elected. Once interest in nomination is stated by an account, it -//! takes effect at the next election round. The funds in the nominator's stash account indicate the +//! a set of validators to be elected. Once interest in nomination is stated by an account, it takes +//! effect at the next election round. The funds in the nominator's stash account indicate the //! _weight_ of its vote. Both the rewards and any punishment that a validator earns are shared //! between the validator and its nominators. This rule incentivizes the nominators to NOT vote for //! the misbehaving/offline validators as much as possible, simply because the nominators will also @@ -104,8 +103,8 @@ //! //! Staking is closely related to elections; actual validators are chosen from among all potential //! validators via election by the potential validators and nominators. To reduce use of the phrase -//! 
"potential validators and nominators", we often use the term **voters**, who are simply -//! the union of potential validators and nominators. +//! "potential validators and nominators", we often use the term **voters**, who are simply the +//! union of potential validators and nominators. //! //! #### Rewards and Slash //! @@ -117,10 +116,9 @@ //! `payout_stakers`, which pays the reward to the validator as well as its nominators. Only //! [`Config::MaxExposurePageSize`] nominator rewards can be claimed in a single call. When the //! number of nominators exceeds [`Config::MaxExposurePageSize`], then the exposed nominators are -//! stored in multiple pages, with each page containing up to -//! [`Config::MaxExposurePageSize`] nominators. To pay out all nominators, `payout_stakers` must be -//! called once for each available page. Paging exists to limit the i/o cost to mutate storage for -//! each nominator's account. +//! stored in multiple pages, with each page containing up to [`Config::MaxExposurePageSize`] +//! nominators. To pay out all nominators, `payout_stakers` must be called once for each available +//! page. Paging exists to limit the i/o cost to mutate storage for each nominator's account. //! //! Slashing can occur at any point in time, once misbehavior is reported. Once slashing is //! determined, a value is deducted from the balance of the validator and all the nominators who @@ -165,18 +163,18 @@ //! //! #[frame_support::pallet(dev_mode)] //! pub mod pallet { -//! use super::*; -//! use frame_support::pallet_prelude::*; -//! use frame_system::pallet_prelude::*; +//! use super::*; +//! use frame_support::pallet_prelude::*; +//! use frame_system::pallet_prelude::*; //! -//! #[pallet::pallet] -//! pub struct Pallet(_); +//! #[pallet::pallet] +//! pub struct Pallet(_); //! -//! #[pallet::config] -//! pub trait Config: frame_system::Config + staking::Config {} +//! #[pallet::config] +//! pub trait Config: frame_system::Config + staking::Config {} //! -//! #[pallet::call] -//! impl Pallet { +//! #[pallet::call] +//! impl Pallet { //! /// Reward a validator. //! #[pallet::weight(0)] //! pub fn reward_myself(origin: OriginFor) -> DispatchResult { @@ -193,8 +191,8 @@ //! //! ### Era payout //! -//! The era payout is computed using yearly inflation curve defined at -//! [`Config::EraPayout`] as such: +//! The era payout is computed using yearly inflation curve defined at [`Config::EraPayout`] as +//! such: //! //! ```nocompile //! staker_payout = yearly_inflation(npos_token_staked / total_tokens) * total_tokens / era_per_year @@ -204,8 +202,7 @@ //! ```nocompile //! remaining_payout = max_yearly_inflation * total_tokens / era_per_year - staker_payout //! ``` -//! The remaining reward is send to the configurable end-point -//! [`Config::RewardRemainder`]. +//! The remaining reward is send to the configurable end-point [`Config::RewardRemainder`]. //! //! ### Reward Calculation //! @@ -219,9 +216,8 @@ //! they received during the era. Points are added to a validator using //! [`reward_by_ids`](Pallet::reward_by_ids). //! -//! [`Pallet`] implements -//! [`pallet_authorship::EventHandler`] to add reward -//! points to block producer and block producer of referenced uncles. +//! [`Pallet`] implements [`pallet_authorship::EventHandler`] to add reward points to block producer +//! and block producer of referenced uncles. //! //! The validator and its nominator split their reward as following: //! @@ -232,13 +228,12 @@ //! 
validator, proportional to the value staked behind the validator (_i.e._ dividing the //! [`own`](Exposure::own) or [`others`](Exposure::others) by [`total`](Exposure::total) in //! [`Exposure`]). Note that payouts are made in pages with each page capped at -//! [`Config::MaxExposurePageSize`] nominators. The distribution of nominators across -//! pages may be unsorted. The total commission is paid out proportionally across pages based on the -//! total stake of the page. +//! [`Config::MaxExposurePageSize`] nominators. The distribution of nominators across pages may be +//! unsorted. The total commission is paid out proportionally across pages based on the total stake +//! of the page. //! //! All entities who receive a reward have the option to choose their reward destination through the -//! [`Payee`] storage item (see -//! [`set_payee`](Call::set_payee)), to be one of the following: +//! [`Payee`] storage item (see [`set_payee`](Call::set_payee)), to be one of the following: //! //! - Stash account, not increasing the staked value. //! - Stash account, also increasing the staked value. @@ -249,12 +244,10 @@ //! Any funds already placed into stash can be the target of the following operations: //! //! The controller account can free a portion (or all) of the funds using the -//! [`unbond`](Call::unbond) call. Note that the funds are not immediately -//! accessible. Instead, a duration denoted by -//! [`Config::BondingDuration`] (in number of eras) must -//! pass until the funds can actually be removed. Once the `BondingDuration` is over, the -//! [`withdraw_unbonded`](Call::withdraw_unbonded) call can be used to actually -//! withdraw the funds. +//! [`unbond`](Call::unbond) call. Note that the funds are not immediately accessible. Instead, a +//! duration denoted by [`Config::BondingDuration`] (in number of eras) must pass until the funds +//! can actually be removed. Once the `BondingDuration` is over, the +//! [`withdraw_unbonded`](Call::withdraw_unbonded) call can be used to actually withdraw the funds. //! //! Note that there is a limitation to the number of fund-chunks that can be scheduled to be //! unlocked in the future via [`unbond`](Call::unbond). In case this maximum @@ -274,8 +267,8 @@ //! //! ## GenesisConfig //! -//! The Staking pallet depends on the [`GenesisConfig`]. The -//! `GenesisConfig` is optional and allow to set some initial stakers. +//! The Staking pallet depends on the [`GenesisConfig`]. The `GenesisConfig` is optional and allow +//! to set some initial stakers. //! //! ## Related Modules //! @@ -405,7 +398,7 @@ pub enum RewardDestination { /// Pay into the stash account, not increasing the amount at stake. Stash, #[deprecated( - note = "`Controller` will be removed after January 2024. Use `Account(controller)` instead. This variant now behaves the same as `Stash` variant." + note = "`Controller` will be removed after January 2024. Use `Account(controller)` instead." )] Controller, /// Pay into a specified account. 
diff --git a/substrate/frame/staking/src/pallet/impls.rs b/substrate/frame/staking/src/pallet/impls.rs index 6ea8e4c9d3b..e18d9378b5e 100644 --- a/substrate/frame/staking/src/pallet/impls.rs +++ b/substrate/frame/staking/src/pallet/impls.rs @@ -359,7 +359,7 @@ impl Pallet { #[allow(deprecated)] RewardDestination::Controller => Self::bonded(stash) .map(|controller| { - defensive!("Paying out controller as reward destination which is deprecated and should be migrated"); + defensive!("Paying out controller as reward destination which is deprecated and should be migrated."); // This should never happen once payees with a `Controller` variant have been migrated. // But if it does, just pay the controller account. T::Currency::deposit_creating(&controller, amount) -- GitLab From a00a767604db59e4f045f8e73e90c6c8b7ff95c5 Mon Sep 17 00:00:00 2001 From: Lulu Date: Fri, 24 Nov 2023 09:07:35 +0000 Subject: [PATCH 108/199] Make publish CI consistant and bump to v0.3.0 (#2453) --- .github/workflows/check-publish.yml | 7 ++----- .github/workflows/claim-crates.yml | 2 +- 2 files changed, 3 insertions(+), 6 deletions(-) diff --git a/.github/workflows/check-publish.yml b/.github/workflows/check-publish.yml index c0d2b889381..db0863888b8 100644 --- a/.github/workflows/check-publish.yml +++ b/.github/workflows/check-publish.yml @@ -10,10 +10,7 @@ on: jobs: check-publish: - strategy: - matrix: - os: ["ubuntu-latest"] - runs-on: ${{ matrix.os }} + runs-on: ubuntu-latest steps: - uses: actions/checkout@8ade135a41bc03ea155e62e844d188df1ea18608 # v4.1.0 @@ -23,7 +20,7 @@ jobs: cache-on-failure: true - name: install parity-publish - run: cargo install parity-publish --profile dev + run: cargo install parity-publish@0.3.0 - name: parity-publish check run: parity-publish check --allow-unpublished diff --git a/.github/workflows/claim-crates.yml b/.github/workflows/claim-crates.yml index 345d24c7566..0bd5593b54f 100644 --- a/.github/workflows/claim-crates.yml +++ b/.github/workflows/claim-crates.yml @@ -18,7 +18,7 @@ jobs: cache-on-failure: true - name: install parity-publish - run: cargo install parity-publish@0.2.0 + run: cargo install parity-publish@0.3.0 - name: parity-publish claim env: -- GitLab From 4163152506883c985e82d2730693910aabd65a2d Mon Sep 17 00:00:00 2001 From: Xiliang Chen Date: Fri, 24 Nov 2023 22:13:57 +1300 Subject: [PATCH 109/199] pallet-xcm: ensure xcm outcome is always complete, revert effects otherwise (#2405) On extrinsics/call, ensure local XCM execution is complete/successful. Otherwise, fail the extrinsic so that state changes don't get committed to the db. Added regression tests that fail without the fix. 
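The guard added on the local execution outcome looks roughly like this (a simplified sketch of the pattern used in the diff below; logging and fee handling omitted):

```rust
// Run the local part of the program, then bail out (failing the extrinsic,
// so the withdraw/burn side-effects are rolled back) unless execution completed.
let outcome = T::XcmExecutor::execute_xcm_in_credit(origin, local_xcm, hash, weight, weight);
outcome.ensure_complete().map_err(|_| Error::<T>::LocalExecutionIncomplete)?;
```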
fixes #2237 --------- Co-authored-by: Adrian Catangiu --- Cargo.lock | 1 + .../assets/test-utils/src/test_cases.rs | 8 +- .../test-utils/src/test_cases_over_bridge.rs | 4 +- polkadot/xcm/pallet-xcm/src/lib.rs | 28 ++++-- polkadot/xcm/pallet-xcm/src/mock.rs | 7 ++ .../pallet-xcm/src/tests/assets_transfer.rs | 58 +++++++++++++ polkadot/xcm/pallet-xcm/src/tests/mod.rs | 48 ++++++++++- polkadot/xcm/src/v3/traits.rs | 4 +- .../xcm-executor/integration-tests/Cargo.toml | 1 + .../xcm-executor/integration-tests/src/lib.rs | 86 ++++++++++++++----- 10 files changed, 210 insertions(+), 35 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index abd539ea799..ab51be3d5db 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -21504,6 +21504,7 @@ dependencies = [ "frame-support", "frame-system", "futures", + "pallet-transaction-payment", "pallet-xcm", "parity-scale-codec", "polkadot-test-client", diff --git a/cumulus/parachains/runtimes/assets/test-utils/src/test_cases.rs b/cumulus/parachains/runtimes/assets/test-utils/src/test_cases.rs index f1cc76350a0..915d99470c3 100644 --- a/cumulus/parachains/runtimes/assets/test-utils/src/test_cases.rs +++ b/cumulus/parachains/runtimes/assets/test-utils/src/test_cases.rs @@ -152,7 +152,7 @@ pub fn teleports_for_native_asset_works< hash, RuntimeHelper::::xcm_max_weight(XcmReceivedFrom::Parent), ); - assert_eq!(outcome.ensure_complete(), Ok(())); + assert_ok!(outcome.ensure_complete()); // check Balances after assert_ne!(>::free_balance(&target_account), 0.into()); @@ -499,7 +499,7 @@ pub fn teleports_for_foreign_assets_works< hash, RuntimeHelper::::xcm_max_weight(XcmReceivedFrom::Sibling), ); - assert_eq!(outcome.ensure_complete(), Ok(())); + assert_ok!(outcome.ensure_complete()); // checks target_account after assert_eq!( @@ -1211,7 +1211,7 @@ pub fn create_and_manage_foreign_assets_for_local_consensus_parachain_assets_wor hash, RuntimeHelper::::xcm_max_weight(XcmReceivedFrom::Sibling), ); - assert_eq!(outcome.ensure_complete(), Ok(())); + assert_ok!(outcome.ensure_complete()); // check events let mut events = >::events() @@ -1319,7 +1319,7 @@ pub fn create_and_manage_foreign_assets_for_local_consensus_parachain_assets_wor hash, RuntimeHelper::::xcm_max_weight(XcmReceivedFrom::Sibling), ); - assert_eq!(outcome.ensure_complete(), Ok(())); + assert_ok!(outcome.ensure_complete()); additional_checks_after(); }) diff --git a/cumulus/parachains/runtimes/assets/test-utils/src/test_cases_over_bridge.rs b/cumulus/parachains/runtimes/assets/test-utils/src/test_cases_over_bridge.rs index 851fcd5c7d6..8007b275cb5 100644 --- a/cumulus/parachains/runtimes/assets/test-utils/src/test_cases_over_bridge.rs +++ b/cumulus/parachains/runtimes/assets/test-utils/src/test_cases_over_bridge.rs @@ -483,7 +483,7 @@ pub fn receive_reserve_asset_deposited_from_different_consensus_works< XcmReceivedFrom::Sibling, ), ); - assert_eq!(outcome.ensure_complete(), Ok(())); + assert_ok!(outcome.ensure_complete()); // author actual balance after (received fees from Trader for ForeignAssets) let author_received_fees = @@ -588,7 +588,7 @@ pub fn report_bridge_status_from_xcm_bridge_router_works< hash, RuntimeHelper::::xcm_max_weight(XcmReceivedFrom::Sibling), ); - assert_eq!(outcome.ensure_complete(), Ok(())); + assert_ok!(outcome.ensure_complete()); assert_eq!(is_congested, pallet_xcm_bridge_hub_router::Pallet::::bridge().is_congested); }; diff --git a/polkadot/xcm/pallet-xcm/src/lib.rs b/polkadot/xcm/pallet-xcm/src/lib.rs index 38ea7555fc3..74a24b132da 100644 --- a/polkadot/xcm/pallet-xcm/src/lib.rs +++ 
b/polkadot/xcm/pallet-xcm/src/lib.rs @@ -547,7 +547,7 @@ pub mod pallet { InvalidAssetUnsupportedReserve, /// Too many assets with different reserve locations have been attempted for transfer. TooManyReserves, - /// Local XCM execution of asset transfer incomplete. + /// Local XCM execution incomplete. LocalExecutionIncomplete, } @@ -1009,8 +1009,14 @@ pub mod pallet { message: Box::RuntimeCall>>, max_weight: Weight, ) -> DispatchResultWithPostInfo { + log::trace!(target: "xcm::pallet_xcm::execute", "message {:?}, max_weight {:?}", message, max_weight); let outcome = >::execute(origin, message, max_weight)?; - Ok(Some(outcome.weight_used().saturating_add(T::WeightInfo::execute())).into()) + let weight_used = outcome.weight_used(); + outcome.ensure_complete().map_err(|error| { + log::error!(target: "xcm::pallet_xcm::execute", "XCM execution failed with error {:?}", error); + Error::::LocalExecutionIncomplete + })?; + Ok(Some(weight_used.saturating_add(T::WeightInfo::execute())).into()) } /// Extoll that a particular destination can be communicated with through a particular @@ -1495,13 +1501,25 @@ impl Pallet { let outcome = T::XcmExecutor::execute_xcm_in_credit(origin, local_xcm, hash, weight, weight); Self::deposit_event(Event::Attempted { outcome: outcome.clone() }); - if let Some(remote_xcm) = remote_xcm { - outcome.ensure_complete().map_err(|_| Error::::LocalExecutionIncomplete)?; + outcome.ensure_complete().map_err(|error| { + log::error!( + target: "xcm::pallet_xcm::build_and_execute_xcm_transfer_type", + "XCM execution failed with error {:?}", error + ); + Error::::LocalExecutionIncomplete + })?; + if let Some(remote_xcm) = remote_xcm { let (ticket, price) = validate_send::(dest, remote_xcm.clone()) .map_err(Error::::from)?; if origin != Here.into_location() { - Self::charge_fees(origin, price).map_err(|_| Error::::FeesNotMet)?; + Self::charge_fees(origin, price).map_err(|error| { + log::error!( + target: "xcm::pallet_xcm::build_and_execute_xcm_transfer_type", + "Unable to charge fee with error {:?}", error + ); + Error::::FeesNotMet + })?; } let message_id = T::XcmRouter::deliver(ticket).map_err(Error::::from)?; diff --git a/polkadot/xcm/pallet-xcm/src/mock.rs b/polkadot/xcm/pallet-xcm/src/mock.rs index e744cefb162..0b0f795100c 100644 --- a/polkadot/xcm/pallet-xcm/src/mock.rs +++ b/polkadot/xcm/pallet-xcm/src/mock.rs @@ -153,6 +153,7 @@ construct_runtime!( thread_local! { pub static SENT_XCM: RefCell)>> = RefCell::new(Vec::new()); + pub static FAIL_SEND_XCM: RefCell = RefCell::new(false); } pub(crate) fn sent_xcm() -> Vec<(MultiLocation, Xcm<()>)> { SENT_XCM.with(|q| (*q.borrow()).clone()) @@ -164,6 +165,9 @@ pub(crate) fn take_sent_xcm() -> Vec<(MultiLocation, Xcm<()>)> { r }) } +pub(crate) fn set_send_xcm_artificial_failure(should_fail: bool) { + FAIL_SEND_XCM.with(|q| *q.borrow_mut() = should_fail); +} /// Sender that never returns error. 
pub struct TestSendXcm; impl SendXcm for TestSendXcm { @@ -172,6 +176,9 @@ impl SendXcm for TestSendXcm { dest: &mut Option, msg: &mut Option>, ) -> SendResult<(MultiLocation, Xcm<()>)> { + if FAIL_SEND_XCM.with(|q| *q.borrow()) { + return Err(SendError::Transport("Intentional send failure used in tests")) + } let pair = (dest.take().unwrap(), msg.take().unwrap()); Ok((pair, MultiAssets::new())) } diff --git a/polkadot/xcm/pallet-xcm/src/tests/assets_transfer.rs b/polkadot/xcm/pallet-xcm/src/tests/assets_transfer.rs index bf39e1ca288..d1b298765e2 100644 --- a/polkadot/xcm/pallet-xcm/src/tests/assets_transfer.rs +++ b/polkadot/xcm/pallet-xcm/src/tests/assets_transfer.rs @@ -1457,3 +1457,61 @@ fn reserve_transfer_assets_with_filtered_teleported_fee_disallowed() { ); }); } + +/// Test failure to complete execution of local XCM instructions reverts intermediate side-effects. +/// +/// Extrinsic will execute XCM to withdraw & burn reserve-based assets, then fail sending XCM to +/// reserve chain for releasing reserve assets. Assert that the previous instructions (withdraw & +/// burn) effects are reverted. +#[test] +fn intermediary_error_reverts_side_effects() { + let balances = vec![(ALICE, INITIAL_BALANCE)]; + let beneficiary: MultiLocation = + Junction::AccountId32 { network: None, id: ALICE.into() }.into(); + new_test_ext_with_balances(balances).execute_with(|| { + // create sufficient foreign asset USDC (0 total issuance) + let usdc_initial_local_amount = 142; + let (_, usdc_chain_sovereign_account, usdc_id_multilocation) = set_up_foreign_asset( + USDC_RESERVE_PARA_ID, + Some(USDC_INNER_JUNCTION), + usdc_initial_local_amount, + true, + ); + + // transfer destination is some other parachain + let dest = RelayLocation::get().pushed_with_interior(Parachain(OTHER_PARA_ID)).unwrap(); + + let assets: MultiAssets = vec![(usdc_id_multilocation, SEND_AMOUNT).into()].into(); + let fee_index = 0; + + // balances checks before + assert_eq!(Assets::balance(usdc_id_multilocation, ALICE), usdc_initial_local_amount); + assert_eq!(Balances::free_balance(ALICE), INITIAL_BALANCE); + + // introduce artificial error in sending outbound XCM + set_send_xcm_artificial_failure(true); + + // do the transfer - extrinsic should completely fail on xcm send failure + assert!(XcmPallet::limited_reserve_transfer_assets( + RuntimeOrigin::signed(ALICE), + Box::new(dest.into()), + Box::new(beneficiary.into()), + Box::new(assets.into()), + fee_index as u32, + Unlimited, + ) + .is_err()); + + // Alice no changes + assert_eq!(Assets::balance(usdc_id_multilocation, ALICE), usdc_initial_local_amount); + assert_eq!(Balances::free_balance(ALICE), INITIAL_BALANCE); + // Destination account (parachain account) no changes + assert_eq!(Balances::free_balance(usdc_chain_sovereign_account.clone()), 0); + assert_eq!(Assets::balance(usdc_id_multilocation, usdc_chain_sovereign_account), 0); + // Verify total and active issuance of USDC has not changed + assert_eq!(Assets::total_issuance(usdc_id_multilocation), usdc_initial_local_amount); + assert_eq!(Assets::active_issuance(usdc_id_multilocation), usdc_initial_local_amount); + // Verify no XCM program sent + assert_eq!(sent_xcm(), vec![]); + }); +} diff --git a/polkadot/xcm/pallet-xcm/src/tests/mod.rs b/polkadot/xcm/pallet-xcm/src/tests/mod.rs index 72814e507f2..056c7dcc196 100644 --- a/polkadot/xcm/pallet-xcm/src/tests/mod.rs +++ b/polkadot/xcm/pallet-xcm/src/tests/mod.rs @@ -445,7 +445,7 @@ fn trapped_assets_can_be_claimed() { assert_eq!(AssetTraps::::iter().collect::>(), vec![]); let 
weight = BaseXcmWeight::get() * 3; - assert_ok!(XcmPallet::execute( + assert_ok!(>::execute( RuntimeOrigin::signed(ALICE), Box::new(VersionedXcm::from(Xcm(vec![ ClaimAsset { assets: (Here, SEND_AMOUNT).into(), ticket: Here.into() }, @@ -459,6 +459,52 @@ fn trapped_assets_can_be_claimed() { }); } +/// Test failure to complete execution reverts intermediate side-effects. +/// +/// XCM program will withdraw and deposit some assets, then fail execution of a further withdraw. +/// Assert that the previous instructions effects are reverted. +#[test] +fn incomplete_execute_reverts_side_effects() { + let balances = vec![(ALICE, INITIAL_BALANCE), (BOB, INITIAL_BALANCE)]; + new_test_ext_with_balances(balances).execute_with(|| { + let weight = BaseXcmWeight::get() * 4; + let dest: MultiLocation = Junction::AccountId32 { network: None, id: BOB.into() }.into(); + assert_eq!(Balances::total_balance(&ALICE), INITIAL_BALANCE); + let amount_to_send = INITIAL_BALANCE - ExistentialDeposit::get(); + let assets: MultiAssets = (Here, amount_to_send).into(); + let result = XcmPallet::execute( + RuntimeOrigin::signed(ALICE), + Box::new(VersionedXcm::from(Xcm(vec![ + // Withdraw + BuyExec + Deposit should work + WithdrawAsset(assets.clone()), + buy_execution(assets.inner()[0].clone()), + DepositAsset { assets: assets.clone().into(), beneficiary: dest }, + // Withdrawing once more will fail because of InsufficientBalance, and we expect to + // revert the effects of the above instructions as well + WithdrawAsset(assets), + ]))), + weight, + ); + // all effects are reverted and balances unchanged for either sender or receiver + assert_eq!(Balances::total_balance(&ALICE), INITIAL_BALANCE); + assert_eq!(Balances::total_balance(&BOB), INITIAL_BALANCE); + assert_eq!( + result, + Err(sp_runtime::DispatchErrorWithPostInfo { + post_info: frame_support::dispatch::PostDispatchInfo { + actual_weight: None, + pays_fee: frame_support::dispatch::Pays::Yes, + }, + error: sp_runtime::DispatchError::Module(sp_runtime::ModuleError { + index: 4, + error: [24, 0, 0, 0,], + message: Some("LocalExecutionIncomplete") + }) + }) + ); + }); +} + #[test] fn fake_latest_versioned_multilocation_works() { use codec::Encode; diff --git a/polkadot/xcm/src/v3/traits.rs b/polkadot/xcm/src/v3/traits.rs index 1043d17b710..6054bf1456a 100644 --- a/polkadot/xcm/src/v3/traits.rs +++ b/polkadot/xcm/src/v3/traits.rs @@ -275,9 +275,9 @@ pub enum Outcome { } impl Outcome { - pub fn ensure_complete(self) -> Result { + pub fn ensure_complete(self) -> result::Result { match self { - Outcome::Complete(_) => Ok(()), + Outcome::Complete(weight) => Ok(weight), Outcome::Incomplete(_, e) => Err(e), Outcome::Error(e) => Err(e), } diff --git a/polkadot/xcm/xcm-executor/integration-tests/Cargo.toml b/polkadot/xcm/xcm-executor/integration-tests/Cargo.toml index d869fc6f2dc..ddb45965ee4 100644 --- a/polkadot/xcm/xcm-executor/integration-tests/Cargo.toml +++ b/polkadot/xcm/xcm-executor/integration-tests/Cargo.toml @@ -12,6 +12,7 @@ codec = { package = "parity-scale-codec", version = "3.6.1" } frame-support = { path = "../../../../substrate/frame/support", default-features = false } frame-system = { path = "../../../../substrate/frame/system" } futures = "0.3.21" +pallet-transaction-payment = { path = "../../../../substrate/frame/transaction-payment" } pallet-xcm = { path = "../../pallet-xcm" } polkadot-test-client = { path = "../../../node/test/client" } polkadot-test-runtime = { path = "../../../runtime/test-runtime" } diff --git 
a/polkadot/xcm/xcm-executor/integration-tests/src/lib.rs b/polkadot/xcm/xcm-executor/integration-tests/src/lib.rs index d8c77f8317e..c02cb218885 100644 --- a/polkadot/xcm/xcm-executor/integration-tests/src/lib.rs +++ b/polkadot/xcm/xcm-executor/integration-tests/src/lib.rs @@ -76,33 +76,59 @@ fn transact_recursion_limit_works() { sp_tracing::try_init_simple(); let mut client = TestClientBuilder::new().build(); - let mut msg = Xcm(vec![ClearOrigin]); - let max_weight = ::Weigher::weight(&mut msg).unwrap(); - let mut call = polkadot_test_runtime::RuntimeCall::Xcm(pallet_xcm::Call::execute { - message: Box::new(VersionedXcm::from(msg)), - max_weight, - }); - - for _ in 0..11 { - let mut msg = Xcm(vec![ - WithdrawAsset((Parent, 1_000).into()), - BuyExecution { fees: (Parent, 1).into(), weight_limit: Unlimited }, + let base_xcm = |call: polkadot_test_runtime::RuntimeCall| { + Xcm(vec![ + WithdrawAsset((Here, 1_000).into()), + BuyExecution { fees: (Here, 1).into(), weight_limit: Unlimited }, Transact { origin_kind: OriginKind::Native, require_weight_at_most: call.get_dispatch_info().weight, call: call.encode().into(), }, - ]); + ]) + }; + let mut call: Option = None; + // set up transacts with recursive depth of 11 + for depth in (1..12).rev() { + let mut msg; + match depth { + // this one should fail with `XcmError::ExceedsStackLimit` + 11 => { + msg = Xcm(vec![ClearOrigin]); + }, + // this one checks that the inner one (depth 11) fails as expected, + // itself should not fail => should have outcome == Complete + 10 => { + let inner_call = call.take().unwrap(); + let expected_transact_status = + sp_runtime::DispatchError::Module(sp_runtime::ModuleError { + index: 27, + error: [24, 0, 0, 0], + message: Some("LocalExecutionIncomplete"), + }) + .encode() + .into(); + msg = base_xcm(inner_call); + msg.inner_mut().push(ExpectTransactStatus(expected_transact_status)); + }, + // these are the outer 9 calls that expect `ExpectTransactStatus(Success)` + d if d >= 1 && d <= 9 => { + let inner_call = call.take().unwrap(); + msg = base_xcm(inner_call); + msg.inner_mut().push(ExpectTransactStatus(MaybeErrorCode::Success)); + }, + _ => unreachable!(), + } let max_weight = ::Weigher::weight(&mut msg).unwrap(); - call = polkadot_test_runtime::RuntimeCall::Xcm(pallet_xcm::Call::execute { - message: Box::new(VersionedXcm::from(msg)), + call = Some(polkadot_test_runtime::RuntimeCall::Xcm(pallet_xcm::Call::execute { + message: Box::new(VersionedXcm::from(msg.clone())), max_weight, - }); + })); } let mut block_builder = client.init_polkadot_block_builder(); - let execute = construct_extrinsic(&client, call, sp_keyring::Sr25519Keyring::Alice, 0); + let execute = construct_extrinsic(&client, call.unwrap(), sp_keyring::Sr25519Keyring::Alice, 0); block_builder.push_polkadot_extrinsic(execute).expect("pushes extrinsic"); @@ -113,11 +139,29 @@ fn transact_recursion_limit_works() { .expect("imports the block"); client.state_at(block_hash).expect("state should exist").inspect_state(|| { - assert!(polkadot_test_runtime::System::events().iter().any(|r| matches!( - r.event, - polkadot_test_runtime::RuntimeEvent::Xcm(pallet_xcm::Event::Attempted { - outcome: Outcome::Incomplete(_, XcmError::ExceedsStackLimit) - }), + let events = polkadot_test_runtime::System::events(); + // verify 10 pallet_xcm calls were successful + assert_eq!( + polkadot_test_runtime::System::events() + .iter() + .filter(|r| matches!( + r.event, + polkadot_test_runtime::RuntimeEvent::Xcm(pallet_xcm::Event::Attempted { + outcome: Outcome::Complete(_) + }), 
+ )) + .count(), + 10 + ); + // verify transaction fees have been paid + assert!(events.iter().any(|r| matches!( + &r.event, + polkadot_test_runtime::RuntimeEvent::TransactionPayment( + pallet_transaction_payment::Event::TransactionFeePaid { + who: payer, + .. + } + ) if *payer == sp_keyring::Sr25519Keyring::Alice.into(), ))); }); } -- GitLab From f086d540ae06b891f1a03bc1281809821d38a6f4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Gon=C3=A7alo=20Pestana?= Date: Fri, 24 Nov 2023 10:36:22 +0100 Subject: [PATCH 110/199] pallet-staking: Converts all math operations to safe (#2435) This PR converts unsafe math operations to safe in the staking pallet. Closes https://github.com/paritytech/polkadot-sdk/issues/2431 --------- Co-authored-by: Ankan <10196091+Ank4n@users.noreply.github.com> --- substrate/frame/staking/src/lib.rs | 11 +++++---- substrate/frame/staking/src/pallet/impls.rs | 14 ++++++----- substrate/frame/staking/src/pallet/mod.rs | 4 ++- substrate/frame/staking/src/slashing.rs | 27 ++++++++++++--------- 4 files changed, 33 insertions(+), 23 deletions(-) diff --git a/substrate/frame/staking/src/lib.rs b/substrate/frame/staking/src/lib.rs index de989e8943d..e6250bb9681 100644 --- a/substrate/frame/staking/src/lib.rs +++ b/substrate/frame/staking/src/lib.rs @@ -530,12 +530,12 @@ impl StakingLedger { let mut unlocking_balance = BalanceOf::::zero(); while let Some(last) = self.unlocking.last_mut() { - if unlocking_balance + last.value <= value { + if unlocking_balance.defensive_saturating_add(last.value) <= value { unlocking_balance += last.value; self.active += last.value; self.unlocking.pop(); } else { - let diff = value - unlocking_balance; + let diff = value.defensive_saturating_sub(unlocking_balance); unlocking_balance += diff; self.active += diff; @@ -589,7 +589,7 @@ impl StakingLedger { // for a `slash_era = x`, any chunk that is scheduled to be unlocked at era `x + 28` // (assuming 28 is the bonding duration) onwards should be slashed. - let slashable_chunks_start = slash_era + T::BondingDuration::get(); + let slashable_chunks_start = slash_era.saturating_add(T::BondingDuration::get()); // `Some(ratio)` if this is proportional, with `ratio`, `None` otherwise. In both cases, we // slash first the active chunk, and then `slash_chunks_priority`. @@ -1185,8 +1185,9 @@ impl EraInfo { let nominator_count = exposure.others.len(); // expected page count is the number of nominators divided by the page size, rounded up. 
- let expected_page_count = - nominator_count.defensive_saturating_add(page_size as usize - 1) / page_size as usize; + let expected_page_count = nominator_count + .defensive_saturating_add((page_size as usize).defensive_saturating_sub(1)) + .saturating_div(page_size as usize); let (exposure_metadata, exposure_pages) = exposure.into_pages(page_size); defensive_assert!(exposure_pages.len() == expected_page_count, "unexpected page count"); diff --git a/substrate/frame/staking/src/pallet/impls.rs b/substrate/frame/staking/src/pallet/impls.rs index e18d9378b5e..d294686751c 100644 --- a/substrate/frame/staking/src/pallet/impls.rs +++ b/substrate/frame/staking/src/pallet/impls.rs @@ -27,8 +27,8 @@ use frame_support::{ dispatch::WithPostDispatchInfo, pallet_prelude::*, traits::{ - Currency, Defensive, EstimateNextNewSession, Get, Imbalance, Len, OnUnbalanced, TryCollect, - UnixTime, + Currency, Defensive, DefensiveSaturating, EstimateNextNewSession, Get, Imbalance, Len, + OnUnbalanced, TryCollect, UnixTime, }, weights::Weight, }; @@ -148,7 +148,7 @@ impl Pallet { // `consolidate_unlocked` strictly subtracts balance. if new_total < old_total { // Already checked that this won't overflow by entry condition. - let value = old_total - new_total; + let value = old_total.defensive_saturating_sub(new_total); Self::deposit_event(Event::::Withdrawn { stash, amount: value }); } @@ -262,7 +262,8 @@ impl Pallet { // total commission validator takes across all nominator pages let validator_total_commission_payout = validator_commission * validator_total_payout; - let validator_leftover_payout = validator_total_payout - validator_total_commission_payout; + let validator_leftover_payout = + validator_total_payout.defensive_saturating_sub(validator_total_commission_payout); // Now let's calculate how this is split to the validator. let validator_exposure_part = Perbill::from_rational(exposure.own(), exposure.total()); let validator_staking_payout = validator_exposure_part * validator_leftover_payout; @@ -475,7 +476,7 @@ impl Pallet { bonded.push((active_era, start_session)); if active_era > bonding_duration { - let first_kept = active_era - bonding_duration; + let first_kept = active_era.defensive_saturating_sub(bonding_duration); // Prune out everything that's from before the first-kept index. let n_to_prune = @@ -501,7 +502,8 @@ impl Pallet { if let Some(active_era_start) = active_era.start { let now_as_millis_u64 = T::UnixTime::now().as_millis().saturated_into::(); - let era_duration = (now_as_millis_u64 - active_era_start).saturated_into::(); + let era_duration = (now_as_millis_u64.defensive_saturating_sub(active_era_start)) + .saturated_into::(); let staked = Self::eras_total_stake(&active_era.index); let issuance = T::Currency::total_issuance(); let (validator_payout, remainder) = diff --git a/substrate/frame/staking/src/pallet/mod.rs b/substrate/frame/staking/src/pallet/mod.rs index a68e9c90da9..1f79ef63a47 100644 --- a/substrate/frame/staking/src/pallet/mod.rs +++ b/substrate/frame/staking/src/pallet/mod.rs @@ -1068,7 +1068,9 @@ pub mod pallet { ensure!(ledger.active >= min_active_bond, Error::::InsufficientBond); // Note: in case there is no current era it is fine to bond one era more. 
- let era = Self::current_era().unwrap_or(0) + T::BondingDuration::get(); + let era = Self::current_era() + .unwrap_or(0) + .defensive_saturating_add(T::BondingDuration::get()); if let Some(chunk) = ledger.unlocking.last_mut().filter(|chunk| chunk.era == era) { // To keep the chunk count down, we only keep one chunk per era. Since // `unlocking` is a FiFo queue, if a chunk exists for `era` we know that it will diff --git a/substrate/frame/staking/src/slashing.rs b/substrate/frame/staking/src/slashing.rs index 0d84d503733..709fd1441ec 100644 --- a/substrate/frame/staking/src/slashing.rs +++ b/substrate/frame/staking/src/slashing.rs @@ -57,7 +57,7 @@ use crate::{ use codec::{Decode, Encode, MaxEncodedLen}; use frame_support::{ ensure, - traits::{Currency, Defensive, Get, Imbalance, OnUnbalanced}, + traits::{Currency, Defensive, DefensiveSaturating, Get, Imbalance, OnUnbalanced}, }; use scale_info::TypeInfo; use sp_runtime::{ @@ -85,7 +85,7 @@ pub(crate) struct SlashingSpan { impl SlashingSpan { fn contains_era(&self, era: EraIndex) -> bool { - self.start <= era && self.length.map_or(true, |l| self.start + l > era) + self.start <= era && self.length.map_or(true, |l| self.start.saturating_add(l) > era) } } @@ -123,15 +123,15 @@ impl SlashingSpans { // returns `true` if a new span was started, `false` otherwise. `false` indicates // that internal state is unchanged. pub(crate) fn end_span(&mut self, now: EraIndex) -> bool { - let next_start = now + 1; + let next_start = now.defensive_saturating_add(1); if next_start <= self.last_start { return false } - let last_length = next_start - self.last_start; + let last_length = next_start.defensive_saturating_sub(self.last_start); self.prior.insert(0, last_length); self.last_start = next_start; - self.span_index += 1; + self.span_index.defensive_saturating_accrue(1); true } @@ -141,9 +141,9 @@ impl SlashingSpans { let mut index = self.span_index; let last = SlashingSpan { index, start: last_start, length: None }; let prior = self.prior.iter().cloned().map(move |length| { - let start = last_start - length; + let start = last_start.defensive_saturating_sub(length); last_start = start; - index -= 1; + index.defensive_saturating_reduce(1); SlashingSpan { index, start, length: Some(length) } }); @@ -164,13 +164,18 @@ impl SlashingSpans { let old_idx = self .iter() .skip(1) // skip ongoing span. - .position(|span| span.length.map_or(false, |len| span.start + len <= window_start)); + .position(|span| { + span.length + .map_or(false, |len| span.start.defensive_saturating_add(len) <= window_start) + }); - let earliest_span_index = self.span_index - self.prior.len() as SpanIndex; + let earliest_span_index = + self.span_index.defensive_saturating_sub(self.prior.len() as SpanIndex); let pruned = match old_idx { Some(o) => { self.prior.truncate(o); - let new_earliest = self.span_index - self.prior.len() as SpanIndex; + let new_earliest = + self.span_index.defensive_saturating_sub(self.prior.len() as SpanIndex); Some((earliest_span_index, new_earliest)) }, None => None, @@ -500,7 +505,7 @@ impl<'a, T: 'a + Config> InspectingSpans<'a, T> { let reward = if span_record.slashed < slash { // new maximum span slash. apply the difference. - let difference = slash - span_record.slashed; + let difference = slash.defensive_saturating_sub(span_record.slashed); span_record.slashed = slash; // compute reward. 
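// For illustration only (not part of the patch above): a minimal, self-contained
// sketch of the defensive-saturating pattern this change applies across the
// staking pallet. It relies only on the `DefensiveSaturating` trait from
// `frame_support::traits`, which the patch itself imports; the helper function
// name and values below are hypothetical and chosen purely for illustration.
use frame_support::traits::DefensiveSaturating;

/// First era whose unbonding/slashing records must still be kept around.
fn first_kept_era(active_era: u32, bonding_duration: u32) -> u32 {
    // A plain `active_era - bonding_duration` can underflow. The defensive
    // variant saturates at zero in release builds and raises a defensive
    // error (a panic under `debug_assertions`) instead of failing silently.
    active_era.defensive_saturating_sub(bonding_duration)
}

fn main() {
    assert_eq!(first_kept_era(100, 28), 72);
}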
-- GitLab From e3242d2c1e2018395c218357046cc88caaed78f3 Mon Sep 17 00:00:00 2001 From: Julian Eager Date: Fri, 24 Nov 2023 18:32:43 +0800 Subject: [PATCH 111/199] Adapt test worker to profile flag (#2450) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit closes #2194 cc @mrcnski --------- Co-authored-by: Bastian Köcher --- .../benches/host_prepare_rococo_runtime.rs | 2 +- polkadot/node/core/pvf/build.rs | 21 +++++++++++++++++++ polkadot/node/core/pvf/src/testing.rs | 11 +++++----- polkadot/node/core/pvf/tests/it/main.rs | 2 +- .../node/core/pvf/tests/it/worker_common.rs | 6 +++--- 5 files changed, 32 insertions(+), 10 deletions(-) create mode 100644 polkadot/node/core/pvf/build.rs diff --git a/polkadot/node/core/pvf/benches/host_prepare_rococo_runtime.rs b/polkadot/node/core/pvf/benches/host_prepare_rococo_runtime.rs index f2551b701c2..e625a2303b5 100644 --- a/polkadot/node/core/pvf/benches/host_prepare_rococo_runtime.rs +++ b/polkadot/node/core/pvf/benches/host_prepare_rococo_runtime.rs @@ -36,7 +36,7 @@ impl TestHost { where F: FnOnce(&mut Config), { - let (prepare_worker_path, execute_worker_path) = testing::build_workers_and_get_paths(true); + let (prepare_worker_path, execute_worker_path) = testing::build_workers_and_get_paths(); let cache_dir = tempfile::tempdir().unwrap(); let mut config = Config::new( diff --git a/polkadot/node/core/pvf/build.rs b/polkadot/node/core/pvf/build.rs new file mode 100644 index 00000000000..e01cc6deecc --- /dev/null +++ b/polkadot/node/core/pvf/build.rs @@ -0,0 +1,21 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +fn main() { + if let Ok(profile) = std::env::var("PROFILE") { + println!(r#"cargo:rustc-cfg=build_type="{}""#, profile); + } +} diff --git a/polkadot/node/core/pvf/src/testing.rs b/polkadot/node/core/pvf/src/testing.rs index 400b65bfe7d..c7c885c4342 100644 --- a/polkadot/node/core/pvf/src/testing.rs +++ b/polkadot/node/core/pvf/src/testing.rs @@ -59,21 +59,22 @@ pub fn validate_candidate( /// /// NOTE: This should only be called in dev code (tests, benchmarks) as it relies on the relative /// paths of the built workers. -pub fn build_workers_and_get_paths(is_bench: bool) -> (PathBuf, PathBuf) { +pub fn build_workers_and_get_paths() -> (PathBuf, PathBuf) { // Only needs to be called once for the current process. static WORKER_PATHS: OnceLock> = OnceLock::new(); - fn build_workers(is_bench: bool) { + fn build_workers() { let mut build_args = vec![ "build", "--package=polkadot", "--bin=polkadot-prepare-worker", "--bin=polkadot-execute-worker", ]; - if is_bench { - // Benches require --release. Regular tests are debug (no flag needed). 
+ + if cfg!(build_type = "release") { build_args.push("--release"); } + let mut cargo = std::process::Command::new("cargo"); let cmd = cargo // wasm runtime not needed @@ -117,7 +118,7 @@ pub fn build_workers_and_get_paths(is_bench: bool) -> (PathBuf, PathBuf) { } } - build_workers(is_bench); + build_workers(); Mutex::new((prepare_worker_path, execute_worker_path)) }); diff --git a/polkadot/node/core/pvf/tests/it/main.rs b/polkadot/node/core/pvf/tests/it/main.rs index b53d598cd0f..bd6b04182fd 100644 --- a/polkadot/node/core/pvf/tests/it/main.rs +++ b/polkadot/node/core/pvf/tests/it/main.rs @@ -52,7 +52,7 @@ impl TestHost { where F: FnOnce(&mut Config), { - let (prepare_worker_path, execute_worker_path) = build_workers_and_get_paths(false); + let (prepare_worker_path, execute_worker_path) = build_workers_and_get_paths(); let cache_dir = tempfile::tempdir().unwrap(); let mut config = Config::new( diff --git a/polkadot/node/core/pvf/tests/it/worker_common.rs b/polkadot/node/core/pvf/tests/it/worker_common.rs index 0d33af7e096..4b736b08ba6 100644 --- a/polkadot/node/core/pvf/tests/it/worker_common.rs +++ b/polkadot/node/core/pvf/tests/it/worker_common.rs @@ -23,7 +23,7 @@ use std::{env, time::Duration}; // Test spawning a program that immediately exits with a failure code. #[tokio::test] async fn spawn_immediate_exit() { - let (prepare_worker_path, _) = build_workers_and_get_paths(false); + let (prepare_worker_path, _) = build_workers_and_get_paths(); // There's no explicit `exit` subcommand in the worker; it will panic on an unknown // subcommand anyway @@ -41,7 +41,7 @@ async fn spawn_immediate_exit() { #[tokio::test] async fn spawn_timeout() { - let (_, execute_worker_path) = build_workers_and_get_paths(false); + let (_, execute_worker_path) = build_workers_and_get_paths(); let result = spawn_with_program_path( "integration-test", @@ -57,7 +57,7 @@ async fn spawn_timeout() { #[tokio::test] async fn should_connect() { - let (prepare_worker_path, _) = build_workers_and_get_paths(false); + let (prepare_worker_path, _) = build_workers_and_get_paths(); let _ = spawn_with_program_path( "integration-test", -- GitLab From 471eafcb8de32c0aac393a3cf3874ae62a495bf8 Mon Sep 17 00:00:00 2001 From: Sebastian Kunert Date: Fri, 24 Nov 2023 13:52:55 +0100 Subject: [PATCH 112/199] Disable any peer connections for parachain nodes in pov-recovery zombienet test (#2475) I noticed that this test broke at some point. The parachain nodes should only acquire their blocks from the relay chain. But they were connecting to their peers and started fetching blocks from there. In this test I now take additional measures so we check that each nodes really uses pov-recovery to get the blocks. 
--- cumulus/zombienet/tests/0002-pov_recovery.toml | 10 +++++----- cumulus/zombienet/tests/0002-pov_recovery.zndsl | 7 +++++++ 2 files changed, 12 insertions(+), 5 deletions(-) diff --git a/cumulus/zombienet/tests/0002-pov_recovery.toml b/cumulus/zombienet/tests/0002-pov_recovery.toml index 34cacbc2a9b..105e4a324f3 100644 --- a/cumulus/zombienet/tests/0002-pov_recovery.toml +++ b/cumulus/zombienet/tests/0002-pov_recovery.toml @@ -48,7 +48,7 @@ add_to_genesis = false validator = false # full node image = "{{COL_IMAGE}}" command = "test-parachain" - args = ["-lparachain::availability=trace,sync=debug,parachain=debug,cumulus-pov-recovery=debug,cumulus-consensus=debug", "--disable-block-announcements", "--bootnodes {{'bob'|zombie('multiAddress')}}","--", "--reserved-only", "--reserved-nodes {{'ferdie'|zombie('multiAddress')}}"] + args = ["-lparachain::availability=trace,sync=debug,parachain=debug,cumulus-pov-recovery=debug,cumulus-consensus=debug", "--disable-block-announcements", "--bootnodes {{'bob'|zombie('multiAddress')}}", "--in-peers 0", "--out-peers 0","--", "--reserved-only", "--reserved-nodes {{'ferdie'|zombie('multiAddress')}}"] # we fail recovery for 'eve' from time to time to test retries [[parachains.collators]] @@ -56,7 +56,7 @@ add_to_genesis = false validator = true # collator image = "{{COL_IMAGE}}" command = "test-parachain" - args = ["-lparachain::availability=trace,sync=debug,parachain=debug,cumulus-pov-recovery=debug,cumulus-consensus=debug", "--fail-pov-recovery", "--use-null-consensus", "--disable-block-announcements", "--bootnodes {{'bob'|zombie('multiAddress')}}", "--", "--reserved-only", "--reserved-nodes {{'ferdie'|zombie('multiAddress')}}"] + args = ["-lparachain::availability=trace,sync=debug,parachain=debug,cumulus-pov-recovery=debug,cumulus-consensus=debug", "--fail-pov-recovery", "--in-peers 0", "--out-peers 0", "--use-null-consensus", "--disable-block-announcements", "--bootnodes {{'bob'|zombie('multiAddress')}}", "--", "--reserved-only", "--reserved-nodes {{'ferdie'|zombie('multiAddress')}}"] # run 'one' as a RPC collator who does not produce blocks [[parachains.collators]] @@ -64,7 +64,7 @@ add_to_genesis = false validator = true # collator image = "{{COL_IMAGE}}" command = "test-parachain" - args = ["-lparachain::availability=trace,sync=debug,parachain=debug,cumulus-pov-recovery=debug,cumulus-consensus=debug", "--use-null-consensus", "--disable-block-announcements", "--bootnodes {{'bob'|zombie('multiAddress')}}", "--relay-chain-rpc-url {{'ferdie'|zombie('wsUri')}}", "--", "--reserved-only", "--reserved-nodes {{'ferdie'|zombie('multiAddress')}}"] + args = ["-lparachain::availability=trace,sync=debug,parachain=debug,cumulus-pov-recovery=debug,cumulus-consensus=debug", "--use-null-consensus", "--in-peers 0", "--out-peers 0", "--disable-block-announcements", "--bootnodes {{'bob'|zombie('multiAddress')}}", "--relay-chain-rpc-url {{'ferdie'|zombie('wsUri')}}", "--", "--reserved-only", "--reserved-nodes {{'ferdie'|zombie('multiAddress')}}"] # run 'two' as a RPC parachain full node [[parachains.collators]] @@ -72,7 +72,7 @@ add_to_genesis = false validator = false # full node image = "{{COL_IMAGE}}" command = "test-parachain" - args = ["-lparachain::availability=trace,sync=debug,parachain=debug,cumulus-pov-recovery=debug,cumulus-consensus=debug", "--disable-block-announcements", "--bootnodes {{'bob'|zombie('multiAddress')}}", "--relay-chain-rpc-url {{'ferdie'|zombie('wsUri')}}", "--", "--reserved-only", "--reserved-nodes {{'ferdie'|zombie('multiAddress')}}"] + args 
= ["-lparachain::availability=trace,sync=debug,parachain=debug,cumulus-pov-recovery=debug,cumulus-consensus=debug", "--disable-block-announcements", "--in-peers 0", "--out-peers 0", "--bootnodes {{'bob'|zombie('multiAddress')}}", "--relay-chain-rpc-url {{'ferdie'|zombie('wsUri')}}", "--", "--reserved-only", "--reserved-nodes {{'ferdie'|zombie('multiAddress')}}"] # run 'three' with light client [[parachains.collators]] @@ -80,4 +80,4 @@ add_to_genesis = false validator = false # full node image = "{{COL_IMAGE}}" command = "test-parachain" - args = ["-lparachain::availability=trace,sync=debug,parachain=debug,cumulus-pov-recovery=debug,cumulus-consensus=debug", "--disable-block-announcements", "--bootnodes {{'bob'|zombie('multiAddress')}}", "--relay-chain-light-client", "--", "--reserved-only", "--reserved-nodes {{'ferdie'|zombie('multiAddress')}}"] + args = ["-lparachain::availability=trace,sync=debug,parachain=debug,cumulus-pov-recovery=debug,cumulus-consensus=debug", "--disable-block-announcements", "--in-peers 0", "--out-peers 0", "--bootnodes {{'bob'|zombie('multiAddress')}}", "--relay-chain-light-client", "--", "--reserved-only", "--reserved-nodes {{'ferdie'|zombie('multiAddress')}}"] diff --git a/cumulus/zombienet/tests/0002-pov_recovery.zndsl b/cumulus/zombienet/tests/0002-pov_recovery.zndsl index 12ff00210f3..7a93e2f3742 100644 --- a/cumulus/zombienet/tests/0002-pov_recovery.zndsl +++ b/cumulus/zombienet/tests/0002-pov_recovery.zndsl @@ -15,3 +15,10 @@ one: reports block height is at least 20 within 800 seconds two: reports block height is at least 20 within 800 seconds three: reports block height is at least 20 within 800 seconds eve: reports block height is at least 20 within 800 seconds + +one: count of log lines containing "Importing block retrieved using pov_recovery" is greater than 19 within 10 seconds +two: count of log lines containing "Importing block retrieved using pov_recovery" is greater than 19 within 10 seconds +three: count of log lines containing "Importing block retrieved using pov_recovery" is greater than 19 within 10 seconds +eve: count of log lines containing "Importing block retrieved using pov_recovery" is greater than 19 within 10 seconds +charlie: count of log lines containing "Importing block retrieved using pov_recovery" is greater than 19 within 10 seconds +alice: count of log lines containing "Importing block retrieved using pov_recovery" is greater than 19 within 10 seconds -- GitLab From 7554f53f9e02f6ea5c7cb6a4ef977925f7e78b96 Mon Sep 17 00:00:00 2001 From: Liam Aharon Date: Fri, 24 Nov 2023 17:06:01 +0400 Subject: [PATCH 113/199] Remove dmp-queue pallet from Rococo Asset Hub and Bridge Hub (#2483) DMP queue migration is complete and the pallet should be removed to fix the check runtime upgrade CI. 
Screenshot 2023-11-24 at 16 05 37 Screenshot 2023-11-24 at 16 06 44 --- Cargo.lock | 2 - .../assets/asset-hub-rococo/Cargo.toml | 123 ++++++++-------- .../assets/asset-hub-rococo/src/lib.rs | 7 - .../src/weights/cumulus_pallet_dmp_queue.rs | 131 ------------------ .../asset-hub-rococo/src/weights/mod.rs | 1 - .../bridge-hubs/bridge-hub-rococo/Cargo.toml | 100 ++++++------- .../bridge-hubs/bridge-hub-rococo/src/lib.rs | 8 -- .../src/weights/cumulus_pallet_dmp_queue.rs | 131 ------------------ .../bridge-hub-rococo/src/weights/mod.rs | 1 - 9 files changed, 119 insertions(+), 385 deletions(-) delete mode 100644 cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/cumulus_pallet_dmp_queue.rs delete mode 100644 cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/cumulus_pallet_dmp_queue.rs diff --git a/Cargo.lock b/Cargo.lock index ab51be3d5db..ce43f78fa4f 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -924,7 +924,6 @@ dependencies = [ "bp-bridge-hub-rococo", "bp-bridge-hub-westend", "cumulus-pallet-aura-ext", - "cumulus-pallet-dmp-queue", "cumulus-pallet-parachain-system", "cumulus-pallet-session-benchmarking", "cumulus-pallet-xcm", @@ -2158,7 +2157,6 @@ dependencies = [ "bridge-hub-test-utils", "bridge-runtime-common", "cumulus-pallet-aura-ext", - "cumulus-pallet-dmp-queue", "cumulus-pallet-parachain-system", "cumulus-pallet-session-benchmarking", "cumulus-pallet-xcm", diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/Cargo.toml b/cumulus/parachains/runtimes/assets/asset-hub-rococo/Cargo.toml index 0773eb0c8eb..f3bdabd9cef 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-rococo/Cargo.toml +++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/Cargo.toml @@ -7,74 +7,86 @@ description = "Rococo variant of Asset Hub parachain runtime" license = "Apache-2.0" [dependencies] -codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive", "max-encoded-len"] } +codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = [ + "derive", + "max-encoded-len", +] } hex-literal = { version = "0.4.1" } log = { version = "0.4.20", default-features = false } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.10.0", default-features = false, features = [ + "derive", +] } smallvec = "1.11.0" # Substrate -frame-benchmarking = { path = "../../../../../substrate/frame/benchmarking", default-features = false, optional = true} -frame-executive = { path = "../../../../../substrate/frame/executive", default-features = false} -frame-support = { path = "../../../../../substrate/frame/support", default-features = false} -frame-system = { path = "../../../../../substrate/frame/system", default-features = false} -frame-system-benchmarking = { path = "../../../../../substrate/frame/system/benchmarking", default-features = false, optional = true} -frame-system-rpc-runtime-api = { path = "../../../../../substrate/frame/system/rpc/runtime-api", default-features = false} -frame-try-runtime = { path = "../../../../../substrate/frame/try-runtime", default-features = false, optional = true} -pallet-asset-conversion-tx-payment = { path = "../../../../../substrate/frame/transaction-payment/asset-conversion-tx-payment", default-features = false} -pallet-assets = { path = "../../../../../substrate/frame/assets", default-features = false} -pallet-asset-conversion = { path = "../../../../../substrate/frame/asset-conversion", 
default-features = false} -pallet-aura = { path = "../../../../../substrate/frame/aura", default-features = false} -pallet-authorship = { path = "../../../../../substrate/frame/authorship", default-features = false} -pallet-balances = { path = "../../../../../substrate/frame/balances", default-features = false} -pallet-message-queue = { path = "../../../../../substrate/frame/message-queue", default-features = false} -pallet-multisig = { path = "../../../../../substrate/frame/multisig", default-features = false} -pallet-nft-fractionalization = { path = "../../../../../substrate/frame/nft-fractionalization", default-features = false} -pallet-nfts = { path = "../../../../../substrate/frame/nfts", default-features = false} -pallet-nfts-runtime-api = { path = "../../../../../substrate/frame/nfts/runtime-api", default-features = false} -pallet-proxy = { path = "../../../../../substrate/frame/proxy", default-features = false} -pallet-session = { path = "../../../../../substrate/frame/session", default-features = false} +frame-benchmarking = { path = "../../../../../substrate/frame/benchmarking", default-features = false, optional = true } +frame-executive = { path = "../../../../../substrate/frame/executive", default-features = false } +frame-support = { path = "../../../../../substrate/frame/support", default-features = false } +frame-system = { path = "../../../../../substrate/frame/system", default-features = false } +frame-system-benchmarking = { path = "../../../../../substrate/frame/system/benchmarking", default-features = false, optional = true } +frame-system-rpc-runtime-api = { path = "../../../../../substrate/frame/system/rpc/runtime-api", default-features = false } +frame-try-runtime = { path = "../../../../../substrate/frame/try-runtime", default-features = false, optional = true } +pallet-asset-conversion-tx-payment = { path = "../../../../../substrate/frame/transaction-payment/asset-conversion-tx-payment", default-features = false } +pallet-assets = { path = "../../../../../substrate/frame/assets", default-features = false } +pallet-asset-conversion = { path = "../../../../../substrate/frame/asset-conversion", default-features = false } +pallet-aura = { path = "../../../../../substrate/frame/aura", default-features = false } +pallet-authorship = { path = "../../../../../substrate/frame/authorship", default-features = false } +pallet-balances = { path = "../../../../../substrate/frame/balances", default-features = false } +pallet-message-queue = { path = "../../../../../substrate/frame/message-queue", default-features = false } +pallet-multisig = { path = "../../../../../substrate/frame/multisig", default-features = false } +pallet-nft-fractionalization = { path = "../../../../../substrate/frame/nft-fractionalization", default-features = false } +pallet-nfts = { path = "../../../../../substrate/frame/nfts", default-features = false } +pallet-nfts-runtime-api = { path = "../../../../../substrate/frame/nfts/runtime-api", default-features = false } +pallet-proxy = { path = "../../../../../substrate/frame/proxy", default-features = false } +pallet-session = { path = "../../../../../substrate/frame/session", default-features = false } pallet-state-trie-migration = { path = "../../../../../substrate/frame/state-trie-migration", default-features = false, optional = true } -pallet-timestamp = { path = "../../../../../substrate/frame/timestamp", default-features = false} -pallet-transaction-payment = { path = "../../../../../substrate/frame/transaction-payment", default-features = false} 
-pallet-transaction-payment-rpc-runtime-api = { path = "../../../../../substrate/frame/transaction-payment/rpc/runtime-api", default-features = false} -pallet-uniques = { path = "../../../../../substrate/frame/uniques", default-features = false} -pallet-utility = { path = "../../../../../substrate/frame/utility", default-features = false} -sp-api = { path = "../../../../../substrate/primitives/api", default-features = false} -sp-block-builder = { path = "../../../../../substrate/primitives/block-builder", default-features = false} -sp-consensus-aura = { path = "../../../../../substrate/primitives/consensus/aura", default-features = false} -sp-core = { path = "../../../../../substrate/primitives/core", default-features = false} -sp-inherents = { path = "../../../../../substrate/primitives/inherents", default-features = false} -sp-genesis-builder = { path = "../../../../../substrate/primitives/genesis-builder", default-features = false } -sp-offchain = { path = "../../../../../substrate/primitives/offchain", default-features = false} -sp-runtime = { path = "../../../../../substrate/primitives/runtime", default-features = false} -sp-session = { path = "../../../../../substrate/primitives/session", default-features = false} -sp-std = { path = "../../../../../substrate/primitives/std", default-features = false} -sp-storage = { path = "../../../../../substrate/primitives/storage", default-features = false} -sp-transaction-pool = { path = "../../../../../substrate/primitives/transaction-pool", default-features = false} -sp-version = { path = "../../../../../substrate/primitives/version", default-features = false} -sp-weights = { path = "../../../../../substrate/primitives/weights", default-features = false} +pallet-timestamp = { path = "../../../../../substrate/frame/timestamp", default-features = false } +pallet-transaction-payment = { path = "../../../../../substrate/frame/transaction-payment", default-features = false } +pallet-transaction-payment-rpc-runtime-api = { path = "../../../../../substrate/frame/transaction-payment/rpc/runtime-api", default-features = false } +pallet-uniques = { path = "../../../../../substrate/frame/uniques", default-features = false } +pallet-utility = { path = "../../../../../substrate/frame/utility", default-features = false } +sp-api = { path = "../../../../../substrate/primitives/api", default-features = false } +sp-block-builder = { path = "../../../../../substrate/primitives/block-builder", default-features = false } +sp-consensus-aura = { path = "../../../../../substrate/primitives/consensus/aura", default-features = false } +sp-core = { path = "../../../../../substrate/primitives/core", default-features = false } +sp-inherents = { path = "../../../../../substrate/primitives/inherents", default-features = false } +sp-genesis-builder = { path = "../../../../../substrate/primitives/genesis-builder", default-features = false } +sp-offchain = { path = "../../../../../substrate/primitives/offchain", default-features = false } +sp-runtime = { path = "../../../../../substrate/primitives/runtime", default-features = false } +sp-session = { path = "../../../../../substrate/primitives/session", default-features = false } +sp-std = { path = "../../../../../substrate/primitives/std", default-features = false } +sp-storage = { path = "../../../../../substrate/primitives/storage", default-features = false } +sp-transaction-pool = { path = "../../../../../substrate/primitives/transaction-pool", default-features = false } +sp-version = { path = 
"../../../../../substrate/primitives/version", default-features = false } +sp-weights = { path = "../../../../../substrate/primitives/weights", default-features = false } # num-traits feature needed for dex integer sq root: -primitive-types = { version = "0.12.1", default-features = false, features = ["codec", "scale-info", "num-traits"] } +primitive-types = { version = "0.12.1", default-features = false, features = [ + "codec", + "scale-info", + "num-traits", +] } # Polkadot -rococo-runtime-constants = { path = "../../../../../polkadot/runtime/rococo/constants", default-features = false} -pallet-xcm = { path = "../../../../../polkadot/xcm/pallet-xcm", default-features = false} +rococo-runtime-constants = { path = "../../../../../polkadot/runtime/rococo/constants", default-features = false } +pallet-xcm = { path = "../../../../../polkadot/xcm/pallet-xcm", default-features = false } pallet-xcm-benchmarks = { path = "../../../../../polkadot/xcm/pallet-xcm-benchmarks", default-features = false, optional = true } -polkadot-core-primitives = { path = "../../../../../polkadot/core-primitives", default-features = false} -polkadot-parachain-primitives = { path = "../../../../../polkadot/parachain", default-features = false} -polkadot-runtime-common = { path = "../../../../../polkadot/runtime/common", default-features = false} -xcm = { package = "staging-xcm", path = "../../../../../polkadot/xcm", default-features = false} -xcm-builder = { package = "staging-xcm-builder", path = "../../../../../polkadot/xcm/xcm-builder", default-features = false} -xcm-executor = { package = "staging-xcm-executor", path = "../../../../../polkadot/xcm/xcm-executor", default-features = false} +polkadot-core-primitives = { path = "../../../../../polkadot/core-primitives", default-features = false } +polkadot-parachain-primitives = { path = "../../../../../polkadot/parachain", default-features = false } +polkadot-runtime-common = { path = "../../../../../polkadot/runtime/common", default-features = false } +xcm = { package = "staging-xcm", path = "../../../../../polkadot/xcm", default-features = false } +xcm-builder = { package = "staging-xcm-builder", path = "../../../../../polkadot/xcm/xcm-builder", default-features = false } +xcm-executor = { package = "staging-xcm-executor", path = "../../../../../polkadot/xcm/xcm-executor", default-features = false } # Cumulus cumulus-pallet-aura-ext = { path = "../../../../pallets/aura-ext", default-features = false } -cumulus-pallet-dmp-queue = { path = "../../../../pallets/dmp-queue", default-features = false } -cumulus-pallet-parachain-system = { path = "../../../../pallets/parachain-system", default-features = false, features = ["parameterized-consensus-hook",] } -cumulus-pallet-session-benchmarking = { path = "../../../../pallets/session-benchmarking", default-features = false} +cumulus-pallet-parachain-system = { path = "../../../../pallets/parachain-system", default-features = false, features = [ + "parameterized-consensus-hook", +] } +cumulus-pallet-session-benchmarking = { path = "../../../../pallets/session-benchmarking", default-features = false } cumulus-pallet-xcm = { path = "../../../../pallets/xcm", default-features = false } -cumulus-pallet-xcmp-queue = { path = "../../../../pallets/xcmp-queue", default-features = false, features = ["bridging"] } +cumulus-pallet-xcmp-queue = { path = "../../../../pallets/xcmp-queue", default-features = false, features = [ + "bridging", +] } cumulus-primitives-core = { path = "../../../../primitives/core", default-features = false } 
cumulus-primitives-utility = { path = "../../../../primitives/utility", default-features = false } pallet-collator-selection = { path = "../../../../pallets/collator-selection", default-features = false } @@ -107,7 +119,6 @@ default = [ "std" ] state-trie-version-1 = [ "pallet-state-trie-migration" ] runtime-benchmarks = [ "assets-common/runtime-benchmarks", - "cumulus-pallet-dmp-queue/runtime-benchmarks", "cumulus-pallet-parachain-system/runtime-benchmarks", "cumulus-pallet-session-benchmarking/runtime-benchmarks", "cumulus-pallet-xcmp-queue/runtime-benchmarks", @@ -142,7 +153,6 @@ runtime-benchmarks = [ ] try-runtime = [ "cumulus-pallet-aura-ext/try-runtime", - "cumulus-pallet-dmp-queue/try-runtime", "cumulus-pallet-parachain-system/try-runtime", "cumulus-pallet-xcm/try-runtime", "cumulus-pallet-xcmp-queue/try-runtime", @@ -182,7 +192,6 @@ std = [ "bp-bridge-hub-westend/std", "codec/std", "cumulus-pallet-aura-ext/std", - "cumulus-pallet-dmp-queue/std", "cumulus-pallet-parachain-system/std", "cumulus-pallet-session-benchmarking/std", "cumulus-pallet-xcm/std", diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs index b274f45877b..02b51310890 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs @@ -685,12 +685,6 @@ parameter_types! { pub const RelayOrigin: AggregateMessageOrigin = AggregateMessageOrigin::Parent; } -impl cumulus_pallet_dmp_queue::Config for Runtime { - type WeightInfo = weights::cumulus_pallet_dmp_queue::WeightInfo; - type RuntimeEvent = RuntimeEvent; - type DmpSink = frame_support::traits::EnqueueWithOrigin; -} - parameter_types! { pub const Period: u32 = 6 * HOURS; pub const Offset: u32 = 0; @@ -912,7 +906,6 @@ construct_runtime!( XcmpQueue: cumulus_pallet_xcmp_queue::{Pallet, Call, Storage, Event} = 30, PolkadotXcm: pallet_xcm::{Pallet, Call, Storage, Event, Origin, Config} = 31, CumulusXcm: cumulus_pallet_xcm::{Pallet, Event, Origin} = 32, - DmpQueue: cumulus_pallet_dmp_queue::{Pallet, Call, Storage, Event} = 33, MessageQueue: pallet_message_queue::{Pallet, Call, Storage, Event} = 34, // Handy utilities. diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/cumulus_pallet_dmp_queue.rs b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/cumulus_pallet_dmp_queue.rs deleted file mode 100644 index cc41dcd6cbb..00000000000 --- a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/cumulus_pallet_dmp_queue.rs +++ /dev/null @@ -1,131 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Cumulus. - -// Cumulus is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Cumulus is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Cumulus. If not, see . - -//! Autogenerated weights for `cumulus_pallet_dmp_queue` -//! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! 
DATE: 2023-10-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-yprdrvc7-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("asset-hub-kusama-dev")`, DB CACHE: 1024 - -// Executed Command: -// target/production/polkadot-parachain -// benchmark -// pallet -// --steps=50 -// --repeat=20 -// --extrinsic=* -// --wasm-execution=compiled -// --heap-pages=4096 -// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json -// --pallet=cumulus_pallet_dmp_queue -// --chain=asset-hub-kusama-dev -// --header=./cumulus/file_header.txt -// --output=./cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/ - -#![cfg_attr(rustfmt, rustfmt_skip)] -#![allow(unused_parens)] -#![allow(unused_imports)] -#![allow(missing_docs)] - -use frame_support::{traits::Get, weights::Weight}; -use core::marker::PhantomData; - -/// Weight functions for `cumulus_pallet_dmp_queue`. -pub struct WeightInfo(PhantomData); -impl cumulus_pallet_dmp_queue::WeightInfo for WeightInfo { - /// Storage: `DmpQueue::MigrationStatus` (r:1 w:1) - /// Proof: `DmpQueue::MigrationStatus` (`max_values`: Some(1), `max_size`: Some(1028), added: 1523, mode: `MaxEncodedLen`) - /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca754904d6d8c6fe06c4e5965f9b8397421` (r:1 w:0) - /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca754904d6d8c6fe06c4e5965f9b8397421` (r:1 w:0) - /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca7d95d3e948effbeccff2de2c182672836` (r:1 w:1) - /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca7d95d3e948effbeccff2de2c182672836` (r:1 w:1) - /// Storage: `MessageQueue::BookStateFor` (r:1 w:1) - /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) - /// Storage: `MessageQueue::ServiceHead` (r:1 w:1) - /// Proof: `MessageQueue::ServiceHead` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `MaxEncodedLen`) - /// Storage: `MessageQueue::Pages` (r:0 w:1) - /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(65585), added: 68060, mode: `MaxEncodedLen`) - fn on_idle_good_msg() -> Weight { - // Proof Size summary in bytes: - // Measured: `65696` - // Estimated: `69161` - // Minimum execution time: 124_651_000 picoseconds. - Weight::from_parts(127_857_000, 0) - .saturating_add(Weight::from_parts(0, 69161)) - .saturating_add(T::DbWeight::get().reads(5)) - .saturating_add(T::DbWeight::get().writes(5)) - } - /// Storage: `DmpQueue::MigrationStatus` (r:1 w:1) - /// Proof: `DmpQueue::MigrationStatus` (`max_values`: Some(1), `max_size`: Some(1028), added: 1523, mode: `MaxEncodedLen`) - /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca754904d6d8c6fe06c4e5965f9b8397421` (r:1 w:0) - /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca754904d6d8c6fe06c4e5965f9b8397421` (r:1 w:0) - /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca7d95d3e948effbeccff2de2c182672836` (r:1 w:1) - /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca7d95d3e948effbeccff2de2c182672836` (r:1 w:1) - fn on_idle_large_msg() -> Weight { - // Proof Size summary in bytes: - // Measured: `65659` - // Estimated: `69124` - // Minimum execution time: 65_684_000 picoseconds. 
- Weight::from_parts(68_039_000, 0) - .saturating_add(Weight::from_parts(0, 69124)) - .saturating_add(T::DbWeight::get().reads(3)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `DmpQueue::MigrationStatus` (r:1 w:1) - /// Proof: `DmpQueue::MigrationStatus` (`max_values`: Some(1), `max_size`: Some(1028), added: 1523, mode: `MaxEncodedLen`) - /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca754904d6d8c6fe06c4e5965f9b8397421` (r:1 w:0) - /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca754904d6d8c6fe06c4e5965f9b8397421` (r:1 w:0) - /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca70f923ef3252d0166429d36d20ed665a8` (r:1 w:1) - /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca70f923ef3252d0166429d36d20ed665a8` (r:1 w:1) - /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca772275f64c354954352b71eea39cfaca2` (r:1 w:1) - /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca772275f64c354954352b71eea39cfaca2` (r:1 w:1) - /// Storage: `MessageQueue::BookStateFor` (r:1 w:1) - /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) - /// Storage: `MessageQueue::ServiceHead` (r:1 w:1) - /// Proof: `MessageQueue::ServiceHead` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `MaxEncodedLen`) - /// Storage: `MessageQueue::Pages` (r:0 w:1) - /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(65585), added: 68060, mode: `MaxEncodedLen`) - fn on_idle_overweight_good_msg() -> Weight { - // Proof Size summary in bytes: - // Measured: `65726` - // Estimated: `69191` - // Minimum execution time: 117_657_000 picoseconds. - Weight::from_parts(122_035_000, 0) - .saturating_add(Weight::from_parts(0, 69191)) - .saturating_add(T::DbWeight::get().reads(6)) - .saturating_add(T::DbWeight::get().writes(6)) - } - /// Storage: `DmpQueue::MigrationStatus` (r:1 w:1) - /// Proof: `DmpQueue::MigrationStatus` (`max_values`: Some(1), `max_size`: Some(1028), added: 1523, mode: `MaxEncodedLen`) - /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca754904d6d8c6fe06c4e5965f9b8397421` (r:1 w:0) - /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca754904d6d8c6fe06c4e5965f9b8397421` (r:1 w:0) - /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca70f923ef3252d0166429d36d20ed665a8` (r:1 w:1) - /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca70f923ef3252d0166429d36d20ed665a8` (r:1 w:1) - /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca772275f64c354954352b71eea39cfaca2` (r:1 w:1) - /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca772275f64c354954352b71eea39cfaca2` (r:1 w:1) - fn on_idle_overweight_large_msg() -> Weight { - // Proof Size summary in bytes: - // Measured: `65689` - // Estimated: `69154` - // Minimum execution time: 59_799_000 picoseconds. - Weight::from_parts(61_354_000, 0) - .saturating_add(Weight::from_parts(0, 69154)) - .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(3)) - } -} diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/mod.rs b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/mod.rs index 252cf2630f4..aa994a7608a 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/mod.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/mod.rs @@ -15,7 +15,6 @@ // along with Cumulus. If not, see . 
pub mod block_weights; -pub mod cumulus_pallet_dmp_queue; pub mod cumulus_pallet_parachain_system; pub mod cumulus_pallet_xcmp_queue; pub mod extrinsic_weights; diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/Cargo.toml b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/Cargo.toml index c475768d5dd..42ea63bfea5 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/Cargo.toml +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/Cargo.toml @@ -10,64 +10,71 @@ license = "Apache-2.0" substrate-wasm-builder = { path = "../../../../../substrate/utils/wasm-builder", optional = true } [dependencies] -codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = [ + "derive", +] } hex-literal = { version = "0.4.1" } log = { version = "0.4.20", default-features = false } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.10.0", default-features = false, features = [ + "derive", +] } serde = { version = "1.0.188", optional = true, features = ["derive"] } smallvec = "1.11.0" # Substrate -frame-benchmarking = { path = "../../../../../substrate/frame/benchmarking", default-features = false, optional = true} -frame-executive = { path = "../../../../../substrate/frame/executive", default-features = false} -frame-support = { path = "../../../../../substrate/frame/support", default-features = false} -frame-system = { path = "../../../../../substrate/frame/system", default-features = false} -frame-system-benchmarking = { path = "../../../../../substrate/frame/system/benchmarking", default-features = false, optional = true} -frame-system-rpc-runtime-api = { path = "../../../../../substrate/frame/system/rpc/runtime-api", default-features = false} -frame-try-runtime = { path = "../../../../../substrate/frame/try-runtime", default-features = false, optional = true} -pallet-aura = { path = "../../../../../substrate/frame/aura", default-features = false} -pallet-authorship = { path = "../../../../../substrate/frame/authorship", default-features = false} -pallet-balances = { path = "../../../../../substrate/frame/balances", default-features = false} -pallet-session = { path = "../../../../../substrate/frame/session", default-features = false} +frame-benchmarking = { path = "../../../../../substrate/frame/benchmarking", default-features = false, optional = true } +frame-executive = { path = "../../../../../substrate/frame/executive", default-features = false } +frame-support = { path = "../../../../../substrate/frame/support", default-features = false } +frame-system = { path = "../../../../../substrate/frame/system", default-features = false } +frame-system-benchmarking = { path = "../../../../../substrate/frame/system/benchmarking", default-features = false, optional = true } +frame-system-rpc-runtime-api = { path = "../../../../../substrate/frame/system/rpc/runtime-api", default-features = false } +frame-try-runtime = { path = "../../../../../substrate/frame/try-runtime", default-features = false, optional = true } +pallet-aura = { path = "../../../../../substrate/frame/aura", default-features = false } +pallet-authorship = { path = "../../../../../substrate/frame/authorship", default-features = false } +pallet-balances = { path = "../../../../../substrate/frame/balances", default-features = false } +pallet-session = { path = 
"../../../../../substrate/frame/session", default-features = false } pallet-message-queue = { path = "../../../../../substrate/frame/message-queue", default-features = false } -pallet-multisig = { path = "../../../../../substrate/frame/multisig", default-features = false} -pallet-timestamp = { path = "../../../../../substrate/frame/timestamp", default-features = false} -pallet-transaction-payment = { path = "../../../../../substrate/frame/transaction-payment", default-features = false} -pallet-transaction-payment-rpc-runtime-api = { path = "../../../../../substrate/frame/transaction-payment/rpc/runtime-api", default-features = false} -pallet-utility = { path = "../../../../../substrate/frame/utility", default-features = false} -sp-api = { path = "../../../../../substrate/primitives/api", default-features = false} -sp-block-builder = { path = "../../../../../substrate/primitives/block-builder", default-features = false} -sp-consensus-aura = { path = "../../../../../substrate/primitives/consensus/aura", default-features = false} -sp-core = { path = "../../../../../substrate/primitives/core", default-features = false} +pallet-multisig = { path = "../../../../../substrate/frame/multisig", default-features = false } +pallet-timestamp = { path = "../../../../../substrate/frame/timestamp", default-features = false } +pallet-transaction-payment = { path = "../../../../../substrate/frame/transaction-payment", default-features = false } +pallet-transaction-payment-rpc-runtime-api = { path = "../../../../../substrate/frame/transaction-payment/rpc/runtime-api", default-features = false } +pallet-utility = { path = "../../../../../substrate/frame/utility", default-features = false } +sp-api = { path = "../../../../../substrate/primitives/api", default-features = false } +sp-block-builder = { path = "../../../../../substrate/primitives/block-builder", default-features = false } +sp-consensus-aura = { path = "../../../../../substrate/primitives/consensus/aura", default-features = false } +sp-core = { path = "../../../../../substrate/primitives/core", default-features = false } sp-genesis-builder = { path = "../../../../../substrate/primitives/genesis-builder", default-features = false } -sp-inherents = { path = "../../../../../substrate/primitives/inherents", default-features = false} -sp-io = { path = "../../../../../substrate/primitives/io", default-features = false} -sp-offchain = { path = "../../../../../substrate/primitives/offchain", default-features = false} -sp-runtime = { path = "../../../../../substrate/primitives/runtime", default-features = false} -sp-session = { path = "../../../../../substrate/primitives/session", default-features = false} -sp-std = { path = "../../../../../substrate/primitives/std", default-features = false} -sp-storage = { path = "../../../../../substrate/primitives/storage", default-features = false} -sp-transaction-pool = { path = "../../../../../substrate/primitives/transaction-pool", default-features = false} -sp-version = { path = "../../../../../substrate/primitives/version", default-features = false} +sp-inherents = { path = "../../../../../substrate/primitives/inherents", default-features = false } +sp-io = { path = "../../../../../substrate/primitives/io", default-features = false } +sp-offchain = { path = "../../../../../substrate/primitives/offchain", default-features = false } +sp-runtime = { path = "../../../../../substrate/primitives/runtime", default-features = false } +sp-session = { path = "../../../../../substrate/primitives/session", default-features = 
false } +sp-std = { path = "../../../../../substrate/primitives/std", default-features = false } +sp-storage = { path = "../../../../../substrate/primitives/storage", default-features = false } +sp-transaction-pool = { path = "../../../../../substrate/primitives/transaction-pool", default-features = false } +sp-version = { path = "../../../../../substrate/primitives/version", default-features = false } # Polkadot -rococo-runtime-constants = { path = "../../../../../polkadot/runtime/rococo/constants", default-features = false} -pallet-xcm = { path = "../../../../../polkadot/xcm/pallet-xcm", default-features = false} +rococo-runtime-constants = { path = "../../../../../polkadot/runtime/rococo/constants", default-features = false } +pallet-xcm = { path = "../../../../../polkadot/xcm/pallet-xcm", default-features = false } pallet-xcm-benchmarks = { path = "../../../../../polkadot/xcm/pallet-xcm-benchmarks", default-features = false, optional = true } -polkadot-core-primitives = { path = "../../../../../polkadot/core-primitives", default-features = false} -polkadot-parachain-primitives = { path = "../../../../../polkadot/parachain", default-features = false} -polkadot-runtime-common = { path = "../../../../../polkadot/runtime/common", default-features = false} -xcm = { package = "staging-xcm", path = "../../../../../polkadot/xcm", default-features = false} -xcm-builder = { package = "staging-xcm-builder", path = "../../../../../polkadot/xcm/xcm-builder", default-features = false} -xcm-executor = { package = "staging-xcm-executor", path = "../../../../../polkadot/xcm/xcm-executor", default-features = false} +polkadot-core-primitives = { path = "../../../../../polkadot/core-primitives", default-features = false } +polkadot-parachain-primitives = { path = "../../../../../polkadot/parachain", default-features = false } +polkadot-runtime-common = { path = "../../../../../polkadot/runtime/common", default-features = false } +xcm = { package = "staging-xcm", path = "../../../../../polkadot/xcm", default-features = false } +xcm-builder = { package = "staging-xcm-builder", path = "../../../../../polkadot/xcm/xcm-builder", default-features = false } +xcm-executor = { package = "staging-xcm-executor", path = "../../../../../polkadot/xcm/xcm-executor", default-features = false } # Cumulus cumulus-pallet-aura-ext = { path = "../../../../pallets/aura-ext", default-features = false } -cumulus-pallet-dmp-queue = { path = "../../../../pallets/dmp-queue", default-features = false } -cumulus-pallet-parachain-system = { path = "../../../../pallets/parachain-system", default-features = false, features = ["parameterized-consensus-hook",] } -cumulus-pallet-session-benchmarking = { path = "../../../../pallets/session-benchmarking", default-features = false} +cumulus-pallet-parachain-system = { path = "../../../../pallets/parachain-system", default-features = false, features = [ + "parameterized-consensus-hook", +] } +cumulus-pallet-session-benchmarking = { path = "../../../../pallets/session-benchmarking", default-features = false } cumulus-pallet-xcm = { path = "../../../../pallets/xcm", default-features = false } -cumulus-pallet-xcmp-queue = { path = "../../../../pallets/xcmp-queue", default-features = false, features = ["bridging"] } +cumulus-pallet-xcmp-queue = { path = "../../../../pallets/xcmp-queue", default-features = false, features = [ + "bridging", +] } cumulus-primitives-core = { path = "../../../../primitives/core", default-features = false } cumulus-primitives-utility = { path = 
"../../../../primitives/utility", default-features = false } pallet-collator-selection = { path = "../../../../pallets/collator-selection", default-features = false } @@ -96,7 +103,9 @@ bridge-runtime-common = { path = "../../../../../bridges/bin/runtime-common", de [dev-dependencies] static_assertions = "1.1" bridge-hub-test-utils = { path = "../test-utils" } -bridge-runtime-common = { path = "../../../../../bridges/bin/runtime-common", features = ["integrity-test"] } +bridge-runtime-common = { path = "../../../../../bridges/bin/runtime-common", features = [ + "integrity-test", +] } sp-keyring = { path = "../../../../../substrate/primitives/keyring" } [features] @@ -117,7 +126,6 @@ std = [ "bridge-runtime-common/std", "codec/std", "cumulus-pallet-aura-ext/std", - "cumulus-pallet-dmp-queue/std", "cumulus-pallet-parachain-system/std", "cumulus-pallet-session-benchmarking/std", "cumulus-pallet-xcm/std", @@ -179,7 +187,6 @@ std = [ runtime-benchmarks = [ "bridge-runtime-common/runtime-benchmarks", - "cumulus-pallet-dmp-queue/runtime-benchmarks", "cumulus-pallet-parachain-system/runtime-benchmarks", "cumulus-pallet-session-benchmarking/runtime-benchmarks", "cumulus-pallet-xcmp-queue/runtime-benchmarks", @@ -211,7 +218,6 @@ runtime-benchmarks = [ try-runtime = [ "cumulus-pallet-aura-ext/try-runtime", - "cumulus-pallet-dmp-queue/try-runtime", "cumulus-pallet-parachain-system/try-runtime", "cumulus-pallet-xcm/try-runtime", "cumulus-pallet-xcmp-queue/try-runtime", diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs index 8e138822696..7d64623f551 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs @@ -394,12 +394,6 @@ parameter_types! { pub const RelayOrigin: AggregateMessageOrigin = AggregateMessageOrigin::Parent; } -impl cumulus_pallet_dmp_queue::Config for Runtime { - type WeightInfo = weights::cumulus_pallet_dmp_queue::WeightInfo; - type RuntimeEvent = RuntimeEvent; - type DmpSink = frame_support::traits::EnqueueWithOrigin; -} - pub const PERIOD: u32 = 6 * HOURS; pub const OFFSET: u32 = 0; @@ -500,7 +494,6 @@ construct_runtime!( XcmpQueue: cumulus_pallet_xcmp_queue::{Pallet, Call, Storage, Event} = 30, PolkadotXcm: pallet_xcm::{Pallet, Call, Storage, Event, Origin, Config} = 31, CumulusXcm: cumulus_pallet_xcm::{Pallet, Event, Origin} = 32, - DmpQueue: cumulus_pallet_dmp_queue::{Pallet, Call, Storage, Event} = 33, // Handy utilities. Utility: pallet_utility::{Pallet, Call, Event} = 40, @@ -552,7 +545,6 @@ mod benches { [pallet_collator_selection, CollatorSelection] [cumulus_pallet_parachain_system, ParachainSystem] [cumulus_pallet_xcmp_queue, XcmpQueue] - [cumulus_pallet_dmp_queue, DmpQueue] // XCM [pallet_xcm, PalletXcmExtrinsicsBenchmark::] // NOTE: Make sure you point to the individual modules below. diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/cumulus_pallet_dmp_queue.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/cumulus_pallet_dmp_queue.rs deleted file mode 100644 index cc41dcd6cbb..00000000000 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/cumulus_pallet_dmp_queue.rs +++ /dev/null @@ -1,131 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Cumulus. 
- -// Cumulus is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Cumulus is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Cumulus. If not, see . - -//! Autogenerated weights for `cumulus_pallet_dmp_queue` -//! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-10-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-yprdrvc7-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("asset-hub-kusama-dev")`, DB CACHE: 1024 - -// Executed Command: -// target/production/polkadot-parachain -// benchmark -// pallet -// --steps=50 -// --repeat=20 -// --extrinsic=* -// --wasm-execution=compiled -// --heap-pages=4096 -// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json -// --pallet=cumulus_pallet_dmp_queue -// --chain=asset-hub-kusama-dev -// --header=./cumulus/file_header.txt -// --output=./cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/ - -#![cfg_attr(rustfmt, rustfmt_skip)] -#![allow(unused_parens)] -#![allow(unused_imports)] -#![allow(missing_docs)] - -use frame_support::{traits::Get, weights::Weight}; -use core::marker::PhantomData; - -/// Weight functions for `cumulus_pallet_dmp_queue`. -pub struct WeightInfo(PhantomData); -impl cumulus_pallet_dmp_queue::WeightInfo for WeightInfo { - /// Storage: `DmpQueue::MigrationStatus` (r:1 w:1) - /// Proof: `DmpQueue::MigrationStatus` (`max_values`: Some(1), `max_size`: Some(1028), added: 1523, mode: `MaxEncodedLen`) - /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca754904d6d8c6fe06c4e5965f9b8397421` (r:1 w:0) - /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca754904d6d8c6fe06c4e5965f9b8397421` (r:1 w:0) - /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca7d95d3e948effbeccff2de2c182672836` (r:1 w:1) - /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca7d95d3e948effbeccff2de2c182672836` (r:1 w:1) - /// Storage: `MessageQueue::BookStateFor` (r:1 w:1) - /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) - /// Storage: `MessageQueue::ServiceHead` (r:1 w:1) - /// Proof: `MessageQueue::ServiceHead` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `MaxEncodedLen`) - /// Storage: `MessageQueue::Pages` (r:0 w:1) - /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(65585), added: 68060, mode: `MaxEncodedLen`) - fn on_idle_good_msg() -> Weight { - // Proof Size summary in bytes: - // Measured: `65696` - // Estimated: `69161` - // Minimum execution time: 124_651_000 picoseconds. 
- Weight::from_parts(127_857_000, 0) - .saturating_add(Weight::from_parts(0, 69161)) - .saturating_add(T::DbWeight::get().reads(5)) - .saturating_add(T::DbWeight::get().writes(5)) - } - /// Storage: `DmpQueue::MigrationStatus` (r:1 w:1) - /// Proof: `DmpQueue::MigrationStatus` (`max_values`: Some(1), `max_size`: Some(1028), added: 1523, mode: `MaxEncodedLen`) - /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca754904d6d8c6fe06c4e5965f9b8397421` (r:1 w:0) - /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca754904d6d8c6fe06c4e5965f9b8397421` (r:1 w:0) - /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca7d95d3e948effbeccff2de2c182672836` (r:1 w:1) - /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca7d95d3e948effbeccff2de2c182672836` (r:1 w:1) - fn on_idle_large_msg() -> Weight { - // Proof Size summary in bytes: - // Measured: `65659` - // Estimated: `69124` - // Minimum execution time: 65_684_000 picoseconds. - Weight::from_parts(68_039_000, 0) - .saturating_add(Weight::from_parts(0, 69124)) - .saturating_add(T::DbWeight::get().reads(3)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `DmpQueue::MigrationStatus` (r:1 w:1) - /// Proof: `DmpQueue::MigrationStatus` (`max_values`: Some(1), `max_size`: Some(1028), added: 1523, mode: `MaxEncodedLen`) - /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca754904d6d8c6fe06c4e5965f9b8397421` (r:1 w:0) - /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca754904d6d8c6fe06c4e5965f9b8397421` (r:1 w:0) - /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca70f923ef3252d0166429d36d20ed665a8` (r:1 w:1) - /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca70f923ef3252d0166429d36d20ed665a8` (r:1 w:1) - /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca772275f64c354954352b71eea39cfaca2` (r:1 w:1) - /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca772275f64c354954352b71eea39cfaca2` (r:1 w:1) - /// Storage: `MessageQueue::BookStateFor` (r:1 w:1) - /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) - /// Storage: `MessageQueue::ServiceHead` (r:1 w:1) - /// Proof: `MessageQueue::ServiceHead` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `MaxEncodedLen`) - /// Storage: `MessageQueue::Pages` (r:0 w:1) - /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(65585), added: 68060, mode: `MaxEncodedLen`) - fn on_idle_overweight_good_msg() -> Weight { - // Proof Size summary in bytes: - // Measured: `65726` - // Estimated: `69191` - // Minimum execution time: 117_657_000 picoseconds. 
- Weight::from_parts(122_035_000, 0) - .saturating_add(Weight::from_parts(0, 69191)) - .saturating_add(T::DbWeight::get().reads(6)) - .saturating_add(T::DbWeight::get().writes(6)) - } - /// Storage: `DmpQueue::MigrationStatus` (r:1 w:1) - /// Proof: `DmpQueue::MigrationStatus` (`max_values`: Some(1), `max_size`: Some(1028), added: 1523, mode: `MaxEncodedLen`) - /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca754904d6d8c6fe06c4e5965f9b8397421` (r:1 w:0) - /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca754904d6d8c6fe06c4e5965f9b8397421` (r:1 w:0) - /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca70f923ef3252d0166429d36d20ed665a8` (r:1 w:1) - /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca70f923ef3252d0166429d36d20ed665a8` (r:1 w:1) - /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca772275f64c354954352b71eea39cfaca2` (r:1 w:1) - /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca772275f64c354954352b71eea39cfaca2` (r:1 w:1) - fn on_idle_overweight_large_msg() -> Weight { - // Proof Size summary in bytes: - // Measured: `65689` - // Estimated: `69154` - // Minimum execution time: 59_799_000 picoseconds. - Weight::from_parts(61_354_000, 0) - .saturating_add(Weight::from_parts(0, 69154)) - .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(3)) - } -} diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/mod.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/mod.rs index a615f539547..3604ab3c0ff 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/mod.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/mod.rs @@ -21,7 +21,6 @@ use ::pallet_bridge_messages::WeightInfoExt as MessagesWeightInfoExt; use ::pallet_bridge_parachains::WeightInfoExt as ParachainsWeightInfoExt; pub mod block_weights; -pub mod cumulus_pallet_dmp_queue; pub mod cumulus_pallet_parachain_system; pub mod cumulus_pallet_xcmp_queue; pub mod extrinsic_weights; -- GitLab From 891628a7f1afcc210f83e944ddf3dcf4748fd0b6 Mon Sep 17 00:00:00 2001 From: Michal Kucharczyk <1728078+michalkucharczyk@users.noreply.github.com> Date: Fri, 24 Nov 2023 14:06:19 +0100 Subject: [PATCH 114/199] relay-chain-consensus: set a fork_choice (#2462) After #2001 the `fork_choice` strategy may remain uninitialized in the block import pipeline which uses relay-chain verifier. Refer to https://github.com/paritytech/polkadot-sdk/pull/2430#issuecomment-1823979813 for some further discussion. --- cumulus/client/consensus/relay-chain/src/import_queue.rs | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/cumulus/client/consensus/relay-chain/src/import_queue.rs b/cumulus/client/consensus/relay-chain/src/import_queue.rs index 9ee03b95904..f44f4409324 100644 --- a/cumulus/client/consensus/relay-chain/src/import_queue.rs +++ b/cumulus/client/consensus/relay-chain/src/import_queue.rs @@ -55,6 +55,10 @@ where &mut self, mut block_params: BlockImportParams, ) -> Result, String> { + block_params.fork_choice = Some(sc_consensus::ForkChoiceStrategy::Custom( + block_params.origin == sp_consensus::BlockOrigin::NetworkInitialSync, + )); + // Skip checks that include execution, if being told so, or when importing only state. 
// // This is done for example when gap syncing and it is expected that the block after the gap @@ -100,7 +104,6 @@ where } block_params.post_hash = Some(block_params.header.hash()); - Ok(block_params) } } -- GitLab From 803ea7605937f3f452c38dc17c66d73049a7ba00 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Fri, 24 Nov 2023 15:20:21 +0100 Subject: [PATCH 115/199] Improve `UnpinHandleInner` debug (#2485) Printing the `unpin_worker_sender` included the entire stacktrace and that is a little bit too verbose. --- substrate/client/api/src/client.rs | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) diff --git a/substrate/client/api/src/client.rs b/substrate/client/api/src/client.rs index e334f2f9fb4..46232c74539 100644 --- a/substrate/client/api/src/client.rs +++ b/substrate/client/api/src/client.rs @@ -25,7 +25,11 @@ use sp_runtime::{ traits::{Block as BlockT, NumberFor}, Justifications, }; -use std::{collections::HashSet, fmt, sync::Arc}; +use std::{ + collections::HashSet, + fmt::{self, Debug}, + sync::Arc, +}; use crate::{blockchain::Info, notifications::StorageEventStream, FinalizeSummary, ImportSummary}; @@ -271,13 +275,18 @@ impl fmt::Display for UsageInfo { } /// Sends a message to the pinning-worker once dropped to unpin a block in the backend. -#[derive(Debug)] pub struct UnpinHandleInner { /// Hash of the block pinned by this handle hash: Block::Hash, unpin_worker_sender: TracingUnboundedSender, } +impl Debug for UnpinHandleInner { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("UnpinHandleInner").field("pinned_block", &self.hash).finish() + } +} + impl UnpinHandleInner { /// Create a new [`UnpinHandleInner`] pub fn new( -- GitLab From 07ea6da2d8d4f0eb5f563d6803768d4ef0de6cda Mon Sep 17 00:00:00 2001 From: Nazar Mokrynskyi Date: Fri, 24 Nov 2023 16:21:10 +0200 Subject: [PATCH 116/199] Derive `MaxEncodedLen` on `SlotDuration` (#2484) # Description Needed this in my code as part of the larger data structure. --------- Co-authored-by: command-bot <> --- substrate/primitives/consensus/slots/src/lib.rs | 15 ++++++++++++++- 1 file changed, 14 insertions(+), 1 deletion(-) diff --git a/substrate/primitives/consensus/slots/src/lib.rs b/substrate/primitives/consensus/slots/src/lib.rs index a299ce395ea..b9a032c1bcf 100644 --- a/substrate/primitives/consensus/slots/src/lib.rs +++ b/substrate/primitives/consensus/slots/src/lib.rs @@ -121,7 +121,20 @@ impl From for u64 { } /// A slot duration defined in milliseconds. -#[derive(Clone, Copy, Debug, Encode, Decode, Hash, PartialOrd, Ord, PartialEq, Eq, TypeInfo)] +#[derive( + Clone, + Copy, + Debug, + Encode, + Decode, + MaxEncodedLen, + Hash, + PartialOrd, + Ord, + PartialEq, + Eq, + TypeInfo, +)] pub struct SlotDuration(u64); impl SlotDuration { -- GitLab From 90488ff7e32c79a601e95eeee0675a43c17ba157 Mon Sep 17 00:00:00 2001 From: Adrian Catangiu Date: Fri, 24 Nov 2023 17:30:25 +0200 Subject: [PATCH 117/199] pallet-xcm: fix benchmarking (#2489) Use non-zero weight limit in benchmarking `pallet_xcm::execute()` so the given XCM actually executes. --- polkadot/xcm/pallet-xcm/src/benchmarking.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/polkadot/xcm/pallet-xcm/src/benchmarking.rs b/polkadot/xcm/pallet-xcm/src/benchmarking.rs index 3aca24791fc..f0289512399 100644 --- a/polkadot/xcm/pallet-xcm/src/benchmarking.rs +++ b/polkadot/xcm/pallet-xcm/src/benchmarking.rs @@ -168,7 +168,7 @@ benchmarks! 
{ return Err(BenchmarkError::Override(BenchmarkResult::from_weight(Weight::MAX))) } let versioned_msg = VersionedXcm::from(msg); - }: _>(execute_origin, Box::new(versioned_msg), Weight::zero()) + }: _>(execute_origin, Box::new(versioned_msg), Weight::MAX) force_xcm_version { let loc = T::reachable_dest().ok_or( -- GitLab From 8af61d03b62d541370015608b48c7399d5f67320 Mon Sep 17 00:00:00 2001 From: Ankan <10196091+Ank4n@users.noreply.github.com> Date: Fri, 24 Nov 2023 16:31:20 +0100 Subject: [PATCH 118/199] [NPoS] Use EraInfo to manipulate exposure in fast-unstake tests (#2459) The FastUnstake pallet tests were previously directly modifying storage items in the pallet-staking to alter exposure. However, due to the introduction of the [new paged exposure feature](https://github.com/paritytech/polkadot-sdk/pull/1189) these tests were not testing against correct storage items. This issue resulted in a bug that I didn't catch, which has been addressed in [this fix](https://github.com/paritytech/polkadot-sdk/pull/2369). This PR introduces a modification to how the pallet-fast-unstake handles exposure. It now utilizes `pallet-staking::EraInfo` to set or mutate Exposures. --- substrate/frame/fast-unstake/src/mock.rs | 11 ++++++----- substrate/frame/fast-unstake/src/tests.rs | 8 +++++--- substrate/frame/staking/src/lib.rs | 10 +++++----- 3 files changed, 16 insertions(+), 13 deletions(-) diff --git a/substrate/frame/fast-unstake/src/mock.rs b/substrate/frame/fast-unstake/src/mock.rs index df133bdfd47..09a08f222b6 100644 --- a/substrate/frame/fast-unstake/src/mock.rs +++ b/substrate/frame/fast-unstake/src/mock.rs @@ -244,7 +244,7 @@ impl ExtBuilder { (v, Exposure { total: 0, own: 0, others }) }) .for_each(|(validator, exposure)| { - pallet_staking::ErasStakers::::insert(era, validator, exposure); + pallet_staking::EraInfo::::set_exposure(era, &validator, exposure); }); } @@ -342,10 +342,11 @@ pub fn assert_unstaked(stash: &AccountId) { } pub fn create_exposed_nominator(exposed: AccountId, era: u32) { - // create an exposed nominator in era 1 - pallet_staking::ErasStakers::::mutate(era, VALIDATORS_PER_ERA, |expo| { - expo.others.push(IndividualExposure { who: exposed, value: 0 as Balance }); - }); + // create an exposed nominator in passed era + let mut exposure = pallet_staking::EraInfo::::get_full_exposure(era, &VALIDATORS_PER_ERA); + exposure.others.push(IndividualExposure { who: exposed, value: 0 as Balance }); + pallet_staking::EraInfo::::set_exposure(era, &VALIDATORS_PER_ERA, exposure); + Balances::make_free_balance_be(&exposed, 100); assert_ok!(Staking::bond( RuntimeOrigin::signed(exposed), diff --git a/substrate/frame/fast-unstake/src/tests.rs b/substrate/frame/fast-unstake/src/tests.rs index 94ad6a84b85..b19fe3b8c46 100644 --- a/substrate/frame/fast-unstake/src/tests.rs +++ b/substrate/frame/fast-unstake/src/tests.rs @@ -788,10 +788,12 @@ mod on_idle { assert_ok!(FastUnstake::register_fast_unstake(RuntimeOrigin::signed(VALIDATOR_PREFIX))); // but they indeed are exposed! - assert!(pallet_staking::ErasStakers::::contains_key( + assert!(pallet_staking::EraInfo::::get_paged_exposure( BondingDuration::get() - 1, - VALIDATOR_PREFIX - )); + &VALIDATOR_PREFIX, + 0 + ) + .is_some()); // process a block, this validator is exposed and has been slashed. 
next_block(true); diff --git a/substrate/frame/staking/src/lib.rs b/substrate/frame/staking/src/lib.rs index e6250bb9681..41cb2a12c3a 100644 --- a/substrate/frame/staking/src/lib.rs +++ b/substrate/frame/staking/src/lib.rs @@ -724,7 +724,7 @@ pub struct Nominations { /// This is useful where we need to take into account the validator's own stake and total exposure /// in consideration, in addition to the individual nominators backing them. #[derive(Encode, Decode, RuntimeDebug, TypeInfo, PartialEq, Eq)] -struct PagedExposure { +pub struct PagedExposure { exposure_metadata: PagedExposureMetadata, exposure_page: ExposurePage, } @@ -1017,7 +1017,7 @@ where /// Wrapper struct for Era related information. It is not a pure encapsulation as these storage /// items can be accessed directly but nevertheless, its recommended to use `EraInfo` where we /// can and add more functions to it as needed. -pub(crate) struct EraInfo(sp_std::marker::PhantomData); +pub struct EraInfo(sp_std::marker::PhantomData); impl EraInfo { /// Temporary function which looks at both (1) passed param `T::StakingLedger` for legacy /// non-paged rewards, and (2) `T::ClaimedRewards` for paged rewards. This function can be @@ -1047,7 +1047,7 @@ impl EraInfo { /// /// This builds a paged exposure from `PagedExposureMetadata` and `ExposurePage` of the /// validator. For older non-paged exposure, it returns the clipped exposure directly. - pub(crate) fn get_paged_exposure( + pub fn get_paged_exposure( era: EraIndex, validator: &T::AccountId, page: Page, @@ -1082,7 +1082,7 @@ impl EraInfo { } /// Get full exposure of the validator at a given era. - pub(crate) fn get_full_exposure( + pub fn get_full_exposure( era: EraIndex, validator: &T::AccountId, ) -> Exposure> { @@ -1176,7 +1176,7 @@ impl EraInfo { } /// Store exposure for elected validators at start of an era. - pub(crate) fn set_exposure( + pub fn set_exposure( era: EraIndex, validator: &T::AccountId, exposure: Exposure>, -- GitLab From d07186b8e3ec6634187936caaae48bb8558e3346 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?D=C3=B3nal=20Murray?= Date: Fri, 24 Nov 2023 16:48:56 +0000 Subject: [PATCH 119/199] Remove `RuntimeApi` dependency on system parachain runtime code (#2455) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The last issue blocking the removal of the Polkadot and Kusama system parachains from the repo in #1737 is the dependency on the runtime code through the RuntimeApi in `polkadot-parachain`. This PR introduces two fake runtimes to satisfy the build requirements and changes the `new_partial` function so that it is no longer generic over the runtimes. The reason for the second runtime is the different Aura key type used in Polkadot Asset Hub, as the impl for AuraApi depends on this type. If this changes, the `RuntimeApi` generic type parameter could be removed completely from the functions in `service.rs` and specified as a concrete type in `TFullClient`.
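To make the approach concrete before the diff: the stub runtime only has to give the node a type that satisfies the runtime-API trait bounds at compile time; its methods are never called natively (every body in the stubs below is `unimplemented!()`). A minimal, dependency-free sketch of that pattern follows; the trait and type names are illustrative stand-ins, not the actual `sp_api` code added by this patch.

```rust
// Simplified illustration of the "fake runtime" idea (assumed names, not the
// real polkadot-sdk types): the client code is generic over a runtime-API
// trait, but never invokes the native implementation, so a panicking stub is
// enough to satisfy the compiler.

/// Stand-in for a runtime API trait the node is generic over.
trait CoreApi {
    fn version(&self) -> u32;
}

/// Exists only so the node has a concrete type to plug into its generics.
struct FakeRuntime;

impl CoreApi for FakeRuntime {
    fn version(&self) -> u32 {
        // Never reached in practice: real queries go through the actual runtime.
        unimplemented!("fake runtime API is never called natively")
    }
}

/// Analogue of `new_partial`: it only needs the type parameter to compile.
fn new_partial<R: CoreApi>() {
    // Build node components here; no value of `R` is ever constructed.
}

fn main() {
    new_partial::<FakeRuntime>();
}
```

The two real stubs added below differ only in the Aura key type they expose through `AuraApi`, which is why Polkadot Asset Hub needs its own variant.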
--------- Co-authored-by: Bastian Köcher --- Cargo.lock | 9 + cumulus/polkadot-parachain/Cargo.toml | 13 + .../src/chain_spec/bridge_hubs.rs | 5 - cumulus/polkadot-parachain/src/command.rs | 285 +++--------------- .../asset_hub_polkadot_aura.rs | 200 ++++++++++++ .../src/fake_runtime_api/aura.rs | 200 ++++++++++++ .../src/fake_runtime_api/mod.rs | 21 ++ cumulus/polkadot-parachain/src/main.rs | 1 + cumulus/polkadot-parachain/src/service.rs | 24 +- 9 files changed, 497 insertions(+), 261 deletions(-) create mode 100644 cumulus/polkadot-parachain/src/fake_runtime_api/asset_hub_polkadot_aura.rs create mode 100644 cumulus/polkadot-parachain/src/fake_runtime_api/aura.rs create mode 100644 cumulus/polkadot-parachain/src/fake_runtime_api/mod.rs diff --git a/Cargo.lock b/Cargo.lock index ce43f78fa4f..3fbede968bc 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -12964,6 +12964,9 @@ dependencies = [ "cumulus-relay-chain-interface", "frame-benchmarking", "frame-benchmarking-cli", + "frame-support", + "frame-system-rpc-runtime-api", + "frame-try-runtime", "futures", "glutton-runtime", "glutton-westend-runtime", @@ -12971,7 +12974,9 @@ dependencies = [ "jsonrpsee", "log", "nix 0.26.2", + "pallet-transaction-payment", "pallet-transaction-payment-rpc", + "pallet-transaction-payment-rpc-runtime-api", "parachains-common", "parity-scale-codec", "penpal-runtime", @@ -13003,14 +13008,18 @@ dependencies = [ "sp-blockchain", "sp-consensus-aura", "sp-core", + "sp-genesis-builder", + "sp-inherents", "sp-io", "sp-keystore", "sp-offchain", "sp-runtime", "sp-session", + "sp-std 8.0.0", "sp-timestamp", "sp-tracing 10.0.0", "sp-transaction-pool", + "sp-version", "staging-xcm", "substrate-build-script-utils", "substrate-frame-rpc-system", diff --git a/cumulus/polkadot-parachain/Cargo.toml b/cumulus/polkadot-parachain/Cargo.toml index d5deda9e7bf..e23a7aeb22b 100644 --- a/cumulus/polkadot-parachain/Cargo.toml +++ b/cumulus/polkadot-parachain/Cargo.toml @@ -49,8 +49,10 @@ sp-runtime = { path = "../../substrate/primitives/runtime", default-features = f sp-io = { path = "../../substrate/primitives/io" } sp-core = { path = "../../substrate/primitives/core" } sp-session = { path = "../../substrate/primitives/session" } +frame-try-runtime = { path = "../../substrate/frame/try-runtime", optional = true } sc-consensus = { path = "../../substrate/client/consensus/common" } sp-tracing = { path = "../../substrate/primitives/tracing" } +frame-support = { path = "../../substrate/frame/support" } sc-cli = { path = "../../substrate/client/cli" } sc-client-api = { path = "../../substrate/client/api" } sc-executor = { path = "../../substrate/client/executor" } @@ -63,12 +65,19 @@ sc-network-sync = { path = "../../substrate/client/network/sync" } sc-basic-authorship = { path = "../../substrate/client/basic-authorship" } sp-timestamp = { path = "../../substrate/primitives/timestamp" } sp-blockchain = { path = "../../substrate/primitives/blockchain" } +sp-genesis-builder = { path = "../../substrate/primitives/genesis-builder", default-features = false } sp-block-builder = { path = "../../substrate/primitives/block-builder" } sp-keystore = { path = "../../substrate/primitives/keystore" } sc-chain-spec = { path = "../../substrate/client/chain-spec" } sc-rpc = { path = "../../substrate/client/rpc" } +sp-version = { path = "../../substrate/primitives/version" } sc-tracing = { path = "../../substrate/client/tracing" } sp-offchain = { path = "../../substrate/primitives/offchain" } +frame-system-rpc-runtime-api = { path = 
"../../substrate/frame/system/rpc/runtime-api" } +pallet-transaction-payment = { path = "../../substrate/frame/transaction-payment" } +pallet-transaction-payment-rpc-runtime-api = { path = "../../substrate/frame/transaction-payment/rpc/runtime-api" } +sp-std = { path = "../../substrate/primitives/std" } +sp-inherents = { path = "../../substrate/primitives/inherents" } sp-api = { path = "../../substrate/primitives/api" } sp-consensus-aura = { path = "../../substrate/primitives/consensus/aura" } sc-sysinfo = { path = "../../substrate/client/sysinfo" } @@ -126,6 +135,7 @@ runtime-benchmarks = [ "cumulus-primitives-core/runtime-benchmarks", "frame-benchmarking-cli/runtime-benchmarks", "frame-benchmarking/runtime-benchmarks", + "frame-support/runtime-benchmarks", "glutton-runtime/runtime-benchmarks", "glutton-westend-runtime/runtime-benchmarks", "parachains-common/runtime-benchmarks", @@ -149,8 +159,11 @@ try-runtime = [ "collectives-polkadot-runtime/try-runtime", "collectives-westend-runtime/try-runtime", "contracts-rococo-runtime/try-runtime", + "frame-support/try-runtime", + "frame-try-runtime/try-runtime", "glutton-runtime/try-runtime", "glutton-westend-runtime/try-runtime", + "pallet-transaction-payment/try-runtime", "penpal-runtime/try-runtime", "polkadot-cli/try-runtime", "polkadot-service/try-runtime", diff --git a/cumulus/polkadot-parachain/src/chain_spec/bridge_hubs.rs b/cumulus/polkadot-parachain/src/chain_spec/bridge_hubs.rs index 94cef106001..eb253908f5f 100644 --- a/cumulus/polkadot-parachain/src/chain_spec/bridge_hubs.rs +++ b/cumulus/polkadot-parachain/src/chain_spec/bridge_hubs.rs @@ -199,8 +199,6 @@ pub mod rococo { /// Specialized `ChainSpec` for the normal parachain runtime. pub type BridgeHubChainSpec = sc_service::GenericChainSpec<(), Extensions>; - pub type RuntimeApi = bridge_hub_rococo_runtime::RuntimeApi; - pub fn local_config( id: &str, chain_name: &str, @@ -319,7 +317,6 @@ pub mod kusama { /// Specialized `ChainSpec` for the normal parachain runtime. pub type BridgeHubChainSpec = sc_service::GenericChainSpec<(), Extensions>; - pub type RuntimeApi = bridge_hub_kusama_runtime::RuntimeApi; pub fn local_config( id: &str, @@ -429,7 +426,6 @@ pub mod westend { /// Specialized `ChainSpec` for the normal parachain runtime. pub type BridgeHubChainSpec = sc_service::GenericChainSpec; - pub type RuntimeApi = bridge_hub_westend_runtime::RuntimeApi; pub fn local_config( id: &str, @@ -545,7 +541,6 @@ pub mod polkadot { /// Specialized `ChainSpec` for the normal parachain runtime. pub type BridgeHubChainSpec = sc_service::GenericChainSpec<(), Extensions>; - pub type RuntimeApi = bridge_hub_polkadot_runtime::RuntimeApi; pub fn local_config( id: &str, diff --git a/cumulus/polkadot-parachain/src/command.rs b/cumulus/polkadot-parachain/src/command.rs index 7bede22fea7..ea19ad12ba0 100644 --- a/cumulus/polkadot-parachain/src/command.rs +++ b/cumulus/polkadot-parachain/src/command.rs @@ -17,6 +17,9 @@ use crate::{ chain_spec, cli::{Cli, RelayChainCli, Subcommand}, + fake_runtime_api::{ + asset_hub_polkadot_aura::RuntimeApi as AssetHubPolkadotRuntimeApi, aura::RuntimeApi, + }, service::{new_partial, Block}, }; use cumulus_primitives_core::ParaId; @@ -439,128 +442,46 @@ impl SubstrateCli for RelayChainCli { macro_rules! 
construct_partials { ($config:expr, |$partials:ident| $code:expr) => { match $config.chain_spec.runtime() { - Runtime::AssetHubKusama => { - let $partials = new_partial::( - &$config, - crate::service::aura_build_import_queue::<_, AuraId>, - )?; - $code - }, - Runtime::AssetHubRococo => { - let $partials = new_partial::( - &$config, - crate::service::aura_build_import_queue::<_, AuraId>, - )?; - $code - }, - Runtime::AssetHubWestend => { - let $partials = new_partial::( - &$config, - crate::service::aura_build_import_queue::<_, AuraId>, - )?; - $code - }, Runtime::AssetHubPolkadot => { - let $partials = new_partial::( + let $partials = new_partial::( &$config, crate::service::aura_build_import_queue::<_, AssetHubPolkadotAuraId>, )?; $code }, - Runtime::BridgeHub(bridge_hub_runtime_type) => match bridge_hub_runtime_type { - chain_spec::bridge_hubs::BridgeHubRuntimeType::Polkadot | - chain_spec::bridge_hubs::BridgeHubRuntimeType::PolkadotLocal | - chain_spec::bridge_hubs::BridgeHubRuntimeType::PolkadotDevelopment => { - let $partials = new_partial::( - &$config, - crate::service::aura_build_import_queue::<_, AuraId>, - )?; - $code - }, - chain_spec::bridge_hubs::BridgeHubRuntimeType::Kusama | - chain_spec::bridge_hubs::BridgeHubRuntimeType::KusamaLocal | - chain_spec::bridge_hubs::BridgeHubRuntimeType::KusamaDevelopment => { - let $partials = new_partial::( - &$config, - crate::service::aura_build_import_queue::<_, AuraId>, - )?; - $code - }, - chain_spec::bridge_hubs::BridgeHubRuntimeType::Westend | - chain_spec::bridge_hubs::BridgeHubRuntimeType::WestendLocal | - chain_spec::bridge_hubs::BridgeHubRuntimeType::WestendDevelopment => { - let $partials = new_partial::( - &$config, - crate::service::aura_build_import_queue::<_, AuraId>, - )?; - $code - }, - chain_spec::bridge_hubs::BridgeHubRuntimeType::Rococo | - chain_spec::bridge_hubs::BridgeHubRuntimeType::RococoLocal | - chain_spec::bridge_hubs::BridgeHubRuntimeType::RococoDevelopment => { - let $partials = new_partial::( - &$config, - crate::service::aura_build_import_queue::<_, AuraId>, - )?; - $code - }, - }, - Runtime::CollectivesPolkadot => { - let $partials = new_partial::( - &$config, - crate::service::aura_build_import_queue::<_, AuraId>, - )?; - $code - }, + Runtime::AssetHubKusama | + Runtime::AssetHubRococo | + Runtime::AssetHubWestend | + Runtime::BridgeHub(_) | + Runtime::CollectivesPolkadot | Runtime::CollectivesWestend => { - let $partials = new_partial::( + let $partials = new_partial::( &$config, crate::service::aura_build_import_queue::<_, AuraId>, )?; $code }, - Runtime::Shell => { - let $partials = new_partial::( - &$config, - crate::service::shell_build_import_queue, - )?; - $code - }, - Runtime::Seedling => { - let $partials = new_partial::( + Runtime::GluttonWestend | Runtime::Glutton | Runtime::Shell | Runtime::Seedling => { + let $partials = new_partial::( &$config, crate::service::shell_build_import_queue, )?; $code }, Runtime::ContractsRococo => { - let $partials = new_partial::( + let $partials = new_partial::( &$config, crate::service::contracts_rococo_build_import_queue, )?; $code }, Runtime::Penpal(_) | Runtime::Default => { - let $partials = new_partial::( + let $partials = new_partial::( &$config, crate::service::rococo_parachain_build_import_queue, )?; $code }, - Runtime::GluttonWestend => { - let $partials = new_partial::( - &$config, - crate::service::shell_build_import_queue, - )?; - $code - }, - Runtime::Glutton => { - let $partials = new_partial::( - &$config, - 
crate::service::shell_build_import_queue, - )?; - $code - }, } }; } @@ -569,39 +490,9 @@ macro_rules! construct_async_run { (|$components:ident, $cli:ident, $cmd:ident, $config:ident| $( $code:tt )* ) => {{ let runner = $cli.create_runner($cmd)?; match runner.config().chain_spec.runtime() { - Runtime::AssetHubWestend => { - runner.async_run(|$config| { - let $components = new_partial::( - &$config, - crate::service::aura_build_import_queue::<_, AuraId>, - )?; - let task_manager = $components.task_manager; - { $( $code )* }.map(|v| (v, task_manager)) - }) - }, - Runtime::AssetHubRococo => { - runner.async_run(|$config| { - let $components = new_partial::( - &$config, - crate::service::aura_build_import_queue::<_, AuraId>, - )?; - let task_manager = $components.task_manager; - { $( $code )* }.map(|v| (v, task_manager)) - }) - }, - Runtime::AssetHubKusama => { - runner.async_run(|$config| { - let $components = new_partial::( - &$config, - crate::service::aura_build_import_queue::<_, AuraId>, - )?; - let task_manager = $components.task_manager; - { $( $code )* }.map(|v| (v, task_manager)) - }) - }, Runtime::AssetHubPolkadot => { runner.async_run(|$config| { - let $components = new_partial::( + let $components = new_partial::( &$config, crate::service::aura_build_import_queue::<_, AssetHubPolkadotAuraId>, )?; @@ -609,9 +500,14 @@ macro_rules! construct_async_run { { $( $code )* }.map(|v| (v, task_manager)) }) }, - Runtime::CollectivesPolkadot => { + Runtime::AssetHubWestend | + Runtime::AssetHubRococo | + Runtime::AssetHubKusama | + Runtime::CollectivesPolkadot | + Runtime::CollectivesWestend | + Runtime::BridgeHub(_) => { runner.async_run(|$config| { - let $components = new_partial::( + let $components = new_partial::( &$config, crate::service::aura_build_import_queue::<_, AuraId>, )?; @@ -619,39 +515,22 @@ macro_rules! construct_async_run { { $( $code )* }.map(|v| (v, task_manager)) }) }, - Runtime::CollectivesWestend => { - runner.async_run(|$config| { - let $components = new_partial::( - &$config, - crate::service::aura_build_import_queue::<_, AuraId>, - )?; - let task_manager = $components.task_manager; - { $( $code )* }.map(|v| (v, task_manager)) - }) - }, - Runtime::Shell => { - runner.async_run(|$config| { - let $components = new_partial::( - &$config, - crate::service::shell_build_import_queue, - )?; - let task_manager = $components.task_manager; - { $( $code )* }.map(|v| (v, task_manager)) - }) - }, - Runtime::Seedling => { + Runtime::Shell | + Runtime::Seedling | + Runtime::GluttonWestend | + Runtime::Glutton => { runner.async_run(|$config| { - let $components = new_partial::( + let $components = new_partial::( &$config, crate::service::shell_build_import_queue, )?; let task_manager = $components.task_manager; { $( $code )* }.map(|v| (v, task_manager)) }) - }, + } Runtime::ContractsRococo => { runner.async_run(|$config| { - let $components = new_partial::( + let $components = new_partial::( &$config, crate::service::contracts_rococo_build_import_queue, )?; @@ -659,66 +538,10 @@ macro_rules! 
construct_async_run { { $( $code )* }.map(|v| (v, task_manager)) }) }, - Runtime::BridgeHub(bridge_hub_runtime_type) => { - match bridge_hub_runtime_type { - chain_spec::bridge_hubs::BridgeHubRuntimeType::Polkadot | - chain_spec::bridge_hubs::BridgeHubRuntimeType::PolkadotLocal | - chain_spec::bridge_hubs::BridgeHubRuntimeType::PolkadotDevelopment => { - runner.async_run(|$config| { - let $components = new_partial::( - &$config, - crate::service::aura_build_import_queue::<_, AuraId>, - )?; - - let task_manager = $components.task_manager; - { $( $code )* }.map(|v| (v, task_manager)) - }) - }, - chain_spec::bridge_hubs::BridgeHubRuntimeType::Kusama | - chain_spec::bridge_hubs::BridgeHubRuntimeType::KusamaLocal | - chain_spec::bridge_hubs::BridgeHubRuntimeType::KusamaDevelopment => { - runner.async_run(|$config| { - let $components = new_partial::( - &$config, - crate::service::aura_build_import_queue::<_, AuraId>, - )?; - - let task_manager = $components.task_manager; - { $( $code )* }.map(|v| (v, task_manager)) - }) - }, - chain_spec::bridge_hubs::BridgeHubRuntimeType::Westend | - chain_spec::bridge_hubs::BridgeHubRuntimeType::WestendLocal | - chain_spec::bridge_hubs::BridgeHubRuntimeType::WestendDevelopment => { - runner.async_run(|$config| { - let $components = new_partial::( - &$config, - crate::service::aura_build_import_queue::<_, AuraId>, - )?; - - let task_manager = $components.task_manager; - { $( $code )* }.map(|v| (v, task_manager)) - }) - }, - chain_spec::bridge_hubs::BridgeHubRuntimeType::Rococo | - chain_spec::bridge_hubs::BridgeHubRuntimeType::RococoLocal | - chain_spec::bridge_hubs::BridgeHubRuntimeType::RococoDevelopment => { - runner.async_run(|$config| { - let $components = new_partial::( - &$config, - crate::service::aura_build_import_queue::<_, AuraId>, - )?; - - let task_manager = $components.task_manager; - { $( $code )* }.map(|v| (v, task_manager)) - }) - }, - } - }, Runtime::Penpal(_) | Runtime::Default => { runner.async_run(|$config| { let $components = new_partial::< - rococo_parachain_runtime::RuntimeApi, + RuntimeApi, _, >( &$config, @@ -728,26 +551,6 @@ macro_rules! 
construct_async_run { { $( $code )* }.map(|v| (v, task_manager)) }) }, - Runtime::GluttonWestend => { - runner.async_run(|$config| { - let $components = new_partial::( - &$config, - crate::service::shell_build_import_queue, - )?; - let task_manager = $components.task_manager; - { $( $code )* }.map(|v| (v, task_manager)) - }) - }, - Runtime::Glutton => { - runner.async_run(|$config| { - let $components = new_partial::( - &$config, - crate::service::shell_build_import_queue, - )?; - let task_manager = $components.task_manager; - { $( $code )* }.map(|v| (v, task_manager)) - }) - } } }} } @@ -927,28 +730,28 @@ pub fn run() -> Result<()> { match config.chain_spec.runtime() { Runtime::AssetHubPolkadot => crate::service::start_asset_hub_node::< - asset_hub_polkadot_runtime::RuntimeApi, + AssetHubPolkadotRuntimeApi, AssetHubPolkadotAuraId, >(config, polkadot_config, collator_options, id, hwbench) .await .map(|r| r.0) .map_err(Into::into), Runtime::AssetHubKusama => crate::service::start_asset_hub_node::< - asset_hub_kusama_runtime::RuntimeApi, + RuntimeApi, AuraId, >(config, polkadot_config, collator_options, id, hwbench) .await .map(|r| r.0) .map_err(Into::into), Runtime::AssetHubRococo => crate::service::start_asset_hub_node::< - asset_hub_rococo_runtime::RuntimeApi, + RuntimeApi, AuraId, >(config, polkadot_config, collator_options, id, hwbench) .await .map(|r| r.0) .map_err(Into::into), Runtime::AssetHubWestend => crate::service::start_asset_hub_node::< - asset_hub_westend_runtime::RuntimeApi, + RuntimeApi, AuraId, >(config, polkadot_config, collator_options, id, hwbench) .await @@ -956,7 +759,7 @@ pub fn run() -> Result<()> { .map_err(Into::into), Runtime::CollectivesPolkadot => crate::service::start_generic_aura_node::< - collectives_polkadot_runtime::RuntimeApi, + RuntimeApi, AuraId, >(config, polkadot_config, collator_options, id, hwbench) .await @@ -964,14 +767,14 @@ pub fn run() -> Result<()> { .map_err(Into::into), Runtime::CollectivesWestend => crate::service::start_generic_aura_node::< - collectives_westend_runtime::RuntimeApi, + RuntimeApi, AuraId, >(config, polkadot_config, collator_options, id, hwbench) .await .map(|r| r.0) .map_err(Into::into), Runtime::Shell => - crate::service::start_shell_node::( + crate::service::start_shell_node::( config, polkadot_config, collator_options, @@ -982,7 +785,7 @@ pub fn run() -> Result<()> { .map(|r| r.0) .map_err(Into::into), Runtime::Seedling => - crate::service::start_shell_node::( + crate::service::start_shell_node::( config, polkadot_config, collator_options, @@ -1007,7 +810,7 @@ chain_spec::bridge_hubs::BridgeHubRuntimeType::Polkadot | chain_spec::bridge_hubs::BridgeHubRuntimeType::PolkadotLocal | chain_spec::bridge_hubs::BridgeHubRuntimeType::PolkadotDevelopment => crate::service::start_generic_aura_node::< - chain_spec::bridge_hubs::polkadot::RuntimeApi, + RuntimeApi, AuraId, >(config, polkadot_config, collator_options, id, hwbench) .await @@ -1016,7 +819,7 @@ chain_spec::bridge_hubs::BridgeHubRuntimeType::Polkadot | chain_spec::bridge_hubs::BridgeHubRuntimeType::KusamaLocal | chain_spec::bridge_hubs::BridgeHubRuntimeType::KusamaDevelopment => crate::service::start_generic_aura_node::< - chain_spec::bridge_hubs::kusama::RuntimeApi, + RuntimeApi, AuraId, >(config, polkadot_config, collator_options, id, hwbench) .await @@ -1025,7 +828,7 @@ chain_spec::bridge_hubs::BridgeHubRuntimeType::Polkadot | chain_spec::bridge_hubs::BridgeHubRuntimeType::WestendLocal | chain_spec::bridge_hubs::BridgeHubRuntimeType::WestendDevelopment => 
crate::service::start_generic_aura_node::< - chain_spec::bridge_hubs::westend::RuntimeApi, + RuntimeApi, AuraId, >(config, polkadot_config, collator_options, id, hwbench) .await @@ -1034,7 +837,7 @@ chain_spec::bridge_hubs::BridgeHubRuntimeType::Polkadot | chain_spec::bridge_hubs::BridgeHubRuntimeType::RococoLocal | chain_spec::bridge_hubs::BridgeHubRuntimeType::RococoDevelopment => crate::service::start_generic_aura_node::< - chain_spec::bridge_hubs::rococo::RuntimeApi, + RuntimeApi, AuraId, >(config, polkadot_config, collator_options, id, hwbench) .await @@ -1054,7 +857,7 @@ chain_spec::bridge_hubs::BridgeHubRuntimeType::Polkadot | .map_err(Into::into), Runtime::GluttonWestend => crate::service::start_basic_lookahead_node::< - glutton_westend_runtime::RuntimeApi, + RuntimeApi, AuraId, >(config, polkadot_config, collator_options, id, hwbench) .await @@ -1062,7 +865,7 @@ chain_spec::bridge_hubs::BridgeHubRuntimeType::Polkadot | .map_err(Into::into), Runtime::Glutton => crate::service::start_basic_lookahead_node::< - glutton_runtime::RuntimeApi, + RuntimeApi, AuraId, >(config, polkadot_config, collator_options, id, hwbench) .await diff --git a/cumulus/polkadot-parachain/src/fake_runtime_api/asset_hub_polkadot_aura.rs b/cumulus/polkadot-parachain/src/fake_runtime_api/asset_hub_polkadot_aura.rs new file mode 100644 index 00000000000..76dd7347ccb --- /dev/null +++ b/cumulus/polkadot-parachain/src/fake_runtime_api/asset_hub_polkadot_aura.rs @@ -0,0 +1,200 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Cumulus. + +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . + +//! These are used to provide a type that implements these runtime APIs without requiring to import +//! the native runtimes. + +use frame_support::weights::Weight; +use parachains_common::{AccountId, AssetHubPolkadotAuraId, Balance, Nonce}; +use polkadot_primitives::Block; +use sp_core::{crypto::KeyTypeId, OpaqueMetadata}; +use sp_runtime::{ + traits::Block as BlockT, + transaction_validity::{TransactionSource, TransactionValidity}, + ApplyExtrinsicResult, +}; + +pub struct Runtime; + +sp_api::impl_runtime_apis! 
{ + impl sp_api::Core for Runtime { + fn version() -> sp_version::RuntimeVersion { + unimplemented!() + } + + fn execute_block(_: Block) { + unimplemented!() + } + + fn initialize_block(_: &::Header) { + unimplemented!() + } + } + + impl sp_api::Metadata for Runtime { + fn metadata() -> OpaqueMetadata { + unimplemented!() + } + + fn metadata_at_version(_: u32) -> Option { + unimplemented!() + } + + fn metadata_versions() -> sp_std::vec::Vec { + unimplemented!() + } + } + + impl sp_consensus_aura::AuraApi for Runtime { + fn slot_duration() -> sp_consensus_aura::SlotDuration { + unimplemented!() + } + + fn authorities() -> Vec { + unimplemented!() + } + } + + impl cumulus_primitives_aura::AuraUnincludedSegmentApi for Runtime { + fn can_build_upon( + _: ::Hash, + _: cumulus_primitives_aura::Slot, + ) -> bool { + unimplemented!() + } + } + + impl sp_block_builder::BlockBuilder for Runtime { + fn apply_extrinsic(_: ::Extrinsic) -> ApplyExtrinsicResult { + unimplemented!() + } + + fn finalize_block() -> ::Header { + unimplemented!() + } + + fn inherent_extrinsics(_: sp_inherents::InherentData) -> Vec<::Extrinsic> { + unimplemented!() + } + + fn check_inherents(_: Block, _: sp_inherents::InherentData) -> sp_inherents::CheckInherentsResult { + unimplemented!() + } + } + + impl sp_transaction_pool::runtime_api::TaggedTransactionQueue for Runtime { + fn validate_transaction( + _: TransactionSource, + _: ::Extrinsic, + _: ::Hash, + ) -> TransactionValidity { + unimplemented!() + } + } + + impl sp_offchain::OffchainWorkerApi for Runtime { + fn offchain_worker(_: &::Header) { + unimplemented!() + } + } + + impl sp_session::SessionKeys for Runtime { + fn generate_session_keys(_: Option>) -> Vec { + unimplemented!() + } + + fn decode_session_keys( + _: Vec, + ) -> Option, KeyTypeId)>> { + unimplemented!() + } + } + + impl pallet_transaction_payment_rpc_runtime_api::TransactionPaymentApi for Runtime { + fn query_info( + _: ::Extrinsic, + _: u32, + ) -> pallet_transaction_payment_rpc_runtime_api::RuntimeDispatchInfo { + unimplemented!() + } + fn query_fee_details( + _: ::Extrinsic, + _: u32, + ) -> pallet_transaction_payment::FeeDetails { + unimplemented!() + } + fn query_weight_to_fee(_: Weight) -> Balance { + unimplemented!() + } + fn query_length_to_fee(_: u32) -> Balance { + unimplemented!() + } + } + + impl cumulus_primitives_core::CollectCollationInfo for Runtime { + fn collect_collation_info(_: &::Header) -> cumulus_primitives_core::CollationInfo { + unimplemented!() + } + } + + #[cfg(feature = "try-runtime")] + impl frame_try_runtime::TryRuntime for Runtime { + fn on_runtime_upgrade(_: frame_try_runtime::UpgradeCheckSelect) -> (Weight, Weight) { + unimplemented!() + } + + fn execute_block( + _: Block, + _: bool, + _: bool, + _: frame_try_runtime::TryStateSelect, + ) -> Weight { + unimplemented!() + } + } + + impl frame_system_rpc_runtime_api::AccountNonceApi for Runtime { + fn account_nonce(_: AccountId) -> Nonce { + unimplemented!() + } + } + + #[cfg(feature = "runtime-benchmarks")] + impl frame_benchmarking::Benchmark for Runtime { + fn benchmark_metadata(_: bool) -> ( + Vec, + Vec, + ) { + unimplemented!() + } + + fn dispatch_benchmark( + _: frame_benchmarking::BenchmarkConfig + ) -> Result, sp_runtime::RuntimeString> { + unimplemented!() + } + } + + impl sp_genesis_builder::GenesisBuilder for Runtime { + fn create_default_config() -> Vec { + unimplemented!() + } + + fn build_config(_: Vec) -> sp_genesis_builder::Result { + unimplemented!() + } + } +} diff --git 
a/cumulus/polkadot-parachain/src/fake_runtime_api/aura.rs b/cumulus/polkadot-parachain/src/fake_runtime_api/aura.rs new file mode 100644 index 00000000000..0f01b85ebcf --- /dev/null +++ b/cumulus/polkadot-parachain/src/fake_runtime_api/aura.rs @@ -0,0 +1,200 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Cumulus. + +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . + +//! These are used to provide a type that implements these runtime APIs without requiring to import +//! the native runtimes. + +use frame_support::weights::Weight; +use parachains_common::{AccountId, AuraId, Balance, Nonce}; +use polkadot_primitives::Block; +use sp_core::{crypto::KeyTypeId, OpaqueMetadata}; +use sp_runtime::{ + traits::Block as BlockT, + transaction_validity::{TransactionSource, TransactionValidity}, + ApplyExtrinsicResult, +}; + +pub struct Runtime; + +sp_api::impl_runtime_apis! { + impl sp_api::Core for Runtime { + fn version() -> sp_version::RuntimeVersion { + unimplemented!() + } + + fn execute_block(_: Block) { + unimplemented!() + } + + fn initialize_block(_: &::Header) { + unimplemented!() + } + } + + impl sp_api::Metadata for Runtime { + fn metadata() -> OpaqueMetadata { + unimplemented!() + } + + fn metadata_at_version(_: u32) -> Option { + unimplemented!() + } + + fn metadata_versions() -> sp_std::vec::Vec { + unimplemented!() + } + } + + impl sp_consensus_aura::AuraApi for Runtime { + fn slot_duration() -> sp_consensus_aura::SlotDuration { + unimplemented!() + } + + fn authorities() -> Vec { + unimplemented!() + } + } + + impl cumulus_primitives_aura::AuraUnincludedSegmentApi for Runtime { + fn can_build_upon( + _: ::Hash, + _: cumulus_primitives_aura::Slot, + ) -> bool { + unimplemented!() + } + } + + impl sp_block_builder::BlockBuilder for Runtime { + fn apply_extrinsic(_: ::Extrinsic) -> ApplyExtrinsicResult { + unimplemented!() + } + + fn finalize_block() -> ::Header { + unimplemented!() + } + + fn inherent_extrinsics(_: sp_inherents::InherentData) -> Vec<::Extrinsic> { + unimplemented!() + } + + fn check_inherents(_: Block, _: sp_inherents::InherentData) -> sp_inherents::CheckInherentsResult { + unimplemented!() + } + } + + impl sp_transaction_pool::runtime_api::TaggedTransactionQueue for Runtime { + fn validate_transaction( + _: TransactionSource, + _: ::Extrinsic, + _: ::Hash, + ) -> TransactionValidity { + unimplemented!() + } + } + + impl sp_offchain::OffchainWorkerApi for Runtime { + fn offchain_worker(_: &::Header) { + unimplemented!() + } + } + + impl sp_session::SessionKeys for Runtime { + fn generate_session_keys(_: Option>) -> Vec { + unimplemented!() + } + + fn decode_session_keys( + _: Vec, + ) -> Option, KeyTypeId)>> { + unimplemented!() + } + } + + impl pallet_transaction_payment_rpc_runtime_api::TransactionPaymentApi for Runtime { + fn query_info( + _: ::Extrinsic, + _: u32, + ) -> pallet_transaction_payment_rpc_runtime_api::RuntimeDispatchInfo { + unimplemented!() + } + fn query_fee_details( + _: 
::Extrinsic, + _: u32, + ) -> pallet_transaction_payment::FeeDetails { + unimplemented!() + } + fn query_weight_to_fee(_: Weight) -> Balance { + unimplemented!() + } + fn query_length_to_fee(_: u32) -> Balance { + unimplemented!() + } + } + + impl cumulus_primitives_core::CollectCollationInfo for Runtime { + fn collect_collation_info(_: &::Header) -> cumulus_primitives_core::CollationInfo { + unimplemented!() + } + } + + #[cfg(feature = "try-runtime")] + impl frame_try_runtime::TryRuntime for Runtime { + fn on_runtime_upgrade(_: frame_try_runtime::UpgradeCheckSelect) -> (Weight, Weight) { + unimplemented!() + } + + fn execute_block( + _: Block, + _: bool, + _: bool, + _: frame_try_runtime::TryStateSelect, + ) -> Weight { + unimplemented!() + } + } + + impl frame_system_rpc_runtime_api::AccountNonceApi for Runtime { + fn account_nonce(_: AccountId) -> Nonce { + unimplemented!() + } + } + + #[cfg(feature = "runtime-benchmarks")] + impl frame_benchmarking::Benchmark for Runtime { + fn benchmark_metadata(_: bool) -> ( + Vec, + Vec, + ) { + unimplemented!() + } + + fn dispatch_benchmark( + _: frame_benchmarking::BenchmarkConfig + ) -> Result, sp_runtime::RuntimeString> { + unimplemented!() + } + } + + impl sp_genesis_builder::GenesisBuilder for Runtime { + fn create_default_config() -> Vec { + unimplemented!() + } + + fn build_config(_: Vec) -> sp_genesis_builder::Result { + unimplemented!() + } + } +} diff --git a/cumulus/polkadot-parachain/src/fake_runtime_api/mod.rs b/cumulus/polkadot-parachain/src/fake_runtime_api/mod.rs new file mode 100644 index 00000000000..29e2736b06f --- /dev/null +++ b/cumulus/polkadot-parachain/src/fake_runtime_api/mod.rs @@ -0,0 +1,21 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Cumulus. + +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . + +//! In an ideal world this would be one runtime which would simplify the code massively. +//! This is not an ideal world - Polkadot Asset Hub has a different key type. + +pub mod asset_hub_polkadot_aura; +pub mod aura; diff --git a/cumulus/polkadot-parachain/src/main.rs b/cumulus/polkadot-parachain/src/main.rs index 26d7dae4b8a..0757bea84aa 100644 --- a/cumulus/polkadot-parachain/src/main.rs +++ b/cumulus/polkadot-parachain/src/main.rs @@ -22,6 +22,7 @@ mod chain_spec; mod cli; mod command; +mod fake_runtime_api; mod rpc; mod service; diff --git a/cumulus/polkadot-parachain/src/service.rs b/cumulus/polkadot-parachain/src/service.rs index f580835db55..b0fca518201 100644 --- a/cumulus/polkadot-parachain/src/service.rs +++ b/cumulus/polkadot-parachain/src/service.rs @@ -40,7 +40,7 @@ use sp_core::Pair; use jsonrpsee::RpcModule; -use crate::rpc; +use crate::{fake_runtime_api::aura::RuntimeApi, rpc}; pub use parachains_common::{AccountId, Balance, Block, BlockNumber, Hash, Header, Nonce}; use cumulus_client_consensus_relay_chain::Verifier as RelayChainVerifier; @@ -917,8 +917,8 @@ where /// Build the import queue for the rococo parachain runtime. 
pub fn rococo_parachain_build_import_queue( - client: Arc>, - block_import: ParachainBlockImport, + client: Arc>, + block_import: ParachainBlockImport, config: &Configuration, telemetry: Option, task_manager: &TaskManager, @@ -960,11 +960,8 @@ pub async fn start_rococo_parachain_node( collator_options: CollatorOptions, para_id: ParaId, hwbench: Option, -) -> sc_service::error::Result<( - TaskManager, - Arc>, -)> { - start_node_impl::( +) -> sc_service::error::Result<(TaskManager, Arc>)> { + start_node_impl::( parachain_config, polkadot_config, collator_options, @@ -1866,8 +1863,8 @@ where #[allow(clippy::type_complexity)] pub fn contracts_rococo_build_import_queue( - client: Arc>, - block_import: ParachainBlockImport, + client: Arc>, + block_import: ParachainBlockImport, config: &Configuration, telemetry: Option, task_manager: &TaskManager, @@ -1909,11 +1906,8 @@ pub async fn start_contracts_rococo_node( collator_options: CollatorOptions, para_id: ParaId, hwbench: Option, -) -> sc_service::error::Result<( - TaskManager, - Arc>, -)> { - start_contracts_rococo_node_impl::( +) -> sc_service::error::Result<(TaskManager, Arc>)> { + start_contracts_rococo_node_impl::( parachain_config, polkadot_config, collator_options, -- GitLab From 81638d4d5e78b67fda4130308930bb4a19b20144 Mon Sep 17 00:00:00 2001 From: Oliver Tale-Yazdi Date: Fri, 24 Nov 2023 18:22:28 +0100 Subject: [PATCH 120/199] Add missing workspace members (#2491) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The following members have been added: ```pre cumulus/parachains/integration-tests/emulated/chains/parachains/testing/penpal cumulus/parachains/testnets-common polkadot/node/tracking-allocator substrate/frame/examples/frame-crate ``` CI check can be added after https://github.com/paritytech/pipeline-scripts/pull/105 is merged. 
--------- Signed-off-by: Oliver Tale-Yazdi Co-authored-by: Bastian Köcher --- Cargo.toml | 26 +++++++++++++++----------- 1 file changed, 15 insertions(+), 11 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 57079aa4d03..6af3ea4c3cb 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -22,8 +22,8 @@ members = [ "bridges/primitives/chain-bridge-hub-rococo", "bridges/primitives/chain-bridge-hub-westend", "bridges/primitives/chain-kusama", - "bridges/primitives/chain-polkadot", "bridges/primitives/chain-polkadot-bulletin", + "bridges/primitives/chain-polkadot", "bridges/primitives/chain-rococo", "bridges/primitives/chain-westend", "bridges/primitives/header-chain", @@ -60,20 +60,20 @@ members = [ "cumulus/parachain-template/pallets/template", "cumulus/parachain-template/runtime", "cumulus/parachains/common", - "cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo", - "cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend", - "cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo", - "cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend", - "cumulus/parachains/integration-tests/emulated/common", - "cumulus/parachains/integration-tests/emulated/chains/relays/rococo", - "cumulus/parachains/integration-tests/emulated/chains/relays/westend", "cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-rococo", "cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-westend", "cumulus/parachains/integration-tests/emulated/chains/parachains/bridges/bridge-hub-rococo", "cumulus/parachains/integration-tests/emulated/chains/parachains/bridges/bridge-hub-westend", + "cumulus/parachains/integration-tests/emulated/chains/parachains/testing/penpal", + "cumulus/parachains/integration-tests/emulated/chains/relays/rococo", + "cumulus/parachains/integration-tests/emulated/chains/relays/westend", + "cumulus/parachains/integration-tests/emulated/common", "cumulus/parachains/integration-tests/emulated/networks/rococo-system", - "cumulus/parachains/integration-tests/emulated/networks/rococo-westend-system", "cumulus/parachains/integration-tests/emulated/networks/westend-system", + "cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo", + "cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend", + "cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo", + "cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend", "cumulus/parachains/pallets/collective-content", "cumulus/parachains/pallets/parachain-info", "cumulus/parachains/pallets/ping", @@ -98,6 +98,7 @@ members = [ "cumulus/parachains/runtimes/test-utils", "cumulus/parachains/runtimes/testing/penpal", "cumulus/parachains/runtimes/testing/rococo-parachain", + "cumulus/parachains/testnets-common", "cumulus/polkadot-parachain", "cumulus/primitives/aura", "cumulus/primitives/core", @@ -156,6 +157,7 @@ members = [ "polkadot/node/subsystem", "polkadot/node/test/client", "polkadot/node/test/service", + "polkadot/node/tracking-allocator", "polkadot/node/zombienet-backchannel", "polkadot/parachain", "polkadot/parachain/test-parachains", @@ -190,9 +192,9 @@ members = [ "polkadot/xcm/xcm-simulator", "polkadot/xcm/xcm-simulator/example", "polkadot/xcm/xcm-simulator/fuzzer", + "substrate", "substrate/bin/minimal/node", "substrate/bin/minimal/runtime", - "substrate", "substrate/bin/node-template/node", 
"substrate/bin/node-template/pallets/template", "substrate/bin/node-template/runtime", @@ -288,9 +290,9 @@ members = [ "substrate/frame/collective", "substrate/frame/contracts", "substrate/frame/contracts/fixtures", + "substrate/frame/contracts/mock-network", "substrate/frame/contracts/primitives", "substrate/frame/contracts/proc-macro", - "substrate/frame/contracts/mock-network", "substrate/frame/conviction-voting", "substrate/frame/core-fellowship", "substrate/frame/democracy", @@ -305,6 +307,7 @@ members = [ "substrate/frame/examples/basic", "substrate/frame/examples/default-config", "substrate/frame/examples/dev-mode", + "substrate/frame/examples/frame-crate", "substrate/frame/examples/kitchensink", "substrate/frame/examples/offchain-worker", "substrate/frame/examples/split", @@ -470,6 +473,7 @@ members = [ "substrate/utils/frame/try-runtime/cli", "substrate/utils/prometheus", "substrate/utils/wasm-builder", + "cumulus/parachains/integration-tests/emulated/networks/rococo-westend-system", ] default-members = [ "polkadot", "substrate/bin/node/cli" ] -- GitLab From 63f1210d084014e77bc5a33ac8a35e4508587dcb Mon Sep 17 00:00:00 2001 From: Javier Viola Date: Fri, 24 Nov 2023 20:02:27 -0300 Subject: [PATCH 121/199] bump zombienet version `v1.3.83` (#2492) Include fix for `0002-parachains-upgrade-smoke-test` test. --- .github/review-bot.yml | 2 +- .gitlab/pipeline/zombienet.yml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/review-bot.yml b/.github/review-bot.yml index b053ead37fb..c522988f02e 100644 --- a/.github/review-bot.yml +++ b/.github/review-bot.yml @@ -9,7 +9,7 @@ rules: - ^\.config/nextest.toml - ^\.cargo/.* exclude: - - ^./gitlab/pipeline/zombienet.* + - ^\.gitlab/pipeline/zombienet.* minApprovals: 2 type: basic teams: diff --git a/.gitlab/pipeline/zombienet.yml b/.gitlab/pipeline/zombienet.yml index fc3fd125619..558cc549cb0 100644 --- a/.gitlab/pipeline/zombienet.yml +++ b/.gitlab/pipeline/zombienet.yml @@ -1,7 +1,7 @@ .zombienet-refs: extends: .build-refs variables: - ZOMBIENET_IMAGE: "docker.io/paritytech/zombienet:v1.3.82" + ZOMBIENET_IMAGE: "docker.io/paritytech/zombienet:v1.3.83" include: # substrate tests -- GitLab From 5e2734219880b688e63d033968dafe0960f234ad Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Sat, 25 Nov 2023 00:22:52 +0100 Subject: [PATCH 122/199] Do not pollute global base path with export genesis/wasm (#2487) Otherwise the user may runs into weird errors if there is already a db. --- cumulus/client/cli/src/lib.rs | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/cumulus/client/cli/src/lib.rs b/cumulus/client/cli/src/lib.rs index a2238b73b2b..7e78afe6efb 100644 --- a/cumulus/client/cli/src/lib.rs +++ b/cumulus/client/cli/src/lib.rs @@ -215,6 +215,13 @@ impl sc_cli::CliConfiguration for ExportGenesisStateCommand { fn shared_params(&self) -> &sc_cli::SharedParams { &self.shared_params } + + fn base_path(&self) -> sc_cli::Result> { + // As we are just exporting the genesis wasm a tmp database is enough. + // + // As otherwise we may "pollute" the global base path. + Ok(Some(BasePath::new_temp_dir()?)) + } } /// Command for exporting the genesis wasm file. @@ -266,6 +273,13 @@ impl sc_cli::CliConfiguration for ExportGenesisWasmCommand { fn shared_params(&self) -> &sc_cli::SharedParams { &self.shared_params } + + fn base_path(&self) -> sc_cli::Result> { + // As we are just exporting the genesis wasm a tmp database is enough. + // + // As otherwise we may "pollute" the global base path. 
+ Ok(Some(BasePath::new_temp_dir()?)) + } } fn validate_relay_chain_url(arg: &str) -> Result { -- GitLab From ffc64fd255bc3f1d60138e01a2987540c3f72791 Mon Sep 17 00:00:00 2001 From: Javier Viola Date: Fri, 24 Nov 2023 20:42:20 -0300 Subject: [PATCH 123/199] Zombienet: add polkadot-debug image publish as needs for cumulus tests (#2493) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Add `build-push-image-polkadot-debug` job to needs since we changed to use the polkadot-debug image from the current branch for cumulus test and we need to be sure that the image is ready. Fix issues like https://gitlab.parity.io/parity/mirrors/polkadot-sdk/-/jobs/4481059 cc: @bkchr Co-authored-by: Bastian Köcher --- .gitlab/pipeline/zombienet/cumulus.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.gitlab/pipeline/zombienet/cumulus.yml b/.gitlab/pipeline/zombienet/cumulus.yml index 3e4df000b7f..409c0aba68e 100644 --- a/.gitlab/pipeline/zombienet/cumulus.yml +++ b/.gitlab/pipeline/zombienet/cumulus.yml @@ -27,6 +27,8 @@ needs: - job: build-push-image-test-parachain artifacts: true + - job: build-push-image-polkadot-debug + artifacts: true variables: POLKADOT_IMAGE: "docker.io/paritypr/polkadot-debug:${DOCKER_IMAGES_VERSION}" GH_DIR: "https://github.com/paritytech/cumulus/tree/${CI_COMMIT_SHORT_SHA}/zombienet/tests" -- GitLab From d5d15a18029eed59914fbf807956ee6ab3bcabf1 Mon Sep 17 00:00:00 2001 From: Michal Kucharczyk <1728078+michalkucharczyk@users.noreply.github.com> Date: Sat, 25 Nov 2023 09:56:17 +0100 Subject: [PATCH 124/199] polkadot-parachain: one chain-spec for all (#2457) This PR removes some `ChainSpec` types which are not necessary (left-overs from #1256). Currently `ChainSpec` does not have to be generic over the specific `RuntimeGenesisConfig`, it is enough to use single type for all: https://github.com/paritytech/polkadot-sdk/blob/9f787018857660440182142adc806954d7d07709/cumulus/polkadot-parachain/src/chain_spec/mod.rs#L53-L54 related to: https://github.com/paritytech/polkadot-sdk/issues/25 --------- Co-authored-by: command-bot <> --- .../src/chain_spec/asset_hubs.rs | 65 +++++------ .../src/chain_spec/bridge_hubs.rs | 92 +++++---------- .../src/chain_spec/collectives.rs | 22 ++-- .../src/chain_spec/contracts.rs | 17 ++- .../src/chain_spec/glutton.rs | 30 +++-- .../polkadot-parachain/src/chain_spec/mod.rs | 3 + .../src/chain_spec/penpal.rs | 9 +- .../src/chain_spec/rococo_parachain.rs | 12 +- .../src/chain_spec/seedling.rs | 9 +- .../src/chain_spec/shell.rs | 9 +- cumulus/polkadot-parachain/src/command.rs | 110 +++++------------- 11 files changed, 137 insertions(+), 241 deletions(-) diff --git a/cumulus/polkadot-parachain/src/chain_spec/asset_hubs.rs b/cumulus/polkadot-parachain/src/chain_spec/asset_hubs.rs index a8d3d2975ad..2988d6af0d1 100644 --- a/cumulus/polkadot-parachain/src/chain_spec/asset_hubs.rs +++ b/cumulus/polkadot-parachain/src/chain_spec/asset_hubs.rs @@ -15,7 +15,8 @@ // along with Cumulus. If not, see . use crate::chain_spec::{ - get_account_id_from_seed, get_collator_keys_from_seed, Extensions, SAFE_XCM_VERSION, + get_account_id_from_seed, get_collator_keys_from_seed, Extensions, GenericChainSpec, + SAFE_XCM_VERSION, }; use cumulus_primitives_core::ParaId; use hex_literal::hex; @@ -23,16 +24,6 @@ use parachains_common::{AccountId, AssetHubPolkadotAuraId, AuraId, Balance as As use sc_service::ChainType; use sp_core::{crypto::UncheckedInto, sr25519}; -/// Specialized `ChainSpec` for the normal parachain runtime. 
-pub type AssetHubPolkadotChainSpec = - sc_service::GenericChainSpec; -pub type AssetHubKusamaChainSpec = - sc_service::GenericChainSpec; -pub type AssetHubWestendChainSpec = - sc_service::GenericChainSpec; -pub type AssetHubRococoChainSpec = - sc_service::GenericChainSpec; - const ASSET_HUB_POLKADOT_ED: AssetHubBalance = parachains_common::polkadot::currency::EXISTENTIAL_DEPOSIT; const ASSET_HUB_KUSAMA_ED: AssetHubBalance = @@ -72,13 +63,13 @@ pub fn asset_hub_westend_session_keys(keys: AuraId) -> asset_hub_westend_runtime asset_hub_westend_runtime::SessionKeys { aura: keys } } -pub fn asset_hub_polkadot_development_config() -> AssetHubPolkadotChainSpec { +pub fn asset_hub_polkadot_development_config() -> GenericChainSpec { let mut properties = sc_chain_spec::Properties::new(); properties.insert("ss58Format".into(), 0.into()); properties.insert("tokenSymbol".into(), "DOT".into()); properties.insert("tokenDecimals".into(), 10.into()); - AssetHubPolkadotChainSpec::builder( + GenericChainSpec::builder( asset_hub_polkadot_runtime::WASM_BINARY .expect("WASM binary was not built, please build it!"), Extensions { relay_chain: "polkadot-dev".into(), para_id: 1000 }, @@ -104,13 +95,13 @@ pub fn asset_hub_polkadot_development_config() -> AssetHubPolkadotChainSpec { .build() } -pub fn asset_hub_polkadot_local_config() -> AssetHubPolkadotChainSpec { +pub fn asset_hub_polkadot_local_config() -> GenericChainSpec { let mut properties = sc_chain_spec::Properties::new(); properties.insert("ss58Format".into(), 0.into()); properties.insert("tokenSymbol".into(), "DOT".into()); properties.insert("tokenDecimals".into(), 10.into()); - AssetHubPolkadotChainSpec::builder( + GenericChainSpec::builder( asset_hub_polkadot_runtime::WASM_BINARY .expect("WASM binary was not built, please build it!"), Extensions { relay_chain: "polkadot-local".into(), para_id: 1000 }, @@ -152,13 +143,13 @@ pub fn asset_hub_polkadot_local_config() -> AssetHubPolkadotChainSpec { } // Not used for syncing, but just to determine the genesis values set for the upgrade from shell. 
-pub fn asset_hub_polkadot_config() -> AssetHubPolkadotChainSpec { +pub fn asset_hub_polkadot_config() -> GenericChainSpec { let mut properties = sc_chain_spec::Properties::new(); properties.insert("ss58Format".into(), 0.into()); properties.insert("tokenSymbol".into(), "DOT".into()); properties.insert("tokenDecimals".into(), 10.into()); - AssetHubPolkadotChainSpec::builder( + GenericChainSpec::builder( asset_hub_polkadot_runtime::WASM_BINARY .expect("WASM binary was not built, please build it!"), Extensions { relay_chain: "polkadot".into(), para_id: 1000 }, @@ -249,13 +240,13 @@ fn asset_hub_polkadot_genesis( }) } -pub fn asset_hub_kusama_development_config() -> AssetHubKusamaChainSpec { +pub fn asset_hub_kusama_development_config() -> GenericChainSpec { let mut properties = sc_chain_spec::Properties::new(); properties.insert("ss58Format".into(), 2.into()); properties.insert("tokenSymbol".into(), "KSM".into()); properties.insert("tokenDecimals".into(), 12.into()); - AssetHubKusamaChainSpec::builder( + GenericChainSpec::builder( asset_hub_kusama_runtime::WASM_BINARY.expect("WASM binary was not built, please build it!"), Extensions { relay_chain: "kusama-dev".into(), para_id: 1000 }, ) @@ -280,13 +271,13 @@ pub fn asset_hub_kusama_development_config() -> AssetHubKusamaChainSpec { .build() } -pub fn asset_hub_kusama_local_config() -> AssetHubKusamaChainSpec { +pub fn asset_hub_kusama_local_config() -> GenericChainSpec { let mut properties = sc_chain_spec::Properties::new(); properties.insert("ss58Format".into(), 2.into()); properties.insert("tokenSymbol".into(), "KSM".into()); properties.insert("tokenDecimals".into(), 12.into()); - AssetHubKusamaChainSpec::builder( + GenericChainSpec::builder( asset_hub_kusama_runtime::WASM_BINARY.expect("WASM binary was not built, please build it!"), Extensions { relay_chain: "kusama-local".into(), para_id: 1000 }, ) @@ -325,13 +316,13 @@ pub fn asset_hub_kusama_local_config() -> AssetHubKusamaChainSpec { .build() } -pub fn asset_hub_kusama_config() -> AssetHubKusamaChainSpec { +pub fn asset_hub_kusama_config() -> GenericChainSpec { let mut properties = sc_chain_spec::Properties::new(); properties.insert("ss58Format".into(), 2.into()); properties.insert("tokenSymbol".into(), "KSM".into()); properties.insert("tokenDecimals".into(), 12.into()); - AssetHubKusamaChainSpec::builder( + GenericChainSpec::builder( asset_hub_kusama_runtime::WASM_BINARY.expect("WASM binary was not built, please build it!"), Extensions { relay_chain: "kusama".into(), para_id: 1000 }, ) @@ -407,12 +398,12 @@ fn asset_hub_kusama_genesis( }) } -pub fn asset_hub_westend_development_config() -> AssetHubWestendChainSpec { +pub fn asset_hub_westend_development_config() -> GenericChainSpec { let mut properties = sc_chain_spec::Properties::new(); properties.insert("tokenSymbol".into(), "WND".into()); properties.insert("tokenDecimals".into(), 12.into()); - AssetHubWestendChainSpec::builder( + GenericChainSpec::builder( asset_hub_westend_runtime::WASM_BINARY .expect("WASM binary was not built, please build it!"), Extensions { relay_chain: "westend".into(), para_id: 1000 }, @@ -439,12 +430,12 @@ pub fn asset_hub_westend_development_config() -> AssetHubWestendChainSpec { .build() } -pub fn asset_hub_westend_local_config() -> AssetHubWestendChainSpec { +pub fn asset_hub_westend_local_config() -> GenericChainSpec { let mut properties = sc_chain_spec::Properties::new(); properties.insert("tokenSymbol".into(), "WND".into()); properties.insert("tokenDecimals".into(), 12.into()); - 
AssetHubWestendChainSpec::builder( + GenericChainSpec::builder( asset_hub_westend_runtime::WASM_BINARY .expect("WASM binary was not built, please build it!"), Extensions { relay_chain: "westend-local".into(), para_id: 1000 }, @@ -485,12 +476,12 @@ pub fn asset_hub_westend_local_config() -> AssetHubWestendChainSpec { .build() } -pub fn asset_hub_westend_config() -> AssetHubWestendChainSpec { +pub fn asset_hub_westend_config() -> GenericChainSpec { let mut properties = sc_chain_spec::Properties::new(); properties.insert("tokenSymbol".into(), "WND".into()); properties.insert("tokenDecimals".into(), 12.into()); - AssetHubWestendChainSpec::builder( + GenericChainSpec::builder( asset_hub_westend_runtime::WASM_BINARY .expect("WASM binary was not built, please build it!"), Extensions { relay_chain: "westend".into(), para_id: 1000 }, @@ -569,7 +560,7 @@ fn asset_hub_westend_genesis( }) } -pub fn asset_hub_rococo_development_config() -> AssetHubRococoChainSpec { +pub fn asset_hub_rococo_development_config() -> GenericChainSpec { let mut properties = sc_chain_spec::Properties::new(); properties.insert("ss58Format".into(), 42.into()); properties.insert("tokenSymbol".into(), "ROC".into()); @@ -587,8 +578,8 @@ fn asset_hub_rococo_like_development_config( name: &str, chain_id: &str, para_id: u32, -) -> AssetHubRococoChainSpec { - AssetHubRococoChainSpec::builder( +) -> GenericChainSpec { + GenericChainSpec::builder( asset_hub_rococo_runtime::WASM_BINARY.expect("WASM binary was not built, please build it!"), Extensions { relay_chain: "rococo-dev".into(), para_id }, ) @@ -614,7 +605,7 @@ fn asset_hub_rococo_like_development_config( .build() } -pub fn asset_hub_rococo_local_config() -> AssetHubRococoChainSpec { +pub fn asset_hub_rococo_local_config() -> GenericChainSpec { let mut properties = sc_chain_spec::Properties::new(); properties.insert("ss58Format".into(), 42.into()); properties.insert("tokenSymbol".into(), "ROC".into()); @@ -632,8 +623,8 @@ fn asset_hub_rococo_like_local_config( name: &str, chain_id: &str, para_id: u32, -) -> AssetHubRococoChainSpec { - AssetHubRococoChainSpec::builder( +) -> GenericChainSpec { + GenericChainSpec::builder( asset_hub_rococo_runtime::WASM_BINARY.expect("WASM binary was not built, please build it!"), Extensions { relay_chain: "rococo-local".into(), para_id }, ) @@ -673,12 +664,12 @@ fn asset_hub_rococo_like_local_config( .build() } -pub fn asset_hub_rococo_genesis_config() -> AssetHubRococoChainSpec { +pub fn asset_hub_rococo_genesis_config() -> GenericChainSpec { let mut properties = sc_chain_spec::Properties::new(); properties.insert("tokenSymbol".into(), "ROC".into()); properties.insert("tokenDecimals".into(), 12.into()); let para_id = 1000; - AssetHubRococoChainSpec::builder( + GenericChainSpec::builder( asset_hub_rococo_runtime::WASM_BINARY.expect("WASM binary was not built, please build it!"), Extensions { relay_chain: "rococo".into(), para_id }, ) diff --git a/cumulus/polkadot-parachain/src/chain_spec/bridge_hubs.rs b/cumulus/polkadot-parachain/src/chain_spec/bridge_hubs.rs index eb253908f5f..377ceb20e1e 100644 --- a/cumulus/polkadot-parachain/src/chain_spec/bridge_hubs.rs +++ b/cumulus/polkadot-parachain/src/chain_spec/bridge_hubs.rs @@ -14,12 +14,12 @@ // You should have received a copy of the GNU General Public License // along with Cumulus. If not, see . 
-use crate::chain_spec::{get_account_id_from_seed, get_collator_keys_from_seed}; +use crate::chain_spec::{get_account_id_from_seed, get_collator_keys_from_seed, GenericChainSpec}; use cumulus_primitives_core::ParaId; use parachains_common::Balance as BridgeHubBalance; use sc_chain_spec::ChainSpec; use sp_core::sr25519; -use std::{path::PathBuf, str::FromStr}; +use std::str::FromStr; /// Collects all supported BridgeHub configurations #[derive(Debug, PartialEq)] @@ -71,33 +71,11 @@ impl FromStr for BridgeHubRuntimeType { impl BridgeHubRuntimeType { pub const ID_PREFIX: &'static str = "bridge-hub"; - pub fn chain_spec_from_json_file(&self, path: PathBuf) -> Result, String> { - match self { - BridgeHubRuntimeType::Polkadot | - BridgeHubRuntimeType::PolkadotLocal | - BridgeHubRuntimeType::PolkadotDevelopment => - Ok(Box::new(polkadot::BridgeHubChainSpec::from_json_file(path)?)), - BridgeHubRuntimeType::Kusama | - BridgeHubRuntimeType::KusamaLocal | - BridgeHubRuntimeType::KusamaDevelopment => - Ok(Box::new(kusama::BridgeHubChainSpec::from_json_file(path)?)), - BridgeHubRuntimeType::Westend | - BridgeHubRuntimeType::WestendLocal | - BridgeHubRuntimeType::WestendDevelopment => - Ok(Box::new(westend::BridgeHubChainSpec::from_json_file(path)?)), - BridgeHubRuntimeType::Rococo | - BridgeHubRuntimeType::RococoLocal | - BridgeHubRuntimeType::RococoDevelopment => - Ok(Box::new(rococo::BridgeHubChainSpec::from_json_file(path)?)), - } - } - pub fn load_config(&self) -> Result, String> { match self { - BridgeHubRuntimeType::Polkadot => - Ok(Box::new(polkadot::BridgeHubChainSpec::from_json_bytes( - &include_bytes!("../../chain-specs/bridge-hub-polkadot.json")[..], - )?)), + BridgeHubRuntimeType::Polkadot => Ok(Box::new(GenericChainSpec::from_json_bytes( + &include_bytes!("../../chain-specs/bridge-hub-polkadot.json")[..], + )?)), BridgeHubRuntimeType::PolkadotLocal => Ok(Box::new(polkadot::local_config( polkadot::BRIDGE_HUB_POLKADOT_LOCAL, "Polkadot BridgeHub Local", @@ -110,10 +88,9 @@ impl BridgeHubRuntimeType { "polkadot-dev", ParaId::new(1002), ))), - BridgeHubRuntimeType::Kusama => - Ok(Box::new(kusama::BridgeHubChainSpec::from_json_bytes( - &include_bytes!("../../chain-specs/bridge-hub-kusama.json")[..], - )?)), + BridgeHubRuntimeType::Kusama => Ok(Box::new(GenericChainSpec::from_json_bytes( + &include_bytes!("../../chain-specs/bridge-hub-kusama.json")[..], + )?)), BridgeHubRuntimeType::KusamaLocal => Ok(Box::new(kusama::local_config( kusama::BRIDGE_HUB_KUSAMA_LOCAL, "Kusama BridgeHub Local", @@ -126,10 +103,9 @@ impl BridgeHubRuntimeType { "kusama-dev", ParaId::new(1003), ))), - BridgeHubRuntimeType::Westend => - Ok(Box::new(westend::BridgeHubChainSpec::from_json_bytes( - &include_bytes!("../../chain-specs/bridge-hub-westend.json")[..], - )?)), + BridgeHubRuntimeType::Westend => Ok(Box::new(GenericChainSpec::from_json_bytes( + &include_bytes!("../../chain-specs/bridge-hub-westend.json")[..], + )?)), BridgeHubRuntimeType::WestendLocal => Ok(Box::new(westend::local_config( westend::BRIDGE_HUB_WESTEND_LOCAL, "Westend BridgeHub Local", @@ -144,10 +120,9 @@ impl BridgeHubRuntimeType { ParaId::new(1002), Some("Bob".to_string()), ))), - BridgeHubRuntimeType::Rococo => - Ok(Box::new(rococo::BridgeHubChainSpec::from_json_bytes( - &include_bytes!("../../chain-specs/bridge-hub-rococo.json")[..], - )?)), + BridgeHubRuntimeType::Rococo => Ok(Box::new(GenericChainSpec::from_json_bytes( + &include_bytes!("../../chain-specs/bridge-hub-rococo.json")[..], + )?)), BridgeHubRuntimeType::RococoLocal => 
Ok(Box::new(rococo::local_config( rococo::BRIDGE_HUB_ROCOCO_LOCAL, "Rococo BridgeHub Local", @@ -184,7 +159,7 @@ fn ensure_id(id: &str) -> Result<&str, String> { /// Sub-module for Rococo setup pub mod rococo { use super::{get_account_id_from_seed, get_collator_keys_from_seed, sr25519, ParaId}; - use crate::chain_spec::{Extensions, SAFE_XCM_VERSION}; + use crate::chain_spec::{Extensions, GenericChainSpec, SAFE_XCM_VERSION}; use parachains_common::{AccountId, AuraId}; use sc_chain_spec::ChainType; @@ -196,9 +171,6 @@ pub mod rococo { const BRIDGE_HUB_ROCOCO_ED: BridgeHubBalance = parachains_common::rococo::currency::EXISTENTIAL_DEPOSIT; - /// Specialized `ChainSpec` for the normal parachain runtime. - pub type BridgeHubChainSpec = sc_service::GenericChainSpec<(), Extensions>; - pub fn local_config( id: &str, chain_name: &str, @@ -206,7 +178,7 @@ pub mod rococo { para_id: ParaId, bridges_pallet_owner_seed: Option, modify_props: ModifyProperties, - ) -> BridgeHubChainSpec { + ) -> GenericChainSpec { // Rococo defaults let mut properties = sc_chain_spec::Properties::new(); properties.insert("ss58Format".into(), 42.into()); @@ -214,7 +186,7 @@ pub mod rococo { properties.insert("tokenDecimals".into(), 12.into()); modify_props(&mut properties); - BridgeHubChainSpec::builder( + GenericChainSpec::builder( bridge_hub_rococo_runtime::WASM_BINARY .expect("WASM binary was not built, please build it!"), Extensions { relay_chain: relay_chain.to_string(), para_id: para_id.into() }, @@ -303,7 +275,8 @@ pub mod rococo { pub mod kusama { use super::{BridgeHubBalance, ParaId}; use crate::chain_spec::{ - get_account_id_from_seed, get_collator_keys_from_seed, Extensions, SAFE_XCM_VERSION, + get_account_id_from_seed, get_collator_keys_from_seed, Extensions, GenericChainSpec, + SAFE_XCM_VERSION, }; use parachains_common::{AccountId, AuraId}; use sc_chain_spec::ChainType; @@ -315,21 +288,18 @@ pub mod kusama { const BRIDGE_HUB_KUSAMA_ED: BridgeHubBalance = parachains_common::kusama::currency::EXISTENTIAL_DEPOSIT; - /// Specialized `ChainSpec` for the normal parachain runtime. - pub type BridgeHubChainSpec = sc_service::GenericChainSpec<(), Extensions>; - pub fn local_config( id: &str, chain_name: &str, relay_chain: &str, para_id: ParaId, - ) -> BridgeHubChainSpec { + ) -> GenericChainSpec { let mut properties = sc_chain_spec::Properties::new(); properties.insert("ss58Format".into(), 2.into()); properties.insert("tokenSymbol".into(), "KSM".into()); properties.insert("tokenDecimals".into(), 12.into()); - BridgeHubChainSpec::builder( + GenericChainSpec::builder( bridge_hub_kusama_runtime::WASM_BINARY .expect("WASM binary was not built, please build it!"), Extensions { relay_chain: relay_chain.to_string(), para_id: para_id.into() }, @@ -411,7 +381,7 @@ pub mod kusama { /// Sub-module for Westend setup. pub mod westend { use super::{get_account_id_from_seed, get_collator_keys_from_seed, sr25519, ParaId}; - use crate::chain_spec::{Extensions, SAFE_XCM_VERSION}; + use crate::chain_spec::{Extensions, GenericChainSpec, SAFE_XCM_VERSION}; use parachains_common::{AccountId, AuraId}; use sc_chain_spec::ChainType; @@ -423,22 +393,18 @@ pub mod westend { const BRIDGE_HUB_WESTEND_ED: BridgeHubBalance = parachains_common::westend::currency::EXISTENTIAL_DEPOSIT; - /// Specialized `ChainSpec` for the normal parachain runtime. 
- pub type BridgeHubChainSpec = - sc_service::GenericChainSpec; - pub fn local_config( id: &str, chain_name: &str, relay_chain: &str, para_id: ParaId, bridges_pallet_owner_seed: Option, - ) -> BridgeHubChainSpec { + ) -> GenericChainSpec { let mut properties = sc_chain_spec::Properties::new(); properties.insert("tokenSymbol".into(), "WND".into()); properties.insert("tokenDecimals".into(), 12.into()); - BridgeHubChainSpec::builder( + GenericChainSpec::builder( bridge_hub_westend_runtime::WASM_BINARY .expect("WASM binary was not build, please build it!"), Extensions { relay_chain: relay_chain.to_string(), para_id: para_id.into() }, @@ -527,7 +493,8 @@ pub mod westend { pub mod polkadot { use super::{BridgeHubBalance, ParaId}; use crate::chain_spec::{ - get_account_id_from_seed, get_collator_keys_from_seed, Extensions, SAFE_XCM_VERSION, + get_account_id_from_seed, get_collator_keys_from_seed, Extensions, GenericChainSpec, + SAFE_XCM_VERSION, }; use parachains_common::{AccountId, AuraId}; use sc_chain_spec::ChainType; @@ -539,21 +506,18 @@ pub mod polkadot { const BRIDGE_HUB_POLKADOT_ED: BridgeHubBalance = parachains_common::polkadot::currency::EXISTENTIAL_DEPOSIT; - /// Specialized `ChainSpec` for the normal parachain runtime. - pub type BridgeHubChainSpec = sc_service::GenericChainSpec<(), Extensions>; - pub fn local_config( id: &str, chain_name: &str, relay_chain: &str, para_id: ParaId, - ) -> BridgeHubChainSpec { + ) -> GenericChainSpec { let mut properties = sc_chain_spec::Properties::new(); properties.insert("ss58Format".into(), 0.into()); properties.insert("tokenSymbol".into(), "DOT".into()); properties.insert("tokenDecimals".into(), 10.into()); - BridgeHubChainSpec::builder( + GenericChainSpec::builder( bridge_hub_polkadot_runtime::WASM_BINARY .expect("WASM binary was not built, please build it!"), Extensions { relay_chain: relay_chain.to_string(), para_id: para_id.into() }, diff --git a/cumulus/polkadot-parachain/src/chain_spec/collectives.rs b/cumulus/polkadot-parachain/src/chain_spec/collectives.rs index 07bd742fa8e..ac75a40ebde 100644 --- a/cumulus/polkadot-parachain/src/chain_spec/collectives.rs +++ b/cumulus/polkadot-parachain/src/chain_spec/collectives.rs @@ -15,16 +15,14 @@ // along with Cumulus. If not, see . 
use crate::chain_spec::{ - get_account_id_from_seed, get_collator_keys_from_seed, Extensions, SAFE_XCM_VERSION, + get_account_id_from_seed, get_collator_keys_from_seed, Extensions, GenericChainSpec, + SAFE_XCM_VERSION, }; use cumulus_primitives_core::ParaId; use parachains_common::{AccountId, AuraId, Balance as CollectivesBalance}; use sc_service::ChainType; use sp_core::sr25519; -pub type CollectivesPolkadotChainSpec = sc_service::GenericChainSpec<(), Extensions>; -pub type CollectivesWestendChainSpec = sc_service::GenericChainSpec<(), Extensions>; - const COLLECTIVES_POLKADOT_ED: CollectivesBalance = parachains_common::polkadot::currency::EXISTENTIAL_DEPOSIT; const COLLECTIVES_WESTEND_ED: CollectivesBalance = @@ -39,13 +37,13 @@ pub fn collectives_polkadot_session_keys( collectives_polkadot_runtime::SessionKeys { aura: keys } } -pub fn collectives_polkadot_development_config() -> CollectivesPolkadotChainSpec { +pub fn collectives_polkadot_development_config() -> GenericChainSpec { let mut properties = sc_chain_spec::Properties::new(); properties.insert("ss58Format".into(), 0.into()); properties.insert("tokenSymbol".into(), "DOT".into()); properties.insert("tokenDecimals".into(), 10.into()); - CollectivesPolkadotChainSpec::builder( + GenericChainSpec::builder( collectives_polkadot_runtime::WASM_BINARY .expect("WASM binary was not built, please build it!"), Extensions { relay_chain: "polkadot-dev".into(), para_id: 1002 }, @@ -75,13 +73,13 @@ pub fn collectives_polkadot_development_config() -> CollectivesPolkadotChainSpec } /// Collectives Polkadot Local Config. -pub fn collectives_polkadot_local_config() -> CollectivesPolkadotChainSpec { +pub fn collectives_polkadot_local_config() -> GenericChainSpec { let mut properties = sc_chain_spec::Properties::new(); properties.insert("ss58Format".into(), 0.into()); properties.insert("tokenSymbol".into(), "DOT".into()); properties.insert("tokenDecimals".into(), 10.into()); - CollectivesPolkadotChainSpec::builder( + GenericChainSpec::builder( collectives_polkadot_runtime::WASM_BINARY .expect("WASM binary was not built, please build it!"), Extensions { relay_chain: "polkadot-local".into(), para_id: 1002 }, @@ -169,13 +167,13 @@ pub fn collectives_westend_session_keys(keys: AuraId) -> collectives_westend_run collectives_westend_runtime::SessionKeys { aura: keys } } -pub fn collectives_westend_development_config() -> CollectivesWestendChainSpec { +pub fn collectives_westend_development_config() -> GenericChainSpec { let mut properties = sc_chain_spec::Properties::new(); properties.insert("ss58Format".into(), 42.into()); properties.insert("tokenSymbol".into(), "WND".into()); properties.insert("tokenDecimals".into(), 12.into()); - CollectivesWestendChainSpec::builder( + GenericChainSpec::builder( collectives_westend_runtime::WASM_BINARY .expect("WASM binary was not built, please build it!"), Extensions { relay_chain: "westend-dev".into(), para_id: 1002 }, @@ -205,13 +203,13 @@ pub fn collectives_westend_development_config() -> CollectivesWestendChainSpec { } /// Collectives Westend Local Config. 
-pub fn collectives_westend_local_config() -> CollectivesWestendChainSpec { +pub fn collectives_westend_local_config() -> GenericChainSpec { let mut properties = sc_chain_spec::Properties::new(); properties.insert("ss58Format".into(), 42.into()); properties.insert("tokenSymbol".into(), "WND".into()); properties.insert("tokenDecimals".into(), 12.into()); - CollectivesWestendChainSpec::builder( + GenericChainSpec::builder( collectives_westend_runtime::WASM_BINARY .expect("WASM binary was not built, please build it!"), Extensions { relay_chain: "westend-local".into(), para_id: 1002 }, diff --git a/cumulus/polkadot-parachain/src/chain_spec/contracts.rs b/cumulus/polkadot-parachain/src/chain_spec/contracts.rs index 7ca66354fbf..87ac1ed2fa1 100644 --- a/cumulus/polkadot-parachain/src/chain_spec/contracts.rs +++ b/cumulus/polkadot-parachain/src/chain_spec/contracts.rs @@ -15,7 +15,8 @@ // along with Cumulus. If not, see . use crate::chain_spec::{ - get_account_id_from_seed, get_collator_keys_from_seed, Extensions, SAFE_XCM_VERSION, + get_account_id_from_seed, get_collator_keys_from_seed, Extensions, GenericChainSpec, + SAFE_XCM_VERSION, }; use cumulus_primitives_core::ParaId; use hex_literal::hex; @@ -23,8 +24,6 @@ use parachains_common::{AccountId, AuraId}; use sc_service::ChainType; use sp_core::{crypto::UncheckedInto, sr25519}; -pub type ContractsRococoChainSpec = sc_service::GenericChainSpec<(), Extensions>; - /// No relay chain suffix because the id is the same over all relay chains. const CONTRACTS_PARACHAIN_ID: u32 = 1002; @@ -32,12 +31,12 @@ const CONTRACTS_PARACHAIN_ID: u32 = 1002; const CONTRACTS_ROCOCO_ED: contracts_rococo_runtime::Balance = parachains_common::rococo::currency::EXISTENTIAL_DEPOSIT; -pub fn contracts_rococo_development_config() -> ContractsRococoChainSpec { +pub fn contracts_rococo_development_config() -> GenericChainSpec { let mut properties = sc_chain_spec::Properties::new(); properties.insert("tokenSymbol".into(), "ROC".into()); properties.insert("tokenDecimals".into(), 12.into()); - ContractsRococoChainSpec::builder( + GenericChainSpec::builder( contracts_rococo_runtime::WASM_BINARY.expect("WASM binary was not built, please build it!"), Extensions { relay_chain: "rococo-local".into(), // You MUST set this to the correct network! @@ -79,12 +78,12 @@ pub fn contracts_rococo_development_config() -> ContractsRococoChainSpec { .build() } -pub fn contracts_rococo_local_config() -> ContractsRococoChainSpec { +pub fn contracts_rococo_local_config() -> GenericChainSpec { let mut properties = sc_chain_spec::Properties::new(); properties.insert("tokenSymbol".into(), "ROC".into()); properties.insert("tokenDecimals".into(), 12.into()); - ContractsRococoChainSpec::builder( + GenericChainSpec::builder( contracts_rococo_runtime::WASM_BINARY.expect("WASM binary was not built, please build it!"), Extensions { relay_chain: "rococo-local".into(), // You MUST set this to the correct network! 
@@ -126,13 +125,13 @@ pub fn contracts_rococo_local_config() -> ContractsRococoChainSpec { .build() } -pub fn contracts_rococo_config() -> ContractsRococoChainSpec { +pub fn contracts_rococo_config() -> GenericChainSpec { // Give your base currency a unit name and decimal places let mut properties = sc_chain_spec::Properties::new(); properties.insert("tokenSymbol".into(), "ROC".into()); properties.insert("tokenDecimals".into(), 12.into()); - ContractsRococoChainSpec::builder( + GenericChainSpec::builder( contracts_rococo_runtime::WASM_BINARY.expect("WASM binary was not built, please build it!"), Extensions { relay_chain: "rococo".into(), para_id: CONTRACTS_PARACHAIN_ID } ) diff --git a/cumulus/polkadot-parachain/src/chain_spec/glutton.rs b/cumulus/polkadot-parachain/src/chain_spec/glutton.rs index aff1358d1ae..8eced8d8f81 100644 --- a/cumulus/polkadot-parachain/src/chain_spec/glutton.rs +++ b/cumulus/polkadot-parachain/src/chain_spec/glutton.rs @@ -14,7 +14,7 @@ // You should have received a copy of the GNU General Public License // along with Cumulus. If not, see . -use crate::chain_spec::{get_account_id_from_seed, Extensions}; +use crate::chain_spec::{get_account_id_from_seed, Extensions, GenericChainSpec}; use cumulus_primitives_core::ParaId; use parachains_common::AuraId; use sc_service::ChainType; @@ -22,12 +22,8 @@ use sp_core::sr25519; use super::get_collator_keys_from_seed; -/// Specialized `ChainSpec` for the Glutton parachain runtime. -pub type GluttonChainSpec = sc_service::GenericChainSpec<(), Extensions>; -pub type GluttonWestendChainSpec = sc_service::GenericChainSpec<(), Extensions>; - -pub fn glutton_development_config(para_id: ParaId) -> GluttonChainSpec { - GluttonChainSpec::builder( +pub fn glutton_development_config(para_id: ParaId) -> GenericChainSpec { + GenericChainSpec::builder( glutton_runtime::WASM_BINARY.expect("WASM binary was not built, please build it!"), Extensions { relay_chain: "kusama-dev".into(), para_id: para_id.into() }, ) @@ -41,8 +37,8 @@ pub fn glutton_development_config(para_id: ParaId) -> GluttonChainSpec { .build() } -pub fn glutton_local_config(para_id: ParaId) -> GluttonChainSpec { - GluttonChainSpec::builder( +pub fn glutton_local_config(para_id: ParaId) -> GenericChainSpec { + GenericChainSpec::builder( glutton_runtime::WASM_BINARY.expect("WASM binary was not built, please build it!"), Extensions { relay_chain: "kusama-local".into(), para_id: para_id.into() }, ) @@ -59,11 +55,11 @@ pub fn glutton_local_config(para_id: ParaId) -> GluttonChainSpec { .build() } -pub fn glutton_config(para_id: ParaId) -> GluttonChainSpec { +pub fn glutton_config(para_id: ParaId) -> GenericChainSpec { let mut properties = sc_chain_spec::Properties::new(); properties.insert("ss58Format".into(), 2.into()); - GluttonChainSpec::builder( + GenericChainSpec::builder( glutton_runtime::WASM_BINARY.expect("WASM binary was not built, please build it!"), Extensions { relay_chain: "kusama".into(), para_id: para_id.into() }, ) @@ -94,8 +90,8 @@ fn glutton_genesis(parachain_id: ParaId, collators: Vec) -> serde_json:: }) } -pub fn glutton_westend_development_config(para_id: ParaId) -> GluttonWestendChainSpec { - GluttonWestendChainSpec::builder( +pub fn glutton_westend_development_config(para_id: ParaId) -> GenericChainSpec { + GenericChainSpec::builder( glutton_westend_runtime::WASM_BINARY.expect("WASM binary was not built, please build it!"), Extensions { relay_chain: "westend-dev".into(), para_id: para_id.into() }, ) @@ -109,8 +105,8 @@ pub fn 
glutton_westend_development_config(para_id: ParaId) -> GluttonWestendChai .build() } -pub fn glutton_westend_local_config(para_id: ParaId) -> GluttonWestendChainSpec { - GluttonWestendChainSpec::builder( +pub fn glutton_westend_local_config(para_id: ParaId) -> GenericChainSpec { + GenericChainSpec::builder( glutton_westend_runtime::WASM_BINARY.expect("WASM binary was not built, please build it!"), Extensions { relay_chain: "westend-local".into(), para_id: para_id.into() }, ) @@ -127,11 +123,11 @@ pub fn glutton_westend_local_config(para_id: ParaId) -> GluttonWestendChainSpec .build() } -pub fn glutton_westend_config(para_id: ParaId) -> GluttonWestendChainSpec { +pub fn glutton_westend_config(para_id: ParaId) -> GenericChainSpec { let mut properties = sc_chain_spec::Properties::new(); properties.insert("ss58Format".into(), 42.into()); - GluttonChainSpec::builder( + GenericChainSpec::builder( glutton_westend_runtime::WASM_BINARY.expect("WASM binary was not built, please build it!"), Extensions { relay_chain: "westend".into(), para_id: para_id.into() }, ) diff --git a/cumulus/polkadot-parachain/src/chain_spec/mod.rs b/cumulus/polkadot-parachain/src/chain_spec/mod.rs index 9cd0a37ad63..e8ed8a74ed7 100644 --- a/cumulus/polkadot-parachain/src/chain_spec/mod.rs +++ b/cumulus/polkadot-parachain/src/chain_spec/mod.rs @@ -50,6 +50,9 @@ impl Extensions { } } +/// Generic chain spec for all polkadot-parachain runtimes +pub type GenericChainSpec = sc_service::GenericChainSpec<(), Extensions>; + /// Helper function to generate a crypto pair from seed pub fn get_from_seed(seed: &str) -> ::Public { TPublic::Pair::from_string(&format!("//{}", seed), None) diff --git a/cumulus/polkadot-parachain/src/chain_spec/penpal.rs b/cumulus/polkadot-parachain/src/chain_spec/penpal.rs index 2e35ee231df..cb1cb632d63 100644 --- a/cumulus/polkadot-parachain/src/chain_spec/penpal.rs +++ b/cumulus/polkadot-parachain/src/chain_spec/penpal.rs @@ -15,23 +15,22 @@ // along with Cumulus. If not, see . use crate::chain_spec::{ - get_account_id_from_seed, get_collator_keys_from_seed, Extensions, SAFE_XCM_VERSION, + get_account_id_from_seed, get_collator_keys_from_seed, Extensions, GenericChainSpec, + SAFE_XCM_VERSION, }; use cumulus_primitives_core::ParaId; use parachains_common::{AccountId, AuraId}; use sc_service::ChainType; use sp_core::sr25519; -/// Specialized `ChainSpec` for the normal parachain runtime. -pub type PenpalChainSpec = sc_service::GenericChainSpec<(), Extensions>; -pub fn get_penpal_chain_spec(id: ParaId, relay_chain: &str) -> PenpalChainSpec { +pub fn get_penpal_chain_spec(id: ParaId, relay_chain: &str) -> GenericChainSpec { // Give your base currency a unit name and decimal places let mut properties = sc_chain_spec::Properties::new(); properties.insert("tokenSymbol".into(), "UNIT".into()); properties.insert("tokenDecimals".into(), 12u32.into()); properties.insert("ss58Format".into(), 42u32.into()); - PenpalChainSpec::builder( + GenericChainSpec::builder( penpal_runtime::WASM_BINARY.expect("WASM binary was not built, please build it!"), Extensions { relay_chain: relay_chain.into(), // You MUST set this to the correct network! diff --git a/cumulus/polkadot-parachain/src/chain_spec/rococo_parachain.rs b/cumulus/polkadot-parachain/src/chain_spec/rococo_parachain.rs index c2ba4431456..0434e5f7be8 100644 --- a/cumulus/polkadot-parachain/src/chain_spec/rococo_parachain.rs +++ b/cumulus/polkadot-parachain/src/chain_spec/rococo_parachain.rs @@ -16,7 +16,7 @@ //! 
ChainSpecs dedicated to Rococo parachain setups (for testing and example purposes) -use crate::chain_spec::{get_from_seed, Extensions, SAFE_XCM_VERSION}; +use crate::chain_spec::{get_from_seed, Extensions, GenericChainSpec, SAFE_XCM_VERSION}; use cumulus_primitives_core::ParaId; use hex_literal::hex; use parachains_common::AccountId; @@ -25,10 +25,8 @@ use rococo_parachain_runtime::AuraId; use sc_chain_spec::ChainType; use sp_core::{crypto::UncheckedInto, sr25519}; -pub type RococoParachainChainSpec = sc_service::GenericChainSpec<(), Extensions>; - -pub fn rococo_parachain_local_config() -> RococoParachainChainSpec { - RococoParachainChainSpec::builder( +pub fn rococo_parachain_local_config() -> GenericChainSpec { + GenericChainSpec::builder( rococo_parachain_runtime::WASM_BINARY.expect("WASM binary was not built, please build it!"), Extensions { relay_chain: "rococo-local".into(), para_id: 1000 }, ) @@ -57,9 +55,9 @@ pub fn rococo_parachain_local_config() -> RococoParachainChainSpec { .build() } -pub fn staging_rococo_parachain_local_config() -> RococoParachainChainSpec { +pub fn staging_rococo_parachain_local_config() -> GenericChainSpec { #[allow(deprecated)] - RococoParachainChainSpec::builder( + GenericChainSpec::builder( rococo_parachain_runtime::WASM_BINARY.expect("WASM binary was not built, please build it!"), Extensions { relay_chain: "rococo-local".into(), para_id: 1000 }, ) diff --git a/cumulus/polkadot-parachain/src/chain_spec/seedling.rs b/cumulus/polkadot-parachain/src/chain_spec/seedling.rs index b034588e14c..32d51622054 100644 --- a/cumulus/polkadot-parachain/src/chain_spec/seedling.rs +++ b/cumulus/polkadot-parachain/src/chain_spec/seedling.rs @@ -14,7 +14,7 @@ // You should have received a copy of the GNU General Public License // along with Cumulus. If not, see . -use crate::chain_spec::{get_account_id_from_seed, Extensions}; +use crate::chain_spec::{get_account_id_from_seed, Extensions, GenericChainSpec}; use cumulus_primitives_core::ParaId; use parachains_common::{AccountId, AuraId}; use sc_service::ChainType; @@ -22,11 +22,8 @@ use sp_core::sr25519; use super::get_collator_keys_from_seed; -/// Specialized `ChainSpec` for the seedling parachain runtime. -pub type SeedlingChainSpec = sc_service::GenericChainSpec<(), Extensions>; - -pub fn get_seedling_chain_spec() -> SeedlingChainSpec { - SeedlingChainSpec::builder( +pub fn get_seedling_chain_spec() -> GenericChainSpec { + GenericChainSpec::builder( seedling_runtime::WASM_BINARY.expect("WASM binary was not built, please build it!"), Extensions { relay_chain: "westend".into(), para_id: 2000 }, ) diff --git a/cumulus/polkadot-parachain/src/chain_spec/shell.rs b/cumulus/polkadot-parachain/src/chain_spec/shell.rs index 02c65e809a6..e0a9875fb96 100644 --- a/cumulus/polkadot-parachain/src/chain_spec/shell.rs +++ b/cumulus/polkadot-parachain/src/chain_spec/shell.rs @@ -14,18 +14,15 @@ // You should have received a copy of the GNU General Public License // along with Cumulus. If not, see . -use crate::chain_spec::Extensions; +use crate::chain_spec::{Extensions, GenericChainSpec}; use cumulus_primitives_core::ParaId; use parachains_common::AuraId; use sc_service::ChainType; use super::get_collator_keys_from_seed; -/// Specialized `ChainSpec` for the shell parachain runtime. 
-pub type ShellChainSpec = sc_service::GenericChainSpec<(), Extensions>; - -pub fn get_shell_chain_spec() -> ShellChainSpec { - ShellChainSpec::builder( +pub fn get_shell_chain_spec() -> GenericChainSpec { + GenericChainSpec::builder( shell_runtime::WASM_BINARY.expect("WASM binary was not built, please build it!"), Extensions { relay_chain: "westend".into(), para_id: 1000 }, ) diff --git a/cumulus/polkadot-parachain/src/command.rs b/cumulus/polkadot-parachain/src/command.rs index ea19ad12ba0..f966a5db8f3 100644 --- a/cumulus/polkadot-parachain/src/command.rs +++ b/cumulus/polkadot-parachain/src/command.rs @@ -16,6 +16,7 @@ use crate::{ chain_spec, + chain_spec::GenericChainSpec, cli::{Cli, RelayChainCli, Subcommand}, fake_runtime_api::{ asset_hub_polkadot_aura::RuntimeApi as AssetHubPolkadotRuntimeApi, aura::RuntimeApi, @@ -128,18 +129,15 @@ fn load_spec(id: &str) -> std::result::Result, String> { // - Defaul-like "staging" => Box::new(chain_spec::rococo_parachain::staging_rococo_parachain_local_config()), - "tick" => - Box::new(chain_spec::rococo_parachain::RococoParachainChainSpec::from_json_bytes( - &include_bytes!("../chain-specs/tick.json")[..], - )?), - "trick" => - Box::new(chain_spec::rococo_parachain::RococoParachainChainSpec::from_json_bytes( - &include_bytes!("../chain-specs/trick.json")[..], - )?), - "track" => - Box::new(chain_spec::rococo_parachain::RococoParachainChainSpec::from_json_bytes( - &include_bytes!("../chain-specs/track.json")[..], - )?), + "tick" => Box::new(GenericChainSpec::from_json_bytes( + &include_bytes!("../chain-specs/tick.json")[..], + )?), + "trick" => Box::new(GenericChainSpec::from_json_bytes( + &include_bytes!("../chain-specs/trick.json")[..], + )?), + "track" => Box::new(GenericChainSpec::from_json_bytes( + &include_bytes!("../chain-specs/track.json")[..], + )?), // -- Starters "shell" => Box::new(chain_spec::shell::get_shell_chain_spec()), @@ -154,10 +152,9 @@ fn load_spec(id: &str) -> std::result::Result, String> { "asset-hub-polkadot-genesis" | "statemint-genesis" => Box::new(chain_spec::asset_hubs::asset_hub_polkadot_config()), // the shell-based chain spec as used for syncing - "asset-hub-polkadot" | "statemint" => - Box::new(chain_spec::asset_hubs::AssetHubPolkadotChainSpec::from_json_bytes( - &include_bytes!("../chain-specs/asset-hub-polkadot.json")[..], - )?), + "asset-hub-polkadot" | "statemint" => Box::new(GenericChainSpec::from_json_bytes( + &include_bytes!("../chain-specs/asset-hub-polkadot.json")[..], + )?), // -- Asset Hub Kusama "asset-hub-kusama-dev" | "statemine-dev" => @@ -168,10 +165,9 @@ fn load_spec(id: &str) -> std::result::Result, String> { "asset-hub-kusama-genesis" | "statemine-genesis" => Box::new(chain_spec::asset_hubs::asset_hub_kusama_config()), // the shell-based chain spec as used for syncing - "asset-hub-kusama" | "statemine" => - Box::new(chain_spec::asset_hubs::AssetHubKusamaChainSpec::from_json_bytes( - &include_bytes!("../chain-specs/asset-hub-kusama.json")[..], - )?), + "asset-hub-kusama" | "statemine" => Box::new(GenericChainSpec::from_json_bytes( + &include_bytes!("../chain-specs/asset-hub-kusama.json")[..], + )?), // -- Asset Hub Rococo "asset-hub-rococo-dev" => @@ -181,10 +177,9 @@ fn load_spec(id: &str) -> std::result::Result, String> { // the chain spec as used for generating the upgrade genesis values "asset-hub-rococo-genesis" => Box::new(chain_spec::asset_hubs::asset_hub_rococo_genesis_config()), - "asset-hub-rococo" => - Box::new(chain_spec::asset_hubs::AssetHubRococoChainSpec::from_json_bytes( - 
&include_bytes!("../chain-specs/asset-hub-rococo.json")[..], - )?), + "asset-hub-rococo" => Box::new(GenericChainSpec::from_json_bytes( + &include_bytes!("../chain-specs/asset-hub-rococo.json")[..], + )?), // -- Asset Hub Westend "asset-hub-westend-dev" | "westmint-dev" => @@ -195,28 +190,25 @@ fn load_spec(id: &str) -> std::result::Result, String> { "asset-hub-westend-genesis" | "westmint-genesis" => Box::new(chain_spec::asset_hubs::asset_hub_westend_config()), // the shell-based chain spec as used for syncing - "asset-hub-westend" | "westmint" => - Box::new(chain_spec::asset_hubs::AssetHubWestendChainSpec::from_json_bytes( - &include_bytes!("../chain-specs/asset-hub-westend.json")[..], - )?), + "asset-hub-westend" | "westmint" => Box::new(GenericChainSpec::from_json_bytes( + &include_bytes!("../chain-specs/asset-hub-westend.json")[..], + )?), // -- Polkadot Collectives "collectives-polkadot-dev" => Box::new(chain_spec::collectives::collectives_polkadot_development_config()), "collectives-polkadot-local" => Box::new(chain_spec::collectives::collectives_polkadot_local_config()), - "collectives-polkadot" => - Box::new(chain_spec::collectives::CollectivesPolkadotChainSpec::from_json_bytes( - &include_bytes!("../chain-specs/collectives-polkadot.json")[..], - )?), + "collectives-polkadot" => Box::new(GenericChainSpec::from_json_bytes( + &include_bytes!("../chain-specs/collectives-polkadot.json")[..], + )?), "collectives-westend-dev" => Box::new(chain_spec::collectives::collectives_westend_development_config()), "collectives-westend-local" => Box::new(chain_spec::collectives::collectives_westend_local_config()), - "collectives-westend" => - Box::new(chain_spec::collectives::CollectivesWestendChainSpec::from_json_bytes( - &include_bytes!("../chain-specs/collectives-westend.json")[..], - )?), + "collectives-westend" => Box::new(GenericChainSpec::from_json_bytes( + &include_bytes!("../chain-specs/collectives-westend.json")[..], + )?), // -- Contracts on Rococo "contracts-rococo-dev" => @@ -224,10 +216,9 @@ fn load_spec(id: &str) -> std::result::Result, String> { "contracts-rococo-local" => Box::new(chain_spec::contracts::contracts_rococo_local_config()), "contracts-rococo-genesis" => Box::new(chain_spec::contracts::contracts_rococo_config()), - "contracts-rococo" => - Box::new(chain_spec::contracts::ContractsRococoChainSpec::from_json_bytes( - &include_bytes!("../chain-specs/contracts-rococo.json")[..], - )?), + "contracts-rococo" => Box::new(GenericChainSpec::from_json_bytes( + &include_bytes!("../chain-specs/contracts-rococo.json")[..], + )?), // -- BridgeHub bridge_like_id @@ -279,44 +270,7 @@ fn load_spec(id: &str) -> std::result::Result, String> { }, // -- Loading a specific spec from disk - path => { - let path: PathBuf = path.into(); - match path.runtime() { - Runtime::AssetHubPolkadot => Box::new( - chain_spec::asset_hubs::AssetHubPolkadotChainSpec::from_json_file(path)?, - ), - Runtime::AssetHubKusama => - Box::new(chain_spec::asset_hubs::AssetHubKusamaChainSpec::from_json_file(path)?), - Runtime::AssetHubRococo => - Box::new(chain_spec::asset_hubs::AssetHubRococoChainSpec::from_json_file(path)?), - Runtime::AssetHubWestend => Box::new( - chain_spec::asset_hubs::AssetHubWestendChainSpec::from_json_file(path)?, - ), - Runtime::CollectivesPolkadot => Box::new( - chain_spec::collectives::CollectivesPolkadotChainSpec::from_json_file(path)?, - ), - Runtime::CollectivesWestend => Box::new( - chain_spec::collectives::CollectivesWestendChainSpec::from_json_file(path)?, - ), - Runtime::Shell => - 
Box::new(chain_spec::shell::ShellChainSpec::from_json_file(path)?), - Runtime::Seedling => - Box::new(chain_spec::seedling::SeedlingChainSpec::from_json_file(path)?), - Runtime::ContractsRococo => - Box::new(chain_spec::contracts::ContractsRococoChainSpec::from_json_file(path)?), - Runtime::BridgeHub(bridge_hub_runtime_type) => - bridge_hub_runtime_type.chain_spec_from_json_file(path)?, - Runtime::Penpal(_para_id) => - Box::new(chain_spec::penpal::PenpalChainSpec::from_json_file(path)?), - Runtime::GluttonWestend => - Box::new(chain_spec::glutton::GluttonChainSpec::from_json_file(path)?), - Runtime::Glutton => - Box::new(chain_spec::glutton::GluttonChainSpec::from_json_file(path)?), - Runtime::Default => Box::new( - chain_spec::rococo_parachain::RococoParachainChainSpec::from_json_file(path)?, - ), - } - }, + path => Box::new(GenericChainSpec::from_json_file(path.into())?), }) } -- GitLab From 73ff1c3647024d3a5e94727e7fb74714c3a98b47 Mon Sep 17 00:00:00 2001 From: Wil Wade Date: Sat, 25 Nov 2023 05:30:57 -0500 Subject: [PATCH 125/199] Update documentation for SafeMode and TxPause Pallets (#2413) # Description Documentation for the TxPause and SafeMode pallets. Based on reading and the notes from the following related PRs: - https://github.com/paritytech/substrate/pull/12092 I believe this also completes the checklist to be able to close the related PRs: - Close #302 - Close #274 --------- Co-authored-by: joe petrowski <25483142+joepetrowski@users.noreply.github.com> --- Cargo.lock | 2 + substrate/frame/safe-mode/Cargo.toml | 1 + substrate/frame/safe-mode/src/lib.rs | 56 ++++++++++++++++++++++++++ substrate/frame/safe-mode/src/tests.rs | 3 ++ substrate/frame/tx-pause/Cargo.toml | 1 + substrate/frame/tx-pause/src/lib.rs | 56 ++++++++++++++++++++++++++ substrate/frame/tx-pause/src/tests.rs | 3 ++ 7 files changed, 122 insertions(+) diff --git a/Cargo.lock b/Cargo.lock index 3fbede968bc..8a3f8e05fbc 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -10795,6 +10795,7 @@ dependencies = [ name = "pallet-safe-mode" version = "4.0.0-dev" dependencies = [ + "docify", "frame-benchmarking", "frame-support", "frame-system", @@ -11198,6 +11199,7 @@ dependencies = [ name = "pallet-tx-pause" version = "4.0.0-dev" dependencies = [ + "docify", "frame-benchmarking", "frame-support", "frame-system", diff --git a/substrate/frame/safe-mode/Cargo.toml b/substrate/frame/safe-mode/Cargo.toml index ac469bb385c..f7b4ea4dd8c 100644 --- a/substrate/frame/safe-mode/Cargo.toml +++ b/substrate/frame/safe-mode/Cargo.toml @@ -13,6 +13,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.2.2", default-features = false, features = ["derive"] } +docify = "0.2.6" frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true} frame-support = { path = "../support", default-features = false} frame-system = { path = "../system", default-features = false} diff --git a/substrate/frame/safe-mode/src/lib.rs b/substrate/frame/safe-mode/src/lib.rs index b8e8378fa9e..554f509db63 100644 --- a/substrate/frame/safe-mode/src/lib.rs +++ b/substrate/frame/safe-mode/src/lib.rs @@ -15,6 +15,62 @@ // See the License for the specific language governing permissions and // limitations under the License. +//! # Safe Mode +//! +//! Trigger for stopping all extrinsics outside of a specific whitelist. +//! +//! ## WARNING +//! +//! NOT YET AUDITED. DO NOT USE IN PRODUCTION. +//! +//! ## Pallet API +//! +//! 
See the [`pallet`] module for more information about the interfaces this pallet exposes, +//! including its configuration trait, dispatchables, storage items, events, and errors. +//! +//! ## Overview +//! +//! Safe mode is entered via two paths (deposit or forced) until a set block number. +//! The mode is exited when the block number is reached or a call to one of the exit extrinsics is +//! made. A `WhitelistedCalls` configuration item contains all calls that can be executed while in +//! safe mode. +//! +//! ### Primary Features +//! +//! - Entering safe mode can be via privileged origin or anyone who places a deposit. +//! - Origin configuration items are separated for privileged entering and exiting safe mode. +//! - A configurable duration sets the number of blocks after which the system will exit safe mode. +//! - Safe mode may be extended beyond the configured exit by additional calls. +//! +//! ### Example +//! +//! Configuration of call filters: +//! +//! ```ignore +//! impl frame_system::Config for Runtime { +//! // … +//! type BaseCallFilter = InsideBoth; +//! // … +//! } +//! ``` +//! +//! Entering safe mode with deposit: +#![doc = docify::embed!("src/tests.rs", can_activate)] +//! +//! Entering safe mode via privileged origin: +#![doc = docify::embed!("src/tests.rs", can_force_activate_with_config_origin)] +//! +//! Exiting safe mode via privileged origin: +#![doc = docify::embed!("src/tests.rs", can_force_deactivate_with_config_origin)] +//! +//! ## Low Level / Implementation Details +//! +//! ### Use Cost +//! +//! A storage value (`EnteredUntil`) is used to store the block safe mode will be exited on. +//! Using the call filter will require a db read of that storage on the first extrinsic. +//! The storage will be added to the overlay and incur low cost for all additional calls. 
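The `InsideBoth` line in the configuration example above elides its generic parameters. A hedged sketch of what the full wiring could look like when a runtime composes both of the filters documented in this PR — the parameter order and the absence of further filters are assumptions for illustration, not taken from the patch:

```rust
// Sketch only: composing the two call filters at the `frame_system` level.
// `InsideBoth<A, B>` lets a call through only when both `A` and `B` allow it,
// so with this configuration a call must be neither blocked by safe mode nor
// paused in order to be dispatchable. The exact generic parameters here are
// assumptions for illustration.
use frame_support::traits::InsideBoth;

impl frame_system::Config for Runtime {
    // A call is filtered out if either pallet rejects it; calls dispatched via
    // `dispatch_bypass_filter` (e.g. most root-level calls) are unaffected.
    type BaseCallFilter = InsideBoth<SafeMode, TxPause>;
    // … all other associated types unchanged …
}
```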
+ #![cfg_attr(not(feature = "std"), no_std)] #![deny(rustdoc::broken_intra_doc_links)] diff --git a/substrate/frame/safe-mode/src/tests.rs b/substrate/frame/safe-mode/src/tests.rs index b92c5b87a53..c0a2f45a3e7 100644 --- a/substrate/frame/safe-mode/src/tests.rs +++ b/substrate/frame/safe-mode/src/tests.rs @@ -189,6 +189,7 @@ fn can_filter_balance_in_proxy_when_activated() { }); } +#[docify::export] #[test] fn can_activate() { new_test_ext().execute_with(|| { @@ -271,6 +272,7 @@ fn fails_force_deactivate_if_not_activated() { }); } +#[docify::export] #[test] fn can_force_activate_with_config_origin() { new_test_ext().execute_with(|| { @@ -288,6 +290,7 @@ fn can_force_activate_with_config_origin() { }); } +#[docify::export] #[test] fn can_force_deactivate_with_config_origin() { new_test_ext().execute_with(|| { diff --git a/substrate/frame/tx-pause/Cargo.toml b/substrate/frame/tx-pause/Cargo.toml index 9af424f541c..bf0641f5b6d 100644 --- a/substrate/frame/tx-pause/Cargo.toml +++ b/substrate/frame/tx-pause/Cargo.toml @@ -13,6 +13,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.2.2", default-features = false, features = ["derive"] } +docify = "0.2.6" frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true} frame-support = { path = "../support", default-features = false} frame-system = { path = "../system", default-features = false} diff --git a/substrate/frame/tx-pause/src/lib.rs b/substrate/frame/tx-pause/src/lib.rs index a3be0f50172..31be575fba7 100644 --- a/substrate/frame/tx-pause/src/lib.rs +++ b/substrate/frame/tx-pause/src/lib.rs @@ -15,6 +15,62 @@ // See the License for the specific language governing permissions and // limitations under the License. +//! # Transaction Pause +//! +//! Allows dynamic, chain-state-based pausing and unpausing of specific extrinsics via call filters. +//! +//! ## WARNING +//! +//! NOT YET AUDITED. DO NOT USE IN PRODUCTION. +//! +//! ## Pallet API +//! +//! See the [`pallet`] module for more information about the interfaces this pallet exposes, +//! including its configuration trait, dispatchables, storage items, events, and errors. +//! +//! ## Overview +//! +//! A dynamic call filter that can be controlled with extrinsics. +//! +//! Pausing an extrinsic means that the extrinsic CANNOT be called again until it is unpaused. +//! The exception is calls that use `dispatch_bypass_filter`, typically only with the root origin. +//! +//! ### Primary Features +//! +//! - Calls that should never be paused can be added to a whitelist. +//! - Separate origins are configurable for pausing and unpausing. +//! - Pausing is triggered using the string representation of the call. +//! - Pauses can target a single extrinsic or an entire pallet. +//! - Pauses can target future extrinsics or pallets. +//! +//! ### Example +//! +//! Configuration of call filters: +//! +//! ```ignore +//! impl frame_system::Config for Runtime { +//! // … +//! type BaseCallFilter = InsideBoth; +//! // … +//! } +//! ``` +//! +//! Pause a specific call: +#![doc = docify::embed!("src/tests.rs", can_pause_specific_call)] +//! +//! Unpause a specific call: +#![doc = docify::embed!("src/tests.rs", can_unpause_specific_call)] +//! +//! Pause all calls in a pallet: +#![doc = docify::embed!("src/tests.rs", can_pause_all_calls_in_pallet_except_on_whitelist)] +//! +//! ## Low Level / Implementation Details +//! +//! ### Use Cost +//! +//! A storage map (`PausedCalls`) is used to store currently paused calls. +//!
Using the call filter will require a db read of that storage on each extrinsic. + #![cfg_attr(not(feature = "std"), no_std)] #![deny(rustdoc::broken_intra_doc_links)] diff --git a/substrate/frame/tx-pause/src/tests.rs b/substrate/frame/tx-pause/src/tests.rs index a71ff3439d9..823abf9d9c4 100644 --- a/substrate/frame/tx-pause/src/tests.rs +++ b/substrate/frame/tx-pause/src/tests.rs @@ -25,6 +25,7 @@ use sp_runtime::DispatchError; // GENERAL SUCCESS/POSITIVE TESTS --------------------- +#[docify::export] #[test] fn can_pause_specific_call() { new_test_ext().execute_with(|| { @@ -43,6 +44,7 @@ fn can_pause_specific_call() { }); } +#[docify::export] #[test] fn can_pause_all_calls_in_pallet_except_on_whitelist() { new_test_ext().execute_with(|| { @@ -64,6 +66,7 @@ fn can_pause_all_calls_in_pallet_except_on_whitelist() { }); } +#[docify::export] #[test] fn can_unpause_specific_call() { new_test_ext().execute_with(|| { -- GitLab From cfa19c37e60f094a2954ffa73b1da261c050f61f Mon Sep 17 00:00:00 2001 From: Marcin S Date: Sat, 25 Nov 2023 17:03:58 +0100 Subject: [PATCH 126/199] PVF: remove audit log access (#2461) --- .gitlab/pipeline/test.yml | 2 +- .../node/core/pvf/src/execute/worker_intf.rs | 31 +--- .../node/core/pvf/src/prepare/worker_intf.rs | 26 +-- polkadot/node/core/pvf/src/security.rs | 149 ------------------ 4 files changed, 3 insertions(+), 205 deletions(-) diff --git a/.gitlab/pipeline/test.yml b/.gitlab/pipeline/test.yml index 79ef9070dba..9c3fa7701c8 100644 --- a/.gitlab/pipeline/test.yml +++ b/.gitlab/pipeline/test.yml @@ -522,4 +522,4 @@ test-syscalls: - if [[ "$CI_JOB_STATUS" == "failed" ]]; then printf "The x86_64 syscalls used by the worker binaries have changed. Please review if this is expected and update polkadot/scripts/list-syscalls/*-worker-syscalls as needed.\n"; fi - allow_failure: true # TODO: remove this once we have an idea how often the syscall lists will change + allow_failure: false # this rarely triggers in practice diff --git a/polkadot/node/core/pvf/src/execute/worker_intf.rs b/polkadot/node/core/pvf/src/execute/worker_intf.rs index fc6cd8fc725..16a7c290b52 100644 --- a/polkadot/node/core/pvf/src/execute/worker_intf.rs +++ b/polkadot/node/core/pvf/src/execute/worker_intf.rs @@ -18,7 +18,6 @@ use crate::{ artifacts::ArtifactPathId, - security, worker_intf::{ clear_worker_dir_path, framed_recv, framed_send, spawn_with_program_path, IdleWorker, SpawnErr, WorkerDir, WorkerHandle, JOB_TIMEOUT_WALL_CLOCK_FACTOR, @@ -33,7 +32,7 @@ use polkadot_node_core_pvf_common::{ execute::{Handshake, WorkerResponse}, worker_dir, SecurityStatus, }; -use polkadot_parachain_primitives::primitives::{ValidationCodeHash, ValidationResult}; +use polkadot_parachain_primitives::primitives::ValidationResult; use polkadot_primitives::ExecutorParams; use std::{path::Path, time::Duration}; use tokio::{io, net::UnixStream}; @@ -132,10 +131,7 @@ pub async fn start_work( artifact.path.display(), ); - let artifact_path = artifact.path.clone(); with_worker_dir_setup(worker_dir, pid, &artifact.path, |worker_dir| async move { - let audit_log_file = security::AuditLogFile::try_open_and_seek_to_end().await; - if let Err(error) = send_request(&mut stream, &validation_params, execution_timeout).await { gum::warn!( target: LOG_TARGET, @@ -160,10 +156,7 @@ pub async fn start_work( handle_response( response, pid, - &artifact.id.code_hash, - &artifact_path, execution_timeout, - audit_log_file ) .await, Err(error) => { @@ -217,10 +210,7 @@ pub async fn start_work( async fn handle_response( response: 
WorkerResponse, worker_pid: u32, - validation_code_hash: &ValidationCodeHash, - artifact_path: &Path, execution_timeout: Duration, - audit_log_file: Option, ) -> WorkerResponse { if let WorkerResponse::Ok { duration, .. } = response { if duration > execution_timeout { @@ -238,25 +228,6 @@ async fn handle_response( } } - if let WorkerResponse::JobDied { err: _, job_pid } = response { - // The job died. Check if it was due to a seccomp violation. - // - // NOTE: Log, but don't change the outcome. Not all validators may have - // auditing enabled, so we don't want attackers to abuse a non-deterministic - // outcome. - for syscall in security::check_seccomp_violations_for_job(audit_log_file, job_pid).await { - gum::error!( - target: LOG_TARGET, - %worker_pid, - %job_pid, - %syscall, - ?validation_code_hash, - ?artifact_path, - "A forbidden syscall was attempted! This is a violation of our seccomp security policy. Report an issue ASAP!" - ); - } - } - response } diff --git a/polkadot/node/core/pvf/src/prepare/worker_intf.rs b/polkadot/node/core/pvf/src/prepare/worker_intf.rs index f978005068c..a393e9baa9e 100644 --- a/polkadot/node/core/pvf/src/prepare/worker_intf.rs +++ b/polkadot/node/core/pvf/src/prepare/worker_intf.rs @@ -19,7 +19,6 @@ use crate::{ artifacts::ArtifactId, metrics::Metrics, - security, worker_intf::{ clear_worker_dir_path, framed_recv, framed_send, spawn_with_program_path, IdleWorker, SpawnErr, WorkerDir, WorkerHandle, JOB_TIMEOUT_WALL_CLOCK_FACTOR, @@ -134,7 +133,6 @@ pub async fn start_work( pid, |tmp_artifact_file, mut stream, worker_dir| async move { let preparation_timeout = pvf.prep_timeout(); - let audit_log_file = security::AuditLogFile::try_open_and_seek_to_end().await; if let Err(err) = send_request(&mut stream, &pvf).await { gum::warn!( @@ -170,7 +168,6 @@ pub async fn start_work( &pvf, &cache_path, preparation_timeout, - audit_log_file, ) .await, Ok(Err(err)) => { @@ -211,34 +208,13 @@ async fn handle_response( pvf: &PvfPrepData, cache_path: &Path, preparation_timeout: Duration, - audit_log_file: Option, ) -> Outcome { let PrepareWorkerSuccess { checksum, stats: PrepareStats { cpu_time_elapsed, memory_stats } } = match result.clone() { Ok(result) => result, // Timed out on the child. This should already be logged by the child. Err(PrepareError::TimedOut) => return Outcome::TimedOut, - Err(PrepareError::JobDied { err, job_pid }) => { - // The job died. Check if it was due to a seccomp violation. - // - // NOTE: Log, but don't change the outcome. Not all validators may have - // auditing enabled, so we don't want attackers to abuse a non-deterministic - // outcome. - for syscall in - security::check_seccomp_violations_for_job(audit_log_file, job_pid).await - { - gum::error!( - target: LOG_TARGET, - %worker_pid, - %job_pid, - %syscall, - ?pvf, - "A forbidden syscall was attempted! This is a violation of our seccomp security policy. Report an issue ASAP!" 
- ); - } - - return Outcome::JobDied { err, job_pid } - }, + Err(PrepareError::JobDied { err, job_pid }) => return Outcome::JobDied { err, job_pid }, Err(PrepareError::OutOfMemory) => return Outcome::OutOfMemory, Err(err) => return Outcome::Concluded { worker, result: Err(err) }, }; diff --git a/polkadot/node/core/pvf/src/security.rs b/polkadot/node/core/pvf/src/security.rs index ef8714f58c8..2fd3b53e96b 100644 --- a/polkadot/node/core/pvf/src/security.rs +++ b/polkadot/node/core/pvf/src/security.rs @@ -17,10 +17,6 @@ use crate::{Config, SecurityStatus, LOG_TARGET}; use futures::join; use std::{fmt, path::Path}; -use tokio::{ - fs::{File, OpenOptions}, - io::{AsyncReadExt, AsyncSeekExt, SeekFrom}, -}; const SECURE_MODE_ANNOUNCEMENT: &'static str = "In the next release this will be a hard error by default. @@ -35,7 +31,6 @@ const SECURE_MODE_ANNOUNCEMENT: &'static str = pub async fn check_security_status(config: &Config) -> SecurityStatus { let Config { prepare_worker_program_path, cache_path, .. } = config; - // TODO: add check that syslog is available and that seccomp violations are logged? let (landlock, seccomp, change_root) = join!( check_landlock(prepare_worker_program_path), check_seccomp(prepare_worker_program_path), @@ -303,147 +298,3 @@ async fn check_seccomp( } } } - -const AUDIT_LOG_PATH: &'static str = "/var/log/audit/audit.log"; -const SYSLOG_PATH: &'static str = "/var/log/syslog"; - -/// System audit log. -pub struct AuditLogFile { - file: File, - path: &'static str, -} - -impl AuditLogFile { - /// Looks for an audit log file on the system and opens it, seeking to the end to skip any - /// events from before this was called. - /// - /// A bit of a verbose name, but it should clue future refactorers not to move calls closer to - /// where the `AuditLogFile` is used. - pub async fn try_open_and_seek_to_end() -> Option { - let mut path = AUDIT_LOG_PATH; - let mut file = match OpenOptions::new().read(true).open(AUDIT_LOG_PATH).await { - Ok(file) => Ok(file), - Err(_) => { - path = SYSLOG_PATH; - OpenOptions::new().read(true).open(SYSLOG_PATH).await - }, - } - .ok()?; - - let _pos = file.seek(SeekFrom::End(0)).await; - - Some(Self { file, path }) - } - - async fn read_new_since_open(mut self) -> String { - let mut buf = String::new(); - let _len = self.file.read_to_string(&mut buf).await; - buf - } -} - -/// Check if a seccomp violation occurred for the given job process. As the syslog may be in a -/// different location, or seccomp auditing may be disabled, this function provides a best-effort -/// attempt only. -/// -/// The `audit_log_file` must have been obtained before the job started. It only allows reading -/// entries that were written since it was obtained, so that we do not consider events from previous -/// processes with the same pid. This can still be racy, but it's unlikely and fine for a -/// best-effort attempt. 
-pub async fn check_seccomp_violations_for_job( - audit_log_file: Option, - job_pid: i32, -) -> Vec { - let audit_event_pid_field = format!("pid={job_pid}"); - - let audit_log_file = match audit_log_file { - Some(file) => { - gum::trace!( - target: LOG_TARGET, - %job_pid, - audit_log_path = ?file.path, - "checking audit log for seccomp violations", - ); - file - }, - None => { - gum::warn!( - target: LOG_TARGET, - %job_pid, - "could not open either {AUDIT_LOG_PATH} or {SYSLOG_PATH} for reading audit logs" - ); - return vec![] - }, - }; - let events = audit_log_file.read_new_since_open().await; - - let mut violations = vec![]; - for event in events.lines() { - if let Some(syscall) = parse_audit_log_for_seccomp_event(event, &audit_event_pid_field) { - violations.push(syscall); - } - } - - violations -} - -fn parse_audit_log_for_seccomp_event(event: &str, audit_event_pid_field: &str) -> Option { - const SECCOMP_AUDIT_EVENT_TYPE: &'static str = "type=1326"; - - // Do a series of simple .contains instead of a regex, because I'm not sure if the fields are - // guaranteed to always be in the same order. - if !event.contains(SECCOMP_AUDIT_EVENT_TYPE) || !event.contains(&audit_event_pid_field) { - return None - } - - // Get the syscall. Let's avoid a dependency on regex just for this. - for field in event.split(" ") { - if let Some(syscall) = field.strip_prefix("syscall=") { - return syscall.parse::().ok() - } - } - - None -} - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn test_parse_audit_log_for_seccomp_event() { - let audit_event_pid_field = "pid=2559058"; - - assert_eq!( - parse_audit_log_for_seccomp_event( - r#"Oct 24 13:15:24 build kernel: [5883980.283910] audit: type=1326 audit(1698153324.786:23): auid=0 uid=0 gid=0 ses=2162 subj=unconfined pid=2559058 comm="polkadot-prepar" exe="/root/paritytech/polkadot-sdk-2/target/debug/polkadot-prepare-worker" sig=31 arch=c000003e syscall=53 compat=0 ip=0x7f7542c80d5e code=0x80000000"#, - audit_event_pid_field - ), - Some(53) - ); - // pid is wrong - assert_eq!( - parse_audit_log_for_seccomp_event( - r#"Oct 24 13:15:24 build kernel: [5883980.283910] audit: type=1326 audit(1698153324.786:23): auid=0 uid=0 gid=0 ses=2162 subj=unconfined pid=2559057 comm="polkadot-prepar" exe="/root/paritytech/polkadot-sdk-2/target/debug/polkadot-prepare-worker" sig=31 arch=c000003e syscall=53 compat=0 ip=0x7f7542c80d5e code=0x80000000"#, - audit_event_pid_field - ), - None - ); - // type is wrong - assert_eq!( - parse_audit_log_for_seccomp_event( - r#"Oct 24 13:15:24 build kernel: [5883980.283910] audit: type=1327 audit(1698153324.786:23): auid=0 uid=0 gid=0 ses=2162 subj=unconfined pid=2559057 comm="polkadot-prepar" exe="/root/paritytech/polkadot-sdk-2/target/debug/polkadot-prepare-worker" sig=31 arch=c000003e syscall=53 compat=0 ip=0x7f7542c80d5e code=0x80000000"#, - audit_event_pid_field - ), - None - ); - // no syscall field - assert_eq!( - parse_audit_log_for_seccomp_event( - r#"Oct 24 13:15:24 build kernel: [5883980.283910] audit: type=1327 audit(1698153324.786:23): auid=0 uid=0 gid=0 ses=2162 subj=unconfined pid=2559057 comm="polkadot-prepar" exe="/root/paritytech/polkadot-sdk-2/target/debug/polkadot-prepare-worker" sig=31 arch=c000003e compat=0 ip=0x7f7542c80d5e code=0x80000000"#, - audit_event_pid_field - ), - None - ); - } -} -- GitLab From 4f8048b9c7a333cd8020ec4498fe96567ddeee18 Mon Sep 17 00:00:00 2001 From: Chevdor Date: Mon, 27 Nov 2023 10:16:35 +0100 Subject: [PATCH 127/199] New runtime `spec_version` format + backport of the bump to 1.4.0 
(#2468) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## Overview This PR aligns the `spec_version` formatting to the [recent changes](https://github.com/polkadot-fellows/runtimes/pull/26/files#diff-efa4caeb17487ecb13d8f5eb7863c3241d84afa2e73fbf25909a2ca89df0f362R142) made for the Polkadot/Kusama runtimes. It also backports the latest version `v1.4.0` bumps as `1_004_000`. ## Details During the switch from `v0.9` to `v1.x`, the format of the `spec_version` was modified from: `(M)m_ppp` for a runtime considered on version `M.m.pp`. For instance `0.9.42` had a `spec_version` of `9420`. With the transition to `v1.x`, the format was changed to a bigger number (still `u32`) formatted as `MM_mm_ppp` where `1.2.3` would be stored as `01_02_003`. This PR aligns the format with that has been introduced in the fellowship repo: `MMM_mmm_ppp`. --------- Co-authored-by: Bastian Köcher --- .../parachains/runtimes/assets/asset-hub-kusama/src/lib.rs | 4 ++-- .../parachains/runtimes/assets/asset-hub-polkadot/src/lib.rs | 2 +- .../parachains/runtimes/assets/asset-hub-rococo/src/lib.rs | 4 ++-- .../parachains/runtimes/assets/asset-hub-westend/src/lib.rs | 2 +- .../runtimes/bridge-hubs/bridge-hub-kusama/src/lib.rs | 2 +- .../runtimes/bridge-hubs/bridge-hub-polkadot/src/lib.rs | 2 +- .../runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs | 2 +- .../runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs | 2 +- .../runtimes/collectives/collectives-polkadot/src/lib.rs | 2 +- .../runtimes/collectives/collectives-westend/src/lib.rs | 2 +- .../parachains/runtimes/contracts/contracts-rococo/src/lib.rs | 2 +- cumulus/parachains/runtimes/glutton/glutton-kusama/src/lib.rs | 2 +- .../parachains/runtimes/glutton/glutton-westend/src/lib.rs | 2 +- cumulus/parachains/runtimes/starters/seedling/src/lib.rs | 2 +- cumulus/parachains/runtimes/testing/penpal/src/lib.rs | 2 +- .../parachains/runtimes/testing/rococo-parachain/src/lib.rs | 2 +- polkadot/runtime/rococo/src/lib.rs | 2 +- polkadot/runtime/westend/src/lib.rs | 2 +- 18 files changed, 20 insertions(+), 20 deletions(-) diff --git a/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/lib.rs b/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/lib.rs index af0116d7014..4c76262f60a 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/lib.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/lib.rs @@ -114,7 +114,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { spec_name: create_runtime_str!("statemine"), impl_name: create_runtime_str!("statemine"), authoring_version: 1, - spec_version: 10000, + spec_version: 1_004_000, impl_version: 0, apis: RUNTIME_API_VERSIONS, transaction_version: 13, @@ -130,7 +130,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { spec_name: create_runtime_str!("statemine"), impl_name: create_runtime_str!("statemine"), authoring_version: 1, - spec_version: 10000, + spec_version: 1_004_000, impl_version: 0, apis: RUNTIME_API_VERSIONS, transaction_version: 13, diff --git a/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/lib.rs b/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/lib.rs index 1b7ef10f485..78721b194d0 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/lib.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/lib.rs @@ -140,7 +140,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { spec_name: create_runtime_str!("statemint"), impl_name: create_runtime_str!("statemint"), authoring_version: 1, - 
spec_version: 10000, + spec_version: 1_004_000, impl_version: 0, apis: RUNTIME_API_VERSIONS, transaction_version: 13, diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs index 02b51310890..c6c9735ecb1 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs @@ -114,7 +114,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { spec_name: create_runtime_str!("statemine"), impl_name: create_runtime_str!("statemine"), authoring_version: 1, - spec_version: 1_003_000, + spec_version: 1_004_000, impl_version: 0, apis: RUNTIME_API_VERSIONS, transaction_version: 13, @@ -127,7 +127,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { spec_name: create_runtime_str!("statemine"), impl_name: create_runtime_str!("statemine"), authoring_version: 1, - spec_version: 1_003_000, + spec_version: 1_004_000, impl_version: 0, apis: RUNTIME_API_VERSIONS, transaction_version: 13, diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs index d52edfe479c..a0d5a528c3f 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs @@ -111,7 +111,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { spec_name: create_runtime_str!("westmint"), impl_name: create_runtime_str!("westmint"), authoring_version: 1, - spec_version: 1_003_000, + spec_version: 1_004_000, impl_version: 0, apis: RUNTIME_API_VERSIONS, transaction_version: 13, diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/lib.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/lib.rs index d2db0340790..a511791d347 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/lib.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/lib.rs @@ -135,7 +135,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { spec_name: create_runtime_str!("bridge-hub-kusama"), impl_name: create_runtime_str!("bridge-hub-kusama"), authoring_version: 1, - spec_version: 10000, + spec_version: 1_004_000, impl_version: 0, apis: RUNTIME_API_VERSIONS, transaction_version: 3, diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/lib.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/lib.rs index 02f05a8bb87..e5c4f00f28e 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/lib.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/lib.rs @@ -135,7 +135,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { spec_name: create_runtime_str!("bridge-hub-polkadot"), impl_name: create_runtime_str!("bridge-hub-polkadot"), authoring_version: 1, - spec_version: 10000, + spec_version: 1_004_000, impl_version: 0, apis: RUNTIME_API_VERSIONS, transaction_version: 2, diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs index 7d64623f551..152e071a26a 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs @@ -170,7 +170,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { spec_name: create_runtime_str!("bridge-hub-rococo"), impl_name: create_runtime_str!("bridge-hub-rococo"), authoring_version: 
1, - spec_version: 1_003_000, + spec_version: 1_004_000, impl_version: 0, apis: RUNTIME_API_VERSIONS, transaction_version: 3, diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs index 9c97728058f..764b9825d4e 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs @@ -170,7 +170,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { spec_name: create_runtime_str!("bridge-hub-westend"), impl_name: create_runtime_str!("bridge-hub-westend"), authoring_version: 1, - spec_version: 1_003_000, + spec_version: 1_004_000, impl_version: 0, apis: RUNTIME_API_VERSIONS, transaction_version: 3, diff --git a/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/lib.rs b/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/lib.rs index c3d671c9085..90c44f8bfc5 100644 --- a/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/lib.rs +++ b/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/lib.rs @@ -115,7 +115,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { spec_name: create_runtime_str!("collectives"), impl_name: create_runtime_str!("collectives"), authoring_version: 1, - spec_version: 10000, + spec_version: 1_004_000, impl_version: 0, apis: RUNTIME_API_VERSIONS, transaction_version: 5, diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/src/lib.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/lib.rs index 8c5593e154d..54cb898fd6c 100644 --- a/cumulus/parachains/runtimes/collectives/collectives-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/collectives/collectives-westend/src/lib.rs @@ -110,7 +110,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { spec_name: create_runtime_str!("collectives-westend"), impl_name: create_runtime_str!("collectives-westend"), authoring_version: 1, - spec_version: 1_003_000, + spec_version: 1_004_000, impl_version: 0, apis: RUNTIME_API_VERSIONS, transaction_version: 5, diff --git a/cumulus/parachains/runtimes/contracts/contracts-rococo/src/lib.rs b/cumulus/parachains/runtimes/contracts/contracts-rococo/src/lib.rs index 9d6a53c5ed3..a7c06103c4a 100644 --- a/cumulus/parachains/runtimes/contracts/contracts-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/contracts/contracts-rococo/src/lib.rs @@ -131,7 +131,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { spec_name: create_runtime_str!("contracts-rococo"), impl_name: create_runtime_str!("contracts-rococo"), authoring_version: 1, - spec_version: 10001, + spec_version: 1_004_000, impl_version: 0, apis: RUNTIME_API_VERSIONS, transaction_version: 6, diff --git a/cumulus/parachains/runtimes/glutton/glutton-kusama/src/lib.rs b/cumulus/parachains/runtimes/glutton/glutton-kusama/src/lib.rs index 60a5d004e6c..ef0734df25f 100644 --- a/cumulus/parachains/runtimes/glutton/glutton-kusama/src/lib.rs +++ b/cumulus/parachains/runtimes/glutton/glutton-kusama/src/lib.rs @@ -98,7 +98,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { spec_name: create_runtime_str!("glutton"), impl_name: create_runtime_str!("glutton"), authoring_version: 1, - spec_version: 10000, + spec_version: 1_004_000, impl_version: 0, apis: RUNTIME_API_VERSIONS, transaction_version: 1, diff --git a/cumulus/parachains/runtimes/glutton/glutton-westend/src/lib.rs b/cumulus/parachains/runtimes/glutton/glutton-westend/src/lib.rs index 
60107281c22..10f4d5f3307 100644 --- a/cumulus/parachains/runtimes/glutton/glutton-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/glutton/glutton-westend/src/lib.rs @@ -99,7 +99,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { spec_name: create_runtime_str!("glutton-westend"), impl_name: create_runtime_str!("glutton-westend"), authoring_version: 1, - spec_version: 10000, + spec_version: 1_004_000, impl_version: 0, apis: RUNTIME_API_VERSIONS, transaction_version: 1, diff --git a/cumulus/parachains/runtimes/starters/seedling/src/lib.rs b/cumulus/parachains/runtimes/starters/seedling/src/lib.rs index 43c8f1488a6..7619ed19798 100644 --- a/cumulus/parachains/runtimes/starters/seedling/src/lib.rs +++ b/cumulus/parachains/runtimes/starters/seedling/src/lib.rs @@ -75,7 +75,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { spec_name: create_runtime_str!("seedling"), impl_name: create_runtime_str!("seedling"), authoring_version: 1, - spec_version: 10000, + spec_version: 1, impl_version: 0, apis: RUNTIME_API_VERSIONS, transaction_version: 2, diff --git a/cumulus/parachains/runtimes/testing/penpal/src/lib.rs b/cumulus/parachains/runtimes/testing/penpal/src/lib.rs index 1ddad31920a..b2a04272a4a 100644 --- a/cumulus/parachains/runtimes/testing/penpal/src/lib.rs +++ b/cumulus/parachains/runtimes/testing/penpal/src/lib.rs @@ -230,7 +230,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { spec_name: create_runtime_str!("penpal-parachain"), impl_name: create_runtime_str!("penpal-parachain"), authoring_version: 1, - spec_version: 10000, + spec_version: 1, impl_version: 0, apis: RUNTIME_API_VERSIONS, transaction_version: 1, diff --git a/cumulus/parachains/runtimes/testing/rococo-parachain/src/lib.rs b/cumulus/parachains/runtimes/testing/rococo-parachain/src/lib.rs index 965fb0d6adc..3a890d7953a 100644 --- a/cumulus/parachains/runtimes/testing/rococo-parachain/src/lib.rs +++ b/cumulus/parachains/runtimes/testing/rococo-parachain/src/lib.rs @@ -106,7 +106,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { spec_name: create_runtime_str!("test-parachain"), impl_name: create_runtime_str!("test-parachain"), authoring_version: 1, - spec_version: 10000, + spec_version: 1_004_000, impl_version: 0, apis: RUNTIME_API_VERSIONS, transaction_version: 6, diff --git a/polkadot/runtime/rococo/src/lib.rs b/polkadot/runtime/rococo/src/lib.rs index 675e0a20b2b..12388da1868 100644 --- a/polkadot/runtime/rococo/src/lib.rs +++ b/polkadot/runtime/rococo/src/lib.rs @@ -150,7 +150,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { spec_name: create_runtime_str!("rococo"), impl_name: create_runtime_str!("parity-rococo-v2.0"), authoring_version: 0, - spec_version: 10020, + spec_version: 1_004_000, impl_version: 0, apis: RUNTIME_API_VERSIONS, transaction_version: 22, diff --git a/polkadot/runtime/westend/src/lib.rs b/polkadot/runtime/westend/src/lib.rs index 29183fdfe00..6bbdb2db098 100644 --- a/polkadot/runtime/westend/src/lib.rs +++ b/polkadot/runtime/westend/src/lib.rs @@ -145,7 +145,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { spec_name: create_runtime_str!("westend"), impl_name: create_runtime_str!("parity-westend"), authoring_version: 2, - spec_version: 10020, + spec_version: 1_004_000, impl_version: 0, apis: RUNTIME_API_VERSIONS, transaction_version: 22, -- GitLab From dc69dbba728bf447bb447746e41a86367d939030 Mon Sep 17 00:00:00 2001 From: Maciej Date: Mon, 27 Nov 2023 09:30:13 +0000 Subject: [PATCH 128/199] Zombienet tests - disputes on finalized blocks (#2184) **Overview:** 
Adding an extra malus variant focusing on disputing finalized blocks. It will: - wrap around approval-voting - listen to `OverseerSignal::BlockFinalized` and when encountered start a dispute for the `dispute_offset`th ancestor - simply pass through all other messages and signals Add zombienet tests testing various edgecases: - disputing freshly finalized blocks - disputing stale finalized blocks - disputing eagerly pruned finalized blocks (might be separate PR) **TODO:** - [x] Register new malus variant - [x] Simple pass through wrapper (approval-voting) - [x] Simple network definition - [x] Listen to block finalizations - [x] Fetch ancestor hash - [x] Fetch session index - [x] Fetch candidate - [x] Construct and send dispute message - [x] zndsl test 1 checking that disputes on fresh finalizations resolve valid Closes #1365 - [x] zndsl test 2 checking that disputes for too old finalized blocks are not possible Closes #1364 - [ ] zndsl test 3 checking that disputes for candidates with eagerly pruned relay parent state are handled correctly #1359 (deferred to a separate PR) - [x] Unit tests for new malus variant (testing cli etc) - [x] Clean/streamline error handling - [ ] ~~Ensure it tests properly on session boundaries~~ --------- Co-authored-by: Javier Viola Co-authored-by: Marcin S. Co-authored-by: Tsvetomir Dimitrov --- .gitlab/pipeline/zombienet/polkadot.yml | 16 ++ polkadot/node/malus/src/malus.rs | 46 +++ polkadot/node/malus/src/variants/common.rs | 4 +- .../variants/dispute_finalized_candidates.rs | 265 ++++++++++++++++++ polkadot/node/malus/src/variants/mod.rs | 2 + .../src/variants/suggest_garbage_candidate.rs | 2 +- .../0007-dispute-freshly-finalized.toml | 40 +++ .../0007-dispute-freshly-finalized.zndsl | 29 ++ .../0008-dispute-old-finalized.toml | 40 +++ .../0008-dispute-old-finalized.zndsl | 21 ++ 10 files changed, 462 insertions(+), 3 deletions(-) create mode 100644 polkadot/node/malus/src/variants/dispute_finalized_candidates.rs create mode 100644 polkadot/zombienet_tests/functional/0007-dispute-freshly-finalized.toml create mode 100644 polkadot/zombienet_tests/functional/0007-dispute-freshly-finalized.zndsl create mode 100644 polkadot/zombienet_tests/functional/0008-dispute-old-finalized.toml create mode 100644 polkadot/zombienet_tests/functional/0008-dispute-old-finalized.zndsl diff --git a/.gitlab/pipeline/zombienet/polkadot.yml b/.gitlab/pipeline/zombienet/polkadot.yml index 995dd982532..d1f3a201c80 100644 --- a/.gitlab/pipeline/zombienet/polkadot.yml +++ b/.gitlab/pipeline/zombienet/polkadot.yml @@ -115,6 +115,22 @@ zombienet-polkadot-functional-0006-parachains-max-tranche0: --local-dir="${LOCAL_DIR}/functional" --test="0006-parachains-max-tranche0.zndsl" +zombienet-polkadot-functional-0007-dispute-freshly-finalized: + extends: + - .zombienet-polkadot-common + script: + - /home/nonroot/zombie-net/scripts/ci/run-test-local-env-manager.sh + --local-dir="${LOCAL_DIR}/functional" + --test="0007-dispute-freshly-finalized.zndsl" + +zombienet-polkadot-functional-0008-dispute-old-finalized: + extends: + - .zombienet-polkadot-common + script: + - /home/nonroot/zombie-net/scripts/ci/run-test-local-env-manager.sh + --local-dir="${LOCAL_DIR}/functional" + --test="0008-dispute-old-finalized.zndsl" + zombienet-polkadot-smoke-0001-parachains-smoke-test: extends: - .zombienet-polkadot-common diff --git a/polkadot/node/malus/src/malus.rs b/polkadot/node/malus/src/malus.rs index 69dd7c869fc..b8a83e54d4f 100644 --- a/polkadot/node/malus/src/malus.rs +++ b/polkadot/node/malus/src/malus.rs 
@@ -36,6 +36,8 @@ enum NemesisVariant { BackGarbageCandidate(BackGarbageCandidateOptions), /// Delayed disputing of ancestors that are perfectly fine. DisputeAncestor(DisputeAncestorOptions), + /// Delayed disputing of finalized candidates. + DisputeFinalizedCandidates(DisputeFinalizedCandidatesOptions), } #[derive(Debug, Parser)] @@ -80,6 +82,15 @@ impl MalusCli { finality_delay, )? }, + NemesisVariant::DisputeFinalizedCandidates(opts) => { + let DisputeFinalizedCandidatesOptions { dispute_offset, cli } = opts; + + polkadot_cli::run_node( + cli, + DisputeFinalizedCandidates { dispute_offset }, + finality_delay, + )? + }, } Ok(()) } @@ -184,4 +195,39 @@ mod tests { assert!(run.cli.run.base.bob); }); } + + #[test] + fn dispute_finalized_candidates_works() { + let cli = MalusCli::try_parse_from(IntoIterator::into_iter([ + "malus", + "dispute-finalized-candidates", + "--bob", + ])) + .unwrap(); + assert_matches::assert_matches!(cli, MalusCli { + variant: NemesisVariant::DisputeFinalizedCandidates(run), + .. + } => { + assert!(run.cli.run.base.bob); + }); + } + + #[test] + fn dispute_finalized_offset_value_works() { + let cli = MalusCli::try_parse_from(IntoIterator::into_iter([ + "malus", + "dispute-finalized-candidates", + "--dispute-offset", + "13", + "--bob", + ])) + .unwrap(); + assert_matches::assert_matches!(cli, MalusCli { + variant: NemesisVariant::DisputeFinalizedCandidates(opts), + .. + } => { + assert_eq!(opts.dispute_offset, 13); // This line checks that dispute_offset is correctly set to 13 + assert!(opts.cli.run.base.bob); + }); + } } diff --git a/polkadot/node/malus/src/variants/common.rs b/polkadot/node/malus/src/variants/common.rs index 20b6654638e..92264cd653d 100644 --- a/polkadot/node/malus/src/variants/common.rs +++ b/polkadot/node/malus/src/variants/common.rs @@ -14,7 +14,7 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . -//! Implements common code for nemesis. Currently, only `FakeValidationResult` +//! Implements common code for nemesis. Currently, only `ReplaceValidationResult` //! interceptor is implemented. use crate::{ interceptor::*, @@ -188,7 +188,7 @@ where let _candidate_descriptor = candidate_descriptor.clone(); let mut subsystem_sender = subsystem_sender.clone(); let (sender, receiver) = std::sync::mpsc::channel(); - self.spawner.spawn_blocking( + self.spawner.spawn( "malus-get-validation-data", Some("malus"), Box::pin(async move { diff --git a/polkadot/node/malus/src/variants/dispute_finalized_candidates.rs b/polkadot/node/malus/src/variants/dispute_finalized_candidates.rs new file mode 100644 index 00000000000..113ab026879 --- /dev/null +++ b/polkadot/node/malus/src/variants/dispute_finalized_candidates.rs @@ -0,0 +1,265 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! A malicious node variant that attempts to dispute finalized candidates. +//! +//! 
This malus variant behaves honestly in backing and approval voting. +//! The maliciousness comes from emitting an extra dispute statement on top of the other ones. +//! +//! Some extra quirks which generally should be insignificant: +//! - The malus node will not dispute at session boundaries +//! - The malus node will not dispute blocks it backed itself +//! - Be cautious about the size of the network to make sure disputes are not auto-confirmed +//! (7 validators is the smallest network size as it needs [(7-1)//3]+1 = 3 votes to get +//! confirmed but it only gets 1 from backing and 1 from malus so 2 in total) +//! +//! +//! Attention: For usage with `zombienet` only! + +#![allow(missing_docs)] + +use futures::channel::oneshot; +use polkadot_cli::{ + prepared_overseer_builder, + service::{ + AuthorityDiscoveryApi, AuxStore, BabeApi, Block, Error, HeaderBackend, Overseer, + OverseerConnector, OverseerGen, OverseerGenArgs, OverseerHandle, ParachainHost, + ProvideRuntimeApi, + }, + Cli, +}; +use polkadot_node_subsystem::{messages::ApprovalVotingMessage, SpawnGlue}; +use polkadot_node_subsystem_types::{DefaultSubsystemClient, OverseerSignal}; +use polkadot_node_subsystem_util::request_candidate_events; +use polkadot_primitives::CandidateEvent; +use sp_core::traits::SpawnNamed; + +// Filter wrapping related types. +use crate::{interceptor::*, shared::MALUS}; + +use std::sync::Arc; + +/// Wraps around ApprovalVotingSubsystem and replaces it. +/// Listens to finalization messages and if possible triggers disputes for their ancestors. +#[derive(Clone)] +struct AncestorDisputer { + spawner: Spawner, //stores the actual ApprovalVotingSubsystem spawner + dispute_offset: u32, /* relative depth of the disputed block to the finalized block, + * 0=finalized, 1=parent of finalized etc */ +} + +impl MessageInterceptor for AncestorDisputer +where + Sender: overseer::ApprovalVotingSenderTrait + Clone + Send + 'static, + Spawner: overseer::gen::Spawner + Clone + 'static, +{ + type Message = ApprovalVotingMessage; + + /// Intercept incoming `OverseerSignal::BlockFinalized' and pass the rest as normal. + fn intercept_incoming( + &self, + subsystem_sender: &mut Sender, + msg: FromOrchestra, + ) -> Option> { + match msg { + FromOrchestra::Communication { msg } => Some(FromOrchestra::Communication { msg }), + FromOrchestra::Signal(OverseerSignal::BlockFinalized( + finalized_hash, + finalized_height, + )) => { + gum::debug!( + target: MALUS, + "😈 Block Finalization Interception! Block: {:?}", finalized_hash, + ); + + //Ensure that the chain is long enough for the target ancestor to exist + if finalized_height <= self.dispute_offset { + return Some(FromOrchestra::Signal(OverseerSignal::BlockFinalized( + finalized_hash, + finalized_height, + ))) + } + + let dispute_offset = self.dispute_offset; + let mut sender = subsystem_sender.clone(); + self.spawner.spawn( + "malus-dispute-finalized-block", + Some("malus"), + Box::pin(async move { + // Query chain for the block hash at the target depth + let (tx, rx) = oneshot::channel(); + sender + .send_message(ChainApiMessage::FinalizedBlockHash( + finalized_height - dispute_offset, + tx, + )) + .await; + let disputable_hash = match rx.await { + Ok(Ok(Some(hash))) => { + gum::debug!( + target: MALUS, + "😈 Time to search {:?}`th ancestor! Block: {:?}", dispute_offset, hash, + ); + hash + }, + _ => { + gum::debug!( + target: MALUS, + "😈 Seems the target is not yet finalized! Nothing to dispute." 
+ ); + return // Early return from the async block + }, + }; + + // Fetch all candidate events for the target ancestor + let events = + request_candidate_events(disputable_hash, &mut sender).await.await; + let events = match events { + Ok(Ok(events)) => events, + Ok(Err(e)) => { + gum::error!( + target: MALUS, + "😈 Failed to fetch candidate events: {:?}", e + ); + return // Early return from the async block + }, + Err(e) => { + gum::error!( + target: MALUS, + "😈 Failed to fetch candidate events: {:?}", e + ); + return // Early return from the async block + }, + }; + + // Extract a token candidate from the events to use for disputing + let event = events.iter().find(|event| { + matches!(event, CandidateEvent::CandidateIncluded(_, _, _, _)) + }); + let candidate = match event { + Some(CandidateEvent::CandidateIncluded(candidate, _, _, _)) => + candidate, + _ => { + gum::error!( + target: MALUS, + "😈 No candidate included event found! Nothing to dispute." + ); + return // Early return from the async block + }, + }; + + // Extract the candidate hash from the candidate + let candidate_hash = candidate.hash(); + + // Fetch the session index for the candidate + let (tx, rx) = oneshot::channel(); + sender + .send_message(RuntimeApiMessage::Request( + disputable_hash, + RuntimeApiRequest::SessionIndexForChild(tx), + )) + .await; + let session_index = match rx.await { + Ok(Ok(session_index)) => session_index, + _ => { + gum::error!( + target: MALUS, + "😈 Failed to fetch session index for candidate." + ); + return // Early return from the async block + }, + }; + gum::info!( + target: MALUS, + "😈 Disputing candidate with hash: {:?} in session {:?}", candidate_hash, session_index, + ); + + // Start dispute + sender.send_unbounded_message( + DisputeCoordinatorMessage::IssueLocalStatement( + session_index, + candidate_hash, + candidate.clone(), + false, // indicates candidate is invalid -> dispute starts + ), + ); + }), + ); + + // Passthrough the finalization signal as usual (using it as hook only) + Some(FromOrchestra::Signal(OverseerSignal::BlockFinalized( + finalized_hash, + finalized_height, + ))) + }, + FromOrchestra::Signal(signal) => Some(FromOrchestra::Signal(signal)), + } + } +} + +//---------------------------------------------------------------------------------- + +#[derive(Debug, clap::Parser)] +#[clap(rename_all = "kebab-case")] +#[allow(missing_docs)] +pub struct DisputeFinalizedCandidatesOptions { + /// relative depth of the disputed block to the finalized block, 0=finalized, 1=parent of + /// finalized etc + #[clap(long, ignore_case = true, default_value_t = 2, value_parser = clap::value_parser!(u32).range(0..=50))] + pub dispute_offset: u32, + + #[clap(flatten)] + pub cli: Cli, +} + +/// DisputeFinalizedCandidates implementation wrapper which implements `OverseerGen` glue. 
+pub(crate) struct DisputeFinalizedCandidates { + /// relative depth of the disputed block to the finalized block, 0=finalized, 1=parent of + /// finalized etc + pub dispute_offset: u32, +} + +impl OverseerGen for DisputeFinalizedCandidates { + fn generate( + &self, + connector: OverseerConnector, + args: OverseerGenArgs<'_, Spawner, RuntimeClient>, + ) -> Result< + (Overseer, Arc>>, OverseerHandle), + Error, + > + where + RuntimeClient: 'static + ProvideRuntimeApi + HeaderBackend + AuxStore, + RuntimeClient::Api: ParachainHost + BabeApi + AuthorityDiscoveryApi, + Spawner: 'static + SpawnNamed + Clone + Unpin, + { + gum::info!( + target: MALUS, + "😈 Started Malus node that disputes finalized blocks after they are {:?} finalizations deep.", + &self.dispute_offset, + ); + + let ancestor_disputer = AncestorDisputer { + spawner: SpawnGlue(args.spawner.clone()), + dispute_offset: self.dispute_offset, + }; + + prepared_overseer_builder(args)? + .replace_approval_voting(move |cb| InterceptedSubsystem::new(cb, ancestor_disputer)) + .build_with_connector(connector) + .map_err(|e| e.into()) + } +} diff --git a/polkadot/node/malus/src/variants/mod.rs b/polkadot/node/malus/src/variants/mod.rs index 3789f33ac98..bb4971c145c 100644 --- a/polkadot/node/malus/src/variants/mod.rs +++ b/polkadot/node/malus/src/variants/mod.rs @@ -18,11 +18,13 @@ mod back_garbage_candidate; mod common; +mod dispute_finalized_candidates; mod dispute_valid_candidates; mod suggest_garbage_candidate; pub(crate) use self::{ back_garbage_candidate::{BackGarbageCandidateOptions, BackGarbageCandidates}, + dispute_finalized_candidates::{DisputeFinalizedCandidates, DisputeFinalizedCandidatesOptions}, dispute_valid_candidates::{DisputeAncestorOptions, DisputeValidCandidates}, suggest_garbage_candidate::{SuggestGarbageCandidateOptions, SuggestGarbageCandidates}, }; diff --git a/polkadot/node/malus/src/variants/suggest_garbage_candidate.rs b/polkadot/node/malus/src/variants/suggest_garbage_candidate.rs index cf0ff5f809d..817afb58437 100644 --- a/polkadot/node/malus/src/variants/suggest_garbage_candidate.rs +++ b/polkadot/node/malus/src/variants/suggest_garbage_candidate.rs @@ -113,7 +113,7 @@ where let (sender, receiver) = std::sync::mpsc::channel(); let mut new_sender = subsystem_sender.clone(); let _candidate = candidate.clone(); - self.spawner.spawn_blocking( + self.spawner.spawn( "malus-get-validation-data", Some("malus"), Box::pin(async move { diff --git a/polkadot/zombienet_tests/functional/0007-dispute-freshly-finalized.toml b/polkadot/zombienet_tests/functional/0007-dispute-freshly-finalized.toml new file mode 100644 index 00000000000..69eb0804d8c --- /dev/null +++ b/polkadot/zombienet_tests/functional/0007-dispute-freshly-finalized.toml @@ -0,0 +1,40 @@ +[settings] +timeout = 1000 + +[relaychain.genesis.runtimeGenesis.patch.configuration.config] + max_validators_per_core = 1 + needed_approvals = 1 + +[relaychain] +default_image = "{{ZOMBIENET_INTEGRATION_TEST_IMAGE}}" +chain = "rococo-local" +default_command = "polkadot" + +[relaychain.default_resources] +limits = { memory = "4G", cpu = "2" } +requests = { memory = "2G", cpu = "1" } + + [[relaychain.node_groups]] + name = "honest" + count = 6 + args = ["-lparachain=debug"] + + [[relaychain.nodes]] + image = "{{MALUS_IMAGE}}" + name = "malus" + command = "malus dispute-finalized-candidates" + args = [ "--alice", "-lparachain=debug,MALUS=trace", "--dispute-offset=3" ] + +[[parachains]] +id = 2000 + + [parachains.collator] + image = "{{COL_IMAGE}}" + name = "collator" + command = 
"undying-collator" + args = ["-lparachain=debug"] + +[types.Header] +number = "u64" +parent_hash = "Hash" +post_state = "Hash" diff --git a/polkadot/zombienet_tests/functional/0007-dispute-freshly-finalized.zndsl b/polkadot/zombienet_tests/functional/0007-dispute-freshly-finalized.zndsl new file mode 100644 index 00000000000..62d5a9768f9 --- /dev/null +++ b/polkadot/zombienet_tests/functional/0007-dispute-freshly-finalized.zndsl @@ -0,0 +1,29 @@ +Description: Test if disputes triggered on finalized blocks within scope always end as valid. +Network: ./0007-dispute-freshly-finalized.toml +Creds: config + +# Check authority status and peers. +malus: reports node_roles is 4 +honest: reports node_roles is 4 + +# Ensure parachains are registered. +honest: parachain 2000 is registered within 30 seconds + +# Ensure parachains made progress. +honest: parachain 2000 block height is at least 10 within 200 seconds + +# Ensure that malus is already attempting to dispute +malus: log line contains "😈 Disputing candidate with hash:" within 180 seconds + +# Check if disputes are initiated and concluded. +honest: reports polkadot_parachain_candidate_disputes_total is at least 2 within 100 seconds +honest: reports polkadot_parachain_candidate_dispute_concluded{validity="valid"} is at least 2 within 100 seconds +honest: reports polkadot_parachain_candidate_dispute_concluded{validity="invalid"} is 0 within 100 seconds + +# Check lag - approval +honest: reports polkadot_parachain_approval_checking_finality_lag is 0 + +# Check lag - dispute conclusion +honest: reports polkadot_parachain_disputes_finality_lag is 0 + + diff --git a/polkadot/zombienet_tests/functional/0008-dispute-old-finalized.toml b/polkadot/zombienet_tests/functional/0008-dispute-old-finalized.toml new file mode 100644 index 00000000000..1ea385c3a42 --- /dev/null +++ b/polkadot/zombienet_tests/functional/0008-dispute-old-finalized.toml @@ -0,0 +1,40 @@ +[settings] +timeout = 1000 + +[relaychain.genesis.runtimeGenesis.patch.configuration.config] + max_validators_per_core = 1 + needed_approvals = 1 + +[relaychain] +default_image = "{{ZOMBIENET_INTEGRATION_TEST_IMAGE}}" +chain = "rococo-local" +default_command = "polkadot" + +[relaychain.default_resources] +limits = { memory = "4G", cpu = "2" } +requests = { memory = "2G", cpu = "1" } + + [[relaychain.node_groups]] + name = "honest" + count = 6 + args = ["-lparachain=debug"] + + [[relaychain.nodes]] + image = "{{MALUS_IMAGE}}" + name = "malus" + command = "malus dispute-finalized-candidates" + args = [ "--alice", "-lparachain=debug,MALUS=trace", "--dispute-offset=14" ] + +[[parachains]] +id = 2000 + + [parachains.collator] + image = "{{COL_IMAGE}}" + name = "collator" + command = "undying-collator" + args = ["-lparachain=debug"] + +[types.Header] +number = "u64" +parent_hash = "Hash" +post_state = "Hash" diff --git a/polkadot/zombienet_tests/functional/0008-dispute-old-finalized.zndsl b/polkadot/zombienet_tests/functional/0008-dispute-old-finalized.zndsl new file mode 100644 index 00000000000..b30c5801a1d --- /dev/null +++ b/polkadot/zombienet_tests/functional/0008-dispute-old-finalized.zndsl @@ -0,0 +1,21 @@ +Description: Test if disputes triggered on finalized blocks out of scope never get to be confirmed and concluded. +Network: ./0008-dispute-old-finalized.toml +Creds: config + +# Check authority status and peers. +malus: reports node_roles is 4 +honest: reports node_roles is 4 + + +# Ensure parachains are registered. 
+honest: parachain 2000 is registered within 30 seconds + +# Ensure parachains made progress. +honest: parachain 2000 block height is at least 20 within 300 seconds + +# Ensure that malus is already attempting to dispute +malus: log line contains "😈 Disputing candidate with hash:" within 180 seconds + +# Ensure that honest nodes don't participate and conclude any disputes +honest: count of log lines containing "Dispute on candidate concluded" is 0 within 100 seconds + -- GitLab From 0317501758c97c940d3ceec165f22f8b54dee047 Mon Sep 17 00:00:00 2001 From: Javier Bullrich Date: Mon, 27 Nov 2023 11:33:36 +0100 Subject: [PATCH 129/199] Decomissioned PR-Custom-Review (#2503) This PR decomissions [`PR-Custom-Review`](https://github.com/paritytech/pr-custom-review) in replacement for [`Review-bot`](https://github.com/paritytech/review-bot). --- .github/pr-custom-review.yml | 63 -------------------------- .github/workflows/pr-custom-review.yml | 46 ------------------- 2 files changed, 109 deletions(-) delete mode 100644 .github/pr-custom-review.yml delete mode 100644 .github/workflows/pr-custom-review.yml diff --git a/.github/pr-custom-review.yml b/.github/pr-custom-review.yml deleted file mode 100644 index ac13d862a4a..00000000000 --- a/.github/pr-custom-review.yml +++ /dev/null @@ -1,63 +0,0 @@ -# 🔒 PROTECTED: Changes to locks-review-team should be approved by the current locks-review-team -locks-review-team: locks-review -team-leads-team: polkadot-review -action-review-team: ci - -rules: - - name: CI files - check_type: changed_files - condition: - include: ^\.gitlab-ci\.yml|^docker/.*|^\.github/.*|^\.gitlab/.*|^\.config/nextest.toml|^\.cargo/.* - exclude: ^\.gitlab/pipeline/zombienet.* - min_approvals: 2 - teams: - - ci - - release-engineering - - - name: Core developers - check_type: changed_files - condition: - include: .* - # excluding files from 'Runtime files' and 'CI files' rules - exclude: ^polkadot/runtime/(kusama|polkadot)/src/[^/]+\.rs$|^cumulus/parachains/runtimes/assets/(asset-hub-kusama|asset-hub-polkadot)/src/[^/]+\.rs$|^cumulus/parachains/runtimes/bridge-hubs/(bridge-hub-kusama|bridge-hub-polkadot)/src/[^/]+\.rs$|^cumulus/parachains/runtimes/collectives/collectives-polkadot/src/[^/]+\.rs$|^cumulus/parachains/common/src/[^/]+\.rs$|^substrate/frame/(?!.*(nfts/.*|uniques/.*|babe/.*|grandpa/.*|beefy|merkle-mountain-range/.*|contracts/.*|election|nomination-pools/.*|staking/.*|aura/.*))|^polkadot/runtime/(kusama|polkadot)/src/[^/]+\.rs$|^\.gitlab-ci\.yml|^docker/.*|^\.github/.*|^\.gitlab/.*|^\.config/nextest.toml|^\.cargo/.* - min_approvals: 2 - teams: - - core-devs - - # cumulus - - name: Runtime files cumulus - check_type: changed_files - condition: ^cumulus/parachains/runtimes/assets/(asset-hub-kusama|asset-hub-polkadot)/src/[^/]+\.rs$|^cumulus/parachains/runtimes/bridge-hubs/(bridge-hub-kusama|bridge-hub-polkadot)/src/[^/]+\.rs$|^cumulus/parachains/runtimes/collectives/collectives-polkadot/src/[^/]+\.rs$|^cumulus/parachains/common/src/[^/]+\.rs$ - all_distinct: - - min_approvals: 1 - teams: - - locks-review - - min_approvals: 1 - teams: - - polkadot-review - - # if there are any changes in the bridges subtree (in case of backport changes back to bridges repo) - - name: Bridges subtree files - check_type: changed_files - condition: ^bridges/.* - min_approvals: 1 - teams: - - bridges-core - - # substrate - - - name: FRAME coders substrate - check_type: changed_files - condition: - include: 
^substrate/frame/(?!.*(nfts/.*|uniques/.*|babe/.*|grandpa/.*|beefy|merkle-mountain-range/.*|contracts/.*|election|nomination-pools/.*|staking/.*|aura/.*)) - all: - - min_approvals: 2 - teams: - - core-devs - - min_approvals: 1 - teams: - - frame-coders - -prevent-review-request: - teams: - - core-devs diff --git a/.github/workflows/pr-custom-review.yml b/.github/workflows/pr-custom-review.yml deleted file mode 100644 index 4e0809cbfdc..00000000000 --- a/.github/workflows/pr-custom-review.yml +++ /dev/null @@ -1,46 +0,0 @@ -name: Assign reviewers - -on: - pull_request: - branches: - - master - - main - types: - - opened - - reopened - - synchronize - - review_requested - - review_request_removed - - ready_for_review - - converted_to_draft - pull_request_review: - merge_group: - -jobs: - pr-custom-review: - runs-on: ubuntu-latest - steps: - - name: Skip merge queue - if: ${{ contains(github.ref, 'gh-readonly-queue') }} - run: exit 0 - - name: Skip if pull request is in Draft - # `if: github.event.pull_request.draft == true` should be kept here, at - # the step level, rather than at the job level. The latter is not - # recommended because when the PR is moved from "Draft" to "Ready to - # review" the workflow will immediately be passing (since it was skipped), - # even though it hasn't actually ran, since it takes a few seconds for - # the workflow to start. This is also disclosed in: - # https://github.community/t/dont-run-actions-on-draft-pull-requests/16817/17 - # That scenario would open an opportunity for the check to be bypassed: - # 1. Get your PR approved - # 2. Move it to Draft - # 3. Push whatever commits you want - # 4. Move it to "Ready for review"; now the workflow is passing (it was - # skipped) and "Check reviews" is also passing (it won't be updated - # until the workflow is finished) - if: github.event.pull_request.draft == true - run: exit 1 - - name: pr-custom-review - uses: paritytech/pr-custom-review@master - with: - checks-reviews-api: http://pcr.parity-prod.parity.io/api/v1/check_reviews -- GitLab From 2610450a18e64079abfe98f0a5b57069bbb61009 Mon Sep 17 00:00:00 2001 From: Koute Date: Mon, 27 Nov 2023 19:40:27 +0900 Subject: [PATCH 130/199] Build the standard library crates when building the runtimes (#2217) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Our executor currently only supports the WASM MVP feature set, however nowadays when compiling WASM the Rust compiler has more features enabled by default. We do set the `-C target-cpu=mvp` flag to make sure that *our* code gets compiled in a way that is compatible with our executor, however this doesn't affect Rust's standard library crates (`std`, `core` and `alloc`) which are by default precompiled and still can make use of these extra features. So in this PR we force the compiler to also compile the standard library crates for us to make sure that they also only use the MVP features. I've added the `WASM_BUILD_STD` environment variable which can be used to disable this behavior if set to `0`. Unfortunately this *will* slow down the compile times when building runtimes, but there isn't much that we can do about that. 
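For a runtime crate nothing changes in how the Wasm binary is requested: the usual `build.rs` keeps working, and the builder now also compiles the standard library crates for the Wasm target. A rough sketch using the `WasmBuilder` API documented further down in this patch (setting `WASM_BUILD_STD=0` before the build falls back to the precompiled standard library):

```rust
// build.rs of a runtime crate -- unchanged by this PR. The builder itself now
// also compiles `core`, `alloc` and `std` for the Wasm target with only the
// MVP feature set; exporting WASM_BUILD_STD=0 opts out of that behaviour.
fn main() {
    #[cfg(feature = "std")]
    {
        substrate_wasm_builder::WasmBuilder::new()
            // Build the crate this `build.rs` is part of.
            .with_current_project()
            // Export the `heap_base` global, required by Substrate.
            .export_heap_base()
            // Import the memory provided at instantiation.
            .import_memory()
            .build();
    }
}
```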
Fixes https://github.com/paritytech/polkadot-sdk/issues/1755 --------- Co-authored-by: Bastian Köcher --- polkadot/parachain/src/lib.rs | 2 - substrate/utils/wasm-builder/README.md | 12 +- substrate/utils/wasm-builder/src/lib.rs | 126 +++--------- .../utils/wasm-builder/src/prerequisites.rs | 192 ++++++++---------- substrate/utils/wasm-builder/src/version.rs | 17 ++ .../utils/wasm-builder/src/wasm_project.rs | 20 ++ 6 files changed, 163 insertions(+), 206 deletions(-) diff --git a/polkadot/parachain/src/lib.rs b/polkadot/parachain/src/lib.rs index 913d887e4a8..bd75296bf83 100644 --- a/polkadot/parachain/src/lib.rs +++ b/polkadot/parachain/src/lib.rs @@ -14,8 +14,6 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . -#![warn(unused_crate_dependencies)] - //! Defines primitive types for creating or validating a parachain. //! //! When compiled with standard library support, this crate exports a `wasm` diff --git a/substrate/utils/wasm-builder/README.md b/substrate/utils/wasm-builder/README.md index db32f5cbc95..6dfc743816c 100644 --- a/substrate/utils/wasm-builder/README.md +++ b/substrate/utils/wasm-builder/README.md @@ -13,7 +13,7 @@ A project that should be compiled as a Wasm binary needs to: The `build.rs` file needs to contain the following code: -```rust +```rust,no_run fn main() { #[cfg(feature = "std")] { @@ -32,7 +32,7 @@ fn main() { As the final step, you need to add the following to your project: -```rust +```rust,ignore include!(concat!(env!("OUT_DIR"), "/wasm_binary.rs")); ``` @@ -63,11 +63,17 @@ By using environment variables, you can configure which Wasm binaries are built - `WASM_TARGET_DIRECTORY` - Will copy any build Wasm binary to the given directory. The path needs to be absolute. - `WASM_BUILD_TOOLCHAIN` - The toolchain that should be used to build the Wasm binaries. The format needs to be the same as used by cargo, e.g. `nightly-2020-02-20`. +- `WASM_BUILD_WORKSPACE_HINT` - Hint the workspace that is being built. This is normally not required as we walk up from + the target directory until we find a `Cargo.toml`. If the target directory is changed for + the build, this environment variable can be used to point to the actual workspace. +- `WASM_BUILD_STD` - Sets whether the Rust's standard library crates will also be built. This is necessary to make sure + the standard library crates only use the exact WASM feature set that our executor supports. + Enabled by default. - `CARGO_NET_OFFLINE` - If `true`, `--offline` will be passed to all processes launched to prevent network access. Useful in offline environments. Each project can be skipped individually by using the environment variable `SKIP_PROJECT_NAME_WASM_BUILD`. Where -`PROJECT_NAME` needs to be replaced by the name of the cargo project, e.g. `node-runtime` will be `NODE_RUNTIME`. +`PROJECT_NAME` needs to be replaced by the name of the cargo project, e.g. `kitchensink-runtime` will be `NODE_RUNTIME`. ## Prerequisites diff --git a/substrate/utils/wasm-builder/src/lib.rs b/substrate/utils/wasm-builder/src/lib.rs index c9011f97be7..cd31a8cba10 100644 --- a/substrate/utils/wasm-builder/src/lib.rs +++ b/substrate/utils/wasm-builder/src/lib.rs @@ -15,99 +15,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -//! # Wasm builder is a utility for building a project as a Wasm binary -//! -//! The Wasm builder is a tool that integrates the process of building the WASM binary of your -//! 
project into the main `cargo` build process. -//! -//! ## Project setup -//! -//! A project that should be compiled as a Wasm binary needs to: -//! -//! 1. Add a `build.rs` file. -//! 2. Add `wasm-builder` as dependency into `build-dependencies`. -//! -//! The `build.rs` file needs to contain the following code: -//! -//! ```no_run -//! use substrate_wasm_builder::WasmBuilder; -//! -//! fn main() { -//! WasmBuilder::new() -//! // Tell the builder to build the project (crate) this `build.rs` is part of. -//! .with_current_project() -//! // Make sure to export the `heap_base` global, this is required by Substrate -//! .export_heap_base() -//! // Build the Wasm file so that it imports the memory (need to be provided by at instantiation) -//! .import_memory() -//! // Build it. -//! .build() -//! } -//! ``` -//! -//! As the final step, you need to add the following to your project: -//! -//! ```ignore -//! include!(concat!(env!("OUT_DIR"), "/wasm_binary.rs")); -//! ``` -//! -//! This will include the generated Wasm binary as two constants `WASM_BINARY` and -//! `WASM_BINARY_BLOATY`. The former is a compact Wasm binary and the latter is the Wasm binary as -//! being generated by the compiler. Both variables have `Option<&'static [u8]>` as type. -//! -//! ### Feature -//! -//! Wasm builder supports to enable cargo features while building the Wasm binary. By default it -//! will enable all features in the wasm build that are enabled for the native build except the -//! `default` and `std` features. Besides that, wasm builder supports the special `runtime-wasm` -//! feature. This `runtime-wasm` feature will be enabled by the wasm builder when it compiles the -//! Wasm binary. If this feature is not present, it will not be enabled. -//! -//! ## Environment variables -//! -//! By using environment variables, you can configure which Wasm binaries are built and how: -//! -//! - `SKIP_WASM_BUILD` - Skips building any Wasm binary. This is useful when only native should be -//! recompiled. If this is the first run and there doesn't exist a Wasm binary, this will set both -//! variables to `None`. -//! - `WASM_BUILD_TYPE` - Sets the build type for building Wasm binaries. Supported values are -//! `release` or `debug`. By default the build type is equal to the build type used by the main -//! build. -//! - `FORCE_WASM_BUILD` - Can be set to force a Wasm build. On subsequent calls the value of the -//! variable needs to change. As wasm-builder instructs `cargo` to watch for file changes this -//! environment variable should only be required in certain circumstances. -//! - `WASM_BUILD_RUSTFLAGS` - Extend `RUSTFLAGS` given to `cargo build` while building the wasm -//! binary. -//! - `WASM_BUILD_NO_COLOR` - Disable color output of the wasm build. -//! - `WASM_TARGET_DIRECTORY` - Will copy any build Wasm binary to the given directory. The path -//! needs to be absolute. -//! - `WASM_BUILD_TOOLCHAIN` - The toolchain that should be used to build the Wasm binaries. The -//! format needs to be the same as used by cargo, e.g. `nightly-2020-02-20`. -//! - `WASM_BUILD_WORKSPACE_HINT` - Hint the workspace that is being built. This is normally not -//! required as we walk up from the target directory until we find a `Cargo.toml`. If the target -//! directory is changed for the build, this environment variable can be used to point to the -//! actual workspace. -//! - `CARGO_NET_OFFLINE` - If `true`, `--offline` will be passed to all processes launched to -//! prevent network access. Useful in offline environments. -//! 
-//! Each project can be skipped individually by using the environment variable -//! `SKIP_PROJECT_NAME_WASM_BUILD`. Where `PROJECT_NAME` needs to be replaced by the name of the -//! cargo project, e.g. `kitchensink-runtime` will be `NODE_RUNTIME`. -//! -//! ## Prerequisites: -//! -//! Wasm builder requires the following prerequisites for building the Wasm binary: -//! -//! - rust nightly + `wasm32-unknown-unknown` toolchain -//! -//! or -//! -//! - rust stable and version at least 1.68.0 + `wasm32-unknown-unknown` toolchain -//! -//! If a specific rust is installed with `rustup`, it is important that the wasm target is -//! installed as well. For example if installing the rust from 20.02.2020 using `rustup -//! install nightly-2020-02-20`, the wasm target needs to be installed as well `rustup target add -//! wasm32-unknown-unknown --toolchain nightly-2020-02-20`. +#![doc = include_str!("../README.md")] use std::{ env, fs, @@ -158,6 +66,9 @@ const FORCE_WASM_BUILD_ENV: &str = "FORCE_WASM_BUILD"; /// Environment variable that hints the workspace we are building. const WASM_BUILD_WORKSPACE_HINT: &str = "WASM_BUILD_WORKSPACE_HINT"; +/// Environment variable to set whether we'll build `core`/`std`. +const WASM_BUILD_STD: &str = "WASM_BUILD_STD"; + /// Write to the given `file` if the `content` is different. fn write_file_if_changed(file: impl AsRef, content: impl AsRef) { if fs::read_to_string(file.as_ref()).ok().as_deref() != Some(content.as_ref()) { @@ -282,6 +193,12 @@ impl CargoCommand { self.version } + /// Returns whether this version of the toolchain supports nightly features. + fn supports_nightly_features(&self) -> bool { + self.version.map_or(false, |version| version.is_nightly) || + env::var("RUSTC_BOOTSTRAP").is_ok() + } + /// Check if the supplied cargo command supports our Substrate wasm environment. /// /// This means that either the cargo version is at minimum 1.68.0 or this is a nightly cargo. @@ -332,3 +249,26 @@ impl std::ops::Deref for CargoCommandVersioned { fn color_output_enabled() -> bool { env::var(crate::WASM_BUILD_NO_COLOR).is_err() } + +/// Fetches a boolean environment variable. Will exit the process if the value is invalid. +fn get_bool_environment_variable(name: &str) -> Option { + let value = env::var_os(name)?; + + // We're comparing `OsString`s here so we can't use a `match`. + if value == "1" { + Some(true) + } else if value == "0" { + Some(false) + } else { + build_helper::warning!( + "the '{}' environment variable has an invalid value; it must be either '1' or '0'", + name + ); + std::process::exit(1); + } +} + +/// Returns whether we need to also compile the standard library when compiling the runtime. +fn build_std_required() -> bool { + crate::get_bool_environment_variable(crate::WASM_BUILD_STD).unwrap_or(true) +} diff --git a/substrate/utils/wasm-builder/src/prerequisites.rs b/substrate/utils/wasm-builder/src/prerequisites.rs index 8e81e6774fa..2cdbdd2798e 100644 --- a/substrate/utils/wasm-builder/src/prerequisites.rs +++ b/substrate/utils/wasm-builder/src/prerequisites.rs @@ -47,14 +47,11 @@ pub(crate) fn check() -> Result { check_wasm_toolchain_installed(cargo_command) } -/// Create the project that will be used to check that the wasm toolchain is installed and to -/// extract the rustc version. 
-fn create_check_toolchain_project(project_dir: &Path) { - let lib_rs_file = project_dir.join("src/lib.rs"); - let main_rs_file = project_dir.join("src/main.rs"); - let build_rs_file = project_dir.join("build.rs"); - let manifest_path = project_dir.join("Cargo.toml"); +/// Creates a minimal dummy crate at the given path and returns the manifest path. +fn create_minimal_crate(project_dir: &Path) -> std::path::PathBuf { + fs::create_dir_all(project_dir.join("src")).expect("Creating src dir does not fail; qed"); + let manifest_path = project_dir.join("Cargo.toml"); write_file_if_changed( &manifest_path, r#" @@ -62,120 +59,99 @@ fn create_check_toolchain_project(project_dir: &Path) { name = "wasm-test" version = "1.0.0" edition = "2021" - build = "build.rs" - - [lib] - name = "wasm_test" - crate-type = ["cdylib"] [workspace] "#, ); - write_file_if_changed(lib_rs_file, "pub fn test() {}"); - - // We want to know the rustc version of the rustc that is being used by our cargo command. - // The cargo command is determined by some *very* complex algorithm to find the cargo command - // that supports nightly. - // The best solution would be if there is a `cargo rustc --version` command, which sadly - // doesn't exists. So, the only available way of getting the rustc version is to build a project - // and capture the rustc version in this build process. This `build.rs` is exactly doing this. - // It gets the rustc version by calling `rustc --version` and exposing it in the `RUSTC_VERSION` - // environment variable. - write_file_if_changed( - build_rs_file, - r#" - fn main() { - let rustc_cmd = std::env::var("RUSTC").ok().unwrap_or_else(|| "rustc".into()); - - let rustc_version = std::process::Command::new(rustc_cmd) - .arg("--version") - .output() - .ok() - .and_then(|o| String::from_utf8(o.stdout).ok()); - - println!( - "cargo:rustc-env=RUSTC_VERSION={}", - rustc_version.unwrap_or_else(|| "unknown rustc version".into()), - ); - } - "#, - ); - // Just prints the `RURSTC_VERSION` environment variable that is being created by the - // `build.rs` script. - write_file_if_changed( - main_rs_file, - r#" - fn main() { - println!("{}", env!("RUSTC_VERSION")); - } - "#, - ); + + write_file_if_changed(project_dir.join("src/main.rs"), "fn main() {}"); + manifest_path } fn check_wasm_toolchain_installed( cargo_command: CargoCommand, ) -> Result { let temp = tempdir().expect("Creating temp dir does not fail; qed"); - fs::create_dir_all(temp.path().join("src")).expect("Creating src dir does not fail; qed"); - create_check_toolchain_project(temp.path()); - - let err_msg = print_error_message("Rust WASM toolchain not installed, please install it!"); - let manifest_path = temp.path().join("Cargo.toml").display().to_string(); - - let mut build_cmd = cargo_command.command(); - // Chdir to temp to avoid including project's .cargo/config.toml - // by accident - it can happen in some CI environments. - build_cmd.current_dir(&temp); - build_cmd.args(&[ - "build", - "--target=wasm32-unknown-unknown", - "--manifest-path", - &manifest_path, - ]); + let manifest_path = create_minimal_crate(temp.path()).display().to_string(); + + let prepare_command = |subcommand| { + let mut cmd = cargo_command.command(); + // Chdir to temp to avoid including project's .cargo/config.toml + // by accident - it can happen in some CI environments. 
+ cmd.current_dir(&temp); + cmd.args(&[ + subcommand, + "--target=wasm32-unknown-unknown", + "--manifest-path", + &manifest_path, + ]); + + if super::color_output_enabled() { + cmd.arg("--color=always"); + } - if super::color_output_enabled() { - build_cmd.arg("--color=always"); + // manually set the `CARGO_TARGET_DIR` to prevent a cargo deadlock + let target_dir = temp.path().join("target").display().to_string(); + cmd.env("CARGO_TARGET_DIR", &target_dir); + + // Make sure the host's flags aren't used here, e.g. if an alternative linker is specified + // in the RUSTFLAGS then the check we do here will break unless we clear these. + cmd.env_remove("CARGO_ENCODED_RUSTFLAGS"); + cmd.env_remove("RUSTFLAGS"); + cmd + }; + + let err_msg = + print_error_message("Rust WASM toolchain is not properly installed; please install it!"); + let build_result = prepare_command("build").output().map_err(|_| err_msg.clone())?; + if !build_result.status.success() { + return match String::from_utf8(build_result.stderr) { + Ok(ref err) if err.contains("the `wasm32-unknown-unknown` target may not be installed") => + Err(print_error_message("Cannot compile the WASM runtime: the `wasm32-unknown-unknown` target is not installed!\n\ + You can install it with `rustup target add wasm32-unknown-unknown` if you're using `rustup`.")), + + // Apparently this can happen when we're running on a non Tier 1 platform. + Ok(ref err) if err.contains("linker `rust-lld` not found") => + Err(print_error_message("Cannot compile the WASM runtime: `rust-lld` not found!")), + + Ok(ref err) => Err(format!( + "{}\n\n{}\n{}\n{}{}\n", + err_msg, + Color::Yellow.bold().paint("Further error information:"), + Color::Yellow.bold().paint("-".repeat(60)), + err, + Color::Yellow.bold().paint("-".repeat(60)), + )), + + Err(_) => Err(err_msg), + }; } - let mut run_cmd = cargo_command.command(); - // Chdir to temp to avoid including project's .cargo/config.toml - // by accident - it can happen in some CI environments. - run_cmd.current_dir(&temp); - run_cmd.args(&["run", "--manifest-path", &manifest_path]); - - // manually set the `CARGO_TARGET_DIR` to prevent a cargo deadlock - let target_dir = temp.path().join("target").display().to_string(); - build_cmd.env("CARGO_TARGET_DIR", &target_dir); - run_cmd.env("CARGO_TARGET_DIR", &target_dir); - - // Make sure the host's flags aren't used here, e.g. if an alternative linker is specified - // in the RUSTFLAGS then the check we do here will break unless we clear these. 
- build_cmd.env_remove("CARGO_ENCODED_RUSTFLAGS"); - run_cmd.env_remove("CARGO_ENCODED_RUSTFLAGS"); - build_cmd.env_remove("RUSTFLAGS"); - run_cmd.env_remove("RUSTFLAGS"); - - build_cmd.output().map_err(|_| err_msg.clone()).and_then(|s| { - if s.status.success() { - let version = run_cmd.output().ok().and_then(|o| String::from_utf8(o.stdout).ok()); - Ok(CargoCommandVersioned::new( - cargo_command, - version.unwrap_or_else(|| "unknown rustc version".into()), - )) - } else { - match String::from_utf8(s.stderr) { - Ok(ref err) if err.contains("linker `rust-lld` not found") => - Err(print_error_message("`rust-lld` not found, please install it!")), - Ok(ref err) => Err(format!( - "{}\n\n{}\n{}\n{}{}\n", - err_msg, - Color::Yellow.bold().paint("Further error information:"), - Color::Yellow.bold().paint("-".repeat(60)), - err, - Color::Yellow.bold().paint("-".repeat(60)), - )), - Err(_) => Err(err_msg), + let mut run_cmd = prepare_command("rustc"); + run_cmd.args(&["-q", "--", "--version"]); + + let version = run_cmd + .output() + .ok() + .and_then(|o| String::from_utf8(o.stdout).ok()) + .unwrap_or_else(|| "unknown rustc version".into()); + + if crate::build_std_required() { + let mut sysroot_cmd = prepare_command("rustc"); + sysroot_cmd.args(&["-q", "--", "--print", "sysroot"]); + if let Some(sysroot) = + sysroot_cmd.output().ok().and_then(|o| String::from_utf8(o.stdout).ok()) + { + let src_path = + Path::new(sysroot.trim()).join("lib").join("rustlib").join("src").join("rust"); + if !src_path.exists() { + return Err(print_error_message( + "Cannot compile the WASM runtime: no standard library sources found!\n\ + You can install them with `rustup component add rust-src` if you're using `rustup`.", + )) } } - }) + } + + Ok(CargoCommandVersioned::new(cargo_command, version)) } diff --git a/substrate/utils/wasm-builder/src/version.rs b/substrate/utils/wasm-builder/src/version.rs index e4f7d98be61..3a0a306d737 100644 --- a/substrate/utils/wasm-builder/src/version.rs +++ b/substrate/utils/wasm-builder/src/version.rs @@ -212,4 +212,21 @@ mod tests { version_1_69_0, ); } + + #[test] + fn parse_rustc_version() { + let version = Version::extract("rustc 1.73.0 (cc66ad468 2023-10-03)").unwrap(); + assert_eq!( + version, + Version { + major: 1, + minor: 73, + patch: 0, + is_nightly: false, + year: Some(2023), + month: Some(10), + day: Some(03), + } + ); + } } diff --git a/substrate/utils/wasm-builder/src/wasm_project.rs b/substrate/utils/wasm-builder/src/wasm_project.rs index 2e6f671c45e..5bf44c2c9b2 100644 --- a/substrate/utils/wasm-builder/src/wasm_project.rs +++ b/substrate/utils/wasm-builder/src/wasm_project.rs @@ -750,6 +750,25 @@ fn build_bloaty_blob( build_cmd.arg("--offline"); } + // Our executor currently only supports the WASM MVP feature set, however nowadays + // when compiling WASM the Rust compiler has more features enabled by default. + // + // We do set the `-C target-cpu=mvp` flag to make sure that *our* code gets compiled + // in a way that is compatible with our executor, however this doesn't affect Rust's + // standard library crates (`std`, `core` and `alloc`) which are by default precompiled + // and still can make use of these extra features. + // + // So here we force the compiler to also compile the standard library crates for us + // to make sure that they also only use the MVP features. + if crate::build_std_required() { + // Unfortunately this is still a nightly-only flag, but FWIW it is pretty widely used + // so it's unlikely to break without a replacement. 
+ build_cmd.arg("-Z").arg("build-std"); + if !cargo_cmd.supports_nightly_features() { + build_cmd.env("RUSTC_BOOTSTRAP", "1"); + } + } + println!("{}", colorize_info_message("Information that should be included in a bug report.")); println!("{} {:?}", colorize_info_message("Executing build command:"), build_cmd); println!("{} {}", colorize_info_message("Using rustc version:"), cargo_cmd.rustc_version()); @@ -952,6 +971,7 @@ fn generate_rerun_if_changed_instructions( println!("cargo:rerun-if-env-changed={}", crate::WASM_BUILD_RUSTFLAGS_ENV); println!("cargo:rerun-if-env-changed={}", crate::WASM_TARGET_DIRECTORY); println!("cargo:rerun-if-env-changed={}", crate::WASM_BUILD_TOOLCHAIN); + println!("cargo:rerun-if-env-changed={}", crate::WASM_BUILD_STD); } /// Track files and paths related to the given package to rerun `build.rs` on any relevant change. -- GitLab From 4298bc608fa8e5d8b8fb1ca0c1028613d82bc99b Mon Sep 17 00:00:00 2001 From: Adrian Catangiu Date: Mon, 27 Nov 2023 14:22:13 +0200 Subject: [PATCH 131/199] asset-hub-westend-integration-tests: add more asset transfers tests (#2488) Just adds more tests. --- Cargo.lock | 2 + .../assets/asset-hub-rococo/src/lib.rs | 2 +- .../assets/asset-hub-westend/src/lib.rs | 2 +- .../bridges/bridge-hub-rococo/src/lib.rs | 2 +- .../bridges/bridge-hub-westend/src/lib.rs | 2 +- .../parachains/testing/penpal/Cargo.toml | 1 + .../parachains/testing/penpal/src/lib.rs | 6 +- .../emulated/common/src/impls.rs | 12 +- .../src/tests/reserve_transfer.rs | 43 +-- .../assets/asset-hub-rococo/src/tests/swap.rs | 6 - .../tests/assets/asset-hub-westend/Cargo.toml | 1 + .../tests/assets/asset-hub-westend/src/lib.rs | 32 +- .../src/tests/reserve_transfer.rs | 337 ++++++++++++++++-- .../asset-hub-westend/src/tests/send.rs | 12 +- .../asset-hub-westend/src/tests/swap.rs | 24 +- .../asset-hub-westend/src/tests/teleport.rs | 45 +-- 16 files changed, 402 insertions(+), 127 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 8a3f8e05fbc..f99b579bfe9 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1028,6 +1028,7 @@ dependencies = [ "pallet-xcm", "parachains-common", "parity-scale-codec", + "penpal-runtime", "polkadot-runtime-common", "sp-runtime", "staging-xcm", @@ -11746,6 +11747,7 @@ dependencies = [ "serde_json", "sp-core", "sp-runtime", + "westend-emulated-chain", ] [[package]] diff --git a/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-rococo/src/lib.rs b/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-rococo/src/lib.rs index 1ed22b3cc4f..877edceae32 100644 --- a/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-rococo/src/lib.rs +++ b/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-rococo/src/lib.rs @@ -52,6 +52,6 @@ decl_test_parachains! 
{ // AssetHubRococo implementation impl_accounts_helpers_for_parachain!(AssetHubRococo); -impl_assert_events_helpers_for_parachain!(AssetHubRococo, false); +impl_assert_events_helpers_for_parachain!(AssetHubRococo); impl_assets_helpers_for_parachain!(AssetHubRococo, Rococo); impl_foreign_assets_helpers_for_parachain!(AssetHubRococo, Rococo); diff --git a/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-westend/src/lib.rs b/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-westend/src/lib.rs index 4dcdb613ac2..1c017c63c6f 100644 --- a/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-westend/src/lib.rs +++ b/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-westend/src/lib.rs @@ -52,6 +52,6 @@ decl_test_parachains! { // AssetHubWestend implementation impl_accounts_helpers_for_parachain!(AssetHubWestend); -impl_assert_events_helpers_for_parachain!(AssetHubWestend, false); +impl_assert_events_helpers_for_parachain!(AssetHubWestend); impl_assets_helpers_for_parachain!(AssetHubWestend, Westend); impl_foreign_assets_helpers_for_parachain!(AssetHubWestend, Westend); diff --git a/cumulus/parachains/integration-tests/emulated/chains/parachains/bridges/bridge-hub-rococo/src/lib.rs b/cumulus/parachains/integration-tests/emulated/chains/parachains/bridges/bridge-hub-rococo/src/lib.rs index ea0c9513abc..8e5b29e6561 100644 --- a/cumulus/parachains/integration-tests/emulated/chains/parachains/bridges/bridge-hub-rococo/src/lib.rs +++ b/cumulus/parachains/integration-tests/emulated/chains/parachains/bridges/bridge-hub-rococo/src/lib.rs @@ -46,4 +46,4 @@ decl_test_parachains! { // BridgeHubRococo implementation impl_accounts_helpers_for_parachain!(BridgeHubRococo); -impl_assert_events_helpers_for_parachain!(BridgeHubRococo, false); +impl_assert_events_helpers_for_parachain!(BridgeHubRococo); diff --git a/cumulus/parachains/integration-tests/emulated/chains/parachains/bridges/bridge-hub-westend/src/lib.rs b/cumulus/parachains/integration-tests/emulated/chains/parachains/bridges/bridge-hub-westend/src/lib.rs index 4a130ac1f27..a774f31b0fb 100644 --- a/cumulus/parachains/integration-tests/emulated/chains/parachains/bridges/bridge-hub-westend/src/lib.rs +++ b/cumulus/parachains/integration-tests/emulated/chains/parachains/bridges/bridge-hub-westend/src/lib.rs @@ -46,4 +46,4 @@ decl_test_parachains! 
{ // BridgeHubWestend implementation impl_accounts_helpers_for_parachain!(BridgeHubWestend); -impl_assert_events_helpers_for_parachain!(BridgeHubWestend, false); +impl_assert_events_helpers_for_parachain!(BridgeHubWestend); diff --git a/cumulus/parachains/integration-tests/emulated/chains/parachains/testing/penpal/Cargo.toml b/cumulus/parachains/integration-tests/emulated/chains/parachains/testing/penpal/Cargo.toml index c55b10d7180..5886158c263 100644 --- a/cumulus/parachains/integration-tests/emulated/chains/parachains/testing/penpal/Cargo.toml +++ b/cumulus/parachains/integration-tests/emulated/chains/parachains/testing/penpal/Cargo.toml @@ -23,3 +23,4 @@ cumulus-primitives-core = { path = "../../../../../../../primitives/core", defau emulated-integration-tests-common = { path = "../../../../common", default-features = false } penpal-runtime = { path = "../../../../../../runtimes/testing/penpal" } rococo-emulated-chain = { path = "../../../relays/rococo" } +westend-emulated-chain = { path = "../../../relays/westend" } diff --git a/cumulus/parachains/integration-tests/emulated/chains/parachains/testing/penpal/src/lib.rs b/cumulus/parachains/integration-tests/emulated/chains/parachains/testing/penpal/src/lib.rs index f9a422bfcba..c76120adb79 100644 --- a/cumulus/parachains/integration-tests/emulated/chains/parachains/testing/penpal/src/lib.rs +++ b/cumulus/parachains/integration-tests/emulated/chains/parachains/testing/penpal/src/lib.rs @@ -25,6 +25,7 @@ use emulated_integration_tests_common::{ impl_assets_helpers_for_parachain, impls::Parachain, xcm_emulator::decl_test_parachains, }; use rococo_emulated_chain::Rococo; +use westend_emulated_chain::Westend; // Penpal Parachain declaration decl_test_parachains! { @@ -67,5 +68,6 @@ decl_test_parachains! { // Penpal implementation impl_accounts_helpers_for_parachain!(PenpalA); impl_assets_helpers_for_parachain!(PenpalA, Rococo); -impl_assert_events_helpers_for_parachain!(PenpalA, true); -impl_assert_events_helpers_for_parachain!(PenpalB, true); +impl_assets_helpers_for_parachain!(PenpalB, Westend); +impl_assert_events_helpers_for_parachain!(PenpalA); +impl_assert_events_helpers_for_parachain!(PenpalB); diff --git a/cumulus/parachains/integration-tests/emulated/common/src/impls.rs b/cumulus/parachains/integration-tests/emulated/common/src/impls.rs index 8c94df6d888..768784ac067 100644 --- a/cumulus/parachains/integration-tests/emulated/common/src/impls.rs +++ b/cumulus/parachains/integration-tests/emulated/common/src/impls.rs @@ -418,7 +418,7 @@ macro_rules! impl_accounts_helpers_for_parachain { #[macro_export] macro_rules! impl_assert_events_helpers_for_parachain { - ( $chain:ident, $ignore_weight:expr ) => { + ( $chain:ident ) => { $crate::impls::paste::paste! { type [<$chain RuntimeEvent>] = <$chain as $crate::impls::Chain>::RuntimeEvent; @@ -431,7 +431,7 @@ macro_rules! impl_assert_events_helpers_for_parachain { [<$chain RuntimeEvent>]::::PolkadotXcm( $crate::impls::pallet_xcm::Event::Attempted { outcome: $crate::impls::Outcome::Complete(weight) } ) => { - weight: $ignore_weight || $crate::impls::weight_within_threshold( + weight: $crate::impls::weight_within_threshold( ($crate::impls::REF_TIME_THRESHOLD, $crate::impls::PROOF_SIZE_THRESHOLD), expected_weight.unwrap_or(*weight), *weight @@ -453,7 +453,7 @@ macro_rules! 
impl_assert_events_helpers_for_parachain { [<$chain RuntimeEvent>]::::PolkadotXcm( $crate::impls::pallet_xcm::Event::Attempted { outcome: $crate::impls::Outcome::Incomplete(weight, error) } ) => { - weight: $ignore_weight || $crate::impls::weight_within_threshold( + weight: $crate::impls::weight_within_threshold( ($crate::impls::REF_TIME_THRESHOLD, $crate::impls::PROOF_SIZE_THRESHOLD), expected_weight.unwrap_or(*weight), *weight @@ -509,7 +509,7 @@ macro_rules! impl_assert_events_helpers_for_parachain { [<$chain RuntimeEvent>]::::MessageQueue($crate::impls::pallet_message_queue::Event::Processed { success: true, weight_used: weight, .. }) => { - weight: $ignore_weight || $crate::impls::weight_within_threshold( + weight: $crate::impls::weight_within_threshold( ($crate::impls::REF_TIME_THRESHOLD, $crate::impls::PROOF_SIZE_THRESHOLD), expected_weight.unwrap_or(*weight), *weight @@ -529,7 +529,7 @@ macro_rules! impl_assert_events_helpers_for_parachain { [<$chain RuntimeEvent>]::::MessageQueue($crate::impls::pallet_message_queue::Event::Processed { success: false, weight_used: weight, .. }) => { - weight: $ignore_weight || $crate::impls::weight_within_threshold( + weight: $crate::impls::weight_within_threshold( ($crate::impls::REF_TIME_THRESHOLD, $crate::impls::PROOF_SIZE_THRESHOLD), expected_weight.unwrap_or(*weight), *weight @@ -560,7 +560,7 @@ macro_rules! impl_assert_events_helpers_for_parachain { vec![ [<$chain RuntimeEvent>]::::MessageQueue($crate::impls::pallet_message_queue::Event::Processed { success: true, weight_used: weight, .. } ) => { - weight: $ignore_weight || $crate::impls::weight_within_threshold( + weight: $crate::impls::weight_within_threshold( ($crate::impls::REF_TIME_THRESHOLD, $crate::impls::PROOF_SIZE_THRESHOLD), expected_weight.unwrap_or(*weight), *weight diff --git a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/reserve_transfer.rs b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/reserve_transfer.rs index d0e9b72176b..18483762ae1 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/reserve_transfer.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/reserve_transfer.rs @@ -40,19 +40,6 @@ fn relay_to_para_sender_assertions(t: RelayToParaTest) { ); } -fn relay_to_para_receiver_assertions(_: Test) { - type RuntimeEvent = ::RuntimeEvent; - assert_expected_events!( - PenpalA, - vec![ - RuntimeEvent::Balances(pallet_balances::Event::Deposit { .. }) => {}, - RuntimeEvent::MessageQueue( - pallet_message_queue::Event::Processed { success: true, .. 
} - ) => {}, - ] - ); -} - fn system_para_to_para_sender_assertions(t: SystemParaToParaTest) { type RuntimeEvent = ::RuntimeEvent; @@ -78,7 +65,7 @@ fn system_para_to_para_sender_assertions(t: SystemParaToParaTest) { ); } -fn system_para_to_para_receiver_assertions(_: Test) { +fn para_receiver_assertions(_: Test) { type RuntimeEvent = ::RuntimeEvent; assert_expected_events!( PenpalA, @@ -297,7 +284,7 @@ fn reserve_transfer_native_asset_from_relay_to_para() { let receiver_balance_before = test.receiver.balance; test.set_assertion::(relay_to_para_sender_assertions); - test.set_assertion::(relay_to_para_receiver_assertions); + test.set_assertion::(para_receiver_assertions); test.set_dispatchable::(relay_to_para_limited_reserve_transfer_assets); test.assert(); @@ -314,6 +301,10 @@ fn reserve_transfer_native_asset_from_relay_to_para() { assert_eq!(sender_balance_before - amount_to_send - delivery_fees, sender_balance_after); // Receiver's balance is increased assert!(receiver_balance_after > receiver_balance_before); + // Receiver's balance increased by `amount_to_send - delivery_fees - bought_execution`; + // `delivery_fees` might be paid from transfer or JIT, also `bought_execution` is unknown but + // should be non-zero + assert!(receiver_balance_after < receiver_balance_before + amount_to_send); } /// Reserve Transfers of native asset from System Parachain to Parachain should work @@ -337,7 +328,7 @@ fn reserve_transfer_native_asset_from_system_para_to_para() { let receiver_balance_before = test.receiver.balance; test.set_assertion::(system_para_to_para_sender_assertions); - test.set_assertion::(system_para_to_para_receiver_assertions); + test.set_assertion::(para_receiver_assertions); test.set_dispatchable::(system_para_to_para_limited_reserve_transfer_assets); test.assert(); @@ -354,6 +345,10 @@ fn reserve_transfer_native_asset_from_system_para_to_para() { assert_eq!(sender_balance_before - amount_to_send - delivery_fees, sender_balance_after); // Receiver's balance is increased assert!(receiver_balance_after > receiver_balance_before); + // Receiver's balance increased by `amount_to_send - delivery_fees - bought_execution`; + // `delivery_fees` might be paid from transfer or JIT, also `bought_execution` is unknown but + // should be non-zero + assert!(receiver_balance_after < receiver_balance_before + amount_to_send); } /// Reserve Transfers of native asset from Parachain to System Parachain should work @@ -400,6 +395,10 @@ fn reserve_transfer_native_asset_from_para_to_system_para() { assert_eq!(sender_balance_before - amount_to_send - delivery_fees, sender_balance_after); // Receiver's balance is increased assert!(receiver_balance_after > receiver_balance_before); + // Receiver's balance increased by `amount_to_send - delivery_fees - bought_execution`; + // `delivery_fees` might be paid from transfer or JIT, also `bought_execution` is unknown but + // should be non-zero + assert!(receiver_balance_after < receiver_balance_before + amount_to_send); } /// Reserve Transfers of a local asset and native asset from System Parachain to Parachain should @@ -420,7 +419,7 @@ fn reserve_transfer_assets_from_system_para_to_para() { ASSET_MIN_BALANCE, false, PenpalASender::get(), - Some(Weight::from_parts(1_019_445_000, 200_000)), + None, 0, ); @@ -485,6 +484,10 @@ fn reserve_transfer_assets_from_system_para_to_para() { assert!(sender_balance_after < sender_balance_before); // Receiver's balance is increased assert!(receiver_balance_after > receiver_balance_before); + // Receiver's balance increased 
by `amount_to_send - delivery_fees - bought_execution`; + // `delivery_fees` might be paid from transfer or JIT, also `bought_execution` is unknown but + // should be non-zero + assert!(receiver_balance_after < receiver_balance_before + fee_amount_to_send); let sender_assets_after = AssetHubRococo::execute_with(|| { type Assets = ::Assets; @@ -495,8 +498,8 @@ fn reserve_transfer_assets_from_system_para_to_para() { >::balance(ASSET_ID, &PenpalAReceiver::get()) }); - // Sender's balance is reduced + // Sender's balance is reduced by exact amount assert_eq!(sender_assets_before - asset_amount_to_send, sender_assets_after); - // Receiver's balance is increased - assert!(receiver_assets_after > receiver_assets_before); + // Receiver's balance is increased by exact amount + assert_eq!(receiver_assets_after, receiver_assets_before + asset_amount_to_send); } diff --git a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/swap.rs b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/swap.rs index e08af50c14e..9e691bf02f1 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/swap.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/swap.rs @@ -44,12 +44,6 @@ fn swap_locally_on_chain_using_local_assets() { 100_000_000_000_000, )); - assert_ok!(::Balances::force_set_balance( - ::RuntimeOrigin::root(), - AssetHubRococoSender::get().into(), - 100_000_000_000_000, - )); - assert_ok!(::AssetConversion::create_pool( ::RuntimeOrigin::signed(AssetHubRococoSender::get()), asset_native.clone(), diff --git a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/Cargo.toml b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/Cargo.toml index 7080abc0a44..4c7537bfd53 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/Cargo.toml +++ b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/Cargo.toml @@ -38,4 +38,5 @@ asset-test-utils = { path = "../../../../../runtimes/assets/test-utils" } cumulus-pallet-dmp-queue = { default-features = false, path = "../../../../../../pallets/dmp-queue" } cumulus-pallet-parachain-system = { default-features = false, path = "../../../../../../pallets/parachain-system" } emulated-integration-tests-common = { path = "../../../common", default-features = false} +penpal-runtime = { path = "../../../../../runtimes/testing/penpal" } westend-system-emulated-network ={ path = "../../../networks/westend-system" } diff --git a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/lib.rs b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/lib.rs index 83a867e6ae3..e2c03d2f8f0 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/lib.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/lib.rs @@ -47,11 +47,12 @@ pub use westend_system_emulated_network::{ asset_hub_westend_emulated_chain::{ genesis::ED as ASSET_HUB_WESTEND_ED, AssetHubWestendParaPallet as AssetHubWestendPallet, }, - penpal_emulated_chain::PenpalAParaPallet as PenpalAPallet, + penpal_emulated_chain::PenpalBParaPallet as PenpalBPallet, westend_emulated_chain::{genesis::ED as WESTEND_ED, WestendRelayPallet as WestendPallet}, AssetHubWestendPara as AssetHubWestend, AssetHubWestendParaReceiver as AssetHubWestendReceiver, - 
AssetHubWestendParaSender as AssetHubWestendSender, PenpalAPara as PenpalA, - PenpalAParaReceiver as PenpalAReceiver, PenpalAParaSender as PenpalASender, + AssetHubWestendParaSender as AssetHubWestendSender, BridgeHubWestendPara as BridgeHubWestend, + BridgeHubWestendParaReceiver as BridgeHubWestendReceiver, PenpalBPara as PenpalB, + PenpalBParaReceiver as PenpalBReceiver, PenpalBParaSender as PenpalBSender, WestendRelay as Westend, WestendRelayReceiver as WestendReceiver, WestendRelaySender as WestendSender, }; @@ -62,18 +63,20 @@ pub const ASSET_MIN_BALANCE: u128 = 1000; pub const ASSETS_PALLET_ID: u8 = 50; pub type RelayToSystemParaTest = Test; +pub type RelayToParaTest = Test; pub type SystemParaToRelayTest = Test; -pub type SystemParaToParaTest = Test; +pub type SystemParaToParaTest = Test; +pub type ParaToSystemParaTest = Test; /// Returns a `TestArgs` instance to be used for the Relay Chain across integration tests -pub fn relay_test_args(amount: Balance) -> TestArgs { +pub fn relay_test_args( + dest: MultiLocation, + beneficiary_id: AccountId32, + amount: Balance, +) -> TestArgs { TestArgs { - dest: Westend::child_location_of(AssetHubWestend::para_id()), - beneficiary: AccountId32Junction { - network: None, - id: AssetHubWestendReceiver::get().into(), - } - .into(), + dest, + beneficiary: AccountId32Junction { network: None, id: beneficiary_id.into() }.into(), amount, assets: (Here, amount).into(), asset_id: None, @@ -82,13 +85,14 @@ pub fn relay_test_args(amount: Balance) -> TestArgs { } } -/// Returns a `TestArgs` instance to be used for the System Parachain across integration tests -pub fn system_para_test_args( +/// Returns a `TestArgs` instance to be used by parachains across integration tests +pub fn para_test_args( dest: MultiLocation, beneficiary_id: AccountId32, amount: Balance, assets: MultiAssets, asset_id: Option, + fee_asset_item: u32, ) -> TestArgs { TestArgs { dest, @@ -96,7 +100,7 @@ pub fn system_para_test_args( amount, assets, asset_id, - fee_asset_item: 0, + fee_asset_item, weight_limit: WeightLimit::Unlimited, } } diff --git a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/reserve_transfer.rs b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/reserve_transfer.rs index 5b2c648b7b0..1a69a4f3f71 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/reserve_transfer.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/reserve_transfer.rs @@ -14,7 +14,31 @@ // limitations under the License. 
use crate::*; -use asset_hub_westend_runtime::xcm_config::XcmConfig; +use asset_hub_westend_runtime::xcm_config::XcmConfig as AssetHubWestendXcmConfig; +use penpal_runtime::xcm_config::XcmConfig as PenpalWestendXcmConfig; +use westend_runtime::xcm_config::XcmConfig as WestendXcmConfig; + +fn relay_to_para_sender_assertions(t: RelayToParaTest) { + type RuntimeEvent = ::RuntimeEvent; + + Westend::assert_xcm_pallet_attempted_complete(Some(Weight::from_parts(864_610_000, 8_799))); + + assert_expected_events!( + Westend, + vec![ + // Amount to reserve transfer is transferred to Parachain's Sovereign account + RuntimeEvent::Balances( + pallet_balances::Event::Transfer { from, to, amount } + ) => { + from: *from == t.sender.account_id, + to: *to == Westend::sovereign_account_id_of( + t.args.dest + ), + amount: *amount == t.args.amount, + }, + ] + ); +} fn system_para_to_para_sender_assertions(t: SystemParaToParaTest) { type RuntimeEvent = ::RuntimeEvent; @@ -42,10 +66,54 @@ fn system_para_to_para_sender_assertions(t: SystemParaToParaTest) { } fn para_receiver_assertions(_: Test) { - type RuntimeEvent = ::RuntimeEvent; + type RuntimeEvent = ::RuntimeEvent; + assert_expected_events!( + PenpalB, + vec![ + RuntimeEvent::Balances(pallet_balances::Event::Deposit { .. }) => {}, + RuntimeEvent::MessageQueue( + pallet_message_queue::Event::Processed { success: true, .. } + ) => {}, + ] + ); +} + +fn para_to_system_para_sender_assertions(t: ParaToSystemParaTest) { + type RuntimeEvent = ::RuntimeEvent; + + PenpalB::assert_xcm_pallet_attempted_complete(Some(Weight::from_parts(864_610_000, 8_799))); + + assert_expected_events!( + PenpalB, + vec![ + // Amount to reserve transfer is transferred to Parachain's Sovereign account + RuntimeEvent::Balances( + pallet_balances::Event::Withdraw { who, amount } + ) => { + who: *who == t.sender.account_id, + amount: *amount == t.args.amount, + }, + ] + ); +} + +fn para_to_system_para_receiver_assertions(t: ParaToSystemParaTest) { + type RuntimeEvent = ::RuntimeEvent; + + let sov_penpal_on_ahw = AssetHubWestend::sovereign_account_id_of( + AssetHubWestend::sibling_location_of(PenpalB::para_id()), + ); + assert_expected_events!( - PenpalA, + AssetHubWestend, vec![ + // Amount to reserve transfer is transferred to Parachain's Sovereign account + RuntimeEvent::Balances( + pallet_balances::Event::Withdraw { who, amount } + ) => { + who: *who == sov_penpal_on_ahw.clone().into(), + amount: *amount == t.args.amount, + }, RuntimeEvent::Balances(pallet_balances::Event::Deposit { .. }) => {}, RuntimeEvent::MessageQueue( pallet_message_queue::Event::Processed { success: true, .. } @@ -54,7 +122,7 @@ fn para_receiver_assertions(_: Test) { ); } -fn system_para_to_para_assets_assertions(t: SystemParaToParaTest) { +fn system_para_to_para_assets_sender_assertions(t: SystemParaToParaTest) { type RuntimeEvent = ::RuntimeEvent; AssetHubWestend::assert_xcm_pallet_attempted_complete(Some(Weight::from_parts( @@ -80,6 +148,31 @@ fn system_para_to_para_assets_assertions(t: SystemParaToParaTest) { ); } +fn system_para_to_para_assets_receiver_assertions(_: Test) { + type RuntimeEvent = ::RuntimeEvent; + assert_expected_events!( + PenpalB, + vec![ + RuntimeEvent::Balances(pallet_balances::Event::Deposit { .. }) => {}, + RuntimeEvent::Assets(pallet_assets::Event::Issued { .. }) => {}, + RuntimeEvent::MessageQueue( + pallet_message_queue::Event::Processed { success: true, .. 
} + ) => {}, + ] + ); +} + +fn relay_to_para_limited_reserve_transfer_assets(t: RelayToParaTest) -> DispatchResult { + ::XcmPallet::limited_reserve_transfer_assets( + t.signed_origin, + bx!(t.args.dest.into()), + bx!(t.args.beneficiary.into()), + bx!(t.args.assets.into()), + t.args.fee_asset_item, + t.args.weight_limit, + ) +} + fn system_para_to_para_limited_reserve_transfer_assets(t: SystemParaToParaTest) -> DispatchResult { ::PolkadotXcm::limited_reserve_transfer_assets( t.signed_origin, @@ -91,6 +184,17 @@ fn system_para_to_para_limited_reserve_transfer_assets(t: SystemParaToParaTest) ) } +fn para_to_system_para_limited_reserve_transfer_assets(t: ParaToSystemParaTest) -> DispatchResult { + ::PolkadotXcm::limited_reserve_transfer_assets( + t.signed_origin, + bx!(t.args.dest.into()), + bx!(t.args.beneficiary.into()), + bx!(t.args.assets.into()), + t.args.fee_asset_item, + t.args.weight_limit, + ) +} + /// Reserve Transfers of native asset from Relay Chain to the System Parachain shouldn't work #[test] fn reserve_transfer_native_asset_from_relay_to_system_para_fails() { @@ -159,19 +263,62 @@ fn reserve_transfer_native_asset_from_system_para_to_relay_fails() { }); } +/// Reserve Transfers of native asset from Relay to Parachain should work +#[test] +fn reserve_transfer_native_asset_from_relay_to_para() { + // Init values for Relay + let destination = Westend::child_location_of(PenpalB::para_id()); + let beneficiary_id = PenpalBReceiver::get(); + let amount_to_send: Balance = WESTEND_ED * 1000; + + let test_args = TestContext { + sender: WestendSender::get(), + receiver: PenpalBReceiver::get(), + args: relay_test_args(destination, beneficiary_id, amount_to_send), + }; + + let mut test = RelayToParaTest::new(test_args); + + let sender_balance_before = test.sender.balance; + let receiver_balance_before = test.receiver.balance; + + test.set_assertion::(relay_to_para_sender_assertions); + test.set_assertion::(para_receiver_assertions); + test.set_dispatchable::(relay_to_para_limited_reserve_transfer_assets); + test.assert(); + + let delivery_fees = Westend::execute_with(|| { + xcm_helpers::transfer_assets_delivery_fees::< + ::XcmSender, + >(test.args.assets.clone(), 0, test.args.weight_limit, test.args.beneficiary, test.args.dest) + }); + + let sender_balance_after = test.sender.balance; + let receiver_balance_after = test.receiver.balance; + + // Sender's balance is reduced + assert_eq!(sender_balance_before - amount_to_send - delivery_fees, sender_balance_after); + // Receiver's balance is increased + assert!(receiver_balance_after > receiver_balance_before); + // Receiver's balance increased by `amount_to_send - delivery_fees - bought_execution`; + // `delivery_fees` might be paid from transfer or JIT, also `bought_execution` is unknown but + // should be non-zero + assert!(receiver_balance_after < receiver_balance_before + amount_to_send); +} + /// Reserve Transfers of native asset from System Parachain to Parachain should work #[test] fn reserve_transfer_native_asset_from_system_para_to_para() { // Init values for System Parachain - let destination = AssetHubWestend::sibling_location_of(PenpalA::para_id()); - let beneficiary_id = PenpalAReceiver::get(); + let destination = AssetHubWestend::sibling_location_of(PenpalB::para_id()); + let beneficiary_id = PenpalBReceiver::get(); let amount_to_send: Balance = ASSET_HUB_WESTEND_ED * 1000; let assets = (Parent, amount_to_send).into(); let test_args = TestContext { sender: AssetHubWestendSender::get(), - receiver: PenpalAReceiver::get(), - args: 
system_para_test_args(destination, beneficiary_id, amount_to_send, assets, None), + receiver: PenpalBReceiver::get(), + args: para_test_args(destination, beneficiary_id, amount_to_send, assets, None, 0), }; let mut test = SystemParaToParaTest::new(test_args); @@ -180,7 +327,7 @@ fn reserve_transfer_native_asset_from_system_para_to_para() { let receiver_balance_before = test.receiver.balance; test.set_assertion::(system_para_to_para_sender_assertions); - test.set_assertion::(para_receiver_assertions); + test.set_assertion::(para_receiver_assertions); test.set_dispatchable::(system_para_to_para_limited_reserve_transfer_assets); test.assert(); @@ -188,53 +335,171 @@ fn reserve_transfer_native_asset_from_system_para_to_para() { let receiver_balance_after = test.receiver.balance; let delivery_fees = AssetHubWestend::execute_with(|| { - xcm_helpers::transfer_assets_delivery_fees::<::XcmSender>( - test.args.assets.clone(), - 0, - test.args.weight_limit, - test.args.beneficiary, - test.args.dest, - ) + xcm_helpers::transfer_assets_delivery_fees::< + ::XcmSender, + >(test.args.assets.clone(), 0, test.args.weight_limit, test.args.beneficiary, test.args.dest) }); // Sender's balance is reduced assert_eq!(sender_balance_before - amount_to_send - delivery_fees, sender_balance_after); // Receiver's balance is increased assert!(receiver_balance_after > receiver_balance_before); + // Receiver's balance increased by `amount_to_send - delivery_fees - bought_execution`; + // `delivery_fees` might be paid from transfer or JIT, also `bought_execution` is unknown but + // should be non-zero + assert!(receiver_balance_after < receiver_balance_before + amount_to_send); } -/// Reserve Transfers of a local asset from System Parachain to Parachain should work +/// Reserve Transfers of native asset from Parachain to System Parachain should work #[test] -fn reserve_transfer_asset_from_system_para_to_para() { - // Force create asset from Relay Chain and mint assets for System Parachain's sender account +fn reserve_transfer_native_asset_from_para_to_system_para() { + // Init values for Penpal Parachain + let destination = PenpalB::sibling_location_of(AssetHubWestend::para_id()); + let beneficiary_id = AssetHubWestendReceiver::get(); + let amount_to_send: Balance = ASSET_HUB_WESTEND_ED * 1000; + let assets = (Parent, amount_to_send).into(); + + let test_args = TestContext { + sender: PenpalBSender::get(), + receiver: AssetHubWestendReceiver::get(), + args: para_test_args(destination, beneficiary_id, amount_to_send, assets, None, 0), + }; + + let mut test = ParaToSystemParaTest::new(test_args); + + let sender_balance_before = test.sender.balance; + let receiver_balance_before = test.receiver.balance; + + let penpal_location_as_seen_by_ahw = AssetHubWestend::sibling_location_of(PenpalB::para_id()); + let sov_penpal_on_ahw = + AssetHubWestend::sovereign_account_id_of(penpal_location_as_seen_by_ahw); + + // fund the Penpal's SA on AHW with the native tokens held in reserve + AssetHubWestend::fund_accounts(vec![(sov_penpal_on_ahw.into(), amount_to_send * 2)]); + + test.set_assertion::(para_to_system_para_sender_assertions); + test.set_assertion::(para_to_system_para_receiver_assertions); + test.set_dispatchable::(para_to_system_para_limited_reserve_transfer_assets); + test.assert(); + + let sender_balance_after = test.sender.balance; + let receiver_balance_after = test.receiver.balance; + + let delivery_fees = PenpalB::execute_with(|| { + xcm_helpers::transfer_assets_delivery_fees::< + ::XcmSender, + 
>(test.args.assets.clone(), 0, test.args.weight_limit, test.args.beneficiary, test.args.dest) + }); + + // Sender's balance is reduced + assert_eq!(sender_balance_before - amount_to_send - delivery_fees, sender_balance_after); + // Receiver's balance is increased + assert!(receiver_balance_after > receiver_balance_before); + // Receiver's balance increased by `amount_to_send - delivery_fees - bought_execution`; + // `delivery_fees` might be paid from transfer or JIT, also `bought_execution` is unknown but + // should be non-zero + assert!(receiver_balance_after < receiver_balance_before + amount_to_send); +} + +/// Reserve Transfers of a local asset and native asset from System Parachain to Parachain should +/// work +#[test] +fn reserve_transfer_assets_from_system_para_to_para() { + // Force create asset on AssetHubWestend and PenpalB from Relay Chain AssetHubWestend::force_create_and_mint_asset( ASSET_ID, ASSET_MIN_BALANCE, true, AssetHubWestendSender::get(), Some(Weight::from_parts(1_019_445_000, 200_000)), - ASSET_MIN_BALANCE * 1000000, + ASSET_MIN_BALANCE * 1_000_000, + ); + PenpalB::force_create_and_mint_asset( + ASSET_ID, + ASSET_MIN_BALANCE, + false, + PenpalBSender::get(), + None, + 0, ); // Init values for System Parachain - let destination = AssetHubWestend::sibling_location_of(PenpalA::para_id()); - let beneficiary_id = PenpalAReceiver::get(); - let amount_to_send = ASSET_MIN_BALANCE * 1000; - let assets = - (X2(PalletInstance(ASSETS_PALLET_ID), GeneralIndex(ASSET_ID.into())), amount_to_send) - .into(); - - let system_para_test_args = TestContext { + let destination = AssetHubWestend::sibling_location_of(PenpalB::para_id()); + let beneficiary_id = PenpalBReceiver::get(); + let fee_amount_to_send = ASSET_HUB_WESTEND_ED * 1000; + let asset_amount_to_send = ASSET_MIN_BALANCE * 1000; + let assets: MultiAssets = vec![ + (Parent, fee_amount_to_send).into(), + (X2(PalletInstance(ASSETS_PALLET_ID), GeneralIndex(ASSET_ID.into())), asset_amount_to_send) + .into(), + ] + .into(); + let fee_asset_index = assets + .inner() + .iter() + .position(|r| r == &(Parent, fee_amount_to_send).into()) + .unwrap() as u32; + + let para_test_args = TestContext { sender: AssetHubWestendSender::get(), - receiver: PenpalAReceiver::get(), - args: system_para_test_args(destination, beneficiary_id, amount_to_send, assets, None), + receiver: PenpalBReceiver::get(), + args: para_test_args( + destination, + beneficiary_id, + asset_amount_to_send, + assets, + None, + fee_asset_index, + ), }; - let mut system_para_test = SystemParaToParaTest::new(system_para_test_args); + let mut test = SystemParaToParaTest::new(para_test_args); + + // Create SA-of-Penpal-on-AHW with ED. 
+ let penpal_location = AssetHubWestend::sibling_location_of(PenpalB::para_id()); + let sov_penpal_on_ahw = AssetHubWestend::sovereign_account_id_of(penpal_location); + AssetHubWestend::fund_accounts(vec![(sov_penpal_on_ahw.into(), WESTEND_ED)]); + + let sender_balance_before = test.sender.balance; + let receiver_balance_before = test.receiver.balance; + + let sender_assets_before = AssetHubWestend::execute_with(|| { + type Assets = ::Assets; + >::balance(ASSET_ID, &AssetHubWestendSender::get()) + }); + let receiver_assets_before = PenpalB::execute_with(|| { + type Assets = ::Assets; + >::balance(ASSET_ID, &PenpalBReceiver::get()) + }); + + test.set_assertion::(system_para_to_para_assets_sender_assertions); + test.set_assertion::(system_para_to_para_assets_receiver_assertions); + test.set_dispatchable::(system_para_to_para_limited_reserve_transfer_assets); + test.assert(); + + let sender_balance_after = test.sender.balance; + let receiver_balance_after = test.receiver.balance; + + // Sender's balance is reduced + assert!(sender_balance_after < sender_balance_before); + // Receiver's balance is increased + assert!(receiver_balance_after > receiver_balance_before); + // Receiver's balance increased by `amount_to_send - delivery_fees - bought_execution`; + // `delivery_fees` might be paid from transfer or JIT, also `bought_execution` is unknown but + // should be non-zero + assert!(receiver_balance_after < receiver_balance_before + fee_amount_to_send); + + let sender_assets_after = AssetHubWestend::execute_with(|| { + type Assets = ::Assets; + >::balance(ASSET_ID, &AssetHubWestendSender::get()) + }); + let receiver_assets_after = PenpalB::execute_with(|| { + type Assets = ::Assets; + >::balance(ASSET_ID, &PenpalBReceiver::get()) + }); - system_para_test.set_assertion::(system_para_to_para_assets_assertions); - // TODO: Add assertions when Penpal is able to manage assets - system_para_test - .set_dispatchable::(system_para_to_para_limited_reserve_transfer_assets); - system_para_test.assert(); + // Sender's balance is reduced by exact amount + assert_eq!(sender_assets_before - asset_amount_to_send, sender_assets_after); + // Receiver's balance is increased by exact amount + assert_eq!(receiver_assets_after, receiver_assets_before + asset_amount_to_send); } diff --git a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/send.rs b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/send.rs index bda9a3e69c4..4b98eeb0ed3 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/send.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/send.rs @@ -33,7 +33,7 @@ fn send_transact_as_superuser_from_relay_to_system_para_works() { #[test] fn send_xcm_from_para_to_system_para_paying_fee_with_assets_works() { let para_sovereign_account = AssetHubWestend::sovereign_account_id_of( - AssetHubWestend::sibling_location_of(PenpalA::para_id()), + AssetHubWestend::sibling_location_of(PenpalB::para_id()), ); // Force create and mint assets for Parachain's sovereign account @@ -60,8 +60,8 @@ fn send_xcm_from_para_to_system_para_paying_fee_with_assets_works() { let native_asset = (X2(PalletInstance(ASSETS_PALLET_ID), GeneralIndex(ASSET_ID.into())), fee_amount).into(); - let root_origin = ::RuntimeOrigin::root(); - let system_para_destination = PenpalA::sibling_location_of(AssetHubWestend::para_id()).into(); + let root_origin = ::RuntimeOrigin::root(); + let 
system_para_destination = PenpalB::sibling_location_of(AssetHubWestend::para_id()).into(); let xcm = xcm_transact_paid_execution( call, origin_kind, @@ -69,14 +69,14 @@ fn send_xcm_from_para_to_system_para_paying_fee_with_assets_works() { para_sovereign_account.clone(), ); - PenpalA::execute_with(|| { - assert_ok!(::PolkadotXcm::send( + PenpalB::execute_with(|| { + assert_ok!(::PolkadotXcm::send( root_origin, bx!(system_para_destination), bx!(xcm), )); - PenpalA::assert_xcm_pallet_sent(); + PenpalB::assert_xcm_pallet_sent(); }); AssetHubWestend::execute_with(|| { diff --git a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/swap.rs b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/swap.rs index a8e19f9ef4b..aca60f68802 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/swap.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/swap.rs @@ -114,7 +114,7 @@ fn swap_locally_on_chain_using_foreign_assets() { let foreign_asset1_at_asset_hub_westend = Box::new(MultiLocation { parents: 1, interior: X3( - Parachain(PenpalA::para_id().into()), + Parachain(PenpalB::para_id().into()), PalletInstance(ASSETS_PALLET_ID), GeneralIndex(ASSET_ID.into()), ), @@ -125,18 +125,18 @@ fn swap_locally_on_chain_using_foreign_assets() { .into(); let penpal_location = - MultiLocation { parents: 1, interior: X1(Parachain(PenpalA::para_id().into())) }; + MultiLocation { parents: 1, interior: X1(Parachain(PenpalB::para_id().into())) }; // 1. Create asset on penpal: - PenpalA::execute_with(|| { - assert_ok!(::Assets::create( - ::RuntimeOrigin::signed(PenpalASender::get()), + PenpalB::execute_with(|| { + assert_ok!(::Assets::create( + ::RuntimeOrigin::signed(PenpalBSender::get()), ASSET_ID.into(), - PenpalASender::get().into(), + PenpalBSender::get().into(), 1000, )); - assert!(::Assets::asset_exists(ASSET_ID)); + assert!(::Assets::asset_exists(ASSET_ID)); }); // 2. Create foreign asset on asset_hub_westend: @@ -190,18 +190,18 @@ fn swap_locally_on_chain_using_foreign_assets() { ])); // Send XCM message from penpal => asset_hub_westend - let sudo_penpal_origin = ::RuntimeOrigin::root(); - PenpalA::execute_with(|| { - assert_ok!(::PolkadotXcm::send( + let sudo_penpal_origin = ::RuntimeOrigin::root(); + PenpalB::execute_with(|| { + assert_ok!(::PolkadotXcm::send( sudo_penpal_origin.clone(), bx!(assets_para_destination.clone()), bx!(xcm), )); - type RuntimeEvent = ::RuntimeEvent; + type RuntimeEvent = ::RuntimeEvent; assert_expected_events!( - PenpalA, + PenpalB, vec![ RuntimeEvent::PolkadotXcm(pallet_xcm::Event::Sent { .. 
}) => {}, ] diff --git a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/teleport.rs b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/teleport.rs index d618cd2fe04..2c43bb9d801 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/teleport.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/teleport.rs @@ -157,10 +157,12 @@ fn system_para_teleport_assets(t: SystemParaToRelayTest) -> DispatchResult { fn limited_teleport_native_assets_from_relay_to_system_para_works() { // Init values for Relay Chain let amount_to_send: Balance = WESTEND_ED * 1000; + let dest = Westend::child_location_of(AssetHubWestend::para_id()); + let beneficiary = AssetHubWestendReceiver::get(); let test_args = TestContext { sender: WestendSender::get(), - receiver: AssetHubWestendReceiver::get(), - args: relay_test_args(amount_to_send), + receiver: beneficiary.clone(), + args: relay_test_args(dest, beneficiary, amount_to_send), }; let mut test = RelayToSystemParaTest::new(test_args); @@ -204,7 +206,7 @@ fn limited_teleport_native_assets_back_from_system_para_to_relay_works() { let test_args = TestContext { sender: AssetHubWestendSender::get(), receiver: WestendReceiver::get(), - args: system_para_test_args(destination, beneficiary_id, amount_to_send, assets, None), + args: para_test_args(destination, beneficiary_id, amount_to_send, assets, None, 0), }; let mut test = SystemParaToRelayTest::new(test_args); @@ -245,7 +247,7 @@ fn limited_teleport_native_assets_from_system_para_to_relay_fails() { let test_args = TestContext { sender: AssetHubWestendSender::get(), receiver: WestendReceiver::get(), - args: system_para_test_args(destination, beneficiary_id, amount_to_send, assets, None), + args: para_test_args(destination, beneficiary_id, amount_to_send, assets, None, 0), }; let mut test = SystemParaToRelayTest::new(test_args); @@ -278,10 +280,12 @@ fn limited_teleport_native_assets_from_system_para_to_relay_fails() { fn teleport_native_assets_from_relay_to_system_para_works() { // Init values for Relay Chain let amount_to_send: Balance = WESTEND_ED * 1000; + let dest = Westend::child_location_of(AssetHubWestend::para_id()); + let beneficiary = AssetHubWestendReceiver::get(); let test_args = TestContext { sender: WestendSender::get(), - receiver: AssetHubWestendReceiver::get(), - args: relay_test_args(amount_to_send), + receiver: beneficiary.clone(), + args: relay_test_args(dest, beneficiary, amount_to_send), }; let mut test = RelayToSystemParaTest::new(test_args); @@ -325,7 +329,7 @@ fn teleport_native_assets_back_from_system_para_to_relay_works() { let test_args = TestContext { sender: AssetHubWestendSender::get(), receiver: WestendReceiver::get(), - args: system_para_test_args(destination, beneficiary_id, amount_to_send, assets, None), + args: para_test_args(destination, beneficiary_id, amount_to_send, assets, None, 0), }; let mut test = SystemParaToRelayTest::new(test_args); @@ -366,7 +370,7 @@ fn teleport_native_assets_from_system_para_to_relay_fails() { let test_args = TestContext { sender: AssetHubWestendSender::get(), receiver: WestendReceiver::get(), - args: system_para_test_args(destination, beneficiary_id, amount_to_send, assets, None), + args: para_test_args(destination, beneficiary_id, amount_to_send, assets, None, 0), }; let mut test = SystemParaToRelayTest::new(test_args); @@ -394,16 +398,15 @@ fn 
teleport_native_assets_from_system_para_to_relay_fails() { assert_eq!(receiver_balance_after, receiver_balance_before); } -// TODO: uncomment when CollectivesWestend and BridgeHubWestend are implemented -// https://github.com/paritytech/polkadot-sdk/pull/1737 (CollectivesWestend) -// #[test] -// fn teleport_to_other_system_parachains_works() { -// let amount = ASSET_HUB_WESTEND_ED * 100; -// let native_asset: VersionedMultiAssets = (Parent, amount).into(); - -// test_parachain_is_trusted_teleporter!( -// AssetHubWestend, // Origin -// vec![CollectivesWestend, BridgeHubWestend], // Destinations -// (native_asset, amount) -// ); -// } +#[test] +fn teleport_to_other_system_parachains_works() { + let amount = ASSET_HUB_WESTEND_ED * 100; + let native_asset: MultiAssets = (Parent, amount).into(); + + test_parachain_is_trusted_teleporter!( + AssetHubWestend, // Origin + AssetHubWestendXcmConfig, // XCM Configuration + vec![BridgeHubWestend], // Destinations + (native_asset, amount) + ); +} -- GitLab From fd1ed403c95e1802c2c1bbb5cc724bb645ff0d8b Mon Sep 17 00:00:00 2001 From: Vladimir Istyufeev Date: Mon, 27 Nov 2023 17:50:38 +0400 Subject: [PATCH 132/199] Remove the timestamp handler (#2506) --- .gitlab-ci.yml | 5 ----- 1 file changed, 5 deletions(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index f7e54961fa9..aada30f1dda 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -69,7 +69,6 @@ default: .common-before-script: before_script: - !reference [.job-switcher, before_script] - - !reference [.timestamp, before_script] - !reference [.pipeline-stopper-vars, script] .job-switcher: @@ -211,10 +210,6 @@ include: - .gitlab/pipeline/publish.yml # zombienet jobs - .gitlab/pipeline/zombienet.yml - # timestamp handler - - project: parity/infrastructure/ci_cd/shared - ref: v0.2 - file: /common/timestamp.yml # ci image - project: parity/infrastructure/ci_cd/shared ref: main -- GitLab From 838a534da874cf6071fba1df07643c6c5b033ae0 Mon Sep 17 00:00:00 2001 From: Ross Bulat Date: Mon, 27 Nov 2023 21:42:04 +0700 Subject: [PATCH 133/199] Staking: `chill_other` takes stash instead of controller (#2501) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The `chill_other` call is the only staking call that explicitly requires `controller` in its signature. This PR changes the controller arg to be the stash instead, with `StakingLedger` then fetching the controller from storage. This is not a breaking change per se - the call types do not change, but it is noteworthy as UIs will now want to pass the stash account into `chill_other` calls, and metadata will reflect this. Note: This is very low impact. `chill_other` has [hardly ever been used](https://polkadot.subscan.io/extrinsic?address=&module=staking&call=chill_other&result=all&signedChecked=signed%20only&startDate=&endDate=&startBlock=&timeType=date&version=9431&endBlock=) on Polkadot - notwithstanding the one called 11 days ago at block 18177457, which was part of a test I did, the last call was made 493 days ago. Only 2 calls have ever been successful.
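For illustration, a minimal sketch (not part of this diff) of how a call is constructed against the updated pallet; the helper name and generic setup below are hypothetical, and only the `chill_other { stash }` shape comes from this PR:

```rust
use pallet_staking::Call as StakingCall;

/// Hedged sketch: after this change the call encodes the *stash* account.
/// The pallet resolves the controller from `StakingLedger` internally, so
/// callers no longer need to know it.
fn make_chill_other_call<T: pallet_staking::Config>(stash: T::AccountId) -> StakingCall<T> {
	// Prior to this PR the variant was `chill_other { controller }`.
	StakingCall::<T>::chill_other { stash }
}
```

Front-ends submitting the extrinsic simply swap the controller address for the stash; the call index and argument count are unchanged.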
Addresses controller deprecation #2500 --------- Co-authored-by: command-bot <> Co-authored-by: Gonçalo Pestana --- substrate/frame/staking/src/benchmarking.rs | 2 +- substrate/frame/staking/src/pallet/mod.rs | 13 +- substrate/frame/staking/src/weights.rs | 382 ++++++++++---------- 3 files changed, 203 insertions(+), 194 deletions(-) diff --git a/substrate/frame/staking/src/benchmarking.rs b/substrate/frame/staking/src/benchmarking.rs index 6f60c4909f4..c0c6a838aa5 100644 --- a/substrate/frame/staking/src/benchmarking.rs +++ b/substrate/frame/staking/src/benchmarking.rs @@ -873,7 +873,7 @@ benchmarks! { )?; let caller = whitelisted_caller(); - }: _(RawOrigin::Signed(caller), controller) + }: _(RawOrigin::Signed(caller), stash.clone()) verify { assert!(!T::VoterList::contains(&stash)); } diff --git a/substrate/frame/staking/src/pallet/mod.rs b/substrate/frame/staking/src/pallet/mod.rs index 1f79ef63a47..ce80f22c2bb 100644 --- a/substrate/frame/staking/src/pallet/mod.rs +++ b/substrate/frame/staking/src/pallet/mod.rs @@ -1327,7 +1327,7 @@ pub mod pallet { // (temporary) passive migration. Self::ledger(StakingAccount::Stash(stash.clone())).map(|ledger| { let controller = ledger.controller() - .defensive_proof("ledger was fetched used the StakingInterface, so controller field must exist; qed.") + .defensive_proof("Ledger's controller field didn't exist. The controller should have been fetched using StakingLedger.") .ok_or(Error::::NotController)?; if controller == stash { @@ -1764,11 +1764,16 @@ pub mod pallet { /// who do not satisfy these requirements. #[pallet::call_index(23)] #[pallet::weight(T::WeightInfo::chill_other())] - pub fn chill_other(origin: OriginFor, controller: T::AccountId) -> DispatchResult { + pub fn chill_other(origin: OriginFor, stash: T::AccountId) -> DispatchResult { // Anyone can call this function. let caller = ensure_signed(origin)?; - let ledger = Self::ledger(Controller(controller.clone()))?; - let stash = ledger.stash; + let ledger = Self::ledger(Stash(stash.clone()))?; + let controller = ledger + .controller() + .defensive_proof( + "Ledger's controller field didn't exist. The controller should have been fetched using StakingLedger.", + ) + .ok_or(Error::::NotController)?; // In order for one user to chill another user, the following conditions must be met: // diff --git a/substrate/frame/staking/src/weights.rs b/substrate/frame/staking/src/weights.rs index ad9ca75a032..ae00509eaa8 100644 --- a/substrate/frame/staking/src/weights.rs +++ b/substrate/frame/staking/src/weights.rs @@ -18,7 +18,7 @@ //! Autogenerated weights for `pallet_staking` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-11-17, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-11-27, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` //! HOSTNAME: `runner-yprdrvc7-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` @@ -98,8 +98,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `927` // Estimated: `4764` - // Minimum execution time: 43_427_000 picoseconds. - Weight::from_parts(45_221_000, 4764) + // Minimum execution time: 42_895_000 picoseconds. 
+ Weight::from_parts(44_924_000, 4764) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) } @@ -119,8 +119,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `1990` // Estimated: `8877` - // Minimum execution time: 87_100_000 picoseconds. - Weight::from_parts(90_599_000, 8877) + // Minimum execution time: 87_734_000 picoseconds. + Weight::from_parts(90_762_000, 8877) .saturating_add(T::DbWeight::get().reads(9_u64)) .saturating_add(T::DbWeight::get().writes(7_u64)) } @@ -146,8 +146,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `2195` // Estimated: `8877` - // Minimum execution time: 91_488_000 picoseconds. - Weight::from_parts(94_126_000, 8877) + // Minimum execution time: 90_914_000 picoseconds. + Weight::from_parts(94_156_000, 8877) .saturating_add(T::DbWeight::get().reads(12_u64)) .saturating_add(T::DbWeight::get().writes(7_u64)) } @@ -166,10 +166,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `1115` // Estimated: `4764` - // Minimum execution time: 42_713_000 picoseconds. - Weight::from_parts(44_530_499, 4764) - // Standard Error: 1_067 - .saturating_add(Weight::from_parts(51_411, 0).saturating_mul(s.into())) + // Minimum execution time: 43_141_000 picoseconds. + Weight::from_parts(45_081_969, 4764) + // Standard Error: 1_010 + .saturating_add(Weight::from_parts(39_539, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(5_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -206,10 +206,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `2196 + s * (4 ±0)` // Estimated: `6248 + s * (4 ±0)` - // Minimum execution time: 88_700_000 picoseconds. - Weight::from_parts(98_329_624, 6248) - // Standard Error: 4_477 - .saturating_add(Weight::from_parts(1_347_380, 0).saturating_mul(s.into())) + // Minimum execution time: 87_743_000 picoseconds. + Weight::from_parts(96_983_484, 6248) + // Standard Error: 4_271 + .saturating_add(Weight::from_parts(1_382_993, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(13_u64)) .saturating_add(T::DbWeight::get().writes(11_u64)) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(s.into()))) @@ -241,8 +241,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `1372` // Estimated: `4556` - // Minimum execution time: 51_877_000 picoseconds. - Weight::from_parts(53_503_000, 4556) + // Minimum execution time: 51_888_000 picoseconds. + Weight::from_parts(54_353_000, 4556) .saturating_add(T::DbWeight::get().reads(11_u64)) .saturating_add(T::DbWeight::get().writes(5_u64)) } @@ -255,10 +255,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `1280 + k * (569 ±0)` // Estimated: `4556 + k * (3033 ±0)` - // Minimum execution time: 29_604_000 picoseconds. - Weight::from_parts(31_726_296, 4556) - // Standard Error: 6_350 - .saturating_add(Weight::from_parts(6_416_846, 0).saturating_mul(k.into())) + // Minimum execution time: 28_944_000 picoseconds. 
+ Weight::from_parts(31_116_533, 4556) + // Standard Error: 11_848 + .saturating_add(Weight::from_parts(6_422_601, 0).saturating_mul(k.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(k.into()))) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(k.into()))) @@ -291,10 +291,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `1866 + n * (102 ±0)` // Estimated: `6248 + n * (2520 ±0)` - // Minimum execution time: 64_276_000 picoseconds. - Weight::from_parts(62_615_844, 6248) - // Standard Error: 14_914 - .saturating_add(Weight::from_parts(4_097_019, 0).saturating_mul(n.into())) + // Minimum execution time: 63_921_000 picoseconds. + Weight::from_parts(62_662_863, 6248) + // Standard Error: 15_071 + .saturating_add(Weight::from_parts(3_950_084, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(12_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(n.into()))) .saturating_add(T::DbWeight::get().writes(6_u64)) @@ -318,8 +318,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `1650` // Estimated: `6248` - // Minimum execution time: 55_064_000 picoseconds. - Weight::from_parts(56_566_000, 6248) + // Minimum execution time: 54_605_000 picoseconds. + Weight::from_parts(56_406_000, 6248) .saturating_add(T::DbWeight::get().reads(8_u64)) .saturating_add(T::DbWeight::get().writes(6_u64)) } @@ -333,8 +333,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `902` // Estimated: `4556` - // Minimum execution time: 16_626_000 picoseconds. - Weight::from_parts(17_519_000, 4556) + // Minimum execution time: 16_826_000 picoseconds. + Weight::from_parts(17_326_000, 4556) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -348,8 +348,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `969` // Estimated: `4556` - // Minimum execution time: 20_545_000 picoseconds. - Weight::from_parts(21_395_000, 4556) + // Minimum execution time: 20_831_000 picoseconds. + Weight::from_parts(21_615_000, 4556) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -361,8 +361,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `902` // Estimated: `4556` - // Minimum execution time: 20_179_000 picoseconds. - Weight::from_parts(20_728_000, 4556) + // Minimum execution time: 20_190_000 picoseconds. + Weight::from_parts(20_993_000, 4556) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } @@ -372,8 +372,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_594_000 picoseconds. - Weight::from_parts(2_777_000, 0) + // Minimum execution time: 2_603_000 picoseconds. + Weight::from_parts(2_747_000, 0) .saturating_add(T::DbWeight::get().writes(1_u64)) } /// Storage: `Staking::ForceEra` (r:0 w:1) @@ -382,8 +382,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 8_302_000 picoseconds. - Weight::from_parts(8_741_000, 0) + // Minimum execution time: 8_070_000 picoseconds. 
+ Weight::from_parts(8_745_000, 0) .saturating_add(T::DbWeight::get().writes(1_u64)) } /// Storage: `Staking::ForceEra` (r:0 w:1) @@ -392,8 +392,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 8_504_000 picoseconds. - Weight::from_parts(8_774_000, 0) + // Minimum execution time: 7_999_000 picoseconds. + Weight::from_parts(8_624_000, 0) .saturating_add(T::DbWeight::get().writes(1_u64)) } /// Storage: `Staking::ForceEra` (r:0 w:1) @@ -402,8 +402,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 8_474_000 picoseconds. - Weight::from_parts(8_740_000, 0) + // Minimum execution time: 8_131_000 picoseconds. + Weight::from_parts(8_467_000, 0) .saturating_add(T::DbWeight::get().writes(1_u64)) } /// Storage: `Staking::Invulnerables` (r:0 w:1) @@ -413,10 +413,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_732_000 picoseconds. - Weight::from_parts(3_360_048, 0) - // Standard Error: 34 - .saturating_add(Weight::from_parts(9_964, 0).saturating_mul(v.into())) + // Minimum execution time: 2_731_000 picoseconds. + Weight::from_parts(3_298_421, 0) + // Standard Error: 31 + .saturating_add(Weight::from_parts(10_075, 0).saturating_mul(v.into())) .saturating_add(T::DbWeight::get().writes(1_u64)) } /// Storage: `Staking::SlashingSpans` (r:1 w:1) @@ -452,10 +452,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `2196 + s * (4 ±0)` // Estimated: `6248 + s * (4 ±0)` - // Minimum execution time: 87_495_000 picoseconds. - Weight::from_parts(95_710_388, 6248) - // Standard Error: 5_849 - .saturating_add(Weight::from_parts(1_339_335, 0).saturating_mul(s.into())) + // Minimum execution time: 86_305_000 picoseconds. + Weight::from_parts(94_494_401, 6248) + // Standard Error: 3_602 + .saturating_add(Weight::from_parts(1_339_477, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(13_u64)) .saturating_add(T::DbWeight::get().writes(12_u64)) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(s.into()))) @@ -468,10 +468,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `66672` // Estimated: `70137` - // Minimum execution time: 98_255_000 picoseconds. - Weight::from_parts(892_807_378, 70137) - // Standard Error: 57_735 - .saturating_add(Weight::from_parts(4_876_449, 0).saturating_mul(s.into())) + // Minimum execution time: 100_007_000 picoseconds. + Weight::from_parts(894_033_025, 70137) + // Standard Error: 57_584 + .saturating_add(Weight::from_parts(4_870_504, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -507,11 +507,11 @@ impl WeightInfo for SubstrateWeight { fn payout_stakers_alive_staked(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `33297 + n * (377 ±0)` - // Estimated: `30944 + n * (3774 ±3)` - // Minimum execution time: 144_258_000 picoseconds. - Weight::from_parts(175_256_796, 30944) - // Standard Error: 24_339 - .saturating_add(Weight::from_parts(46_011_700, 0).saturating_mul(n.into())) + // Estimated: `30944 + n * (3774 ±0)` + // Minimum execution time: 142_575_000 picoseconds. 
+ Weight::from_parts(196_320_577, 30944) + // Standard Error: 29_330 + .saturating_add(Weight::from_parts(45_325_062, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(14_u64)) .saturating_add(T::DbWeight::get().reads((6_u64).saturating_mul(n.into()))) .saturating_add(T::DbWeight::get().writes(4_u64)) @@ -535,10 +535,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `1991 + l * (7 ±0)` // Estimated: `8877` - // Minimum execution time: 83_532_000 picoseconds. - Weight::from_parts(86_757_943, 8877) - // Standard Error: 6_379 - .saturating_add(Weight::from_parts(57_566, 0).saturating_mul(l.into())) + // Minimum execution time: 81_113_000 picoseconds. + Weight::from_parts(84_470_927, 8877) + // Standard Error: 5_588 + .saturating_add(Weight::from_parts(97_606, 0).saturating_mul(l.into())) .saturating_add(T::DbWeight::get().reads(9_u64)) .saturating_add(T::DbWeight::get().writes(7_u64)) } @@ -573,10 +573,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `2196 + s * (4 ±0)` // Estimated: `6248 + s * (4 ±0)` - // Minimum execution time: 96_776_000 picoseconds. - Weight::from_parts(100_950_027, 6248) - // Standard Error: 4_719 - .saturating_add(Weight::from_parts(1_432_091, 0).saturating_mul(s.into())) + // Minimum execution time: 94_810_000 picoseconds. + Weight::from_parts(99_292_156, 6248) + // Standard Error: 3_677 + .saturating_add(Weight::from_parts(1_345_598, 0).saturating_mul(s.into())) .saturating_add(T::DbWeight::get().reads(12_u64)) .saturating_add(T::DbWeight::get().writes(11_u64)) .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(s.into()))) @@ -621,13 +621,13 @@ impl WeightInfo for SubstrateWeight { fn new_era(v: u32, n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0 + n * (720 ±0) + v * (3598 ±0)` - // Estimated: `512390 + n * (3566 ±0) + v * (3566 ±0)` - // Minimum execution time: 577_699_000 picoseconds. - Weight::from_parts(582_827_000, 512390) - // Standard Error: 2_000_851 - .saturating_add(Weight::from_parts(67_316_744, 0).saturating_mul(v.into())) - // Standard Error: 199_373 - .saturating_add(Weight::from_parts(18_503_387, 0).saturating_mul(n.into())) + // Estimated: `512390 + n * (3566 ±4) + v * (3566 ±40)` + // Minimum execution time: 583_230_000 picoseconds. + Weight::from_parts(585_794_000, 512390) + // Standard Error: 1_984_644 + .saturating_add(Weight::from_parts(65_914_551, 0).saturating_mul(v.into())) + // Standard Error: 197_758 + .saturating_add(Weight::from_parts(18_105_424, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(206_u64)) .saturating_add(T::DbWeight::get().reads((5_u64).saturating_mul(v.into()))) .saturating_add(T::DbWeight::get().reads((4_u64).saturating_mul(n.into()))) @@ -658,12 +658,12 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `3175 + n * (911 ±0) + v * (395 ±0)` // Estimated: `512390 + n * (3566 ±0) + v * (3566 ±0)` - // Minimum execution time: 34_048_778_000 picoseconds. - Weight::from_parts(34_397_777_000, 512390) - // Standard Error: 346_115 - .saturating_add(Weight::from_parts(3_704_941, 0).saturating_mul(v.into())) - // Standard Error: 346_115 - .saturating_add(Weight::from_parts(4_064_819, 0).saturating_mul(n.into())) + // Minimum execution time: 33_312_958_000 picoseconds. 
+ Weight::from_parts(4_949_866_209, 512390) + // Standard Error: 402_931 + .saturating_add(Weight::from_parts(16_448_367, 0).saturating_mul(v.into())) + // Standard Error: 402_931 + .saturating_add(Weight::from_parts(25_361_503, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(201_u64)) .saturating_add(T::DbWeight::get().reads((5_u64).saturating_mul(v.into()))) .saturating_add(T::DbWeight::get().reads((4_u64).saturating_mul(n.into()))) @@ -680,10 +680,10 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `979 + v * (50 ±0)` // Estimated: `3510 + v * (2520 ±0)` - // Minimum execution time: 2_473_149_000 picoseconds. - Weight::from_parts(84_721_859, 3510) - // Standard Error: 8_690 - .saturating_add(Weight::from_parts(4_870_439, 0).saturating_mul(v.into())) + // Minimum execution time: 2_474_646_000 picoseconds. + Weight::from_parts(2_512_113_000, 3510) + // Standard Error: 33_996 + .saturating_add(Weight::from_parts(1_992_173, 0).saturating_mul(v.into())) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(v.into()))) .saturating_add(Weight::from_parts(0, 2520).saturating_mul(v.into())) @@ -704,8 +704,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 5_312_000 picoseconds. - Weight::from_parts(5_897_000, 0) + // Minimum execution time: 5_466_000 picoseconds. + Weight::from_parts(5_861_000, 0) .saturating_add(T::DbWeight::get().writes(6_u64)) } /// Storage: `Staking::MinCommission` (r:0 w:1) @@ -724,10 +724,12 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 4_676_000 picoseconds. - Weight::from_parts(4_913_000, 0) + // Minimum execution time: 4_780_000 picoseconds. + Weight::from_parts(4_998_000, 0) .saturating_add(T::DbWeight::get().writes(6_u64)) } + /// Storage: `Staking::Bonded` (r:1 w:0) + /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) /// Storage: `Staking::Ledger` (r:1 w:0) /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) /// Storage: `Staking::Nominators` (r:1 w:1) @@ -750,11 +752,11 @@ impl WeightInfo for SubstrateWeight { /// Proof: `VoterList::CounterForListNodes` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) fn chill_other() -> Weight { // Proof Size summary in bytes: - // Measured: `1773` + // Measured: `1939` // Estimated: `6248` - // Minimum execution time: 67_286_000 picoseconds. - Weight::from_parts(69_081_000, 6248) - .saturating_add(T::DbWeight::get().reads(11_u64)) + // Minimum execution time: 71_261_000 picoseconds. + Weight::from_parts(72_778_000, 6248) + .saturating_add(T::DbWeight::get().reads(12_u64)) .saturating_add(T::DbWeight::get().writes(6_u64)) } /// Storage: `Staking::MinCommission` (r:1 w:0) @@ -765,8 +767,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `691` // Estimated: `3510` - // Minimum execution time: 12_749_000 picoseconds. - Weight::from_parts(13_275_000, 3510) + // Minimum execution time: 12_497_000 picoseconds. 
+ Weight::from_parts(13_049_000, 3510) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -776,8 +778,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 3_155_000 picoseconds. - Weight::from_parts(3_319_000, 0) + // Minimum execution time: 3_044_000 picoseconds. + Weight::from_parts(3_278_000, 0) .saturating_add(T::DbWeight::get().writes(1_u64)) } } @@ -798,8 +800,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `927` // Estimated: `4764` - // Minimum execution time: 43_427_000 picoseconds. - Weight::from_parts(45_221_000, 4764) + // Minimum execution time: 42_895_000 picoseconds. + Weight::from_parts(44_924_000, 4764) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) } @@ -819,8 +821,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `1990` // Estimated: `8877` - // Minimum execution time: 87_100_000 picoseconds. - Weight::from_parts(90_599_000, 8877) + // Minimum execution time: 87_734_000 picoseconds. + Weight::from_parts(90_762_000, 8877) .saturating_add(RocksDbWeight::get().reads(9_u64)) .saturating_add(RocksDbWeight::get().writes(7_u64)) } @@ -846,8 +848,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `2195` // Estimated: `8877` - // Minimum execution time: 91_488_000 picoseconds. - Weight::from_parts(94_126_000, 8877) + // Minimum execution time: 90_914_000 picoseconds. + Weight::from_parts(94_156_000, 8877) .saturating_add(RocksDbWeight::get().reads(12_u64)) .saturating_add(RocksDbWeight::get().writes(7_u64)) } @@ -866,10 +868,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `1115` // Estimated: `4764` - // Minimum execution time: 42_713_000 picoseconds. - Weight::from_parts(44_530_499, 4764) - // Standard Error: 1_067 - .saturating_add(Weight::from_parts(51_411, 0).saturating_mul(s.into())) + // Minimum execution time: 43_141_000 picoseconds. + Weight::from_parts(45_081_969, 4764) + // Standard Error: 1_010 + .saturating_add(Weight::from_parts(39_539, 0).saturating_mul(s.into())) .saturating_add(RocksDbWeight::get().reads(5_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -906,10 +908,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `2196 + s * (4 ±0)` // Estimated: `6248 + s * (4 ±0)` - // Minimum execution time: 88_700_000 picoseconds. - Weight::from_parts(98_329_624, 6248) - // Standard Error: 4_477 - .saturating_add(Weight::from_parts(1_347_380, 0).saturating_mul(s.into())) + // Minimum execution time: 87_743_000 picoseconds. + Weight::from_parts(96_983_484, 6248) + // Standard Error: 4_271 + .saturating_add(Weight::from_parts(1_382_993, 0).saturating_mul(s.into())) .saturating_add(RocksDbWeight::get().reads(13_u64)) .saturating_add(RocksDbWeight::get().writes(11_u64)) .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(s.into()))) @@ -941,8 +943,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `1372` // Estimated: `4556` - // Minimum execution time: 51_877_000 picoseconds. - Weight::from_parts(53_503_000, 4556) + // Minimum execution time: 51_888_000 picoseconds. 
+ Weight::from_parts(54_353_000, 4556) .saturating_add(RocksDbWeight::get().reads(11_u64)) .saturating_add(RocksDbWeight::get().writes(5_u64)) } @@ -955,10 +957,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `1280 + k * (569 ±0)` // Estimated: `4556 + k * (3033 ±0)` - // Minimum execution time: 29_604_000 picoseconds. - Weight::from_parts(31_726_296, 4556) - // Standard Error: 6_350 - .saturating_add(Weight::from_parts(6_416_846, 0).saturating_mul(k.into())) + // Minimum execution time: 28_944_000 picoseconds. + Weight::from_parts(31_116_533, 4556) + // Standard Error: 11_848 + .saturating_add(Weight::from_parts(6_422_601, 0).saturating_mul(k.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(k.into()))) .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(k.into()))) @@ -991,10 +993,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `1866 + n * (102 ±0)` // Estimated: `6248 + n * (2520 ±0)` - // Minimum execution time: 64_276_000 picoseconds. - Weight::from_parts(62_615_844, 6248) - // Standard Error: 14_914 - .saturating_add(Weight::from_parts(4_097_019, 0).saturating_mul(n.into())) + // Minimum execution time: 63_921_000 picoseconds. + Weight::from_parts(62_662_863, 6248) + // Standard Error: 15_071 + .saturating_add(Weight::from_parts(3_950_084, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads(12_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(n.into()))) .saturating_add(RocksDbWeight::get().writes(6_u64)) @@ -1018,8 +1020,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `1650` // Estimated: `6248` - // Minimum execution time: 55_064_000 picoseconds. - Weight::from_parts(56_566_000, 6248) + // Minimum execution time: 54_605_000 picoseconds. + Weight::from_parts(56_406_000, 6248) .saturating_add(RocksDbWeight::get().reads(8_u64)) .saturating_add(RocksDbWeight::get().writes(6_u64)) } @@ -1033,8 +1035,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `902` // Estimated: `4556` - // Minimum execution time: 16_626_000 picoseconds. - Weight::from_parts(17_519_000, 4556) + // Minimum execution time: 16_826_000 picoseconds. + Weight::from_parts(17_326_000, 4556) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -1048,8 +1050,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `969` // Estimated: `4556` - // Minimum execution time: 20_545_000 picoseconds. - Weight::from_parts(21_395_000, 4556) + // Minimum execution time: 20_831_000 picoseconds. + Weight::from_parts(21_615_000, 4556) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -1061,8 +1063,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `902` // Estimated: `4556` - // Minimum execution time: 20_179_000 picoseconds. - Weight::from_parts(20_728_000, 4556) + // Minimum execution time: 20_190_000 picoseconds. + Weight::from_parts(20_993_000, 4556) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } @@ -1072,8 +1074,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_594_000 picoseconds. - Weight::from_parts(2_777_000, 0) + // Minimum execution time: 2_603_000 picoseconds. 
+ Weight::from_parts(2_747_000, 0) .saturating_add(RocksDbWeight::get().writes(1_u64)) } /// Storage: `Staking::ForceEra` (r:0 w:1) @@ -1082,8 +1084,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 8_302_000 picoseconds. - Weight::from_parts(8_741_000, 0) + // Minimum execution time: 8_070_000 picoseconds. + Weight::from_parts(8_745_000, 0) .saturating_add(RocksDbWeight::get().writes(1_u64)) } /// Storage: `Staking::ForceEra` (r:0 w:1) @@ -1092,8 +1094,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 8_504_000 picoseconds. - Weight::from_parts(8_774_000, 0) + // Minimum execution time: 7_999_000 picoseconds. + Weight::from_parts(8_624_000, 0) .saturating_add(RocksDbWeight::get().writes(1_u64)) } /// Storage: `Staking::ForceEra` (r:0 w:1) @@ -1102,8 +1104,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 8_474_000 picoseconds. - Weight::from_parts(8_740_000, 0) + // Minimum execution time: 8_131_000 picoseconds. + Weight::from_parts(8_467_000, 0) .saturating_add(RocksDbWeight::get().writes(1_u64)) } /// Storage: `Staking::Invulnerables` (r:0 w:1) @@ -1113,10 +1115,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_732_000 picoseconds. - Weight::from_parts(3_360_048, 0) - // Standard Error: 34 - .saturating_add(Weight::from_parts(9_964, 0).saturating_mul(v.into())) + // Minimum execution time: 2_731_000 picoseconds. + Weight::from_parts(3_298_421, 0) + // Standard Error: 31 + .saturating_add(Weight::from_parts(10_075, 0).saturating_mul(v.into())) .saturating_add(RocksDbWeight::get().writes(1_u64)) } /// Storage: `Staking::SlashingSpans` (r:1 w:1) @@ -1152,10 +1154,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `2196 + s * (4 ±0)` // Estimated: `6248 + s * (4 ±0)` - // Minimum execution time: 87_495_000 picoseconds. - Weight::from_parts(95_710_388, 6248) - // Standard Error: 5_849 - .saturating_add(Weight::from_parts(1_339_335, 0).saturating_mul(s.into())) + // Minimum execution time: 86_305_000 picoseconds. + Weight::from_parts(94_494_401, 6248) + // Standard Error: 3_602 + .saturating_add(Weight::from_parts(1_339_477, 0).saturating_mul(s.into())) .saturating_add(RocksDbWeight::get().reads(13_u64)) .saturating_add(RocksDbWeight::get().writes(12_u64)) .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(s.into()))) @@ -1168,10 +1170,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `66672` // Estimated: `70137` - // Minimum execution time: 98_255_000 picoseconds. - Weight::from_parts(892_807_378, 70137) - // Standard Error: 57_735 - .saturating_add(Weight::from_parts(4_876_449, 0).saturating_mul(s.into())) + // Minimum execution time: 100_007_000 picoseconds. + Weight::from_parts(894_033_025, 70137) + // Standard Error: 57_584 + .saturating_add(Weight::from_parts(4_870_504, 0).saturating_mul(s.into())) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -1207,11 +1209,11 @@ impl WeightInfo for () { fn payout_stakers_alive_staked(n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `33297 + n * (377 ±0)` - // Estimated: `30944 + n * (3774 ±3)` - // Minimum execution time: 144_258_000 picoseconds. 
- Weight::from_parts(175_256_796, 30944) - // Standard Error: 24_339 - .saturating_add(Weight::from_parts(46_011_700, 0).saturating_mul(n.into())) + // Estimated: `30944 + n * (3774 ±0)` + // Minimum execution time: 142_575_000 picoseconds. + Weight::from_parts(196_320_577, 30944) + // Standard Error: 29_330 + .saturating_add(Weight::from_parts(45_325_062, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads(14_u64)) .saturating_add(RocksDbWeight::get().reads((6_u64).saturating_mul(n.into()))) .saturating_add(RocksDbWeight::get().writes(4_u64)) @@ -1235,10 +1237,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `1991 + l * (7 ±0)` // Estimated: `8877` - // Minimum execution time: 83_532_000 picoseconds. - Weight::from_parts(86_757_943, 8877) - // Standard Error: 6_379 - .saturating_add(Weight::from_parts(57_566, 0).saturating_mul(l.into())) + // Minimum execution time: 81_113_000 picoseconds. + Weight::from_parts(84_470_927, 8877) + // Standard Error: 5_588 + .saturating_add(Weight::from_parts(97_606, 0).saturating_mul(l.into())) .saturating_add(RocksDbWeight::get().reads(9_u64)) .saturating_add(RocksDbWeight::get().writes(7_u64)) } @@ -1273,10 +1275,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `2196 + s * (4 ±0)` // Estimated: `6248 + s * (4 ±0)` - // Minimum execution time: 96_776_000 picoseconds. - Weight::from_parts(100_950_027, 6248) - // Standard Error: 4_719 - .saturating_add(Weight::from_parts(1_432_091, 0).saturating_mul(s.into())) + // Minimum execution time: 94_810_000 picoseconds. + Weight::from_parts(99_292_156, 6248) + // Standard Error: 3_677 + .saturating_add(Weight::from_parts(1_345_598, 0).saturating_mul(s.into())) .saturating_add(RocksDbWeight::get().reads(12_u64)) .saturating_add(RocksDbWeight::get().writes(11_u64)) .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(s.into()))) @@ -1321,13 +1323,13 @@ impl WeightInfo for () { fn new_era(v: u32, n: u32, ) -> Weight { // Proof Size summary in bytes: // Measured: `0 + n * (720 ±0) + v * (3598 ±0)` - // Estimated: `512390 + n * (3566 ±0) + v * (3566 ±0)` - // Minimum execution time: 577_699_000 picoseconds. - Weight::from_parts(582_827_000, 512390) - // Standard Error: 2_000_851 - .saturating_add(Weight::from_parts(67_316_744, 0).saturating_mul(v.into())) - // Standard Error: 199_373 - .saturating_add(Weight::from_parts(18_503_387, 0).saturating_mul(n.into())) + // Estimated: `512390 + n * (3566 ±4) + v * (3566 ±40)` + // Minimum execution time: 583_230_000 picoseconds. + Weight::from_parts(585_794_000, 512390) + // Standard Error: 1_984_644 + .saturating_add(Weight::from_parts(65_914_551, 0).saturating_mul(v.into())) + // Standard Error: 197_758 + .saturating_add(Weight::from_parts(18_105_424, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads(206_u64)) .saturating_add(RocksDbWeight::get().reads((5_u64).saturating_mul(v.into()))) .saturating_add(RocksDbWeight::get().reads((4_u64).saturating_mul(n.into()))) @@ -1358,12 +1360,12 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `3175 + n * (911 ±0) + v * (395 ±0)` // Estimated: `512390 + n * (3566 ±0) + v * (3566 ±0)` - // Minimum execution time: 34_048_778_000 picoseconds. 
- Weight::from_parts(34_397_777_000, 512390) - // Standard Error: 346_115 - .saturating_add(Weight::from_parts(3_704_941, 0).saturating_mul(v.into())) - // Standard Error: 346_115 - .saturating_add(Weight::from_parts(4_064_819, 0).saturating_mul(n.into())) + // Minimum execution time: 33_312_958_000 picoseconds. + Weight::from_parts(4_949_866_209, 512390) + // Standard Error: 402_931 + .saturating_add(Weight::from_parts(16_448_367, 0).saturating_mul(v.into())) + // Standard Error: 402_931 + .saturating_add(Weight::from_parts(25_361_503, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads(201_u64)) .saturating_add(RocksDbWeight::get().reads((5_u64).saturating_mul(v.into()))) .saturating_add(RocksDbWeight::get().reads((4_u64).saturating_mul(n.into()))) @@ -1380,10 +1382,10 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `979 + v * (50 ±0)` // Estimated: `3510 + v * (2520 ±0)` - // Minimum execution time: 2_473_149_000 picoseconds. - Weight::from_parts(84_721_859, 3510) - // Standard Error: 8_690 - .saturating_add(Weight::from_parts(4_870_439, 0).saturating_mul(v.into())) + // Minimum execution time: 2_474_646_000 picoseconds. + Weight::from_parts(2_512_113_000, 3510) + // Standard Error: 33_996 + .saturating_add(Weight::from_parts(1_992_173, 0).saturating_mul(v.into())) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(v.into()))) .saturating_add(Weight::from_parts(0, 2520).saturating_mul(v.into())) @@ -1404,8 +1406,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 5_312_000 picoseconds. - Weight::from_parts(5_897_000, 0) + // Minimum execution time: 5_466_000 picoseconds. + Weight::from_parts(5_861_000, 0) .saturating_add(RocksDbWeight::get().writes(6_u64)) } /// Storage: `Staking::MinCommission` (r:0 w:1) @@ -1424,10 +1426,12 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 4_676_000 picoseconds. - Weight::from_parts(4_913_000, 0) + // Minimum execution time: 4_780_000 picoseconds. + Weight::from_parts(4_998_000, 0) .saturating_add(RocksDbWeight::get().writes(6_u64)) } + /// Storage: `Staking::Bonded` (r:1 w:0) + /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) /// Storage: `Staking::Ledger` (r:1 w:0) /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) /// Storage: `Staking::Nominators` (r:1 w:1) @@ -1450,11 +1454,11 @@ impl WeightInfo for () { /// Proof: `VoterList::CounterForListNodes` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) fn chill_other() -> Weight { // Proof Size summary in bytes: - // Measured: `1773` + // Measured: `1939` // Estimated: `6248` - // Minimum execution time: 67_286_000 picoseconds. - Weight::from_parts(69_081_000, 6248) - .saturating_add(RocksDbWeight::get().reads(11_u64)) + // Minimum execution time: 71_261_000 picoseconds. + Weight::from_parts(72_778_000, 6248) + .saturating_add(RocksDbWeight::get().reads(12_u64)) .saturating_add(RocksDbWeight::get().writes(6_u64)) } /// Storage: `Staking::MinCommission` (r:1 w:0) @@ -1465,8 +1469,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `691` // Estimated: `3510` - // Minimum execution time: 12_749_000 picoseconds. 
- Weight::from_parts(13_275_000, 3510) + // Minimum execution time: 12_497_000 picoseconds. + Weight::from_parts(13_049_000, 3510) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -1476,8 +1480,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 3_155_000 picoseconds. - Weight::from_parts(3_319_000, 0) + // Minimum execution time: 3_044_000 picoseconds. + Weight::from_parts(3_278_000, 0) .saturating_add(RocksDbWeight::get().writes(1_u64)) } } -- GitLab From db290997651dae8615be706efdd74f0ab0db52ee Mon Sep 17 00:00:00 2001 From: Liam Aharon Date: Tue, 28 Nov 2023 09:42:14 +0400 Subject: [PATCH 134/199] CI: Fix `build-and-attach-release-runtimes.yml` (#2471) Incorporate suggestion from release team to get this workflow working. edit: worked on this test GH release: https://github.com/paritytech/polkadot-sdk/releases/tag/liam-debug-ghw. let's try it. --- .github/workflows/build-and-attach-release-runtimes.yml | 8 +------- 1 file changed, 1 insertion(+), 7 deletions(-) diff --git a/.github/workflows/build-and-attach-release-runtimes.yml b/.github/workflows/build-and-attach-release-runtimes.yml index db0175c6855..8e0a5ba04b4 100644 --- a/.github/workflows/build-and-attach-release-runtimes.yml +++ b/.github/workflows/build-and-attach-release-runtimes.yml @@ -3,7 +3,7 @@ name: Build and Attach Runtimes to Releases/RC on: release: types: - - created + - published env: PROFILE: production @@ -44,12 +44,6 @@ jobs: runtime_dir: ${{ matrix.runtime.path }} profile: ${{ env.PROFILE }} - - name: Build Summary - run: | - echo "${{ steps.srtool_build.outputs.json }}" | jq . > ${{ matrix.runtime.name }}-srtool-digest.json - cat ${{ matrix.runtime.name }}-srtool-digest.json - echo "Runtime location: ${{ steps.srtool_build.outputs.wasm }}" - - name: Set up paths and runtime names id: setup run: | -- GitLab From 2ac23d247d2d81e6f67116a1696beaf189192b78 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Gon=C3=A7alo=20Pestana?= Date: Tue, 28 Nov 2023 08:11:46 +0100 Subject: [PATCH 135/199] Fixes cumulus README instructions (#2442) README instructions fixes to be compatible with the `polkadot-prepare` and `polkadot-execute` binary split. 
--- cumulus/README.md | 15 ++++++--------- 1 file changed, 6 insertions(+), 9 deletions(-) diff --git a/cumulus/README.md b/cumulus/README.md index 19f9f3f113d..6acbf6dc2ca 100644 --- a/cumulus/README.md +++ b/cumulus/README.md @@ -142,8 +142,8 @@ zombienet --provider native spawn ./zombienet/examples/small_network.toml # Clone git clone https://github.com/paritytech/polkadot-sdk -# Compile Polkadot -cargo build --release --bin polkadot +# Compile Polkadot's required binaries +cargo build --release -p polkadot # Generate a raw chain spec ./target/release/polkadot build-spec --chain rococo-local --disable-default-bootnode --raw > rococo-local-cfde.json @@ -158,11 +158,8 @@ cargo build --release --bin polkadot #### Launch the Parachain ```bash -# Clone -git clone https://github.com/paritytech/polkadot-sdk - # Compile -cargo build --release --bin polkadot-parachain +cargo build --release -p polkadot-parachain-bin # Export genesis state ./target/release/polkadot-parachain export-genesis-state > genesis-state @@ -172,15 +169,15 @@ cargo build --release --bin polkadot-parachain # Collator1 ./target/release/polkadot-parachain --collator --alice --force-authoring \ - --tmp --port 40335 --rpc-port 9946 -- --chain ../polkadot/rococo-local-cfde.json --port 30335 + --tmp --port 40335 --rpc-port 9946 -- --chain rococo-local-cfde.json --port 30335 # Collator2 ./target/release/polkadot-parachain --collator --bob --force-authoring \ - --tmp --port 40336 --rpc-port 9947 -- --chain ../polkadot/rococo-local-cfde.json --port 30336 + --tmp --port 40336 --rpc-port 9947 -- --chain rococo-local-cfde.json --port 30336 # Parachain Full Node 1 ./target/release/polkadot-parachain --tmp --port 40337 --rpc-port 9948 -- \ - --chain ../polkadot/rococo-local-cfde.json --port 30337 + --chain rococo-local-cfde.json --port 30337 ``` #### Register the parachain -- GitLab From 75062717de9f3dbe741d058309600c864cc603c3 Mon Sep 17 00:00:00 2001 From: Ross Bulat Date: Tue, 28 Nov 2023 14:19:49 +0700 Subject: [PATCH 136/199] Pools: Add ability to configure commission claiming permissions (#2474) Addresses #409. This request has been raised by multiple community members - the ability for the nomination pool root role to configure permissionless commission claiming: > Would it be possible to have a claim_commission_other extrinsic for claiming commission of nomination pools permissionless? This PR does not quite introduce this additional call, but amends `do_claim_commission` to check a new `claim_permission` field in the `Commission` struct, configured by an enum: ``` enum CommissionClaimPermission { Permissionless, Account(AccountId), } ``` This can be optionally set in a bonded pool's `commission.claim_permission` field: ``` struct BondedPool { commission: { claim_permission: Option>, }, } ``` This is a new field and requires a migration to add it to existing pools. This will be `None` on pool creation, falling back to the `root` role having sole access to claim commission if it is not set; this is the behaviour as it is today. Once set, the field _can_ be set to `None` again. #### Changes - [x] Add `commision.claim_permission` field. - [x] Add `can_claim_commission` and amend `do_claim_commission`. - [x] Add `set_commission_claim_permission` call. - [x] Test to cover new configs and call. - [x] Add and amend benchmarks. - [x] Generate new weights + slot into call `set_commission_claim_permission`. - [x] Add migration to introduce `commission.claim_permission`, bump storage version. - [x] Update Westend weights. 
- [x] Migration working. --------- Co-authored-by: command-bot <> --- polkadot/runtime/westend/src/lib.rs | 3 +- .../src/weights/pallet_nomination_pools.rs | 276 ++++----- .../nomination-pools/benchmarking/src/lib.rs | 41 +- substrate/frame/nomination-pools/src/lib.rs | 58 +- .../frame/nomination-pools/src/migration.rs | 77 +++ substrate/frame/nomination-pools/src/tests.rs | 211 ++++++- .../frame/nomination-pools/src/weights.rs | 523 ++++++++++-------- 7 files changed, 796 insertions(+), 393 deletions(-) diff --git a/polkadot/runtime/westend/src/lib.rs b/polkadot/runtime/westend/src/lib.rs index 6bbdb2db098..bb6331e9534 100644 --- a/polkadot/runtime/westend/src/lib.rs +++ b/polkadot/runtime/westend/src/lib.rs @@ -1533,11 +1533,10 @@ pub mod migrations { parachains_configuration::migration::v8::MigrateToV8, parachains_configuration::migration::v9::MigrateToV9, paras_registrar::migration::MigrateToV1, - pallet_nomination_pools::migration::versioned::V5toV6, pallet_referenda::migration::v1::MigrateV0ToV1, - pallet_nomination_pools::migration::versioned::V6ToV7, pallet_grandpa::migrations::MigrateV4ToV5, parachains_configuration::migration::v10::MigrateToV10, + pallet_nomination_pools::migration::versioned::V7ToV8, ); } diff --git a/polkadot/runtime/westend/src/weights/pallet_nomination_pools.rs b/polkadot/runtime/westend/src/weights/pallet_nomination_pools.rs index 49bc687a3e4..6aa5ddd1ec8 100644 --- a/polkadot/runtime/westend/src/weights/pallet_nomination_pools.rs +++ b/polkadot/runtime/westend/src/weights/pallet_nomination_pools.rs @@ -17,9 +17,9 @@ //! Autogenerated weights for `pallet_nomination_pools` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-09-22, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-11-24, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-nbnwcyh-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-yprdrvc7-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! 
WASM-EXECUTION: `Compiled`, CHAIN: `Some("westend-dev")`, DB CACHE: 1024 // Executed Command: @@ -53,7 +53,7 @@ impl pallet_nomination_pools::WeightInfo for WeightInfo /// Storage: `NominationPools::PoolMembers` (r:1 w:1) /// Proof: `NominationPools::PoolMembers` (`max_values`: None, `max_size`: Some(717), added: 3192, mode: `MaxEncodedLen`) /// Storage: `NominationPools::BondedPools` (r:1 w:1) - /// Proof: `NominationPools::BondedPools` (`max_values`: None, `max_size`: Some(220), added: 2695, mode: `MaxEncodedLen`) + /// Proof: `NominationPools::BondedPools` (`max_values`: None, `max_size`: Some(254), added: 2729, mode: `MaxEncodedLen`) /// Storage: `Staking::Bonded` (r:1 w:0) /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) /// Storage: `Staking::Ledger` (r:1 w:1) @@ -78,20 +78,22 @@ impl pallet_nomination_pools::WeightInfo for WeightInfo /// Proof: `VoterList::ListNodes` (`max_values`: None, `max_size`: Some(154), added: 2629, mode: `MaxEncodedLen`) /// Storage: `VoterList::ListBags` (r:2 w:2) /// Proof: `VoterList::ListBags` (`max_values`: None, `max_size`: Some(82), added: 2557, mode: `MaxEncodedLen`) + /// Storage: `NominationPools::TotalValueLocked` (r:1 w:1) + /// Proof: `NominationPools::TotalValueLocked` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) fn join() -> Weight { // Proof Size summary in bytes: - // Measured: `3318` + // Measured: `3355` // Estimated: `8877` - // Minimum execution time: 187_795_000 picoseconds. - Weight::from_parts(193_857_000, 0) + // Minimum execution time: 173_707_000 picoseconds. + Weight::from_parts(179_920_000, 0) .saturating_add(Weight::from_parts(0, 8877)) - .saturating_add(T::DbWeight::get().reads(19)) - .saturating_add(T::DbWeight::get().writes(12)) + .saturating_add(T::DbWeight::get().reads(20)) + .saturating_add(T::DbWeight::get().writes(13)) } /// Storage: `NominationPools::PoolMembers` (r:1 w:1) /// Proof: `NominationPools::PoolMembers` (`max_values`: None, `max_size`: Some(717), added: 3192, mode: `MaxEncodedLen`) /// Storage: `NominationPools::BondedPools` (r:1 w:1) - /// Proof: `NominationPools::BondedPools` (`max_values`: None, `max_size`: Some(220), added: 2695, mode: `MaxEncodedLen`) + /// Proof: `NominationPools::BondedPools` (`max_values`: None, `max_size`: Some(254), added: 2729, mode: `MaxEncodedLen`) /// Storage: `NominationPools::RewardPools` (r:1 w:1) /// Proof: `NominationPools::RewardPools` (`max_values`: None, `max_size`: Some(92), added: 2567, mode: `MaxEncodedLen`) /// Storage: `NominationPools::GlobalMaxCommission` (r:1 w:0) @@ -110,22 +112,24 @@ impl pallet_nomination_pools::WeightInfo for WeightInfo /// Proof: `VoterList::ListNodes` (`max_values`: None, `max_size`: Some(154), added: 2629, mode: `MaxEncodedLen`) /// Storage: `VoterList::ListBags` (r:2 w:2) /// Proof: `VoterList::ListBags` (`max_values`: None, `max_size`: Some(82), added: 2557, mode: `MaxEncodedLen`) + /// Storage: `NominationPools::TotalValueLocked` (r:1 w:1) + /// Proof: `NominationPools::TotalValueLocked` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) fn bond_extra_transfer() -> Weight { // Proof Size summary in bytes: - // Measured: `3328` + // Measured: `3365` // Estimated: `8877` - // Minimum execution time: 186_245_000 picoseconds. - Weight::from_parts(190_916_000, 0) + // Minimum execution time: 174_414_000 picoseconds. 
+ Weight::from_parts(178_068_000, 0) .saturating_add(Weight::from_parts(0, 8877)) - .saturating_add(T::DbWeight::get().reads(16)) - .saturating_add(T::DbWeight::get().writes(12)) + .saturating_add(T::DbWeight::get().reads(17)) + .saturating_add(T::DbWeight::get().writes(13)) } /// Storage: `NominationPools::ClaimPermissions` (r:1 w:0) /// Proof: `NominationPools::ClaimPermissions` (`max_values`: None, `max_size`: Some(41), added: 2516, mode: `MaxEncodedLen`) /// Storage: `NominationPools::PoolMembers` (r:1 w:1) /// Proof: `NominationPools::PoolMembers` (`max_values`: None, `max_size`: Some(717), added: 3192, mode: `MaxEncodedLen`) /// Storage: `NominationPools::BondedPools` (r:1 w:1) - /// Proof: `NominationPools::BondedPools` (`max_values`: None, `max_size`: Some(220), added: 2695, mode: `MaxEncodedLen`) + /// Proof: `NominationPools::BondedPools` (`max_values`: None, `max_size`: Some(254), added: 2729, mode: `MaxEncodedLen`) /// Storage: `NominationPools::RewardPools` (r:1 w:1) /// Proof: `NominationPools::RewardPools` (`max_values`: None, `max_size`: Some(92), added: 2567, mode: `MaxEncodedLen`) /// Storage: `NominationPools::GlobalMaxCommission` (r:1 w:0) @@ -144,22 +148,24 @@ impl pallet_nomination_pools::WeightInfo for WeightInfo /// Proof: `VoterList::ListNodes` (`max_values`: None, `max_size`: Some(154), added: 2629, mode: `MaxEncodedLen`) /// Storage: `VoterList::ListBags` (r:2 w:2) /// Proof: `VoterList::ListBags` (`max_values`: None, `max_size`: Some(82), added: 2557, mode: `MaxEncodedLen`) + /// Storage: `NominationPools::TotalValueLocked` (r:1 w:1) + /// Proof: `NominationPools::TotalValueLocked` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) fn bond_extra_other() -> Weight { // Proof Size summary in bytes: - // Measured: `3274` + // Measured: `3312` // Estimated: `8799` - // Minimum execution time: 217_918_000 picoseconds. - Weight::from_parts(224_772_000, 0) + // Minimum execution time: 198_864_000 picoseconds. + Weight::from_parts(203_783_000, 0) .saturating_add(Weight::from_parts(0, 8799)) - .saturating_add(T::DbWeight::get().reads(16)) - .saturating_add(T::DbWeight::get().writes(12)) + .saturating_add(T::DbWeight::get().reads(17)) + .saturating_add(T::DbWeight::get().writes(13)) } /// Storage: `NominationPools::ClaimPermissions` (r:1 w:0) /// Proof: `NominationPools::ClaimPermissions` (`max_values`: None, `max_size`: Some(41), added: 2516, mode: `MaxEncodedLen`) /// Storage: `NominationPools::PoolMembers` (r:1 w:1) /// Proof: `NominationPools::PoolMembers` (`max_values`: None, `max_size`: Some(717), added: 3192, mode: `MaxEncodedLen`) /// Storage: `NominationPools::BondedPools` (r:1 w:1) - /// Proof: `NominationPools::BondedPools` (`max_values`: None, `max_size`: Some(220), added: 2695, mode: `MaxEncodedLen`) + /// Proof: `NominationPools::BondedPools` (`max_values`: None, `max_size`: Some(254), added: 2729, mode: `MaxEncodedLen`) /// Storage: `NominationPools::RewardPools` (r:1 w:1) /// Proof: `NominationPools::RewardPools` (`max_values`: None, `max_size`: Some(92), added: 2567, mode: `MaxEncodedLen`) /// Storage: `NominationPools::GlobalMaxCommission` (r:1 w:0) @@ -168,10 +174,10 @@ impl pallet_nomination_pools::WeightInfo for WeightInfo /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn claim_payout() -> Weight { // Proof Size summary in bytes: - // Measured: `1137` + // Measured: `1138` // Estimated: `4182` - // Minimum execution time: 76_958_000 picoseconds. 
- Weight::from_parts(78_278_000, 0) + // Minimum execution time: 70_250_000 picoseconds. + Weight::from_parts(72_231_000, 0) .saturating_add(Weight::from_parts(0, 4182)) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(4)) @@ -179,7 +185,7 @@ impl pallet_nomination_pools::WeightInfo for WeightInfo /// Storage: `NominationPools::PoolMembers` (r:1 w:1) /// Proof: `NominationPools::PoolMembers` (`max_values`: None, `max_size`: Some(717), added: 3192, mode: `MaxEncodedLen`) /// Storage: `NominationPools::BondedPools` (r:1 w:1) - /// Proof: `NominationPools::BondedPools` (`max_values`: None, `max_size`: Some(220), added: 2695, mode: `MaxEncodedLen`) + /// Proof: `NominationPools::BondedPools` (`max_values`: None, `max_size`: Some(254), added: 2729, mode: `MaxEncodedLen`) /// Storage: `NominationPools::RewardPools` (r:1 w:1) /// Proof: `NominationPools::RewardPools` (`max_values`: None, `max_size`: Some(92), added: 2567, mode: `MaxEncodedLen`) /// Storage: `Staking::Bonded` (r:1 w:0) @@ -210,16 +216,16 @@ impl pallet_nomination_pools::WeightInfo for WeightInfo /// Proof: `NominationPools::CounterForSubPoolsStorage` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) fn unbond() -> Weight { // Proof Size summary in bytes: - // Measured: `3597` + // Measured: `3545` // Estimated: `8877` - // Minimum execution time: 170_992_000 picoseconds. - Weight::from_parts(179_987_000, 0) + // Minimum execution time: 155_853_000 picoseconds. + Weight::from_parts(161_032_000, 0) .saturating_add(Weight::from_parts(0, 8877)) .saturating_add(T::DbWeight::get().reads(20)) .saturating_add(T::DbWeight::get().writes(13)) } /// Storage: `NominationPools::BondedPools` (r:1 w:0) - /// Proof: `NominationPools::BondedPools` (`max_values`: None, `max_size`: Some(220), added: 2695, mode: `MaxEncodedLen`) + /// Proof: `NominationPools::BondedPools` (`max_values`: None, `max_size`: Some(254), added: 2729, mode: `MaxEncodedLen`) /// Storage: `Staking::Bonded` (r:1 w:0) /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) /// Storage: `Staking::Ledger` (r:1 w:1) @@ -230,25 +236,27 @@ impl pallet_nomination_pools::WeightInfo for WeightInfo /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) /// Storage: `Balances::Freezes` (r:1 w:0) /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`) + /// Storage: `NominationPools::TotalValueLocked` (r:1 w:1) + /// Proof: `NominationPools::TotalValueLocked` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) /// The range of component `s` is `[0, 100]`. fn pool_withdraw_unbonded(s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1670` + // Measured: `1744` // Estimated: `4764` - // Minimum execution time: 60_740_000 picoseconds. - Weight::from_parts(64_502_831, 0) + // Minimum execution time: 62_933_000 picoseconds. 
+ Weight::from_parts(65_847_171, 0) .saturating_add(Weight::from_parts(0, 4764)) - // Standard Error: 2_724 - .saturating_add(Weight::from_parts(37_725, 0).saturating_mul(s.into())) - .saturating_add(T::DbWeight::get().reads(6)) - .saturating_add(T::DbWeight::get().writes(2)) + // Standard Error: 1_476 + .saturating_add(Weight::from_parts(59_648, 0).saturating_mul(s.into())) + .saturating_add(T::DbWeight::get().reads(7)) + .saturating_add(T::DbWeight::get().writes(3)) } /// Storage: `NominationPools::PoolMembers` (r:1 w:1) /// Proof: `NominationPools::PoolMembers` (`max_values`: None, `max_size`: Some(717), added: 3192, mode: `MaxEncodedLen`) /// Storage: `Staking::CurrentEra` (r:1 w:0) /// Proof: `Staking::CurrentEra` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) /// Storage: `NominationPools::BondedPools` (r:1 w:1) - /// Proof: `NominationPools::BondedPools` (`max_values`: None, `max_size`: Some(220), added: 2695, mode: `MaxEncodedLen`) + /// Proof: `NominationPools::BondedPools` (`max_values`: None, `max_size`: Some(254), added: 2729, mode: `MaxEncodedLen`) /// Storage: `NominationPools::SubPoolsStorage` (r:1 w:1) /// Proof: `NominationPools::SubPoolsStorage` (`max_values`: None, `max_size`: Some(261), added: 2736, mode: `MaxEncodedLen`) /// Storage: `Staking::Bonded` (r:1 w:0) @@ -261,6 +269,8 @@ impl pallet_nomination_pools::WeightInfo for WeightInfo /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`) /// Storage: `System::Account` (r:1 w:1) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `NominationPools::TotalValueLocked` (r:1 w:1) + /// Proof: `NominationPools::TotalValueLocked` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) /// Storage: `NominationPools::CounterForPoolMembers` (r:1 w:1) /// Proof: `NominationPools::CounterForPoolMembers` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) /// Storage: `NominationPools::ClaimPermissions` (r:0 w:1) @@ -268,22 +278,22 @@ impl pallet_nomination_pools::WeightInfo for WeightInfo /// The range of component `s` is `[0, 100]`. fn withdraw_unbonded_update(s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `2098` + // Measured: `2134` // Estimated: `4764` - // Minimum execution time: 127_322_000 picoseconds. - Weight::from_parts(132_064_603, 0) + // Minimum execution time: 123_641_000 picoseconds. 
+ Weight::from_parts(127_222_589, 0) .saturating_add(Weight::from_parts(0, 4764)) - // Standard Error: 3_424 - .saturating_add(Weight::from_parts(64_590, 0).saturating_mul(s.into())) - .saturating_add(T::DbWeight::get().reads(10)) - .saturating_add(T::DbWeight::get().writes(8)) + // Standard Error: 2_493 + .saturating_add(Weight::from_parts(83_361, 0).saturating_mul(s.into())) + .saturating_add(T::DbWeight::get().reads(11)) + .saturating_add(T::DbWeight::get().writes(9)) } /// Storage: `NominationPools::PoolMembers` (r:1 w:1) /// Proof: `NominationPools::PoolMembers` (`max_values`: None, `max_size`: Some(717), added: 3192, mode: `MaxEncodedLen`) /// Storage: `Staking::CurrentEra` (r:1 w:0) /// Proof: `Staking::CurrentEra` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) /// Storage: `NominationPools::BondedPools` (r:1 w:1) - /// Proof: `NominationPools::BondedPools` (`max_values`: None, `max_size`: Some(220), added: 2695, mode: `MaxEncodedLen`) + /// Proof: `NominationPools::BondedPools` (`max_values`: None, `max_size`: Some(254), added: 2729, mode: `MaxEncodedLen`) /// Storage: `NominationPools::SubPoolsStorage` (r:1 w:1) /// Proof: `NominationPools::SubPoolsStorage` (`max_values`: None, `max_size`: Some(261), added: 2736, mode: `MaxEncodedLen`) /// Storage: `Staking::Bonded` (r:1 w:1) @@ -292,16 +302,18 @@ impl pallet_nomination_pools::WeightInfo for WeightInfo /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) /// Storage: `Staking::SlashingSpans` (r:1 w:0) /// Proof: `Staking::SlashingSpans` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// Storage: `Staking::Validators` (r:1 w:0) - /// Proof: `Staking::Validators` (`max_values`: None, `max_size`: Some(45), added: 2520, mode: `MaxEncodedLen`) - /// Storage: `Staking::Nominators` (r:1 w:0) - /// Proof: `Staking::Nominators` (`max_values`: None, `max_size`: Some(558), added: 3033, mode: `MaxEncodedLen`) - /// Storage: `System::Account` (r:2 w:2) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) /// Storage: `Balances::Locks` (r:2 w:1) /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) /// Storage: `Balances::Freezes` (r:2 w:1) /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:2 w:2) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Staking::Validators` (r:1 w:0) + /// Proof: `Staking::Validators` (`max_values`: None, `max_size`: Some(45), added: 2520, mode: `MaxEncodedLen`) + /// Storage: `Staking::Nominators` (r:1 w:0) + /// Proof: `Staking::Nominators` (`max_values`: None, `max_size`: Some(558), added: 3033, mode: `MaxEncodedLen`) + /// Storage: `NominationPools::TotalValueLocked` (r:1 w:1) + /// Proof: `NominationPools::TotalValueLocked` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) /// Storage: `NominationPools::CounterForPoolMembers` (r:1 w:1) /// Proof: `NominationPools::CounterForPoolMembers` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) /// Storage: `NominationPools::ReversePoolIdLookup` (r:1 w:1) @@ -323,17 +335,15 @@ impl pallet_nomination_pools::WeightInfo for WeightInfo /// Storage: `NominationPools::ClaimPermissions` (r:0 w:1) /// Proof: 
`NominationPools::ClaimPermissions` (`max_values`: None, `max_size`: Some(41), added: 2516, mode: `MaxEncodedLen`) /// The range of component `s` is `[0, 100]`. - fn withdraw_unbonded_kill(s: u32, ) -> Weight { + fn withdraw_unbonded_kill(_s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `2454` + // Measured: `2453` // Estimated: `8538` - // Minimum execution time: 236_510_000 picoseconds. - Weight::from_parts(243_943_334, 0) + // Minimum execution time: 219_469_000 picoseconds. + Weight::from_parts(227_526_000, 0) .saturating_add(Weight::from_parts(0, 8538)) - // Standard Error: 4_864 - .saturating_add(Weight::from_parts(14_974, 0).saturating_mul(s.into())) - .saturating_add(T::DbWeight::get().reads(23)) - .saturating_add(T::DbWeight::get().writes(19)) + .saturating_add(T::DbWeight::get().reads(24)) + .saturating_add(T::DbWeight::get().writes(20)) } /// Storage: `NominationPools::LastPoolId` (r:1 w:1) /// Proof: `NominationPools::LastPoolId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) @@ -359,14 +369,12 @@ impl pallet_nomination_pools::WeightInfo for WeightInfo /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) /// Storage: `Staking::Bonded` (r:1 w:1) /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) - /// Storage: `Staking::Ledger` (r:1 w:1) - /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) - /// Storage: `Staking::CurrentEra` (r:1 w:0) - /// Proof: `Staking::CurrentEra` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) /// Storage: `Balances::Locks` (r:2 w:1) /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) /// Storage: `Balances::Freezes` (r:2 w:1) /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`) + /// Storage: `NominationPools::TotalValueLocked` (r:1 w:1) + /// Proof: `NominationPools::TotalValueLocked` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) /// Storage: `NominationPools::RewardPools` (r:1 w:1) /// Proof: `NominationPools::RewardPools` (`max_values`: None, `max_size`: Some(92), added: 2567, mode: `MaxEncodedLen`) /// Storage: `NominationPools::CounterForRewardPools` (r:1 w:1) @@ -376,21 +384,23 @@ impl pallet_nomination_pools::WeightInfo for WeightInfo /// Storage: `NominationPools::CounterForReversePoolIdLookup` (r:1 w:1) /// Proof: `NominationPools::CounterForReversePoolIdLookup` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) /// Storage: `NominationPools::BondedPools` (r:1 w:1) - /// Proof: `NominationPools::BondedPools` (`max_values`: None, `max_size`: Some(220), added: 2695, mode: `MaxEncodedLen`) + /// Proof: `NominationPools::BondedPools` (`max_values`: None, `max_size`: Some(254), added: 2729, mode: `MaxEncodedLen`) + /// Storage: `Staking::Ledger` (r:0 w:1) + /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) /// Storage: `Staking::Payee` (r:0 w:1) /// Proof: `Staking::Payee` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) fn create() -> Weight { // Proof Size summary in bytes: - // Measured: `1222` + // Measured: `1102` // Estimated: `8538` - // Minimum execution time: 197_883_000 picoseconds. 
- Weight::from_parts(201_750_000, 0) + // Minimum execution time: 166_466_000 picoseconds. + Weight::from_parts(171_425_000, 0) .saturating_add(Weight::from_parts(0, 8538)) - .saturating_add(T::DbWeight::get().reads(24)) - .saturating_add(T::DbWeight::get().writes(16)) + .saturating_add(T::DbWeight::get().reads(23)) + .saturating_add(T::DbWeight::get().writes(17)) } /// Storage: `NominationPools::BondedPools` (r:1 w:0) - /// Proof: `NominationPools::BondedPools` (`max_values`: None, `max_size`: Some(220), added: 2695, mode: `MaxEncodedLen`) + /// Proof: `NominationPools::BondedPools` (`max_values`: None, `max_size`: Some(254), added: 2729, mode: `MaxEncodedLen`) /// Storage: `Staking::Bonded` (r:1 w:0) /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) /// Storage: `Staking::Ledger` (r:1 w:0) @@ -416,36 +426,36 @@ impl pallet_nomination_pools::WeightInfo for WeightInfo /// The range of component `n` is `[1, 16]`. fn nominate(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1779` + // Measured: `1738` // Estimated: `4556 + n * (2520 ±0)` - // Minimum execution time: 65_505_000 picoseconds. - Weight::from_parts(67_148_657, 0) + // Minimum execution time: 59_650_000 picoseconds. + Weight::from_parts(60_620_077, 0) .saturating_add(Weight::from_parts(0, 4556)) - // Standard Error: 9_115 - .saturating_add(Weight::from_parts(1_421_198, 0).saturating_mul(n.into())) + // Standard Error: 7_316 + .saturating_add(Weight::from_parts(1_467_406, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(12)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(n.into()))) .saturating_add(T::DbWeight::get().writes(5)) .saturating_add(Weight::from_parts(0, 2520).saturating_mul(n.into())) } /// Storage: `NominationPools::BondedPools` (r:1 w:1) - /// Proof: `NominationPools::BondedPools` (`max_values`: None, `max_size`: Some(220), added: 2695, mode: `MaxEncodedLen`) + /// Proof: `NominationPools::BondedPools` (`max_values`: None, `max_size`: Some(254), added: 2729, mode: `MaxEncodedLen`) /// Storage: `Staking::Bonded` (r:1 w:0) /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) /// Storage: `Staking::Ledger` (r:1 w:0) /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) fn set_state() -> Weight { // Proof Size summary in bytes: - // Measured: `1367` + // Measured: `1363` // Estimated: `4556` - // Minimum execution time: 34_157_000 picoseconds. - Weight::from_parts(35_557_000, 0) + // Minimum execution time: 31_170_000 picoseconds. + Weight::from_parts(32_217_000, 0) .saturating_add(Weight::from_parts(0, 4556)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(1)) } /// Storage: `NominationPools::BondedPools` (r:1 w:0) - /// Proof: `NominationPools::BondedPools` (`max_values`: None, `max_size`: Some(220), added: 2695, mode: `MaxEncodedLen`) + /// Proof: `NominationPools::BondedPools` (`max_values`: None, `max_size`: Some(254), added: 2729, mode: `MaxEncodedLen`) /// Storage: `NominationPools::Metadata` (r:1 w:1) /// Proof: `NominationPools::Metadata` (`max_values`: None, `max_size`: Some(270), added: 2745, mode: `MaxEncodedLen`) /// Storage: `NominationPools::CounterForMetadata` (r:1 w:1) @@ -453,13 +463,13 @@ impl pallet_nomination_pools::WeightInfo for WeightInfo /// The range of component `n` is `[1, 256]`. 
fn set_metadata(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `497` + // Measured: `498` // Estimated: `3735` - // Minimum execution time: 13_806_000 picoseconds. - Weight::from_parts(14_540_018, 0) + // Minimum execution time: 12_603_000 picoseconds. + Weight::from_parts(13_241_702, 0) .saturating_add(Weight::from_parts(0, 3735)) - // Standard Error: 123 - .saturating_add(Weight::from_parts(644, 0).saturating_mul(n.into())) + // Standard Error: 116 + .saturating_add(Weight::from_parts(1_428, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(2)) } @@ -479,25 +489,25 @@ impl pallet_nomination_pools::WeightInfo for WeightInfo // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 5_870_000 picoseconds. - Weight::from_parts(6_253_000, 0) + // Minimum execution time: 3_608_000 picoseconds. + Weight::from_parts(3_801_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(6)) } /// Storage: `NominationPools::BondedPools` (r:1 w:1) - /// Proof: `NominationPools::BondedPools` (`max_values`: None, `max_size`: Some(220), added: 2695, mode: `MaxEncodedLen`) + /// Proof: `NominationPools::BondedPools` (`max_values`: None, `max_size`: Some(254), added: 2729, mode: `MaxEncodedLen`) fn update_roles() -> Weight { // Proof Size summary in bytes: - // Measured: `497` - // Estimated: `3685` - // Minimum execution time: 18_290_000 picoseconds. - Weight::from_parts(18_961_000, 0) - .saturating_add(Weight::from_parts(0, 3685)) + // Measured: `498` + // Estimated: `3719` + // Minimum execution time: 16_053_000 picoseconds. + Weight::from_parts(16_473_000, 0) + .saturating_add(Weight::from_parts(0, 3719)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } /// Storage: `NominationPools::BondedPools` (r:1 w:0) - /// Proof: `NominationPools::BondedPools` (`max_values`: None, `max_size`: Some(220), added: 2695, mode: `MaxEncodedLen`) + /// Proof: `NominationPools::BondedPools` (`max_values`: None, `max_size`: Some(254), added: 2729, mode: `MaxEncodedLen`) /// Storage: `Staking::Bonded` (r:1 w:0) /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) /// Storage: `Staking::Ledger` (r:1 w:0) @@ -516,16 +526,16 @@ impl pallet_nomination_pools::WeightInfo for WeightInfo /// Proof: `VoterList::CounterForListNodes` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) fn chill() -> Weight { // Proof Size summary in bytes: - // Measured: `1942` + // Measured: `1901` // Estimated: `4556` - // Minimum execution time: 63_708_000 picoseconds. - Weight::from_parts(65_570_000, 0) + // Minimum execution time: 57_251_000 picoseconds. 
+ Weight::from_parts(59_390_000, 0) .saturating_add(Weight::from_parts(0, 4556)) .saturating_add(T::DbWeight::get().reads(9)) .saturating_add(T::DbWeight::get().writes(5)) } /// Storage: `NominationPools::BondedPools` (r:1 w:1) - /// Proof: `NominationPools::BondedPools` (`max_values`: None, `max_size`: Some(220), added: 2695, mode: `MaxEncodedLen`) + /// Proof: `NominationPools::BondedPools` (`max_values`: None, `max_size`: Some(254), added: 2729, mode: `MaxEncodedLen`) /// Storage: `NominationPools::RewardPools` (r:1 w:1) /// Proof: `NominationPools::RewardPools` (`max_values`: None, `max_size`: Some(92), added: 2567, mode: `MaxEncodedLen`) /// Storage: `NominationPools::GlobalMaxCommission` (r:1 w:0) @@ -534,37 +544,49 @@ impl pallet_nomination_pools::WeightInfo for WeightInfo /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn set_commission() -> Weight { // Proof Size summary in bytes: - // Measured: `736` - // Estimated: `3685` - // Minimum execution time: 34_291_000 picoseconds. - Weight::from_parts(34_767_000, 0) - .saturating_add(Weight::from_parts(0, 3685)) + // Measured: `770` + // Estimated: `3719` + // Minimum execution time: 29_888_000 picoseconds. + Weight::from_parts(31_056_000, 0) + .saturating_add(Weight::from_parts(0, 3719)) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(2)) } /// Storage: `NominationPools::BondedPools` (r:1 w:1) - /// Proof: `NominationPools::BondedPools` (`max_values`: None, `max_size`: Some(220), added: 2695, mode: `MaxEncodedLen`) + /// Proof: `NominationPools::BondedPools` (`max_values`: None, `max_size`: Some(254), added: 2729, mode: `MaxEncodedLen`) /// Storage: `NominationPools::GlobalMaxCommission` (r:1 w:0) /// Proof: `NominationPools::GlobalMaxCommission` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) fn set_commission_max() -> Weight { // Proof Size summary in bytes: - // Measured: `537` - // Estimated: `3685` - // Minimum execution time: 18_406_000 picoseconds. - Weight::from_parts(18_999_000, 0) - .saturating_add(Weight::from_parts(0, 3685)) + // Measured: `538` + // Estimated: `3719` + // Minimum execution time: 15_769_000 picoseconds. + Weight::from_parts(16_579_000, 0) + .saturating_add(Weight::from_parts(0, 3719)) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(1)) } /// Storage: `NominationPools::BondedPools` (r:1 w:1) - /// Proof: `NominationPools::BondedPools` (`max_values`: None, `max_size`: Some(220), added: 2695, mode: `MaxEncodedLen`) + /// Proof: `NominationPools::BondedPools` (`max_values`: None, `max_size`: Some(254), added: 2729, mode: `MaxEncodedLen`) fn set_commission_change_rate() -> Weight { // Proof Size summary in bytes: - // Measured: `497` - // Estimated: `3685` - // Minimum execution time: 18_440_000 picoseconds. - Weight::from_parts(19_230_000, 0) - .saturating_add(Weight::from_parts(0, 3685)) + // Measured: `498` + // Estimated: `3719` + // Minimum execution time: 15_385_000 picoseconds. 
+ Weight::from_parts(16_402_000, 0) + .saturating_add(Weight::from_parts(0, 3719)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `NominationPools::BondedPools` (r:1 w:1) + /// Proof: `NominationPools::BondedPools` (`max_values`: None, `max_size`: Some(254), added: 2729, mode: `MaxEncodedLen`) + fn set_commission_claim_permission() -> Weight { + // Proof Size summary in bytes: + // Measured: `498` + // Estimated: `3719` + // Minimum execution time: 14_965_000 picoseconds. + Weight::from_parts(15_548_000, 0) + .saturating_add(Weight::from_parts(0, 3719)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -576,14 +598,14 @@ impl pallet_nomination_pools::WeightInfo for WeightInfo // Proof Size summary in bytes: // Measured: `508` // Estimated: `4182` - // Minimum execution time: 14_310_000 picoseconds. - Weight::from_parts(14_681_000, 0) + // Minimum execution time: 13_549_000 picoseconds. + Weight::from_parts(14_307_000, 0) .saturating_add(Weight::from_parts(0, 4182)) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(1)) } /// Storage: `NominationPools::BondedPools` (r:1 w:0) - /// Proof: `NominationPools::BondedPools` (`max_values`: None, `max_size`: Some(220), added: 2695, mode: `MaxEncodedLen`) + /// Proof: `NominationPools::BondedPools` (`max_values`: None, `max_size`: Some(254), added: 2729, mode: `MaxEncodedLen`) /// Storage: `NominationPools::RewardPools` (r:1 w:1) /// Proof: `NominationPools::RewardPools` (`max_values`: None, `max_size`: Some(92), added: 2567, mode: `MaxEncodedLen`) /// Storage: `NominationPools::GlobalMaxCommission` (r:1 w:0) @@ -592,16 +614,16 @@ impl pallet_nomination_pools::WeightInfo for WeightInfo /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn claim_commission() -> Weight { // Proof Size summary in bytes: - // Measured: `934` - // Estimated: `3685` - // Minimum execution time: 64_526_000 picoseconds. - Weight::from_parts(66_800_000, 0) - .saturating_add(Weight::from_parts(0, 3685)) + // Measured: `968` + // Estimated: `3719` + // Minimum execution time: 60_153_000 picoseconds. + Weight::from_parts(61_369_000, 0) + .saturating_add(Weight::from_parts(0, 3719)) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(2)) } /// Storage: `NominationPools::BondedPools` (r:1 w:0) - /// Proof: `NominationPools::BondedPools` (`max_values`: None, `max_size`: Some(220), added: 2695, mode: `MaxEncodedLen`) + /// Proof: `NominationPools::BondedPools` (`max_values`: None, `max_size`: Some(254), added: 2729, mode: `MaxEncodedLen`) /// Storage: `Balances::Freezes` (r:1 w:1) /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`) /// Storage: `System::Account` (r:1 w:1) @@ -610,10 +632,10 @@ impl pallet_nomination_pools::WeightInfo for WeightInfo /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) fn adjust_pool_deposit() -> Weight { // Proof Size summary in bytes: - // Measured: `866` + // Measured: `867` // Estimated: `4764` - // Minimum execution time: 73_472_000 picoseconds. - Weight::from_parts(74_698_000, 0) + // Minimum execution time: 64_985_000 picoseconds. 
+ Weight::from_parts(66_616_000, 0) .saturating_add(Weight::from_parts(0, 4764)) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(2)) diff --git a/substrate/frame/nomination-pools/benchmarking/src/lib.rs b/substrate/frame/nomination-pools/benchmarking/src/lib.rs index fc86a6f56c0..48d7dae29ef 100644 --- a/substrate/frame/nomination-pools/benchmarking/src/lib.rs +++ b/substrate/frame/nomination-pools/benchmarking/src/lib.rs @@ -35,9 +35,9 @@ use frame_support::{ use frame_system::RawOrigin as RuntimeOrigin; use pallet_nomination_pools::{ BalanceOf, BondExtra, BondedPoolInner, BondedPools, ClaimPermission, ClaimPermissions, - Commission, CommissionChangeRate, ConfigOp, GlobalMaxCommission, MaxPoolMembers, - MaxPoolMembersPerPool, MaxPools, Metadata, MinCreateBond, MinJoinBond, Pallet as Pools, - PoolMembers, PoolRoles, PoolState, RewardPools, SubPoolsStorage, + Commission, CommissionChangeRate, CommissionClaimPermission, ConfigOp, GlobalMaxCommission, + MaxPoolMembers, MaxPoolMembersPerPool, MaxPools, Metadata, MinCreateBond, MinJoinBond, + Pallet as Pools, PoolMembers, PoolRoles, PoolState, RewardPools, SubPoolsStorage, }; use pallet_staking::MaxNominationsOf; use sp_runtime::{ @@ -706,17 +706,24 @@ frame_benchmarking::benchmarks! { max_increase: Perbill::from_percent(20), min_delay: 0u32.into(), }).unwrap(); + // set a claim permission to an account. + Pools::::set_commission_claim_permission( + RuntimeOrigin::Signed(depositor.clone()).into(), + 1u32.into(), + Some(CommissionClaimPermission::Account(depositor.clone())) + ).unwrap(); }:_(RuntimeOrigin::Signed(depositor.clone()), 1u32.into(), Some((Perbill::from_percent(20), depositor.clone()))) verify { assert_eq!(BondedPools::::get(1).unwrap().commission, Commission { - current: Some((Perbill::from_percent(20), depositor)), + current: Some((Perbill::from_percent(20), depositor.clone())), max: Some(Perbill::from_percent(50)), change_rate: Some(CommissionChangeRate { max_increase: Perbill::from_percent(20), min_delay: 0u32.into() }), throttle_from: Some(1u32.into()), + claim_permission: Some(CommissionClaimPermission::Account(depositor)), }); } @@ -731,6 +738,7 @@ frame_benchmarking::benchmarks! { max: Some(Perbill::from_percent(50)), change_rate: None, throttle_from: Some(0u32.into()), + claim_permission: None, }); } @@ -751,9 +759,25 @@ frame_benchmarking::benchmarks! { min_delay: 1000u32.into(), }), throttle_from: Some(1_u32.into()), + claim_permission: None, }); } + set_commission_claim_permission { + // Create a pool. + let (depositor, pool_account) = create_pool_account::(0, Pools::::depositor_min_bond() * 2u32.into(), None); + }:_(RuntimeOrigin::Signed(depositor.clone()), 1u32.into(), Some(CommissionClaimPermission::Account(depositor.clone()))) + verify { + assert_eq!( + BondedPools::::get(1).unwrap().commission, Commission { + current: None, + max: None, + change_rate: None, + throttle_from: None, + claim_permission: Some(CommissionClaimPermission::Account(depositor)), + }); + } + set_claim_permission { // Create a pool let min_create_bond = Pools::::depositor_min_bond(); @@ -786,8 +810,13 @@ frame_benchmarking::benchmarks! { CurrencyOf::::set_balance(&reward_account, ed + origin_weight); // member claims a payout to make some commission available. - let _ = Pools::::claim_payout(RuntimeOrigin::Signed(claimer).into()); - + let _ = Pools::::claim_payout(RuntimeOrigin::Signed(claimer.clone()).into()); + // set a claim permission to an account. 
+ let _ = Pools::::set_commission_claim_permission( + RuntimeOrigin::Signed(depositor.clone()).into(), + 1u32.into(), + Some(CommissionClaimPermission::Account(claimer)) + ); whitelist_account!(depositor); }:_(RuntimeOrigin::Signed(depositor.clone()), 1u32.into()) verify { diff --git a/substrate/frame/nomination-pools/src/lib.rs b/substrate/frame/nomination-pools/src/lib.rs index ab4bd51ffc0..f191126fbdd 100644 --- a/substrate/frame/nomination-pools/src/lib.rs +++ b/substrate/frame/nomination-pools/src/lib.rs @@ -676,6 +676,13 @@ pub struct PoolRoles { pub bouncer: Option, } +// A pool's possible commission claiming permissions. +#[derive(PartialEq, Eq, Copy, Clone, Encode, Decode, RuntimeDebug, TypeInfo, MaxEncodedLen)] +pub enum CommissionClaimPermission { + Permissionless, + Account(AccountId), +} + /// Pool commission. /// /// The pool `root` can set commission configuration after pool creation. By default, all commission @@ -705,6 +712,9 @@ pub struct Commission { /// The block from where throttling should be checked from. This value will be updated on all /// commission updates and when setting an initial `change_rate`. pub throttle_from: Option>, + // Whether commission can be claimed permissionlessly, or whether an account can claim + // commission. `Root` role can always claim. + pub claim_permission: Option>, } impl Commission { @@ -1078,6 +1088,17 @@ impl BondedPool { self.is_root(who) } + fn can_claim_commission(&self, who: &T::AccountId) -> bool { + if let Some(permission) = self.commission.claim_permission.as_ref() { + match permission { + CommissionClaimPermission::Permissionless => true, + CommissionClaimPermission::Account(account) => account == who || self.is_root(who), + } + } else { + self.is_root(who) + } + } + fn is_destroying(&self) -> bool { matches!(self.state, PoolState::Destroying) } @@ -1572,7 +1593,7 @@ pub mod pallet { use sp_runtime::Perbill; /// The current storage version. - const STORAGE_VERSION: StorageVersion = StorageVersion::new(7); + const STORAGE_VERSION: StorageVersion = StorageVersion::new(8); #[pallet::pallet] #[pallet::storage_version(STORAGE_VERSION)] @@ -1850,6 +1871,11 @@ pub mod pallet { pool_id: PoolId, change_rate: CommissionChangeRate>, }, + /// Pool commission claim permission has been updated. + PoolCommissionClaimPermissionUpdated { + pool_id: PoolId, + permission: Option>, + }, /// Pool commission has been claimed. PoolCommissionClaimed { pool_id: PoolId, commission: BalanceOf }, /// Topped up deficit in frozen ED of the reward pool. @@ -2742,6 +2768,32 @@ pub mod pallet { let who = ensure_signed(origin)?; Self::do_adjust_pool_deposit(who, pool_id) } + + /// Set or remove a pool's commission claim permission. + /// + /// Determines who can claim the pool's pending commission. Only the `Root` role of the pool + /// is able to configure commission claim permissions.
+ #[pallet::call_index(22)] + #[pallet::weight(T::WeightInfo::set_commission_claim_permission())] + pub fn set_commission_claim_permission( + origin: OriginFor, + pool_id: PoolId, + permission: Option>, + ) -> DispatchResult { + let who = ensure_signed(origin)?; + let mut bonded_pool = BondedPool::::get(pool_id).ok_or(Error::::PoolNotFound)?; + ensure!(bonded_pool.can_manage_commission(&who), Error::::DoesNotHavePermission); + + bonded_pool.commission.claim_permission = permission.clone(); + bonded_pool.put(); + + Self::deposit_event(Event::::PoolCommissionClaimPermissionUpdated { + pool_id, + permission, + }); + + Ok(()) + } } #[pallet::hooks] @@ -3106,12 +3158,12 @@ impl Pallet { fn do_claim_commission(who: T::AccountId, pool_id: PoolId) -> DispatchResult { let bonded_pool = BondedPool::::get(pool_id).ok_or(Error::::PoolNotFound)?; - ensure!(bonded_pool.can_manage_commission(&who), Error::::DoesNotHavePermission); + ensure!(bonded_pool.can_claim_commission(&who), Error::::DoesNotHavePermission); let mut reward_pool = RewardPools::::get(pool_id) .defensive_ok_or::>(DefensiveError::RewardPoolNotFound.into())?; - // IMPORTANT: make sure that any newly pending commission not yet processed is added to + // IMPORTANT: ensure newly pending commission not yet processed is added to // `total_commission_pending`. reward_pool.update_records( pool_id, diff --git a/substrate/frame/nomination-pools/src/migration.rs b/substrate/frame/nomination-pools/src/migration.rs index 3d68fee1dca..3adfd926d95 100644 --- a/substrate/frame/nomination-pools/src/migration.rs +++ b/substrate/frame/nomination-pools/src/migration.rs @@ -27,6 +27,15 @@ use sp_runtime::TryRuntimeError; pub mod versioned { use super::*; + /// v8: Adds commission claim permissions to `BondedPools`. + pub type V7ToV8 = frame_support::migrations::VersionedMigration< + 7, + 8, + v8::VersionUncheckedMigrateV7ToV8, + crate::pallet::Pallet, + ::DbWeight, + >; + /// Migration V6 to V7 wrapped in a [`frame_support::migrations::VersionedMigration`], ensuring /// the migration is only performed when on-chain version is 6. pub type V6ToV7 = frame_support::migrations::VersionedMigration< @@ -47,6 +56,74 @@ pub mod versioned { >; } +pub mod v8 { + use super::*; + + #[derive(Decode)] + pub struct OldCommission { + pub current: Option<(Perbill, T::AccountId)>, + pub max: Option, + pub change_rate: Option>>, + pub throttle_from: Option>, + } + + #[derive(Decode)] + pub struct OldBondedPoolInner { + pub commission: OldCommission, + pub member_counter: u32, + pub points: BalanceOf, + pub roles: PoolRoles, + pub state: PoolState, + } + + impl OldBondedPoolInner { + fn migrate_to_v8(self) -> BondedPoolInner { + BondedPoolInner { + commission: Commission { + current: self.commission.current, + max: self.commission.max, + change_rate: self.commission.change_rate, + throttle_from: self.commission.throttle_from, + // `claim_permission` is a new field. 
+ claim_permission: None, + }, + member_counter: self.member_counter, + points: self.points, + roles: self.roles, + state: self.state, + } + } + } + + pub struct VersionUncheckedMigrateV7ToV8(sp_std::marker::PhantomData); + impl OnRuntimeUpgrade for VersionUncheckedMigrateV7ToV8 { + #[cfg(feature = "try-runtime")] + fn pre_upgrade() -> Result, TryRuntimeError> { + Ok(Vec::new()) + } + + fn on_runtime_upgrade() -> Weight { + let mut translated = 0u64; + BondedPools::::translate::, _>(|_key, old_value| { + translated.saturating_inc(); + Some(old_value.migrate_to_v8()) + }); + T::DbWeight::get().reads_writes(translated, translated + 1) + } + + #[cfg(feature = "try-runtime")] + fn post_upgrade(_: Vec) -> Result<(), TryRuntimeError> { + // Check new `claim_permission` field is present. + ensure!( + BondedPools::::iter() + .all(|(_, inner)| inner.commission.claim_permission.is_none()), + "`claim_permission` value has not been set correctly." + ); + Ok(()) + } + } +} + /// This migration accumulates and initializes the [`TotalValueLocked`] for all pools. /// /// WARNING: This migration works under the assumption that the [`BondedPools`] cannot be inflated diff --git a/substrate/frame/nomination-pools/src/tests.rs b/substrate/frame/nomination-pools/src/tests.rs index 2749e89ecff..7fe1e704bb1 100644 --- a/substrate/frame/nomination-pools/src/tests.rs +++ b/substrate/frame/nomination-pools/src/tests.rs @@ -5761,7 +5761,13 @@ mod commission { // Then: assert_eq!( BondedPool::::get(1).unwrap().commission, - Commission { current: None, max: None, change_rate: None, throttle_from: Some(1) } + Commission { + current: None, + max: None, + change_rate: None, + throttle_from: Some(1), + claim_permission: None, + } ); assert_eq!( pool_events_since_last_call(), @@ -5956,6 +5962,7 @@ mod commission { min_delay: 2_u64 }), throttle_from: Some(1_u64), + claim_permission: None, } ); assert_eq!( @@ -6007,6 +6014,7 @@ mod commission { min_delay: 2_u64 }), throttle_from: Some(3_u64), + claim_permission: None, } ); assert_eq!( @@ -6082,7 +6090,8 @@ mod commission { max_increase: Perbill::from_percent(1), min_delay: 2 }), - throttle_from: Some(7) + throttle_from: Some(7), + claim_permission: None, } ); assert_eq!( @@ -6183,6 +6192,7 @@ mod commission { max: Some(Perbill::from_percent(50)), change_rate: None, throttle_from: Some(1), + claim_permission: None, } ); @@ -6409,6 +6419,7 @@ mod commission { min_delay: 10_u64 }), throttle_from: Some(11), + claim_permission: None, } ); @@ -6502,7 +6513,8 @@ mod commission { max_increase: Perbill::from_percent(1), min_delay: 0 }), - throttle_from: Some(1) + throttle_from: Some(1), + claim_permission: None, } ); @@ -6885,6 +6897,13 @@ mod commission { #[test] fn claim_commission_works() { ExtBuilder::default().build_and_execute(|| { + /// Deposit rewards into the pool and claim payout. This will set up pending commission + /// to be tested in various scenarios. + fn deposit_rewards_and_claim_payout(caller: AccountId, points: u128) { + deposit_rewards(points); + assert_ok!(Pools::claim_payout(RuntimeOrigin::signed(caller))); + } + let pool_id = 1; let _ = Currency::set_balance(&900, 5); @@ -6905,21 +6924,9 @@ mod commission { ] ); - // Pool earns 80 points, payout is triggered. 
- deposit_rewards(80); - assert_eq!( - PoolMembers::::get(10).unwrap(), - PoolMember:: { pool_id, points: 10, ..Default::default() } - ); - - assert_ok!(Pools::claim_payout(RuntimeOrigin::signed(10))); - assert_eq!( - pool_events_since_last_call(), - vec![Event::PaidOut { member: 10, pool_id, payout: 40 }] - ); - // Given: - assert_eq!(RewardPool::::current_balance(pool_id), 40); + deposit_rewards_and_claim_payout(10, 100); + assert_eq!(RewardPool::::current_balance(pool_id), 50); // Pool does not exist assert_noop!( @@ -6944,6 +6951,176 @@ mod commission { Pools::claim_commission(RuntimeOrigin::signed(900), pool_id,), Error::::NoPendingCommission ); + + assert_eq!( + pool_events_since_last_call(), + vec![ + Event::PaidOut { member: 10, pool_id, payout: 50 }, + Event::PoolCommissionClaimed { pool_id: 1, commission: 50 } + ] + ); + + // The pool commission's claim_permission field is updated to `Permissionless` by the + // root member, which means anyone can now claim commission for the pool. + + // Given: + // Some random non-pool member to claim commission. + let non_pool_member = 1001; + let _ = Currency::set_balance(&non_pool_member, 5); + + // Set up pending commission. + deposit_rewards_and_claim_payout(10, 100); + assert_ok!(Pools::set_commission_claim_permission( + RuntimeOrigin::signed(900), + pool_id, + Some(CommissionClaimPermission::Permissionless) + )); + + // When: + assert_ok!(Pools::claim_commission(RuntimeOrigin::signed(non_pool_member), pool_id)); + + // Then: + assert_eq!(RewardPool::::current_balance(pool_id), 0); + assert_eq!( + pool_events_since_last_call(), + vec![ + Event::PaidOut { member: 10, pool_id, payout: 50 }, + Event::PoolCommissionClaimPermissionUpdated { + pool_id: 1, + permission: Some(CommissionClaimPermission::Permissionless) + }, + Event::PoolCommissionClaimed { pool_id: 1, commission: 50 }, + ] + ); + + // The pool commission's claim_permission is updated to an adhoc account by the root + // member, which means now only that account (in addition to the root role) can claim + // commission for the pool. + + // Given: + // The account designated to claim commission. + let designated_commission_claimer = 2001; + let _ = Currency::set_balance(&designated_commission_claimer, 5); + + // Set up pending commission. + deposit_rewards_and_claim_payout(10, 100); + assert_ok!(Pools::set_commission_claim_permission( + RuntimeOrigin::signed(900), + pool_id, + Some(CommissionClaimPermission::Account(designated_commission_claimer)) + )); + + // When: + // Previous claimer can no longer claim commission. + assert_noop!( + Pools::claim_commission(RuntimeOrigin::signed(1001), pool_id,), + Error::::DoesNotHavePermission + ); + // Designated claimer can claim commission. + assert_ok!(Pools::claim_commission( + RuntimeOrigin::signed(designated_commission_claimer), + pool_id + )); + + // Then: + assert_eq!( + pool_events_since_last_call(), + vec![ + Event::PaidOut { member: 10, pool_id, payout: 50 }, + Event::PoolCommissionClaimPermissionUpdated { + pool_id: 1, + permission: Some(CommissionClaimPermission::Account(2001)) + }, + Event::PoolCommissionClaimed { pool_id: 1, commission: 50 }, + ] + ); + + // Even with an Account claim permission set, the `root` role of the pool can still + // claim commission. 
+ + // Given: + deposit_rewards_and_claim_payout(10, 100); + + // When: + assert_ok!(Pools::claim_commission(RuntimeOrigin::signed(900), pool_id)); + + // Then: + assert_eq!( + pool_events_since_last_call(), + vec![ + Event::PaidOut { member: 10, pool_id, payout: 50 }, + Event::PoolCommissionClaimed { pool_id: 1, commission: 50 }, + ] + ); + + // The root role updates commission's claim_permission back to `None`, which results in + // only the root member being able to claim commission for the pool. + + // Given: + deposit_rewards_and_claim_payout(10, 100); + + // When: + assert_ok!(Pools::set_commission_claim_permission( + RuntimeOrigin::signed(900), + pool_id, + None + )); + // Previous claimer can no longer claim commission. + assert_noop!( + Pools::claim_commission( + RuntimeOrigin::signed(designated_commission_claimer), + pool_id, + ), + Error::::DoesNotHavePermission + ); + // Root can claim commission. + assert_ok!(Pools::claim_commission(RuntimeOrigin::signed(900), pool_id)); + + // Then: + assert_eq!( + pool_events_since_last_call(), + vec![ + Event::PaidOut { member: 10, pool_id, payout: 50 }, + Event::PoolCommissionClaimPermissionUpdated { pool_id: 1, permission: None }, + Event::PoolCommissionClaimed { pool_id: 1, commission: 50 }, + ] + ); + }) + } + + #[test] + fn set_commission_claim_permission_handles_errors() { + ExtBuilder::default().build_and_execute(|| { + let pool_id = 1; + + let _ = Currency::set_balance(&900, 5); + assert_eq!( + pool_events_since_last_call(), + vec![ + Event::Created { depositor: 10, pool_id }, + Event::Bonded { member: 10, pool_id, bonded: 10, joined: true }, + ] + ); + + // Cannot operate on a non-existing pool. + assert_noop!( + Pools::set_commission_claim_permission( + RuntimeOrigin::signed(10), + 90, + Some(CommissionClaimPermission::Permissionless) + ), + Error::::PoolNotFound + ); + + // Only the root role can change the commission claim permission. + assert_noop!( + Pools::set_commission_claim_permission( + RuntimeOrigin::signed(10), + pool_id, + Some(CommissionClaimPermission::Permissionless) + ), + Error::::DoesNotHavePermission + ); }) } } diff --git a/substrate/frame/nomination-pools/src/weights.rs b/substrate/frame/nomination-pools/src/weights.rs index 2cb414fc2a0..047a17c3f9a 100644 --- a/substrate/frame/nomination-pools/src/weights.rs +++ b/substrate/frame/nomination-pools/src/weights.rs @@ -18,9 +18,9 @@ //! Autogenerated weights for `pallet_nomination_pools` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-09-19, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2023-11-23, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-guclnr1q-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-yprdrvc7-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! 
WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` // Executed Command: @@ -67,6 +67,7 @@ pub trait WeightInfo { fn set_commission() -> Weight; fn set_commission_max() -> Weight; fn set_commission_change_rate() -> Weight; + fn set_commission_claim_permission() -> Weight; fn set_claim_permission() -> Weight; fn claim_commission() -> Weight; fn adjust_pool_deposit() -> Weight; @@ -80,7 +81,7 @@ impl WeightInfo for SubstrateWeight { /// Storage: `NominationPools::PoolMembers` (r:1 w:1) /// Proof: `NominationPools::PoolMembers` (`max_values`: None, `max_size`: Some(237), added: 2712, mode: `MaxEncodedLen`) /// Storage: `NominationPools::BondedPools` (r:1 w:1) - /// Proof: `NominationPools::BondedPools` (`max_values`: None, `max_size`: Some(220), added: 2695, mode: `MaxEncodedLen`) + /// Proof: `NominationPools::BondedPools` (`max_values`: None, `max_size`: Some(254), added: 2729, mode: `MaxEncodedLen`) /// Storage: `Staking::Bonded` (r:1 w:0) /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) /// Storage: `Staking::Ledger` (r:1 w:1) @@ -105,19 +106,21 @@ impl WeightInfo for SubstrateWeight { /// Proof: `VoterList::ListNodes` (`max_values`: None, `max_size`: Some(154), added: 2629, mode: `MaxEncodedLen`) /// Storage: `VoterList::ListBags` (r:2 w:2) /// Proof: `VoterList::ListBags` (`max_values`: None, `max_size`: Some(82), added: 2557, mode: `MaxEncodedLen`) + /// Storage: `NominationPools::TotalValueLocked` (r:1 w:1) + /// Proof: `NominationPools::TotalValueLocked` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) fn join() -> Weight { // Proof Size summary in bytes: - // Measured: `3388` + // Measured: `3425` // Estimated: `8877` - // Minimum execution time: 203_377_000 picoseconds. - Weight::from_parts(206_359_000, 8877) - .saturating_add(T::DbWeight::get().reads(19_u64)) - .saturating_add(T::DbWeight::get().writes(12_u64)) + // Minimum execution time: 184_295_000 picoseconds. 
+ Weight::from_parts(188_860_000, 8877) + .saturating_add(T::DbWeight::get().reads(20_u64)) + .saturating_add(T::DbWeight::get().writes(13_u64)) } /// Storage: `NominationPools::PoolMembers` (r:1 w:1) /// Proof: `NominationPools::PoolMembers` (`max_values`: None, `max_size`: Some(237), added: 2712, mode: `MaxEncodedLen`) /// Storage: `NominationPools::BondedPools` (r:1 w:1) - /// Proof: `NominationPools::BondedPools` (`max_values`: None, `max_size`: Some(220), added: 2695, mode: `MaxEncodedLen`) + /// Proof: `NominationPools::BondedPools` (`max_values`: None, `max_size`: Some(254), added: 2729, mode: `MaxEncodedLen`) /// Storage: `NominationPools::RewardPools` (r:1 w:1) /// Proof: `NominationPools::RewardPools` (`max_values`: None, `max_size`: Some(92), added: 2567, mode: `MaxEncodedLen`) /// Storage: `NominationPools::GlobalMaxCommission` (r:1 w:0) @@ -136,21 +139,23 @@ impl WeightInfo for SubstrateWeight { /// Proof: `VoterList::ListNodes` (`max_values`: None, `max_size`: Some(154), added: 2629, mode: `MaxEncodedLen`) /// Storage: `VoterList::ListBags` (r:2 w:2) /// Proof: `VoterList::ListBags` (`max_values`: None, `max_size`: Some(82), added: 2557, mode: `MaxEncodedLen`) + /// Storage: `NominationPools::TotalValueLocked` (r:1 w:1) + /// Proof: `NominationPools::TotalValueLocked` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) fn bond_extra_transfer() -> Weight { // Proof Size summary in bytes: - // Measured: `3398` + // Measured: `3435` // Estimated: `8877` - // Minimum execution time: 199_792_000 picoseconds. - Weight::from_parts(206_871_000, 8877) - .saturating_add(T::DbWeight::get().reads(16_u64)) - .saturating_add(T::DbWeight::get().writes(12_u64)) + // Minimum execution time: 188_777_000 picoseconds. + Weight::from_parts(192_646_000, 8877) + .saturating_add(T::DbWeight::get().reads(17_u64)) + .saturating_add(T::DbWeight::get().writes(13_u64)) } /// Storage: `NominationPools::ClaimPermissions` (r:1 w:0) /// Proof: `NominationPools::ClaimPermissions` (`max_values`: None, `max_size`: Some(41), added: 2516, mode: `MaxEncodedLen`) /// Storage: `NominationPools::PoolMembers` (r:1 w:1) /// Proof: `NominationPools::PoolMembers` (`max_values`: None, `max_size`: Some(237), added: 2712, mode: `MaxEncodedLen`) /// Storage: `NominationPools::BondedPools` (r:1 w:1) - /// Proof: `NominationPools::BondedPools` (`max_values`: None, `max_size`: Some(220), added: 2695, mode: `MaxEncodedLen`) + /// Proof: `NominationPools::BondedPools` (`max_values`: None, `max_size`: Some(254), added: 2729, mode: `MaxEncodedLen`) /// Storage: `NominationPools::RewardPools` (r:1 w:1) /// Proof: `NominationPools::RewardPools` (`max_values`: None, `max_size`: Some(92), added: 2567, mode: `MaxEncodedLen`) /// Storage: `NominationPools::GlobalMaxCommission` (r:1 w:0) @@ -169,21 +174,23 @@ impl WeightInfo for SubstrateWeight { /// Proof: `VoterList::ListNodes` (`max_values`: None, `max_size`: Some(154), added: 2629, mode: `MaxEncodedLen`) /// Storage: `VoterList::ListBags` (r:2 w:2) /// Proof: `VoterList::ListBags` (`max_values`: None, `max_size`: Some(82), added: 2557, mode: `MaxEncodedLen`) + /// Storage: `NominationPools::TotalValueLocked` (r:1 w:1) + /// Proof: `NominationPools::TotalValueLocked` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) fn bond_extra_other() -> Weight { // Proof Size summary in bytes: - // Measured: `3463` + // Measured: `3500` // Estimated: `8877` - // Minimum execution time: 246_362_000 picoseconds. 
- Weight::from_parts(253_587_000, 8877) - .saturating_add(T::DbWeight::get().reads(17_u64)) - .saturating_add(T::DbWeight::get().writes(13_u64)) + // Minimum execution time: 221_728_000 picoseconds. + Weight::from_parts(227_569_000, 8877) + .saturating_add(T::DbWeight::get().reads(18_u64)) + .saturating_add(T::DbWeight::get().writes(14_u64)) } /// Storage: `NominationPools::ClaimPermissions` (r:1 w:0) /// Proof: `NominationPools::ClaimPermissions` (`max_values`: None, `max_size`: Some(41), added: 2516, mode: `MaxEncodedLen`) /// Storage: `NominationPools::PoolMembers` (r:1 w:1) /// Proof: `NominationPools::PoolMembers` (`max_values`: None, `max_size`: Some(237), added: 2712, mode: `MaxEncodedLen`) /// Storage: `NominationPools::BondedPools` (r:1 w:1) - /// Proof: `NominationPools::BondedPools` (`max_values`: None, `max_size`: Some(220), added: 2695, mode: `MaxEncodedLen`) + /// Proof: `NominationPools::BondedPools` (`max_values`: None, `max_size`: Some(254), added: 2729, mode: `MaxEncodedLen`) /// Storage: `NominationPools::RewardPools` (r:1 w:1) /// Proof: `NominationPools::RewardPools` (`max_values`: None, `max_size`: Some(92), added: 2567, mode: `MaxEncodedLen`) /// Storage: `NominationPools::GlobalMaxCommission` (r:1 w:0) @@ -192,17 +199,17 @@ impl WeightInfo for SubstrateWeight { /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn claim_payout() -> Weight { // Proof Size summary in bytes: - // Measured: `1171` - // Estimated: `3702` - // Minimum execution time: 81_115_000 picoseconds. - Weight::from_parts(83_604_000, 3702) + // Measured: `1172` + // Estimated: `3719` + // Minimum execution time: 75_310_000 picoseconds. + Weight::from_parts(77_709_000, 3719) .saturating_add(T::DbWeight::get().reads(6_u64)) .saturating_add(T::DbWeight::get().writes(4_u64)) } /// Storage: `NominationPools::PoolMembers` (r:1 w:1) /// Proof: `NominationPools::PoolMembers` (`max_values`: None, `max_size`: Some(237), added: 2712, mode: `MaxEncodedLen`) /// Storage: `NominationPools::BondedPools` (r:1 w:1) - /// Proof: `NominationPools::BondedPools` (`max_values`: None, `max_size`: Some(220), added: 2695, mode: `MaxEncodedLen`) + /// Proof: `NominationPools::BondedPools` (`max_values`: None, `max_size`: Some(254), added: 2729, mode: `MaxEncodedLen`) /// Storage: `NominationPools::RewardPools` (r:1 w:1) /// Proof: `NominationPools::RewardPools` (`max_values`: None, `max_size`: Some(92), added: 2567, mode: `MaxEncodedLen`) /// Storage: `Staking::Bonded` (r:1 w:0) @@ -233,15 +240,15 @@ impl WeightInfo for SubstrateWeight { /// Proof: `NominationPools::CounterForSubPoolsStorage` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) fn unbond() -> Weight { // Proof Size summary in bytes: - // Measured: `3674` + // Measured: `3622` // Estimated: `27847` - // Minimum execution time: 187_210_000 picoseconds. - Weight::from_parts(189_477_000, 27847) + // Minimum execution time: 170_656_000 picoseconds. 
+ Weight::from_parts(174_950_000, 27847) .saturating_add(T::DbWeight::get().reads(20_u64)) .saturating_add(T::DbWeight::get().writes(13_u64)) } /// Storage: `NominationPools::BondedPools` (r:1 w:0) - /// Proof: `NominationPools::BondedPools` (`max_values`: None, `max_size`: Some(220), added: 2695, mode: `MaxEncodedLen`) + /// Proof: `NominationPools::BondedPools` (`max_values`: None, `max_size`: Some(254), added: 2729, mode: `MaxEncodedLen`) /// Storage: `Staking::Bonded` (r:1 w:0) /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) /// Storage: `Staking::Ledger` (r:1 w:1) @@ -252,24 +259,26 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) /// Storage: `Balances::Freezes` (r:1 w:0) /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`) + /// Storage: `NominationPools::TotalValueLocked` (r:1 w:1) + /// Proof: `NominationPools::TotalValueLocked` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) /// The range of component `s` is `[0, 100]`. fn pool_withdraw_unbonded(s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1743` + // Measured: `1817` // Estimated: `4764` - // Minimum execution time: 66_384_000 picoseconds. - Weight::from_parts(69_498_267, 4764) - // Standard Error: 2_566 - .saturating_add(Weight::from_parts(34_528, 0).saturating_mul(s.into())) - .saturating_add(T::DbWeight::get().reads(6_u64)) - .saturating_add(T::DbWeight::get().writes(2_u64)) + // Minimum execution time: 68_866_000 picoseconds. + Weight::from_parts(72_312_887, 4764) + // Standard Error: 1_635 + .saturating_add(Weight::from_parts(41_679, 0).saturating_mul(s.into())) + .saturating_add(T::DbWeight::get().reads(7_u64)) + .saturating_add(T::DbWeight::get().writes(3_u64)) } /// Storage: `NominationPools::PoolMembers` (r:1 w:1) /// Proof: `NominationPools::PoolMembers` (`max_values`: None, `max_size`: Some(237), added: 2712, mode: `MaxEncodedLen`) /// Storage: `Staking::CurrentEra` (r:1 w:0) /// Proof: `Staking::CurrentEra` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) /// Storage: `NominationPools::BondedPools` (r:1 w:1) - /// Proof: `NominationPools::BondedPools` (`max_values`: None, `max_size`: Some(220), added: 2695, mode: `MaxEncodedLen`) + /// Proof: `NominationPools::BondedPools` (`max_values`: None, `max_size`: Some(254), added: 2729, mode: `MaxEncodedLen`) /// Storage: `NominationPools::SubPoolsStorage` (r:1 w:1) /// Proof: `NominationPools::SubPoolsStorage` (`max_values`: None, `max_size`: Some(24382), added: 26857, mode: `MaxEncodedLen`) /// Storage: `Staking::Bonded` (r:1 w:0) @@ -282,6 +291,8 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`) /// Storage: `System::Account` (r:1 w:1) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `NominationPools::TotalValueLocked` (r:1 w:1) + /// Proof: `NominationPools::TotalValueLocked` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) /// Storage: `NominationPools::CounterForPoolMembers` (r:1 w:1) /// Proof: `NominationPools::CounterForPoolMembers` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) /// Storage: `NominationPools::ClaimPermissions` 
(r:0 w:1) @@ -289,21 +300,21 @@ impl WeightInfo for SubstrateWeight { /// The range of component `s` is `[0, 100]`. fn withdraw_unbonded_update(s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `2171` + // Measured: `2207` // Estimated: `27847` - // Minimum execution time: 137_474_000 picoseconds. - Weight::from_parts(142_341_215, 27847) - // Standard Error: 3_468 - .saturating_add(Weight::from_parts(66_597, 0).saturating_mul(s.into())) - .saturating_add(T::DbWeight::get().reads(10_u64)) - .saturating_add(T::DbWeight::get().writes(8_u64)) + // Minimum execution time: 131_383_000 picoseconds. + Weight::from_parts(136_595_971, 27847) + // Standard Error: 2_715 + .saturating_add(Weight::from_parts(52_351, 0).saturating_mul(s.into())) + .saturating_add(T::DbWeight::get().reads(11_u64)) + .saturating_add(T::DbWeight::get().writes(9_u64)) } /// Storage: `NominationPools::PoolMembers` (r:1 w:1) /// Proof: `NominationPools::PoolMembers` (`max_values`: None, `max_size`: Some(237), added: 2712, mode: `MaxEncodedLen`) /// Storage: `Staking::CurrentEra` (r:1 w:0) /// Proof: `Staking::CurrentEra` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) /// Storage: `NominationPools::BondedPools` (r:1 w:1) - /// Proof: `NominationPools::BondedPools` (`max_values`: None, `max_size`: Some(220), added: 2695, mode: `MaxEncodedLen`) + /// Proof: `NominationPools::BondedPools` (`max_values`: None, `max_size`: Some(254), added: 2729, mode: `MaxEncodedLen`) /// Storage: `NominationPools::SubPoolsStorage` (r:1 w:1) /// Proof: `NominationPools::SubPoolsStorage` (`max_values`: None, `max_size`: Some(24382), added: 26857, mode: `MaxEncodedLen`) /// Storage: `Staking::Bonded` (r:1 w:1) @@ -312,16 +323,18 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) /// Storage: `Staking::SlashingSpans` (r:1 w:0) /// Proof: `Staking::SlashingSpans` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// Storage: `Staking::Validators` (r:1 w:0) - /// Proof: `Staking::Validators` (`max_values`: None, `max_size`: Some(45), added: 2520, mode: `MaxEncodedLen`) - /// Storage: `Staking::Nominators` (r:1 w:0) - /// Proof: `Staking::Nominators` (`max_values`: None, `max_size`: Some(558), added: 3033, mode: `MaxEncodedLen`) - /// Storage: `System::Account` (r:2 w:2) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) /// Storage: `Balances::Locks` (r:2 w:1) /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) /// Storage: `Balances::Freezes` (r:2 w:1) /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:2 w:2) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Staking::Validators` (r:1 w:0) + /// Proof: `Staking::Validators` (`max_values`: None, `max_size`: Some(45), added: 2520, mode: `MaxEncodedLen`) + /// Storage: `Staking::Nominators` (r:1 w:0) + /// Proof: `Staking::Nominators` (`max_values`: None, `max_size`: Some(558), added: 3033, mode: `MaxEncodedLen`) + /// Storage: `NominationPools::TotalValueLocked` (r:1 w:1) + /// Proof: `NominationPools::TotalValueLocked` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) /// Storage: `NominationPools::CounterForPoolMembers` (r:1 w:1) 
/// Proof: `NominationPools::CounterForPoolMembers` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) /// Storage: `NominationPools::ReversePoolIdLookup` (r:1 w:1) @@ -345,12 +358,12 @@ impl WeightInfo for SubstrateWeight { /// The range of component `s` is `[0, 100]`. fn withdraw_unbonded_kill(_s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `2526` + // Measured: `2525` // Estimated: `27847` - // Minimum execution time: 249_135_000 picoseconds. - Weight::from_parts(263_632_571, 27847) - .saturating_add(T::DbWeight::get().reads(23_u64)) - .saturating_add(T::DbWeight::get().writes(19_u64)) + // Minimum execution time: 233_314_000 picoseconds. + Weight::from_parts(241_694_316, 27847) + .saturating_add(T::DbWeight::get().reads(24_u64)) + .saturating_add(T::DbWeight::get().writes(20_u64)) } /// Storage: `NominationPools::LastPoolId` (r:1 w:1) /// Proof: `NominationPools::LastPoolId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) @@ -376,14 +389,12 @@ impl WeightInfo for SubstrateWeight { /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) /// Storage: `Staking::Bonded` (r:1 w:1) /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) - /// Storage: `Staking::Ledger` (r:1 w:1) - /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) - /// Storage: `Staking::CurrentEra` (r:1 w:0) - /// Proof: `Staking::CurrentEra` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) /// Storage: `Balances::Locks` (r:2 w:1) /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) /// Storage: `Balances::Freezes` (r:2 w:1) /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`) + /// Storage: `NominationPools::TotalValueLocked` (r:1 w:1) + /// Proof: `NominationPools::TotalValueLocked` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) /// Storage: `NominationPools::RewardPools` (r:1 w:1) /// Proof: `NominationPools::RewardPools` (`max_values`: None, `max_size`: Some(92), added: 2567, mode: `MaxEncodedLen`) /// Storage: `NominationPools::CounterForRewardPools` (r:1 w:1) @@ -393,20 +404,22 @@ impl WeightInfo for SubstrateWeight { /// Storage: `NominationPools::CounterForReversePoolIdLookup` (r:1 w:1) /// Proof: `NominationPools::CounterForReversePoolIdLookup` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) /// Storage: `NominationPools::BondedPools` (r:1 w:1) - /// Proof: `NominationPools::BondedPools` (`max_values`: None, `max_size`: Some(220), added: 2695, mode: `MaxEncodedLen`) + /// Proof: `NominationPools::BondedPools` (`max_values`: None, `max_size`: Some(254), added: 2729, mode: `MaxEncodedLen`) + /// Storage: `Staking::Ledger` (r:0 w:1) + /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) /// Storage: `Staking::Payee` (r:0 w:1) /// Proof: `Staking::Payee` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) fn create() -> Weight { // Proof Size summary in bytes: - // Measured: `1289` + // Measured: `1169` // Estimated: `8538` - // Minimum execution time: 214_207_000 picoseconds. 
- Weight::from_parts(221_588_000, 8538) - .saturating_add(T::DbWeight::get().reads(24_u64)) - .saturating_add(T::DbWeight::get().writes(16_u64)) + // Minimum execution time: 171_465_000 picoseconds. + Weight::from_parts(176_478_000, 8538) + .saturating_add(T::DbWeight::get().reads(23_u64)) + .saturating_add(T::DbWeight::get().writes(17_u64)) } /// Storage: `NominationPools::BondedPools` (r:1 w:0) - /// Proof: `NominationPools::BondedPools` (`max_values`: None, `max_size`: Some(220), added: 2695, mode: `MaxEncodedLen`) + /// Proof: `NominationPools::BondedPools` (`max_values`: None, `max_size`: Some(254), added: 2729, mode: `MaxEncodedLen`) /// Storage: `Staking::Bonded` (r:1 w:0) /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) /// Storage: `Staking::Ledger` (r:1 w:0) @@ -432,34 +445,34 @@ impl WeightInfo for SubstrateWeight { /// The range of component `n` is `[1, 16]`. fn nominate(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1849` + // Measured: `1808` // Estimated: `4556 + n * (2520 ±0)` - // Minimum execution time: 70_626_000 picoseconds. - Weight::from_parts(73_830_182, 4556) - // Standard Error: 24_496 - .saturating_add(Weight::from_parts(1_561_416, 0).saturating_mul(n.into())) + // Minimum execution time: 63_588_000 picoseconds. + Weight::from_parts(64_930_584, 4556) + // Standard Error: 9_167 + .saturating_add(Weight::from_parts(1_595_779, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(12_u64)) .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(n.into()))) .saturating_add(T::DbWeight::get().writes(5_u64)) .saturating_add(Weight::from_parts(0, 2520).saturating_mul(n.into())) } /// Storage: `NominationPools::BondedPools` (r:1 w:1) - /// Proof: `NominationPools::BondedPools` (`max_values`: None, `max_size`: Some(220), added: 2695, mode: `MaxEncodedLen`) + /// Proof: `NominationPools::BondedPools` (`max_values`: None, `max_size`: Some(254), added: 2729, mode: `MaxEncodedLen`) /// Storage: `Staking::Bonded` (r:1 w:0) /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) /// Storage: `Staking::Ledger` (r:1 w:0) /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) fn set_state() -> Weight { // Proof Size summary in bytes: - // Measured: `1438` + // Measured: `1434` // Estimated: `4556` - // Minimum execution time: 36_542_000 picoseconds. - Weight::from_parts(37_644_000, 4556) + // Minimum execution time: 32_899_000 picoseconds. + Weight::from_parts(33_955_000, 4556) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } /// Storage: `NominationPools::BondedPools` (r:1 w:0) - /// Proof: `NominationPools::BondedPools` (`max_values`: None, `max_size`: Some(220), added: 2695, mode: `MaxEncodedLen`) + /// Proof: `NominationPools::BondedPools` (`max_values`: None, `max_size`: Some(254), added: 2729, mode: `MaxEncodedLen`) /// Storage: `NominationPools::Metadata` (r:1 w:1) /// Proof: `NominationPools::Metadata` (`max_values`: None, `max_size`: Some(270), added: 2745, mode: `MaxEncodedLen`) /// Storage: `NominationPools::CounterForMetadata` (r:1 w:1) @@ -467,12 +480,12 @@ impl WeightInfo for SubstrateWeight { /// The range of component `n` is `[1, 256]`. 
fn set_metadata(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `531` + // Measured: `532` // Estimated: `3735` - // Minimum execution time: 15_130_000 picoseconds. - Weight::from_parts(16_319_671, 3735) - // Standard Error: 351 - .saturating_add(Weight::from_parts(2_024, 0).saturating_mul(n.into())) + // Minimum execution time: 13_778_000 picoseconds. + Weight::from_parts(14_770_006, 3735) + // Standard Error: 151 + .saturating_add(Weight::from_parts(1_900, 0).saturating_mul(n.into())) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -492,23 +505,23 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 6_819_000 picoseconds. - Weight::from_parts(7_253_000, 0) + // Minimum execution time: 4_550_000 picoseconds. + Weight::from_parts(4_935_000, 0) .saturating_add(T::DbWeight::get().writes(6_u64)) } /// Storage: `NominationPools::BondedPools` (r:1 w:1) - /// Proof: `NominationPools::BondedPools` (`max_values`: None, `max_size`: Some(220), added: 2695, mode: `MaxEncodedLen`) + /// Proof: `NominationPools::BondedPools` (`max_values`: None, `max_size`: Some(254), added: 2729, mode: `MaxEncodedLen`) fn update_roles() -> Weight { // Proof Size summary in bytes: - // Measured: `531` - // Estimated: `3685` - // Minimum execution time: 19_596_000 picoseconds. - Weight::from_parts(20_828_000, 3685) + // Measured: `532` + // Estimated: `3719` + // Minimum execution time: 16_759_000 picoseconds. + Weight::from_parts(17_346_000, 3719) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } /// Storage: `NominationPools::BondedPools` (r:1 w:0) - /// Proof: `NominationPools::BondedPools` (`max_values`: None, `max_size`: Some(220), added: 2695, mode: `MaxEncodedLen`) + /// Proof: `NominationPools::BondedPools` (`max_values`: None, `max_size`: Some(254), added: 2729, mode: `MaxEncodedLen`) /// Storage: `Staking::Bonded` (r:1 w:0) /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) /// Storage: `Staking::Ledger` (r:1 w:0) @@ -527,15 +540,15 @@ impl WeightInfo for SubstrateWeight { /// Proof: `VoterList::CounterForListNodes` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) fn chill() -> Weight { // Proof Size summary in bytes: - // Measured: `2012` + // Measured: `1971` // Estimated: `4556` - // Minimum execution time: 68_551_000 picoseconds. - Weight::from_parts(71_768_000, 4556) + // Minimum execution time: 61_970_000 picoseconds. 
+ Weight::from_parts(63_738_000, 4556) .saturating_add(T::DbWeight::get().reads(9_u64)) .saturating_add(T::DbWeight::get().writes(5_u64)) } /// Storage: `NominationPools::BondedPools` (r:1 w:1) - /// Proof: `NominationPools::BondedPools` (`max_values`: None, `max_size`: Some(220), added: 2695, mode: `MaxEncodedLen`) + /// Proof: `NominationPools::BondedPools` (`max_values`: None, `max_size`: Some(254), added: 2729, mode: `MaxEncodedLen`) /// Storage: `NominationPools::RewardPools` (r:1 w:1) /// Proof: `NominationPools::RewardPools` (`max_values`: None, `max_size`: Some(92), added: 2567, mode: `MaxEncodedLen`) /// Storage: `NominationPools::GlobalMaxCommission` (r:1 w:0) @@ -544,34 +557,45 @@ impl WeightInfo for SubstrateWeight { /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn set_commission() -> Weight { // Proof Size summary in bytes: - // Measured: `770` - // Estimated: `3685` - // Minimum execution time: 36_128_000 picoseconds. - Weight::from_parts(38_547_000, 3685) + // Measured: `804` + // Estimated: `3719` + // Minimum execution time: 31_950_000 picoseconds. + Weight::from_parts(33_190_000, 3719) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } /// Storage: `NominationPools::BondedPools` (r:1 w:1) - /// Proof: `NominationPools::BondedPools` (`max_values`: None, `max_size`: Some(220), added: 2695, mode: `MaxEncodedLen`) + /// Proof: `NominationPools::BondedPools` (`max_values`: None, `max_size`: Some(254), added: 2729, mode: `MaxEncodedLen`) /// Storage: `NominationPools::GlobalMaxCommission` (r:1 w:0) /// Proof: `NominationPools::GlobalMaxCommission` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) fn set_commission_max() -> Weight { // Proof Size summary in bytes: - // Measured: `571` - // Estimated: `3685` - // Minimum execution time: 20_067_000 picoseconds. - Weight::from_parts(21_044_000, 3685) + // Measured: `572` + // Estimated: `3719` + // Minimum execution time: 16_807_000 picoseconds. + Weight::from_parts(17_733_000, 3719) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } /// Storage: `NominationPools::BondedPools` (r:1 w:1) - /// Proof: `NominationPools::BondedPools` (`max_values`: None, `max_size`: Some(220), added: 2695, mode: `MaxEncodedLen`) + /// Proof: `NominationPools::BondedPools` (`max_values`: None, `max_size`: Some(254), added: 2729, mode: `MaxEncodedLen`) fn set_commission_change_rate() -> Weight { // Proof Size summary in bytes: - // Measured: `531` - // Estimated: `3685` - // Minimum execution time: 19_186_000 picoseconds. - Weight::from_parts(20_189_000, 3685) + // Measured: `532` + // Estimated: `3719` + // Minimum execution time: 16_710_000 picoseconds. + Weight::from_parts(17_563_000, 3719) + .saturating_add(T::DbWeight::get().reads(1_u64)) + .saturating_add(T::DbWeight::get().writes(1_u64)) + } + /// Storage: `NominationPools::BondedPools` (r:1 w:1) + /// Proof: `NominationPools::BondedPools` (`max_values`: None, `max_size`: Some(254), added: 2729, mode: `MaxEncodedLen`) + fn set_commission_claim_permission() -> Weight { + // Proof Size summary in bytes: + // Measured: `532` + // Estimated: `3719` + // Minimum execution time: 16_493_000 picoseconds. 
+ Weight::from_parts(17_022_000, 3719) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -583,13 +607,13 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `542` // Estimated: `3702` - // Minimum execution time: 15_275_000 picoseconds. - Weight::from_parts(15_932_000, 3702) + // Minimum execution time: 14_248_000 picoseconds. + Weight::from_parts(15_095_000, 3702) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } /// Storage: `NominationPools::BondedPools` (r:1 w:0) - /// Proof: `NominationPools::BondedPools` (`max_values`: None, `max_size`: Some(220), added: 2695, mode: `MaxEncodedLen`) + /// Proof: `NominationPools::BondedPools` (`max_values`: None, `max_size`: Some(254), added: 2729, mode: `MaxEncodedLen`) /// Storage: `NominationPools::RewardPools` (r:1 w:1) /// Proof: `NominationPools::RewardPools` (`max_values`: None, `max_size`: Some(92), added: 2567, mode: `MaxEncodedLen`) /// Storage: `NominationPools::GlobalMaxCommission` (r:1 w:0) @@ -598,15 +622,15 @@ impl WeightInfo for SubstrateWeight { /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn claim_commission() -> Weight { // Proof Size summary in bytes: - // Measured: `968` - // Estimated: `3685` - // Minimum execution time: 67_931_000 picoseconds. - Weight::from_parts(72_202_000, 3685) + // Measured: `1002` + // Estimated: `3719` + // Minimum execution time: 61_969_000 picoseconds. + Weight::from_parts(63_965_000, 3719) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } /// Storage: `NominationPools::BondedPools` (r:1 w:0) - /// Proof: `NominationPools::BondedPools` (`max_values`: None, `max_size`: Some(220), added: 2695, mode: `MaxEncodedLen`) + /// Proof: `NominationPools::BondedPools` (`max_values`: None, `max_size`: Some(254), added: 2729, mode: `MaxEncodedLen`) /// Storage: `Balances::Freezes` (r:1 w:1) /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`) /// Storage: `System::Account` (r:1 w:1) @@ -615,10 +639,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) fn adjust_pool_deposit() -> Weight { // Proof Size summary in bytes: - // Measured: `900` + // Measured: `901` // Estimated: `4764` - // Minimum execution time: 72_783_000 picoseconds. - Weight::from_parts(75_841_000, 4764) + // Minimum execution time: 65_462_000 picoseconds. 
+ Weight::from_parts(67_250_000, 4764) .saturating_add(T::DbWeight::get().reads(4_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -631,7 +655,7 @@ impl WeightInfo for () { /// Storage: `NominationPools::PoolMembers` (r:1 w:1) /// Proof: `NominationPools::PoolMembers` (`max_values`: None, `max_size`: Some(237), added: 2712, mode: `MaxEncodedLen`) /// Storage: `NominationPools::BondedPools` (r:1 w:1) - /// Proof: `NominationPools::BondedPools` (`max_values`: None, `max_size`: Some(220), added: 2695, mode: `MaxEncodedLen`) + /// Proof: `NominationPools::BondedPools` (`max_values`: None, `max_size`: Some(254), added: 2729, mode: `MaxEncodedLen`) /// Storage: `Staking::Bonded` (r:1 w:0) /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) /// Storage: `Staking::Ledger` (r:1 w:1) @@ -656,19 +680,21 @@ impl WeightInfo for () { /// Proof: `VoterList::ListNodes` (`max_values`: None, `max_size`: Some(154), added: 2629, mode: `MaxEncodedLen`) /// Storage: `VoterList::ListBags` (r:2 w:2) /// Proof: `VoterList::ListBags` (`max_values`: None, `max_size`: Some(82), added: 2557, mode: `MaxEncodedLen`) + /// Storage: `NominationPools::TotalValueLocked` (r:1 w:1) + /// Proof: `NominationPools::TotalValueLocked` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) fn join() -> Weight { // Proof Size summary in bytes: - // Measured: `3388` + // Measured: `3425` // Estimated: `8877` - // Minimum execution time: 203_377_000 picoseconds. - Weight::from_parts(206_359_000, 8877) - .saturating_add(RocksDbWeight::get().reads(19_u64)) - .saturating_add(RocksDbWeight::get().writes(12_u64)) + // Minimum execution time: 184_295_000 picoseconds. + Weight::from_parts(188_860_000, 8877) + .saturating_add(RocksDbWeight::get().reads(20_u64)) + .saturating_add(RocksDbWeight::get().writes(13_u64)) } /// Storage: `NominationPools::PoolMembers` (r:1 w:1) /// Proof: `NominationPools::PoolMembers` (`max_values`: None, `max_size`: Some(237), added: 2712, mode: `MaxEncodedLen`) /// Storage: `NominationPools::BondedPools` (r:1 w:1) - /// Proof: `NominationPools::BondedPools` (`max_values`: None, `max_size`: Some(220), added: 2695, mode: `MaxEncodedLen`) + /// Proof: `NominationPools::BondedPools` (`max_values`: None, `max_size`: Some(254), added: 2729, mode: `MaxEncodedLen`) /// Storage: `NominationPools::RewardPools` (r:1 w:1) /// Proof: `NominationPools::RewardPools` (`max_values`: None, `max_size`: Some(92), added: 2567, mode: `MaxEncodedLen`) /// Storage: `NominationPools::GlobalMaxCommission` (r:1 w:0) @@ -687,21 +713,23 @@ impl WeightInfo for () { /// Proof: `VoterList::ListNodes` (`max_values`: None, `max_size`: Some(154), added: 2629, mode: `MaxEncodedLen`) /// Storage: `VoterList::ListBags` (r:2 w:2) /// Proof: `VoterList::ListBags` (`max_values`: None, `max_size`: Some(82), added: 2557, mode: `MaxEncodedLen`) + /// Storage: `NominationPools::TotalValueLocked` (r:1 w:1) + /// Proof: `NominationPools::TotalValueLocked` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) fn bond_extra_transfer() -> Weight { // Proof Size summary in bytes: - // Measured: `3398` + // Measured: `3435` // Estimated: `8877` - // Minimum execution time: 199_792_000 picoseconds. - Weight::from_parts(206_871_000, 8877) - .saturating_add(RocksDbWeight::get().reads(16_u64)) - .saturating_add(RocksDbWeight::get().writes(12_u64)) + // Minimum execution time: 188_777_000 picoseconds. 
+ Weight::from_parts(192_646_000, 8877) + .saturating_add(RocksDbWeight::get().reads(17_u64)) + .saturating_add(RocksDbWeight::get().writes(13_u64)) } /// Storage: `NominationPools::ClaimPermissions` (r:1 w:0) /// Proof: `NominationPools::ClaimPermissions` (`max_values`: None, `max_size`: Some(41), added: 2516, mode: `MaxEncodedLen`) /// Storage: `NominationPools::PoolMembers` (r:1 w:1) /// Proof: `NominationPools::PoolMembers` (`max_values`: None, `max_size`: Some(237), added: 2712, mode: `MaxEncodedLen`) /// Storage: `NominationPools::BondedPools` (r:1 w:1) - /// Proof: `NominationPools::BondedPools` (`max_values`: None, `max_size`: Some(220), added: 2695, mode: `MaxEncodedLen`) + /// Proof: `NominationPools::BondedPools` (`max_values`: None, `max_size`: Some(254), added: 2729, mode: `MaxEncodedLen`) /// Storage: `NominationPools::RewardPools` (r:1 w:1) /// Proof: `NominationPools::RewardPools` (`max_values`: None, `max_size`: Some(92), added: 2567, mode: `MaxEncodedLen`) /// Storage: `NominationPools::GlobalMaxCommission` (r:1 w:0) @@ -720,21 +748,23 @@ impl WeightInfo for () { /// Proof: `VoterList::ListNodes` (`max_values`: None, `max_size`: Some(154), added: 2629, mode: `MaxEncodedLen`) /// Storage: `VoterList::ListBags` (r:2 w:2) /// Proof: `VoterList::ListBags` (`max_values`: None, `max_size`: Some(82), added: 2557, mode: `MaxEncodedLen`) + /// Storage: `NominationPools::TotalValueLocked` (r:1 w:1) + /// Proof: `NominationPools::TotalValueLocked` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) fn bond_extra_other() -> Weight { // Proof Size summary in bytes: - // Measured: `3463` + // Measured: `3500` // Estimated: `8877` - // Minimum execution time: 246_362_000 picoseconds. - Weight::from_parts(253_587_000, 8877) - .saturating_add(RocksDbWeight::get().reads(17_u64)) - .saturating_add(RocksDbWeight::get().writes(13_u64)) + // Minimum execution time: 221_728_000 picoseconds. + Weight::from_parts(227_569_000, 8877) + .saturating_add(RocksDbWeight::get().reads(18_u64)) + .saturating_add(RocksDbWeight::get().writes(14_u64)) } /// Storage: `NominationPools::ClaimPermissions` (r:1 w:0) /// Proof: `NominationPools::ClaimPermissions` (`max_values`: None, `max_size`: Some(41), added: 2516, mode: `MaxEncodedLen`) /// Storage: `NominationPools::PoolMembers` (r:1 w:1) /// Proof: `NominationPools::PoolMembers` (`max_values`: None, `max_size`: Some(237), added: 2712, mode: `MaxEncodedLen`) /// Storage: `NominationPools::BondedPools` (r:1 w:1) - /// Proof: `NominationPools::BondedPools` (`max_values`: None, `max_size`: Some(220), added: 2695, mode: `MaxEncodedLen`) + /// Proof: `NominationPools::BondedPools` (`max_values`: None, `max_size`: Some(254), added: 2729, mode: `MaxEncodedLen`) /// Storage: `NominationPools::RewardPools` (r:1 w:1) /// Proof: `NominationPools::RewardPools` (`max_values`: None, `max_size`: Some(92), added: 2567, mode: `MaxEncodedLen`) /// Storage: `NominationPools::GlobalMaxCommission` (r:1 w:0) @@ -743,17 +773,17 @@ impl WeightInfo for () { /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn claim_payout() -> Weight { // Proof Size summary in bytes: - // Measured: `1171` - // Estimated: `3702` - // Minimum execution time: 81_115_000 picoseconds. - Weight::from_parts(83_604_000, 3702) + // Measured: `1172` + // Estimated: `3719` + // Minimum execution time: 75_310_000 picoseconds. 
+ Weight::from_parts(77_709_000, 3719) .saturating_add(RocksDbWeight::get().reads(6_u64)) .saturating_add(RocksDbWeight::get().writes(4_u64)) } /// Storage: `NominationPools::PoolMembers` (r:1 w:1) /// Proof: `NominationPools::PoolMembers` (`max_values`: None, `max_size`: Some(237), added: 2712, mode: `MaxEncodedLen`) /// Storage: `NominationPools::BondedPools` (r:1 w:1) - /// Proof: `NominationPools::BondedPools` (`max_values`: None, `max_size`: Some(220), added: 2695, mode: `MaxEncodedLen`) + /// Proof: `NominationPools::BondedPools` (`max_values`: None, `max_size`: Some(254), added: 2729, mode: `MaxEncodedLen`) /// Storage: `NominationPools::RewardPools` (r:1 w:1) /// Proof: `NominationPools::RewardPools` (`max_values`: None, `max_size`: Some(92), added: 2567, mode: `MaxEncodedLen`) /// Storage: `Staking::Bonded` (r:1 w:0) @@ -784,15 +814,15 @@ impl WeightInfo for () { /// Proof: `NominationPools::CounterForSubPoolsStorage` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) fn unbond() -> Weight { // Proof Size summary in bytes: - // Measured: `3674` + // Measured: `3622` // Estimated: `27847` - // Minimum execution time: 187_210_000 picoseconds. - Weight::from_parts(189_477_000, 27847) + // Minimum execution time: 170_656_000 picoseconds. + Weight::from_parts(174_950_000, 27847) .saturating_add(RocksDbWeight::get().reads(20_u64)) .saturating_add(RocksDbWeight::get().writes(13_u64)) } /// Storage: `NominationPools::BondedPools` (r:1 w:0) - /// Proof: `NominationPools::BondedPools` (`max_values`: None, `max_size`: Some(220), added: 2695, mode: `MaxEncodedLen`) + /// Proof: `NominationPools::BondedPools` (`max_values`: None, `max_size`: Some(254), added: 2729, mode: `MaxEncodedLen`) /// Storage: `Staking::Bonded` (r:1 w:0) /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) /// Storage: `Staking::Ledger` (r:1 w:1) @@ -803,24 +833,26 @@ impl WeightInfo for () { /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) /// Storage: `Balances::Freezes` (r:1 w:0) /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`) + /// Storage: `NominationPools::TotalValueLocked` (r:1 w:1) + /// Proof: `NominationPools::TotalValueLocked` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) /// The range of component `s` is `[0, 100]`. fn pool_withdraw_unbonded(s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1743` + // Measured: `1817` // Estimated: `4764` - // Minimum execution time: 66_384_000 picoseconds. - Weight::from_parts(69_498_267, 4764) - // Standard Error: 2_566 - .saturating_add(Weight::from_parts(34_528, 0).saturating_mul(s.into())) - .saturating_add(RocksDbWeight::get().reads(6_u64)) - .saturating_add(RocksDbWeight::get().writes(2_u64)) + // Minimum execution time: 68_866_000 picoseconds. 
+ Weight::from_parts(72_312_887, 4764) + // Standard Error: 1_635 + .saturating_add(Weight::from_parts(41_679, 0).saturating_mul(s.into())) + .saturating_add(RocksDbWeight::get().reads(7_u64)) + .saturating_add(RocksDbWeight::get().writes(3_u64)) } /// Storage: `NominationPools::PoolMembers` (r:1 w:1) /// Proof: `NominationPools::PoolMembers` (`max_values`: None, `max_size`: Some(237), added: 2712, mode: `MaxEncodedLen`) /// Storage: `Staking::CurrentEra` (r:1 w:0) /// Proof: `Staking::CurrentEra` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) /// Storage: `NominationPools::BondedPools` (r:1 w:1) - /// Proof: `NominationPools::BondedPools` (`max_values`: None, `max_size`: Some(220), added: 2695, mode: `MaxEncodedLen`) + /// Proof: `NominationPools::BondedPools` (`max_values`: None, `max_size`: Some(254), added: 2729, mode: `MaxEncodedLen`) /// Storage: `NominationPools::SubPoolsStorage` (r:1 w:1) /// Proof: `NominationPools::SubPoolsStorage` (`max_values`: None, `max_size`: Some(24382), added: 26857, mode: `MaxEncodedLen`) /// Storage: `Staking::Bonded` (r:1 w:0) @@ -833,6 +865,8 @@ impl WeightInfo for () { /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`) /// Storage: `System::Account` (r:1 w:1) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `NominationPools::TotalValueLocked` (r:1 w:1) + /// Proof: `NominationPools::TotalValueLocked` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) /// Storage: `NominationPools::CounterForPoolMembers` (r:1 w:1) /// Proof: `NominationPools::CounterForPoolMembers` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) /// Storage: `NominationPools::ClaimPermissions` (r:0 w:1) @@ -840,21 +874,21 @@ impl WeightInfo for () { /// The range of component `s` is `[0, 100]`. fn withdraw_unbonded_update(s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `2171` + // Measured: `2207` // Estimated: `27847` - // Minimum execution time: 137_474_000 picoseconds. - Weight::from_parts(142_341_215, 27847) - // Standard Error: 3_468 - .saturating_add(Weight::from_parts(66_597, 0).saturating_mul(s.into())) - .saturating_add(RocksDbWeight::get().reads(10_u64)) - .saturating_add(RocksDbWeight::get().writes(8_u64)) + // Minimum execution time: 131_383_000 picoseconds. 
+ Weight::from_parts(136_595_971, 27847) + // Standard Error: 2_715 + .saturating_add(Weight::from_parts(52_351, 0).saturating_mul(s.into())) + .saturating_add(RocksDbWeight::get().reads(11_u64)) + .saturating_add(RocksDbWeight::get().writes(9_u64)) } /// Storage: `NominationPools::PoolMembers` (r:1 w:1) /// Proof: `NominationPools::PoolMembers` (`max_values`: None, `max_size`: Some(237), added: 2712, mode: `MaxEncodedLen`) /// Storage: `Staking::CurrentEra` (r:1 w:0) /// Proof: `Staking::CurrentEra` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) /// Storage: `NominationPools::BondedPools` (r:1 w:1) - /// Proof: `NominationPools::BondedPools` (`max_values`: None, `max_size`: Some(220), added: 2695, mode: `MaxEncodedLen`) + /// Proof: `NominationPools::BondedPools` (`max_values`: None, `max_size`: Some(254), added: 2729, mode: `MaxEncodedLen`) /// Storage: `NominationPools::SubPoolsStorage` (r:1 w:1) /// Proof: `NominationPools::SubPoolsStorage` (`max_values`: None, `max_size`: Some(24382), added: 26857, mode: `MaxEncodedLen`) /// Storage: `Staking::Bonded` (r:1 w:1) @@ -863,16 +897,18 @@ impl WeightInfo for () { /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) /// Storage: `Staking::SlashingSpans` (r:1 w:0) /// Proof: `Staking::SlashingSpans` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// Storage: `Staking::Validators` (r:1 w:0) - /// Proof: `Staking::Validators` (`max_values`: None, `max_size`: Some(45), added: 2520, mode: `MaxEncodedLen`) - /// Storage: `Staking::Nominators` (r:1 w:0) - /// Proof: `Staking::Nominators` (`max_values`: None, `max_size`: Some(558), added: 3033, mode: `MaxEncodedLen`) - /// Storage: `System::Account` (r:2 w:2) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) /// Storage: `Balances::Locks` (r:2 w:1) /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) /// Storage: `Balances::Freezes` (r:2 w:1) /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`) + /// Storage: `System::Account` (r:2 w:2) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Staking::Validators` (r:1 w:0) + /// Proof: `Staking::Validators` (`max_values`: None, `max_size`: Some(45), added: 2520, mode: `MaxEncodedLen`) + /// Storage: `Staking::Nominators` (r:1 w:0) + /// Proof: `Staking::Nominators` (`max_values`: None, `max_size`: Some(558), added: 3033, mode: `MaxEncodedLen`) + /// Storage: `NominationPools::TotalValueLocked` (r:1 w:1) + /// Proof: `NominationPools::TotalValueLocked` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) /// Storage: `NominationPools::CounterForPoolMembers` (r:1 w:1) /// Proof: `NominationPools::CounterForPoolMembers` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) /// Storage: `NominationPools::ReversePoolIdLookup` (r:1 w:1) @@ -896,12 +932,12 @@ impl WeightInfo for () { /// The range of component `s` is `[0, 100]`. fn withdraw_unbonded_kill(_s: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `2526` + // Measured: `2525` // Estimated: `27847` - // Minimum execution time: 249_135_000 picoseconds. 
- Weight::from_parts(263_632_571, 27847) - .saturating_add(RocksDbWeight::get().reads(23_u64)) - .saturating_add(RocksDbWeight::get().writes(19_u64)) + // Minimum execution time: 233_314_000 picoseconds. + Weight::from_parts(241_694_316, 27847) + .saturating_add(RocksDbWeight::get().reads(24_u64)) + .saturating_add(RocksDbWeight::get().writes(20_u64)) } /// Storage: `NominationPools::LastPoolId` (r:1 w:1) /// Proof: `NominationPools::LastPoolId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) @@ -927,14 +963,12 @@ impl WeightInfo for () { /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) /// Storage: `Staking::Bonded` (r:1 w:1) /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) - /// Storage: `Staking::Ledger` (r:1 w:1) - /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) - /// Storage: `Staking::CurrentEra` (r:1 w:0) - /// Proof: `Staking::CurrentEra` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) /// Storage: `Balances::Locks` (r:2 w:1) /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) /// Storage: `Balances::Freezes` (r:2 w:1) /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`) + /// Storage: `NominationPools::TotalValueLocked` (r:1 w:1) + /// Proof: `NominationPools::TotalValueLocked` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) /// Storage: `NominationPools::RewardPools` (r:1 w:1) /// Proof: `NominationPools::RewardPools` (`max_values`: None, `max_size`: Some(92), added: 2567, mode: `MaxEncodedLen`) /// Storage: `NominationPools::CounterForRewardPools` (r:1 w:1) @@ -944,20 +978,22 @@ impl WeightInfo for () { /// Storage: `NominationPools::CounterForReversePoolIdLookup` (r:1 w:1) /// Proof: `NominationPools::CounterForReversePoolIdLookup` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) /// Storage: `NominationPools::BondedPools` (r:1 w:1) - /// Proof: `NominationPools::BondedPools` (`max_values`: None, `max_size`: Some(220), added: 2695, mode: `MaxEncodedLen`) + /// Proof: `NominationPools::BondedPools` (`max_values`: None, `max_size`: Some(254), added: 2729, mode: `MaxEncodedLen`) + /// Storage: `Staking::Ledger` (r:0 w:1) + /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) /// Storage: `Staking::Payee` (r:0 w:1) /// Proof: `Staking::Payee` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) fn create() -> Weight { // Proof Size summary in bytes: - // Measured: `1289` + // Measured: `1169` // Estimated: `8538` - // Minimum execution time: 214_207_000 picoseconds. - Weight::from_parts(221_588_000, 8538) - .saturating_add(RocksDbWeight::get().reads(24_u64)) - .saturating_add(RocksDbWeight::get().writes(16_u64)) + // Minimum execution time: 171_465_000 picoseconds. 
+ Weight::from_parts(176_478_000, 8538) + .saturating_add(RocksDbWeight::get().reads(23_u64)) + .saturating_add(RocksDbWeight::get().writes(17_u64)) } /// Storage: `NominationPools::BondedPools` (r:1 w:0) - /// Proof: `NominationPools::BondedPools` (`max_values`: None, `max_size`: Some(220), added: 2695, mode: `MaxEncodedLen`) + /// Proof: `NominationPools::BondedPools` (`max_values`: None, `max_size`: Some(254), added: 2729, mode: `MaxEncodedLen`) /// Storage: `Staking::Bonded` (r:1 w:0) /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) /// Storage: `Staking::Ledger` (r:1 w:0) @@ -983,34 +1019,34 @@ impl WeightInfo for () { /// The range of component `n` is `[1, 16]`. fn nominate(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `1849` + // Measured: `1808` // Estimated: `4556 + n * (2520 ±0)` - // Minimum execution time: 70_626_000 picoseconds. - Weight::from_parts(73_830_182, 4556) - // Standard Error: 24_496 - .saturating_add(Weight::from_parts(1_561_416, 0).saturating_mul(n.into())) + // Minimum execution time: 63_588_000 picoseconds. + Weight::from_parts(64_930_584, 4556) + // Standard Error: 9_167 + .saturating_add(Weight::from_parts(1_595_779, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads(12_u64)) .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(n.into()))) .saturating_add(RocksDbWeight::get().writes(5_u64)) .saturating_add(Weight::from_parts(0, 2520).saturating_mul(n.into())) } /// Storage: `NominationPools::BondedPools` (r:1 w:1) - /// Proof: `NominationPools::BondedPools` (`max_values`: None, `max_size`: Some(220), added: 2695, mode: `MaxEncodedLen`) + /// Proof: `NominationPools::BondedPools` (`max_values`: None, `max_size`: Some(254), added: 2729, mode: `MaxEncodedLen`) /// Storage: `Staking::Bonded` (r:1 w:0) /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) /// Storage: `Staking::Ledger` (r:1 w:0) /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) fn set_state() -> Weight { // Proof Size summary in bytes: - // Measured: `1438` + // Measured: `1434` // Estimated: `4556` - // Minimum execution time: 36_542_000 picoseconds. - Weight::from_parts(37_644_000, 4556) + // Minimum execution time: 32_899_000 picoseconds. + Weight::from_parts(33_955_000, 4556) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } /// Storage: `NominationPools::BondedPools` (r:1 w:0) - /// Proof: `NominationPools::BondedPools` (`max_values`: None, `max_size`: Some(220), added: 2695, mode: `MaxEncodedLen`) + /// Proof: `NominationPools::BondedPools` (`max_values`: None, `max_size`: Some(254), added: 2729, mode: `MaxEncodedLen`) /// Storage: `NominationPools::Metadata` (r:1 w:1) /// Proof: `NominationPools::Metadata` (`max_values`: None, `max_size`: Some(270), added: 2745, mode: `MaxEncodedLen`) /// Storage: `NominationPools::CounterForMetadata` (r:1 w:1) @@ -1018,12 +1054,12 @@ impl WeightInfo for () { /// The range of component `n` is `[1, 256]`. fn set_metadata(n: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `531` + // Measured: `532` // Estimated: `3735` - // Minimum execution time: 15_130_000 picoseconds. - Weight::from_parts(16_319_671, 3735) - // Standard Error: 351 - .saturating_add(Weight::from_parts(2_024, 0).saturating_mul(n.into())) + // Minimum execution time: 13_778_000 picoseconds. 
+ Weight::from_parts(14_770_006, 3735) + // Standard Error: 151 + .saturating_add(Weight::from_parts(1_900, 0).saturating_mul(n.into())) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -1043,23 +1079,23 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 6_819_000 picoseconds. - Weight::from_parts(7_253_000, 0) + // Minimum execution time: 4_550_000 picoseconds. + Weight::from_parts(4_935_000, 0) .saturating_add(RocksDbWeight::get().writes(6_u64)) } /// Storage: `NominationPools::BondedPools` (r:1 w:1) - /// Proof: `NominationPools::BondedPools` (`max_values`: None, `max_size`: Some(220), added: 2695, mode: `MaxEncodedLen`) + /// Proof: `NominationPools::BondedPools` (`max_values`: None, `max_size`: Some(254), added: 2729, mode: `MaxEncodedLen`) fn update_roles() -> Weight { // Proof Size summary in bytes: - // Measured: `531` - // Estimated: `3685` - // Minimum execution time: 19_596_000 picoseconds. - Weight::from_parts(20_828_000, 3685) + // Measured: `532` + // Estimated: `3719` + // Minimum execution time: 16_759_000 picoseconds. + Weight::from_parts(17_346_000, 3719) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } /// Storage: `NominationPools::BondedPools` (r:1 w:0) - /// Proof: `NominationPools::BondedPools` (`max_values`: None, `max_size`: Some(220), added: 2695, mode: `MaxEncodedLen`) + /// Proof: `NominationPools::BondedPools` (`max_values`: None, `max_size`: Some(254), added: 2729, mode: `MaxEncodedLen`) /// Storage: `Staking::Bonded` (r:1 w:0) /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) /// Storage: `Staking::Ledger` (r:1 w:0) @@ -1078,15 +1114,15 @@ impl WeightInfo for () { /// Proof: `VoterList::CounterForListNodes` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) fn chill() -> Weight { // Proof Size summary in bytes: - // Measured: `2012` + // Measured: `1971` // Estimated: `4556` - // Minimum execution time: 68_551_000 picoseconds. - Weight::from_parts(71_768_000, 4556) + // Minimum execution time: 61_970_000 picoseconds. + Weight::from_parts(63_738_000, 4556) .saturating_add(RocksDbWeight::get().reads(9_u64)) .saturating_add(RocksDbWeight::get().writes(5_u64)) } /// Storage: `NominationPools::BondedPools` (r:1 w:1) - /// Proof: `NominationPools::BondedPools` (`max_values`: None, `max_size`: Some(220), added: 2695, mode: `MaxEncodedLen`) + /// Proof: `NominationPools::BondedPools` (`max_values`: None, `max_size`: Some(254), added: 2729, mode: `MaxEncodedLen`) /// Storage: `NominationPools::RewardPools` (r:1 w:1) /// Proof: `NominationPools::RewardPools` (`max_values`: None, `max_size`: Some(92), added: 2567, mode: `MaxEncodedLen`) /// Storage: `NominationPools::GlobalMaxCommission` (r:1 w:0) @@ -1095,34 +1131,45 @@ impl WeightInfo for () { /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn set_commission() -> Weight { // Proof Size summary in bytes: - // Measured: `770` - // Estimated: `3685` - // Minimum execution time: 36_128_000 picoseconds. - Weight::from_parts(38_547_000, 3685) + // Measured: `804` + // Estimated: `3719` + // Minimum execution time: 31_950_000 picoseconds. 
+ Weight::from_parts(33_190_000, 3719) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } /// Storage: `NominationPools::BondedPools` (r:1 w:1) - /// Proof: `NominationPools::BondedPools` (`max_values`: None, `max_size`: Some(220), added: 2695, mode: `MaxEncodedLen`) + /// Proof: `NominationPools::BondedPools` (`max_values`: None, `max_size`: Some(254), added: 2729, mode: `MaxEncodedLen`) /// Storage: `NominationPools::GlobalMaxCommission` (r:1 w:0) /// Proof: `NominationPools::GlobalMaxCommission` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) fn set_commission_max() -> Weight { // Proof Size summary in bytes: - // Measured: `571` - // Estimated: `3685` - // Minimum execution time: 20_067_000 picoseconds. - Weight::from_parts(21_044_000, 3685) + // Measured: `572` + // Estimated: `3719` + // Minimum execution time: 16_807_000 picoseconds. + Weight::from_parts(17_733_000, 3719) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } /// Storage: `NominationPools::BondedPools` (r:1 w:1) - /// Proof: `NominationPools::BondedPools` (`max_values`: None, `max_size`: Some(220), added: 2695, mode: `MaxEncodedLen`) + /// Proof: `NominationPools::BondedPools` (`max_values`: None, `max_size`: Some(254), added: 2729, mode: `MaxEncodedLen`) fn set_commission_change_rate() -> Weight { // Proof Size summary in bytes: - // Measured: `531` - // Estimated: `3685` - // Minimum execution time: 19_186_000 picoseconds. - Weight::from_parts(20_189_000, 3685) + // Measured: `532` + // Estimated: `3719` + // Minimum execution time: 16_710_000 picoseconds. + Weight::from_parts(17_563_000, 3719) + .saturating_add(RocksDbWeight::get().reads(1_u64)) + .saturating_add(RocksDbWeight::get().writes(1_u64)) + } + /// Storage: `NominationPools::BondedPools` (r:1 w:1) + /// Proof: `NominationPools::BondedPools` (`max_values`: None, `max_size`: Some(254), added: 2729, mode: `MaxEncodedLen`) + fn set_commission_claim_permission() -> Weight { + // Proof Size summary in bytes: + // Measured: `532` + // Estimated: `3719` + // Minimum execution time: 16_493_000 picoseconds. + Weight::from_parts(17_022_000, 3719) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -1134,13 +1181,13 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `542` // Estimated: `3702` - // Minimum execution time: 15_275_000 picoseconds. - Weight::from_parts(15_932_000, 3702) + // Minimum execution time: 14_248_000 picoseconds. 
+ Weight::from_parts(15_095_000, 3702) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } /// Storage: `NominationPools::BondedPools` (r:1 w:0) - /// Proof: `NominationPools::BondedPools` (`max_values`: None, `max_size`: Some(220), added: 2695, mode: `MaxEncodedLen`) + /// Proof: `NominationPools::BondedPools` (`max_values`: None, `max_size`: Some(254), added: 2729, mode: `MaxEncodedLen`) /// Storage: `NominationPools::RewardPools` (r:1 w:1) /// Proof: `NominationPools::RewardPools` (`max_values`: None, `max_size`: Some(92), added: 2567, mode: `MaxEncodedLen`) /// Storage: `NominationPools::GlobalMaxCommission` (r:1 w:0) @@ -1149,15 +1196,15 @@ impl WeightInfo for () { /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) fn claim_commission() -> Weight { // Proof Size summary in bytes: - // Measured: `968` - // Estimated: `3685` - // Minimum execution time: 67_931_000 picoseconds. - Weight::from_parts(72_202_000, 3685) + // Measured: `1002` + // Estimated: `3719` + // Minimum execution time: 61_969_000 picoseconds. + Weight::from_parts(63_965_000, 3719) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } /// Storage: `NominationPools::BondedPools` (r:1 w:0) - /// Proof: `NominationPools::BondedPools` (`max_values`: None, `max_size`: Some(220), added: 2695, mode: `MaxEncodedLen`) + /// Proof: `NominationPools::BondedPools` (`max_values`: None, `max_size`: Some(254), added: 2729, mode: `MaxEncodedLen`) /// Storage: `Balances::Freezes` (r:1 w:1) /// Proof: `Balances::Freezes` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`) /// Storage: `System::Account` (r:1 w:1) @@ -1166,10 +1213,10 @@ impl WeightInfo for () { /// Proof: `Balances::Locks` (`max_values`: None, `max_size`: Some(1299), added: 3774, mode: `MaxEncodedLen`) fn adjust_pool_deposit() -> Weight { // Proof Size summary in bytes: - // Measured: `900` + // Measured: `901` // Estimated: `4764` - // Minimum execution time: 72_783_000 picoseconds. - Weight::from_parts(75_841_000, 4764) + // Minimum execution time: 65_462_000 picoseconds. + Weight::from_parts(67_250_000, 4764) .saturating_add(RocksDbWeight::get().reads(4_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } -- GitLab From 0f7ffc66a59526968c5cad39edf63bb54ca11585 Mon Sep 17 00:00:00 2001 From: Svyatoslav Nikolsky Date: Tue, 28 Nov 2023 12:14:50 +0300 Subject: [PATCH 137/199] Added NetworkId::PolkadotBulletin variant (#2517) We're going to bridge Polkadot Bridge Hub with [Polkadot Bulletin chain](https://github.com/zdave-parity/polkadot-bulletin-chain) soon (and Rococo Bridge Hub with 1:1 copy of Polkadot Bulletin chain even sooner), so we need a variant for that chain in `NetworkId`. As suggested, I'm adding a new variant for it to the `NetworkId` (we may have used `ByGenesis(_)`, but decision was made to have a dedicated variant for that). --- polkadot/xcm/src/v3/junction.rs | 2 ++ 1 file changed, 2 insertions(+) diff --git a/polkadot/xcm/src/v3/junction.rs b/polkadot/xcm/src/v3/junction.rs index 47429a8c36e..5e6e4ab903b 100644 --- a/polkadot/xcm/src/v3/junction.rs +++ b/polkadot/xcm/src/v3/junction.rs @@ -75,6 +75,8 @@ pub enum NetworkId { BitcoinCore, /// The Bitcoin network, including hard-forks supported by Bitcoin Cash developers. BitcoinCash, + /// The Polkadot Bulletin chain. 
+ PolkadotBulletin, } impl From for Option { -- GitLab From 58a1f9c53dbf8766bd703b4fd417a9ca1ab99df3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Andr=C3=A9=20Silva?= <123550+andresilva@users.noreply.github.com> Date: Tue, 28 Nov 2023 09:18:26 +0000 Subject: [PATCH 138/199] polkadot: disable block authoring backoff on production networks (#2510) Currently the polkadot node will backoff from block authoring if finality starts lagging. This PR disables this mechanism on production networks (polkadot and kusama) and adds a flags to optionally force enabling it. --- .../relay-chain-inprocess-interface/src/lib.rs | 1 + polkadot/cli/src/cli.rs | 4 ++++ polkadot/cli/src/command.rs | 1 + polkadot/node/service/src/lib.rs | 18 ++++++++++++++---- polkadot/node/test/service/src/lib.rs | 1 + .../test-parachains/adder/collator/src/main.rs | 1 + .../undying/collator/src/main.rs | 1 + 7 files changed, 23 insertions(+), 4 deletions(-) diff --git a/cumulus/client/relay-chain-inprocess-interface/src/lib.rs b/cumulus/client/relay-chain-inprocess-interface/src/lib.rs index 42a56b649f0..3d297af00f2 100644 --- a/cumulus/client/relay-chain-inprocess-interface/src/lib.rs +++ b/cumulus/client/relay-chain-inprocess-interface/src/lib.rs @@ -286,6 +286,7 @@ fn build_polkadot_full_node( grandpa_pause: None, // Disable BEEFY. It should not be required by the internal relay chain node. enable_beefy: false, + force_authoring_backoff: false, jaeger_agent: None, telemetry_worker_handle, diff --git a/polkadot/cli/src/cli.rs b/polkadot/cli/src/cli.rs index e20e35c9103..6a7a8b4ad7b 100644 --- a/polkadot/cli/src/cli.rs +++ b/polkadot/cli/src/cli.rs @@ -98,6 +98,10 @@ pub struct RunCmd { #[arg(long)] pub no_beefy: bool, + /// Enable the block authoring backoff that is triggered when finality is lagging. + #[arg(long)] + pub force_authoring_backoff: bool, + /// Add the destination address to the 'Jaeger' agent. /// /// Must be valid socket address, of format `IP:Port` (commonly `127.0.0.1:6831`). diff --git a/polkadot/cli/src/command.rs b/polkadot/cli/src/command.rs index 2dcf5e0e8d7..c9c3c39aab1 100644 --- a/polkadot/cli/src/command.rs +++ b/polkadot/cli/src/command.rs @@ -259,6 +259,7 @@ where is_parachain_node: service::IsParachainNode::No, grandpa_pause, enable_beefy, + force_authoring_backoff: cli.run.force_authoring_backoff, jaeger_agent, telemetry_worker_handle: None, node_version, diff --git a/polkadot/node/service/src/lib.rs b/polkadot/node/service/src/lib.rs index c03835b2a4b..fafdc586009 100644 --- a/polkadot/node/service/src/lib.rs +++ b/polkadot/node/service/src/lib.rs @@ -625,6 +625,9 @@ pub struct NewFullParams { pub is_parachain_node: IsParachainNode, pub grandpa_pause: Option<(u32, u32)>, pub enable_beefy: bool, + /// Whether to enable the block authoring backoff on production networks + /// where it isn't enabled by default. + pub force_authoring_backoff: bool, pub jaeger_agent: Option, pub telemetry_worker_handle: Option, /// The version of the node. 
TESTING ONLY: `None` can be passed to skip the node/worker version @@ -716,6 +719,7 @@ pub fn new_full( is_parachain_node, grandpa_pause, enable_beefy, + force_authoring_backoff, jaeger_agent, telemetry_worker_handle, node_version, @@ -733,15 +737,21 @@ pub fn new_full( let is_offchain_indexing_enabled = config.offchain_worker.indexing_enabled; let role = config.role.clone(); let force_authoring = config.force_authoring; - let backoff_authoring_blocks = { + let backoff_authoring_blocks = if !force_authoring_backoff && + (config.chain_spec.is_polkadot() || config.chain_spec.is_kusama()) + { + // the block authoring backoff is disabled by default on production networks + None + } else { let mut backoff = sc_consensus_slots::BackoffAuthoringOnFinalizedHeadLagging::default(); if config.chain_spec.is_rococo() || config.chain_spec.is_wococo() || - config.chain_spec.is_versi() + config.chain_spec.is_versi() || + config.chain_spec.is_dev() { - // it's a testnet that's in flux, finality has stalled sometimes due - // to operational issues and it's annoying to slow down block + // on testnets that are in flux (like rococo or versi), finality has stalled + // sometimes due to operational issues and it's annoying to slow down block // production to 1 block per hour. backoff.max_interval = 10; } diff --git a/polkadot/node/test/service/src/lib.rs b/polkadot/node/test/service/src/lib.rs index be2746daf32..a8352b60751 100644 --- a/polkadot/node/test/service/src/lib.rs +++ b/polkadot/node/test/service/src/lib.rs @@ -82,6 +82,7 @@ pub fn new_full( is_parachain_node, grandpa_pause: None, enable_beefy: true, + force_authoring_backoff: false, jaeger_agent: None, telemetry_worker_handle: None, node_version: None, diff --git a/polkadot/parachain/test-parachains/adder/collator/src/main.rs b/polkadot/parachain/test-parachains/adder/collator/src/main.rs index dfaa1973206..1558df6708d 100644 --- a/polkadot/parachain/test-parachains/adder/collator/src/main.rs +++ b/polkadot/parachain/test-parachains/adder/collator/src/main.rs @@ -64,6 +64,7 @@ fn main() -> Result<()> { ), grandpa_pause: None, enable_beefy: false, + force_authoring_backoff: false, jaeger_agent: None, telemetry_worker_handle: None, diff --git a/polkadot/parachain/test-parachains/undying/collator/src/main.rs b/polkadot/parachain/test-parachains/undying/collator/src/main.rs index c6b338a20dc..1db1b6f1102 100644 --- a/polkadot/parachain/test-parachains/undying/collator/src/main.rs +++ b/polkadot/parachain/test-parachains/undying/collator/src/main.rs @@ -84,6 +84,7 @@ fn main() -> Result<()> { ), grandpa_pause: None, enable_beefy: false, + force_authoring_backoff: false, jaeger_agent: None, telemetry_worker_handle: None, -- GitLab From 6dc3e6c909361c6643b8d91c2929c306de4aee4a Mon Sep 17 00:00:00 2001 From: Liam Aharon Date: Tue, 28 Nov 2023 14:02:48 +0400 Subject: [PATCH 139/199] Set `frame_system::LastRuntimeUpgrade` after running `try-runtime` migrations (#2515) Sets `frame_system::LastRuntimeUpgrade` after running try-runtime migrations to better emulate real behavior. This fixes an issue where migrations using the spec version to determine whether to execute can incorrectly fail idempotency checks. @s0me0ne-unkn0wn noticed this issue with the session key migration introduced in https://github.com/paritytech/polkadot-sdk/pull/2265. 
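To illustrate why this matters, here is a minimal sketch (not part of this patch; the struct name, spec-version constant and weight values are made up for the example) of the kind of migration the fix is aimed at, one that gates its execution on the spec version recorded in `frame_system::LastRuntimeUpgrade`:

```rust
use frame_support::{traits::OnRuntimeUpgrade, weights::Weight};

/// Hypothetical migration that only runs if the previously recorded spec
/// version is older than the release that introduced the new state layout.
pub struct VersionGatedMigration<T>(core::marker::PhantomData<T>);

impl<T: frame_system::Config> OnRuntimeUpgrade for VersionGatedMigration<T> {
	fn on_runtime_upgrade() -> Weight {
		// Spec version at which the state is already in the new format (illustrative value).
		const MIGRATED_AT_SPEC_VERSION: u32 = 1_000_000;

		// Spec version of the last recorded runtime upgrade, defaulting to 0 if unset.
		let last_spec_version = frame_system::LastRuntimeUpgrade::<T>::get()
			.map(|info| info.spec_version.0)
			.unwrap_or(0);

		if last_spec_version >= MIGRATED_AT_SPEC_VERSION {
			// Already migrated: stay a no-op so repeated runs are idempotent.
			return T::DbWeight::get().reads(1);
		}

		// ... perform the actual state migration here ...
		T::DbWeight::get().reads_writes(1, 1)
	}
}
```

Previously, `try_runtime_upgrade` left `LastRuntimeUpgrade` at its on-chain value, so a second pass over such a migration would still observe the old spec version and try to migrate again, tripping the idempotency checks. Bumping the storage item right after the migrations run, as the diff below does, better emulates a real upgrade, where the value is updated once the new runtime executes.
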
--- substrate/frame/executive/src/lib.rs | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) diff --git a/substrate/frame/executive/src/lib.rs b/substrate/frame/executive/src/lib.rs index dec1fe158bd..4eb76325bab 100644 --- a/substrate/frame/executive/src/lib.rs +++ b/substrate/frame/executive/src/lib.rs @@ -362,9 +362,13 @@ where Ok(frame_system::Pallet::::block_weight().total()) } - /// Execute all `OnRuntimeUpgrade` of this runtime. + /// Execute all Migrations of this runtime. /// /// The `checks` param determines whether to execute `pre/post_upgrade` and `try_state` hooks. + /// + /// [`frame_system::LastRuntimeUpgrade`] is set to the current runtime version after + /// migrations execute. This is important for idempotency checks, because some migrations use + /// this value to determine whether or not they should execute. pub fn try_runtime_upgrade(checks: UpgradeCheckSelect) -> Result { let before_all_weight = ::before_all_runtime_migrations(); @@ -372,6 +376,13 @@ where <(COnRuntimeUpgrade, AllPalletsWithSystem) as OnRuntimeUpgrade>::try_on_runtime_upgrade( checks.pre_and_post(), )?; + + frame_system::LastRuntimeUpgrade::::put( + frame_system::LastRuntimeUpgradeInfo::from( + >::get(), + ), + ); + // Nothing should modify the state after the migrations ran: let _guard = StorageNoopGuard::default(); -- GitLab From 6a417eb9c22f329f252bb48afe705fc43962817c Mon Sep 17 00:00:00 2001 From: Vladimir Istyufeev Date: Tue, 28 Nov 2023 15:02:10 +0400 Subject: [PATCH 140/199] Increase `cargo-check-each-crate-macos` timeout (#2519) --- .gitlab/pipeline/test.yml | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/.gitlab/pipeline/test.yml b/.gitlab/pipeline/test.yml index 9c3fa7701c8..190249d2d0f 100644 --- a/.gitlab/pipeline/test.yml +++ b/.gitlab/pipeline/test.yml @@ -270,7 +270,7 @@ cargo-check-benches: SKIP_WASM_BUILD=1 time cargo check --locked --benches --all; cargo run --locked --release -p node-bench -- ::trie::read::small --json | tee ./artifacts/benches/$CI_COMMIT_REF_NAME-$CI_COMMIT_SHORT_SHA/::trie::read::small.json; - echo "___Uploading cache for rusty-cachier___"; + echo "___Cache could be uploaded___"; ;; 2) cargo run --locked --release -p node-bench -- ::node::import::sr25519::transfer_keep_alive::paritydb::small --json @@ -439,7 +439,7 @@ cargo-check-each-crate: - .run-immediately # - .collect-artifacts variables: - # $CI_JOB_NAME is set manually so that rusty-cachier can share the cache for all + # $CI_JOB_NAME is set manually so that cache could be shared for all jobs # "cargo-check-each-crate I/N" jobs CI_JOB_NAME: cargo-check-each-crate timeout: 2h @@ -462,10 +462,10 @@ cargo-check-each-crate-macos: variables: SKIP_WASM_BUILD: 1 script: - # TODO: enable rusty-cachier once it supports Mac # TODO: use parallel jobs, as per cargo-check-each-crate, once more Mac runners are available # - time ./scripts/ci/gitlab/check-each-crate.py 1 1 - time cargo check --workspace --locked + timeout: 2h tags: - osx @@ -488,7 +488,7 @@ cargo-hfuzz: # use git version of honggfuzz-rs until v0.5.56 is out, we need a few recent changes: # https://github.com/rust-fuzz/honggfuzz-rs/pull/75 to avoid breakage on debian # https://github.com/rust-fuzz/honggfuzz-rs/pull/81 fix to the above pr - # https://github.com/rust-fuzz/honggfuzz-rs/pull/82 fix for handling rusty-cachier's absolute CARGO_TARGET_DIR + # https://github.com/rust-fuzz/honggfuzz-rs/pull/82 fix for handling absolute CARGO_TARGET_DIR HFUZZ_BUILD_ARGS: > 
--config=patch.crates-io.honggfuzz.git="https://github.com/altaua/honggfuzz-rs" --config=patch.crates-io.honggfuzz.rev="205f7c8c059a0d98fe1cb912cdac84f324cb6981" -- GitLab From cd8741c8b591407a78fdfef1dccdd3b5d03e13f4 Mon Sep 17 00:00:00 2001 From: gupnik <17176722+gupnik@users.noreply.github.com> Date: Tue, 28 Nov 2023 18:43:57 +0530 Subject: [PATCH 141/199] Moves all test runtimes to use `derive_impl` (#2409) Step in https://github.com/paritytech/polkadot-sdk/issues/171 This PR adds `derive_impl` on all `frame_system` config impls for mock runtimes. The overridden configs are maintained as of now to ensure minimal changes. --------- Co-authored-by: Oliver Tale-Yazdi --- cumulus/pallets/xcmp-queue/src/mock.rs | 3 +- cumulus/parachains/common/src/impls.rs | 3 +- .../pallets/collective-content/src/mock.rs | 3 +- .../runtimes/starters/seedling/src/lib.rs | 4 +- .../runtimes/starters/shell/src/lib.rs | 3 +- .../runtimes/testing/penpal/src/lib.rs | 3 +- .../testing/rococo-parachain/src/lib.rs | 3 +- .../runtime/common/src/assigned_slots/mod.rs | 4 +- polkadot/runtime/common/src/auctions.rs | 5 +- polkadot/runtime/common/src/claims.rs | 4 +- polkadot/runtime/common/src/crowdloan/mod.rs | 3 +- polkadot/runtime/common/src/impls.rs | 2 + .../runtime/common/src/integration_tests.rs | 3 +- .../runtime/common/src/paras_registrar/mod.rs | 3 +- polkadot/runtime/common/src/purchase.rs | 4 +- polkadot/runtime/common/src/slots/mod.rs | 4 +- polkadot/runtime/parachains/src/mock.rs | 3 +- .../src/fungible/mock.rs | 2 + .../pallet-xcm-benchmarks/src/generic/mock.rs | 1 + polkadot/xcm/pallet-xcm/src/mock.rs | 3 +- polkadot/xcm/procedural/tests/ui.rs | 2 +- polkadot/xcm/xcm-builder/tests/mock/mod.rs | 3 +- .../xcm-simulator/example/src/parachain.rs | 3 +- .../xcm-simulator/example/src/relay_chain.rs | 3 +- .../xcm/xcm-simulator/fuzzer/src/parachain.rs | 3 +- .../xcm-simulator/fuzzer/src/relay_chain.rs | 3 +- .../pallets/template/src/mock.rs | 6 +- .../src/chain_head/subscription/inner.rs | 2 +- substrate/frame/asset-conversion/src/mock.rs | 3 +- substrate/frame/asset-rate/src/mock.rs | 6 +- substrate/frame/assets/src/mock.rs | 3 +- substrate/frame/atomic-swap/src/tests.rs | 6 +- substrate/frame/aura/src/mock.rs | 3 +- .../frame/authority-discovery/src/lib.rs | 3 +- substrate/frame/balances/src/tests/mod.rs | 4 +- .../benchmarking/pov/src/benchmarking.rs | 2 + substrate/frame/benchmarking/pov/src/tests.rs | 2 + substrate/frame/benchmarking/src/baseline.rs | 2 + substrate/frame/benchmarking/src/tests.rs | 3 +- .../frame/benchmarking/src/tests_instance.rs | 3 +- substrate/frame/bounties/src/tests.rs | 3 +- substrate/frame/broker/src/mock.rs | 3 +- substrate/frame/child-bounties/src/tests.rs | 3 +- substrate/frame/collective/src/tests.rs | 4 +- .../contracts/mock-network/src/parachain.rs | 3 +- .../contracts/mock-network/src/relay_chain.rs | 3 +- substrate/frame/contracts/src/tests.rs | 3 + .../frame/conviction-voting/src/tests.rs | 3 +- substrate/frame/core-fellowship/src/tests.rs | 4 +- substrate/frame/democracy/src/tests.rs | 4 +- .../election-provider-multi-phase/src/mock.rs | 3 +- .../election-provider-support/src/onchain.rs | 3 +- substrate/frame/elections-phragmen/src/lib.rs | 3 +- substrate/frame/examples/basic/src/tests.rs | 3 +- .../frame/examples/dev-mode/src/tests.rs | 3 +- .../examples/offchain-worker/src/tests.rs | 3 +- substrate/frame/executive/src/lib.rs | 3 +- substrate/frame/glutton/src/mock.rs | 3 +- substrate/frame/grandpa/src/mock.rs | 3 +- substrate/frame/identity/src/tests.rs | 3 +- 
substrate/frame/im-online/src/mock.rs | 3 +- substrate/frame/indices/src/mock.rs | 6 +- .../src/lib.rs | 3 +- substrate/frame/lottery/src/mock.rs | 3 +- substrate/frame/membership/src/lib.rs | 3 +- .../message-queue/src/integration_test.rs | 3 +- substrate/frame/message-queue/src/mock.rs | 4 +- .../frame/nft-fractionalization/src/mock.rs | 4 +- substrate/frame/nfts/src/mock.rs | 3 +- substrate/frame/nicks/src/lib.rs | 3 +- substrate/frame/nis/src/mock.rs | 3 +- .../frame/node-authorization/src/mock.rs | 3 +- .../nomination-pools/benchmarking/src/mock.rs | 3 +- substrate/frame/nomination-pools/src/mock.rs | 3 +- .../nomination-pools/test-staking/src/mock.rs | 3 +- .../frame/offences/benchmarking/src/mock.rs | 3 +- substrate/frame/offences/src/mock.rs | 3 +- substrate/frame/paged-list/src/mock.rs | 6 +- substrate/frame/preimage/src/mock.rs | 3 +- .../frame/ranked-collective/src/tests.rs | 3 +- substrate/frame/recovery/src/mock.rs | 3 +- substrate/frame/referenda/src/mock.rs | 3 +- substrate/frame/remark/src/mock.rs | 6 +- substrate/frame/root-offences/src/mock.rs | 3 +- substrate/frame/safe-mode/src/mock.rs | 3 +- substrate/frame/salary/src/tests.rs | 4 +- substrate/frame/scored-pool/src/mock.rs | 3 +- .../frame/session/benchmarking/src/mock.rs | 3 +- substrate/frame/session/src/mock.rs | 3 +- substrate/frame/society/src/mock.rs | 3 +- substrate/frame/staking/src/mock.rs | 3 +- .../frame/state-trie-migration/src/lib.rs | 3 +- substrate/frame/statement/src/mock.rs | 3 +- substrate/frame/sudo/src/mock.rs | 6 +- .../number_of_pallets_exceeds_tuple_size.rs | 3 +- ...umber_of_pallets_exceeds_tuple_size.stderr | 26 ++--- .../pallet_error_too_large.rs | 3 +- .../pallet_error_too_large.stderr | 18 ++-- .../undefined_call_part.rs | 3 +- .../undefined_call_part.stderr | 14 +-- .../undefined_event_part.rs | 3 +- .../undefined_event_part.stderr | 30 +++--- .../undefined_genesis_config_part.rs | 3 +- .../undefined_genesis_config_part.stderr | 30 +++--- .../undefined_inherent_part.rs | 3 +- .../undefined_inherent_part.stderr | 94 +++++++++---------- .../undefined_origin_part.rs | 3 +- .../undefined_origin_part.stderr | 30 +++--- .../undefined_validate_unsigned_part.rs | 3 +- .../undefined_validate_unsigned_part.stderr | 58 ++++++------ substrate/frame/support/test/tests/pallet.rs | 3 +- .../support/test/tests/pallet_instance.rs | 2 + .../tests/pallet_ui/pass/dev_mode_valid.rs | 3 +- .../pallet_ui/pass/no_std_genesis_config.rs | 3 +- .../support/test/tests/runtime_metadata.rs | 3 +- .../support/test/tests/storage_layers.rs | 5 +- substrate/frame/system/benches/bench.rs | 7 +- .../frame/system/benchmarking/src/mock.rs | 2 + substrate/frame/timestamp/src/mock.rs | 3 +- substrate/frame/tips/src/tests.rs | 4 +- .../asset-conversion-tx-payment/src/mock.rs | 2 + .../asset-tx-payment/src/mock.rs | 2 + .../frame/transaction-payment/src/mock.rs | 2 + substrate/frame/treasury/src/tests.rs | 3 +- substrate/frame/tx-pause/src/mock.rs | 3 +- substrate/frame/uniques/src/mock.rs | 3 +- substrate/frame/utility/src/tests.rs | 3 +- substrate/frame/vesting/src/mock.rs | 3 +- substrate/frame/whitelist/src/mock.rs | 3 +- substrate/utils/frame/rpc/support/src/lib.rs | 3 +- 130 files changed, 433 insertions(+), 261 deletions(-) diff --git a/cumulus/pallets/xcmp-queue/src/mock.rs b/cumulus/pallets/xcmp-queue/src/mock.rs index 7c3a3bd1bd0..755b685d5b8 100644 --- a/cumulus/pallets/xcmp-queue/src/mock.rs +++ b/cumulus/pallets/xcmp-queue/src/mock.rs @@ -19,7 +19,7 @@ use core::marker::PhantomData; use 
cumulus_pallet_parachain_system::AnyRelayNumber; use cumulus_primitives_core::{ChannelInfo, IsSystem, ParaId}; use frame_support::{ - parameter_types, + derive_impl, parameter_types, traits::{ConstU32, Everything, Nothing, OriginTrait}, BoundedSlice, }; @@ -55,6 +55,7 @@ parameter_types! { type AccountId = u64; +#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] impl frame_system::Config for Test { type BaseCallFilter = Everything; type BlockWeights = (); diff --git a/cumulus/parachains/common/src/impls.rs b/cumulus/parachains/common/src/impls.rs index 81d78baba54..50cb1c7f3e8 100644 --- a/cumulus/parachains/common/src/impls.rs +++ b/cumulus/parachains/common/src/impls.rs @@ -122,7 +122,7 @@ impl> ContainsPair for AssetsFr mod tests { use super::*; use frame_support::{ - parameter_types, + derive_impl, parameter_types, traits::{ConstU32, FindAuthor, ValidatorRegistration}, PalletId, }; @@ -155,6 +155,7 @@ mod tests { pub const MaxReserves: u32 = 50; } + #[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] impl frame_system::Config for Test { type BaseCallFilter = frame_support::traits::Everything; type RuntimeOrigin = RuntimeOrigin; diff --git a/cumulus/parachains/pallets/collective-content/src/mock.rs b/cumulus/parachains/pallets/collective-content/src/mock.rs index 2ae5943f332..7a752da71fc 100644 --- a/cumulus/parachains/pallets/collective-content/src/mock.rs +++ b/cumulus/parachains/pallets/collective-content/src/mock.rs @@ -18,7 +18,7 @@ pub use crate as pallet_collective_content; use crate::WeightInfo; use frame_support::{ - ord_parameter_types, parameter_types, + derive_impl, ord_parameter_types, parameter_types, traits::{ConstU32, ConstU64}, weights::Weight, }; @@ -55,6 +55,7 @@ impl pallet_collective_content::Config for Test { type WeightInfo = CCWeightInfo; } +#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] impl frame_system::Config for Test { type BaseCallFilter = (); type BlockWeights = (); diff --git a/cumulus/parachains/runtimes/starters/seedling/src/lib.rs b/cumulus/parachains/runtimes/starters/seedling/src/lib.rs index 7619ed19798..cb868627e79 100644 --- a/cumulus/parachains/runtimes/starters/seedling/src/lib.rs +++ b/cumulus/parachains/runtimes/starters/seedling/src/lib.rs @@ -44,7 +44,7 @@ use sp_version::RuntimeVersion; // A few exports that help ease life for downstream crates. pub use frame_support::{ - construct_runtime, + construct_runtime, derive_impl, dispatch::DispatchClass, genesis_builder_helper::{build_config, create_default_config}, parameter_types, @@ -134,6 +134,8 @@ parameter_types! { .build_or_panic(); pub const SS58Prefix: u8 = 42; } + +#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] impl frame_system::Config for Runtime { /// The identifier used to distinguish between accounts. type AccountId = AccountId; diff --git a/cumulus/parachains/runtimes/starters/shell/src/lib.rs b/cumulus/parachains/runtimes/starters/shell/src/lib.rs index f67c0c19ec6..de95969f71d 100644 --- a/cumulus/parachains/runtimes/starters/shell/src/lib.rs +++ b/cumulus/parachains/runtimes/starters/shell/src/lib.rs @@ -52,7 +52,7 @@ use sp_version::RuntimeVersion; // A few exports that help ease life for downstream crates. 
pub use frame_support::{ - construct_runtime, + construct_runtime, derive_impl, dispatch::DispatchClass, genesis_builder_helper::{build_config, create_default_config}, parameter_types, @@ -143,6 +143,7 @@ parameter_types! { pub const SS58Prefix: u8 = 42; } +#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] impl frame_system::Config for Runtime { /// The identifier used to distinguish between accounts. type AccountId = AccountId; diff --git a/cumulus/parachains/runtimes/testing/penpal/src/lib.rs b/cumulus/parachains/runtimes/testing/penpal/src/lib.rs index b2a04272a4a..9387c454715 100644 --- a/cumulus/parachains/runtimes/testing/penpal/src/lib.rs +++ b/cumulus/parachains/runtimes/testing/penpal/src/lib.rs @@ -35,7 +35,7 @@ pub mod xcm_config; use cumulus_pallet_parachain_system::RelayNumberStrictlyIncreases; use cumulus_primitives_core::{AggregateMessageOrigin, ParaId}; use frame_support::{ - construct_runtime, + construct_runtime, derive_impl, dispatch::DispatchClass, genesis_builder_helper::{build_config, create_default_config}, pallet_prelude::Weight, @@ -323,6 +323,7 @@ parameter_types! { // Configure FRAME pallets to include in runtime. +#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] impl frame_system::Config for Runtime { /// The identifier used to distinguish between accounts. type AccountId = AccountId; diff --git a/cumulus/parachains/runtimes/testing/rococo-parachain/src/lib.rs b/cumulus/parachains/runtimes/testing/rococo-parachain/src/lib.rs index 3a890d7953a..9d2ba98a67c 100644 --- a/cumulus/parachains/runtimes/testing/rococo-parachain/src/lib.rs +++ b/cumulus/parachains/runtimes/testing/rococo-parachain/src/lib.rs @@ -39,7 +39,7 @@ use sp_version::RuntimeVersion; // A few exports that help ease life for downstream crates. pub use frame_support::{ - construct_runtime, + construct_runtime, derive_impl, dispatch::DispatchClass, genesis_builder_helper::{build_config, create_default_config}, match_types, parameter_types, @@ -184,6 +184,7 @@ parameter_types! { pub const SS58Prefix: u8 = 42; } +#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] impl frame_system::Config for Runtime { /// The identifier used to distinguish between accounts. type AccountId = AccountId; diff --git a/polkadot/runtime/common/src/assigned_slots/mod.rs b/polkadot/runtime/common/src/assigned_slots/mod.rs index cb2e5083b0a..f5e3aaef632 100644 --- a/polkadot/runtime/common/src/assigned_slots/mod.rs +++ b/polkadot/runtime/common/src/assigned_slots/mod.rs @@ -636,7 +636,7 @@ mod tests { use crate::{assigned_slots, mock::TestRegistrar, slots}; use ::test_helpers::{dummy_head_data, dummy_validation_code}; - use frame_support::{assert_noop, assert_ok, parameter_types}; + use frame_support::{assert_noop, assert_ok, derive_impl, parameter_types}; use frame_system::EnsureRoot; use pallet_balances; use primitives::BlockNumber; @@ -679,6 +679,8 @@ mod tests { parameter_types! 
{ pub const BlockHashCount: u32 = 250; } + + #[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] impl frame_system::Config for Test { type BaseCallFilter = frame_support::traits::Everything; type BlockWeights = (); diff --git a/polkadot/runtime/common/src/auctions.rs b/polkadot/runtime/common/src/auctions.rs index 267413eb1ba..baa66d83a3f 100644 --- a/polkadot/runtime/common/src/auctions.rs +++ b/polkadot/runtime/common/src/auctions.rs @@ -677,7 +677,8 @@ mod tests { use crate::{auctions, mock::TestRegistrar}; use ::test_helpers::{dummy_hash, dummy_head_data, dummy_validation_code}; use frame_support::{ - assert_noop, assert_ok, assert_storage_noop, ord_parameter_types, parameter_types, + assert_noop, assert_ok, assert_storage_noop, derive_impl, ord_parameter_types, + parameter_types, traits::{ConstU32, EitherOfDiverse, OnFinalize, OnInitialize}, }; use frame_system::{EnsureRoot, EnsureSignedBy}; @@ -705,6 +706,8 @@ mod tests { parameter_types! { pub const BlockHashCount: u32 = 250; } + + #[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] impl frame_system::Config for Test { type BaseCallFilter = frame_support::traits::Everything; type BlockWeights = (); diff --git a/polkadot/runtime/common/src/claims.rs b/polkadot/runtime/common/src/claims.rs index 548adc6fbd5..4f04a79be55 100644 --- a/polkadot/runtime/common/src/claims.rs +++ b/polkadot/runtime/common/src/claims.rs @@ -710,7 +710,7 @@ mod tests { use crate::claims; use claims::Call as ClaimsCall; use frame_support::{ - assert_err, assert_noop, assert_ok, + assert_err, assert_noop, assert_ok, derive_impl, dispatch::{GetDispatchInfo, Pays}, ord_parameter_types, parameter_types, traits::{ConstU32, ExistenceRequirement, WithdrawReasons}, @@ -739,6 +739,8 @@ mod tests { parameter_types! 
{ pub const BlockHashCount: u32 = 250; } + + #[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] impl frame_system::Config for Test { type BaseCallFilter = frame_support::traits::Everything; type BlockWeights = (); diff --git a/polkadot/runtime/common/src/crowdloan/mod.rs b/polkadot/runtime/common/src/crowdloan/mod.rs index f67fc12a67f..77ef406e579 100644 --- a/polkadot/runtime/common/src/crowdloan/mod.rs +++ b/polkadot/runtime/common/src/crowdloan/mod.rs @@ -863,7 +863,7 @@ mod tests { use super::*; use frame_support::{ - assert_noop, assert_ok, parameter_types, + assert_noop, assert_ok, derive_impl, parameter_types, traits::{ConstU32, OnFinalize, OnInitialize}, }; use primitives::Id as ParaId; @@ -900,6 +900,7 @@ mod tests { type BlockNumber = u64; + #[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] impl frame_system::Config for Test { type BaseCallFilter = frame_support::traits::Everything; type BlockWeights = (); diff --git a/polkadot/runtime/common/src/impls.rs b/polkadot/runtime/common/src/impls.rs index e50ffb634b3..60e631a03ee 100644 --- a/polkadot/runtime/common/src/impls.rs +++ b/polkadot/runtime/common/src/impls.rs @@ -192,6 +192,7 @@ pub mod benchmarks { mod tests { use super::*; use frame_support::{ + derive_impl, dispatch::DispatchClass, parameter_types, traits::{ @@ -237,6 +238,7 @@ mod tests { pub const AvailableBlockRatio: Perbill = Perbill::one(); } + #[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] impl frame_system::Config for Test { type BaseCallFilter = frame_support::traits::Everything; type RuntimeOrigin = RuntimeOrigin; diff --git a/polkadot/runtime/common/src/integration_tests.rs b/polkadot/runtime/common/src/integration_tests.rs index 793f75e79cd..b0d277a702d 100644 --- a/polkadot/runtime/common/src/integration_tests.rs +++ b/polkadot/runtime/common/src/integration_tests.rs @@ -25,7 +25,7 @@ use crate::{ traits::{AuctionStatus, Auctioneer, Leaser, Registrar as RegistrarT}, }; use frame_support::{ - assert_noop, assert_ok, parameter_types, + assert_noop, assert_ok, derive_impl, parameter_types, traits::{ConstU32, Currency, OnFinalize, OnInitialize}, weights::Weight, PalletId, @@ -114,6 +114,7 @@ parameter_types! 
{ ); } +#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] impl frame_system::Config for Test { type BaseCallFilter = frame_support::traits::Everything; type BlockWeights = BlockWeights; diff --git a/polkadot/runtime/common/src/paras_registrar/mod.rs b/polkadot/runtime/common/src/paras_registrar/mod.rs index 2d33cf28993..12376ae6f1f 100644 --- a/polkadot/runtime/common/src/paras_registrar/mod.rs +++ b/polkadot/runtime/common/src/paras_registrar/mod.rs @@ -699,7 +699,7 @@ mod tests { mock::conclude_pvf_checking, paras_registrar, traits::Registrar as RegistrarTrait, }; use frame_support::{ - assert_noop, assert_ok, + assert_noop, assert_ok, derive_impl, error::BadOrigin, parameter_types, traits::{ConstU32, OnFinalize, OnInitialize}, @@ -751,6 +751,7 @@ mod tests { limits::BlockLength::max_with_normal_ratio(4 * 1024 * 1024, NORMAL_RATIO); } + #[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] impl frame_system::Config for Test { type BaseCallFilter = frame_support::traits::Everything; type RuntimeOrigin = RuntimeOrigin; diff --git a/polkadot/runtime/common/src/purchase.rs b/polkadot/runtime/common/src/purchase.rs index bc95483dd7e..06b0a0a0211 100644 --- a/polkadot/runtime/common/src/purchase.rs +++ b/polkadot/runtime/common/src/purchase.rs @@ -484,7 +484,7 @@ mod tests { // or public keys. `u64` is used as the `AccountId` and no `Signature`s are required. use crate::purchase; use frame_support::{ - assert_noop, assert_ok, ord_parameter_types, parameter_types, + assert_noop, assert_ok, derive_impl, ord_parameter_types, parameter_types, traits::{Currency, WithdrawReasons}, }; use sp_runtime::{ @@ -511,6 +511,8 @@ mod tests { parameter_types! { pub const BlockHashCount: u32 = 250; } + + #[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] impl frame_system::Config for Test { type BaseCallFilter = frame_support::traits::Everything; type BlockWeights = (); diff --git a/polkadot/runtime/common/src/slots/mod.rs b/polkadot/runtime/common/src/slots/mod.rs index 01f6365b791..c3aaf8b51b8 100644 --- a/polkadot/runtime/common/src/slots/mod.rs +++ b/polkadot/runtime/common/src/slots/mod.rs @@ -505,7 +505,7 @@ mod tests { use crate::{mock::TestRegistrar, slots}; use ::test_helpers::{dummy_head_data, dummy_validation_code}; - use frame_support::{assert_noop, assert_ok, parameter_types}; + use frame_support::{assert_noop, assert_ok, derive_impl, parameter_types}; use frame_system::EnsureRoot; use pallet_balances; use primitives::BlockNumber; @@ -529,6 +529,8 @@ mod tests { parameter_types! { pub const BlockHashCount: u32 = 250; } + + #[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] impl frame_system::Config for Test { type BaseCallFilter = frame_support::traits::Everything; type BlockWeights = (); diff --git a/polkadot/runtime/parachains/src/mock.rs b/polkadot/runtime/parachains/src/mock.rs index 9df54bf29d3..222942922f9 100644 --- a/polkadot/runtime/parachains/src/mock.rs +++ b/polkadot/runtime/parachains/src/mock.rs @@ -25,7 +25,7 @@ use crate::{ }; use frame_support::{ - assert_ok, parameter_types, + assert_ok, derive_impl, parameter_types, traits::{ Currency, ProcessMessage, ProcessMessageError, ValidatorSet, ValidatorSetWithIdentification, }, @@ -94,6 +94,7 @@ parameter_types! 
{ pub type AccountId = u64; +#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] impl frame_system::Config for Test { type BaseCallFilter = frame_support::traits::Everything; type BlockWeights = BlockWeights; diff --git a/polkadot/xcm/pallet-xcm-benchmarks/src/fungible/mock.rs b/polkadot/xcm/pallet-xcm-benchmarks/src/fungible/mock.rs index 9adc706fc18..4d566fd585d 100644 --- a/polkadot/xcm/pallet-xcm-benchmarks/src/fungible/mock.rs +++ b/polkadot/xcm/pallet-xcm-benchmarks/src/fungible/mock.rs @@ -45,6 +45,8 @@ parameter_types! { pub BlockWeights: frame_system::limits::BlockWeights = frame_system::limits::BlockWeights::simple_max(Weight::from_parts(1024, u64::MAX)); } + +#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] impl frame_system::Config for Test { type BaseCallFilter = Everything; type BlockWeights = (); diff --git a/polkadot/xcm/pallet-xcm-benchmarks/src/generic/mock.rs b/polkadot/xcm/pallet-xcm-benchmarks/src/generic/mock.rs index 710ff0d8019..6efd2304e28 100644 --- a/polkadot/xcm/pallet-xcm-benchmarks/src/generic/mock.rs +++ b/polkadot/xcm/pallet-xcm-benchmarks/src/generic/mock.rs @@ -51,6 +51,7 @@ parameter_types! { frame_system::limits::BlockWeights::simple_max(Weight::from_parts(1024, u64::MAX)); } +#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] impl frame_system::Config for Test { type BaseCallFilter = Everything; type BlockWeights = (); diff --git a/polkadot/xcm/pallet-xcm/src/mock.rs b/polkadot/xcm/pallet-xcm/src/mock.rs index 0b0f795100c..606d51bb8bc 100644 --- a/polkadot/xcm/pallet-xcm/src/mock.rs +++ b/polkadot/xcm/pallet-xcm/src/mock.rs @@ -16,7 +16,7 @@ use codec::Encode; use frame_support::{ - construct_runtime, match_types, parameter_types, + construct_runtime, derive_impl, match_types, parameter_types, traits::{ AsEnsureOriginWithArg, ConstU128, ConstU32, Contains, Equals, Everything, EverythingBut, Nothing, @@ -246,6 +246,7 @@ parameter_types! { pub const BlockHashCount: u64 = 250; } +#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] impl frame_system::Config for Test { type RuntimeOrigin = RuntimeOrigin; type RuntimeCall = RuntimeCall; diff --git a/polkadot/xcm/procedural/tests/ui.rs b/polkadot/xcm/procedural/tests/ui.rs index a30f4d7dee5..b3469b520eb 100644 --- a/polkadot/xcm/procedural/tests/ui.rs +++ b/polkadot/xcm/procedural/tests/ui.rs @@ -21,7 +21,7 @@ fn ui() { // Only run the ui tests when `RUN_UI_TESTS` is set. if std::env::var("RUN_UI_TESTS").is_err() { - return; + return } // As trybuild is using `cargo check`, we don't need the real WASM binaries. diff --git a/polkadot/xcm/xcm-builder/tests/mock/mod.rs b/polkadot/xcm/xcm-builder/tests/mock/mod.rs index 4f183c7a15b..968b294c6a4 100644 --- a/polkadot/xcm/xcm-builder/tests/mock/mod.rs +++ b/polkadot/xcm/xcm-builder/tests/mock/mod.rs @@ -15,7 +15,7 @@ // along with Polkadot. If not, see . use frame_support::{ - construct_runtime, parameter_types, + construct_runtime, derive_impl, parameter_types, traits::{ConstU32, Everything, Nothing}, weights::Weight, }; @@ -76,6 +76,7 @@ parameter_types! 
{ pub const BlockHashCount: u64 = 250; } +#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] impl frame_system::Config for Runtime { type RuntimeOrigin = RuntimeOrigin; type RuntimeCall = RuntimeCall; diff --git a/polkadot/xcm/xcm-simulator/example/src/parachain.rs b/polkadot/xcm/xcm-simulator/example/src/parachain.rs index 9f0411970ce..951e946372d 100644 --- a/polkadot/xcm/xcm-simulator/example/src/parachain.rs +++ b/polkadot/xcm/xcm-simulator/example/src/parachain.rs @@ -19,7 +19,7 @@ use codec::{Decode, Encode}; use core::marker::PhantomData; use frame_support::{ - construct_runtime, parameter_types, + construct_runtime, derive_impl, parameter_types, traits::{ContainsPair, EnsureOrigin, EnsureOriginWithArg, Everything, EverythingBut, Nothing}, weights::{constants::WEIGHT_REF_TIME_PER_SECOND, Weight}, }; @@ -63,6 +63,7 @@ parameter_types! { pub const BlockHashCount: u64 = 250; } +#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] impl frame_system::Config for Runtime { type RuntimeOrigin = RuntimeOrigin; type RuntimeCall = RuntimeCall; diff --git a/polkadot/xcm/xcm-simulator/example/src/relay_chain.rs b/polkadot/xcm/xcm-simulator/example/src/relay_chain.rs index bdd7ff6d3ea..20070d192b5 100644 --- a/polkadot/xcm/xcm-simulator/example/src/relay_chain.rs +++ b/polkadot/xcm/xcm-simulator/example/src/relay_chain.rs @@ -17,7 +17,7 @@ //! Relay chain runtime mock. use frame_support::{ - construct_runtime, parameter_types, + construct_runtime, derive_impl, parameter_types, traits::{AsEnsureOriginWithArg, Everything, Nothing, ProcessMessage, ProcessMessageError}, weights::{Weight, WeightMeter}, }; @@ -49,6 +49,7 @@ parameter_types! { pub const BlockHashCount: u64 = 250; } +#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] impl frame_system::Config for Runtime { type RuntimeOrigin = RuntimeOrigin; type RuntimeCall = RuntimeCall; diff --git a/polkadot/xcm/xcm-simulator/fuzzer/src/parachain.rs b/polkadot/xcm/xcm-simulator/fuzzer/src/parachain.rs index 41234837aca..0aa1b54f5e7 100644 --- a/polkadot/xcm/xcm-simulator/fuzzer/src/parachain.rs +++ b/polkadot/xcm/xcm-simulator/fuzzer/src/parachain.rs @@ -18,7 +18,7 @@ use codec::{Decode, Encode}; use frame_support::{ - construct_runtime, parameter_types, + construct_runtime, derive_impl, parameter_types, traits::{Everything, Nothing}, weights::{constants::WEIGHT_REF_TIME_PER_SECOND, Weight}, }; @@ -52,6 +52,7 @@ parameter_types! { pub const BlockHashCount: u64 = 250; } +#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] impl frame_system::Config for Runtime { type RuntimeOrigin = RuntimeOrigin; type RuntimeCall = RuntimeCall; diff --git a/polkadot/xcm/xcm-simulator/fuzzer/src/relay_chain.rs b/polkadot/xcm/xcm-simulator/fuzzer/src/relay_chain.rs index c9a57db970a..085773f3073 100644 --- a/polkadot/xcm/xcm-simulator/fuzzer/src/relay_chain.rs +++ b/polkadot/xcm/xcm-simulator/fuzzer/src/relay_chain.rs @@ -17,7 +17,7 @@ //! Relay chain runtime mock. use frame_support::{ - construct_runtime, parameter_types, + construct_runtime, derive_impl, parameter_types, traits::{Everything, Nothing, ProcessMessage, ProcessMessageError}, weights::{Weight, WeightMeter}, }; @@ -48,6 +48,7 @@ parameter_types! 
{ pub const BlockHashCount: u64 = 250; } +#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] impl frame_system::Config for Runtime { type RuntimeOrigin = RuntimeOrigin; type RuntimeCall = RuntimeCall; diff --git a/substrate/bin/node-template/pallets/template/src/mock.rs b/substrate/bin/node-template/pallets/template/src/mock.rs index 244ae1b3785..8346461e6ed 100644 --- a/substrate/bin/node-template/pallets/template/src/mock.rs +++ b/substrate/bin/node-template/pallets/template/src/mock.rs @@ -1,5 +1,8 @@ use crate as pallet_template; -use frame_support::traits::{ConstU16, ConstU64}; +use frame_support::{ + derive_impl, + traits::{ConstU16, ConstU64}, +}; use sp_core::H256; use sp_runtime::{ traits::{BlakeTwo256, IdentityLookup}, @@ -17,6 +20,7 @@ frame_support::construct_runtime!( } ); +#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] impl frame_system::Config for Test { type BaseCallFilter = frame_support::traits::Everything; type BlockWeights = (); diff --git a/substrate/client/rpc-spec-v2/src/chain_head/subscription/inner.rs b/substrate/client/rpc-spec-v2/src/chain_head/subscription/inner.rs index abd42ad9678..2b250f3dc2c 100644 --- a/substrate/client/rpc-spec-v2/src/chain_head/subscription/inner.rs +++ b/substrate/client/rpc-spec-v2/src/chain_head/subscription/inner.rs @@ -763,7 +763,7 @@ impl> SubscriptionsInner { // blocks. for hash in hashes.clone() { if !sub.contains_block(hash) { - return Err(SubscriptionManagementError::BlockHashAbsent); + return Err(SubscriptionManagementError::BlockHashAbsent) } } diff --git a/substrate/frame/asset-conversion/src/mock.rs b/substrate/frame/asset-conversion/src/mock.rs index 4eee701f193..c84263b0796 100644 --- a/substrate/frame/asset-conversion/src/mock.rs +++ b/substrate/frame/asset-conversion/src/mock.rs @@ -21,7 +21,7 @@ use super::*; use crate as pallet_asset_conversion; use frame_support::{ - construct_runtime, + construct_runtime, derive_impl, instances::{Instance1, Instance2}, ord_parameter_types, parameter_types, traits::{AsEnsureOriginWithArg, ConstU128, ConstU32, ConstU64}, @@ -48,6 +48,7 @@ construct_runtime!( } ); +#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] impl frame_system::Config for Test { type BaseCallFilter = frame_support::traits::Everything; type BlockWeights = (); diff --git a/substrate/frame/asset-rate/src/mock.rs b/substrate/frame/asset-rate/src/mock.rs index 9ca0f0f3cc3..041f3740952 100644 --- a/substrate/frame/asset-rate/src/mock.rs +++ b/substrate/frame/asset-rate/src/mock.rs @@ -18,7 +18,10 @@ //! The crate's mock. 
use crate as pallet_asset_rate; -use frame_support::traits::{ConstU16, ConstU64}; +use frame_support::{ + derive_impl, + traits::{ConstU16, ConstU64}, +}; use sp_core::H256; use sp_runtime::{ traits::{BlakeTwo256, IdentityLookup}, @@ -36,6 +39,7 @@ frame_support::construct_runtime!( } ); +#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] impl frame_system::Config for Test { type BaseCallFilter = frame_support::traits::Everything; type BlockWeights = (); diff --git a/substrate/frame/assets/src/mock.rs b/substrate/frame/assets/src/mock.rs index 2c2203bcdad..a4d85b64922 100644 --- a/substrate/frame/assets/src/mock.rs +++ b/substrate/frame/assets/src/mock.rs @@ -22,7 +22,7 @@ use crate as pallet_assets; use codec::Encode; use frame_support::{ - construct_runtime, parameter_types, + construct_runtime, derive_impl, parameter_types, traits::{AsEnsureOriginWithArg, ConstU32, ConstU64}, }; use sp_core::H256; @@ -46,6 +46,7 @@ construct_runtime!( type AccountId = u64; type AssetId = u32; +#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] impl frame_system::Config for Test { type BaseCallFilter = frame_support::traits::Everything; type BlockWeights = (); diff --git a/substrate/frame/atomic-swap/src/tests.rs b/substrate/frame/atomic-swap/src/tests.rs index 92eb9a04458..7e2f22b1836 100644 --- a/substrate/frame/atomic-swap/src/tests.rs +++ b/substrate/frame/atomic-swap/src/tests.rs @@ -20,7 +20,10 @@ use super::*; use crate as pallet_atomic_swap; -use frame_support::traits::{ConstU32, ConstU64}; +use frame_support::{ + derive_impl, + traits::{ConstU32, ConstU64}, +}; use sp_core::H256; use sp_runtime::{ traits::{BlakeTwo256, IdentityLookup}, @@ -38,6 +41,7 @@ frame_support::construct_runtime!( } ); +#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] impl frame_system::Config for Test { type BaseCallFilter = frame_support::traits::Everything; type BlockWeights = (); diff --git a/substrate/frame/aura/src/mock.rs b/substrate/frame/aura/src/mock.rs index 39b798c2f68..14b87089ce3 100644 --- a/substrate/frame/aura/src/mock.rs +++ b/substrate/frame/aura/src/mock.rs @@ -21,7 +21,7 @@ use crate as pallet_aura; use frame_support::{ - parameter_types, + derive_impl, parameter_types, traits::{ConstU32, ConstU64, DisabledValidators}, }; use sp_consensus_aura::{ed25519::AuthorityId, AuthorityIndex}; @@ -41,6 +41,7 @@ frame_support::construct_runtime!( } ); +#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] impl frame_system::Config for Test { type BaseCallFilter = frame_support::traits::Everything; type BlockWeights = (); diff --git a/substrate/frame/authority-discovery/src/lib.rs b/substrate/frame/authority-discovery/src/lib.rs index 87b743ae196..3044b41e31d 100644 --- a/substrate/frame/authority-discovery/src/lib.rs +++ b/substrate/frame/authority-discovery/src/lib.rs @@ -169,7 +169,7 @@ mod tests { use super::*; use crate as pallet_authority_discovery; use frame_support::{ - parameter_types, + derive_impl, parameter_types, traits::{ConstU32, ConstU64}, }; use sp_application_crypto::Pair; @@ -225,6 +225,7 @@ mod tests { pub const Offset: BlockNumber = 0; } + #[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] impl frame_system::Config for Test { type BaseCallFilter = frame_support::traits::Everything; type BlockWeights = (); diff --git a/substrate/frame/balances/src/tests/mod.rs 
b/substrate/frame/balances/src/tests/mod.rs index dd3e5b7a85a..8e834483cbe 100644 --- a/substrate/frame/balances/src/tests/mod.rs +++ b/substrate/frame/balances/src/tests/mod.rs @@ -22,7 +22,7 @@ use crate::{self as pallet_balances, AccountData, Config, CreditOf, Error, Pallet}; use codec::{Decode, Encode, MaxEncodedLen}; use frame_support::{ - assert_err, assert_noop, assert_ok, assert_storage_noop, + assert_err, assert_noop, assert_ok, assert_storage_noop, derive_impl, dispatch::{DispatchInfo, GetDispatchInfo}, parameter_types, traits::{ @@ -90,6 +90,8 @@ parameter_types! { ); pub static ExistentialDeposit: u64 = 1; } + +#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] impl frame_system::Config for Test { type BaseCallFilter = frame_support::traits::Everything; type BlockWeights = BlockWeights; diff --git a/substrate/frame/benchmarking/pov/src/benchmarking.rs b/substrate/frame/benchmarking/pov/src/benchmarking.rs index 473947b171a..a24b772ade0 100644 --- a/substrate/frame/benchmarking/pov/src/benchmarking.rs +++ b/substrate/frame/benchmarking/pov/src/benchmarking.rs @@ -339,6 +339,7 @@ frame_benchmarking::benchmarks! { #[cfg(test)] mod mock { + use frame_support::derive_impl; use sp_runtime::{testing::H256, BuildStorage}; type AccountId = u64; @@ -354,6 +355,7 @@ mod mock { } ); + #[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] impl frame_system::Config for Test { type BaseCallFilter = frame_support::traits::Everything; type BlockWeights = (); diff --git a/substrate/frame/benchmarking/pov/src/tests.rs b/substrate/frame/benchmarking/pov/src/tests.rs index f09e37a5288..dda29c071de 100644 --- a/substrate/frame/benchmarking/pov/src/tests.rs +++ b/substrate/frame/benchmarking/pov/src/tests.rs @@ -162,6 +162,7 @@ fn noop_is_free() { } mod mock { + use frame_support::derive_impl; use sp_runtime::testing::H256; type Block = frame_system::mocking::MockBlock; @@ -174,6 +175,7 @@ mod mock { } ); + #[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] impl frame_system::Config for Test { type BaseCallFilter = frame_support::traits::Everything; type BlockWeights = (); diff --git a/substrate/frame/benchmarking/src/baseline.rs b/substrate/frame/benchmarking/src/baseline.rs index 6cd23ebe028..6451284e012 100644 --- a/substrate/frame/benchmarking/src/baseline.rs +++ b/substrate/frame/benchmarking/src/baseline.rs @@ -110,6 +110,7 @@ benchmarks! 
{ #[cfg(test)] pub mod mock { + use frame_support::derive_impl; use sp_runtime::{testing::H256, BuildStorage}; type AccountId = u64; @@ -124,6 +125,7 @@ pub mod mock { } ); + #[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] impl frame_system::Config for Test { type BaseCallFilter = frame_support::traits::Everything; type BlockWeights = (); diff --git a/substrate/frame/benchmarking/src/tests.rs b/substrate/frame/benchmarking/src/tests.rs index e5bacbdb236..7d6cfaad609 100644 --- a/substrate/frame/benchmarking/src/tests.rs +++ b/substrate/frame/benchmarking/src/tests.rs @@ -19,7 +19,7 @@ #![cfg(test)] -use frame_support::{parameter_types, traits::ConstU32}; +use frame_support::{derive_impl, parameter_types, traits::ConstU32}; use sp_runtime::{ testing::H256, traits::{BlakeTwo256, IdentityLookup}, @@ -75,6 +75,7 @@ frame_support::construct_runtime!( } ); +#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] impl frame_system::Config for Test { type BaseCallFilter = frame_support::traits::Everything; type BlockWeights = (); diff --git a/substrate/frame/benchmarking/src/tests_instance.rs b/substrate/frame/benchmarking/src/tests_instance.rs index f2c721c8114..55010840896 100644 --- a/substrate/frame/benchmarking/src/tests_instance.rs +++ b/substrate/frame/benchmarking/src/tests_instance.rs @@ -19,7 +19,7 @@ #![cfg(test)] -use frame_support::traits::ConstU32; +use frame_support::{derive_impl, traits::ConstU32}; use sp_runtime::{ testing::H256, traits::{BlakeTwo256, IdentityLookup}, @@ -85,6 +85,7 @@ frame_support::construct_runtime!( } ); +#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] impl frame_system::Config for Test { type BaseCallFilter = frame_support::traits::Everything; type BlockWeights = (); diff --git a/substrate/frame/bounties/src/tests.rs b/substrate/frame/bounties/src/tests.rs index 233e41b474c..22e608cce63 100644 --- a/substrate/frame/bounties/src/tests.rs +++ b/substrate/frame/bounties/src/tests.rs @@ -23,7 +23,7 @@ use super::*; use crate as pallet_bounties; use frame_support::{ - assert_noop, assert_ok, parameter_types, + assert_noop, assert_ok, derive_impl, parameter_types, traits::{ tokens::{PayFromAccount, UnityAssetBalanceConversion}, ConstU32, ConstU64, OnInitialize, @@ -59,6 +59,7 @@ parameter_types! 
{ type Balance = u64; +#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] impl frame_system::Config for Test { type BaseCallFilter = frame_support::traits::Everything; type BlockWeights = (); diff --git a/substrate/frame/broker/src/mock.rs b/substrate/frame/broker/src/mock.rs index cab6b7389c0..8b8cfa55abc 100644 --- a/substrate/frame/broker/src/mock.rs +++ b/substrate/frame/broker/src/mock.rs @@ -19,7 +19,7 @@ use crate::{test_fungibles::TestFungibles, *}; use frame_support::{ - assert_ok, ensure, ord_parameter_types, parameter_types, + assert_ok, derive_impl, ensure, ord_parameter_types, parameter_types, traits::{ fungible::{Balanced, Credit, Inspect, ItemOf, Mutate}, nonfungible::Inspect as NftInspect, @@ -47,6 +47,7 @@ frame_support::construct_runtime!( } ); +#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] impl frame_system::Config for Test { type BaseCallFilter = frame_support::traits::Everything; type BlockWeights = (); diff --git a/substrate/frame/child-bounties/src/tests.rs b/substrate/frame/child-bounties/src/tests.rs index 46f8fa65dd3..7de45c73127 100644 --- a/substrate/frame/child-bounties/src/tests.rs +++ b/substrate/frame/child-bounties/src/tests.rs @@ -23,7 +23,7 @@ use super::*; use crate as pallet_child_bounties; use frame_support::{ - assert_noop, assert_ok, parameter_types, + assert_noop, assert_ok, derive_impl, parameter_types, traits::{ tokens::{PayFromAccount, UnityAssetBalanceConversion}, ConstU32, ConstU64, OnInitialize, @@ -62,6 +62,7 @@ parameter_types! { type Balance = u64; +#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] impl frame_system::Config for Test { type BaseCallFilter = frame_support::traits::Everything; type BlockWeights = (); diff --git a/substrate/frame/collective/src/tests.rs b/substrate/frame/collective/src/tests.rs index 86b85e07a8b..06a91cf6fe9 100644 --- a/substrate/frame/collective/src/tests.rs +++ b/substrate/frame/collective/src/tests.rs @@ -18,7 +18,7 @@ use super::{Event as CollectiveEvent, *}; use crate as pallet_collective; use frame_support::{ - assert_noop, assert_ok, + assert_noop, assert_ok, derive_impl, dispatch::Pays, parameter_types, traits::{ConstU32, ConstU64, StorageVersion}, @@ -90,6 +90,8 @@ parameter_types! { frame_system::limits::BlockWeights::simple_max(Weight::MAX); pub static MaxProposalWeight: Weight = default_max_proposal_weight(); } + +#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] impl frame_system::Config for Test { type BaseCallFilter = frame_support::traits::Everything; type BlockWeights = BlockWeights; diff --git a/substrate/frame/contracts/mock-network/src/parachain.rs b/substrate/frame/contracts/mock-network/src/parachain.rs index 1465b02f903..2ef579276ae 100644 --- a/substrate/frame/contracts/mock-network/src/parachain.rs +++ b/substrate/frame/contracts/mock-network/src/parachain.rs @@ -23,7 +23,7 @@ use crate::{ }; use core::marker::PhantomData; use frame_support::{ - construct_runtime, parameter_types, + construct_runtime, derive_impl, parameter_types, traits::{AsEnsureOriginWithArg, Contains, ContainsPair, Everything, EverythingBut, Nothing}, weights::{ constants::{WEIGHT_PROOF_SIZE_PER_MB, WEIGHT_REF_TIME_PER_SECOND}, @@ -53,6 +53,7 @@ parameter_types! 
{ pub const BlockHashCount: u64 = 250; } +#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] impl frame_system::Config for Runtime { type RuntimeOrigin = RuntimeOrigin; type RuntimeCall = RuntimeCall; diff --git a/substrate/frame/contracts/mock-network/src/relay_chain.rs b/substrate/frame/contracts/mock-network/src/relay_chain.rs index c59c8e4bfa8..17e36eada25 100644 --- a/substrate/frame/contracts/mock-network/src/relay_chain.rs +++ b/substrate/frame/contracts/mock-network/src/relay_chain.rs @@ -17,7 +17,7 @@ //! Relay chain runtime mock. use frame_support::{ - construct_runtime, parameter_types, + construct_runtime, derive_impl, parameter_types, traits::{Contains, Everything, Nothing}, weights::Weight, }; @@ -47,6 +47,7 @@ parameter_types! { pub const BlockHashCount: u64 = 250; } +#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] impl frame_system::Config for Runtime { type RuntimeOrigin = RuntimeOrigin; type RuntimeCall = RuntimeCall; diff --git a/substrate/frame/contracts/src/tests.rs b/substrate/frame/contracts/src/tests.rs index 76fd012852a..79ad451964d 100644 --- a/substrate/frame/contracts/src/tests.rs +++ b/substrate/frame/contracts/src/tests.rs @@ -42,6 +42,7 @@ use assert_matches::assert_matches; use codec::Encode; use frame_support::{ assert_err, assert_err_ignore_postinfo, assert_err_with_weight, assert_noop, assert_ok, + derive_impl, dispatch::{DispatchErrorWithPostInfo, PostDispatchInfo}, parameter_types, storage::child, @@ -332,6 +333,8 @@ parameter_types! { ); pub static ExistentialDeposit: u64 = 1; } + +#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] impl frame_system::Config for Test { type BaseCallFilter = frame_support::traits::Everything; type BlockWeights = BlockWeights; diff --git a/substrate/frame/conviction-voting/src/tests.rs b/substrate/frame/conviction-voting/src/tests.rs index 850b98b218b..371d0364384 100644 --- a/substrate/frame/conviction-voting/src/tests.rs +++ b/substrate/frame/conviction-voting/src/tests.rs @@ -20,7 +20,7 @@ use std::collections::BTreeMap; use frame_support::{ - assert_noop, assert_ok, parameter_types, + assert_noop, assert_ok, derive_impl, parameter_types, traits::{ConstU32, ConstU64, Contains, Polling, VoteTally}, }; use sp_core::H256; @@ -51,6 +51,7 @@ impl Contains for BaseFilter { } } +#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] impl frame_system::Config for Test { type BaseCallFilter = BaseFilter; type BlockWeights = (); diff --git a/substrate/frame/core-fellowship/src/tests.rs b/substrate/frame/core-fellowship/src/tests.rs index a02c010718c..9ac381ab7f5 100644 --- a/substrate/frame/core-fellowship/src/tests.rs +++ b/substrate/frame/core-fellowship/src/tests.rs @@ -20,7 +20,7 @@ use std::collections::BTreeMap; use frame_support::{ - assert_noop, assert_ok, ord_parameter_types, + assert_noop, assert_ok, derive_impl, ord_parameter_types, pallet_prelude::Weight, parameter_types, traits::{tokens::GetSalary, ConstU32, ConstU64, Everything, IsInVec, TryMapSuccess}, @@ -50,6 +50,8 @@ parameter_types! 
{ pub BlockWeights: frame_system::limits::BlockWeights = frame_system::limits::BlockWeights::simple_max(Weight::from_parts(1_000_000, u64::max_value())); } + +#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] impl frame_system::Config for Test { type BaseCallFilter = Everything; type BlockWeights = (); diff --git a/substrate/frame/democracy/src/tests.rs b/substrate/frame/democracy/src/tests.rs index 07a0ef5c3d5..00d8fedca0c 100644 --- a/substrate/frame/democracy/src/tests.rs +++ b/substrate/frame/democracy/src/tests.rs @@ -20,7 +20,7 @@ use super::*; use crate as pallet_democracy; use frame_support::{ - assert_noop, assert_ok, ord_parameter_types, parameter_types, + assert_noop, assert_ok, derive_impl, ord_parameter_types, parameter_types, traits::{ ConstU32, ConstU64, Contains, EqualPrivilegeOnly, OnInitialize, SortedMembers, StorePreimage, @@ -77,6 +77,8 @@ parameter_types! { Weight::from_parts(frame_support::weights::constants::WEIGHT_REF_TIME_PER_SECOND, u64::MAX), ); } + +#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] impl frame_system::Config for Test { type BaseCallFilter = BaseFilter; type BlockWeights = BlockWeights; diff --git a/substrate/frame/election-provider-multi-phase/src/mock.rs b/substrate/frame/election-provider-multi-phase/src/mock.rs index 92144351e8f..15a50165ff3 100644 --- a/substrate/frame/election-provider-multi-phase/src/mock.rs +++ b/substrate/frame/election-provider-multi-phase/src/mock.rs @@ -21,7 +21,7 @@ use frame_election_provider_support::{ bounds::{DataProviderBounds, ElectionBounds}, data_provider, onchain, ElectionDataProvider, NposSolution, SequentialPhragmen, }; -pub use frame_support::{assert_noop, assert_ok, pallet_prelude::GetDefault}; +pub use frame_support::{assert_noop, assert_ok, derive_impl, pallet_prelude::GetDefault}; use frame_support::{ parameter_types, traits::{ConstU32, Hooks}, @@ -204,6 +204,7 @@ pub fn witness() -> SolutionOrSnapshotSize { .unwrap_or_default() } +#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] impl frame_system::Config for Runtime { type SS58Prefix = (); type BaseCallFilter = frame_support::traits::Everything; diff --git a/substrate/frame/election-provider-support/src/onchain.rs b/substrate/frame/election-provider-support/src/onchain.rs index 8ac245a360b..412aac45475 100644 --- a/substrate/frame/election-provider-support/src/onchain.rs +++ b/substrate/frame/election-provider-support/src/onchain.rs @@ -182,7 +182,7 @@ impl ElectionProvider for OnChainExecution { mod tests { use super::*; use crate::{ElectionProvider, PhragMMS, SequentialPhragmen}; - use frame_support::{assert_noop, parameter_types}; + use frame_support::{assert_noop, derive_impl, parameter_types}; use sp_npos_elections::Support; use sp_runtime::Perbill; type AccountId = u64; @@ -200,6 +200,7 @@ mod tests { } ); + #[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] impl frame_system::Config for Runtime { type SS58Prefix = (); type BaseCallFilter = frame_support::traits::Everything; diff --git a/substrate/frame/elections-phragmen/src/lib.rs b/substrate/frame/elections-phragmen/src/lib.rs index e4c56e68f9a..5e50027e344 100644 --- a/substrate/frame/elections-phragmen/src/lib.rs +++ b/substrate/frame/elections-phragmen/src/lib.rs @@ -1307,7 +1307,7 @@ mod tests { use super::*; use crate as elections_phragmen; use frame_support::{ - assert_noop, assert_ok, + assert_noop, 
assert_ok, derive_impl, dispatch::DispatchResultWithPostInfo, parameter_types, traits::{ConstU32, ConstU64, OnInitialize}, @@ -1321,6 +1321,7 @@ mod tests { }; use substrate_test_utils::assert_eq_uvec; + #[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] impl frame_system::Config for Test { type BaseCallFilter = frame_support::traits::Everything; type BlockWeights = (); diff --git a/substrate/frame/examples/basic/src/tests.rs b/substrate/frame/examples/basic/src/tests.rs index c7b5b9e9a84..e00b1ac01b3 100644 --- a/substrate/frame/examples/basic/src/tests.rs +++ b/substrate/frame/examples/basic/src/tests.rs @@ -19,7 +19,7 @@ use crate::*; use frame_support::{ - assert_ok, + assert_ok, derive_impl, dispatch::{DispatchInfo, GetDispatchInfo}, traits::{ConstU64, OnInitialize}, }; @@ -45,6 +45,7 @@ frame_support::construct_runtime!( } ); +#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] impl frame_system::Config for Test { type BaseCallFilter = frame_support::traits::Everything; type BlockWeights = (); diff --git a/substrate/frame/examples/dev-mode/src/tests.rs b/substrate/frame/examples/dev-mode/src/tests.rs index c7722bc0524..3acedcd0fd1 100644 --- a/substrate/frame/examples/dev-mode/src/tests.rs +++ b/substrate/frame/examples/dev-mode/src/tests.rs @@ -18,7 +18,7 @@ //! Tests for pallet-dev-mode. use crate::*; -use frame_support::{assert_ok, traits::ConstU64}; +use frame_support::{assert_ok, derive_impl, traits::ConstU64}; use sp_core::H256; use sp_runtime::{ traits::{BlakeTwo256, IdentityLookup}, @@ -39,6 +39,7 @@ frame_support::construct_runtime!( } ); +#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] impl frame_system::Config for Test { type BaseCallFilter = frame_support::traits::Everything; type BlockWeights = (); diff --git a/substrate/frame/examples/offchain-worker/src/tests.rs b/substrate/frame/examples/offchain-worker/src/tests.rs index 203a59a8af0..48a8d86588c 100644 --- a/substrate/frame/examples/offchain-worker/src/tests.rs +++ b/substrate/frame/examples/offchain-worker/src/tests.rs @@ -19,7 +19,7 @@ use crate as example_offchain_worker; use crate::*; use codec::Decode; use frame_support::{ - assert_ok, parameter_types, + assert_ok, derive_impl, parameter_types, traits::{ConstU32, ConstU64}, }; use sp_core::{ @@ -46,6 +46,7 @@ frame_support::construct_runtime!( } ); +#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] impl frame_system::Config for Test { type BaseCallFilter = frame_support::traits::Everything; type BlockWeights = (); diff --git a/substrate/frame/executive/src/lib.rs b/substrate/frame/executive/src/lib.rs index 4eb76325bab..b351819f612 100644 --- a/substrate/frame/executive/src/lib.rs +++ b/substrate/frame/executive/src/lib.rs @@ -761,7 +761,7 @@ mod tests { }; use frame_support::{ - assert_err, parameter_types, + assert_err, derive_impl, parameter_types, traits::{fungible, ConstU32, ConstU64, ConstU8, Currency}, weights::{ConstantMultiplier, IdentityFee, RuntimeDbWeight, Weight, WeightToFee}, }; @@ -917,6 +917,7 @@ mod tests { write: 100, }; } + #[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] impl frame_system::Config for Runtime { type BaseCallFilter = frame_support::traits::Everything; type BlockWeights = BlockWeights; diff --git a/substrate/frame/glutton/src/mock.rs b/substrate/frame/glutton/src/mock.rs index 4bc40b54788..31b78efc574 
100644 --- a/substrate/frame/glutton/src/mock.rs +++ b/substrate/frame/glutton/src/mock.rs @@ -19,7 +19,7 @@ use super::*; use crate as pallet_glutton; use frame_support::{ - assert_ok, + assert_ok, derive_impl, traits::{ConstU32, ConstU64}, }; use sp_core::H256; @@ -38,6 +38,7 @@ frame_support::construct_runtime!( } ); +#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] impl frame_system::Config for Test { type BaseCallFilter = frame_support::traits::Everything; type BlockWeights = (); diff --git a/substrate/frame/grandpa/src/mock.rs b/substrate/frame/grandpa/src/mock.rs index 9afcec1c797..4766d5c3780 100644 --- a/substrate/frame/grandpa/src/mock.rs +++ b/substrate/frame/grandpa/src/mock.rs @@ -27,7 +27,7 @@ use frame_election_provider_support::{ onchain, SequentialPhragmen, }; use frame_support::{ - parameter_types, + derive_impl, parameter_types, traits::{ConstU128, ConstU32, ConstU64, KeyOwnerProofSystem, OnFinalize, OnInitialize}, }; use pallet_session::historical as pallet_session_historical; @@ -66,6 +66,7 @@ impl_opaque_keys! { } } +#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] impl frame_system::Config for Test { type BaseCallFilter = frame_support::traits::Everything; type BlockWeights = (); diff --git a/substrate/frame/identity/src/tests.rs b/substrate/frame/identity/src/tests.rs index 78074df933a..8ac7b4d66cb 100644 --- a/substrate/frame/identity/src/tests.rs +++ b/substrate/frame/identity/src/tests.rs @@ -25,7 +25,7 @@ use crate::{ use codec::{Decode, Encode}; use frame_support::{ - assert_noop, assert_ok, ord_parameter_types, parameter_types, + assert_noop, assert_ok, derive_impl, ord_parameter_types, parameter_types, traits::{ConstU32, ConstU64, EitherOfDiverse, Get}, BoundedVec, }; @@ -47,6 +47,7 @@ frame_support::construct_runtime!( } ); +#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] impl frame_system::Config for Test { type BaseCallFilter = frame_support::traits::Everything; type BlockWeights = (); diff --git a/substrate/frame/im-online/src/mock.rs b/substrate/frame/im-online/src/mock.rs index 85da061fe90..2f4e3922026 100644 --- a/substrate/frame/im-online/src/mock.rs +++ b/substrate/frame/im-online/src/mock.rs @@ -20,7 +20,7 @@ #![cfg(test)] use frame_support::{ - parameter_types, + derive_impl, parameter_types, traits::{ConstU32, ConstU64}, weights::Weight, }; @@ -113,6 +113,7 @@ pub fn new_test_ext() -> sp_io::TestExternalities { result } +#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] impl frame_system::Config for Runtime { type BaseCallFilter = frame_support::traits::Everything; type BlockWeights = (); diff --git a/substrate/frame/indices/src/mock.rs b/substrate/frame/indices/src/mock.rs index 7dc6730d34e..913a37fe55b 100644 --- a/substrate/frame/indices/src/mock.rs +++ b/substrate/frame/indices/src/mock.rs @@ -20,7 +20,10 @@ #![cfg(test)] use crate::{self as pallet_indices, Config}; -use frame_support::traits::{ConstU32, ConstU64}; +use frame_support::{ + derive_impl, + traits::{ConstU32, ConstU64}, +}; use sp_core::H256; use sp_runtime::BuildStorage; @@ -35,6 +38,7 @@ frame_support::construct_runtime!( } ); +#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] impl frame_system::Config for Test { type BaseCallFilter = frame_support::traits::Everything; type BlockWeights = (); diff --git 
a/substrate/frame/insecure-randomness-collective-flip/src/lib.rs b/substrate/frame/insecure-randomness-collective-flip/src/lib.rs index 474087777c4..c7ed22d1dd5 100644 --- a/substrate/frame/insecure-randomness-collective-flip/src/lib.rs +++ b/substrate/frame/insecure-randomness-collective-flip/src/lib.rs @@ -169,7 +169,7 @@ mod tests { }; use frame_support::{ - parameter_types, + derive_impl, parameter_types, traits::{ConstU32, ConstU64, OnInitialize, Randomness}, }; use frame_system::limits; @@ -189,6 +189,7 @@ mod tests { ::max(2 * 1024); } + #[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] impl frame_system::Config for Test { type BaseCallFilter = frame_support::traits::Everything; type BlockWeights = (); diff --git a/substrate/frame/lottery/src/mock.rs b/substrate/frame/lottery/src/mock.rs index e50ec3441b2..6e50529619b 100644 --- a/substrate/frame/lottery/src/mock.rs +++ b/substrate/frame/lottery/src/mock.rs @@ -21,7 +21,7 @@ use super::*; use crate as pallet_lottery; use frame_support::{ - parameter_types, + derive_impl, parameter_types, traits::{ConstU32, ConstU64, OnFinalize, OnInitialize}, }; use frame_support_test::TestRandomness; @@ -47,6 +47,7 @@ parameter_types! { pub const AvailableBlockRatio: Perbill = Perbill::one(); } +#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] impl frame_system::Config for Test { type BaseCallFilter = frame_support::traits::Everything; type BlockWeights = (); diff --git a/substrate/frame/membership/src/lib.rs b/substrate/frame/membership/src/lib.rs index 6fb61f0e491..2f4bf4bc4ff 100644 --- a/substrate/frame/membership/src/lib.rs +++ b/substrate/frame/membership/src/lib.rs @@ -531,7 +531,7 @@ mod tests { }; use frame_support::{ - assert_noop, assert_ok, ord_parameter_types, parameter_types, + assert_noop, assert_ok, derive_impl, ord_parameter_types, parameter_types, traits::{ConstU32, ConstU64, StorageVersion}, }; use frame_system::EnsureSignedBy; @@ -551,6 +551,7 @@ mod tests { pub static Prime: Option = None; } + #[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] impl frame_system::Config for Test { type BaseCallFilter = frame_support::traits::Everything; type BlockWeights = (); diff --git a/substrate/frame/message-queue/src/integration_test.rs b/substrate/frame/message-queue/src/integration_test.rs index 965b96a99ca..53dc204ab9c 100644 --- a/substrate/frame/message-queue/src/integration_test.rs +++ b/substrate/frame/message-queue/src/integration_test.rs @@ -38,7 +38,7 @@ use crate::{ use crate as pallet_message_queue; use frame_support::{ - parameter_types, + derive_impl, parameter_types, traits::{ConstU32, ConstU64}, }; use rand::{rngs::StdRng, Rng, SeedableRng}; @@ -57,6 +57,7 @@ frame_support::construct_runtime!( } ); +#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] impl frame_system::Config for Test { type BaseCallFilter = frame_support::traits::Everything; type BlockWeights = (); diff --git a/substrate/frame/message-queue/src/mock.rs b/substrate/frame/message-queue/src/mock.rs index 55a64574354..97246900597 100644 --- a/substrate/frame/message-queue/src/mock.rs +++ b/substrate/frame/message-queue/src/mock.rs @@ -24,7 +24,7 @@ use super::*; use crate as pallet_message_queue; use frame_support::{ - parameter_types, + derive_impl, parameter_types, traits::{ConstU32, ConstU64}, }; use sp_core::H256; @@ -43,6 +43,8 @@ frame_support::construct_runtime!( 
MessageQueue: pallet_message_queue::{Pallet, Call, Storage, Event}, } ); + +#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] impl frame_system::Config for Test { type BaseCallFilter = frame_support::traits::Everything; type BlockWeights = (); diff --git a/substrate/frame/nft-fractionalization/src/mock.rs b/substrate/frame/nft-fractionalization/src/mock.rs index 987c65a8954..855109adcbe 100644 --- a/substrate/frame/nft-fractionalization/src/mock.rs +++ b/substrate/frame/nft-fractionalization/src/mock.rs @@ -21,7 +21,7 @@ use super::*; use crate as pallet_nft_fractionalization; use frame_support::{ - construct_runtime, parameter_types, + construct_runtime, derive_impl, parameter_types, traits::{AsEnsureOriginWithArg, ConstU32, ConstU64}, BoundedVec, PalletId, }; @@ -49,6 +49,8 @@ construct_runtime!( Nfts: pallet_nfts, } ); + +#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] impl frame_system::Config for Test { type BaseCallFilter = frame_support::traits::Everything; type BlockWeights = (); diff --git a/substrate/frame/nfts/src/mock.rs b/substrate/frame/nfts/src/mock.rs index 248522aafff..4363eccc7ff 100644 --- a/substrate/frame/nfts/src/mock.rs +++ b/substrate/frame/nfts/src/mock.rs @@ -21,7 +21,7 @@ use super::*; use crate as pallet_nfts; use frame_support::{ - construct_runtime, parameter_types, + construct_runtime, derive_impl, parameter_types, traits::{AsEnsureOriginWithArg, ConstU32, ConstU64}, }; use sp_core::H256; @@ -46,6 +46,7 @@ pub type Signature = MultiSignature; pub type AccountPublic = ::Signer; pub type AccountId = ::AccountId; +#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] impl frame_system::Config for Test { type BaseCallFilter = frame_support::traits::Everything; type BlockWeights = (); diff --git a/substrate/frame/nicks/src/lib.rs b/substrate/frame/nicks/src/lib.rs index ad30c628adf..540777f87ca 100644 --- a/substrate/frame/nicks/src/lib.rs +++ b/substrate/frame/nicks/src/lib.rs @@ -253,7 +253,7 @@ mod tests { use crate as pallet_nicks; use frame_support::{ - assert_noop, assert_ok, ord_parameter_types, + assert_noop, assert_ok, derive_impl, ord_parameter_types, traits::{ConstU32, ConstU64}, }; use frame_system::EnsureSignedBy; @@ -274,6 +274,7 @@ mod tests { } ); + #[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] impl frame_system::Config for Test { type BaseCallFilter = frame_support::traits::Everything; type BlockWeights = (); diff --git a/substrate/frame/nis/src/mock.rs b/substrate/frame/nis/src/mock.rs index 30f7ef95f33..be6e79ac6f6 100644 --- a/substrate/frame/nis/src/mock.rs +++ b/substrate/frame/nis/src/mock.rs @@ -20,7 +20,7 @@ use crate::{self as pallet_nis, Perquintill, WithMaximumOf}; use frame_support::{ - ord_parameter_types, parameter_types, + derive_impl, ord_parameter_types, parameter_types, traits::{ fungible::Inspect, ConstU16, ConstU32, ConstU64, Everything, OnFinalize, OnInitialize, StorageMapShim, @@ -50,6 +50,7 @@ frame_support::construct_runtime!( } ); +#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] impl frame_system::Config for Test { type BaseCallFilter = Everything; type BlockWeights = (); diff --git a/substrate/frame/node-authorization/src/mock.rs b/substrate/frame/node-authorization/src/mock.rs index 84e3336b3bd..3c99d41b89e 100644 --- a/substrate/frame/node-authorization/src/mock.rs +++ 
b/substrate/frame/node-authorization/src/mock.rs @@ -21,7 +21,7 @@ use super::*; use crate as pallet_node_authorization; use frame_support::{ - ord_parameter_types, + derive_impl, ord_parameter_types, traits::{ConstU32, ConstU64}, }; use frame_system::EnsureSignedBy; @@ -43,6 +43,7 @@ frame_support::construct_runtime!( } ); +#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] impl frame_system::Config for Test { type BaseCallFilter = frame_support::traits::Everything; type DbWeight = (); diff --git a/substrate/frame/nomination-pools/benchmarking/src/mock.rs b/substrate/frame/nomination-pools/benchmarking/src/mock.rs index 9a7f2197a7b..095df219dc8 100644 --- a/substrate/frame/nomination-pools/benchmarking/src/mock.rs +++ b/substrate/frame/nomination-pools/benchmarking/src/mock.rs @@ -17,7 +17,7 @@ use crate::VoterBagsListInstance; use frame_election_provider_support::VoteWeight; -use frame_support::{pallet_prelude::*, parameter_types, traits::ConstU64, PalletId}; +use frame_support::{derive_impl, pallet_prelude::*, parameter_types, traits::ConstU64, PalletId}; use sp_runtime::{ traits::{Convert, IdentityLookup}, BuildStorage, FixedU128, Perbill, @@ -28,6 +28,7 @@ type Nonce = u32; type BlockNumber = u64; type Balance = u128; +#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] impl frame_system::Config for Runtime { type BaseCallFilter = frame_support::traits::Everything; type BlockWeights = (); diff --git a/substrate/frame/nomination-pools/src/mock.rs b/substrate/frame/nomination-pools/src/mock.rs index 24bea0b87f2..1bd969230da 100644 --- a/substrate/frame/nomination-pools/src/mock.rs +++ b/substrate/frame/nomination-pools/src/mock.rs @@ -17,7 +17,7 @@ use super::*; use crate::{self as pools}; -use frame_support::{assert_ok, parameter_types, traits::fungible::Mutate, PalletId}; +use frame_support::{assert_ok, derive_impl, parameter_types, traits::fungible::Mutate, PalletId}; use frame_system::RawOrigin; use sp_runtime::{BuildStorage, FixedU128}; use sp_staking::{OnStakingUpdate, Stake}; @@ -209,6 +209,7 @@ impl sp_staking::StakingInterface for StakingMock { } } +#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] impl frame_system::Config for Runtime { type SS58Prefix = (); type BaseCallFilter = frame_support::traits::Everything; diff --git a/substrate/frame/nomination-pools/test-staking/src/mock.rs b/substrate/frame/nomination-pools/test-staking/src/mock.rs index 0db24e9c244..ee24b53db06 100644 --- a/substrate/frame/nomination-pools/test-staking/src/mock.rs +++ b/substrate/frame/nomination-pools/test-staking/src/mock.rs @@ -17,7 +17,7 @@ use frame_election_provider_support::VoteWeight; use frame_support::{ - assert_ok, + assert_ok, derive_impl, pallet_prelude::*, parameter_types, traits::{ConstU64, ConstU8}, @@ -38,6 +38,7 @@ pub(crate) type T = Runtime; pub(crate) const POOL1_BONDED: AccountId = 20318131474730217858575332831085u128; pub(crate) const POOL1_REWARD: AccountId = 20397359637244482196168876781421u128; +#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] impl frame_system::Config for Runtime { type BaseCallFilter = frame_support::traits::Everything; type BlockWeights = (); diff --git a/substrate/frame/offences/benchmarking/src/mock.rs b/substrate/frame/offences/benchmarking/src/mock.rs index 1a458ec90d5..62ce1990112 100644 --- a/substrate/frame/offences/benchmarking/src/mock.rs +++ 
b/substrate/frame/offences/benchmarking/src/mock.rs @@ -25,7 +25,7 @@ use frame_election_provider_support::{ onchain, SequentialPhragmen, }; use frame_support::{ - parameter_types, + derive_impl, parameter_types, traits::{ConstU32, ConstU64}, }; use frame_system as system; @@ -40,6 +40,7 @@ type AccountId = u64; type Nonce = u32; type Balance = u64; +#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] impl frame_system::Config for Test { type BaseCallFilter = frame_support::traits::Everything; type BlockWeights = (); diff --git a/substrate/frame/offences/src/mock.rs b/substrate/frame/offences/src/mock.rs index 990ceae5ac0..61f680f6db9 100644 --- a/substrate/frame/offences/src/mock.rs +++ b/substrate/frame/offences/src/mock.rs @@ -23,7 +23,7 @@ use crate as offences; use crate::Config; use codec::Encode; use frame_support::{ - parameter_types, + derive_impl, parameter_types, traits::{ConstU32, ConstU64}, weights::{constants::RocksDbWeight, Weight}, }; @@ -75,6 +75,7 @@ frame_support::construct_runtime!( } ); +#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] impl frame_system::Config for Runtime { type BaseCallFilter = frame_support::traits::Everything; type BlockWeights = (); diff --git a/substrate/frame/paged-list/src/mock.rs b/substrate/frame/paged-list/src/mock.rs index 390b4a8530d..37bdc4f157c 100644 --- a/substrate/frame/paged-list/src/mock.rs +++ b/substrate/frame/paged-list/src/mock.rs @@ -20,7 +20,10 @@ #![cfg(feature = "std")] use crate::{paged_list::StoragePagedListMeta, Config, ListPrefix}; -use frame_support::traits::{ConstU16, ConstU64}; +use frame_support::{ + derive_impl, + traits::{ConstU16, ConstU64}, +}; use sp_core::H256; use sp_runtime::{ traits::{BlakeTwo256, IdentityLookup}, @@ -38,6 +41,7 @@ frame_support::construct_runtime!( } ); +#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] impl frame_system::Config for Test { type BaseCallFilter = frame_support::traits::Everything; type BlockWeights = (); diff --git a/substrate/frame/preimage/src/mock.rs b/substrate/frame/preimage/src/mock.rs index 0f966312d9e..357f088f5ba 100644 --- a/substrate/frame/preimage/src/mock.rs +++ b/substrate/frame/preimage/src/mock.rs @@ -21,7 +21,7 @@ use super::*; use crate as pallet_preimage; use frame_support::{ - ord_parameter_types, + derive_impl, ord_parameter_types, traits::{fungible::HoldConsideration, ConstU32, ConstU64, Everything}, weights::constants::RocksDbWeight, }; @@ -43,6 +43,7 @@ frame_support::construct_runtime!( } ); +#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] impl frame_system::Config for Test { type BaseCallFilter = Everything; type BlockWeights = (); diff --git a/substrate/frame/ranked-collective/src/tests.rs b/substrate/frame/ranked-collective/src/tests.rs index ba8c5a0f937..3a557065160 100644 --- a/substrate/frame/ranked-collective/src/tests.rs +++ b/substrate/frame/ranked-collective/src/tests.rs @@ -20,7 +20,7 @@ use std::collections::BTreeMap; use frame_support::{ - assert_noop, assert_ok, + assert_noop, assert_ok, derive_impl, error::BadOrigin, parameter_types, traits::{ConstU16, ConstU32, ConstU64, EitherOf, Everything, MapSuccess, Polling}, @@ -45,6 +45,7 @@ frame_support::construct_runtime!( } ); +#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] impl frame_system::Config for Test { type BaseCallFilter = Everything; type BlockWeights 
= (); diff --git a/substrate/frame/recovery/src/mock.rs b/substrate/frame/recovery/src/mock.rs index bc81d07bec2..44cbeec0986 100644 --- a/substrate/frame/recovery/src/mock.rs +++ b/substrate/frame/recovery/src/mock.rs @@ -21,7 +21,7 @@ use super::*; use crate as recovery; use frame_support::{ - parameter_types, + derive_impl, parameter_types, traits::{ConstU32, ConstU64, OnFinalize, OnInitialize}, }; use sp_core::H256; @@ -41,6 +41,7 @@ frame_support::construct_runtime!( } ); +#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] impl frame_system::Config for Test { type BaseCallFilter = frame_support::traits::Everything; type BlockWeights = (); diff --git a/substrate/frame/referenda/src/mock.rs b/substrate/frame/referenda/src/mock.rs index 345accbe268..b75558723e9 100644 --- a/substrate/frame/referenda/src/mock.rs +++ b/substrate/frame/referenda/src/mock.rs @@ -21,7 +21,7 @@ use super::*; use crate as pallet_referenda; use codec::{Decode, Encode, MaxEncodedLen}; use frame_support::{ - assert_ok, ord_parameter_types, parameter_types, + assert_ok, derive_impl, ord_parameter_types, parameter_types, traits::{ ConstU32, ConstU64, Contains, EqualPrivilegeOnly, OnInitialize, OriginTrait, Polling, SortedMembers, @@ -59,6 +59,7 @@ impl Contains for BaseFilter { parameter_types! { pub MaxWeight: Weight = Weight::from_parts(2_000_000_000_000, u64::MAX); } +#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] impl frame_system::Config for Test { type BaseCallFilter = BaseFilter; type BlockWeights = (); diff --git a/substrate/frame/remark/src/mock.rs b/substrate/frame/remark/src/mock.rs index e597a1ca4df..0a385c30eac 100644 --- a/substrate/frame/remark/src/mock.rs +++ b/substrate/frame/remark/src/mock.rs @@ -18,7 +18,10 @@ //! Test environment for remarks pallet. 
use crate as pallet_remark; -use frame_support::traits::{ConstU16, ConstU32, ConstU64}; +use frame_support::{ + derive_impl, + traits::{ConstU16, ConstU32, ConstU64}, +}; use sp_core::H256; use sp_runtime::{ traits::{BlakeTwo256, IdentityLookup}, @@ -36,6 +39,7 @@ frame_support::construct_runtime!( } ); +#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] impl frame_system::Config for Test { type BaseCallFilter = frame_support::traits::Everything; type BlockWeights = (); diff --git a/substrate/frame/root-offences/src/mock.rs b/substrate/frame/root-offences/src/mock.rs index 82da429e00a..5e04e9abc13 100644 --- a/substrate/frame/root-offences/src/mock.rs +++ b/substrate/frame/root-offences/src/mock.rs @@ -23,7 +23,7 @@ use frame_election_provider_support::{ onchain, SequentialPhragmen, }; use frame_support::{ - parameter_types, + derive_impl, parameter_types, traits::{ConstU32, ConstU64, Hooks, OneSessionHandler}, }; use pallet_staking::StakerStatus; @@ -84,6 +84,7 @@ impl sp_runtime::BoundToRuntimeAppPublic for OtherSessionHandler { type Public = UintAuthorityId; } +#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] impl frame_system::Config for Test { type BaseCallFilter = frame_support::traits::Everything; type BlockWeights = (); diff --git a/substrate/frame/safe-mode/src/mock.rs b/substrate/frame/safe-mode/src/mock.rs index 10afe5bd4b5..7574d64d59d 100644 --- a/substrate/frame/safe-mode/src/mock.rs +++ b/substrate/frame/safe-mode/src/mock.rs @@ -23,7 +23,7 @@ use super::*; use crate as pallet_safe_mode; use frame_support::{ - parameter_types, + derive_impl, parameter_types, traits::{ConstU64, Everything, InsideBoth, InstanceFilter, IsInVec, SafeModeNotify}, }; use frame_system::EnsureSignedBy; @@ -33,6 +33,7 @@ use sp_runtime::{ BuildStorage, }; +#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] impl frame_system::Config for Test { type BaseCallFilter = InsideBoth; type BlockWeights = (); diff --git a/substrate/frame/salary/src/tests.rs b/substrate/frame/salary/src/tests.rs index 1136ea746f6..fbca1be1188 100644 --- a/substrate/frame/salary/src/tests.rs +++ b/substrate/frame/salary/src/tests.rs @@ -20,7 +20,7 @@ use std::collections::BTreeMap; use frame_support::{ - assert_noop, assert_ok, + assert_noop, assert_ok, derive_impl, pallet_prelude::Weight, parameter_types, traits::{tokens::ConvertRank, ConstU32, ConstU64, Everything}, @@ -49,6 +49,8 @@ parameter_types! { pub BlockWeights: frame_system::limits::BlockWeights = frame_system::limits::BlockWeights::simple_max(Weight::from_parts(1_000_000, 0)); } + +#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] impl frame_system::Config for Test { type BaseCallFilter = Everything; type BlockWeights = (); diff --git a/substrate/frame/scored-pool/src/mock.rs b/substrate/frame/scored-pool/src/mock.rs index 32a66c0cdc5..6c032ab808c 100644 --- a/substrate/frame/scored-pool/src/mock.rs +++ b/substrate/frame/scored-pool/src/mock.rs @@ -21,7 +21,7 @@ use super::*; use crate as pallet_scored_pool; use frame_support::{ - construct_runtime, ord_parameter_types, parameter_types, + construct_runtime, derive_impl, ord_parameter_types, parameter_types, traits::{ConstU32, ConstU64}, }; use frame_system::EnsureSignedBy; @@ -51,6 +51,7 @@ ord_parameter_types! 
{ pub const ScoreOrigin: u64 = 3; } +#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] impl frame_system::Config for Test { type BaseCallFilter = frame_support::traits::Everything; type BlockWeights = (); diff --git a/substrate/frame/session/benchmarking/src/mock.rs b/substrate/frame/session/benchmarking/src/mock.rs index 47c337569a0..5c00d4bb491 100644 --- a/substrate/frame/session/benchmarking/src/mock.rs +++ b/substrate/frame/session/benchmarking/src/mock.rs @@ -24,7 +24,7 @@ use frame_election_provider_support::{ onchain, SequentialPhragmen, }; use frame_support::{ - parameter_types, + derive_impl, parameter_types, traits::{ConstU32, ConstU64}, }; use sp_runtime::{traits::IdentityLookup, BuildStorage}; @@ -45,6 +45,7 @@ frame_support::construct_runtime!( } ); +#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] impl frame_system::Config for Test { type BaseCallFilter = frame_support::traits::Everything; type BlockWeights = (); diff --git a/substrate/frame/session/src/mock.rs b/substrate/frame/session/src/mock.rs index 2db54e1a597..f3f18fde168 100644 --- a/substrate/frame/session/src/mock.rs +++ b/substrate/frame/session/src/mock.rs @@ -35,7 +35,7 @@ use sp_staking::SessionIndex; use sp_state_machine::BasicExternalities; use frame_support::{ - parameter_types, + derive_impl, parameter_types, traits::{ConstU32, ConstU64}, }; @@ -232,6 +232,7 @@ pub fn new_test_ext() -> sp_io::TestExternalities { sp_io::TestExternalities::new(t) } +#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] impl frame_system::Config for Test { type BaseCallFilter = frame_support::traits::Everything; type BlockWeights = (); diff --git a/substrate/frame/society/src/mock.rs b/substrate/frame/society/src/mock.rs index 0bee08236f7..3e29d01ca8e 100644 --- a/substrate/frame/society/src/mock.rs +++ b/substrate/frame/society/src/mock.rs @@ -21,7 +21,7 @@ use super::*; use crate as pallet_society; use frame_support::{ - assert_noop, assert_ok, ord_parameter_types, parameter_types, + assert_noop, assert_ok, derive_impl, ord_parameter_types, parameter_types, traits::{ConstU32, ConstU64}, }; use frame_support_test::TestRandomness; @@ -58,6 +58,7 @@ ord_parameter_types! { pub const MaxBids: u32 = 10; } +#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] impl frame_system::Config for Test { type BaseCallFilter = frame_support::traits::Everything; type BlockWeights = (); diff --git a/substrate/frame/staking/src/mock.rs b/substrate/frame/staking/src/mock.rs index 364d125792b..90b0ee0260d 100644 --- a/substrate/frame/staking/src/mock.rs +++ b/substrate/frame/staking/src/mock.rs @@ -23,7 +23,7 @@ use frame_election_provider_support::{ onchain, SequentialPhragmen, VoteWeight, }; use frame_support::{ - assert_ok, ord_parameter_types, parameter_types, + assert_ok, derive_impl, ord_parameter_types, parameter_types, traits::{ ConstU64, Currency, EitherOfDiverse, FindAuthor, Get, Hooks, Imbalance, OnUnbalanced, OneSessionHandler, @@ -124,6 +124,7 @@ parameter_types! 
{ pub static Offset: BlockNumber = 0; } +#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] impl frame_system::Config for Test { type BaseCallFilter = frame_support::traits::Everything; type BlockWeights = (); diff --git a/substrate/frame/state-trie-migration/src/lib.rs b/substrate/frame/state-trie-migration/src/lib.rs index ac3996459cd..5330634ca07 100644 --- a/substrate/frame/state-trie-migration/src/lib.rs +++ b/substrate/frame/state-trie-migration/src/lib.rs @@ -1051,7 +1051,7 @@ mod mock { use super::*; use crate as pallet_state_trie_migration; use frame_support::{ - parameter_types, + derive_impl, parameter_types, traits::{ConstU32, ConstU64, Hooks}, weights::Weight, }; @@ -1081,6 +1081,7 @@ mod mock { pub const SS58Prefix: u8 = 42; } + #[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] impl frame_system::Config for Test { type BaseCallFilter = frame_support::traits::Everything; type BlockWeights = (); diff --git a/substrate/frame/statement/src/mock.rs b/substrate/frame/statement/src/mock.rs index 10a74e100df..192baa1f218 100644 --- a/substrate/frame/statement/src/mock.rs +++ b/substrate/frame/statement/src/mock.rs @@ -21,7 +21,7 @@ use super::*; use crate as pallet_statement; use frame_support::{ - ord_parameter_types, + derive_impl, ord_parameter_types, traits::{ConstU32, ConstU64, Everything}, weights::constants::RocksDbWeight, }; @@ -47,6 +47,7 @@ frame_support::construct_runtime!( } ); +#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] impl frame_system::Config for Test { type BaseCallFilter = Everything; type BlockWeights = (); diff --git a/substrate/frame/sudo/src/mock.rs b/substrate/frame/sudo/src/mock.rs index 6f123b7c82b..878e9239080 100644 --- a/substrate/frame/sudo/src/mock.rs +++ b/substrate/frame/sudo/src/mock.rs @@ -19,7 +19,10 @@ use super::*; use crate as sudo; -use frame_support::traits::{ConstU32, Contains}; +use frame_support::{ + derive_impl, + traits::{ConstU32, Contains}, +}; use sp_core::{ConstU64, H256}; use sp_io; use sp_runtime::{ @@ -108,6 +111,7 @@ impl Contains for BlockEverything { } } +#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] impl frame_system::Config for Test { type BaseCallFilter = BlockEverything; type BlockWeights = (); diff --git a/substrate/frame/support/test/tests/construct_runtime_ui/number_of_pallets_exceeds_tuple_size.rs b/substrate/frame/support/test/tests/construct_runtime_ui/number_of_pallets_exceeds_tuple_size.rs index ea52293a673..78ae6f57f08 100644 --- a/substrate/frame/support/test/tests/construct_runtime_ui/number_of_pallets_exceeds_tuple_size.rs +++ b/substrate/frame/support/test/tests/construct_runtime_ui/number_of_pallets_exceeds_tuple_size.rs @@ -15,7 +15,7 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-use frame_support::construct_runtime; +use frame_support::{construct_runtime, derive_impl}; use sp_core::sr25519; use sp_runtime::{generic, traits::BlakeTwo256}; @@ -36,6 +36,7 @@ pub type UncheckedExtrinsic = generic::UncheckedExtrinsic tests/construct_runtime_ui/number_of_pallets_exceeds_tuple_size.rs:66:2 + --> tests/construct_runtime_ui/number_of_pallets_exceeds_tuple_size.rs:67:2 | -66 | pub struct Runtime +67 | pub struct Runtime | ^^^ error[E0412]: cannot find type `RuntimeCall` in this scope @@ -22,38 +22,38 @@ error[E0412]: cannot find type `Runtime` in this scope | ^^^^^^^ not found in this scope error[E0412]: cannot find type `Runtime` in this scope - --> tests/construct_runtime_ui/number_of_pallets_exceeds_tuple_size.rs:39:31 + --> tests/construct_runtime_ui/number_of_pallets_exceeds_tuple_size.rs:40:31 | -39 | impl frame_system::Config for Runtime { +40 | impl frame_system::Config for Runtime { | ^^^^^^^ not found in this scope error[E0412]: cannot find type `RuntimeOrigin` in this scope - --> tests/construct_runtime_ui/number_of_pallets_exceeds_tuple_size.rs:41:23 + --> tests/construct_runtime_ui/number_of_pallets_exceeds_tuple_size.rs:42:23 | -41 | type RuntimeOrigin = RuntimeOrigin; +42 | type RuntimeOrigin = RuntimeOrigin; | ^^^^^^^^^^^^^ help: you might have meant to use the associated type: `Self::RuntimeOrigin` error[E0412]: cannot find type `RuntimeCall` in this scope - --> tests/construct_runtime_ui/number_of_pallets_exceeds_tuple_size.rs:43:21 + --> tests/construct_runtime_ui/number_of_pallets_exceeds_tuple_size.rs:44:21 | -43 | type RuntimeCall = RuntimeCall; +44 | type RuntimeCall = RuntimeCall; | ^^^^^^^^^^^ help: you might have meant to use the associated type: `Self::RuntimeCall` error[E0412]: cannot find type `RuntimeEvent` in this scope - --> tests/construct_runtime_ui/number_of_pallets_exceeds_tuple_size.rs:49:22 + --> tests/construct_runtime_ui/number_of_pallets_exceeds_tuple_size.rs:50:22 | -49 | type RuntimeEvent = RuntimeEvent; +50 | type RuntimeEvent = RuntimeEvent; | ^^^^^^^^^^^^ help: you might have meant to use the associated type: `Self::RuntimeEvent` error[E0412]: cannot find type `PalletInfo` in this scope - --> tests/construct_runtime_ui/number_of_pallets_exceeds_tuple_size.rs:55:20 + --> tests/construct_runtime_ui/number_of_pallets_exceeds_tuple_size.rs:56:20 | -55 | type PalletInfo = PalletInfo; +56 | type PalletInfo = PalletInfo; | ^^^^^^^^^^ | help: you might have meant to use the associated type | -55 | type PalletInfo = Self::PalletInfo; +56 | type PalletInfo = Self::PalletInfo; | ~~~~~~~~~~~~~~~~ help: consider importing one of these items | diff --git a/substrate/frame/support/test/tests/construct_runtime_ui/pallet_error_too_large.rs b/substrate/frame/support/test/tests/construct_runtime_ui/pallet_error_too_large.rs index 2834b5b8f2a..d3e519af551 100644 --- a/substrate/frame/support/test/tests/construct_runtime_ui/pallet_error_too_large.rs +++ b/substrate/frame/support/test/tests/construct_runtime_ui/pallet_error_too_large.rs @@ -15,7 +15,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -use frame_support::construct_runtime; +use frame_support::{construct_runtime, derive_impl}; use sp_core::sr25519; use sp_runtime::{generic, traits::BlakeTwo256}; @@ -61,6 +61,7 @@ pub type UncheckedExtrinsic = generic::UncheckedExtrinsic tests/construct_runtime_ui/pallet_error_too_large.rs:90:1 + --> tests/construct_runtime_ui/pallet_error_too_large.rs:91:1 | -90 | / construct_runtime! 
{ -91 | | pub struct Runtime -92 | | { -93 | | System: frame_system::{Pallet, Call, Storage, Config, Event}, -94 | | Pallet: pallet::{Pallet}, -95 | | } -96 | | } - | |_^ the evaluated program panicked at 'The maximum encoded size of the error type in the `Pallet` pallet exceeds `MAX_MODULE_ERROR_ENCODED_SIZE`', $DIR/tests/construct_runtime_ui/pallet_error_too_large.rs:90:1 +91 | / construct_runtime! { +92 | | pub struct Runtime +93 | | { +94 | | System: frame_system::{Pallet, Call, Storage, Config, Event}, +95 | | Pallet: pallet::{Pallet}, +96 | | } +97 | | } + | |_^ the evaluated program panicked at 'The maximum encoded size of the error type in the `Pallet` pallet exceeds `MAX_MODULE_ERROR_ENCODED_SIZE`', $DIR/tests/construct_runtime_ui/pallet_error_too_large.rs:91:1 | = note: this error originates in the macro `$crate::panic::panic_2021` which comes from the expansion of the macro `construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/substrate/frame/support/test/tests/construct_runtime_ui/undefined_call_part.rs b/substrate/frame/support/test/tests/construct_runtime_ui/undefined_call_part.rs index 62c4b1327e0..8193d12120c 100644 --- a/substrate/frame/support/test/tests/construct_runtime_ui/undefined_call_part.rs +++ b/substrate/frame/support/test/tests/construct_runtime_ui/undefined_call_part.rs @@ -15,7 +15,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -use frame_support::construct_runtime; +use frame_support::{construct_runtime, derive_impl}; use sp_core::sr25519; use sp_runtime::{generic, traits::BlakeTwo256}; @@ -36,6 +36,7 @@ pub type UncheckedExtrinsic = generic::UncheckedExtrinsic, Event}, -69 | | Pallet: pallet::{Pallet, Call}, -70 | | } -71 | | } +66 | / construct_runtime! { +67 | | pub struct Runtime +68 | | { +69 | | System: frame_system::{Pallet, Call, Storage, Config, Event}, +70 | | Pallet: pallet::{Pallet, Call}, +71 | | } +72 | | } | |_- in this macro invocation | = note: this error originates in the macro `pallet::__substrate_call_check::is_call_part_defined` which comes from the expansion of the macro `construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/substrate/frame/support/test/tests/construct_runtime_ui/undefined_event_part.rs b/substrate/frame/support/test/tests/construct_runtime_ui/undefined_event_part.rs index 893690501a8..ef3a790b61a 100644 --- a/substrate/frame/support/test/tests/construct_runtime_ui/undefined_event_part.rs +++ b/substrate/frame/support/test/tests/construct_runtime_ui/undefined_event_part.rs @@ -15,7 +15,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -use frame_support::construct_runtime; +use frame_support::{construct_runtime, derive_impl}; use sp_core::sr25519; use sp_runtime::{generic, traits::BlakeTwo256}; @@ -36,6 +36,7 @@ pub type UncheckedExtrinsic = generic::UncheckedExtrinsic, Event}, -69 | | Pallet: pallet expanded::{}::{Pallet, Event}, -70 | | } -71 | | } +66 | / construct_runtime! 
{ +67 | | pub struct Runtime +68 | | { +69 | | System: frame_system expanded::{}::{Pallet, Call, Storage, Config, Event}, +70 | | Pallet: pallet expanded::{}::{Pallet, Event}, +71 | | } +72 | | } | |_- in this macro invocation | = note: this error originates in the macro `pallet::__substrate_event_check::is_event_part_defined` which comes from the expansion of the macro `construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) error[E0412]: cannot find type `Event` in module `pallet` - --> tests/construct_runtime_ui/undefined_event_part.rs:65:1 + --> tests/construct_runtime_ui/undefined_event_part.rs:66:1 | -65 | / construct_runtime! { -66 | | pub struct Runtime -67 | | { -68 | | System: frame_system expanded::{}::{Pallet, Call, Storage, Config, Event}, -69 | | Pallet: pallet expanded::{}::{Pallet, Event}, -70 | | } -71 | | } +66 | / construct_runtime! { +67 | | pub struct Runtime +68 | | { +69 | | System: frame_system expanded::{}::{Pallet, Call, Storage, Config, Event}, +70 | | Pallet: pallet expanded::{}::{Pallet, Event}, +71 | | } +72 | | } | |_^ not found in `pallet` | = note: this error originates in the macro `construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/substrate/frame/support/test/tests/construct_runtime_ui/undefined_genesis_config_part.rs b/substrate/frame/support/test/tests/construct_runtime_ui/undefined_genesis_config_part.rs index a3501ca31a3..b4dd41750c8 100644 --- a/substrate/frame/support/test/tests/construct_runtime_ui/undefined_genesis_config_part.rs +++ b/substrate/frame/support/test/tests/construct_runtime_ui/undefined_genesis_config_part.rs @@ -15,7 +15,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -use frame_support::construct_runtime; +use frame_support::{construct_runtime, derive_impl}; use sp_core::sr25519; use sp_runtime::{generic, traits::BlakeTwo256}; @@ -36,6 +36,7 @@ pub type UncheckedExtrinsic = generic::UncheckedExtrinsic, Event}, -69 | | Pallet: pallet expanded::{}::{Pallet, Config}, -70 | | } -71 | | } +66 | / construct_runtime! { +67 | | pub struct Runtime +68 | | { +69 | | System: frame_system expanded::{}::{Pallet, Call, Storage, Config, Event}, +70 | | Pallet: pallet expanded::{}::{Pallet, Config}, +71 | | } +72 | | } | |_- in this macro invocation | = note: this error originates in the macro `pallet::__substrate_genesis_config_check::is_genesis_config_defined` which comes from the expansion of the macro `construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) error[E0412]: cannot find type `GenesisConfig` in module `pallet` - --> tests/construct_runtime_ui/undefined_genesis_config_part.rs:65:1 + --> tests/construct_runtime_ui/undefined_genesis_config_part.rs:66:1 | -65 | / construct_runtime! { -66 | | pub struct Runtime -67 | | { -68 | | System: frame_system expanded::{}::{Pallet, Call, Storage, Config, Event}, -69 | | Pallet: pallet expanded::{}::{Pallet, Config}, -70 | | } -71 | | } +66 | / construct_runtime! 
{ +67 | | pub struct Runtime +68 | | { +69 | | System: frame_system expanded::{}::{Pallet, Call, Storage, Config, Event}, +70 | | Pallet: pallet expanded::{}::{Pallet, Config}, +71 | | } +72 | | } | |_^ not found in `pallet` | = note: this error originates in the macro `construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/substrate/frame/support/test/tests/construct_runtime_ui/undefined_inherent_part.rs b/substrate/frame/support/test/tests/construct_runtime_ui/undefined_inherent_part.rs index e22745930d6..5e0b8f3c44f 100644 --- a/substrate/frame/support/test/tests/construct_runtime_ui/undefined_inherent_part.rs +++ b/substrate/frame/support/test/tests/construct_runtime_ui/undefined_inherent_part.rs @@ -15,7 +15,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -use frame_support::construct_runtime; +use frame_support::{construct_runtime, derive_impl}; use sp_core::sr25519; use sp_runtime::{generic, traits::BlakeTwo256}; @@ -36,6 +36,7 @@ pub type UncheckedExtrinsic = generic::UncheckedExtrinsic, Event}, -69 | | Pallet: pallet expanded::{}::{Pallet, Inherent}, -70 | | } -71 | | } +66 | / construct_runtime! { +67 | | pub struct Runtime +68 | | { +69 | | System: frame_system expanded::{}::{Pallet, Call, Storage, Config, Event}, +70 | | Pallet: pallet expanded::{}::{Pallet, Inherent}, +71 | | } +72 | | } | |_- in this macro invocation | = note: this error originates in the macro `pallet::__substrate_inherent_check::is_inherent_part_defined` which comes from the expansion of the macro `construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) error[E0599]: no function or associated item named `create_inherent` found for struct `pallet::Pallet` in the current scope - --> tests/construct_runtime_ui/undefined_inherent_part.rs:65:1 + --> tests/construct_runtime_ui/undefined_inherent_part.rs:66:1 | 28 | pub struct Pallet(_); | -------------------- function or associated item `create_inherent` not found for this struct ... -65 | construct_runtime! { +66 | construct_runtime! { | _^ -66 | | pub struct Runtime -67 | | { -68 | | System: frame_system expanded::{}::{Pallet, Call, Storage, Config, Event}, -69 | | Pallet: pallet expanded::{}::{Pallet, Inherent}, -70 | | } -71 | | } +67 | | pub struct Runtime +68 | | { +69 | | System: frame_system expanded::{}::{Pallet, Call, Storage, Config, Event}, +70 | | Pallet: pallet expanded::{}::{Pallet, Inherent}, +71 | | } +72 | | } | |_^ function or associated item not found in `Pallet` | = help: items from traits can only be used if the trait is implemented and in scope @@ -37,19 +37,19 @@ error[E0599]: no function or associated item named `create_inherent` found for s = note: this error originates in the macro `construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) error[E0599]: no function or associated item named `is_inherent` found for struct `pallet::Pallet` in the current scope - --> tests/construct_runtime_ui/undefined_inherent_part.rs:65:1 + --> tests/construct_runtime_ui/undefined_inherent_part.rs:66:1 | 28 | pub struct Pallet(_); | -------------------- function or associated item `is_inherent` not found for this struct ... -65 | construct_runtime! { +66 | construct_runtime! 
{ | _^ -66 | | pub struct Runtime -67 | | { -68 | | System: frame_system expanded::{}::{Pallet, Call, Storage, Config, Event}, -69 | | Pallet: pallet expanded::{}::{Pallet, Inherent}, -70 | | } -71 | | } +67 | | pub struct Runtime +68 | | { +69 | | System: frame_system expanded::{}::{Pallet, Call, Storage, Config, Event}, +70 | | Pallet: pallet expanded::{}::{Pallet, Inherent}, +71 | | } +72 | | } | |_^ function or associated item not found in `Pallet` | = help: items from traits can only be used if the trait is implemented and in scope @@ -58,19 +58,19 @@ error[E0599]: no function or associated item named `is_inherent` found for struc = note: this error originates in the macro `construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) error[E0599]: no function or associated item named `check_inherent` found for struct `pallet::Pallet` in the current scope - --> tests/construct_runtime_ui/undefined_inherent_part.rs:65:1 + --> tests/construct_runtime_ui/undefined_inherent_part.rs:66:1 | 28 | pub struct Pallet(_); | -------------------- function or associated item `check_inherent` not found for this struct ... -65 | construct_runtime! { +66 | construct_runtime! { | _^ -66 | | pub struct Runtime -67 | | { -68 | | System: frame_system expanded::{}::{Pallet, Call, Storage, Config, Event}, -69 | | Pallet: pallet expanded::{}::{Pallet, Inherent}, -70 | | } -71 | | } +67 | | pub struct Runtime +68 | | { +69 | | System: frame_system expanded::{}::{Pallet, Call, Storage, Config, Event}, +70 | | Pallet: pallet expanded::{}::{Pallet, Inherent}, +71 | | } +72 | | } | |_^ function or associated item not found in `Pallet` | = help: items from traits can only be used if the trait is implemented and in scope @@ -79,19 +79,19 @@ error[E0599]: no function or associated item named `check_inherent` found for st = note: this error originates in the macro `construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) error[E0599]: no associated item named `INHERENT_IDENTIFIER` found for struct `pallet::Pallet` in the current scope - --> tests/construct_runtime_ui/undefined_inherent_part.rs:65:1 + --> tests/construct_runtime_ui/undefined_inherent_part.rs:66:1 | 28 | pub struct Pallet(_); | -------------------- associated item `INHERENT_IDENTIFIER` not found for this struct ... -65 | construct_runtime! { +66 | construct_runtime! { | _^ -66 | | pub struct Runtime -67 | | { -68 | | System: frame_system expanded::{}::{Pallet, Call, Storage, Config, Event}, -69 | | Pallet: pallet expanded::{}::{Pallet, Inherent}, -70 | | } -71 | | } +67 | | pub struct Runtime +68 | | { +69 | | System: frame_system expanded::{}::{Pallet, Call, Storage, Config, Event}, +70 | | Pallet: pallet expanded::{}::{Pallet, Inherent}, +71 | | } +72 | | } | |_^ associated item not found in `Pallet` | = help: items from traits can only be used if the trait is implemented and in scope @@ -100,19 +100,19 @@ error[E0599]: no associated item named `INHERENT_IDENTIFIER` found for struct `p = note: this error originates in the macro `construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) error[E0599]: no function or associated item named `is_inherent_required` found for struct `pallet::Pallet` in the current scope - --> tests/construct_runtime_ui/undefined_inherent_part.rs:65:1 + --> tests/construct_runtime_ui/undefined_inherent_part.rs:66:1 | 28 | pub struct Pallet(_); | -------------------- function or associated item `is_inherent_required` not found for this struct ... 
-65 | construct_runtime! { +66 | construct_runtime! { | _^ -66 | | pub struct Runtime -67 | | { -68 | | System: frame_system expanded::{}::{Pallet, Call, Storage, Config, Event}, -69 | | Pallet: pallet expanded::{}::{Pallet, Inherent}, -70 | | } -71 | | } +67 | | pub struct Runtime +68 | | { +69 | | System: frame_system expanded::{}::{Pallet, Call, Storage, Config, Event}, +70 | | Pallet: pallet expanded::{}::{Pallet, Inherent}, +71 | | } +72 | | } | |_^ function or associated item not found in `Pallet` | = help: items from traits can only be used if the trait is implemented and in scope diff --git a/substrate/frame/support/test/tests/construct_runtime_ui/undefined_origin_part.rs b/substrate/frame/support/test/tests/construct_runtime_ui/undefined_origin_part.rs index 656365279b8..40a4a1ebcb5 100644 --- a/substrate/frame/support/test/tests/construct_runtime_ui/undefined_origin_part.rs +++ b/substrate/frame/support/test/tests/construct_runtime_ui/undefined_origin_part.rs @@ -15,7 +15,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -use frame_support::construct_runtime; +use frame_support::{construct_runtime, derive_impl}; use sp_core::sr25519; use sp_runtime::{generic, traits::BlakeTwo256}; @@ -36,6 +36,7 @@ pub type UncheckedExtrinsic = generic::UncheckedExtrinsic, Event}, -69 | | Pallet: pallet expanded::{}::{Pallet, Origin}, -70 | | } -71 | | } +66 | / construct_runtime! { +67 | | pub struct Runtime +68 | | { +69 | | System: frame_system expanded::{}::{Pallet, Call, Storage, Config, Event}, +70 | | Pallet: pallet expanded::{}::{Pallet, Origin}, +71 | | } +72 | | } | |_- in this macro invocation | = note: this error originates in the macro `pallet::__substrate_origin_check::is_origin_part_defined` which comes from the expansion of the macro `construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) error[E0412]: cannot find type `Origin` in module `pallet` - --> tests/construct_runtime_ui/undefined_origin_part.rs:65:1 + --> tests/construct_runtime_ui/undefined_origin_part.rs:66:1 | -65 | / construct_runtime! { -66 | | pub struct Runtime -67 | | { -68 | | System: frame_system expanded::{}::{Pallet, Call, Storage, Config, Event}, -69 | | Pallet: pallet expanded::{}::{Pallet, Origin}, -70 | | } -71 | | } +66 | / construct_runtime! { +67 | | pub struct Runtime +68 | | { +69 | | System: frame_system expanded::{}::{Pallet, Call, Storage, Config, Event}, +70 | | Pallet: pallet expanded::{}::{Pallet, Origin}, +71 | | } +72 | | } | |_^ not found in `pallet` | = note: this error originates in the macro `construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/substrate/frame/support/test/tests/construct_runtime_ui/undefined_validate_unsigned_part.rs b/substrate/frame/support/test/tests/construct_runtime_ui/undefined_validate_unsigned_part.rs index 05545821ab0..be9e4ac2c30 100644 --- a/substrate/frame/support/test/tests/construct_runtime_ui/undefined_validate_unsigned_part.rs +++ b/substrate/frame/support/test/tests/construct_runtime_ui/undefined_validate_unsigned_part.rs @@ -15,7 +15,7 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-use frame_support::construct_runtime; +use frame_support::{construct_runtime, derive_impl}; use sp_core::sr25519; use sp_runtime::{generic, traits::BlakeTwo256}; @@ -36,6 +36,7 @@ pub type UncheckedExtrinsic = generic::UncheckedExtrinsic, Event}, -69 | | Pallet: pallet::{Pallet, ValidateUnsigned}, -70 | | } -71 | | } +66 | / construct_runtime! { +67 | | pub struct Runtime +68 | | { +69 | | System: frame_system::{Pallet, Call, Storage, Config, Event}, +70 | | Pallet: pallet::{Pallet, ValidateUnsigned}, +71 | | } +72 | | } | |_- in this macro invocation | = note: this error originates in the macro `pallet::__substrate_validate_unsigned_check::is_validate_unsigned_part_defined` which comes from the expansion of the macro `construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) error[E0599]: no variant or associated item named `Pallet` found for enum `RuntimeCall` in the current scope - --> tests/construct_runtime_ui/undefined_validate_unsigned_part.rs:69:3 + --> tests/construct_runtime_ui/undefined_validate_unsigned_part.rs:70:3 | -65 | // construct_runtime! { -66 | || pub struct Runtime -67 | || { -68 | || System: frame_system::{Pallet, Call, Storage, Config, Event}, -69 | || Pallet: pallet::{Pallet, ValidateUnsigned}, +66 | // construct_runtime! { +67 | || pub struct Runtime +68 | || { +69 | || System: frame_system::{Pallet, Call, Storage, Config, Event}, +70 | || Pallet: pallet::{Pallet, ValidateUnsigned}, | || -^^^^^^ variant or associated item not found in `RuntimeCall` | ||________| | | ... | error[E0599]: no function or associated item named `pre_dispatch` found for struct `pallet::Pallet` in the current scope - --> tests/construct_runtime_ui/undefined_validate_unsigned_part.rs:65:1 + --> tests/construct_runtime_ui/undefined_validate_unsigned_part.rs:66:1 | 28 | pub struct Pallet(_); | -------------------- function or associated item `pre_dispatch` not found for this struct ... -65 | construct_runtime! { +66 | construct_runtime! { | __^ | | _| | || -66 | || pub struct Runtime -67 | || { -68 | || System: frame_system::{Pallet, Call, Storage, Config, Event}, -69 | || Pallet: pallet::{Pallet, ValidateUnsigned}, -70 | || } -71 | || } +67 | || pub struct Runtime +68 | || { +69 | || System: frame_system::{Pallet, Call, Storage, Config, Event}, +70 | || Pallet: pallet::{Pallet, ValidateUnsigned}, +71 | || } +72 | || } | ||_- in this macro invocation ... | | @@ -54,21 +54,21 @@ error[E0599]: no function or associated item named `pre_dispatch` found for stru = note: this error originates in the macro `frame_support::construct_runtime` which comes from the expansion of the macro `construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) error[E0599]: no function or associated item named `validate_unsigned` found for struct `pallet::Pallet` in the current scope - --> tests/construct_runtime_ui/undefined_validate_unsigned_part.rs:65:1 + --> tests/construct_runtime_ui/undefined_validate_unsigned_part.rs:66:1 | 28 | pub struct Pallet(_); | -------------------- function or associated item `validate_unsigned` not found for this struct ... -65 | construct_runtime! { +66 | construct_runtime! 
{ | __^ | | _| | || -66 | || pub struct Runtime -67 | || { -68 | || System: frame_system::{Pallet, Call, Storage, Config, Event}, -69 | || Pallet: pallet::{Pallet, ValidateUnsigned}, -70 | || } -71 | || } +67 | || pub struct Runtime +68 | || { +69 | || System: frame_system::{Pallet, Call, Storage, Config, Event}, +70 | || Pallet: pallet::{Pallet, ValidateUnsigned}, +71 | || } +72 | || } | ||_- in this macro invocation ... | | diff --git a/substrate/frame/support/test/tests/pallet.rs b/substrate/frame/support/test/tests/pallet.rs index 00e7adafb0b..1afb8fdf497 100644 --- a/substrate/frame/support/test/tests/pallet.rs +++ b/substrate/frame/support/test/tests/pallet.rs @@ -16,7 +16,7 @@ // limitations under the License. use frame_support::{ - assert_ok, + assert_ok, derive_impl, dispatch::{DispatchClass, DispatchInfo, GetDispatchInfo, Parameter, Pays}, dispatch_context::with_context, pallet_prelude::{StorageInfoTrait, ValueQuery}, @@ -682,6 +682,7 @@ frame_support::parameter_types!( pub const MyGetParam3: u32 = 12; ); +#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] impl frame_system::Config for Runtime { type BaseCallFilter = frame_support::traits::Everything; type RuntimeOrigin = RuntimeOrigin; diff --git a/substrate/frame/support/test/tests/pallet_instance.rs b/substrate/frame/support/test/tests/pallet_instance.rs index 724734ec4fc..e9ac03302b2 100644 --- a/substrate/frame/support/test/tests/pallet_instance.rs +++ b/substrate/frame/support/test/tests/pallet_instance.rs @@ -16,6 +16,7 @@ // limitations under the License. use frame_support::{ + derive_impl, dispatch::{DispatchClass, DispatchInfo, GetDispatchInfo, Pays}, pallet_prelude::ValueQuery, parameter_types, @@ -292,6 +293,7 @@ pub mod pallet2 { } } +#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] impl frame_system::Config for Runtime { type BaseCallFilter = frame_support::traits::Everything; type RuntimeOrigin = RuntimeOrigin; diff --git a/substrate/frame/support/test/tests/pallet_ui/pass/dev_mode_valid.rs b/substrate/frame/support/test/tests/pallet_ui/pass/dev_mode_valid.rs index bf26cfd95b1..4dc33991b12 100644 --- a/substrate/frame/support/test/tests/pallet_ui/pass/dev_mode_valid.rs +++ b/substrate/frame/support/test/tests/pallet_ui/pass/dev_mode_valid.rs @@ -17,7 +17,7 @@ #![cfg_attr(not(feature = "std"), no_std)] -use frame_support::traits::ConstU32; +use frame_support::{derive_impl, traits::ConstU32}; pub use pallet::*; @@ -70,6 +70,7 @@ pub mod pallet { impl Pallet {} } +#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] impl frame_system::Config for Runtime { type BaseCallFilter = frame_support::traits::Everything; type RuntimeOrigin = RuntimeOrigin; diff --git a/substrate/frame/support/test/tests/pallet_ui/pass/no_std_genesis_config.rs b/substrate/frame/support/test/tests/pallet_ui/pass/no_std_genesis_config.rs index 9ab486c718c..de856ddcd3e 100644 --- a/substrate/frame/support/test/tests/pallet_ui/pass/no_std_genesis_config.rs +++ b/substrate/frame/support/test/tests/pallet_ui/pass/no_std_genesis_config.rs @@ -15,7 +15,7 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-use frame_support::construct_runtime; +use frame_support::{construct_runtime, derive_impl}; use sp_core::sr25519; use sp_runtime::{generic, traits::BlakeTwo256}; @@ -27,6 +27,7 @@ pub type UncheckedExtrinsic = generic::UncheckedExtrinsic; pub type UncheckedExtrinsic = sp_runtime::generic::UncheckedExtrinsic; +#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] impl frame_system::Config for Runtime { type BlockWeights = (); type BlockLength = (); diff --git a/substrate/frame/support/test/tests/storage_layers.rs b/substrate/frame/support/test/tests/storage_layers.rs index b825c85f956..a6d16e0d66d 100644 --- a/substrate/frame/support/test/tests/storage_layers.rs +++ b/substrate/frame/support/test/tests/storage_layers.rs @@ -16,8 +16,8 @@ // limitations under the License. use frame_support::{ - assert_noop, assert_ok, dispatch::DispatchResult, ensure, pallet_prelude::ConstU32, - storage::with_storage_layer, + assert_noop, assert_ok, derive_impl, dispatch::DispatchResult, ensure, + pallet_prelude::ConstU32, storage::with_storage_layer, }; use pallet::*; use sp_io::TestExternalities; @@ -64,6 +64,7 @@ pub type Header = sp_runtime::generic::Header; pub type Block = sp_runtime::generic::Block; +#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] impl frame_system::Config for Runtime { type BaseCallFilter = frame_support::traits::Everything; type BlockWeights = (); diff --git a/substrate/frame/system/benches/bench.rs b/substrate/frame/system/benches/bench.rs index da8bb10fd4e..79d5a2d8689 100644 --- a/substrate/frame/system/benches/bench.rs +++ b/substrate/frame/system/benches/bench.rs @@ -16,7 +16,10 @@ // limitations under the License. use criterion::{black_box, criterion_group, criterion_main, Criterion}; -use frame_support::traits::{ConstU32, ConstU64}; +use frame_support::{ + derive_impl, + traits::{ConstU32, ConstU64}, +}; use sp_core::H256; use sp_runtime::{ traits::{BlakeTwo256, IdentityLookup}, @@ -57,6 +60,8 @@ frame_support::parameter_types! 
{ 4 * 1024 * 1024, Perbill::from_percent(75), ); } + +#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] impl frame_system::Config for Runtime { type BaseCallFilter = frame_support::traits::Everything; type BlockWeights = (); diff --git a/substrate/frame/system/benchmarking/src/mock.rs b/substrate/frame/system/benchmarking/src/mock.rs index 4e6b1221da3..9a81cddca14 100644 --- a/substrate/frame/system/benchmarking/src/mock.rs +++ b/substrate/frame/system/benchmarking/src/mock.rs @@ -20,6 +20,7 @@ #![cfg(test)] use codec::Encode; +use frame_support::derive_impl; use sp_runtime::{traits::IdentityLookup, BuildStorage}; type AccountId = u64; @@ -34,6 +35,7 @@ frame_support::construct_runtime!( } ); +#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] impl frame_system::Config for Test { type BaseCallFilter = frame_support::traits::Everything; type BlockWeights = (); diff --git a/substrate/frame/timestamp/src/mock.rs b/substrate/frame/timestamp/src/mock.rs index 418d257b3f0..b75bcaeb0e0 100644 --- a/substrate/frame/timestamp/src/mock.rs +++ b/substrate/frame/timestamp/src/mock.rs @@ -21,7 +21,7 @@ use super::*; use crate as pallet_timestamp; use frame_support::{ - parameter_types, + derive_impl, parameter_types, traits::{ConstU32, ConstU64}, }; use sp_core::H256; @@ -42,6 +42,7 @@ frame_support::construct_runtime!( } ); +#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] impl frame_system::Config for Test { type BaseCallFilter = frame_support::traits::Everything; type BlockWeights = (); diff --git a/substrate/frame/tips/src/tests.rs b/substrate/frame/tips/src/tests.rs index 189623b520e..d5c8e17a396 100644 --- a/substrate/frame/tips/src/tests.rs +++ b/substrate/frame/tips/src/tests.rs @@ -27,7 +27,7 @@ use sp_runtime::{ use sp_storage::Storage; use frame_support::{ - assert_noop, assert_ok, parameter_types, + assert_noop, assert_ok, derive_impl, parameter_types, storage::StoragePrefixedMap, traits::{ tokens::{PayFromAccount, UnityAssetBalanceConversion}, @@ -56,6 +56,8 @@ frame_support::construct_runtime!( parameter_types! { pub const AvailableBlockRatio: Perbill = Perbill::one(); } + +#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] impl frame_system::Config for Test { type BaseCallFilter = frame_support::traits::Everything; type BlockWeights = (); diff --git a/substrate/frame/transaction-payment/asset-conversion-tx-payment/src/mock.rs b/substrate/frame/transaction-payment/asset-conversion-tx-payment/src/mock.rs index 76c78fb4222..081e8e53db2 100644 --- a/substrate/frame/transaction-payment/asset-conversion-tx-payment/src/mock.rs +++ b/substrate/frame/transaction-payment/asset-conversion-tx-payment/src/mock.rs @@ -18,6 +18,7 @@ use crate as pallet_asset_conversion_tx_payment; use codec; use frame_support::{ + derive_impl, dispatch::DispatchClass, instances::Instance2, ord_parameter_types, @@ -78,6 +79,7 @@ parameter_types! 
{ pub static TransactionByteFee: u64 = 1; } +#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] impl frame_system::Config for Runtime { type BaseCallFilter = frame_support::traits::Everything; type BlockWeights = BlockWeights; diff --git a/substrate/frame/transaction-payment/asset-tx-payment/src/mock.rs b/substrate/frame/transaction-payment/asset-tx-payment/src/mock.rs index 5fa8a4ab27d..c9b00be8e2c 100644 --- a/substrate/frame/transaction-payment/asset-tx-payment/src/mock.rs +++ b/substrate/frame/transaction-payment/asset-tx-payment/src/mock.rs @@ -18,6 +18,7 @@ use crate as pallet_asset_tx_payment; use codec; use frame_support::{ + derive_impl, dispatch::DispatchClass, pallet_prelude::*, parameter_types, @@ -70,6 +71,7 @@ parameter_types! { pub static TransactionByteFee: u64 = 1; } +#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] impl frame_system::Config for Runtime { type BaseCallFilter = frame_support::traits::Everything; type BlockWeights = BlockWeights; diff --git a/substrate/frame/transaction-payment/src/mock.rs b/substrate/frame/transaction-payment/src/mock.rs index 419989bef12..d6686d44c80 100644 --- a/substrate/frame/transaction-payment/src/mock.rs +++ b/substrate/frame/transaction-payment/src/mock.rs @@ -22,6 +22,7 @@ use sp_core::H256; use sp_runtime::traits::{BlakeTwo256, IdentityLookup}; use frame_support::{ + derive_impl, dispatch::DispatchClass, parameter_types, traits::{ConstU32, ConstU64, Imbalance, OnUnbalanced}, @@ -69,6 +70,7 @@ parameter_types! { pub static OperationalFeeMultiplier: u8 = 5; } +#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] impl frame_system::Config for Runtime { type BaseCallFilter = frame_support::traits::Everything; type BlockWeights = BlockWeights; diff --git a/substrate/frame/treasury/src/tests.rs b/substrate/frame/treasury/src/tests.rs index 522ecf6b18f..093757b2770 100644 --- a/substrate/frame/treasury/src/tests.rs +++ b/substrate/frame/treasury/src/tests.rs @@ -27,7 +27,7 @@ use sp_runtime::{ }; use frame_support::{ - assert_err_ignore_postinfo, assert_noop, assert_ok, + assert_err_ignore_postinfo, assert_noop, assert_ok, derive_impl, pallet_prelude::Pays, parameter_types, traits::{ @@ -54,6 +54,7 @@ frame_support::construct_runtime!( } ); +#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] impl frame_system::Config for Test { type BaseCallFilter = frame_support::traits::Everything; type BlockWeights = (); diff --git a/substrate/frame/tx-pause/src/mock.rs b/substrate/frame/tx-pause/src/mock.rs index 66218c8c015..4f1c981abc6 100644 --- a/substrate/frame/tx-pause/src/mock.rs +++ b/substrate/frame/tx-pause/src/mock.rs @@ -23,7 +23,7 @@ use super::*; use crate as pallet_tx_pause; use frame_support::{ - parameter_types, + derive_impl, parameter_types, traits::{ConstU64, Everything, InsideBoth, InstanceFilter}, }; use frame_system::EnsureSignedBy; @@ -36,6 +36,7 @@ use sp_runtime::{ parameter_types! 
{ pub const BlockHashCount: u64 = 250; } +#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] impl frame_system::Config for Test { type BaseCallFilter = InsideBoth; type BlockWeights = (); diff --git a/substrate/frame/uniques/src/mock.rs b/substrate/frame/uniques/src/mock.rs index 1f62c3c4e93..056c19ec559 100644 --- a/substrate/frame/uniques/src/mock.rs +++ b/substrate/frame/uniques/src/mock.rs @@ -21,7 +21,7 @@ use super::*; use crate as pallet_uniques; use frame_support::{ - construct_runtime, + construct_runtime, derive_impl, traits::{AsEnsureOriginWithArg, ConstU32, ConstU64}, }; use sp_core::H256; @@ -41,6 +41,7 @@ construct_runtime!( } ); +#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] impl frame_system::Config for Test { type BaseCallFilter = frame_support::traits::Everything; type BlockWeights = (); diff --git a/substrate/frame/utility/src/tests.rs b/substrate/frame/utility/src/tests.rs index 01e3f5264bf..7b42fa511d1 100644 --- a/substrate/frame/utility/src/tests.rs +++ b/substrate/frame/utility/src/tests.rs @@ -23,7 +23,7 @@ use super::*; use crate as utility; use frame_support::{ - assert_err_ignore_postinfo, assert_noop, assert_ok, + assert_err_ignore_postinfo, assert_noop, assert_ok, derive_impl, dispatch::{DispatchErrorWithPostInfo, Pays}, error::BadOrigin, parameter_types, storage, @@ -144,6 +144,7 @@ parameter_types! { pub BlockWeights: frame_system::limits::BlockWeights = frame_system::limits::BlockWeights::simple_max(Weight::MAX); } +#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] impl frame_system::Config for Test { type BaseCallFilter = TestBaseCallFilter; type BlockWeights = BlockWeights; diff --git a/substrate/frame/vesting/src/mock.rs b/substrate/frame/vesting/src/mock.rs index 13d6d5ba57a..67444b8d125 100644 --- a/substrate/frame/vesting/src/mock.rs +++ b/substrate/frame/vesting/src/mock.rs @@ -16,7 +16,7 @@ // limitations under the License. 
use frame_support::{ - parameter_types, + derive_impl, parameter_types, traits::{ConstU32, ConstU64, WithdrawReasons}, }; use sp_core::H256; @@ -39,6 +39,7 @@ frame_support::construct_runtime!( } ); +#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] impl frame_system::Config for Test { type AccountData = pallet_balances::AccountData; type AccountId = u64; diff --git a/substrate/frame/whitelist/src/mock.rs b/substrate/frame/whitelist/src/mock.rs index 4e70a503c28..200e589c6aa 100644 --- a/substrate/frame/whitelist/src/mock.rs +++ b/substrate/frame/whitelist/src/mock.rs @@ -22,7 +22,7 @@ use crate as pallet_whitelist; use frame_support::{ - construct_runtime, + construct_runtime, derive_impl, traits::{ConstU32, ConstU64, Nothing}, }; use frame_system::EnsureRoot; @@ -44,6 +44,7 @@ construct_runtime!( } ); +#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] impl frame_system::Config for Test { type BaseCallFilter = Nothing; type BlockWeights = (); diff --git a/substrate/utils/frame/rpc/support/src/lib.rs b/substrate/utils/frame/rpc/support/src/lib.rs index 2d8e45cbfc6..b91ae436127 100644 --- a/substrate/utils/frame/rpc/support/src/lib.rs +++ b/substrate/utils/frame/rpc/support/src/lib.rs @@ -34,7 +34,7 @@ use sp_storage::{StorageData, StorageKey}; /// # use jsonrpsee::core::Error as RpcError; /// # use jsonrpsee::ws_client::WsClientBuilder; /// # use codec::Encode; -/// # use frame_support::{construct_runtime, traits::ConstU32}; +/// # use frame_support::{construct_runtime, derive_impl, traits::ConstU32}; /// # use substrate_frame_rpc_support::StorageQuery; /// # use sc_rpc_api::state::StateApiClient; /// # use sp_runtime::{traits::{BlakeTwo256, IdentityLookup}, testing::Header}; @@ -49,6 +49,7 @@ use sp_storage::{StorageData, StorageKey}; /// # /// # type Hash = sp_core::H256; /// # +/// # #[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] /// # impl frame_system::Config for TestRuntime { /// # type BaseCallFilter = (); /// # type BlockWeights = (); -- GitLab From dbd8d20b25b29abf138e3ad35c40df4c55bd7c5c Mon Sep 17 00:00:00 2001 From: Marcin S Date: Tue, 28 Nov 2023 14:23:25 +0100 Subject: [PATCH 142/199] PVF: Add test instructions (#2058) --- polkadot/doc/testing.md | 91 ++++++++++++++----- polkadot/node/core/pvf/README.md | 47 ++++++++++ ...executor_intf.rs => executor_interface.rs} | 6 +- polkadot/node/core/pvf/common/src/lib.rs | 2 +- .../node/core/pvf/execute-worker/src/lib.rs | 4 +- .../benches/prepare_rococo_runtime.rs | 2 +- .../node/core/pvf/prepare-worker/src/lib.rs | 4 +- polkadot/node/core/pvf/src/execute/mod.rs | 2 +- polkadot/node/core/pvf/src/execute/queue.rs | 8 +- .../{worker_intf.rs => worker_interface.rs} | 2 +- polkadot/node/core/pvf/src/lib.rs | 4 +- polkadot/node/core/pvf/src/prepare/mod.rs | 2 +- polkadot/node/core/pvf/src/prepare/pool.rs | 8 +- .../{worker_intf.rs => worker_interface.rs} | 2 +- polkadot/node/core/pvf/src/testing.rs | 4 +- .../{worker_intf.rs => worker_interface.rs} | 0 polkadot/node/core/pvf/tests/README.md | 9 -- .../implementers-guide/src/glossary.md | 9 +- 18 files changed, 146 insertions(+), 60 deletions(-) create mode 100644 polkadot/node/core/pvf/README.md rename polkadot/node/core/pvf/common/src/{executor_intf.rs => executor_interface.rs} (98%) rename polkadot/node/core/pvf/src/execute/{worker_intf.rs => worker_interface.rs} (99%) rename polkadot/node/core/pvf/src/prepare/{worker_intf.rs => worker_interface.rs} 
(99%) rename polkadot/node/core/pvf/src/{worker_intf.rs => worker_interface.rs} (100%) delete mode 100644 polkadot/node/core/pvf/tests/README.md diff --git a/polkadot/doc/testing.md b/polkadot/doc/testing.md index 1045303baf0..76703b1b439 100644 --- a/polkadot/doc/testing.md +++ b/polkadot/doc/testing.md @@ -1,6 +1,7 @@ # Testing -Automated testing is an essential tool to assure correctness. +Testing is an essential tool to assure correctness. This document describes how we test the Polkadot code, whether +locally, at scale, and/or automatically in CI. ## Scopes @@ -8,27 +9,57 @@ The testing strategy for Polkadot is 4-fold: ### Unit testing (1) -Boring, small scale correctness tests of individual functions. +Boring, small scale correctness tests of individual functions. It is usually +enough to run `cargo test` in the crate you are testing. + +For full coverage you may have to pass some additional features. For example: + +```sh +cargo test --features ci-only-tests +``` ### Integration tests -There are two variants of integration tests: +There are the following variants of integration tests: #### Subsystem tests (2) One particular subsystem (subsystem under test) interacts with a mocked overseer that is made to assert incoming and -outgoing messages of the subsystem under test. This is largely present today, but has some fragmentation in the evolved -integration test implementation. A `proc-macro`/`macro_rules` would allow for more consistent implementation and -structure. +outgoing messages of the subsystem under test. See e.g. the `statement-distribution` tests. #### Behavior tests (3) -Launching small scale networks, with multiple adversarial nodes without any further tooling required. This should -include tests around the thresholds in order to evaluate the error handling once certain assumed invariants fail. +Launching small scale networks, with multiple adversarial nodes. This should include tests around the thresholds in +order to evaluate the error handling once certain assumed invariants fail. + +Currently, we commonly use **zombienet** to run mini test-networks, whether locally or in CI. To run on your machine: + +- First, make sure you have [zombienet][zombienet] installed. + +- Now, all the required binaries must be installed in your $PATH. You must run the following from the `polkadot/` +directory in order to test your changes. (Not `zombienet setup`, or you will get the released binaries without your +local changes!) -For this purpose based on `AllSubsystems` and `proc-macro` `AllSubsystemsGen`. +```sh +cargo install --path . --locked +``` + +- You will also need to install whatever binaries are required for your specific tests. For example, to install +`undying-collator`, from `polkadot/`, run: + +```sh +cargo install --path ./parachain/test-parachains/undying/collator --locked +``` -This assumes a simplistic test runtime. +- Finally, run the zombienet test from the `polkadot` directory: + +```sh +RUST_LOG=parachain::pvf=trace zombienet --provider=native spawn zombienet_tests/functional/0001-parachains-pvf.toml +``` + +- You can pick a validator node like `alice` from the output and view its logs +(`tail -f `) or metrics. Make sure there is nothing funny in the logs +(try `grep WARN `). #### Testing at scale (4) @@ -41,13 +72,27 @@ addition prometheus avoiding additional Polkadot source changes. _Behavior tests_ and _testing at scale_ have naturally soft boundary. 
The most significant difference is the presence of a real network and the number of nodes, since a single host often not capable to run multiple nodes at once. ---- +## Observing Logs + +To verify expected behavior it's often useful to observe logs. To avoid too many +logs at once, you can run one test at a time: + +1. Add `sp_tracing::try_init_simple();` to the beginning of a test +2. Specify `RUST_LOG=::=trace` before the cargo command. + +For example: + +```sh +RUST_LOG=parachain::pvf=trace cargo test execute_can_run_serially +``` + +For more info on how our logs work, check [the docs][logs]. ## Coverage Coverage gives a _hint_ of the actually covered source lines by tests and test applications. -The state of the art is currently [tarpaulin][tarpaulin] which unfortunately yields a lot of false negatives. Lines that +The state of the art is currently tarpaulin which unfortunately yields a lot of false negatives. Lines that are in fact covered, marked as uncovered due to a mere linebreak in a statement can cause these artifacts. This leads to lower coverage percentages than there actually is. @@ -102,7 +147,7 @@ Fuzzing is an approach to verify correctness against arbitrary or partially stru Currently implemented fuzzing targets: -* `erasure-coding` +- `erasure-coding` The tooling of choice here is `honggfuzz-rs` as it allows _fastest_ coverage according to "some paper" which is a positive feature when run as part of PRs. @@ -113,16 +158,16 @@ hence simply not feasible due to the amount of state that is required. Other candidates to implement fuzzing are: -* `rpc` -* ... +- `rpc` +- ... ## Performance metrics There are various ways of performance metrics. -* timing with `criterion` -* cache hits/misses w/ `iai` harness or `criterion-perf` -* `coz` a performance based compiler +- timing with `criterion` +- cache hits/misses w/ `iai` harness or `criterion-perf` +- `coz` a performance based compiler Most of them are standard tools to aid in the creation of statistical tests regarding change in time of certain unit tests. @@ -140,10 +185,10 @@ pursued at the current time. Requirements: -* spawn nodes with preconfigured behaviors -* allow multiple types of configuration to be specified -* allow extendability via external crates -* ... +- spawn nodes with preconfigured behaviors +- allow multiple types of configuration to be specified +- allow extendability via external crates +- ... --- @@ -251,5 +296,7 @@ behavior_testcase!{ } ``` +[zombienet]: https://github.com/paritytech/zombienet [Gurke]: https://github.com/paritytech/gurke [simnet]: https://github.com/paritytech/simnet_scripts +[logs]: https://github.com/paritytech/polkadot-sdk/blob/master/polkadot/node/gum/src/lib.rs diff --git a/polkadot/node/core/pvf/README.md b/polkadot/node/core/pvf/README.md new file mode 100644 index 00000000000..796e17c05fa --- /dev/null +++ b/polkadot/node/core/pvf/README.md @@ -0,0 +1,47 @@ +# PVF Host + +This is the PVF host, responsible for responding to requests from Candidate +Validation and spawning worker tasks to fulfill those requests. + +See also: + +- for more information: [the Implementer's Guide][impl-guide] +- for an explanation of terminology: [the Glossary][glossary] + +## Running basic tests + +Running `cargo test` in the `pvf/` directory will run unit and integration +tests. + +**Note:** some tests run only under Linux, amd64, and/or with the +`ci-only-tests` feature enabled. + +See the general [Testing][testing] instructions for more information on +**running tests** and **observing logs**. 
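For concreteness, running the basic tests described above from the workspace root looks roughly like the following sketch; the `ci-only-tests` feature is the one mentioned above, while the package name `polkadot-node-core-pvf` is assumed here for illustration:

```sh
# Run the PVF host unit and integration tests
# (equivalent to running `cargo test` inside `polkadot/node/core/pvf/`).
cargo test -p polkadot-node-core-pvf

# Also enable the tests gated behind the `ci-only-tests` feature
# (some of these only run on Linux amd64).
cargo test -p polkadot-node-core-pvf --features ci-only-tests
```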
+ +## Running a test-network with zombienet + +Since this crate is consensus-critical, for major changes it is highly +recommended to run a test-network. See the "Behavior tests" section of the +[Testing][testing] docs for full instructions. + +To run the PVF-specific zombienet test: + +```sh +RUST_LOG=parachain::pvf=trace zombienet --provider=native spawn zombienet_tests/functional/0001-parachains-pvf.toml +``` + +## Testing on Linux + +Some of the PVF functionality, especially related to security, is Linux-only, +and some is amd64-only. If you touch anything security-related, make sure to +test on Linux amd64! If you're on a Mac, you can either run a VM or you can hire +a VPS and use the open-source tool [EternalTerminal][et] to connect to it.[^et] + +[^et]: Unlike ssh, ET preserves your session across disconnects, and unlike +another popular persistent shell, mosh, it allows scrollback. + +[impl-guide]: https://paritytech.github.io/polkadot-sdk/book/pvf-prechecking.html#summary +[glossary]: https://paritytech.github.io/polkadot-sdk/book/glossary.html +[testing]: https://github.com/paritytech/polkadot-sdk/blob/master/polkadot/doc/testing.md +[et]: https://github.com/MisterTea/EternalTerminal diff --git a/polkadot/node/core/pvf/common/src/executor_intf.rs b/polkadot/node/core/pvf/common/src/executor_interface.rs similarity index 98% rename from polkadot/node/core/pvf/common/src/executor_intf.rs rename to polkadot/node/core/pvf/common/src/executor_interface.rs index 4bafc3f4291..e634940dbe6 100644 --- a/polkadot/node/core/pvf/common/src/executor_intf.rs +++ b/polkadot/node/core/pvf/common/src/executor_interface.rs @@ -175,11 +175,9 @@ pub fn params_to_wasmtime_semantics(par: &ExecutorParams) -> Semantics { /// Runs the prevalidation on the given code. Returns a [`RuntimeBlob`] if it succeeds. pub fn prevalidate(code: &[u8]) -> Result { + // Construct the runtime blob and do some basic checks for consistency. let blob = RuntimeBlob::new(code)?; - // It's assumed this function will take care of any prevalidation logic - // that needs to be done. - // - // Do nothing for now. + // In the future this function should take care of any further prevalidation logic. Ok(blob) } diff --git a/polkadot/node/core/pvf/common/src/lib.rs b/polkadot/node/core/pvf/common/src/lib.rs index c041c60aaf9..dced43ef213 100644 --- a/polkadot/node/core/pvf/common/src/lib.rs +++ b/polkadot/node/core/pvf/common/src/lib.rs @@ -18,7 +18,7 @@ pub mod error; pub mod execute; -pub mod executor_intf; +pub mod executor_interface; pub mod prepare; pub mod pvf; pub mod worker; diff --git a/polkadot/node/core/pvf/execute-worker/src/lib.rs b/polkadot/node/core/pvf/execute-worker/src/lib.rs index d82a8fca65d..8c985fc7996 100644 --- a/polkadot/node/core/pvf/execute-worker/src/lib.rs +++ b/polkadot/node/core/pvf/execute-worker/src/lib.rs @@ -17,7 +17,7 @@ //! Contains the logic for executing PVFs. Used by the polkadot-execute-worker binary. pub use polkadot_node_core_pvf_common::{ - executor_intf::execute_artifact, worker_dir, SecurityStatus, + executor_interface::execute_artifact, worker_dir, SecurityStatus, }; // NOTE: Initializing logging in e.g. tests will not have an effect in the workers, as they are @@ -236,7 +236,7 @@ fn validate_using_artifact( let descriptor_bytes = match unsafe { // SAFETY: this should be safe since the compiled artifact passed here comes from the // file created by the prepare workers. These files are obtained by calling - // [`executor_intf::prepare`]. + // [`executor_interface::prepare`]. 
execute_artifact(compiled_artifact_blob, executor_params, params) } { Err(err) => return JobResponse::format_invalid("execute", &err), diff --git a/polkadot/node/core/pvf/prepare-worker/benches/prepare_rococo_runtime.rs b/polkadot/node/core/pvf/prepare-worker/benches/prepare_rococo_runtime.rs index ba2568cd80c..d531c90b64b 100644 --- a/polkadot/node/core/pvf/prepare-worker/benches/prepare_rococo_runtime.rs +++ b/polkadot/node/core/pvf/prepare-worker/benches/prepare_rococo_runtime.rs @@ -16,7 +16,7 @@ use criterion::{criterion_group, criterion_main, Criterion, SamplingMode}; use polkadot_node_core_pvf_common::{ - executor_intf::{prepare, prevalidate}, + executor_interface::{prepare, prevalidate}, prepare::PrepareJobKind, pvf::PvfPrepData, }; diff --git a/polkadot/node/core/pvf/prepare-worker/src/lib.rs b/polkadot/node/core/pvf/prepare-worker/src/lib.rs index 499e87c6e20..b3f8a6b11ba 100644 --- a/polkadot/node/core/pvf/prepare-worker/src/lib.rs +++ b/polkadot/node/core/pvf/prepare-worker/src/lib.rs @@ -18,7 +18,7 @@ mod memory_stats; -use polkadot_node_core_pvf_common::executor_intf::{prepare, prevalidate}; +use polkadot_node_core_pvf_common::executor_interface::{prepare, prevalidate}; // NOTE: Initializing logging in e.g. tests will not have an effect in the workers, as they are // separate spawned processes. Run with e.g. `RUST_LOG=parachain::pvf-prepare-worker=trace`. @@ -41,7 +41,7 @@ use os_pipe::{self, PipeReader, PipeWriter}; use parity_scale_codec::{Decode, Encode}; use polkadot_node_core_pvf_common::{ error::{PrepareError, PrepareWorkerResult}, - executor_intf::create_runtime_from_artifact_bytes, + executor_interface::create_runtime_from_artifact_bytes, framed_recv_blocking, framed_send_blocking, prepare::{MemoryStats, PrepareJobKind, PrepareStats, PrepareWorkerSuccess}, pvf::PvfPrepData, diff --git a/polkadot/node/core/pvf/src/execute/mod.rs b/polkadot/node/core/pvf/src/execute/mod.rs index 669b9dc04d7..c6d9cf90fa2 100644 --- a/polkadot/node/core/pvf/src/execute/mod.rs +++ b/polkadot/node/core/pvf/src/execute/mod.rs @@ -21,6 +21,6 @@ //! `polkadot_node_core_pvf_worker::execute_worker_entrypoint`. mod queue; -mod worker_intf; +mod worker_interface; pub use queue::{start, PendingExecutionRequest, ToQueue}; diff --git a/polkadot/node/core/pvf/src/execute/queue.rs b/polkadot/node/core/pvf/src/execute/queue.rs index a0c24fd4432..be607fe1c20 100644 --- a/polkadot/node/core/pvf/src/execute/queue.rs +++ b/polkadot/node/core/pvf/src/execute/queue.rs @@ -16,12 +16,12 @@ //! A queue that handles requests for PVF execution. 
-use super::worker_intf::Outcome; +use super::worker_interface::Outcome; use crate::{ artifacts::{ArtifactId, ArtifactPathId}, host::ResultSender, metrics::Metrics, - worker_intf::{IdleWorker, WorkerHandle}, + worker_interface::{IdleWorker, WorkerHandle}, InvalidCandidate, PossiblyInvalidError, ValidationError, LOG_TARGET, }; use futures::{ @@ -448,7 +448,7 @@ async fn spawn_worker_task( use futures_timer::Delay; loop { - match super::worker_intf::spawn( + match super::worker_interface::spawn( &program_path, &cache_path, job.executor_params.clone(), @@ -500,7 +500,7 @@ fn assign(queue: &mut Queue, worker: Worker, job: ExecuteJob) { queue.mux.push( async move { let _timer = execution_timer; - let outcome = super::worker_intf::start_work( + let outcome = super::worker_interface::start_work( idle, job.artifact.clone(), job.exec_timeout, diff --git a/polkadot/node/core/pvf/src/execute/worker_intf.rs b/polkadot/node/core/pvf/src/execute/worker_interface.rs similarity index 99% rename from polkadot/node/core/pvf/src/execute/worker_intf.rs rename to polkadot/node/core/pvf/src/execute/worker_interface.rs index 16a7c290b52..7864ce548c2 100644 --- a/polkadot/node/core/pvf/src/execute/worker_intf.rs +++ b/polkadot/node/core/pvf/src/execute/worker_interface.rs @@ -18,7 +18,7 @@ use crate::{ artifacts::ArtifactPathId, - worker_intf::{ + worker_interface::{ clear_worker_dir_path, framed_recv, framed_send, spawn_with_program_path, IdleWorker, SpawnErr, WorkerDir, WorkerHandle, JOB_TIMEOUT_WALL_CLOCK_FACTOR, }, diff --git a/polkadot/node/core/pvf/src/lib.rs b/polkadot/node/core/pvf/src/lib.rs index 3306a2461c1..79391630b2d 100644 --- a/polkadot/node/core/pvf/src/lib.rs +++ b/polkadot/node/core/pvf/src/lib.rs @@ -98,7 +98,7 @@ mod metrics; mod prepare; mod priority; mod security; -mod worker_intf; +mod worker_interface; #[cfg(feature = "test-utils")] pub mod testing; @@ -107,7 +107,7 @@ pub use error::{InvalidCandidate, PossiblyInvalidError, ValidationError}; pub use host::{start, Config, ValidationHost, EXECUTE_BINARY_NAME, PREPARE_BINARY_NAME}; pub use metrics::Metrics; pub use priority::Priority; -pub use worker_intf::{framed_recv, framed_send, JOB_TIMEOUT_WALL_CLOCK_FACTOR}; +pub use worker_interface::{framed_recv, framed_send, JOB_TIMEOUT_WALL_CLOCK_FACTOR}; // Re-export some common types. pub use polkadot_node_core_pvf_common::{ diff --git a/polkadot/node/core/pvf/src/prepare/mod.rs b/polkadot/node/core/pvf/src/prepare/mod.rs index 580f67f73fa..eb88070c2ba 100644 --- a/polkadot/node/core/pvf/src/prepare/mod.rs +++ b/polkadot/node/core/pvf/src/prepare/mod.rs @@ -24,7 +24,7 @@ mod pool; mod queue; -mod worker_intf; +mod worker_interface; pub use pool::start as start_pool; pub use queue::{start as start_queue, FromQueue, ToQueue}; diff --git a/polkadot/node/core/pvf/src/prepare/pool.rs b/polkadot/node/core/pvf/src/prepare/pool.rs index 4901be9fe1b..4e11f977c9e 100644 --- a/polkadot/node/core/pvf/src/prepare/pool.rs +++ b/polkadot/node/core/pvf/src/prepare/pool.rs @@ -14,10 +14,10 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . 
-use super::worker_intf::{self, Outcome}; +use super::worker_interface::{self, Outcome}; use crate::{ metrics::Metrics, - worker_intf::{IdleWorker, WorkerHandle}, + worker_interface::{IdleWorker, WorkerHandle}, LOG_TARGET, }; use always_assert::never; @@ -278,7 +278,7 @@ async fn spawn_worker_task( use futures_timer::Delay; loop { - match worker_intf::spawn( + match worker_interface::spawn( &program_path, &cache_path, spawn_timeout, @@ -306,7 +306,7 @@ async fn start_work_task( cache_path: PathBuf, _preparation_timer: Option, ) -> PoolEvent { - let outcome = worker_intf::start_work(&metrics, idle, pvf, cache_path).await; + let outcome = worker_interface::start_work(&metrics, idle, pvf, cache_path).await; PoolEvent::StartWork(worker, outcome) } diff --git a/polkadot/node/core/pvf/src/prepare/worker_intf.rs b/polkadot/node/core/pvf/src/prepare/worker_interface.rs similarity index 99% rename from polkadot/node/core/pvf/src/prepare/worker_intf.rs rename to polkadot/node/core/pvf/src/prepare/worker_interface.rs index a393e9baa9e..984a87ce5c9 100644 --- a/polkadot/node/core/pvf/src/prepare/worker_intf.rs +++ b/polkadot/node/core/pvf/src/prepare/worker_interface.rs @@ -19,7 +19,7 @@ use crate::{ artifacts::ArtifactId, metrics::Metrics, - worker_intf::{ + worker_interface::{ clear_worker_dir_path, framed_recv, framed_send, spawn_with_program_path, IdleWorker, SpawnErr, WorkerDir, WorkerHandle, JOB_TIMEOUT_WALL_CLOCK_FACTOR, }, diff --git a/polkadot/node/core/pvf/src/testing.rs b/polkadot/node/core/pvf/src/testing.rs index c7c885c4342..60b0b4b8d3d 100644 --- a/polkadot/node/core/pvf/src/testing.rs +++ b/polkadot/node/core/pvf/src/testing.rs @@ -18,7 +18,7 @@ pub use crate::{ host::{EXECUTE_BINARY_NAME, PREPARE_BINARY_NAME}, - worker_intf::{spawn_with_program_path, SpawnErr}, + worker_interface::{spawn_with_program_path, SpawnErr}, }; use crate::get_worker_version; @@ -36,7 +36,7 @@ pub fn validate_candidate( code: &[u8], params: &[u8], ) -> Result, Box> { - use polkadot_node_core_pvf_common::executor_intf::{prepare, prevalidate}; + use polkadot_node_core_pvf_common::executor_interface::{prepare, prevalidate}; use polkadot_node_core_pvf_execute_worker::execute_artifact; let code = sp_maybe_compressed_blob::decompress(code, 10 * 1024 * 1024) diff --git a/polkadot/node/core/pvf/src/worker_intf.rs b/polkadot/node/core/pvf/src/worker_interface.rs similarity index 100% rename from polkadot/node/core/pvf/src/worker_intf.rs rename to polkadot/node/core/pvf/src/worker_interface.rs diff --git a/polkadot/node/core/pvf/tests/README.md b/polkadot/node/core/pvf/tests/README.md deleted file mode 100644 index 27385e19025..00000000000 --- a/polkadot/node/core/pvf/tests/README.md +++ /dev/null @@ -1,9 +0,0 @@ -# PVF host integration tests - -## Testing - -Before running these tests, make sure the worker binaries are built first. This can be done with: - -```sh -cargo build --bin polkadot-execute-worker --bin polkadot-prepare-worker -``` diff --git a/polkadot/roadmap/implementers-guide/src/glossary.md b/polkadot/roadmap/implementers-guide/src/glossary.md index b2365ba51c5..ac2392b14d2 100644 --- a/polkadot/roadmap/implementers-guide/src/glossary.md +++ b/polkadot/roadmap/implementers-guide/src/glossary.md @@ -48,10 +48,13 @@ has exactly one downward message queue. - **Proof-of-Validity (PoV):** A stateless-client proof that a parachain candidate is valid, with respect to some validation function. - **PVF:** Parachain Validation Function. The validation code that is run by validators on parachains. 
-- **PVF Prechecking:** This is the process of initially checking the PVF when it is first added. We attempt preparation - of the PVF and make sure it succeeds within a given timeout, plus some additional checks. +- **PVF Prechecking:** This is the process of checking a PVF when it appears + on-chain, either when the parachain is onboarded or when it signalled an + upgrade of its validation code. We attempt preparation of the PVF and make + sure it that succeeds within a given timeout, plus some additional checks. - **PVF Preparation:** This is the process of preparing the WASM blob and includes both prevalidation and compilation. - As there is no prevalidation right now, preparation just consists of compilation. +- **PVF Prevalidation:** Some basic checks for correctness of the PVF blob. The + first step of PVF preparation, before compilation. - **Relay Parent:** A block in the relay chain, referred to in a context where work is being done in the context of the state at this block. - **Runtime:** The relay-chain state machine. -- GitLab From f01781a9021d1a95db9e77174b94745b8bed244c Mon Sep 17 00:00:00 2001 From: Koute Date: Tue, 28 Nov 2023 22:30:02 +0900 Subject: [PATCH 143/199] Remove `wasm-builder`'s README (#2525) Followup of https://github.com/paritytech/polkadot-sdk/pull/2217 This PR deletes the README of the `wasm-builder` crate and moves its docs back into the rustdoc, [as requested here](https://github.com/paritytech/polkadot-sdk/pull/2217#discussion_r1406401175). (: --- substrate/utils/wasm-builder/Cargo.toml | 1 - substrate/utils/wasm-builder/README.md | 92 ----------------------- substrate/utils/wasm-builder/src/lib.rs | 97 ++++++++++++++++++++++++- 3 files changed, 96 insertions(+), 94 deletions(-) delete mode 100644 substrate/utils/wasm-builder/README.md diff --git a/substrate/utils/wasm-builder/Cargo.toml b/substrate/utils/wasm-builder/Cargo.toml index 2550122ad4b..fc3f6450fd7 100644 --- a/substrate/utils/wasm-builder/Cargo.toml +++ b/substrate/utils/wasm-builder/Cargo.toml @@ -4,7 +4,6 @@ version = "5.0.0-dev" authors.workspace = true description = "Utility for building WASM binaries" edition.workspace = true -readme = "README.md" repository.workspace = true license = "Apache-2.0" homepage = "https://substrate.io" diff --git a/substrate/utils/wasm-builder/README.md b/substrate/utils/wasm-builder/README.md deleted file mode 100644 index 6dfc743816c..00000000000 --- a/substrate/utils/wasm-builder/README.md +++ /dev/null @@ -1,92 +0,0 @@ -# Wasm builder is a utility for building a project as a Wasm binary - -The Wasm builder is a tool that integrates the process of building the WASM binary of your project into the main `cargo` -build process. - -## Project setup - -A project that should be compiled as a Wasm binary needs to: - -1. Add a `build.rs` file. -2. Add `wasm-builder` as dependency into `build-dependencies` (can be made optional and only enabled when `std` feature - is used). - -The `build.rs` file needs to contain the following code: - -```rust,no_run -fn main() { - #[cfg(feature = "std")] - { - substrate_wasm_builder::WasmBuilder::new() - // Tell the builder to build the project (crate) this `build.rs` is part of. - .with_current_project() - // Make sure to export the `heap_base` global, this is required by Substrate - .export_heap_base() - // Build the Wasm file so that it imports the memory (need to be provided by at instantiation) - .import_memory() - // Build it. 
- .build(); - } -} -``` - -As the final step, you need to add the following to your project: - -```rust,ignore -include!(concat!(env!("OUT_DIR"), "/wasm_binary.rs")); -``` - -This will include the generated Wasm binary as two constants `WASM_BINARY` and `WASM_BINARY_BLOATY`. The former is a -compact Wasm binary and the latter is the Wasm binary as being generated by the compiler. Both variables have -`Option<&'static [u8]>` as type. - -### Features - -Wasm builder supports to enable cargo features while building the Wasm binary. By default it will enable all features in -the wasm build that are enabled for the native build except the `default` and `std` features. Besides that, wasm builder -supports the special `runtime-wasm` feature. This `runtime-wasm` feature will be enabled by the wasm builder when it -compiles the Wasm binary. If this feature is not present, it will not be enabled. - -## Environment variables - -By using environment variables, you can configure which Wasm binaries are built and how: - -- `SKIP_WASM_BUILD` - Skips building any Wasm binary. This is useful when only native should be recompiled. If this is - the first run and there doesn't exist a Wasm binary, this will set both variables to `None`. -- `WASM_BUILD_TYPE` - Sets the build type for building Wasm binaries. Supported values are `release` or `debug`. By - default the build type is equal to the build type used by the main build. -- `FORCE_WASM_BUILD` - Can be set to force a Wasm build. On subsequent calls the value of the variable needs to change. - As wasm-builder instructs `cargo` to watch for file changes this environment variable should only - be required in certain circumstances. -- `WASM_BUILD_RUSTFLAGS` - Extend `RUSTFLAGS` given to `cargo build` while building the wasm binary. -- `WASM_BUILD_NO_COLOR` - Disable color output of the wasm build. -- `WASM_TARGET_DIRECTORY` - Will copy any build Wasm binary to the given directory. The path needs to be absolute. -- `WASM_BUILD_TOOLCHAIN` - The toolchain that should be used to build the Wasm binaries. The format needs to be the same - as used by cargo, e.g. `nightly-2020-02-20`. -- `WASM_BUILD_WORKSPACE_HINT` - Hint the workspace that is being built. This is normally not required as we walk up from - the target directory until we find a `Cargo.toml`. If the target directory is changed for - the build, this environment variable can be used to point to the actual workspace. -- `WASM_BUILD_STD` - Sets whether the Rust's standard library crates will also be built. This is necessary to make sure - the standard library crates only use the exact WASM feature set that our executor supports. - Enabled by default. -- `CARGO_NET_OFFLINE` - If `true`, `--offline` will be passed to all processes launched to prevent network access. - Useful in offline environments. - -Each project can be skipped individually by using the environment variable `SKIP_PROJECT_NAME_WASM_BUILD`. Where -`PROJECT_NAME` needs to be replaced by the name of the cargo project, e.g. `kitchensink-runtime` will be `NODE_RUNTIME`. - -## Prerequisites - -Wasm builder requires the following prerequisites for building the Wasm binary: - -- rust nightly + `wasm32-unknown-unknown` toolchain - -or - -- rust stable and version at least 1.68.0 + `wasm32-unknown-unknown` toolchain - -If a specific rust is installed with `rustup`, it is important that the wasm target is installed as well. 
For example if -installing the rust from 20.02.2020 using `rustup install nightly-2020-02-20`, the wasm target needs to be installed as -well `rustup target add wasm32-unknown-unknown --toolchain nightly-2020-02-20`. - -License: Apache-2.0 diff --git a/substrate/utils/wasm-builder/src/lib.rs b/substrate/utils/wasm-builder/src/lib.rs index cd31a8cba10..ec85fd1ffdd 100644 --- a/substrate/utils/wasm-builder/src/lib.rs +++ b/substrate/utils/wasm-builder/src/lib.rs @@ -15,7 +15,102 @@ // See the License for the specific language governing permissions and // limitations under the License. -#![doc = include_str!("../README.md")] +//! # Wasm builder is a utility for building a project as a Wasm binary +//! +//! The Wasm builder is a tool that integrates the process of building the WASM binary of your +//! project into the main `cargo` build process. +//! +//! ## Project setup +//! +//! A project that should be compiled as a Wasm binary needs to: +//! +//! 1. Add a `build.rs` file. +//! 2. Add `wasm-builder` as dependency into `build-dependencies`. +//! +//! The `build.rs` file needs to contain the following code: +//! +//! ```no_run +//! use substrate_wasm_builder::WasmBuilder; +//! +//! fn main() { +//! WasmBuilder::new() +//! // Tell the builder to build the project (crate) this `build.rs` is part of. +//! .with_current_project() +//! // Make sure to export the `heap_base` global, this is required by Substrate +//! .export_heap_base() +//! // Build the Wasm file so that it imports the memory (need to be provided by at instantiation) +//! .import_memory() +//! // Build it. +//! .build() +//! } +//! ``` +//! +//! As the final step, you need to add the following to your project: +//! +//! ```ignore +//! include!(concat!(env!("OUT_DIR"), "/wasm_binary.rs")); +//! ``` +//! +//! This will include the generated Wasm binary as two constants `WASM_BINARY` and +//! `WASM_BINARY_BLOATY`. The former is a compact Wasm binary and the latter is the Wasm binary as +//! being generated by the compiler. Both variables have `Option<&'static [u8]>` as type. +//! +//! ### Feature +//! +//! Wasm builder supports to enable cargo features while building the Wasm binary. By default it +//! will enable all features in the wasm build that are enabled for the native build except the +//! `default` and `std` features. Besides that, wasm builder supports the special `runtime-wasm` +//! feature. This `runtime-wasm` feature will be enabled by the wasm builder when it compiles the +//! Wasm binary. If this feature is not present, it will not be enabled. +//! +//! ## Environment variables +//! +//! By using environment variables, you can configure which Wasm binaries are built and how: +//! +//! - `SKIP_WASM_BUILD` - Skips building any Wasm binary. This is useful when only native should be +//! recompiled. If this is the first run and there doesn't exist a Wasm binary, this will set both +//! variables to `None`. +//! - `WASM_BUILD_TYPE` - Sets the build type for building Wasm binaries. Supported values are +//! `release` or `debug`. By default the build type is equal to the build type used by the main +//! build. +//! - `FORCE_WASM_BUILD` - Can be set to force a Wasm build. On subsequent calls the value of the +//! variable needs to change. As wasm-builder instructs `cargo` to watch for file changes this +//! environment variable should only be required in certain circumstances. +//! - `WASM_BUILD_RUSTFLAGS` - Extend `RUSTFLAGS` given to `cargo build` while building the wasm +//! binary. +//! 
- `WASM_BUILD_NO_COLOR` - Disable color output of the wasm build. +//! - `WASM_TARGET_DIRECTORY` - Will copy any build Wasm binary to the given directory. The path +//! needs to be absolute. +//! - `WASM_BUILD_TOOLCHAIN` - The toolchain that should be used to build the Wasm binaries. The +//! format needs to be the same as used by cargo, e.g. `nightly-2020-02-20`. +//! - `WASM_BUILD_WORKSPACE_HINT` - Hint the workspace that is being built. This is normally not +//! required as we walk up from the target directory until we find a `Cargo.toml`. If the target +//! directory is changed for the build, this environment variable can be used to point to the +//! actual workspace. +//! - `WASM_BUILD_STD` - Sets whether the Rust's standard library crates will also be built. This is +//! necessary to make sure the standard library crates only use the exact WASM feature set that +//! our executor supports. Enabled by default. +//! - `CARGO_NET_OFFLINE` - If `true`, `--offline` will be passed to all processes launched to +//! prevent network access. Useful in offline environments. +//! +//! Each project can be skipped individually by using the environment variable +//! `SKIP_PROJECT_NAME_WASM_BUILD`. Where `PROJECT_NAME` needs to be replaced by the name of the +//! cargo project, e.g. `kitchensink-runtime` will be `NODE_RUNTIME`. +//! +//! ## Prerequisites: +//! +//! Wasm builder requires the following prerequisites for building the Wasm binary: +//! +//! - rust nightly + `wasm32-unknown-unknown` toolchain +//! +//! or +//! +//! - rust stable and version at least 1.68.0 + `wasm32-unknown-unknown` toolchain +//! +//! If a specific rust is installed with `rustup`, it is important that the wasm target is +//! installed as well. For example if installing the rust from 20.02.2020 using `rustup +//! install nightly-2020-02-20`, the wasm target needs to be installed as well `rustup target add +//! wasm32-unknown-unknown --toolchain nightly-2020-02-20`. 
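As a rough illustration of how the environment variables documented above are used in practice (a sketch only; the concrete values are placeholders, not prescribed defaults):

```sh
# Skip building any Wasm binary, e.g. while iterating on native-only code.
SKIP_WASM_BUILD=1 cargo check

# Force a fresh Wasm build; the variable only needs a value different from the
# previous run, so a timestamp works well.
FORCE_WASM_BUILD=$(date +%s) cargo build --release

# Build the Wasm binary with the debug build type and without colored output.
WASM_BUILD_TYPE=debug WASM_BUILD_NO_COLOR=1 cargo build
```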
use std::{ env, fs, -- GitLab From c5f211d0dee5e698ded0700469fb81a8c22c244a Mon Sep 17 00:00:00 2001 From: s0me0ne-unkn0wn <48632512+s0me0ne-unkn0wn@users.noreply.github.com> Date: Tue, 28 Nov 2023 14:36:04 +0100 Subject: [PATCH 144/199] Remove `im-online` pallet from Rococo and Westend (#2265) Co-authored-by: ordian Co-authored-by: Vladimir Istyufeev --- .gitlab/pipeline/check.yml | 1 + .../chains/relays/rococo/src/genesis.rs | 4 - .../chains/relays/westend/src/genesis.rs | 4 - .../emulated/common/src/lib.rs | 2 - polkadot/node/service/src/chain_spec.rs | 64 +---- polkadot/runtime/rococo/src/lib.rs | 164 ++++++++++-- polkadot/runtime/westend/src/lib.rs | 163 ++++++++++-- .../frame/offences/benchmarking/src/lib.rs | 235 +----------------- 8 files changed, 288 insertions(+), 349 deletions(-) diff --git a/.gitlab/pipeline/check.yml b/.gitlab/pipeline/check.yml index b2f2efc5e66..3ef3865aa43 100644 --- a/.gitlab/pipeline/check.yml +++ b/.gitlab/pipeline/check.yml @@ -111,6 +111,7 @@ check-rust-feature-propagation: time ./try-runtime ${COMMAND_EXTRA_ARGS} \ --runtime ./target/release/wbuild/"$PACKAGE"/"$WASM" \ on-runtime-upgrade --disable-spec-version-check --checks=all ${SUBCOMMAND_EXTRA_ARGS} live --uri ${URI} + sleep 5 # Check runtime migrations for Parity managed relay chains check-runtime-migration-westend: diff --git a/cumulus/parachains/integration-tests/emulated/chains/relays/rococo/src/genesis.rs b/cumulus/parachains/integration-tests/emulated/chains/relays/rococo/src/genesis.rs index 6f5f3923ead..45e1e94de01 100644 --- a/cumulus/parachains/integration-tests/emulated/chains/relays/rococo/src/genesis.rs +++ b/cumulus/parachains/integration-tests/emulated/chains/relays/rococo/src/genesis.rs @@ -16,7 +16,6 @@ // Substrate use beefy_primitives::ecdsa_crypto::AuthorityId as BeefyId; use grandpa::AuthorityId as GrandpaId; -use pallet_im_online::sr25519::AuthorityId as ImOnlineId; use sp_authority_discovery::AuthorityId as AuthorityDiscoveryId; use sp_consensus_babe::AuthorityId as BabeId; use sp_core::{sr25519, storage::Storage}; @@ -38,7 +37,6 @@ const ENDOWMENT: u128 = 1_000_000 * ROC; fn session_keys( babe: BabeId, grandpa: GrandpaId, - im_online: ImOnlineId, para_validator: ValidatorId, para_assignment: AssignmentId, authority_discovery: AuthorityDiscoveryId, @@ -47,7 +45,6 @@ fn session_keys( rococo_runtime::SessionKeys { babe, grandpa, - im_online, para_validator, para_assignment, authority_discovery, @@ -74,7 +71,6 @@ pub fn genesis() -> Storage { x.4.clone(), x.5.clone(), x.6.clone(), - x.7.clone(), get_from_seed::("Alice"), ), ) diff --git a/cumulus/parachains/integration-tests/emulated/chains/relays/westend/src/genesis.rs b/cumulus/parachains/integration-tests/emulated/chains/relays/westend/src/genesis.rs index e87b85881d3..e2297100a45 100644 --- a/cumulus/parachains/integration-tests/emulated/chains/relays/westend/src/genesis.rs +++ b/cumulus/parachains/integration-tests/emulated/chains/relays/westend/src/genesis.rs @@ -16,7 +16,6 @@ // Substrate use beefy_primitives::ecdsa_crypto::AuthorityId as BeefyId; use grandpa::AuthorityId as GrandpaId; -use pallet_im_online::sr25519::AuthorityId as ImOnlineId; use sp_authority_discovery::AuthorityId as AuthorityDiscoveryId; use sp_consensus_babe::AuthorityId as BabeId; use sp_core::storage::Storage; @@ -39,7 +38,6 @@ const STASH: u128 = 100 * WND; fn session_keys( babe: BabeId, grandpa: GrandpaId, - im_online: ImOnlineId, para_validator: ValidatorId, para_assignment: AssignmentId, authority_discovery: AuthorityDiscoveryId, @@ -48,7 +46,6 
@@ fn session_keys( westend_runtime::SessionKeys { babe, grandpa, - im_online, para_validator, para_assignment, authority_discovery, @@ -75,7 +72,6 @@ pub fn genesis() -> Storage { x.4.clone(), x.5.clone(), x.6.clone(), - x.7.clone(), get_from_seed::("Alice"), ), ) diff --git a/cumulus/parachains/integration-tests/emulated/common/src/lib.rs b/cumulus/parachains/integration-tests/emulated/common/src/lib.rs index 952b053f2aa..58222f622c2 100644 --- a/cumulus/parachains/integration-tests/emulated/common/src/lib.rs +++ b/cumulus/parachains/integration-tests/emulated/common/src/lib.rs @@ -21,7 +21,6 @@ pub use xcm_emulator; // Substrate use grandpa::AuthorityId as GrandpaId; -use pallet_im_online::sr25519::AuthorityId as ImOnlineId; use sp_authority_discovery::AuthorityId as AuthorityDiscoveryId; use sp_consensus_babe::AuthorityId as BabeId; use sp_core::{sr25519, storage::Storage, Pair, Public}; @@ -163,7 +162,6 @@ pub mod validators { AccountId, BabeId, GrandpaId, - ImOnlineId, ValidatorId, AssignmentId, AuthorityDiscoveryId, diff --git a/polkadot/node/service/src/chain_spec.rs b/polkadot/node/service/src/chain_spec.rs index 871d7e82911..fd35a4aaf6a 100644 --- a/polkadot/node/service/src/chain_spec.rs +++ b/polkadot/node/service/src/chain_spec.rs @@ -18,7 +18,6 @@ use beefy_primitives::ecdsa_crypto::AuthorityId as BeefyId; use grandpa::AuthorityId as GrandpaId; -use pallet_im_online::sr25519::AuthorityId as ImOnlineId; #[cfg(feature = "westend-native")] use pallet_staking::Forcing; use polkadot_primitives::{AccountId, AccountPublic, AssignmentId, ValidatorId}; @@ -162,7 +161,6 @@ fn default_parachains_host_configuration_is_consistent() { fn westend_session_keys( babe: BabeId, grandpa: GrandpaId, - im_online: ImOnlineId, para_validator: ValidatorId, para_assignment: AssignmentId, authority_discovery: AuthorityDiscoveryId, @@ -171,7 +169,6 @@ fn westend_session_keys( westend::SessionKeys { babe, grandpa, - im_online, para_validator, para_assignment, authority_discovery, @@ -183,7 +180,6 @@ fn westend_session_keys( fn rococo_session_keys( babe: BabeId, grandpa: GrandpaId, - im_online: ImOnlineId, para_validator: ValidatorId, para_assignment: AssignmentId, authority_discovery: AuthorityDiscoveryId, @@ -192,7 +188,6 @@ fn rococo_session_keys( rococo_runtime::SessionKeys { babe, grandpa, - im_online, para_validator, para_assignment, authority_discovery, @@ -220,7 +215,6 @@ fn westend_staging_testnet_config_genesis() -> serde_json::Value { AccountId, BabeId, GrandpaId, - ImOnlineId, ValidatorId, AssignmentId, AuthorityDiscoveryId, @@ -237,9 +231,6 @@ fn westend_staging_testnet_config_genesis() -> serde_json::Value { //5Eb7wM65PNgtY6e33FEAzYtU5cRTXt6WQvZTnzaKQwkVcABk hex!["6faae44b21c6f2681a7f60df708e9f79d340f7d441d28bd987fab8d05c6487e8"] .unchecked_into(), - //5CdS2wGo4qdTQceVfEnbZH8vULeBrnGYCxSCxDna4tQSMV6y - hex!["18f5d55f138bfa8e0ea26ed6fa56817b247de3c2e2030a908c63fb37c146473f"] - .unchecked_into(), //5FqMLAgygdX9UqzukDp15Uid9PAKdFAR621U7xtp5ut2NfrW hex!["a6c1a5b501985a83cb1c37630c5b41e6b0a15b3675b2fd94694758e6cfa6794d"] .unchecked_into(), @@ -264,9 +255,6 @@ fn westend_staging_testnet_config_genesis() -> serde_json::Value { //5FXFsPReTUEYPRNKhbTdUathcWBsxTNsLbk2mTpYdKCJewjA hex!["98f4d81cb383898c2c3d54dab28698c0f717c81b509cb32dc6905af3cc697b18"] .unchecked_into(), - //5CDYSCJK91r8y2r1V4Ddrit4PFMEkwZXJe8mNBqGXJ4xWCWq - hex!["06bd7dd4ab4c808c7d09d9cb6bd27fbcd99ad8400e99212b335056c475c24031"] - .unchecked_into(), //5CZjurB78XbSHf6SLkLhCdkqw52Zm7aBYUDdfkLqEDWJ9Zhj 
hex!["162508accd470e379b04cb0c7c60b35a7d5357e84407a89ed2dd48db4b726960"] .unchecked_into(), @@ -291,9 +279,6 @@ fn westend_staging_testnet_config_genesis() -> serde_json::Value { //5G4kCbgqUhEyrRHCyFwFEkgBZXoYA8sbgsRxT9rY8Tp5Jj5F hex!["b0f8d2b9e4e1eafd4dab6358e0b9d5380d78af27c094e69ae9d6d30ca300fd86"] .unchecked_into(), - //5HVhFBLFTKSZK9fX6RktckWDTgYNoSd33fgonsEC8zfr4ddm - hex!["f03c3e184b2883eec9beaeb97f54321587e7476b228831ea0b5fc6da847ea975"] - .unchecked_into(), //5CS7thd2n54WfqeKU3cjvZzK4z5p7zku1Zw97mSzXgPioAAs hex!["1055100a283968271a0781450b389b9093231be809be1e48a305ebad2a90497e"] .unchecked_into(), @@ -318,9 +303,6 @@ fn westend_staging_testnet_config_genesis() -> serde_json::Value { //5ChfdrAqmLjCeDJvynbMjcxYLHYzPe8UWXd3HnX9JDThUMbn hex!["1c309a70b4e274314b84c9a0a1f973c9c4fc084df5479ef686c54b1ae4950424"] .unchecked_into(), - //5DnsMm24575xK2b2aGfmafiDxwCet6Mr4iiZQeDdWvi8CzuF - hex!["4c64868ba6d8ace235d3efb4c10d745a67cf3bdfeae23b264d7ea2f3439dec42"] - .unchecked_into(), //5D8C3HHEp5E8fJsXRD56494F413CdRSR9QKGXe7v5ZEfymdj hex!["2ee4d78f328db178c54f205ac809da12e291a33bcbd4f29f081ce7e74bdc5044"] .unchecked_into(), @@ -361,7 +343,6 @@ fn westend_staging_testnet_config_genesis() -> serde_json::Value { x.5.clone(), x.6.clone(), x.7.clone(), - x.8.clone(), ), ) }) @@ -408,7 +389,6 @@ fn rococo_staging_testnet_config_genesis() -> serde_json::Value { AccountId, BabeId, GrandpaId, - ImOnlineId, ValidatorId, AssignmentId, AuthorityDiscoveryId, @@ -425,9 +405,6 @@ fn rococo_staging_testnet_config_genesis() -> serde_json::Value { //5CPd3zoV9Aaah4xWucuDivMHJ2nEEmpdi864nPTiyRZp4t87 hex!["0e6d7d1afbcc6547b92995a394ba0daed07a2420be08220a5a1336c6731f0bfa"] .unchecked_into(), - //5F7BEa1LGFksUihyatf3dCDYneB8pWzVyavnByCsm5nBgezi - hex!["86975a37211f8704e947a365b720f7a3e2757988eaa7d0f197e83dba355ef743"] - .unchecked_into(), //5CP6oGfwqbEfML8efqm1tCZsUgRsJztp9L8ZkEUxA16W8PPz hex!["0e07a51d3213842f8e9363ce8e444255990a225f87e80a3d651db7841e1a0205"] .unchecked_into(), @@ -452,9 +429,6 @@ fn rococo_staging_testnet_config_genesis() -> serde_json::Value { //5HnDVBN9mD6mXyx8oryhDbJtezwNSj1VRXgLoYCBA6uEkiao hex!["fcd5f87a6fd5707a25122a01b4dac0a8482259df7d42a9a096606df1320df08d"] .unchecked_into(), - //5DhyXZiuB1LvqYKFgT5tRpgGsN3is2cM9QxgW7FikvakbAZP - hex!["48a910c0af90898f11bd57d37ceaea53c78994f8e1833a7ade483c9a84bde055"] - .unchecked_into(), //5EPEWRecy2ApL5n18n3aHyU1956zXTRqaJpzDa9DoqiggNwF hex!["669a10892119453e9feb4e3f1ee8e028916cc3240022920ad643846fbdbee816"] .unchecked_into(), @@ -479,9 +453,6 @@ fn rococo_staging_testnet_config_genesis() -> serde_json::Value { //5HAes2RQYPbYKbLBfKb88f4zoXv6pPA6Ke8CjN7dob3GpmSP hex!["e1b68fbd84333e31486c08e6153d9a1415b2e7e71b413702b7d64e9b631184a1"] .unchecked_into(), - //5HTXBf36LXmkFWJLokNUK6fPxVpkr2ToUnB1pvaagdGu4c1T - hex!["ee93e26259decb89afcf17ef2aa0fa2db2e1042fb8f56ecfb24d19eae8629878"] - .unchecked_into(), //5FtAGDZYJKXkhVhAxCQrXmaP7EE2mGbBMfmKDHjfYDgq2BiU hex!["a8e61ffacafaf546283dc92d14d7cc70ea0151a5dd81fdf73ff5a2951f2b6037"] .unchecked_into(), @@ -506,9 +477,6 @@ fn rococo_staging_testnet_config_genesis() -> serde_json::Value { //5DJV3zCBTJBLGNDCcdWrYxWDacSz84goGTa4pFeKVvehEBte hex!["36be9069cdb4a8a07ecd51f257875150f0a8a1be44a10d9d98dabf10a030aef4"] .unchecked_into(), - //5FHf8kpK4fPjEJeYcYon2gAPwEBubRvtwpzkUbhMWSweKPUY - hex!["8e95b9b5b4dc69790b67b566567ca8bf8cdef3a3a8bb65393c0d1d1c87cd2d2c"] - .unchecked_into(), //5F9FsRjpecP9GonktmtFL3kjqNAMKjHVFjyjRdTPa4hbQRZA hex!["882d72965e642677583b333b2d173ac94b5fd6c405c76184bb14293be748a13b"] .unchecked_into(), @@ 
-533,9 +501,6 @@ fn rococo_staging_testnet_config_genesis() -> serde_json::Value { //5EX1JBghGbQqWohTPU6msR9qZ2nYPhK9r3RTQ2oD1K8TCxaG hex!["6c878e33b83c20324238d22240f735457b6fba544b383e70bb62a27b57380c81"] .unchecked_into(), - //5GqL8RbVAuNXpDhjQi1KrS1MyNuKhvus2AbmQwRGjpuGZmFu - hex!["d2f9d537ffa59919a4028afdb627c14c14c97a1547e13e8e82203d2049b15b1a"] - .unchecked_into(), //5EUNaBpX9mJgcmLQHyG5Pkms6tbDiKuLbeTEJS924Js9cA1N hex!["6a8570b9c6408e54bacf123cc2bb1b0f087f9c149147d0005badba63a5a4ac01"] .unchecked_into(), @@ -560,9 +525,6 @@ fn rococo_staging_testnet_config_genesis() -> serde_json::Value { //5GzDPGbUM9uH52ZEwydasTj8edokGUJ7vEpoFWp9FE1YNuFB hex!["d9c056c98ca0e6b4eb7f5c58c007c1db7be0fe1f3776108f797dd4990d1ccc33"] .unchecked_into(), - //5GWZbVkJEfWZ7fRca39YAQeqri2Z7pkeHyd7rUctUHyQifLp - hex!["c4a980da30939d5bb9e4a734d12bf81259ae286aa21fa4b65405347fa40eff35"] - .unchecked_into(), //5CmLCFeSurRXXtwMmLcVo7sdJ9EqDguvJbuCYDcHkr3cpqyE hex!["1efc23c0b51ad609ab670ecf45807e31acbd8e7e5cb7c07cf49ee42992d2867c"] .unchecked_into(), @@ -587,9 +549,6 @@ fn rococo_staging_testnet_config_genesis() -> serde_json::Value { //5DnEySxbnppWEyN8cCLqvGjAorGdLRg2VmkY96dbJ1LHFK8N hex!["4bea0b37e0cce9bddd80835fa2bfd5606f5dcfb8388bbb10b10c483f0856cf14"] .unchecked_into(), - //5E1Y1FJ7dVP7qtE3wm241pTm72rTMcDT5Jd8Czv7Pwp7N3AH - hex!["560d90ca51e9c9481b8a9810060e04d0708d246714960439f804e5c6f40ca651"] - .unchecked_into(), //5CAC278tFCHAeHYqE51FTWYxHmeLcENSS1RG77EFRTvPZMJT hex!["042f07fc5268f13c026bbe199d63e6ac77a0c2a780f71cda05cee5a6f1b3f11f"] .unchecked_into(), @@ -614,9 +573,6 @@ fn rococo_staging_testnet_config_genesis() -> serde_json::Value { //5DrA2fZdzmNqT5j6DXNwVxPBjDV9jhkAqvjt6Us3bQHKy3cF hex!["4ee66173993dd0db5d628c4c9cb61a27b76611ad3c3925947f0d0011ee2c5dcc"] .unchecked_into(), - //5FNFDUGNLUtqg5LgrwYLNmBiGoP8KRxsvQpBkc7GQP6qaBUG - hex!["92156f54a114ee191415898f2da013d9db6a5362d6b36330d5fc23e27360ab66"] - .unchecked_into(), //5Gx6YeNhynqn8qkda9QKpc9S7oDr4sBrfAu516d3sPpEt26F hex!["d822d4088b20dca29a580a577a97d6f024bb24c9550bebdfd7d2d18e946a1c7d"] .unchecked_into(), @@ -657,7 +613,6 @@ fn rococo_staging_testnet_config_genesis() -> serde_json::Value { x.5.clone(), x.6.clone(), x.7.clone(), - x.8.clone(), ), ) }) @@ -768,35 +723,24 @@ pub fn get_authority_keys_from_seed( AccountId, BabeId, GrandpaId, - ImOnlineId, ValidatorId, AssignmentId, AuthorityDiscoveryId, BeefyId, ) { let keys = get_authority_keys_from_seed_no_beefy(seed); - (keys.0, keys.1, keys.2, keys.3, keys.4, keys.5, keys.6, keys.7, get_from_seed::(seed)) + (keys.0, keys.1, keys.2, keys.3, keys.4, keys.5, keys.6, get_from_seed::(seed)) } /// Helper function to generate stash, controller and session key from seed pub fn get_authority_keys_from_seed_no_beefy( seed: &str, -) -> ( - AccountId, - AccountId, - BabeId, - GrandpaId, - ImOnlineId, - ValidatorId, - AssignmentId, - AuthorityDiscoveryId, -) { +) -> (AccountId, AccountId, BabeId, GrandpaId, ValidatorId, AssignmentId, AuthorityDiscoveryId) { ( get_account_id_from_seed::(&format!("{}//stash", seed)), get_account_id_from_seed::(seed), get_from_seed::(seed), get_from_seed::(seed), - get_from_seed::(seed), get_from_seed::(seed), get_from_seed::(seed), get_from_seed::(seed), @@ -829,7 +773,6 @@ pub fn westend_testnet_genesis( AccountId, BabeId, GrandpaId, - ImOnlineId, ValidatorId, AssignmentId, AuthorityDiscoveryId, @@ -861,7 +804,6 @@ pub fn westend_testnet_genesis( x.5.clone(), x.6.clone(), x.7.clone(), - x.8.clone(), ), ) }) @@ -899,7 +841,6 @@ pub fn rococo_testnet_genesis( AccountId, BabeId, 
GrandpaId, - ImOnlineId, ValidatorId, AssignmentId, AuthorityDiscoveryId, @@ -930,7 +871,6 @@ pub fn rococo_testnet_genesis( x.5.clone(), x.6.clone(), x.7.clone(), - x.8.clone(), ), ) }) diff --git a/polkadot/runtime/rococo/src/lib.rs b/polkadot/runtime/rococo/src/lib.rs index 12388da1868..5ac92a73732 100644 --- a/polkadot/runtime/rococo/src/lib.rs +++ b/polkadot/runtime/rococo/src/lib.rs @@ -78,7 +78,6 @@ use frame_support::{ use frame_system::EnsureRoot; use pallet_grandpa::{fg_primitives, AuthorityId as GrandpaId}; use pallet_identity::legacy::IdentityInfo; -use pallet_im_online::sr25519::AuthorityId as ImOnlineId; use pallet_session::historical as session_historical; use pallet_transaction_payment::{CurrencyAdapter, FeeDetails, RuntimeDispatchInfo}; use sp_core::{ConstU128, OpaqueMetadata, H256}; @@ -90,7 +89,8 @@ use sp_runtime::{ Verify, }, transaction_validity::{TransactionPriority, TransactionSource, TransactionValidity}, - ApplyExtrinsicResult, FixedU128, KeyTypeId, Perbill, Percent, Permill, RuntimeDebug, + ApplyExtrinsicResult, BoundToRuntimeAppPublic, FixedU128, KeyTypeId, Perbill, Percent, Permill, + RuntimeAppPublic, RuntimeDebug, }; use sp_staking::SessionIndex; #[cfg(any(feature = "std", test))] @@ -354,14 +354,53 @@ impl pallet_timestamp::Config for Runtime { impl pallet_authorship::Config for Runtime { type FindAuthor = pallet_session::FindAccountFromAuthorIndex; - type EventHandler = ImOnline; + type EventHandler = (); +} + +#[derive(Clone, Debug, PartialEq, Eq, Encode, Decode)] +pub struct OldSessionKeys { + pub grandpa: ::Public, + pub babe: ::Public, + pub im_online: pallet_im_online::sr25519::AuthorityId, + pub para_validator: ::Public, + pub para_assignment: ::Public, + pub authority_discovery: ::Public, + pub beefy: ::Public, +} + +impl OpaqueKeys for OldSessionKeys { + type KeyTypeIdProviders = (); + fn key_ids() -> &'static [KeyTypeId] { + &[ + <::Public>::ID, + <::Public>::ID, + sp_core::crypto::key_types::IM_ONLINE, + <::Public>::ID, + <::Public>::ID, + <::Public>::ID, + <::Public>::ID, + ] + } + fn get_raw(&self, i: KeyTypeId) -> &[u8] { + match i { + <::Public>::ID => self.grandpa.as_ref(), + <::Public>::ID => self.babe.as_ref(), + sp_core::crypto::key_types::IM_ONLINE => self.im_online.as_ref(), + <::Public>::ID => self.para_validator.as_ref(), + <::Public>::ID => + self.para_assignment.as_ref(), + <::Public>::ID => + self.authority_discovery.as_ref(), + <::Public>::ID => self.beefy.as_ref(), + _ => &[], + } + } } impl_opaque_keys! { pub struct SessionKeys { pub grandpa: Grandpa, pub babe: Babe, - pub im_online: ImOnline, pub para_validator: Initializer, pub para_assignment: ParaSessionInfo, pub authority_discovery: AuthorityDiscovery, @@ -369,6 +408,18 @@ impl_opaque_keys! { } } +// remove this when removing `OldSessionKeys` +fn transform_session_keys(_val: AccountId, old: OldSessionKeys) -> SessionKeys { + SessionKeys { + grandpa: old.grandpa, + babe: old.babe, + para_validator: old.para_validator, + para_assignment: old.para_assignment, + authority_discovery: old.authority_discovery, + beefy: old.beefy, + } +} + /// Special `ValidatorIdOf` implementation that is just returning the input as result. pub struct ValidatorIdOf; impl sp_runtime::traits::Convert> for ValidatorIdOf { @@ -513,22 +564,6 @@ impl pallet_authority_discovery::Config for Runtime { type MaxAuthorities = MaxAuthorities; } -parameter_types! 
{ - pub const ImOnlineUnsignedPriority: TransactionPriority = TransactionPriority::max_value(); -} - -impl pallet_im_online::Config for Runtime { - type AuthorityId = ImOnlineId; - type RuntimeEvent = RuntimeEvent; - type ValidatorSet = Historical; - type NextSessionRotation = Babe; - type ReportUnresponsiveness = Offences; - type UnsignedPriority = ImOnlineUnsignedPriority; - type WeightInfo = weights::pallet_im_online::WeightInfo; - type MaxKeys = MaxKeys; - type MaxPeerInHeartbeats = MaxPeerInHeartbeats; -} - parameter_types! { pub const MaxSetIdSessionEntries: u32 = BondingDuration::get() * SessionsPerEra::get(); } @@ -778,7 +813,6 @@ impl InstanceFilter for ProxyType { // Specifically omitting the entire Balances pallet RuntimeCall::Session(..) | RuntimeCall::Grandpa(..) | - RuntimeCall::ImOnline(..) | RuntimeCall::Treasury(..) | RuntimeCall::Bounties(..) | RuntimeCall::ChildBounties(..) | @@ -1292,8 +1326,7 @@ construct_runtime! { TransactionPayment: pallet_transaction_payment::{Pallet, Storage, Event} = 33, // Consensus support. - // Authorship must be before session in order to note author in the correct session and era - // for im-online. + // Authorship must be before session in order to note author in the correct session and era. Authorship: pallet_authorship::{Pallet, Storage} = 5, Offences: pallet_offences::{Pallet, Storage, Event} = 7, Historical: session_historical::{Pallet} = 34, @@ -1307,7 +1340,6 @@ construct_runtime! { Session: pallet_session::{Pallet, Call, Storage, Event, Config} = 8, Grandpa: pallet_grandpa::{Pallet, Call, Storage, Config, Event, ValidateUnsigned} = 10, - ImOnline: pallet_im_online::{Pallet, Call, Storage, Event, ValidateUnsigned, Config} = 11, AuthorityDiscovery: pallet_authority_discovery::{Pallet, Config} = 12, // Governance stuff; uncallable initially. @@ -1453,6 +1485,8 @@ pub mod migrations { use frame_support::traits::LockIdentifier; use frame_system::pallet_prelude::BlockNumberFor; + #[cfg(feature = "try-runtime")] + use sp_core::crypto::ByteArray; parameter_types! { pub const DemocracyPalletName: &'static str = "Democracy"; @@ -1461,6 +1495,7 @@ pub mod migrations { pub const PhragmenElectionPalletName: &'static str = "PhragmenElection"; pub const TechnicalMembershipPalletName: &'static str = "TechnicalMembership"; pub const TipsPalletName: &'static str = "Tips"; + pub const ImOnlinePalletName: &'static str = "ImOnline"; pub const PhragmenElectionPalletId: LockIdentifier = *b"phrelect"; } @@ -1497,10 +1532,82 @@ pub mod migrations { type PalletName = TipsPalletName; } + /// Upgrade Session keys to exclude `ImOnline` key. + /// When this is removed, should also remove `OldSessionKeys`. 
+ pub struct UpgradeSessionKeys; + const UPGRADE_SESSION_KEYS_FROM_SPEC: u32 = 104000; + + impl frame_support::traits::OnRuntimeUpgrade for UpgradeSessionKeys { + #[cfg(feature = "try-runtime")] + fn pre_upgrade() -> Result, sp_runtime::TryRuntimeError> { + if System::last_runtime_upgrade_spec_version() > UPGRADE_SESSION_KEYS_FROM_SPEC { + log::warn!(target: "runtime::session_keys", "Skipping session keys migration pre-upgrade check due to spec version (already applied?)"); + return Ok(Vec::new()) + } + + log::info!(target: "runtime::session_keys", "Collecting pre-upgrade session keys state"); + let key_ids = SessionKeys::key_ids(); + frame_support::ensure!( + key_ids.into_iter().find(|&k| *k == sp_core::crypto::key_types::IM_ONLINE) == None, + "New session keys contain the ImOnline key that should have been removed", + ); + let storage_key = pallet_session::QueuedKeys::::hashed_key(); + let mut state: Vec = Vec::new(); + frame_support::storage::unhashed::get::>( + &storage_key, + ) + .ok_or::("Queued keys are not available".into())? + .into_iter() + .for_each(|(id, keys)| { + state.extend_from_slice(id.as_slice()); + for key_id in key_ids { + state.extend_from_slice(keys.get_raw(*key_id)); + } + }); + frame_support::ensure!(state.len() > 0, "Queued keys are not empty before upgrade"); + Ok(state) + } + + fn on_runtime_upgrade() -> Weight { + if System::last_runtime_upgrade_spec_version() > UPGRADE_SESSION_KEYS_FROM_SPEC { + log::info!("Skipping session keys upgrade: already applied"); + return ::DbWeight::get().reads(1) + } + log::trace!("Upgrading session keys"); + Session::upgrade_keys::(transform_session_keys); + Perbill::from_percent(50) * BlockWeights::get().max_block + } + + #[cfg(feature = "try-runtime")] + fn post_upgrade( + old_state: sp_std::vec::Vec, + ) -> Result<(), sp_runtime::TryRuntimeError> { + if System::last_runtime_upgrade_spec_version() > UPGRADE_SESSION_KEYS_FROM_SPEC { + log::warn!(target: "runtime::session_keys", "Skipping session keys migration post-upgrade check due to spec version (already applied?)"); + return Ok(()) + } + + let key_ids = SessionKeys::key_ids(); + let mut new_state: Vec = Vec::new(); + pallet_session::QueuedKeys::::get().into_iter().for_each(|(id, keys)| { + new_state.extend_from_slice(id.as_slice()); + for key_id in key_ids { + new_state.extend_from_slice(keys.get_raw(*key_id)); + } + }); + frame_support::ensure!(new_state.len() > 0, "Queued keys are not empty after upgrade"); + frame_support::ensure!( + old_state == new_state, + "Pre-upgrade and post-upgrade keys do not match!" + ); + log::info!(target: "runtime::session_keys", "Session keys migrated successfully"); + Ok(()) + } + } + /// Unreleased migrations. 
Add new ones here: pub type Unreleased = ( pallet_society::migrations::MigrateToV2, - pallet_im_online::migration::v1::Migration, parachains_configuration::migration::v7::MigrateToV7, assigned_slots::migration::v1::MigrateToV1, parachains_scheduler::migration::v1::MigrateToV1, @@ -1527,6 +1634,12 @@ pub mod migrations { pallet_grandpa::migrations::MigrateV4ToV5, parachains_configuration::migration::v10::MigrateToV10, + + // Upgrade `SessionKeys` to exclude `ImOnline` + UpgradeSessionKeys, + + // Remove `im-online` pallet on-chain storage + frame_support::migrations::RemovePallet::DbWeight>, ); } @@ -1597,7 +1710,6 @@ mod benches { [pallet_conviction_voting, ConvictionVoting] [pallet_nis, Nis] [pallet_identity, Identity] - [pallet_im_online, ImOnline] [pallet_indices, Indices] [pallet_message_queue, MessageQueue] [pallet_multisig, Multisig] diff --git a/polkadot/runtime/westend/src/lib.rs b/polkadot/runtime/westend/src/lib.rs index bb6331e9534..f7ff2d5e9e1 100644 --- a/polkadot/runtime/westend/src/lib.rs +++ b/polkadot/runtime/westend/src/lib.rs @@ -41,7 +41,6 @@ use frame_support::{ use frame_system::EnsureRoot; use pallet_grandpa::{fg_primitives, AuthorityId as GrandpaId}; use pallet_identity::legacy::IdentityInfo; -use pallet_im_online::sr25519::AuthorityId as ImOnlineId; use pallet_session::historical as session_historical; use pallet_transaction_payment::{CurrencyAdapter, FeeDetails, RuntimeDispatchInfo}; use parity_scale_codec::{Decode, Encode, MaxEncodedLen}; @@ -88,7 +87,8 @@ use sp_runtime::{ IdentityLookup, Keccak256, OpaqueKeys, SaturatedConversion, Verify, }, transaction_validity::{TransactionPriority, TransactionSource, TransactionValidity}, - ApplyExtrinsicResult, FixedU128, KeyTypeId, Perbill, Percent, Permill, + ApplyExtrinsicResult, BoundToRuntimeAppPublic, FixedU128, KeyTypeId, Perbill, Percent, Permill, + RuntimeAppPublic, }; use sp_staking::SessionIndex; use sp_std::{collections::btree_map::BTreeMap, prelude::*}; @@ -417,7 +417,7 @@ impl pallet_timestamp::Config for Runtime { impl pallet_authorship::Config for Runtime { type FindAuthor = pallet_session::FindAccountFromAuthorIndex; - type EventHandler = (Staking, ImOnline); + type EventHandler = Staking; } parameter_types! { @@ -425,11 +425,50 @@ parameter_types! { pub const Offset: BlockNumber = 0; } +#[derive(Clone, Debug, PartialEq, Eq, Encode, Decode)] +pub struct OldSessionKeys { + pub grandpa: ::Public, + pub babe: ::Public, + pub im_online: pallet_im_online::sr25519::AuthorityId, + pub para_validator: ::Public, + pub para_assignment: ::Public, + pub authority_discovery: ::Public, + pub beefy: ::Public, +} + +impl OpaqueKeys for OldSessionKeys { + type KeyTypeIdProviders = (); + fn key_ids() -> &'static [KeyTypeId] { + &[ + <::Public>::ID, + <::Public>::ID, + sp_core::crypto::key_types::IM_ONLINE, + <::Public>::ID, + <::Public>::ID, + <::Public>::ID, + <::Public>::ID, + ] + } + fn get_raw(&self, i: KeyTypeId) -> &[u8] { + match i { + <::Public>::ID => self.grandpa.as_ref(), + <::Public>::ID => self.babe.as_ref(), + sp_core::crypto::key_types::IM_ONLINE => self.im_online.as_ref(), + <::Public>::ID => self.para_validator.as_ref(), + <::Public>::ID => + self.para_assignment.as_ref(), + <::Public>::ID => + self.authority_discovery.as_ref(), + <::Public>::ID => self.beefy.as_ref(), + _ => &[], + } + } +} + impl_opaque_keys! 
{ pub struct SessionKeys { pub grandpa: Grandpa, pub babe: Babe, - pub im_online: ImOnline, pub para_validator: Initializer, pub para_assignment: ParaSessionInfo, pub authority_discovery: AuthorityDiscovery, @@ -437,6 +476,18 @@ impl_opaque_keys! { } } +// remove this when removing `OldSessionKeys` +fn transform_session_keys(_v: AccountId, old: OldSessionKeys) -> SessionKeys { + SessionKeys { + grandpa: old.grandpa, + babe: old.babe, + para_validator: old.para_validator, + para_assignment: old.para_assignment, + authority_discovery: old.authority_discovery, + beefy: old.beefy, + } +} + impl pallet_session::Config for Runtime { type RuntimeEvent = RuntimeEvent; type ValidatorId = AccountId; @@ -744,19 +795,6 @@ impl pallet_authority_discovery::Config for Runtime { parameter_types! { pub const NposSolutionPriority: TransactionPriority = TransactionPriority::max_value() / 2; - pub const ImOnlineUnsignedPriority: TransactionPriority = TransactionPriority::max_value(); -} - -impl pallet_im_online::Config for Runtime { - type AuthorityId = ImOnlineId; - type RuntimeEvent = RuntimeEvent; - type ValidatorSet = Historical; - type NextSessionRotation = Babe; - type ReportUnresponsiveness = Offences; - type UnsignedPriority = ImOnlineUnsignedPriority; - type WeightInfo = weights::pallet_im_online::WeightInfo; - type MaxKeys = MaxKeys; - type MaxPeerInHeartbeats = MaxPeerInHeartbeats; } parameter_types! { @@ -986,7 +1024,6 @@ impl InstanceFilter for ProxyType { RuntimeCall::Staking(..) | RuntimeCall::Session(..) | RuntimeCall::Grandpa(..) | - RuntimeCall::ImOnline(..) | RuntimeCall::Utility(..) | RuntimeCall::Identity(..) | RuntimeCall::ConvictionVoting(..) | @@ -1374,8 +1411,7 @@ construct_runtime! { TransactionPayment: pallet_transaction_payment::{Pallet, Storage, Event} = 26, // Consensus support. - // Authorship must be before session in order to note author in the correct session and era - // for im-online and staking. + // Authorship must be before session in order to note author in the correct session and era. Authorship: pallet_authorship::{Pallet, Storage} = 5, Staking: pallet_staking::{Pallet, Call, Storage, Config, Event} = 6, Offences: pallet_offences::{Pallet, Storage, Event} = 7, @@ -1390,7 +1426,6 @@ construct_runtime! { Session: pallet_session::{Pallet, Call, Storage, Event, Config} = 8, Grandpa: pallet_grandpa::{Pallet, Call, Storage, Config, Event, ValidateUnsigned} = 10, - ImOnline: pallet_im_online::{Pallet, Call, Storage, Event, ValidateUnsigned, Config} = 11, AuthorityDiscovery: pallet_authority_discovery::{Pallet, Config} = 12, // Utility module. @@ -1522,10 +1557,88 @@ pub type Migrations = migrations::Unreleased; #[allow(deprecated, missing_docs)] pub mod migrations { use super::*; + #[cfg(feature = "try-runtime")] + use sp_core::crypto::ByteArray; + + parameter_types! { + pub const ImOnlinePalletName: &'static str = "ImOnline"; + } + + /// Upgrade Session keys to exclude `ImOnline` key. + /// When this is removed, should also remove `OldSessionKeys`. 
+ pub struct UpgradeSessionKeys; + const UPGRADE_SESSION_KEYS_FROM_SPEC: u32 = 104000; + + impl frame_support::traits::OnRuntimeUpgrade for UpgradeSessionKeys { + #[cfg(feature = "try-runtime")] + fn pre_upgrade() -> Result, sp_runtime::TryRuntimeError> { + if System::last_runtime_upgrade_spec_version() > UPGRADE_SESSION_KEYS_FROM_SPEC { + log::warn!(target: "runtime::session_keys", "Skipping session keys migration pre-upgrade check due to spec version (already applied?)"); + return Ok(Vec::new()) + } + + log::info!(target: "runtime::session_keys", "Collecting pre-upgrade session keys state"); + let key_ids = SessionKeys::key_ids(); + frame_support::ensure!( + key_ids.into_iter().find(|&k| *k == sp_core::crypto::key_types::IM_ONLINE) == None, + "New session keys contain the ImOnline key that should have been removed", + ); + let storage_key = pallet_session::QueuedKeys::::hashed_key(); + let mut state: Vec = Vec::new(); + frame_support::storage::unhashed::get::>( + &storage_key, + ) + .ok_or::("Queued keys are not available".into())? + .into_iter() + .for_each(|(id, keys)| { + state.extend_from_slice(id.as_slice()); + for key_id in key_ids { + state.extend_from_slice(keys.get_raw(*key_id)); + } + }); + frame_support::ensure!(state.len() > 0, "Queued keys are not empty before upgrade"); + Ok(state) + } + + fn on_runtime_upgrade() -> Weight { + if System::last_runtime_upgrade_spec_version() > UPGRADE_SESSION_KEYS_FROM_SPEC { + log::warn!("Skipping session keys upgrade: already applied"); + return ::DbWeight::get().reads(1) + } + log::info!("Upgrading session keys"); + Session::upgrade_keys::(transform_session_keys); + Perbill::from_percent(50) * BlockWeights::get().max_block + } + + #[cfg(feature = "try-runtime")] + fn post_upgrade( + old_state: sp_std::vec::Vec, + ) -> Result<(), sp_runtime::TryRuntimeError> { + if System::last_runtime_upgrade_spec_version() > UPGRADE_SESSION_KEYS_FROM_SPEC { + log::warn!(target: "runtime::session_keys", "Skipping session keys migration post-upgrade check due to spec version (already applied?)"); + return Ok(()) + } + + let key_ids = SessionKeys::key_ids(); + let mut new_state: Vec = Vec::new(); + pallet_session::QueuedKeys::::get().into_iter().for_each(|(id, keys)| { + new_state.extend_from_slice(id.as_slice()); + for key_id in key_ids { + new_state.extend_from_slice(keys.get_raw(*key_id)); + } + }); + frame_support::ensure!(new_state.len() > 0, "Queued keys are not empty after upgrade"); + frame_support::ensure!( + old_state == new_state, + "Pre-upgrade and post-upgrade keys do not match!" + ); + log::info!(target: "runtime::session_keys", "Session keys migrated successfully"); + Ok(()) + } + } /// Unreleased migrations. 
Add new ones here: pub type Unreleased = ( - pallet_im_online::migration::v1::Migration, parachains_configuration::migration::v7::MigrateToV7, pallet_staking::migrations::v14::MigrateToV14, assigned_slots::migration::v1::MigrateToV1, @@ -1537,6 +1650,11 @@ pub mod migrations { pallet_grandpa::migrations::MigrateV4ToV5, parachains_configuration::migration::v10::MigrateToV10, pallet_nomination_pools::migration::versioned::V7ToV8, + UpgradeSessionKeys, + frame_support::migrations::RemovePallet< + ImOnlinePalletName, + ::DbWeight, + >, ); } @@ -1583,7 +1701,6 @@ mod benches { [frame_election_provider_support, ElectionProviderBench::] [pallet_fast_unstake, FastUnstake] [pallet_identity, Identity] - [pallet_im_online, ImOnline] [pallet_indices, Indices] [pallet_message_queue, MessageQueue] [pallet_multisig, Multisig] diff --git a/substrate/frame/offences/benchmarking/src/lib.rs b/substrate/frame/offences/benchmarking/src/lib.rs index c190927b84b..563aa4755ce 100644 --- a/substrate/frame/offences/benchmarking/src/lib.rs +++ b/substrate/frame/offences/benchmarking/src/lib.rs @@ -25,30 +25,25 @@ mod mock; use sp_std::{prelude::*, vec}; use frame_benchmarking::v1::{account, benchmarks}; -use frame_support::traits::{Currency, Get, ValidatorSet, ValidatorSetWithIdentification}; +use frame_support::traits::{Currency, Get}; use frame_system::{Config as SystemConfig, Pallet as System, RawOrigin}; -#[cfg(test)] -use sp_runtime::traits::UniqueSaturatedInto; use sp_runtime::{ traits::{Convert, Saturating, StaticLookup}, Perbill, }; -use sp_staking::offence::{Offence, ReportOffence}; +use sp_staking::offence::ReportOffence; use pallet_babe::EquivocationOffence as BabeEquivocationOffence; use pallet_balances::Config as BalancesConfig; use pallet_grandpa::{ EquivocationOffence as GrandpaEquivocationOffence, TimeSlot as GrandpaTimeSlot, }; -use pallet_im_online::{Config as ImOnlineConfig, Pallet as ImOnline, UnresponsivenessOffence}; use pallet_offences::{Config as OffencesConfig, Pallet as Offences}; use pallet_session::{ historical::{Config as HistoricalConfig, IdentificationTuple}, - Config as SessionConfig, SessionManager, + Config as SessionConfig, Pallet as Session, SessionManager, }; -#[cfg(test)] -use pallet_staking::Event as StakingEvent; use pallet_staking::{ Config as StakingConfig, Exposure, IndividualExposure, MaxNominationsOf, Pallet as Staking, RewardDestination, ValidatorPrefs, @@ -56,8 +51,6 @@ use pallet_staking::{ const SEED: u32 = 0; -const MAX_REPORTERS: u32 = 100; -const MAX_OFFENDERS: u32 = 100; const MAX_NOMINATORS: u32 = 100; pub struct Pallet(Offences); @@ -66,7 +59,6 @@ pub trait Config: SessionConfig + StakingConfig + OffencesConfig - + ImOnlineConfig + HistoricalConfig + BalancesConfig + IdTupleConvert @@ -184,220 +176,7 @@ fn make_offenders( Ok((id_tuples, offenders)) } -fn make_offenders_im_online( - num_offenders: u32, - num_nominators: u32, -) -> Result<(Vec>, Vec>), &'static str> { - Staking::::new_session(0); - - let mut offenders = vec![]; - for i in 0..num_offenders { - let offender = create_offender::(i + 1, num_nominators)?; - offenders.push(offender); - } - - Staking::::start_session(0); - - let id_tuples = offenders - .iter() - .map(|offender| { - < - ::ValidatorSet as ValidatorSet - >::ValidatorIdOf::convert(offender.controller.clone()) - .expect("failed to get validator id from account id") - }) - .map(|validator_id| { - < - ::ValidatorSet as ValidatorSetWithIdentification - >::IdentificationOf::convert(validator_id.clone()) - .map(|full_id| (validator_id, full_id)) - 
.expect("failed to convert validator id to full identification") - }) - .collect::>>(); - Ok((id_tuples, offenders)) -} - -#[cfg(test)] -fn check_events< - T: Config, - I: Iterator, - Item: sp_std::borrow::Borrow<::RuntimeEvent> + sp_std::fmt::Debug, ->( - expected: I, -) { - let events = System::::events() - .into_iter() - .map(|frame_system::EventRecord { event, .. }| event) - .collect::>(); - let expected = expected.collect::>(); - - fn pretty(header: &str, ev: &[D], offset: usize) { - log::info!("{}", header); - for (idx, ev) in ev.iter().enumerate() { - log::info!("\t[{:04}] {:?}", idx + offset, ev); - } - } - fn print_events( - idx: usize, - events: &[D], - expected: &[E], - ) { - let window = 10; - let start = idx.saturating_sub(window / 2); - let end_got = (idx + window / 2).min(events.len()); - pretty("Got(window):", &events[start..end_got], start); - let end_expected = (idx + window / 2).min(expected.len()); - pretty("Expected(window):", &expected[start..end_expected], start); - log::info!("---------------"); - let start_got = events.len().saturating_sub(window); - pretty("Got(end):", &events[start_got..], start_got); - let start_expected = expected.len().saturating_sub(window); - pretty("Expected(end):", &expected[start_expected..], start_expected); - } - - for (idx, (a, b)) in events.iter().zip(expected.iter()).enumerate() { - if a != sp_std::borrow::Borrow::borrow(b) { - print_events(idx, &events, &expected); - log::info!("Mismatch at: {}", idx); - log::info!(" Got: {:?}", b); - log::info!("Expected: {:?}", a); - if events.len() != expected.len() { - log::info!( - "Mismatching lengths. Got: {}, Expected: {}", - events.len(), - expected.len() - ) - } - panic!("Mismatching events."); - } - } - - if events.len() != expected.len() { - print_events(0, &events, &expected); - panic!("Mismatching lengths. Got: {}, Expected: {}", events.len(), expected.len(),) - } -} - benchmarks! { - report_offence_im_online { - let r in 1 .. MAX_REPORTERS; - // we skip 1 offender, because in such case there is no slashing - let o in 2 .. MAX_OFFENDERS; - let n in 0 .. MAX_NOMINATORS.min(MaxNominationsOf::::get()); - - // Make r reporters - let mut reporters = vec![]; - for i in 0 .. 
r { - let reporter = account("reporter", i, SEED); - reporters.push(reporter); - } - - // make sure reporters actually get rewarded - Staking::::set_slash_reward_fraction(Perbill::one()); - - let (offenders, raw_offenders) = make_offenders_im_online::(o, n)?; - let keys = ImOnline::::keys(); - let validator_set_count = keys.len() as u32; - let offenders_count = offenders.len() as u32; - let offence = UnresponsivenessOffence { - session_index: 0, - validator_set_count, - offenders, - }; - let slash_fraction = offence.slash_fraction(offenders_count); - assert_eq!(System::::event_count(), 0); - }: { - let _ = ::ReportUnresponsiveness::report_offence( - reporters.clone(), - offence - ); - } - verify { - #[cfg(test)] - { - let bond_amount: u32 = UniqueSaturatedInto::::unique_saturated_into(bond_amount::()); - let slash_amount = slash_fraction * bond_amount; - let reward_amount = slash_amount.saturating_mul(1 + n) / 2; - let reward = reward_amount / r; - let slash_report = |id| core::iter::once( - ::RuntimeEvent::from(StakingEvent::::SlashReported{ validator: id, fraction: slash_fraction, slash_era: 0}) - ); - let slash = |id| core::iter::once( - ::RuntimeEvent::from(StakingEvent::::Slashed{ staker: id, amount: BalanceOf::::from(slash_amount) }) - ); - let balance_slash = |id| core::iter::once( - ::RuntimeEvent::from(pallet_balances::Event::::Slashed{ who: id, amount: slash_amount.into() }) - ); - let balance_locked = |id| core::iter::once( - ::RuntimeEvent::from(pallet_balances::Event::::Locked{ who: id, amount: slash_amount.into() }) - ); - let balance_unlocked = |id| core::iter::once( - ::RuntimeEvent::from(pallet_balances::Event::::Unlocked{ who: id, amount: slash_amount.into() }) - ); - let chill = |id| core::iter::once( - ::RuntimeEvent::from(StakingEvent::::Chilled{ stash: id }) - ); - let balance_deposit = |id, amount: u32| - ::RuntimeEvent::from(pallet_balances::Event::::Deposit{ who: id, amount: amount.into() }); - let mut first = true; - - // We need to box all events to prevent running into too big allocations in wasm. - // The event in FRAME is represented as an enum and the size of the enum depends on the biggest variant. - // So, instead of requiring `size_of() * expected_events` we only need to - // allocate `size_of>() * expected_events`. 
- let slash_events = raw_offenders.into_iter() - .flat_map(|offender| { - let nom_slashes = offender.nominator_stashes.into_iter().flat_map(|nom| { - balance_slash(nom.clone()).map(Into::into) - .chain(balance_unlocked(nom.clone()).map(Into::into)) - .chain(slash(nom).map(Into::into)).map(Box::new) - }); - - let events = chill(offender.stash.clone()).map(Into::into).map(Box::new) - .chain(slash_report(offender.stash.clone()).map(Into::into).map(Box::new)) - .chain(balance_slash(offender.stash.clone()).map(Into::into).map(Box::new)) - .chain(balance_unlocked(offender.stash.clone()).map(Into::into).map(Box::new)) - .chain(slash(offender.stash).map(Into::into).map(Box::new)) - .chain(nom_slashes) - .collect::>(); - - // the first deposit creates endowed events, see `endowed_reward_events` - if first { - first = false; - let reward_events = reporters.iter() - .flat_map(|reporter| vec![ - Box::new(balance_deposit(reporter.clone(), reward).into()), - Box::new(frame_system::Event::::NewAccount { account: reporter.clone() }.into()), - Box::new(::RuntimeEvent::from( - pallet_balances::Event::::Endowed{ account: reporter.clone(), free_balance: reward.into() } - ).into()), - ]) - .collect::>(); - events.into_iter().chain(reward_events) - } else { - let reward_events = reporters.iter() - .map(|reporter| Box::new(balance_deposit(reporter.clone(), reward).into())) - .collect::>(); - events.into_iter().chain(reward_events) - } - }); - - // In case of error it's useful to see the inputs - log::info!("Inputs: r: {}, o: {}, n: {}", r, o, n); - // make sure that all slashes have been applied - check_events::( - sp_std::iter::empty() - .chain(slash_events) - .chain(sp_std::iter::once(Box::new(::RuntimeEvent::from( - pallet_offences::Event::Offence{ - kind: UnresponsivenessOffence::::ID, - timeslot: 0_u32.to_le_bytes().to_vec(), - } - ).into()))) - ); - } - } - report_offence_grandpa { let n in 0 .. MAX_NOMINATORS.min(MaxNominationsOf::::get()); @@ -409,12 +188,12 @@ benchmarks! { Staking::::set_slash_reward_fraction(Perbill::one()); let (mut offenders, raw_offenders) = make_offenders::(1, n)?; - let keys = ImOnline::::keys(); + let validator_set_count = Session::::validators().len() as u32; let offence = GrandpaEquivocationOffence { time_slot: GrandpaTimeSlot { set_id: 0, round: 0 }, session_index: 0, - validator_set_count: keys.len() as u32, + validator_set_count, offender: T::convert(offenders.pop().unwrap()), }; assert_eq!(System::::event_count(), 0); @@ -446,12 +225,12 @@ benchmarks! { Staking::::set_slash_reward_fraction(Perbill::one()); let (mut offenders, raw_offenders) = make_offenders::(1, n)?; - let keys = ImOnline::::keys(); + let validator_set_count = Session::::validators().len() as u32; let offence = BabeEquivocationOffence { slot: 0u64.into(), session_index: 0, - validator_set_count: keys.len() as u32, + validator_set_count, offender: T::convert(offenders.pop().unwrap()), }; assert_eq!(System::::event_count(), 0); -- GitLab From b0b4451f31dcd9da04e2952d7c4cdcc219271b88 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Andr=C3=A9=20Silva?= <123550+andresilva@users.noreply.github.com> Date: Tue, 28 Nov 2023 14:00:18 +0000 Subject: [PATCH 145/199] polkadot: remove grandpa pause support (#2511) This was never used and we probably don't need it anyway. 
--- .../src/lib.rs | 1 - polkadot/cli/src/cli.rs | 10 - polkadot/cli/src/command.rs | 7 - polkadot/node/service/src/grandpa_support.rs | 178 ------------------ polkadot/node/service/src/lib.rs | 26 +-- polkadot/node/test/service/src/lib.rs | 1 - .../adder/collator/src/main.rs | 1 - .../undying/collator/src/main.rs | 1 - 8 files changed, 3 insertions(+), 222 deletions(-) diff --git a/cumulus/client/relay-chain-inprocess-interface/src/lib.rs b/cumulus/client/relay-chain-inprocess-interface/src/lib.rs index 3d297af00f2..4096cd2523f 100644 --- a/cumulus/client/relay-chain-inprocess-interface/src/lib.rs +++ b/cumulus/client/relay-chain-inprocess-interface/src/lib.rs @@ -283,7 +283,6 @@ fn build_polkadot_full_node( config, polkadot_service::NewFullParams { is_parachain_node, - grandpa_pause: None, // Disable BEEFY. It should not be required by the internal relay chain node. enable_beefy: false, force_authoring_backoff: false, diff --git a/polkadot/cli/src/cli.rs b/polkadot/cli/src/cli.rs index 6a7a8b4ad7b..f1e0d1e99d7 100644 --- a/polkadot/cli/src/cli.rs +++ b/polkadot/cli/src/cli.rs @@ -82,16 +82,6 @@ pub struct RunCmd { #[arg(long = "force-rococo")] pub force_rococo: bool, - /// Setup a GRANDPA scheduled voting pause. - /// - /// This parameter takes two values, namely a block number and a delay (in blocks). - /// - /// After the given block number is finalized the GRANDPA voter will temporarily - /// stop voting for new blocks until the given delay has elapsed (i.e. until a - /// block at height `pause_block + delay` is imported). - #[arg(long = "grandpa-pause", num_args = 2)] - pub grandpa_pause: Vec, - /// Disable the BEEFY gadget. /// /// Currently enabled by default on 'Rococo', 'Wococo' and 'Versi'. diff --git a/polkadot/cli/src/command.rs b/polkadot/cli/src/command.rs index c9c3c39aab1..ba41383c279 100644 --- a/polkadot/cli/src/command.rs +++ b/polkadot/cli/src/command.rs @@ -215,12 +215,6 @@ where set_default_ss58_version(chain_spec); - let grandpa_pause = if cli.run.grandpa_pause.is_empty() { - None - } else { - Some((cli.run.grandpa_pause[0], cli.run.grandpa_pause[1])) - }; - if chain_spec.is_kusama() { info!("----------------------------"); info!("This chain is not in any way"); @@ -257,7 +251,6 @@ where config, service::NewFullParams { is_parachain_node: service::IsParachainNode::No, - grandpa_pause, enable_beefy, force_authoring_backoff: cli.run.force_authoring_backoff, jaeger_agent, diff --git a/polkadot/node/service/src/grandpa_support.rs b/polkadot/node/service/src/grandpa_support.rs index 3a767d9783f..729dbfde5c7 100644 --- a/polkadot/node/service/src/grandpa_support.rs +++ b/polkadot/node/service/src/grandpa_support.rs @@ -16,8 +16,6 @@ //! Polkadot-specific GRANDPA integration utilities. -use std::sync::Arc; - use sp_runtime::traits::{Block as BlockT, Header as _, NumberFor}; use crate::HeaderProvider; @@ -59,55 +57,6 @@ where } } -/// A custom GRANDPA voting rule that "pauses" voting (i.e. keeps voting for the -/// same last finalized block) after a given block at height `N` has been -/// finalized and for a delay of `M` blocks, i.e. until the best block reaches -/// `N` + `M`, the voter will keep voting for block `N`. 
-#[derive(Clone)] -pub(crate) struct PauseAfterBlockFor(pub(crate) N, pub(crate) N); - -impl grandpa::VotingRule for PauseAfterBlockFor> -where - Block: BlockT, - B: sp_blockchain::HeaderBackend + 'static, -{ - fn restrict_vote( - &self, - backend: Arc, - base: &Block::Header, - best_target: &Block::Header, - current_target: &Block::Header, - ) -> grandpa::VotingRuleResult { - let aux = || { - // only restrict votes targeting a block higher than the block - // we've set for the pause - if *current_target.number() > self.0 { - // if we're past the pause period (i.e. `self.0 + self.1`) - // then we no longer need to restrict any votes - if *best_target.number() > self.0 + self.1 { - return None - } - - // if we've finalized the pause block, just keep returning it - // until best number increases enough to pass the condition above - if *base.number() >= self.0 { - return Some((base.hash(), *base.number())) - } - - // otherwise find the target header at the pause block - // to vote on - return walk_backwards_to_target_block(&*backend, self.0, current_target).ok() - } - - None - }; - - let target = aux(); - - Box::pin(async move { target }) - } -} - /// GRANDPA hard forks due to borked migration of session keys after a runtime /// upgrade (at #1491596), the signaled authority set changes were invalid /// (blank keys) and were impossible to finalize. The authorities for these @@ -214,130 +163,3 @@ pub(crate) fn kusama_hard_forks() -> Vec> { }) .collect() } - -#[cfg(test)] -mod tests { - use consensus_common::BlockOrigin; - use grandpa::VotingRule; - use polkadot_test_client::{ - ClientBlockImportExt, DefaultTestClientBuilderExt, InitPolkadotBlockBuilder, - TestClientBuilder, TestClientBuilderExt, - }; - use sp_blockchain::HeaderBackend; - use sp_runtime::traits::Header; - use std::sync::Arc; - - #[test] - fn grandpa_pause_voting_rule_works() { - let _ = env_logger::try_init(); - - let client = Arc::new(TestClientBuilder::new().build()); - let mut hashes = vec![]; - hashes.push(client.info().genesis_hash); - - let mut push_blocks = { - let mut client = client.clone(); - - move |hashes: &mut Vec<_>, n| { - for _ in 0..n { - let block = client.init_polkadot_block_builder().build().unwrap().block; - hashes.push(block.header.hash()); - futures::executor::block_on(client.import(BlockOrigin::Own, block)).unwrap(); - } - } - }; - - let get_header = { - let client = client.clone(); - move |n| client.expect_header(n).unwrap() - }; - - // the rule should filter all votes after block #20 - // is finalized until block #50 is imported. - let voting_rule = super::PauseAfterBlockFor(20, 30); - - // add 10 blocks - push_blocks(&mut hashes, 10); - assert_eq!(client.info().best_number, 10); - - // we have not reached the pause block - // therefore nothing should be restricted - assert_eq!( - futures::executor::block_on(voting_rule.restrict_vote( - client.clone(), - &get_header(hashes[0]), - &get_header(hashes[10]), - &get_header(hashes[10]) - )), - None, - ); - - // add 15 more blocks - // best block: #25 - push_blocks(&mut hashes, 15); - - // we are targeting the pause block, - // the vote should not be restricted - assert_eq!( - futures::executor::block_on(voting_rule.restrict_vote( - client.clone(), - &get_header(hashes[10]), - &get_header(hashes[20]), - &get_header(hashes[20]) - )), - None, - ); - - // we are past the pause block, votes should - // be limited to the pause block. 
- let pause_block = get_header(hashes[20]); - assert_eq!( - futures::executor::block_on(voting_rule.restrict_vote( - client.clone(), - &get_header(hashes[10]), - &get_header(hashes[21]), - &get_header(hashes[21]) - )), - Some((pause_block.hash(), *pause_block.number())), - ); - - // we've finalized the pause block, so we'll keep - // restricting our votes to it. - assert_eq!( - futures::executor::block_on(voting_rule.restrict_vote( - client.clone(), - &pause_block, // #20 - &get_header(hashes[21]), - &get_header(hashes[21]), - )), - Some((pause_block.hash(), *pause_block.number())), - ); - - // add 30 more blocks - // best block: #55 - push_blocks(&mut hashes, 30); - - // we're at the last block of the pause, this block - // should still be considered in the pause period - assert_eq!( - futures::executor::block_on(voting_rule.restrict_vote( - client.clone(), - &pause_block, // #20 - &get_header(hashes[50]), - &get_header(hashes[50]), - )), - Some((pause_block.hash(), *pause_block.number())), - ); - - // we're past the pause period, no votes should be filtered - assert_eq!( - futures::executor::block_on(voting_rule.restrict_vote( - client.clone(), - &pause_block, // #20 - &get_header(hashes[51]), - &get_header(hashes[51]), - )), - None, - ); - } -} diff --git a/polkadot/node/service/src/lib.rs b/polkadot/node/service/src/lib.rs index fafdc586009..5069ec467c9 100644 --- a/polkadot/node/service/src/lib.rs +++ b/polkadot/node/service/src/lib.rs @@ -623,7 +623,6 @@ where #[cfg(feature = "full-node")] pub struct NewFullParams { pub is_parachain_node: IsParachainNode, - pub grandpa_pause: Option<(u32, u32)>, pub enable_beefy: bool, /// Whether to enable the block authoring backoff on production networks /// where it isn't enabled by default. @@ -717,7 +716,6 @@ pub fn new_full( mut config: Configuration, NewFullParams { is_parachain_node, - grandpa_pause, enable_beefy, force_authoring_backoff, jaeger_agent, @@ -1248,32 +1246,14 @@ pub fn new_full( // provide better guarantees of block and vote data availability than // the observer. - // add a custom voting rule to temporarily stop voting for new blocks - // after the given pause block is finalized and restarting after the - // given delay. 
- let mut builder = grandpa::VotingRulesBuilder::default(); + let mut voting_rules_builder = grandpa::VotingRulesBuilder::default(); #[cfg(not(feature = "malus"))] let _malus_finality_delay = None; if let Some(delay) = _malus_finality_delay { info!(?delay, "Enabling malus finality delay",); - builder = builder.add(grandpa::BeforeBestBlockBy(delay)); - }; - - let voting_rule = match grandpa_pause { - Some((block, delay)) => { - info!( - block_number = %block, - delay = %delay, - "GRANDPA scheduled voting pause set for block #{} with a duration of {} blocks.", - block, - delay, - ); - - builder.add(grandpa_support::PauseAfterBlockFor(block, delay)).build() - }, - None => builder.build(), + voting_rules_builder = voting_rules_builder.add(grandpa::BeforeBestBlockBy(delay)); }; let grandpa_config = grandpa::GrandpaParams { @@ -1281,7 +1261,7 @@ pub fn new_full( link: link_half, network: network.clone(), sync: sync_service.clone(), - voting_rule, + voting_rule: voting_rules_builder.build(), prometheus_registry: prometheus_registry.clone(), shared_voter_state, telemetry: telemetry.as_ref().map(|x| x.handle()), diff --git a/polkadot/node/test/service/src/lib.rs b/polkadot/node/test/service/src/lib.rs index a8352b60751..312113869bc 100644 --- a/polkadot/node/test/service/src/lib.rs +++ b/polkadot/node/test/service/src/lib.rs @@ -80,7 +80,6 @@ pub fn new_full( config, polkadot_service::NewFullParams { is_parachain_node, - grandpa_pause: None, enable_beefy: true, force_authoring_backoff: false, jaeger_agent: None, diff --git a/polkadot/parachain/test-parachains/adder/collator/src/main.rs b/polkadot/parachain/test-parachains/adder/collator/src/main.rs index 1558df6708d..3984026f511 100644 --- a/polkadot/parachain/test-parachains/adder/collator/src/main.rs +++ b/polkadot/parachain/test-parachains/adder/collator/src/main.rs @@ -62,7 +62,6 @@ fn main() -> Result<()> { is_parachain_node: polkadot_service::IsParachainNode::Collator( collator.collator_key(), ), - grandpa_pause: None, enable_beefy: false, force_authoring_backoff: false, jaeger_agent: None, diff --git a/polkadot/parachain/test-parachains/undying/collator/src/main.rs b/polkadot/parachain/test-parachains/undying/collator/src/main.rs index 1db1b6f1102..d70a98c7ef6 100644 --- a/polkadot/parachain/test-parachains/undying/collator/src/main.rs +++ b/polkadot/parachain/test-parachains/undying/collator/src/main.rs @@ -82,7 +82,6 @@ fn main() -> Result<()> { is_parachain_node: polkadot_service::IsParachainNode::Collator( collator.collator_key(), ), - grandpa_pause: None, enable_beefy: false, force_authoring_backoff: false, jaeger_agent: None, -- GitLab From ec3a61ed86305e5b70ce6e466cf127addf560186 Mon Sep 17 00:00:00 2001 From: Sebastian Kunert Date: Tue, 28 Nov 2023 16:59:06 +0100 Subject: [PATCH 146/199] Remove pov-recovery race condition/Improve zombienet test (#2526) The test was a bit flaky on CI. There was a race condition in the pov-recovery system. If the timing is bad, it can happen that a block waits for a parent that is already queued for import. The check if a block has children waiting happens when we insert into the import queue. So we need to do an additional check once we receive the import notification for the parent block. Second issue is that `alice` was missing `--in-peers 0` and `--out-peers 0`, so alice was sometimes still fetching block via sync and the assertion on the logs in zombienet would fail. There is another potential issue that I saw once locally. 
We have a failing pov-recovery queue that fails from time to time to check that the retry mechanism does what it should. We now make sure that the same candidate is never failed twice, so the tests become more predictable. --- cumulus/client/pov-recovery/src/lib.rs | 17 ++++++++++++++--- cumulus/test/service/src/lib.rs | 18 ++++++++++++------ cumulus/zombienet/tests/0002-pov_recovery.toml | 5 ++--- 3 files changed, 28 insertions(+), 12 deletions(-) diff --git a/cumulus/client/pov-recovery/src/lib.rs b/cumulus/client/pov-recovery/src/lib.rs index b9a140f55c6..32aba6c8993 100644 --- a/cumulus/client/pov-recovery/src/lib.rs +++ b/cumulus/client/pov-recovery/src/lib.rs @@ -410,6 +410,7 @@ where ?block_hash, parent_hash = ?parent, parent_scheduled_for_recovery, + waiting_blocks = self.waiting_for_parent.len(), "Waiting for recovery of parent.", ); @@ -442,13 +443,13 @@ where _ => (), } - self.import_block(block).await; + self.import_block(block); } /// Import the given `block`. /// /// This will also recursivley drain `waiting_for_parent` and import them as well. - async fn import_block(&mut self, block: Block) { + fn import_block(&mut self, block: Block) { let mut blocks = VecDeque::new(); tracing::debug!(target: LOG_TARGET, block_hash = ?block.hash(), "Importing block retrieved using pov_recovery"); @@ -551,7 +552,6 @@ where }; futures::pin_mut!(pending_candidates); - loop { select! { pending_candidate = pending_candidates.next() => { @@ -573,6 +573,17 @@ where imported = imported_blocks.next() => { if let Some(imported) = imported { self.clear_waiting_recovery(&imported.hash); + + // We need to double check that no blocks are waiting for this block. + // Can happen when a waiting child block is queued to wait for parent while the parent block is still + // in the import queue. 
+ if let Some(waiting_blocks) = self.waiting_for_parent.remove(&imported.hash) { + for block in waiting_blocks { + tracing::debug!(target: LOG_TARGET, block_hash = ?block.hash(), resolved_parent = ?imported.hash, "Found new waiting child block during import, queuing."); + self.import_block(block); + } + }; + } else { tracing::debug!(target: LOG_TARGET, "Imported blocks stream ended"); return; diff --git a/cumulus/test/service/src/lib.rs b/cumulus/test/service/src/lib.rs index 4d4c60d1bda..fb858ce0b71 100644 --- a/cumulus/test/service/src/lib.rs +++ b/cumulus/test/service/src/lib.rs @@ -27,6 +27,7 @@ mod genesis; use runtime::AccountId; use sc_executor::{HeapAllocStrategy, WasmExecutor, DEFAULT_HEAP_ALLOC_STRATEGY}; use std::{ + collections::HashSet, future::Future, net::{IpAddr, Ipv4Addr, SocketAddr}, time::Duration, @@ -57,7 +58,7 @@ use cumulus_test_runtime::{Hash, Header, NodeBlock as Block, RuntimeApi}; use frame_system_rpc_runtime_api::AccountNonceApi; use polkadot_node_subsystem::{errors::RecoveryError, messages::AvailabilityRecoveryMessage}; use polkadot_overseer::Handle as OverseerHandle; -use polkadot_primitives::{CollatorPair, Hash as PHash, PersistedValidationData}; +use polkadot_primitives::{CandidateHash, CollatorPair, Hash as PHash, PersistedValidationData}; use polkadot_service::ProvideRuntimeApi; use sc_consensus::ImportQueue; use sc_network::{ @@ -144,12 +145,13 @@ pub type TransactionPool = Arc>; pub struct FailingRecoveryHandle { overseer_handle: OverseerHandle, counter: u32, + failed_hashes: HashSet, } impl FailingRecoveryHandle { /// Create a new FailingRecoveryHandle pub fn new(overseer_handle: OverseerHandle) -> Self { - Self { overseer_handle, counter: 0 } + Self { overseer_handle, counter: 0, failed_hashes: Default::default() } } } @@ -160,11 +162,15 @@ impl RecoveryHandle for FailingRecoveryHandle { message: AvailabilityRecoveryMessage, origin: &'static str, ) { - // For every 5th block we immediately signal unavailability to trigger - // a retry. - if self.counter % 5 == 0 { + let AvailabilityRecoveryMessage::RecoverAvailableData(ref receipt, _, _, _) = message; + let candidate_hash = receipt.hash(); + + // For every 3rd block we immediately signal unavailability to trigger + // a retry. The same candidate is never failed multiple times to ensure progress. 
+ if self.counter % 3 == 0 && self.failed_hashes.insert(candidate_hash) { + tracing::info!(target: LOG_TARGET, ?candidate_hash, "Failing pov recovery."); + let AvailabilityRecoveryMessage::RecoverAvailableData(_, _, _, back_sender) = message; - tracing::info!(target: LOG_TARGET, "Failing pov recovery."); back_sender .send(Err(RecoveryError::Unavailable)) .expect("Return channel should work here."); diff --git a/cumulus/zombienet/tests/0002-pov_recovery.toml b/cumulus/zombienet/tests/0002-pov_recovery.toml index 105e4a324f3..fe42fd4b2f6 100644 --- a/cumulus/zombienet/tests/0002-pov_recovery.toml +++ b/cumulus/zombienet/tests/0002-pov_recovery.toml @@ -34,13 +34,12 @@ add_to_genesis = false args = ["--disable-block-announcements"] # run 'alice' as a parachain collator who does not produce blocks - # 'alice' is a bootnode for 'bob' and 'charlie' [[parachains.collators]] name = "alice" validator = true # collator image = "{{COL_IMAGE}}" command = "test-parachain" - args = ["-lparachain::availability=trace,sync=debug,parachain=debug,cumulus-pov-recovery=debug,cumulus-consensus=debug", "--use-null-consensus", "--disable-block-announcements", "--bootnodes {{'bob'|zombie('multiAddress')}}", "--", "--reserved-only", "--reserved-nodes {{'ferdie'|zombie('multiAddress')}}"] + args = ["-lparachain::availability=trace,sync=debug,parachain=debug,cumulus-pov-recovery=debug,cumulus-consensus=debug", "--use-null-consensus", "--disable-block-announcements", "--bootnodes {{'bob'|zombie('multiAddress')}}", "--in-peers 0", "--out-peers 0", "--", "--reserved-only", "--reserved-nodes {{'ferdie'|zombie('multiAddress')}}"] # run 'charlie' as a parachain full node [[parachains.collators]] @@ -48,7 +47,7 @@ add_to_genesis = false validator = false # full node image = "{{COL_IMAGE}}" command = "test-parachain" - args = ["-lparachain::availability=trace,sync=debug,parachain=debug,cumulus-pov-recovery=debug,cumulus-consensus=debug", "--disable-block-announcements", "--bootnodes {{'bob'|zombie('multiAddress')}}", "--in-peers 0", "--out-peers 0","--", "--reserved-only", "--reserved-nodes {{'ferdie'|zombie('multiAddress')}}"] + args = ["-lparachain::availability=trace,sync=debug,parachain=debug,cumulus-pov-recovery=debug,cumulus-consensus=debug", "--disable-block-announcements", "--bootnodes {{'bob'|zombie('multiAddress')}}", "--in-peers 0", "--out-peers 0", "--", "--reserved-only", "--reserved-nodes {{'ferdie'|zombie('multiAddress')}}"] # we fail recovery for 'eve' from time to time to test retries [[parachains.collators]] -- GitLab From e71c484d5bd12330e16f568ad582e1fc1c878669 Mon Sep 17 00:00:00 2001 From: Aaro Altonen <48052676+altonen@users.noreply.github.com> Date: Tue, 28 Nov 2023 20:18:52 +0200 Subject: [PATCH 147/199] Rework the event system of `sc-network` (#1370) This commit introduces a new concept called `NotificationService` which allows Polkadot protocols to communicate with the underlying notification protocol implementation directly, without routing events through `NetworkWorker`. This implies that each protocol has its own service which it uses to communicate with remote peers and that each `NotificationService` is unique with respect to the underlying notification protocol, meaning `NotificationService` for the transaction protocol can only be used to send and receive transaction-related notifications. 
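A minimal sketch of how a protocol task might drive its own `NotificationService` handle rather than filtering a shared event stream; the `run_my_protocol` function is hypothetical, and the import path and exact signatures are assumptions to be checked against the `sc-network` crate rather than an exact API reference:

```rust
// Illustrative sketch only; not part of this patch. The import path and exact
// signatures are assumptions -- verify against `sc-network` before relying on them.
use sc_network::service::traits::{NotificationEvent, NotificationService, ValidationResult};

// A hypothetical protocol task that owns its dedicated notification handle.
async fn run_my_protocol(mut notifications: Box<dyn NotificationService>) {
    while let Some(event) = notifications.next_event().await {
        match event {
            // The protocol itself decides whether to accept an inbound substream.
            // If it rejects, no local handshake is sent, so the remote side can
            // tell that it was rejected.
            NotificationEvent::ValidateInboundSubstream { result_tx, .. } => {
                let _ = result_tx.send(ValidationResult::Accept);
            },
            NotificationEvent::NotificationStreamOpened { peer, .. } => {
                // A (possibly custom) handshake for `peer` is available here.
                let _ = peer;
            },
            NotificationEvent::NotificationReceived { peer, notification } => {
                // Only notifications for this protocol arrive on this handle.
                let _ = (peer, notification);
            },
            NotificationEvent::NotificationStreamClosed { peer, .. } => {
                let _ = peer;
            },
        }
    }
}
```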
The `NotificationService` concept introduces two additional benefits: * allow protocols to start using custom handshakes * allow protocols to accept/reject inbound peers Previously, the validation of inbound connections was solely the responsibility of `ProtocolController`. This caused issues with light peers and `SyncingEngine` as `ProtocolController` would accept more peers than `SyncingEngine` could accept, which caused peers to have differing views of their own states. `SyncingEngine` would reject excess peers but these rejections were not properly communicated to those peers, causing them to assume that they were accepted. With `NotificationService`, the local handshake is not sent to the remote peer if the peer is rejected, which allows it to detect that it was rejected. This commit also deprecates the use of `NetworkEventStream` for all notification-related events and going forward only DHT events are provided through `NetworkEventStream`. If protocols wish to follow each other's events, they must introduce additional abstractions, as is done for GRANDPA and transactions protocols by following the syncing protocol through `SyncEventStream`. Fixes https://github.com/paritytech/polkadot-sdk/issues/512 Fixes https://github.com/paritytech/polkadot-sdk/issues/514 Fixes https://github.com/paritytech/polkadot-sdk/issues/515 Fixes https://github.com/paritytech/polkadot-sdk/issues/554 Fixes https://github.com/paritytech/polkadot-sdk/issues/556 --- These changes are transferred from https://github.com/paritytech/substrate/pull/14197 but there are no functional changes compared to that PR --------- Co-authored-by: Dmitry Markin Co-authored-by: Alexandru Vasile <60601340+lexnv@users.noreply.github.com> --- Cargo.lock | 6 + .../relay-chain-minimal-node/Cargo.toml | 1 + .../src/collator_overseer.rs | 15 +- .../relay-chain-minimal-node/src/lib.rs | 14 +- .../relay-chain-minimal-node/src/network.rs | 25 +- polkadot/node/network/bridge/src/lib.rs | 1 + polkadot/node/network/bridge/src/network.rs | 96 +- polkadot/node/network/bridge/src/rx/mod.rs | 952 +++++++++-------- polkadot/node/network/bridge/src/rx/tests.rs | 335 ++++-- polkadot/node/network/bridge/src/tx/mod.rs | 42 +- polkadot/node/network/bridge/src/tx/tests.rs | 130 ++- .../network/bridge/src/validator_discovery.rs | 13 +- .../node/network/protocol/src/peer_set.rs | 77 +- polkadot/node/service/Cargo.toml | 1 + polkadot/node/service/src/lib.rs | 56 +- polkadot/node/service/src/overseer.rs | 14 +- .../bin/node-template/node/src/service.rs | 7 +- substrate/bin/node/cli/src/service.rs | 33 +- .../consensus/beefy/src/communication/mod.rs | 12 +- substrate/client/consensus/beefy/src/lib.rs | 6 +- substrate/client/consensus/beefy/src/tests.rs | 41 +- .../client/consensus/beefy/src/worker.rs | 4 + .../grandpa/src/communication/mod.rs | 4 +- .../grandpa/src/communication/tests.rs | 232 ++-- substrate/client/consensus/grandpa/src/lib.rs | 22 +- .../client/consensus/grandpa/src/observer.rs | 23 +- .../client/consensus/grandpa/src/tests.rs | 82 +- .../client/executor/wasmtime/src/tests.rs | 8 +- substrate/client/mixnet/Cargo.toml | 1 + .../client/mixnet/src/packet_dispatcher.rs | 36 +- substrate/client/mixnet/src/protocol.rs | 25 +- substrate/client/mixnet/src/run.rs | 60 +- .../client/mixnet/src/sync_with_runtime.rs | 1 + substrate/client/network-gossip/Cargo.toml | 2 + substrate/client/network-gossip/src/bridge.rs | 268 +++-- .../network-gossip/src/state_machine.rs | 166 ++- substrate/client/network/Cargo.toml | 2 + substrate/client/network/common/src/role.rs |
14 +- substrate/client/network/src/behaviour.rs | 55 +- substrate/client/network/src/config.rs | 90 +- substrate/client/network/src/error.rs | 9 + substrate/client/network/src/event.rs | 49 +- substrate/client/network/src/lib.rs | 22 +- substrate/client/network/src/mock.rs | 9 + substrate/client/network/src/peer_store.rs | 41 +- substrate/client/network/src/protocol.rs | 441 +++----- .../client/network/src/protocol/message.rs | 1 + .../network/src/protocol/notifications.rs | 4 + .../src/protocol/notifications/behaviour.rs | 994 ++++++++++++++---- .../src/protocol/notifications/handler.rs | 73 +- .../protocol/notifications/service/metrics.rs | 130 +++ .../src/protocol/notifications/service/mod.rs | 634 +++++++++++ .../protocol/notifications/service/tests.rs | 839 +++++++++++++++ .../src/protocol/notifications/tests.rs | 31 +- .../client/network/src/protocol_controller.rs | 3 + substrate/client/network/src/service.rs | 218 ++-- .../client/network/src/service/metrics.rs | 28 - .../client/network/src/service/signature.rs | 2 + .../client/network/src/service/traits.rs | 205 +++- substrate/client/network/statement/src/lib.rs | 123 ++- substrate/client/network/sync/src/engine.rs | 386 ++++--- .../client/network/sync/src/service/mock.rs | 2 + substrate/client/network/test/src/lib.rs | 97 +- substrate/client/network/test/src/service.rs | 337 +++--- substrate/client/network/test/src/sync.rs | 86 +- .../client/network/transactions/src/lib.rs | 156 ++- substrate/client/offchain/src/api.rs | 6 +- substrate/client/offchain/src/lib.rs | 8 +- substrate/client/service/src/builder.rs | 33 +- .../data/account_reentrance_count_call.wat | 2 +- .../data/add_remove_delegate_dependency.wat | 16 +- .../frame/contracts/fixtures/data/balance.wat | 2 +- .../frame/contracts/fixtures/data/call.wat | 2 +- .../fixtures/data/call_runtime_and_call.wat | 2 +- .../fixtures/data/caller_contract.wat | 66 +- .../fixtures/data/chain_extension.wat | 2 +- .../data/chain_extension_temp_storage.wat | 2 +- .../fixtures/data/create_storage_and_call.wat | 2 +- .../data/create_storage_and_instantiate.wat | 2 +- .../contracts/fixtures/data/crypto_hashes.wat | 6 +- .../data/debug_message_invalid_utf8.wat | 2 +- .../data/debug_message_logging_disabled.wat | 2 +- .../fixtures/data/debug_message_works.wat | 2 +- .../contracts/fixtures/data/delegate_call.wat | 6 +- .../fixtures/data/delegate_call_lib.wat | 2 +- .../fixtures/data/delegate_call_simple.wat | 2 +- .../fixtures/data/destroy_and_transfer.wat | 2 +- .../frame/contracts/fixtures/data/drain.wat | 2 +- .../contracts/fixtures/data/ecdsa_recover.wat | 2 +- .../contracts/fixtures/data/event_size.wat | 2 +- .../contracts/fixtures/data/multi_store.wat | 2 +- .../fixtures/data/reentrance_count_call.wat | 10 +- .../data/reentrance_count_delegated_call.wat | 12 +- .../contracts/fixtures/data/self_destruct.wat | 2 +- .../data/self_destructing_constructor.wat | 2 +- .../contracts/fixtures/data/set_code_hash.wat | 6 +- .../contracts/fixtures/data/storage_size.wat | 2 +- .../contracts/fixtures/data/store_call.wat | 2 +- .../contracts/fixtures/data/store_deploy.wat | 2 +- .../contracts/fixtures/data/xcm_execute.wat | 2 +- .../contracts/fixtures/data/xcm_send.wat | 2 +- substrate/frame/contracts/src/wasm/mod.rs | 54 +- 102 files changed, 5628 insertions(+), 2537 deletions(-) create mode 100644 substrate/client/network/src/protocol/notifications/service/metrics.rs create mode 100644 substrate/client/network/src/protocol/notifications/service/mod.rs create mode 100644 
substrate/client/network/src/protocol/notifications/service/tests.rs diff --git a/Cargo.lock b/Cargo.lock index f99b579bfe9..4fa9e83a173 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4177,6 +4177,7 @@ dependencies = [ "cumulus-relay-chain-interface", "cumulus-relay-chain-rpc-interface", "futures", + "parking_lot 0.12.1", "polkadot-availability-recovery", "polkadot-collator-protocol", "polkadot-core-primitives", @@ -13268,6 +13269,7 @@ dependencies = [ "pallet-transaction-payment-rpc-runtime-api", "parity-db", "parity-scale-codec", + "parking_lot 0.12.1", "polkadot-approval-distribution", "polkadot-availability-bitfield-distribution", "polkadot-availability-distribution", @@ -15728,6 +15730,7 @@ dependencies = [ "array-bytes 4.2.0", "arrayvec 0.7.4", "blake2 0.10.6", + "bytes", "futures", "futures-timer", "libp2p-identity", @@ -15793,6 +15796,7 @@ dependencies = [ "tempfile", "thiserror", "tokio", + "tokio-stream", "tokio-test", "tokio-util", "unsigned-varint", @@ -15848,10 +15852,12 @@ name = "sc-network-gossip" version = "0.10.0-dev" dependencies = [ "ahash 0.8.3", + "async-trait", "futures", "futures-timer", "libp2p", "log", + "parity-scale-codec", "quickcheck", "sc-network", "sc-network-common", diff --git a/cumulus/client/relay-chain-minimal-node/Cargo.toml b/cumulus/client/relay-chain-minimal-node/Cargo.toml index ce76fc5cd6d..ee93df09ce1 100644 --- a/cumulus/client/relay-chain-minimal-node/Cargo.toml +++ b/cumulus/client/relay-chain-minimal-node/Cargo.toml @@ -47,4 +47,5 @@ array-bytes = "6.1" tracing = "0.1.37" async-trait = "0.1.73" futures = "0.3.28" +parking_lot = "0.12.1" diff --git a/cumulus/client/relay-chain-minimal-node/src/collator_overseer.rs b/cumulus/client/relay-chain-minimal-node/src/collator_overseer.rs index a785a9f6f79..5f5bf338ef9 100644 --- a/cumulus/client/relay-chain-minimal-node/src/collator_overseer.rs +++ b/cumulus/client/relay-chain-minimal-node/src/collator_overseer.rs @@ -15,7 +15,8 @@ // along with Polkadot. If not, see . use futures::{select, StreamExt}; -use std::sync::Arc; +use parking_lot::Mutex; +use std::{collections::HashMap, sync::Arc}; use polkadot_availability_recovery::AvailabilityRecoverySubsystem; use polkadot_collator_protocol::{CollatorProtocolSubsystem, ProtocolSide}; @@ -28,7 +29,7 @@ use polkadot_node_core_chain_api::ChainApiSubsystem; use polkadot_node_core_prospective_parachains::ProspectiveParachainsSubsystem; use polkadot_node_core_runtime_api::RuntimeApiSubsystem; use polkadot_node_network_protocol::{ - peer_set::PeerSetProtocolNames, + peer_set::{PeerSet, PeerSetProtocolNames}, request_response::{ v1::{self, AvailableDataFetchingRequest}, v2, IncomingRequestReceiver, ReqProtocolNames, @@ -42,7 +43,7 @@ use polkadot_overseer::{ use polkadot_primitives::CollatorPair; use sc_authority_discovery::Service as AuthorityDiscoveryService; -use sc_network::NetworkStateInfo; +use sc_network::{NetworkStateInfo, NotificationService}; use sc_service::TaskManager; use sc_utils::mpsc::tracing_unbounded; @@ -77,6 +78,8 @@ pub(crate) struct CollatorOverseerGenArgs<'a> { pub req_protocol_names: ReqProtocolNames, /// Peerset protocols name mapping pub peer_set_protocol_names: PeerSetProtocolNames, + /// Notification services for validation/collation protocols. 
+ pub notification_services: HashMap>, } fn build_overseer( @@ -94,6 +97,7 @@ fn build_overseer( collator_pair, req_protocol_names, peer_set_protocol_names, + notification_services, }: CollatorOverseerGenArgs<'_>, ) -> Result< (Overseer, Arc>, OverseerHandle), @@ -101,6 +105,8 @@ fn build_overseer( > { let spawner = SpawnGlue(spawner); let network_bridge_metrics: NetworkBridgeMetrics = Metrics::register(registry)?; + let notification_sinks = Arc::new(Mutex::new(HashMap::new())); + let builder = Overseer::builder() .availability_distribution(DummySubsystem) .availability_recovery(AvailabilityRecoverySubsystem::for_collator( @@ -131,6 +137,8 @@ fn build_overseer( sync_oracle, network_bridge_metrics.clone(), peer_set_protocol_names.clone(), + notification_services, + notification_sinks.clone(), )) .network_bridge_tx(NetworkBridgeTxSubsystem::new( network_service, @@ -138,6 +146,7 @@ fn build_overseer( network_bridge_metrics, req_protocol_names, peer_set_protocol_names, + notification_sinks, )) .provisioner(DummySubsystem) .runtime_api(RuntimeApiSubsystem::new( diff --git a/cumulus/client/relay-chain-minimal-node/src/lib.rs b/cumulus/client/relay-chain-minimal-node/src/lib.rs index 8801f93640c..d121d2d3356 100644 --- a/cumulus/client/relay-chain-minimal-node/src/lib.rs +++ b/cumulus/client/relay-chain-minimal-node/src/lib.rs @@ -21,7 +21,7 @@ use cumulus_relay_chain_rpc_interface::{RelayChainRpcClient, RelayChainRpcInterf use network::build_collator_network; use polkadot_network_bridge::{peer_sets_info, IsAuthority}; use polkadot_node_network_protocol::{ - peer_set::PeerSetProtocolNames, + peer_set::{PeerSet, PeerSetProtocolNames}, request_response::{ v1, v2, IncomingRequest, IncomingRequestReceiver, Protocol, ReqProtocolNames, }, @@ -175,10 +175,13 @@ async fn new_minimal_relay_chain( let peer_set_protocol_names = PeerSetProtocolNames::new(genesis_hash, config.chain_spec.fork_id()); let is_authority = if role.is_authority() { IsAuthority::Yes } else { IsAuthority::No }; - - for config in peer_sets_info(is_authority, &peer_set_protocol_names) { - net_config.add_notification_protocol(config); - } + let notification_services = peer_sets_info(is_authority, &peer_set_protocol_names) + .into_iter() + .map(|(config, (peerset, service))| { + net_config.add_notification_protocol(config); + (peerset, service) + }) + .collect::>>(); let request_protocol_names = ReqProtocolNames::new(genesis_hash, config.chain_spec.fork_id()); let (collation_req_receiver_v1, collation_req_receiver_v2, available_data_req_receiver) = @@ -218,6 +221,7 @@ async fn new_minimal_relay_chain( collator_pair, req_protocol_names: request_protocol_names, peer_set_protocol_names, + notification_services, }; let overseer_handle = diff --git a/cumulus/client/relay-chain-minimal-node/src/network.rs b/cumulus/client/relay-chain-minimal-node/src/network.rs index 813dca47a03..95785063c1a 100644 --- a/cumulus/client/relay-chain-minimal-node/src/network.rs +++ b/cumulus/client/relay-chain-minimal-node/src/network.rs @@ -26,10 +26,9 @@ use sc_network::{ NetworkService, }; -use sc_network::config::FullNetworkConfiguration; +use sc_network::{config::FullNetworkConfiguration, NotificationService}; use sc_network_common::{role::Roles, sync::message::BlockAnnouncesHandshake}; use sc_service::{error::Error, Configuration, NetworkStarter, SpawnTaskHandle}; -use sc_utils::mpsc::tracing_unbounded; use std::{iter, sync::Arc}; @@ -45,7 +44,7 @@ pub(crate) fn build_collator_network( Error, > { let protocol_id = config.protocol_id(); - let 
block_announce_config = get_block_announce_proto_config::( + let (block_announce_config, _notification_service) = get_block_announce_proto_config::( protocol_id.clone(), &None, Roles::from(&config.role), @@ -69,8 +68,6 @@ pub(crate) fn build_collator_network( let peer_store_handle = peer_store.handle(); spawn_handle.spawn("peer-store", Some("networking"), peer_store.run()); - // RX is not used for anything because syncing is not started for the minimal node - let (tx, _rx) = tracing_unbounded("mpsc_syncing_engine_protocol", 100_000); let network_params = sc_network::config::Params:: { role: config.role.clone(), executor: { @@ -86,7 +83,6 @@ pub(crate) fn build_collator_network( protocol_id, metrics_registry: config.prometheus_config.as_ref().map(|config| config.registry.clone()), block_announce_config, - tx, }; let network_worker = sc_network::NetworkWorker::new(network_params)?; @@ -150,7 +146,7 @@ fn get_block_announce_proto_config( best_number: NumberFor, best_hash: B::Hash, genesis_hash: B::Hash, -) -> NonDefaultSetConfig { +) -> (NonDefaultSetConfig, Box) { let block_announces_protocol = { let genesis_hash = genesis_hash.as_ref(); if let Some(ref fork_id) = fork_id { @@ -160,12 +156,11 @@ fn get_block_announce_proto_config( } }; - NonDefaultSetConfig { - notifications_protocol: block_announces_protocol.into(), - fallback_names: iter::once(format!("/{}/block-announces/1", protocol_id.as_ref()).into()) - .collect(), - max_notification_size: 1024 * 1024, - handshake: Some(NotificationHandshake::new(BlockAnnouncesHandshake::::build( + NonDefaultSetConfig::new( + block_announces_protocol.into(), + iter::once(format!("/{}/block-announces/1", protocol_id.as_ref()).into()).collect(), + 1024 * 1024, + Some(NotificationHandshake::new(BlockAnnouncesHandshake::::build( roles, best_number, best_hash, @@ -173,11 +168,11 @@ fn get_block_announce_proto_config( ))), // NOTE: `set_config` will be ignored by `protocol.rs` as the block announcement // protocol is still hardcoded into the peerset. - set_config: SetConfig { + SetConfig { in_peers: 0, out_peers: 0, reserved_nodes: Vec::new(), non_reserved_mode: NonReservedPeerMode::Deny, }, - } + ) } diff --git a/polkadot/node/network/bridge/src/lib.rs b/polkadot/node/network/bridge/src/lib.rs index 46d4a00faac..ddce99d5c2a 100644 --- a/polkadot/node/network/bridge/src/lib.rs +++ b/polkadot/node/network/bridge/src/lib.rs @@ -83,6 +83,7 @@ pub(crate) enum WireMessage { ViewUpdate(View), } +#[derive(Debug)] pub(crate) struct PeerData { /// The Latest view sent by the peer. view: View, diff --git a/polkadot/node/network/bridge/src/network.rs b/polkadot/node/network/bridge/src/network.rs index c264c94cc19..a9339a5c443 100644 --- a/polkadot/node/network/bridge/src/network.rs +++ b/polkadot/node/network/bridge/src/network.rs @@ -14,23 +14,24 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . 
-use std::{collections::HashSet, sync::Arc}; +use std::{ + collections::{HashMap, HashSet}, + sync::Arc, +}; use async_trait::async_trait; -use futures::{prelude::*, stream::BoxStream}; +use parking_lot::Mutex; use parity_scale_codec::Encode; use sc_network::{ - config::parse_addr, multiaddr::Multiaddr, types::ProtocolName, Event as NetworkEvent, - IfDisconnected, NetworkEventStream, NetworkNotification, NetworkPeers, NetworkRequest, - NetworkService, OutboundFailure, ReputationChange, RequestFailure, + config::parse_addr, multiaddr::Multiaddr, types::ProtocolName, IfDisconnected, MessageSink, + NetworkPeers, NetworkRequest, NetworkService, OutboundFailure, ReputationChange, + RequestFailure, }; use polkadot_node_network_protocol::{ - peer_set::{ - CollationVersion, PeerSet, PeerSetProtocolNames, ProtocolVersion, ValidationVersion, - }, + peer_set::{CollationVersion, PeerSet, ProtocolVersion, ValidationVersion}, request_response::{OutgoingRequest, Recipient, ReqProtocolNames, Requests}, v1 as protocol_v1, v2 as protocol_v2, vstaging as protocol_vstaging, PeerId, }; @@ -44,104 +45,94 @@ const LOG_TARGET: &'static str = "parachain::network-bridge-net"; // Helper function to send a validation v1 message to a list of peers. // Messages are always sent via the main protocol, even legacy protocol messages. pub(crate) fn send_validation_message_v1( - net: &mut impl Network, peers: Vec, - peerset_protocol_names: &PeerSetProtocolNames, message: WireMessage, metrics: &Metrics, + notification_sinks: &Arc>>>, ) { gum::trace!(target: LOG_TARGET, ?peers, ?message, "Sending validation v1 message to peers",); send_message( - net, peers, PeerSet::Validation, ValidationVersion::V1.into(), - peerset_protocol_names, message, metrics, + notification_sinks, ); } // Helper function to send a validation vstaging message to a list of peers. // Messages are always sent via the main protocol, even legacy protocol messages. pub(crate) fn send_validation_message_vstaging( - net: &mut impl Network, peers: Vec, - peerset_protocol_names: &PeerSetProtocolNames, message: WireMessage, metrics: &Metrics, + notification_sinks: &Arc>>>, ) { gum::trace!(target: LOG_TARGET, ?peers, ?message, "Sending validation vstaging message to peers",); send_message( - net, peers, PeerSet::Validation, ValidationVersion::VStaging.into(), - peerset_protocol_names, message, metrics, + notification_sinks, ); } // Helper function to send a validation v2 message to a list of peers. // Messages are always sent via the main protocol, even legacy protocol messages. pub(crate) fn send_validation_message_v2( - net: &mut impl Network, peers: Vec, - protocol_names: &PeerSetProtocolNames, message: WireMessage, metrics: &Metrics, + notification_sinks: &Arc>>>, ) { send_message( - net, peers, PeerSet::Validation, ValidationVersion::V2.into(), - protocol_names, message, metrics, + notification_sinks, ); } // Helper function to send a collation v1 message to a list of peers. // Messages are always sent via the main protocol, even legacy protocol messages. pub(crate) fn send_collation_message_v1( - net: &mut impl Network, peers: Vec, - peerset_protocol_names: &PeerSetProtocolNames, message: WireMessage, metrics: &Metrics, + notification_sinks: &Arc>>>, ) { send_message( - net, peers, PeerSet::Collation, CollationVersion::V1.into(), - peerset_protocol_names, message, metrics, + notification_sinks, ); } // Helper function to send a collation v2 message to a list of peers. // Messages are always sent via the main protocol, even legacy protocol messages. 
pub(crate) fn send_collation_message_v2( - net: &mut impl Network, peers: Vec, - peerset_protocol_names: &PeerSetProtocolNames, message: WireMessage, metrics: &Metrics, + notification_sinks: &Arc>>>, ) { send_message( - net, peers, PeerSet::Collation, CollationVersion::V2.into(), - peerset_protocol_names, message, metrics, + notification_sinks, ); } @@ -151,19 +142,19 @@ pub(crate) fn send_collation_message_v2( /// messages that are compatible with the passed peer set, as that is currently not enforced by /// this function. These are messages of type `WireMessage` parameterized on the matching type. fn send_message( - net: &mut impl Network, mut peers: Vec, peer_set: PeerSet, version: ProtocolVersion, - protocol_names: &PeerSetProtocolNames, message: M, metrics: &super::Metrics, + network_notification_sinks: &Arc>>>, ) where M: Encode + Clone, { if peers.is_empty() { return } + let message = { let encoded = message.encode(); metrics.on_notification_sent(peer_set, version, encoded.len(), peers.len()); @@ -171,13 +162,13 @@ fn send_message( encoded }; - // optimization: generate the protocol name once. - let protocol_name = protocol_names.get_name(peer_set, version); + let notification_sinks = network_notification_sinks.lock(); + gum::trace!( target: LOG_TARGET, ?peers, + ?peer_set, ?version, - ?protocol_name, ?message, "Sending message to peers", ); @@ -185,29 +176,26 @@ fn send_message( // optimization: avoid cloning the message for the last peer in the // list. The message payload can be quite large. If the underlying // network used `Bytes` this would not be necessary. + // + // The peer may have been disconnected by the time `send_message()` is called, + // at which point the sink is not available. let last_peer = peers.pop(); - - // We always send messages on the "main" name even when a negotiated - // fallback is used. The libp2p implementation handles the fallback - // under the hood. - let protocol_name = protocol_names.get_main_name(peer_set); peers.into_iter().for_each(|peer| { - net.write_notification(peer, protocol_name.clone(), message.clone()); + if let Some(sink) = notification_sinks.get(&(peer_set, peer)) { + sink.send_sync_notification(message.clone()); + } }); + if let Some(peer) = last_peer { - net.write_notification(peer, protocol_name, message); + if let Some(sink) = notification_sinks.get(&(peer_set, peer)) { + sink.send_sync_notification(message.clone()); + } } } /// An abstraction over networking for the purposes of this subsystem. #[async_trait] pub trait Network: Clone + Send + 'static { - /// Get a stream of all events occurring on the network. This may include events unrelated - /// to the Polkadot protocol - the user of this function should filter only for events related - /// to the [`VALIDATION_PROTOCOL_NAME`](VALIDATION_PROTOCOL_NAME) - /// or [`COLLATION_PROTOCOL_NAME`](COLLATION_PROTOCOL_NAME) - fn event_stream(&mut self) -> BoxStream<'static, NetworkEvent>; - /// Ask the network to keep a substream open with these nodes and not disconnect from them /// until removed from the protocol's peer set. /// Note that `out_peers` setting has no effect on this. @@ -239,16 +227,12 @@ pub trait Network: Clone + Send + 'static { /// Disconnect a given peer from the protocol specified without harming reputation. fn disconnect_peer(&self, who: PeerId, protocol: ProtocolName); - /// Write a notification to a peer on the given protocol. - fn write_notification(&self, who: PeerId, protocol: ProtocolName, message: Vec); + /// Get peer role.
+ fn peer_role(&self, who: PeerId, handshake: Vec) -> Option; } #[async_trait] impl Network for Arc> { - fn event_stream(&mut self) -> BoxStream<'static, NetworkEvent> { - NetworkService::event_stream(self, "polkadot-network-bridge").boxed() - } - async fn set_reserved_peers( &mut self, protocol: ProtocolName, @@ -273,10 +257,6 @@ impl Network for Arc> { NetworkService::disconnect_peer(&**self, who, protocol); } - fn write_notification(&self, who: PeerId, protocol: ProtocolName, message: Vec) { - NetworkService::write_notification(&**self, who, protocol, message); - } - async fn start_request( &self, authority_discovery: &mut AD, @@ -348,6 +328,10 @@ impl Network for Arc> { if_disconnected, ); } + + fn peer_role(&self, who: PeerId, handshake: Vec) -> Option { + NetworkService::peer_role(self, who, handshake) + } } /// We assume one `peer_id` per `authority_id`. diff --git a/polkadot/node/network/bridge/src/rx/mod.rs b/polkadot/node/network/bridge/src/rx/mod.rs index 06be57ead00..40cd167a968 100644 --- a/polkadot/node/network/bridge/src/rx/mod.rs +++ b/polkadot/node/network/bridge/src/rx/mod.rs @@ -20,11 +20,14 @@ use super::*; use always_assert::never; use bytes::Bytes; -use futures::stream::{BoxStream, StreamExt}; use net_protocol::filter_by_peer_version; use parity_scale_codec::{Decode, DecodeAll}; +use parking_lot::Mutex; -use sc_network::Event as NetworkEvent; +use sc_network::{ + service::traits::{NotificationEvent, ValidationResult}, + MessageSink, NotificationService, +}; use sp_consensus::SyncOracle; use polkadot_node_network_protocol::{ @@ -88,6 +91,9 @@ pub struct NetworkBridgeRx { shared: Shared, metrics: Metrics, peerset_protocol_names: PeerSetProtocolNames, + validation_service: Box, + collation_service: Box, + notification_sinks: Arc>>>, } impl NetworkBridgeRx { @@ -102,8 +108,18 @@ impl NetworkBridgeRx { sync_oracle: Box, metrics: Metrics, peerset_protocol_names: PeerSetProtocolNames, + mut notification_services: HashMap>, + notification_sinks: Arc>>>, ) -> Self { let shared = Shared::default(); + + let validation_service = notification_services + .remove(&PeerSet::Validation) + .expect("validation protocol was enabled so `NotificationService` must exist; qed"); + let collation_service = notification_services + .remove(&PeerSet::Collation) + .expect("collation protocol was enabled so `NotificationService` must exist; qed"); + Self { network_service, authority_discovery_service, @@ -111,6 +127,9 @@ impl NetworkBridgeRx { shared, metrics, peerset_protocol_names, + validation_service, + collation_service, + notification_sinks, } } } @@ -121,444 +140,563 @@ where Net: Network + Sync, AD: validator_discovery::AuthorityDiscovery + Clone + Sync, { - fn start(mut self, ctx: Context) -> SpawnedSubsystem { - // The stream of networking events has to be created at initialization, otherwise the - // networking might open connections before the stream of events has been grabbed. - let network_stream = self.network_service.event_stream(); - + fn start(self, ctx: Context) -> SpawnedSubsystem { // Swallow error because failure is fatal to the node and we log with more precision // within `run_network`. 
- let future = run_network_in(self, ctx, network_stream) + let future = run_network_in(self, ctx) .map_err(|e| SubsystemError::with_origin("network-bridge", e)) .boxed(); SpawnedSubsystem { name: "network-bridge-rx-subsystem", future } } } -async fn handle_network_messages( - mut sender: impl overseer::NetworkBridgeRxSenderTrait, - mut network_service: impl Network, - network_stream: BoxStream<'static, NetworkEvent>, - mut authority_discovery_service: AD, - metrics: Metrics, - shared: Shared, - peerset_protocol_names: PeerSetProtocolNames, -) -> Result<(), Error> -where +/// Handle notification event received over the validation protocol. +async fn handle_validation_message( + event: NotificationEvent, + network_service: &mut impl Network, + sender: &mut impl overseer::NetworkBridgeRxSenderTrait, + authority_discovery_service: &mut AD, + metrics: &Metrics, + shared: &Shared, + peerset_protocol_names: &PeerSetProtocolNames, + notification_service: &mut Box, + notification_sinks: &mut Arc>>>, +) where AD: validator_discovery::AuthorityDiscovery + Send, { - let mut network_stream = network_stream.fuse(); - loop { - match network_stream.next().await { - None => return Err(Error::EventStreamConcluded), - Some(NetworkEvent::Dht(_)) => {}, - Some(NetworkEvent::NotificationStreamOpened { - remote: peer, - protocol, - role, - negotiated_fallback, - received_handshake: _, - }) => { - let role = ObservedRole::from(role); - let (peer_set, version) = { - let (peer_set, version) = - match peerset_protocol_names.try_get_protocol(&protocol) { - None => continue, - Some(p) => p, - }; - - if let Some(fallback) = negotiated_fallback { - match peerset_protocol_names.try_get_protocol(&fallback) { - None => { + match event { + NotificationEvent::ValidateInboundSubstream { peer, handshake, result_tx, .. } => { + // only accept peers whose role can be determined + let result = network_service + .peer_role(peer, handshake) + .map_or(ValidationResult::Reject, |_| ValidationResult::Accept); + let _ = result_tx.send(result); + }, + NotificationEvent::NotificationStreamOpened { + peer, + handshake, + negotiated_fallback, + .. 
+ } => { + let role = match network_service.peer_role(peer, handshake) { + Some(role) => ObservedRole::from(role), + None => { + gum::debug!( + target: LOG_TARGET, + ?peer, + "Failed to determine peer role for validation protocol", + ); + return + }, + }; + + let (peer_set, version) = { + let (peer_set, version) = + (PeerSet::Validation, PeerSet::Validation.get_main_version()); + + if let Some(fallback) = negotiated_fallback { + match peerset_protocol_names.try_get_protocol(&fallback) { + None => { + gum::debug!( + target: LOG_TARGET, + fallback = &*fallback, + ?peer, + peerset = ?peer_set, + "Unknown fallback", + ); + + return + }, + Some((p2, v2)) => { + if p2 != peer_set { gum::debug!( target: LOG_TARGET, fallback = &*fallback, - ?peer, - ?peer_set, - "Unknown fallback", + fallback_peerset = ?p2, + peerset = ?peer_set, + "Fallback mismatched peer-set", ); - continue - }, - Some((p2, v2)) => { - if p2 != peer_set { - gum::debug!( - target: LOG_TARGET, - fallback = &*fallback, - fallback_peerset = ?p2, - protocol = &*protocol, - peerset = ?peer_set, - "Fallback mismatched peer-set", - ); - - continue - } - - (p2, v2) - }, - } - } else { - (peer_set, version) - } - }; - - gum::debug!( - target: LOG_TARGET, - action = "PeerConnected", - peer_set = ?peer_set, - version = %version, - peer = ?peer, - role = ?role - ); - - let local_view = { - let mut shared = shared.0.lock(); - let peer_map = match peer_set { - PeerSet::Validation => &mut shared.validation_peers, - PeerSet::Collation => &mut shared.collation_peers, - }; + return + } - match peer_map.entry(peer) { - hash_map::Entry::Occupied(_) => continue, - hash_map::Entry::Vacant(vacant) => { - vacant.insert(PeerData { view: View::default(), version }); + (p2, v2) }, } + } else { + (peer_set, version) + } + }; + // store the notification sink to `notification_sinks` so both `NetworkBridgeRx` + // and `NetworkBridgeTx` can send messages to the peer. 
+ match notification_service.message_sink(&peer) { + Some(sink) => { + notification_sinks.lock().insert((peer_set, peer), sink); + }, + None => { + gum::warn!( + target: LOG_TARGET, + peerset = ?peer_set, + version = %version, + ?peer, + ?role, + "Message sink not available for peer", + ); + return + }, + } - metrics.on_peer_connected(peer_set, version); - metrics.note_peer_count(peer_set, version, peer_map.len()); - - shared.local_view.clone().unwrap_or(View::default()) - }; - - let maybe_authority = - authority_discovery_service.get_authority_ids_by_peer_id(peer).await; - - match peer_set { - PeerSet::Validation => { - dispatch_validation_events_to_all( - vec![ - NetworkBridgeEvent::PeerConnected( - peer, - role, - version, - maybe_authority, - ), - NetworkBridgeEvent::PeerViewChange(peer, View::default()), - ], - &mut sender, - &metrics, - ) - .await; - - match ValidationVersion::try_from(version) - .expect("try_get_protocol has already checked version is known; qed") - { - ValidationVersion::V1 => send_validation_message_v1( - &mut network_service, - vec![peer], - &peerset_protocol_names, - WireMessage::::ViewUpdate( - local_view, - ), - &metrics, - ), - ValidationVersion::VStaging => send_validation_message_vstaging( - &mut network_service, - vec![peer], - &peerset_protocol_names, - WireMessage::::ViewUpdate( - local_view, - ), - &metrics, - ), - ValidationVersion::V2 => send_validation_message_v2( - &mut network_service, - vec![peer], - &peerset_protocol_names, - WireMessage::::ViewUpdate( - local_view, - ), - &metrics, - ), - } - }, - PeerSet::Collation => { - dispatch_collation_events_to_all( - vec![ - NetworkBridgeEvent::PeerConnected( - peer, - role, - version, - maybe_authority, - ), - NetworkBridgeEvent::PeerViewChange(peer, View::default()), - ], - &mut sender, - ) - .await; - - match CollationVersion::try_from(version) - .expect("try_get_protocol has already checked version is known; qed") - { - CollationVersion::V1 => send_collation_message_v1( - &mut network_service, - vec![peer], - &peerset_protocol_names, - WireMessage::::ViewUpdate( - local_view, - ), - &metrics, - ), - CollationVersion::V2 => send_collation_message_v2( - &mut network_service, - vec![peer], - &peerset_protocol_names, - WireMessage::::ViewUpdate( - local_view, - ), - &metrics, - ), - } + gum::debug!( + target: LOG_TARGET, + action = "PeerConnected", + peer_set = ?peer_set, + version = %version, + peer = ?peer, + role = ?role + ); + + let local_view = { + let mut shared = shared.0.lock(); + let peer_map = &mut shared.validation_peers; + + match peer_map.entry(peer) { + hash_map::Entry::Occupied(_) => return, + hash_map::Entry::Vacant(vacant) => { + vacant.insert(PeerData { view: View::default(), version }); }, } - }, - Some(NetworkEvent::NotificationStreamClosed { remote: peer, protocol }) => { - let (peer_set, version) = match peerset_protocol_names.try_get_protocol(&protocol) { - None => continue, - Some(peer_set) => peer_set, - }; - gum::debug!( - target: LOG_TARGET, - action = "PeerDisconnected", - peer_set = ?peer_set, - peer = ?peer - ); + metrics.on_peer_connected(peer_set, version); + metrics.note_peer_count(peer_set, version, peer_map.len()); - let was_connected = { - let mut shared = shared.0.lock(); - let peer_map = match peer_set { - PeerSet::Validation => &mut shared.validation_peers, - PeerSet::Collation => &mut shared.collation_peers, - }; + shared.local_view.clone().unwrap_or(View::default()) + }; - let w = peer_map.remove(&peer).is_some(); + let maybe_authority = + 
authority_discovery_service.get_authority_ids_by_peer_id(peer).await; - metrics.on_peer_disconnected(peer_set, version); - metrics.note_peer_count(peer_set, version, peer_map.len()); + dispatch_validation_events_to_all( + vec![ + NetworkBridgeEvent::PeerConnected(peer, role, version, maybe_authority), + NetworkBridgeEvent::PeerViewChange(peer, View::default()), + ], + sender, + &metrics, + ) + .await; - w - }; + match ValidationVersion::try_from(version) + .expect("try_get_protocol has already checked version is known; qed") + { + ValidationVersion::V1 => send_validation_message_v1( + vec![peer], + WireMessage::::ViewUpdate(local_view), + metrics, + notification_sinks, + ), + ValidationVersion::VStaging => send_validation_message_vstaging( + vec![peer], + WireMessage::::ViewUpdate(local_view), + metrics, + notification_sinks, + ), + ValidationVersion::V2 => send_validation_message_v2( + vec![peer], + WireMessage::::ViewUpdate(local_view), + metrics, + notification_sinks, + ), + } + }, + NotificationEvent::NotificationStreamClosed { peer } => { + let (peer_set, version) = (PeerSet::Validation, PeerSet::Validation.get_main_version()); - if was_connected && version == peer_set.get_main_version() { - match peer_set { - PeerSet::Validation => - dispatch_validation_event_to_all( - NetworkBridgeEvent::PeerDisconnected(peer), - &mut sender, - &metrics, - ) - .await, - PeerSet::Collation => - dispatch_collation_event_to_all( - NetworkBridgeEvent::PeerDisconnected(peer), - &mut sender, - ) - .await, - } + gum::debug!( + target: LOG_TARGET, + action = "PeerDisconnected", + ?peer_set, + ?peer + ); + + let was_connected = { + let mut shared = shared.0.lock(); + let peer_map = &mut shared.validation_peers; + + let w = peer_map.remove(&peer).is_some(); + + metrics.on_peer_disconnected(peer_set, version); + metrics.note_peer_count(peer_set, version, peer_map.len()); + + w + }; + + notification_sinks.lock().remove(&(peer_set, peer)); + + if was_connected && version == peer_set.get_main_version() { + dispatch_validation_event_to_all( + NetworkBridgeEvent::PeerDisconnected(peer), + sender, + &metrics, + ) + .await; + } + }, + NotificationEvent::NotificationReceived { peer, notification } => { + let expected_versions = { + let mut versions = PerPeerSet::>::default(); + let shared = shared.0.lock(); + + if let Some(peer_data) = shared.validation_peers.get(&peer) { + versions[PeerSet::Validation] = Some(peer_data.version); } - }, - Some(NetworkEvent::NotificationsReceived { remote, messages }) => { - let expected_versions = { - let mut versions = PerPeerSet::>::default(); - let shared = shared.0.lock(); - if let Some(peer_data) = shared.validation_peers.get(&remote) { - versions[PeerSet::Validation] = Some(peer_data.version); - } - if let Some(peer_data) = shared.collation_peers.get(&remote) { - versions[PeerSet::Collation] = Some(peer_data.version); - } + versions + }; - versions + gum::trace!( + target: LOG_TARGET, + action = "PeerMessage", + peerset = ?PeerSet::Validation, + ?peer, + ); + + let (events, reports) = + if expected_versions[PeerSet::Validation] == Some(ValidationVersion::V1.into()) { + handle_peer_messages::( + peer, + PeerSet::Validation, + &mut shared.0.lock().validation_peers, + vec![notification.into()], + metrics, + ) + } else if expected_versions[PeerSet::Validation] == + Some(ValidationVersion::V2.into()) + { + handle_peer_messages::( + peer, + PeerSet::Validation, + &mut shared.0.lock().validation_peers, + vec![notification.into()], + metrics, + ) + } else if 
expected_versions[PeerSet::Validation] == + Some(ValidationVersion::VStaging.into()) + { + handle_peer_messages::( + peer, + PeerSet::Validation, + &mut shared.0.lock().validation_peers, + vec![notification.into()], + metrics, + ) + } else { + gum::warn!( + target: LOG_TARGET, + version = ?expected_versions[PeerSet::Validation], + "Major logic bug. Peer somehow has unsupported validation protocol version." + ); + + never!("Only versions 1 and 2 are supported; peer set connection checked above; qed"); + + // If a peer somehow triggers this, we'll disconnect them + // eventually. + (Vec::new(), vec![UNCONNECTED_PEERSET_COST]) }; - // non-decoded, but version-checked validation messages. - let v_messages: Result, _> = messages - .iter() - .filter_map(|(protocol, msg_bytes)| { - // version doesn't matter because we always receive on the 'correct' - // protocol name, not the negotiated fallback. - let (peer_set, version) = - peerset_protocol_names.try_get_protocol(protocol)?; - gum::trace!( - target: LOG_TARGET, - ?peer_set, - ?protocol, - ?version, - "Received notification" - ); + for report in reports { + network_service.report_peer(peer, report.into()); + } - if peer_set == PeerSet::Validation { - if expected_versions[PeerSet::Validation].is_none() { - return Some(Err(UNCONNECTED_PEERSET_COST)) - } + dispatch_validation_events_to_all(events, sender, &metrics).await; + }, + } +} + +/// Handle notification event received over the collation protocol. +async fn handle_collation_message( + event: NotificationEvent, + network_service: &mut impl Network, + sender: &mut impl overseer::NetworkBridgeRxSenderTrait, + authority_discovery_service: &mut AD, + metrics: &Metrics, + shared: &Shared, + peerset_protocol_names: &PeerSetProtocolNames, + notification_service: &mut Box, + notification_sinks: &mut Arc>>>, +) where + AD: validator_discovery::AuthorityDiscovery + Send, +{ + match event { + NotificationEvent::ValidateInboundSubstream { peer, handshake, result_tx, .. } => { + // only accept peers whose role can be determined + let result = network_service + .peer_role(peer, handshake) + .map_or(ValidationResult::Reject, |_| ValidationResult::Accept); + let _ = result_tx.send(result); + }, + NotificationEvent::NotificationStreamOpened { + peer, + handshake, + negotiated_fallback, + .. 
+ } => { + let role = match network_service.peer_role(peer, handshake) { + Some(role) => ObservedRole::from(role), + None => { + gum::debug!( + target: LOG_TARGET, + ?peer, + "Failed to determine peer role for validation protocol", + ); + return + }, + }; + + let (peer_set, version) = { + let (peer_set, version) = + (PeerSet::Collation, PeerSet::Collation.get_main_version()); + + if let Some(fallback) = negotiated_fallback { + match peerset_protocol_names.try_get_protocol(&fallback) { + None => { + gum::debug!( + target: LOG_TARGET, + fallback = &*fallback, + ?peer, + ?peer_set, + "Unknown fallback", + ); + + return + }, + Some((p2, v2)) => { + if p2 != peer_set { + gum::debug!( + target: LOG_TARGET, + fallback = &*fallback, + fallback_peerset = ?p2, + peerset = ?peer_set, + "Fallback mismatched peer-set", + ); - Some(Ok(msg_bytes.clone())) - } else { - None - } - }) - .collect(); + return + } - let v_messages = match v_messages { - Err(rep) => { - gum::debug!(target: LOG_TARGET, action = "ReportPeer"); - network_service.report_peer(remote, rep.into()); + (p2, v2) + }, + } + } else { + (peer_set, version) + } + }; + + // store the notification sink to `notification_sinks` so both `NetworkBridgeRx` + // and `NetworkBridgeTx` can send messages to the peer. + match notification_service.message_sink(&peer) { + Some(sink) => { + notification_sinks.lock().insert((peer_set, peer), sink); + }, + None => { + gum::warn!( + target: LOG_TARGET, + peer_set = ?peer_set, + version = %version, + peer = ?peer, + role = ?role, + "Message sink not available for peer", + ); + return + }, + } - continue + gum::debug!( + target: LOG_TARGET, + action = "PeerConnected", + peer_set = ?peer_set, + version = %version, + peer = ?peer, + role = ?role + ); + + let local_view = { + let mut shared = shared.0.lock(); + let peer_map = &mut shared.collation_peers; + + match peer_map.entry(peer) { + hash_map::Entry::Occupied(_) => return, + hash_map::Entry::Vacant(vacant) => { + vacant.insert(PeerData { view: View::default(), version }); }, - Ok(v) => v, - }; + } - // non-decoded, but version-checked collation messages. - let c_messages: Result, _> = messages - .iter() - .filter_map(|(protocol, msg_bytes)| { - // version doesn't matter because we always receive on the 'correct' - // protocol name, not the negotiated fallback. 
- let (peer_set, _version) = - peerset_protocol_names.try_get_protocol(protocol)?; - - if peer_set == PeerSet::Collation { - if expected_versions[PeerSet::Collation].is_none() { - return Some(Err(UNCONNECTED_PEERSET_COST)) - } + metrics.on_peer_connected(peer_set, version); + metrics.note_peer_count(peer_set, version, peer_map.len()); - Some(Ok(msg_bytes.clone())) - } else { - None - } - }) - .collect(); + shared.local_view.clone().unwrap_or(View::default()) + }; - let c_messages = match c_messages { - Err(rep) => { - gum::debug!(target: LOG_TARGET, action = "ReportPeer"); - network_service.report_peer(remote, rep.into()); + let maybe_authority = + authority_discovery_service.get_authority_ids_by_peer_id(peer).await; - continue - }, - Ok(v) => v, - }; + dispatch_collation_events_to_all( + vec![ + NetworkBridgeEvent::PeerConnected(peer, role, version, maybe_authority), + NetworkBridgeEvent::PeerViewChange(peer, View::default()), + ], + sender, + ) + .await; - if v_messages.is_empty() && c_messages.is_empty() { - continue - } + match CollationVersion::try_from(version) + .expect("try_get_protocol has already checked version is known; qed") + { + CollationVersion::V1 => send_collation_message_v1( + vec![peer], + WireMessage::::ViewUpdate(local_view), + metrics, + notification_sinks, + ), + CollationVersion::V2 => send_collation_message_v2( + vec![peer], + WireMessage::::ViewUpdate(local_view), + metrics, + notification_sinks, + ), + } + }, + NotificationEvent::NotificationStreamClosed { peer } => { + let (peer_set, version) = (PeerSet::Collation, PeerSet::Collation.get_main_version()); - gum::trace!( - target: LOG_TARGET, - action = "PeerMessages", - peer = ?remote, - num_validation_messages = %v_messages.len(), - num_collation_messages = %c_messages.len() - ); + gum::debug!( + target: LOG_TARGET, + action = "PeerDisconnected", + ?peer_set, + ?peer + ); - if !v_messages.is_empty() { - let (events, reports) = if expected_versions[PeerSet::Validation] == - Some(ValidationVersion::V1.into()) - { - handle_peer_messages::( - remote, - PeerSet::Validation, - &mut shared.0.lock().validation_peers, - v_messages, - &metrics, - ) - } else if expected_versions[PeerSet::Validation] == - Some(ValidationVersion::V2.into()) - { - handle_peer_messages::( - remote, - PeerSet::Validation, - &mut shared.0.lock().validation_peers, - v_messages, - &metrics, - ) - } else if expected_versions[PeerSet::Validation] == - Some(ValidationVersion::VStaging.into()) - { - handle_peer_messages::( - remote, - PeerSet::Validation, - &mut shared.0.lock().validation_peers, - v_messages, - &metrics, - ) - } else { - gum::warn!( - target: LOG_TARGET, - version = ?expected_versions[PeerSet::Validation], - "Major logic bug. Peer somehow has unsupported validation protocol version." - ); + let was_connected = { + let mut shared = shared.0.lock(); + let peer_map = &mut shared.collation_peers; - never!("Only versions 1 and 2 are supported; peer set connection checked above; qed"); + let w = peer_map.remove(&peer).is_some(); - // If a peer somehow triggers this, we'll disconnect them - // eventually. 
- (Vec::new(), vec![UNCONNECTED_PEERSET_COST]) - }; + metrics.on_peer_disconnected(peer_set, version); + metrics.note_peer_count(peer_set, version, peer_map.len()); - for report in reports { - network_service.report_peer(remote, report.into()); - } + w + }; - dispatch_validation_events_to_all(events, &mut sender, &metrics).await; + notification_sinks.lock().remove(&(peer_set, peer)); + + if was_connected && version == peer_set.get_main_version() { + dispatch_collation_event_to_all(NetworkBridgeEvent::PeerDisconnected(peer), sender) + .await; + } + }, + NotificationEvent::NotificationReceived { peer, notification } => { + let expected_versions = { + let mut versions = PerPeerSet::>::default(); + let shared = shared.0.lock(); + + if let Some(peer_data) = shared.collation_peers.get(&peer) { + versions[PeerSet::Collation] = Some(peer_data.version); } - if !c_messages.is_empty() { - let (events, reports) = if expected_versions[PeerSet::Collation] == - Some(CollationVersion::V1.into()) - { - handle_peer_messages::( - remote, - PeerSet::Collation, - &mut shared.0.lock().collation_peers, - c_messages, - &metrics, - ) - } else if expected_versions[PeerSet::Collation] == - Some(CollationVersion::V2.into()) - { - handle_peer_messages::( - remote, - PeerSet::Collation, - &mut shared.0.lock().collation_peers, - c_messages, - &metrics, - ) - } else { - gum::warn!( - target: LOG_TARGET, - version = ?expected_versions[PeerSet::Collation], - "Major logic bug. Peer somehow has unsupported collation protocol version." - ); + versions + }; - never!("Only versions 1 and 2 are supported; peer set connection checked above; qed"); + gum::trace!( + target: LOG_TARGET, + action = "PeerMessage", + peerset = ?PeerSet::Collation, + ?peer, + ); + + let (events, reports) = + if expected_versions[PeerSet::Collation] == Some(CollationVersion::V1.into()) { + handle_peer_messages::( + peer, + PeerSet::Collation, + &mut shared.0.lock().collation_peers, + vec![notification.into()], + metrics, + ) + } else if expected_versions[PeerSet::Collation] == Some(CollationVersion::V2.into()) + { + handle_peer_messages::( + peer, + PeerSet::Collation, + &mut shared.0.lock().collation_peers, + vec![notification.into()], + metrics, + ) + } else { + gum::warn!( + target: LOG_TARGET, + version = ?expected_versions[PeerSet::Collation], + "Major logic bug. Peer somehow has unsupported collation protocol version." + ); - // If a peer somehow triggers this, we'll disconnect them - // eventually. - (Vec::new(), vec![UNCONNECTED_PEERSET_COST]) - }; + never!("Only versions 1 and 2 are supported; peer set connection checked above; qed"); - for report in reports { - network_service.report_peer(remote, report.into()); - } + // If a peer somehow triggers this, we'll disconnect them + // eventually. + (Vec::new(), vec![UNCONNECTED_PEERSET_COST]) + }; - dispatch_collation_events_to_all(events, &mut sender).await; - } + for report in reports { + network_service.report_peer(peer, report.into()); + } + + dispatch_collation_events_to_all(events, sender).await; + }, + } +} + +async fn handle_network_messages( + mut sender: impl overseer::NetworkBridgeRxSenderTrait, + mut network_service: impl Network, + mut authority_discovery_service: AD, + metrics: Metrics, + shared: Shared, + peerset_protocol_names: PeerSetProtocolNames, + mut validation_service: Box, + mut collation_service: Box, + mut notification_sinks: Arc>>>, +) -> Result<(), Error> +where + AD: validator_discovery::AuthorityDiscovery + Send, +{ + loop { + futures::select! 
{ + event = validation_service.next_event().fuse() => match event { + Some(event) => handle_validation_message( + event, + &mut network_service, + &mut sender, + &mut authority_discovery_service, + &metrics, + &shared, + &peerset_protocol_names, + &mut validation_service, + &mut notification_sinks, + ).await, + None => return Err(Error::EventStreamConcluded), }, + event = collation_service.next_event().fuse() => match event { + Some(event) => handle_collation_message( + event, + &mut network_service, + &mut sender, + &mut authority_discovery_service, + &metrics, + &shared, + &peerset_protocol_names, + &mut collation_service, + &mut notification_sinks, + ).await, + None => return Err(Error::EventStreamConcluded), + } } } } @@ -593,17 +731,15 @@ where } #[overseer::contextbounds(NetworkBridgeRx, prefix = self::overseer)] -async fn run_incoming_orchestra_signals( +async fn run_incoming_orchestra_signals( mut ctx: Context, - mut network_service: N, mut authority_discovery_service: AD, shared: Shared, sync_oracle: Box, metrics: Metrics, - peerset_protocol_names: PeerSetProtocolNames, + notification_sinks: Arc>>>, ) -> Result<(), Error> where - N: Network, AD: validator_discovery::AuthorityDiscovery + Clone, { // This is kept sorted, descending, by block number. @@ -695,13 +831,12 @@ where mode = Mode::Active; update_our_view( - &mut network_service, &mut ctx, &live_heads, &shared, finalized_number, &metrics, - &peerset_protocol_names, + ¬ification_sinks, ); } } @@ -735,7 +870,6 @@ where async fn run_network_in( bridge: NetworkBridgeRx, mut ctx: Context, - network_stream: BoxStream<'static, NetworkEvent>, ) -> Result<(), Error> where N: Network, @@ -748,16 +882,21 @@ where sync_oracle, shared, peerset_protocol_names, + validation_service, + collation_service, + notification_sinks, } = bridge; let (task, network_event_handler) = handle_network_messages( ctx.sender().clone(), network_service.clone(), - network_stream, authority_discovery_service.clone(), metrics.clone(), shared.clone(), peerset_protocol_names.clone(), + validation_service, + collation_service, + notification_sinks.clone(), ) .remote_handle(); @@ -766,12 +905,11 @@ where let orchestra_signal_handler = run_incoming_orchestra_signals( ctx, - network_service, authority_discovery_service, shared, sync_oracle, metrics, - peerset_protocol_names, + notification_sinks, ); futures::pin_mut!(orchestra_signal_handler); @@ -791,17 +929,14 @@ fn construct_view( } #[overseer::contextbounds(NetworkBridgeRx, prefix = self::overseer)] -fn update_our_view( - net: &mut Net, +fn update_our_view( ctx: &mut Context, live_heads: &[ActivatedLeaf], shared: &Shared, finalized_number: BlockNumber, metrics: &Metrics, - peerset_protocol_names: &PeerSetProtocolNames, -) where - Net: Network, -{ + notification_sinks: &Arc>>>, +) { let new_view = construct_view(live_heads.iter().map(|v| v.hash), finalized_number); let (validation_peers, collation_peers) = { @@ -849,43 +984,38 @@ fn update_our_view( filter_by_peer_version(&validation_peers, ValidationVersion::VStaging.into()); send_validation_message_v1( - net, v1_validation_peers, - peerset_protocol_names, WireMessage::ViewUpdate(new_view.clone()), metrics, + notification_sinks, ); send_collation_message_v1( - net, v1_collation_peers, - peerset_protocol_names, WireMessage::ViewUpdate(new_view.clone()), metrics, + notification_sinks, ); send_validation_message_v2( - net, v2_validation_peers, - peerset_protocol_names, WireMessage::ViewUpdate(new_view.clone()), metrics, + notification_sinks, ); 
send_collation_message_v2( - net, v2_collation_peers, - peerset_protocol_names, WireMessage::ViewUpdate(new_view.clone()), metrics, + notification_sinks, ); send_validation_message_vstaging( - net, vstaging_validation_peers, - peerset_protocol_names, WireMessage::ViewUpdate(new_view.clone()), metrics, + notification_sinks, ); let our_view = OurView::new( diff --git a/polkadot/node/network/bridge/src/rx/tests.rs b/polkadot/node/network/bridge/src/rx/tests.rs index f784e78a7f2..e0b86feb644 100644 --- a/polkadot/node/network/bridge/src/rx/tests.rs +++ b/polkadot/node/network/bridge/src/rx/tests.rs @@ -15,7 +15,7 @@ // along with Polkadot. If not, see . use super::*; -use futures::{channel::oneshot, executor, stream::BoxStream}; +use futures::{channel::oneshot, executor}; use overseer::jaeger; use polkadot_node_network_protocol::{self as net_protocol, OurView}; use polkadot_node_subsystem::messages::NetworkBridgeEvent; @@ -26,10 +26,13 @@ use parking_lot::Mutex; use std::{ collections::HashSet, sync::atomic::{AtomicBool, Ordering}, - task::Poll, }; -use sc_network::{Event as NetworkEvent, IfDisconnected, ProtocolName, ReputationChange}; +use sc_network::{ + service::traits::{Direction, MessageSink, NotificationService}, + IfDisconnected, Multiaddr, ObservedRole as SubstrateObservedRole, ProtocolName, + ReputationChange, Roles, +}; use polkadot_node_network_protocol::{ peer_set::PeerSetProtocolNames, @@ -47,9 +50,8 @@ use polkadot_node_subsystem_test_helpers::{ mock::new_leaf, SingleItemSink, SingleItemStream, TestSubsystemContextHandle, }; use polkadot_node_subsystem_util::metered; -use polkadot_primitives::{AuthorityDiscoveryId, CandidateHash, Hash}; +use polkadot_primitives::{AuthorityDiscoveryId, Hash}; -use sc_network::Multiaddr; use sp_keyring::Sr25519Keyring; use crate::{network::Network, validator_discovery::AuthorityDiscovery}; @@ -64,10 +66,9 @@ pub enum NetworkAction { WriteNotification(PeerId, PeerSet, Vec), } -// The subsystem's view of the network - only supports a single call to `event_stream`. +// The subsystem's view of the network. #[derive(Clone)] struct TestNetwork { - net_events: Arc>>>, action_tx: Arc>>, protocol_names: Arc, } @@ -79,37 +80,42 @@ struct TestAuthorityDiscovery; // of `NetworkAction`s. 
struct TestNetworkHandle { action_rx: metered::UnboundedMeteredReceiver, - net_tx: SingleItemSink, - protocol_names: PeerSetProtocolNames, + validation_tx: SingleItemSink, + collation_tx: SingleItemSink, } fn new_test_network( protocol_names: PeerSetProtocolNames, -) -> (TestNetwork, TestNetworkHandle, TestAuthorityDiscovery) { - let (net_tx, net_rx) = polkadot_node_subsystem_test_helpers::single_item_sink(); +) -> ( + TestNetwork, + TestNetworkHandle, + TestAuthorityDiscovery, + Box, + Box, +) { let (action_tx, action_rx) = metered::unbounded(); + let (validation_tx, validation_rx) = polkadot_node_subsystem_test_helpers::single_item_sink(); + let (collation_tx, collation_rx) = polkadot_node_subsystem_test_helpers::single_item_sink(); + let action_tx = Arc::new(Mutex::new(action_tx)); ( TestNetwork { - net_events: Arc::new(Mutex::new(Some(net_rx))), - action_tx: Arc::new(Mutex::new(action_tx)), + action_tx: action_tx.clone(), protocol_names: Arc::new(protocol_names.clone()), }, - TestNetworkHandle { action_rx, net_tx, protocol_names }, + TestNetworkHandle { action_rx, validation_tx, collation_tx }, TestAuthorityDiscovery, + Box::new(TestNotificationService::new( + PeerSet::Validation, + action_tx.clone(), + validation_rx, + )), + Box::new(TestNotificationService::new(PeerSet::Collation, action_tx, collation_rx)), ) } #[async_trait] impl Network for TestNetwork { - fn event_stream(&mut self) -> BoxStream<'static, NetworkEvent> { - self.net_events - .lock() - .take() - .expect("Subsystem made more than one call to `event_stream`") - .boxed() - } - async fn set_reserved_peers( &mut self, _protocol: ProtocolName, @@ -143,7 +149,8 @@ impl Network for TestNetwork { } fn disconnect_peer(&self, who: PeerId, protocol: ProtocolName) { - let (peer_set, _) = self.protocol_names.try_get_protocol(&protocol).unwrap(); + let (peer_set, version) = self.protocol_names.try_get_protocol(&protocol).unwrap(); + assert_eq!(version, peer_set.get_main_version()); self.action_tx .lock() @@ -151,13 +158,10 @@ impl Network for TestNetwork { .unwrap(); } - fn write_notification(&self, who: PeerId, protocol: ProtocolName, message: Vec) { - let (peer_set, _) = self.protocol_names.try_get_protocol(&protocol).unwrap(); - - self.action_tx - .lock() - .unbounded_send(NetworkAction::WriteNotification(who, peer_set, message)) - .unwrap(); + fn peer_role(&self, _peer_id: PeerId, handshake: Vec) -> Option { + Roles::decode_all(&mut &handshake[..]) + .ok() + .and_then(|role| Some(SubstrateObservedRole::from(role))) } } @@ -201,35 +205,85 @@ impl TestNetworkHandle { peer_set: PeerSet, role: ObservedRole, ) { - let protocol_version = ProtocolVersion::from(protocol_version); - self.send_network_event(NetworkEvent::NotificationStreamOpened { - remote: peer, - protocol: self.protocol_names.get_name(peer_set, protocol_version), - negotiated_fallback: None, - role: role.into(), - received_handshake: vec![], - }) - .await; + fn observed_role_to_handshake(role: &ObservedRole) -> Vec { + match role { + &ObservedRole::Light => Roles::LIGHT.encode(), + &ObservedRole::Authority => Roles::AUTHORITY.encode(), + &ObservedRole::Full => Roles::FULL.encode(), + } + } + + // because of how protocol negotiation works, if two peers support at least one common + // protocol, the protocol is negotiated over the main protocol (`ValidationVersion::V2`) but + // if either one of the peers used a fallback protocol for the negotiation (meaning they + // don't support the main protocol but some older version of it ), `negotiated_fallback` is + // set to that 
protocol. + let negotiated_fallback = match protocol_version { + ValidationVersion::V2 => None, + ValidationVersion::V1 => match peer_set { + PeerSet::Validation => Some(ProtocolName::from("/polkadot/validation/1")), + PeerSet::Collation => Some(ProtocolName::from("/polkadot/collation/1")), + }, + ValidationVersion::VStaging => match peer_set { + PeerSet::Validation => Some(ProtocolName::from("/polkadot/validation/3")), + PeerSet::Collation => unreachable!(), + }, + }; + + match peer_set { + PeerSet::Validation => { + self.validation_tx + .send(NotificationEvent::NotificationStreamOpened { + peer, + direction: Direction::Inbound, + handshake: observed_role_to_handshake(&role), + negotiated_fallback, + }) + .await + .expect("subsystem concluded early"); + }, + PeerSet::Collation => { + self.collation_tx + .send(NotificationEvent::NotificationStreamOpened { + peer, + direction: Direction::Inbound, + handshake: observed_role_to_handshake(&role), + negotiated_fallback, + }) + .await + .expect("subsystem concluded early"); + }, + } } async fn disconnect_peer(&mut self, peer: PeerId, peer_set: PeerSet) { - self.send_network_event(NetworkEvent::NotificationStreamClosed { - remote: peer, - protocol: self.protocol_names.get_main_name(peer_set), - }) - .await; + match peer_set { + PeerSet::Validation => self + .validation_tx + .send(NotificationEvent::NotificationStreamClosed { peer }) + .await + .expect("subsystem concluded early"), + PeerSet::Collation => self + .collation_tx + .send(NotificationEvent::NotificationStreamClosed { peer }) + .await + .expect("subsystem concluded early"), + } } async fn peer_message(&mut self, peer: PeerId, peer_set: PeerSet, message: Vec) { - self.send_network_event(NetworkEvent::NotificationsReceived { - remote: peer, - messages: vec![(self.protocol_names.get_main_name(peer_set), message.into())], - }) - .await; - } - - async fn send_network_event(&mut self, event: NetworkEvent) { - self.net_tx.send(event).await.expect("subsystem concluded early"); + match peer_set { + PeerSet::Validation => self + .validation_tx + .send(NotificationEvent::NotificationReceived { peer, notification: message }) + .await + .expect("subsystem concluded early"), + PeerSet::Collation => self + .collation_tx + .send(NotificationEvent::NotificationReceived { peer, notification: message }) + .await + .expect("subsystem concluded early"), + } } } @@ -240,6 +294,121 @@ fn assert_network_actions_contains(actions: &[NetworkAction], action: &NetworkAc } } +struct TestNotificationService { + peer_set: PeerSet, + action_tx: Arc>>, + rx: SingleItemStream, +} + +impl std::fmt::Debug for TestNotificationService { + fn fmt(&self, _: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + Ok(()) + } +} + +impl TestNotificationService { + pub fn new( + peer_set: PeerSet, + action_tx: Arc>>, + rx: SingleItemStream, + ) -> Self { + Self { peer_set, action_tx, rx } + } +} + +struct TestMessageSink { + peer: PeerId, + peer_set: PeerSet, + action_tx: Arc>>, +} + +impl TestMessageSink { + fn new( + peer: PeerId, + peer_set: PeerSet, + action_tx: Arc>>, + ) -> TestMessageSink { + Self { peer, peer_set, action_tx } + } +} + +#[async_trait::async_trait] +impl MessageSink for TestMessageSink { + fn send_sync_notification(&self, notification: Vec) { + self.action_tx + .lock() + .unbounded_send(NetworkAction::WriteNotification( + self.peer, + self.peer_set, + notification, + )) + .unwrap(); + } + + async fn send_async_notification( + &self, + _notification: Vec, + ) -> Result<(), sc_network::error::Error> { + 
unimplemented!(); + } +} + +#[async_trait::async_trait] +impl NotificationService for TestNotificationService { + /// Instruct `Notifications` to open a new substream for `peer`. + async fn open_substream(&mut self, _peer: PeerId) -> Result<(), ()> { + unimplemented!(); + } + + /// Instruct `Notifications` to close substream for `peer`. + async fn close_substream(&mut self, _peer: PeerId) -> Result<(), ()> { + unimplemented!(); + } + + /// Send synchronous `notification` to `peer`. + fn send_sync_notification(&self, _peer: &PeerId, _notification: Vec) { + unimplemented!(); + } + + /// Send asynchronous `notification` to `peer`, allowing sender to exercise backpressure. + async fn send_async_notification( + &self, + _peer: &PeerId, + _notification: Vec, + ) -> Result<(), sc_network::error::Error> { + unimplemented!(); + } + + /// Set handshake for the notification protocol replacing the old handshake. + async fn set_handshake(&mut self, _handshake: Vec) -> Result<(), ()> { + unimplemented!(); + } + + fn try_set_handshake(&mut self, _handshake: Vec) -> Result<(), ()> { + unimplemented!(); + } + + /// Get next event from the `Notifications` event stream. + async fn next_event(&mut self) -> Option { + self.rx.next().await + } + + // Clone [`NotificationService`] + fn clone(&mut self) -> Result, ()> { + unimplemented!(); + } + + /// Get protocol name. + fn protocol(&self) -> &ProtocolName { + unimplemented!(); + } + + /// Get notification sink of the peer. + fn message_sink(&self, peer: &PeerId) -> Option> { + Some(Box::new(TestMessageSink::new(*peer, self.peer_set, self.action_tx.clone()))) + } +} + #[derive(Clone)] struct TestSyncOracle { is_major_syncing: Arc, @@ -335,10 +504,11 @@ fn test_harness>( let peerset_protocol_names = PeerSetProtocolNames::new(genesis_hash, fork_id); let pool = sp_core::testing::TaskExecutor::new(); - let (mut network, network_handle, discovery) = new_test_network(peerset_protocol_names.clone()); + let (network, network_handle, discovery, validation_service, collation_service) = + new_test_network(peerset_protocol_names.clone()); let (context, virtual_overseer) = polkadot_node_subsystem_test_helpers::make_subsystem_context(pool); - let network_stream = network.event_stream(); + let notification_sinks = Arc::new(Mutex::new(HashMap::new())); let shared = Shared::default(); let bridge = NetworkBridgeRx { @@ -348,9 +518,12 @@ fn test_harness>( sync_oracle, shared: shared.clone(), peerset_protocol_names, + validation_service, + collation_service, + notification_sinks, }; - let network_bridge = run_network_in(bridge, context, network_stream) + let network_bridge = run_network_in(bridge, context) .map_err(|_| panic!("subsystem execution failed")) .map(|_| ()); @@ -942,8 +1115,6 @@ fn relays_collation_protocol_messages() { .await; } - // peer A gets reported for sending a collation message. 
- let collator_protocol_message = protocol_v1::CollatorProtocolMessage::Declare( Sr25519Keyring::Alice.public().into(), Default::default(), @@ -953,19 +1124,23 @@ fn relays_collation_protocol_messages() { let message_v1 = protocol_v1::CollationProtocol::CollatorProtocol(collator_protocol_message.clone()); - network_handle - .peer_message( - peer_a, - PeerSet::Collation, - WireMessage::ProtocolMessage(message_v1.clone()).encode(), - ) - .await; - - let actions = network_handle.next_network_actions(3).await; - assert_network_actions_contains( - &actions, - &NetworkAction::ReputationChange(peer_a, UNCONNECTED_PEERSET_COST.into()), - ); + // peer A gets reported for sending a collation message. + // NOTE: this is not possible since peer A cannot send + // a collation message if it has not opened a collation protocol + + // network_handle + // .peer_message( + // peer_a, + // PeerSet::Collation, + // WireMessage::ProtocolMessage(message_v1.clone()).encode(), + // ) + // .await; + + // let actions = network_handle.next_network_actions(3).await; + // assert_network_actions_contains( + // &actions, + // &NetworkAction::ReputationChange(peer_a, UNCONNECTED_PEERSET_COST.into()), + // ); // peer B has the message relayed. @@ -1212,7 +1387,7 @@ fn our_view_updates_decreasing_order_and_limited_to_max() { fn network_protocol_versioning_view_update() { let (oracle, handle) = make_sync_oracle(false); test_harness(Box::new(oracle), |test_harness| async move { - let TestHarness { mut network_handle, mut virtual_overseer, .. } = test_harness; + let TestHarness { mut network_handle, mut virtual_overseer, shared } = test_harness; let peer_ids: Vec<_> = (0..4).map(|_| PeerId::random()).collect(); let peers = [ @@ -1231,12 +1406,22 @@ fn network_protocol_versioning_view_update() { handle.await_mode_switch().await; + let mut total_validation_peers = 0; + let mut total_collation_peers = 0; + for &(peer_id, peer_set, version) in &peers { network_handle .connect_peer(peer_id, version, peer_set, ObservedRole::Full) .await; + + match peer_set { + PeerSet::Validation => total_validation_peers += 1, + PeerSet::Collation => total_collation_peers += 1, + } } + await_peer_connections(&shared, total_validation_peers, total_collation_peers).await; + let view = view![head]; let actions = network_handle.next_network_actions(4).await; @@ -1264,15 +1449,19 @@ fn network_protocol_versioning_view_update() { #[test] fn network_protocol_versioning_subsystem_msg() { + use polkadot_primitives::CandidateHash; + use std::task::Poll; + let (oracle, _handle) = make_sync_oracle(false); test_harness(Box::new(oracle), |test_harness| async move { - let TestHarness { mut network_handle, mut virtual_overseer, .. } = test_harness; + let TestHarness { mut network_handle, mut virtual_overseer, shared } = test_harness; let peer = PeerId::random(); network_handle .connect_peer(peer, ValidationVersion::V2, PeerSet::Validation, ObservedRole::Full) .await; + await_peer_connections(&shared, 1, 0).await; // bridge will inform about all connected peers. { diff --git a/polkadot/node/network/bridge/src/tx/mod.rs b/polkadot/node/network/bridge/src/tx/mod.rs index 5f222ad59c7..bdcd1574e33 100644 --- a/polkadot/node/network/bridge/src/tx/mod.rs +++ b/polkadot/node/network/bridge/src/tx/mod.rs @@ -32,7 +32,7 @@ use polkadot_node_subsystem::{ /// To be passed to [`FullNetworkConfiguration::add_notification_protocol`](). 
pub use polkadot_node_network_protocol::peer_set::{peer_sets_info, IsAuthority}; use polkadot_node_network_protocol::request_response::Requests; -use sc_network::ReputationChange; +use sc_network::{MessageSink, ReputationChange}; use crate::validator_discovery; @@ -60,6 +60,7 @@ pub struct NetworkBridgeTx { metrics: Metrics, req_protocol_names: ReqProtocolNames, peerset_protocol_names: PeerSetProtocolNames, + notification_sinks: Arc>>>, } impl NetworkBridgeTx { @@ -74,6 +75,7 @@ impl NetworkBridgeTx { metrics: Metrics, req_protocol_names: ReqProtocolNames, peerset_protocol_names: PeerSetProtocolNames, + notification_sinks: Arc>>>, ) -> Self { Self { network_service, @@ -81,6 +83,7 @@ impl NetworkBridgeTx { metrics, req_protocol_names, peerset_protocol_names, + notification_sinks, } } } @@ -107,6 +110,7 @@ async fn handle_subsystem_messages( metrics: Metrics, req_protocol_names: ReqProtocolNames, peerset_protocol_names: PeerSetProtocolNames, + notification_sinks: Arc>>>, ) -> Result<(), Error> where N: Network, @@ -130,6 +134,7 @@ where &metrics, &req_protocol_names, &peerset_protocol_names, + ¬ification_sinks, ) .await; }, @@ -140,13 +145,14 @@ where #[overseer::contextbounds(NetworkBridgeTx, prefix = self::overseer)] async fn handle_incoming_subsystem_communication( _ctx: &mut Context, - mut network_service: N, + network_service: N, validator_discovery: &mut validator_discovery::Service, mut authority_discovery_service: AD, msg: NetworkBridgeTxMessage, metrics: &Metrics, req_protocol_names: &ReqProtocolNames, peerset_protocol_names: &PeerSetProtocolNames, + notification_sinks: &Arc>>>, ) -> (N, AD) where N: Network, @@ -194,25 +200,22 @@ where match msg { Versioned::V1(msg) => send_validation_message_v1( - &mut network_service, peers, - peerset_protocol_names, WireMessage::ProtocolMessage(msg), &metrics, + notification_sinks, ), Versioned::VStaging(msg) => send_validation_message_vstaging( - &mut network_service, peers, - peerset_protocol_names, WireMessage::ProtocolMessage(msg), &metrics, + notification_sinks, ), Versioned::V2(msg) => send_validation_message_v2( - &mut network_service, peers, - peerset_protocol_names, WireMessage::ProtocolMessage(msg), &metrics, + notification_sinks, ), } }, @@ -227,25 +230,22 @@ where for (peers, msg) in msgs { match msg { Versioned::V1(msg) => send_validation_message_v1( - &mut network_service, peers, - peerset_protocol_names, WireMessage::ProtocolMessage(msg), &metrics, + notification_sinks, ), Versioned::VStaging(msg) => send_validation_message_vstaging( - &mut network_service, peers, - peerset_protocol_names, WireMessage::ProtocolMessage(msg), &metrics, + notification_sinks, ), Versioned::V2(msg) => send_validation_message_v2( - &mut network_service, peers, - peerset_protocol_names, WireMessage::ProtocolMessage(msg), &metrics, + notification_sinks, ), } } @@ -259,18 +259,16 @@ where match msg { Versioned::V1(msg) => send_collation_message_v1( - &mut network_service, peers, - peerset_protocol_names, WireMessage::ProtocolMessage(msg), &metrics, + notification_sinks, ), Versioned::V2(msg) | Versioned::VStaging(msg) => send_collation_message_v2( - &mut network_service, peers, - peerset_protocol_names, WireMessage::ProtocolMessage(msg), &metrics, + notification_sinks, ), } }, @@ -284,18 +282,16 @@ where for (peers, msg) in msgs { match msg { Versioned::V1(msg) => send_collation_message_v1( - &mut network_service, peers, - peerset_protocol_names, WireMessage::ProtocolMessage(msg), &metrics, + notification_sinks, ), Versioned::V2(msg) | 
Versioned::VStaging(msg) => send_collation_message_v2( - &mut network_service, peers, - peerset_protocol_names, WireMessage::ProtocolMessage(msg), &metrics, + notification_sinks, ), } } @@ -389,6 +385,7 @@ where metrics, req_protocol_names, peerset_protocol_names, + notification_sinks, } = bridge; handle_subsystem_messages( @@ -398,6 +395,7 @@ where metrics, req_protocol_names, peerset_protocol_names, + notification_sinks, ) .await?; diff --git a/polkadot/node/network/bridge/src/tx/tests.rs b/polkadot/node/network/bridge/src/tx/tests.rs index 1a2d9a7a424..c3cf0f322f6 100644 --- a/polkadot/node/network/bridge/src/tx/tests.rs +++ b/polkadot/node/network/bridge/src/tx/tests.rs @@ -15,15 +15,18 @@ // along with Polkadot. If not, see . use super::*; -use futures::{executor, stream::BoxStream}; +use futures::executor; use polkadot_node_subsystem_util::TimeoutExt; use async_trait::async_trait; use parking_lot::Mutex; use std::collections::HashSet; -use sc_network::{Event as NetworkEvent, IfDisconnected, ProtocolName, ReputationChange}; +use sc_network::{ + IfDisconnected, ObservedRole as SubstrateObservedRole, ProtocolName, ReputationChange, Roles, +}; +use parity_scale_codec::DecodeAll; use polkadot_node_network_protocol::{ peer_set::{PeerSetProtocolNames, ValidationVersion}, request_response::{outgoing::Requests, ReqProtocolNames}, @@ -51,10 +54,9 @@ pub enum NetworkAction { WriteNotification(PeerId, PeerSet, Vec), } -// The subsystem's view of the network - only supports a single call to `event_stream`. +// The subsystem's view of the network. #[derive(Clone)] struct TestNetwork { - net_events: Arc>>>, action_tx: Arc>>, peerset_protocol_names: Arc, } @@ -66,37 +68,78 @@ struct TestAuthorityDiscovery; // of `NetworkAction`s. struct TestNetworkHandle { action_rx: metered::UnboundedMeteredReceiver, - net_tx: metered::MeteredSender, - peerset_protocol_names: PeerSetProtocolNames, + _peerset_protocol_names: PeerSetProtocolNames, + notification_sinks: Arc>>>, + action_tx: Arc>>, +} + +struct TestMessageSink { + peer: PeerId, + peer_set: PeerSet, + action_tx: Arc>>, +} + +impl TestMessageSink { + fn new( + peer: PeerId, + peer_set: PeerSet, + action_tx: Arc>>, + ) -> TestMessageSink { + Self { peer, peer_set, action_tx } + } +} + +#[async_trait::async_trait] +impl MessageSink for TestMessageSink { + fn send_sync_notification(&self, notification: Vec) { + self.action_tx + .lock() + .unbounded_send(NetworkAction::WriteNotification( + self.peer, + self.peer_set, + notification, + )) + .unwrap(); + } + + async fn send_async_notification( + &self, + _notification: Vec, + ) -> Result<(), sc_network::error::Error> { + unimplemented!(); + } } fn new_test_network( peerset_protocol_names: PeerSetProtocolNames, -) -> (TestNetwork, TestNetworkHandle, TestAuthorityDiscovery) { - let (net_tx, net_rx) = metered::channel(10); +) -> ( + TestNetwork, + TestNetworkHandle, + TestAuthorityDiscovery, + Arc>>>, +) { let (action_tx, action_rx) = metered::unbounded(); + let notification_sinks = Arc::new(Mutex::new(HashMap::new())); + let action_tx = Arc::new(Mutex::new(action_tx)); ( TestNetwork { - net_events: Arc::new(Mutex::new(Some(net_rx))), - action_tx: Arc::new(Mutex::new(action_tx)), + action_tx: action_tx.clone(), peerset_protocol_names: Arc::new(peerset_protocol_names.clone()), }, - TestNetworkHandle { action_rx, net_tx, peerset_protocol_names }, + TestNetworkHandle { + action_rx, + _peerset_protocol_names: peerset_protocol_names, + action_tx, + notification_sinks: notification_sinks.clone(), + }, 
TestAuthorityDiscovery, + notification_sinks, ) } #[async_trait] impl Network for TestNetwork { - fn event_stream(&mut self) -> BoxStream<'static, NetworkEvent> { - self.net_events - .lock() - .take() - .expect("Subsystem made more than one call to `event_stream`") - .boxed() - } - async fn set_reserved_peers( &mut self, _protocol: ProtocolName, @@ -130,7 +173,8 @@ impl Network for TestNetwork { } fn disconnect_peer(&self, who: PeerId, protocol: ProtocolName) { - let (peer_set, _) = self.peerset_protocol_names.try_get_protocol(&protocol).unwrap(); + let (peer_set, version) = self.peerset_protocol_names.try_get_protocol(&protocol).unwrap(); + assert_eq!(version, peer_set.get_main_version()); self.action_tx .lock() @@ -138,13 +182,10 @@ impl Network for TestNetwork { .unwrap(); } - fn write_notification(&self, who: PeerId, protocol: ProtocolName, message: Vec) { - let (peer_set, _) = self.peerset_protocol_names.try_get_protocol(&protocol).unwrap(); - - self.action_tx - .lock() - .unbounded_send(NetworkAction::WriteNotification(who, peer_set, message)) - .unwrap(); + fn peer_role(&self, _peer_id: PeerId, handshake: Vec) -> Option { + Roles::decode_all(&mut &handshake[..]) + .ok() + .and_then(|role| Some(SubstrateObservedRole::from(role))) } } @@ -174,23 +215,14 @@ impl TestNetworkHandle { async fn connect_peer( &mut self, peer: PeerId, - protocol_version: ValidationVersion, + _protocol_version: ValidationVersion, peer_set: PeerSet, - role: ObservedRole, + _role: ObservedRole, ) { - let protocol_version = ProtocolVersion::from(protocol_version); - self.send_network_event(NetworkEvent::NotificationStreamOpened { - remote: peer, - protocol: self.peerset_protocol_names.get_name(peer_set, protocol_version), - negotiated_fallback: None, - role: role.into(), - received_handshake: vec![], - }) - .await; - } - - async fn send_network_event(&mut self, event: NetworkEvent) { - self.net_tx.send(event).await.expect("subsystem concluded early"); + self.notification_sinks.lock().insert( + (peer_set, peer), + Box::new(TestMessageSink::new(peer, peer_set, self.action_tx.clone())), + ); } } @@ -208,7 +240,8 @@ fn test_harness>(test: impl FnOnce(TestHarne let peerset_protocol_names = PeerSetProtocolNames::new(genesis_hash, fork_id); let pool = sp_core::testing::TaskExecutor::new(); - let (network, network_handle, discovery) = new_test_network(peerset_protocol_names.clone()); + let (network, network_handle, discovery, network_notification_sinks) = + new_test_network(peerset_protocol_names.clone()); let (context, virtual_overseer) = polkadot_node_subsystem_test_helpers::make_subsystem_context(pool); @@ -219,6 +252,7 @@ fn test_harness>(test: impl FnOnce(TestHarne Metrics(None), req_protocol_names, peerset_protocol_names, + network_notification_sinks, ); let network_bridge_out_fut = run_network_out(bridge_out, context) @@ -364,9 +398,9 @@ fn network_protocol_versioning_send() { approval_distribution_message.clone(), ); - // Note that bridge doesn't ensure neither peer's protocol version - // or peer set match the message. 
- let receivers = vec![peer_ids[0], peer_ids[3]]; + // only `peer_ids[0]` opened the validation protocol v2 + // so only they will be sent a notification + let receivers = vec![peer_ids[0]]; virtual_overseer .send(FromOrchestra::Communication { msg: NetworkBridgeTxMessage::SendValidationMessage( @@ -406,7 +440,9 @@ fn network_protocol_versioning_send() { let msg = protocol_v2::CollationProtocol::CollatorProtocol(collator_protocol_message.clone()); - let receivers = vec![peer_ids[1], peer_ids[2]]; + // only `peer_ids[1]` opened the collation protocol v2 + // so only they will be sent a notification + let receivers = vec![peer_ids[1]]; virtual_overseer .send(FromOrchestra::Communication { diff --git a/polkadot/node/network/bridge/src/validator_discovery.rs b/polkadot/node/network/bridge/src/validator_discovery.rs index 86e861fbc5b..b11af8a8a08 100644 --- a/polkadot/node/network/bridge/src/validator_discovery.rs +++ b/polkadot/node/network/bridge/src/validator_discovery.rs @@ -169,13 +169,12 @@ mod tests { use crate::network::Network; use async_trait::async_trait; - use futures::stream::BoxStream; use polkadot_node_network_protocol::{ request_response::{outgoing::Requests, ReqProtocolNames}, PeerId, }; use polkadot_primitives::Hash; - use sc_network::{Event as NetworkEvent, IfDisconnected, ProtocolName, ReputationChange}; + use sc_network::{IfDisconnected, ProtocolName, ReputationChange}; use sp_keyring::Sr25519Keyring; use std::collections::{HashMap, HashSet}; @@ -224,10 +223,6 @@ mod tests { #[async_trait] impl Network for TestNetwork { - fn event_stream(&mut self) -> BoxStream<'static, NetworkEvent> { - panic!() - } - async fn set_reserved_peers( &mut self, _protocol: ProtocolName, @@ -263,7 +258,11 @@ mod tests { panic!() } - fn write_notification(&self, _: PeerId, _: ProtocolName, _: Vec) { + fn peer_role( + &self, + _peer_id: PeerId, + _handshake: Vec, + ) -> Option { panic!() } } diff --git a/polkadot/node/network/protocol/src/peer_set.rs b/polkadot/node/network/protocol/src/peer_set.rs index eb483dec970..7e257d508b5 100644 --- a/polkadot/node/network/protocol/src/peer_set.rs +++ b/polkadot/node/network/protocol/src/peer_set.rs @@ -21,6 +21,7 @@ use polkadot_primitives::Hash; use sc_network::{ config::{NonDefaultSetConfig, SetConfig}, types::ProtocolName, + NotificationService, }; use std::{ collections::{hash_map::Entry, HashMap}, @@ -68,7 +69,7 @@ impl PeerSet { self, is_authority: IsAuthority, peerset_protocol_names: &PeerSetProtocolNames, - ) -> NonDefaultSetConfig { + ) -> (NonDefaultSetConfig, (PeerSet, Box)) { // Networking layer relies on `get_main_name()` being the main name of the protocol // for peersets and connection management.
let protocol = peerset_protocol_names.get_main_name(self); @@ -76,39 +77,47 @@ impl PeerSet { let max_notification_size = self.get_max_notification_size(is_authority); match self { - PeerSet::Validation => NonDefaultSetConfig { - notifications_protocol: protocol, - fallback_names, - max_notification_size, - handshake: None, - set_config: SetConfig { - // we allow full nodes to connect to validators for gossip - // to ensure any `MIN_GOSSIP_PEERS` always include reserved peers - // we limit the amount of non-reserved slots to be less - // than `MIN_GOSSIP_PEERS` in total - in_peers: super::MIN_GOSSIP_PEERS as u32 / 2 - 1, - out_peers: super::MIN_GOSSIP_PEERS as u32 / 2 - 1, - reserved_nodes: Vec::new(), - non_reserved_mode: sc_network::config::NonReservedPeerMode::Accept, - }, + PeerSet::Validation => { + let (config, notification_service) = NonDefaultSetConfig::new( + protocol, + fallback_names, + max_notification_size, + None, + SetConfig { + // we allow full nodes to connect to validators for gossip + // to ensure any `MIN_GOSSIP_PEERS` always include reserved peers + // we limit the amount of non-reserved slots to be less + // than `MIN_GOSSIP_PEERS` in total + in_peers: super::MIN_GOSSIP_PEERS as u32 / 2 - 1, + out_peers: super::MIN_GOSSIP_PEERS as u32 / 2 - 1, + reserved_nodes: Vec::new(), + non_reserved_mode: sc_network::config::NonReservedPeerMode::Accept, + }, + ); + + (config, (PeerSet::Validation, notification_service)) }, - PeerSet::Collation => NonDefaultSetConfig { - notifications_protocol: protocol, - fallback_names, - max_notification_size, - handshake: None, - set_config: SetConfig { - // Non-authority nodes don't need to accept incoming connections on this peer - // set: - in_peers: if is_authority == IsAuthority::Yes { 100 } else { 0 }, - out_peers: 0, - reserved_nodes: Vec::new(), - non_reserved_mode: if is_authority == IsAuthority::Yes { - sc_network::config::NonReservedPeerMode::Accept - } else { - sc_network::config::NonReservedPeerMode::Deny + PeerSet::Collation => { + let (config, notification_service) = NonDefaultSetConfig::new( + protocol, + fallback_names, + max_notification_size, + None, + SetConfig { + // Non-authority nodes don't need to accept incoming connections on this + // peer set: + in_peers: if is_authority == IsAuthority::Yes { 100 } else { 0 }, + out_peers: 0, + reserved_nodes: Vec::new(), + non_reserved_mode: if is_authority == IsAuthority::Yes { + sc_network::config::NonReservedPeerMode::Accept + } else { + sc_network::config::NonReservedPeerMode::Deny + }, }, - }, + ); + + (config, (PeerSet::Collation, notification_service)) }, } } @@ -204,7 +213,7 @@ impl IndexMut for PerPeerSet { pub fn peer_sets_info( is_authority: IsAuthority, peerset_protocol_names: &PeerSetProtocolNames, -) -> Vec { +) -> Vec<(NonDefaultSetConfig, (PeerSet, Box))> { PeerSet::iter() .map(|s| s.get_info(is_authority, &peerset_protocol_names)) .collect() @@ -286,7 +295,7 @@ impl From for ProtocolVersion { } /// On the wire protocol name to [`PeerSet`] mapping. 
-#[derive(Clone)] +#[derive(Debug, Clone)] pub struct PeerSetProtocolNames { protocols: HashMap, names: HashMap<(PeerSet, ProtocolVersion), ProtocolName>, diff --git a/polkadot/node/service/Cargo.toml b/polkadot/node/service/Cargo.toml index e7a4f4a825c..19efd1b66c4 100644 --- a/polkadot/node/service/Cargo.toml +++ b/polkadot/node/service/Cargo.toml @@ -89,6 +89,7 @@ kvdb = "0.13.0" kvdb-rocksdb = { version = "0.19.0", optional = true } parity-db = { version = "0.4.8", optional = true } codec = { package = "parity-scale-codec", version = "3.6.1" } +parking_lot = "0.12.1" # Polkadot polkadot-core-primitives = { path = "../../core-primitives" } diff --git a/polkadot/node/service/src/lib.rs b/polkadot/node/service/src/lib.rs index 5069ec467c9..70159301fc4 100644 --- a/polkadot/node/service/src/lib.rs +++ b/polkadot/node/service/src/lib.rs @@ -51,7 +51,8 @@ use { }, polkadot_node_core_dispute_coordinator::Config as DisputeCoordinatorConfig, polkadot_node_network_protocol::{ - peer_set::PeerSetProtocolNames, request_response::ReqProtocolNames, + peer_set::{PeerSet, PeerSetProtocolNames}, + request_response::ReqProtocolNames, }, sc_client_api::BlockBackend, sc_transaction_pool_api::OffchainTransactionPoolFactory, @@ -74,7 +75,7 @@ pub use { #[cfg(feature = "full-node")] use polkadot_node_subsystem::jaeger; -use std::{path::PathBuf, sync::Arc, time::Duration}; +use std::{collections::HashMap, path::PathBuf, sync::Arc, time::Duration}; use prometheus_endpoint::Registry; #[cfg(feature = "full-node")] @@ -809,9 +810,9 @@ pub fn new_full( // anything in terms of behaviour, but makes the logs more consistent with the other // Substrate nodes. let grandpa_protocol_name = grandpa::protocol_standard_name(&genesis_hash, &config.chain_spec); - net_config.add_notification_protocol(grandpa::grandpa_peers_set_config( - grandpa_protocol_name.clone(), - )); + let (grandpa_protocol_config, grandpa_notification_service) = + grandpa::grandpa_peers_set_config(grandpa_protocol_name.clone()); + net_config.add_notification_protocol(grandpa_protocol_config); let beefy_gossip_proto_name = beefy::gossip_protocol_name(&genesis_hash, config.chain_spec.fork_id()); @@ -824,12 +825,17 @@ pub fn new_full( client.clone(), prometheus_registry.clone(), ); - if enable_beefy { - net_config.add_notification_protocol(beefy::communication::beefy_peers_set_config( - beefy_gossip_proto_name.clone(), - )); - net_config.add_request_response_protocol(beefy_req_resp_cfg); - } + let beefy_notification_service = match enable_beefy { + false => None, + true => { + let (beefy_notification_config, beefy_notification_service) = + beefy::communication::beefy_peers_set_config(beefy_gossip_proto_name.clone()); + + net_config.add_notification_protocol(beefy_notification_config); + net_config.add_request_response_protocol(beefy_req_resp_cfg); + Some(beefy_notification_service) + }, + }; // validation/collation protocols are enabled only if `Overseer` is enabled let peerset_protocol_names = @@ -840,13 +846,21 @@ pub fn new_full( // // Collators and parachain full nodes require the collator and validator networking to send // collations and to be able to recover PoVs. 
- if role.is_authority() || is_parachain_node.is_running_alongside_parachain_node() { - use polkadot_network_bridge::{peer_sets_info, IsAuthority}; - let is_authority = if role.is_authority() { IsAuthority::Yes } else { IsAuthority::No }; - for config in peer_sets_info(is_authority, &peerset_protocol_names) { - net_config.add_notification_protocol(config); - } - } + let notification_services = + if role.is_authority() || is_parachain_node.is_running_alongside_parachain_node() { + use polkadot_network_bridge::{peer_sets_info, IsAuthority}; + let is_authority = if role.is_authority() { IsAuthority::Yes } else { IsAuthority::No }; + + peer_sets_info(is_authority, &peerset_protocol_names) + .into_iter() + .map(|(config, (peerset, service))| { + net_config.add_notification_protocol(config); + (peerset, service) + }) + .collect::>>() + } else { + std::collections::HashMap::new() + }; let req_protocol_names = ReqProtocolNames::new(&genesis_hash, config.chain_spec.fork_id()); @@ -1078,6 +1092,7 @@ pub fn new_full( offchain_transaction_pool_factory: OffchainTransactionPoolFactory::new( transaction_pool.clone(), ), + notification_services, }, ) .map_err(|e| { @@ -1179,13 +1194,15 @@ pub fn new_full( // need a keystore, regardless of which protocol we use below. let keystore_opt = if role.is_authority() { Some(keystore_container.keystore()) } else { None }; - if enable_beefy { + // beefy is enabled if its notification service exists + if let Some(notification_service) = beefy_notification_service { let justifications_protocol_name = beefy_on_demand_justifications_handler.protocol_name(); let network_params = beefy::BeefyNetworkParams { network: network.clone(), sync: sync_service.clone(), gossip_protocol_name: beefy_gossip_proto_name, justifications_protocol_name, + notification_service, _phantom: core::marker::PhantomData::, }; let payload_provider = beefy_primitives::mmr::MmrRootProvider::new(client.clone()); @@ -1265,6 +1282,7 @@ pub fn new_full( prometheus_registry: prometheus_registry.clone(), shared_voter_state, telemetry: telemetry.as_ref().map(|x| x.handle()), + notification_service: grandpa_notification_service, offchain_tx_pool_factory: OffchainTransactionPoolFactory::new(transaction_pool.clone()), }; diff --git a/polkadot/node/service/src/overseer.rs b/polkadot/node/service/src/overseer.rs index fd618863eea..599563d6454 100644 --- a/polkadot/node/service/src/overseer.rs +++ b/polkadot/node/service/src/overseer.rs @@ -26,7 +26,7 @@ use polkadot_node_core_candidate_validation::Config as CandidateValidationConfig use polkadot_node_core_chain_selection::Config as ChainSelectionConfig; use polkadot_node_core_dispute_coordinator::Config as DisputeCoordinatorConfig; use polkadot_node_network_protocol::{ - peer_set::PeerSetProtocolNames, + peer_set::{PeerSet, PeerSetProtocolNames}, request_response::{ v1 as request_v1, v2 as request_v2, IncomingRequestReceiver, ReqProtocolNames, }, @@ -41,15 +41,16 @@ use polkadot_overseer::{ OverseerConnector, OverseerHandle, SpawnGlue, }; +use parking_lot::Mutex; use polkadot_primitives::runtime_api::ParachainHost; use sc_authority_discovery::Service as AuthorityDiscoveryService; use sc_client_api::AuxStore; use sc_keystore::LocalKeystore; -use sc_network::NetworkStateInfo; +use sc_network::{NetworkStateInfo, NotificationService}; use sp_api::ProvideRuntimeApi; use sp_blockchain::HeaderBackend; use sp_consensus_babe::BabeApi; -use std::sync::Arc; +use std::{collections::HashMap, sync::Arc}; pub use polkadot_approval_distribution::ApprovalDistribution as 
ApprovalDistributionSubsystem; pub use polkadot_availability_bitfield_distribution::BitfieldDistribution as BitfieldDistributionSubsystem; @@ -140,6 +141,8 @@ where pub peerset_protocol_names: PeerSetProtocolNames, /// The offchain transaction pool factory. pub offchain_transaction_pool_factory: OffchainTransactionPoolFactory, + /// Notification services for validation/collation protocols. + pub notification_services: HashMap>, } /// Obtain a prepared `OverseerBuilder`, that is initialized @@ -173,6 +176,7 @@ pub fn prepared_overseer_builder( req_protocol_names, peerset_protocol_names, offchain_transaction_pool_factory, + notification_services, }: OverseerGenArgs, ) -> Result< InitializedOverseerBuilder< @@ -218,6 +222,7 @@ where use polkadot_node_subsystem_util::metrics::Metrics; let metrics = ::register(registry)?; + let notification_sinks = Arc::new(Mutex::new(HashMap::new())); let spawner = SpawnGlue(spawner); @@ -235,6 +240,7 @@ where network_bridge_metrics.clone(), req_protocol_names, peerset_protocol_names.clone(), + notification_sinks.clone(), )) .network_bridge_rx(NetworkBridgeRxSubsystem::new( network_service.clone(), @@ -242,6 +248,8 @@ where Box::new(sync_service.clone()), network_bridge_metrics, peerset_protocol_names, + notification_services, + notification_sinks, )) .availability_distribution(AvailabilityDistributionSubsystem::new( keystore.clone(), diff --git a/substrate/bin/node-template/node/src/service.rs b/substrate/bin/node-template/node/src/service.rs index 40320282924..e69428d8190 100644 --- a/substrate/bin/node-template/node/src/service.rs +++ b/substrate/bin/node-template/node/src/service.rs @@ -163,9 +163,9 @@ pub fn new_full(config: Configuration) -> Result { &client.block_hash(0).ok().flatten().expect("Genesis block exists; qed"), &config.chain_spec, ); - net_config.add_notification_protocol(sc_consensus_grandpa::grandpa_peers_set_config( - grandpa_protocol_name.clone(), - )); + let (grandpa_protocol_config, grandpa_notification_service) = + sc_consensus_grandpa::grandpa_peers_set_config(grandpa_protocol_name.clone()); + net_config.add_notification_protocol(grandpa_protocol_config); let warp_sync = Arc::new(sc_consensus_grandpa::warp_proof::NetworkProvider::new( backend.clone(), @@ -316,6 +316,7 @@ pub fn new_full(config: Configuration) -> Result { link: grandpa_link, network, sync: Arc::new(sync_service), + notification_service: grandpa_notification_service, voting_rule: sc_consensus_grandpa::VotingRulesBuilder::default().build(), prometheus_registry, shared_voter_state: SharedVoterState::empty(), diff --git a/substrate/bin/node/cli/src/service.rs b/substrate/bin/node/cli/src/service.rs index 1c71b5a3956..a746de8de84 100644 --- a/substrate/bin/node/cli/src/service.rs +++ b/substrate/bin/node/cli/src/service.rs @@ -370,28 +370,28 @@ pub fn new_full_base( let shared_voter_state = rpc_setup; let auth_disc_publish_non_global_ips = config.network.allow_non_globals_in_dht; let mut net_config = sc_network::config::FullNetworkConfiguration::new(&config.network); - let genesis_hash = client.block_hash(0).ok().flatten().expect("Genesis block exists; qed"); let grandpa_protocol_name = grandpa::protocol_standard_name(&genesis_hash, &config.chain_spec); - net_config.add_notification_protocol(grandpa::grandpa_peers_set_config( - grandpa_protocol_name.clone(), - )); + let (grandpa_protocol_config, grandpa_notification_service) = + grandpa::grandpa_peers_set_config(grandpa_protocol_name.clone()); + net_config.add_notification_protocol(grandpa_protocol_config); - let 
statement_handler_proto = sc_network_statement::StatementHandlerPrototype::new( - genesis_hash, - config.chain_spec.fork_id(), - ); - net_config.add_notification_protocol(statement_handler_proto.set_config()); + let (statement_handler_proto, statement_config) = + sc_network_statement::StatementHandlerPrototype::new( + genesis_hash, + config.chain_spec.fork_id(), + ); + net_config.add_notification_protocol(statement_config); let mixnet_protocol_name = sc_mixnet::protocol_name(genesis_hash.as_ref(), config.chain_spec.fork_id()); - if let Some(mixnet_config) = &mixnet_config { - net_config.add_notification_protocol(sc_mixnet::peers_set_config( - mixnet_protocol_name.clone(), - mixnet_config, - )); - } + let mixnet_notification_service = mixnet_config.as_ref().map(|mixnet_config| { + let (config, notification_service) = + sc_mixnet::peers_set_config(mixnet_protocol_name.clone(), mixnet_config); + net_config.add_notification_protocol(config); + notification_service + }); let warp_sync = Arc::new(grandpa::warp_proof::NetworkProvider::new( backend.clone(), @@ -422,6 +422,8 @@ pub fn new_full_base( mixnet_protocol_name, transaction_pool.clone(), Some(keystore_container.keystore()), + mixnet_notification_service + .expect("`NotificationService` exists since mixnet was enabled; qed"), ); task_manager.spawn_handle().spawn("mixnet", None, mixnet); } @@ -590,6 +592,7 @@ pub fn new_full_base( link: grandpa_link, network: network.clone(), sync: Arc::new(sync_service.clone()), + notification_service: grandpa_notification_service, telemetry: telemetry.as_ref().map(|x| x.handle()), voting_rule: grandpa::VotingRulesBuilder::default().build(), prometheus_registry: prometheus_registry.clone(), diff --git a/substrate/client/consensus/beefy/src/communication/mod.rs b/substrate/client/consensus/beefy/src/communication/mod.rs index 10a6071aae6..3827559057d 100644 --- a/substrate/client/consensus/beefy/src/communication/mod.rs +++ b/substrate/client/consensus/beefy/src/communication/mod.rs @@ -67,10 +67,16 @@ pub(crate) mod beefy_protocol_name { /// For standard protocol name see [`beefy_protocol_name::gossip_protocol_name`]. pub fn beefy_peers_set_config( gossip_protocol_name: sc_network::ProtocolName, -) -> sc_network::config::NonDefaultSetConfig { - let mut cfg = sc_network::config::NonDefaultSetConfig::new(gossip_protocol_name, 1024 * 1024); +) -> (sc_network::config::NonDefaultSetConfig, Box) { + let (mut cfg, notification_service) = sc_network::config::NonDefaultSetConfig::new( + gossip_protocol_name, + Vec::new(), + 1024 * 1024, + None, + Default::default(), + ); cfg.allow_non_reserved(25, 25); - cfg + (cfg, notification_service) } // cost scalars for reporting peers. diff --git a/substrate/client/consensus/beefy/src/lib.rs b/substrate/client/consensus/beefy/src/lib.rs index 3d104f13250..b3ff11add27 100644 --- a/substrate/client/consensus/beefy/src/lib.rs +++ b/substrate/client/consensus/beefy/src/lib.rs @@ -38,7 +38,7 @@ use parking_lot::Mutex; use prometheus::Registry; use sc_client_api::{Backend, BlockBackend, BlockchainEvents, FinalityNotifications, Finalizer}; use sc_consensus::BlockImport; -use sc_network::{NetworkRequest, ProtocolName}; +use sc_network::{NetworkRequest, NotificationService, ProtocolName}; use sc_network_gossip::{GossipEngine, Network as GossipNetwork, Syncing as GossipSyncing}; use sp_api::ProvideRuntimeApi; use sp_blockchain::{ @@ -178,6 +178,8 @@ pub struct BeefyNetworkParams { pub network: Arc, /// Syncing service implementing a sync oracle and an event stream for peers. 
pub sync: Arc, + /// Handle for receiving notification events. + pub notification_service: Box, /// Chain specific BEEFY gossip protocol name. See /// [`communication::beefy_protocol_name::gossip_protocol_name`]. pub gossip_protocol_name: ProtocolName, @@ -243,6 +245,7 @@ pub async fn start_beefy_gadget( let BeefyNetworkParams { network, sync, + notification_service, gossip_protocol_name, justifications_protocol_name, .. @@ -264,6 +267,7 @@ pub async fn start_beefy_gadget( let gossip_engine = GossipEngine::new( network.clone(), sync.clone(), + notification_service, gossip_protocol_name.clone(), gossip_validator.clone(), None, diff --git a/substrate/client/consensus/beefy/src/tests.rs b/substrate/client/consensus/beefy/src/tests.rs index 3aaa59cbfa1..3f800166e26 100644 --- a/substrate/client/consensus/beefy/src/tests.rs +++ b/substrate/client/consensus/beefy/src/tests.rs @@ -72,7 +72,7 @@ use substrate_test_runtime_client::{BlockBuilderExt, ClientExt}; use tokio::time::Duration; const GENESIS_HASH: H256 = H256::zero(); -fn beefy_gossip_proto_name() -> ProtocolName { +pub(crate) fn beefy_gossip_proto_name() -> ProtocolName { gossip_protocol_name(GENESIS_HASH, None) } @@ -371,6 +371,7 @@ async fn voter_init_setup( let mut gossip_engine = sc_network_gossip::GossipEngine::new( net.peer(0).network_service().clone(), net.peer(0).sync_service().clone(), + net.peer(0).take_notification_service(&beefy_gossip_proto_name()).unwrap(), "/beefy/whatever", gossip_validator, None, @@ -392,6 +393,14 @@ where { let tasks = FuturesUnordered::new(); + let mut notification_services = peers + .iter() + .map(|(peer_id, _, _)| { + let peer = &mut net.peers[*peer_id]; + (*peer_id, peer.take_notification_service(&beefy_gossip_proto_name()).unwrap()) + }) + .collect::>(); + for (peer_id, key, api) in peers.into_iter() { let peer = &net.peers[peer_id]; @@ -409,6 +418,7 @@ where let network_params = crate::BeefyNetworkParams { network: peer.network_service().clone(), sync: peer.sync_service().clone(), + notification_service: notification_services.remove(&peer_id).unwrap(), gossip_protocol_name: beefy_gossip_proto_name(), justifications_protocol_name: on_demand_justif_handler.protocol_name(), _phantom: PhantomData, @@ -1045,7 +1055,25 @@ async fn should_initialize_voter_at_custom_genesis() { net.peer(0).client().as_client().finalize_block(hashes[8], None).unwrap(); // load persistent state - nothing in DB, should init at genesis - let persisted_state = voter_init_setup(&mut net, &mut finality, &api).await.unwrap(); + // + // NOTE: code from `voter_init_setup()` is moved here because the new network event system + // doesn't allow creating a new `GossipEngine` as the notification handle is consumed by the + // first `GossipEngine` + let known_peers = Arc::new(Mutex::new(KnownPeers::new())); + let (gossip_validator, _) = GossipValidator::new(known_peers); + let gossip_validator = Arc::new(gossip_validator); + let mut gossip_engine = sc_network_gossip::GossipEngine::new( + net.peer(0).network_service().clone(), + net.peer(0).sync_service().clone(), + net.peer(0).take_notification_service(&beefy_gossip_proto_name()).unwrap(), + "/beefy/whatever", + gossip_validator, + None, + ); + let (beefy_genesis, best_grandpa) = + wait_for_runtime_pallet(&api, &mut gossip_engine, &mut finality).await.unwrap(); + let persisted_state = + load_or_init_voter_state(&*backend, &api, beefy_genesis, best_grandpa, 1).unwrap(); // Test initialization at session boundary. 
// verify voter initialized with single session starting at block `custom_pallet_genesis` (7) @@ -1075,7 +1103,11 @@ async fn should_initialize_voter_at_custom_genesis() { net.peer(0).client().as_client().finalize_block(hashes[10], None).unwrap(); // load persistent state - state preset in DB, but with different pallet genesis - let new_persisted_state = voter_init_setup(&mut net, &mut finality, &api).await.unwrap(); + // the network state persists and uses the old `GossipEngine` initialized for `peer(0)` + let (beefy_genesis, best_grandpa) = + wait_for_runtime_pallet(&api, &mut gossip_engine, &mut finality).await.unwrap(); + let new_persisted_state = + load_or_init_voter_state(&*backend, &api, beefy_genesis, best_grandpa, 1).unwrap(); // verify voter initialized with single session starting at block `new_pallet_genesis` (10) let sessions = new_persisted_state.voting_oracle().sessions(); @@ -1371,7 +1403,7 @@ async fn gossipped_finality_proofs() { let api = Arc::new(TestApi::with_validator_set(&validator_set)); let beefy_peers = peers.iter().enumerate().map(|(id, key)| (id, key, api.clone())).collect(); - let charlie = &net.peers[2]; + let charlie = &mut net.peers[2]; let known_peers = Arc::new(Mutex::new(KnownPeers::::new())); // Charlie will run just the gossip engine and not the full voter. let (gossip_validator, _) = GossipValidator::new(known_peers); @@ -1384,6 +1416,7 @@ async fn gossipped_finality_proofs() { let mut charlie_gossip_engine = sc_network_gossip::GossipEngine::new( charlie.network_service().clone(), charlie.sync_service().clone(), + charlie.take_notification_service(&beefy_gossip_proto_name()).unwrap(), beefy_gossip_proto_name(), charlie_gossip_validator.clone(), None, diff --git a/substrate/client/consensus/beefy/src/worker.rs b/substrate/client/consensus/beefy/src/worker.rs index 0eea5647e51..1fbda974053 100644 --- a/substrate/client/consensus/beefy/src/worker.rs +++ b/substrate/client/consensus/beefy/src/worker.rs @@ -1145,12 +1145,16 @@ pub(crate) mod tests { let api = Arc::new(TestApi::with_validator_set(&genesis_validator_set)); let network = peer.network_service().clone(); let sync = peer.sync_service().clone(); + let notification_service = peer + .take_notification_service(&crate::tests::beefy_gossip_proto_name()) + .unwrap(); let known_peers = Arc::new(Mutex::new(KnownPeers::new())); let (gossip_validator, gossip_report_stream) = GossipValidator::new(known_peers.clone()); let gossip_validator = Arc::new(gossip_validator); let gossip_engine = GossipEngine::new( network.clone(), sync.clone(), + notification_service, "/beefy/1", gossip_validator.clone(), None, diff --git a/substrate/client/consensus/grandpa/src/communication/mod.rs b/substrate/client/consensus/grandpa/src/communication/mod.rs index 6d9e956b41b..5c7e1276297 100644 --- a/substrate/client/consensus/grandpa/src/communication/mod.rs +++ b/substrate/client/consensus/grandpa/src/communication/mod.rs @@ -46,7 +46,7 @@ use finality_grandpa::{ Message::{Precommit, Prevote, PrimaryPropose}, }; use parity_scale_codec::{Decode, DecodeAll, Encode}; -use sc_network::{NetworkBlock, NetworkSyncForkRequest, ReputationChange}; +use sc_network::{NetworkBlock, NetworkSyncForkRequest, NotificationService, ReputationChange}; use sc_network_gossip::{GossipEngine, Network as GossipNetwork}; use sc_telemetry::{telemetry, TelemetryHandle, CONSENSUS_DEBUG, CONSENSUS_INFO}; use sp_keystore::KeystorePtr; @@ -247,6 +247,7 @@ impl, S: Syncing> NetworkBridge { pub(crate) fn new( service: N, sync: S, + notification_service: Box, 
config: crate::Config, set_state: crate::environment::SharedVoterSetState, prometheus_registry: Option<&Registry>, @@ -260,6 +261,7 @@ impl, S: Syncing> NetworkBridge { let gossip_engine = Arc::new(Mutex::new(GossipEngine::new( service.clone(), sync.clone(), + notification_service, protocol, validator.clone(), prometheus_registry, diff --git a/substrate/client/consensus/grandpa/src/communication/tests.rs b/substrate/client/consensus/grandpa/src/communication/tests.rs index 4a869d0f515..b76b1af93da 100644 --- a/substrate/client/consensus/grandpa/src/communication/tests.rs +++ b/substrate/client/consensus/grandpa/src/communication/tests.rs @@ -24,16 +24,17 @@ use super::{ }; use crate::{communication::grandpa_protocol_name, environment::SharedVoterSetState}; use futures::prelude::*; -use parity_scale_codec::Encode; +use parity_scale_codec::{DecodeAll, Encode}; use sc_network::{ config::{MultiaddrWithPeerId, Role}, event::Event as NetworkEvent, + service::traits::{Direction, MessageSink, NotificationEvent, NotificationService}, types::ProtocolName, Multiaddr, NetworkBlock, NetworkEventStream, NetworkNotification, NetworkPeers, NetworkSyncForkRequest, NotificationSenderError, NotificationSenderT as NotificationSender, PeerId, ReputationChange, }; -use sc_network_common::role::ObservedRole; +use sc_network_common::role::{ObservedRole, Roles}; use sc_network_gossip::Validator; use sc_network_sync::{SyncEvent as SyncStreamEvent, SyncEventStream}; use sc_network_test::{Block, Hash}; @@ -123,6 +124,12 @@ impl NetworkPeers for TestNetwork { fn sync_num_connected(&self) -> usize { unimplemented!(); } + + fn peer_role(&self, _peer_id: PeerId, handshake: Vec) -> Option { + Roles::decode_all(&mut &handshake[..]) + .ok() + .and_then(|role| Some(ObservedRole::from(role))) + } } impl NetworkEventStream for TestNetwork { @@ -211,10 +218,70 @@ impl NetworkSyncForkRequest> for TestSync { fn set_sync_fork_request(&self, _peers: Vec, _hash: Hash, _number: NumberFor) {} } +#[derive(Debug)] +pub(crate) struct TestNotificationService { + sender: TracingUnboundedSender, + rx: TracingUnboundedReceiver, +} + +#[async_trait::async_trait] +impl NotificationService for TestNotificationService { + /// Instruct `Notifications` to open a new substream for `peer`. + async fn open_substream(&mut self, _peer: PeerId) -> Result<(), ()> { + unimplemented!(); + } + + /// Instruct `Notifications` to close substream for `peer`. + async fn close_substream(&mut self, _peer: PeerId) -> Result<(), ()> { + unimplemented!(); + } + + /// Send synchronous `notification` to `peer`. + fn send_sync_notification(&self, peer: &PeerId, notification: Vec) { + let _ = self.sender.unbounded_send(Event::WriteNotification(*peer, notification)); + } + + /// Send asynchronous `notification` to `peer`, allowing sender to exercise backpressure. + async fn send_async_notification( + &self, + _peer: &PeerId, + _notification: Vec, + ) -> Result<(), sc_network::error::Error> { + unimplemented!(); + } + + /// Set handshake for the notification protocol replacing the old handshake. + async fn set_handshake(&mut self, _handshake: Vec) -> Result<(), ()> { + unimplemented!(); + } + + fn try_set_handshake(&mut self, _handshake: Vec) -> Result<(), ()> { + unimplemented!(); + } + + /// Get next event from the `Notifications` event stream. 
+ async fn next_event(&mut self) -> Option { + self.rx.next().await + } + + fn clone(&mut self) -> Result, ()> { + unimplemented!(); + } + + fn protocol(&self) -> &ProtocolName { + unimplemented!(); + } + + fn message_sink(&self, _peer: &PeerId) -> Option> { + unimplemented!(); + } +} + pub(crate) struct Tester { pub(crate) net_handle: super::NetworkBridge, gossip_validator: Arc>, pub(crate) events: TracingUnboundedReceiver, + pub(crate) notification_tx: TracingUnboundedSender, } impl Tester { @@ -279,6 +346,9 @@ fn voter_set_state() -> SharedVoterSetState { // needs to run in a tokio runtime. pub(crate) fn make_test_network() -> (impl Future, TestNetwork) { let (tx, rx) = tracing_unbounded("test", 100_000); + let (notification_tx, notification_rx) = tracing_unbounded("test-notification", 100_000); + + let notification_service = TestNotificationService { rx: notification_rx, sender: tx.clone() }; let net = TestNetwork { sender: tx }; let sync = TestSync {}; @@ -293,14 +363,22 @@ pub(crate) fn make_test_network() -> (impl Future, TestNetwork) } } - let bridge = - super::NetworkBridge::new(net.clone(), sync, config(), voter_set_state(), None, None); + let bridge = super::NetworkBridge::new( + net.clone(), + sync, + Box::new(notification_service), + config(), + voter_set_state(), + None, + None, + ); ( futures::future::ready(Tester { gossip_validator: bridge.validator.clone(), net_handle: bridge, events: rx, + notification_tx, }), net, ) @@ -385,63 +463,62 @@ fn good_commit_leads_to_relay() { let commit_to_send = encoded_commit.clone(); let network_bridge = tester.net_handle.clone(); - // asking for global communication will cause the test network - // to send us an event asking us for a stream. use it to - // send a message. + // `NetworkBridge` will be operational as soon as it's created and it's + // waiting for events from the network. Send it events that inform that + // a notification stream was opened and that a notification was received. + // + // Since each protocol has its own notification stream, events need not be filtered. 
let sender_id = id; - let send_message = tester.filter_network_events(move |event| match event { - Event::EventStream(sender) => { - // Add the sending peer and send the commit - let _ = sender.unbounded_send(NetworkEvent::NotificationStreamOpened { - remote: sender_id, - protocol: grandpa_protocol_name::NAME.into(), + + let send_message = async move { + let _ = tester.notification_tx.unbounded_send( + NotificationEvent::NotificationStreamOpened { + peer: sender_id, + direction: Direction::Inbound, negotiated_fallback: None, - role: ObservedRole::Full, - received_handshake: vec![], + handshake: Roles::FULL.encode(), + }, + ); + let _ = tester.notification_tx.unbounded_send( + NotificationEvent::NotificationReceived { + peer: sender_id, + notification: commit_to_send.clone(), + }, + ); + + // Add a random peer which will be the recipient of this message + let receiver_id = PeerId::random(); + let _ = tester.notification_tx.unbounded_send( + NotificationEvent::NotificationStreamOpened { + peer: receiver_id, + direction: Direction::Inbound, + negotiated_fallback: None, + handshake: Roles::FULL.encode(), + }, + ); + + // Announce its local set being on the current set id through a neighbor + // packet, otherwise it won't be eligible to receive the commit + let _ = { + let update = gossip::VersionedNeighborPacket::V1(gossip::NeighborPacket { + round: Round(round), + set_id: SetId(set_id), + commit_finalized_height: 1, }); - let _ = sender.unbounded_send(NetworkEvent::NotificationsReceived { - remote: sender_id, - messages: vec![( - grandpa_protocol_name::NAME.into(), - commit_to_send.clone().into(), - )], - }); + let msg = gossip::GossipMessage::::Neighbor(update); - // Add a random peer which will be the recipient of this message - let receiver_id = PeerId::random(); - let _ = sender.unbounded_send(NetworkEvent::NotificationStreamOpened { - remote: receiver_id, - protocol: grandpa_protocol_name::NAME.into(), - negotiated_fallback: None, - role: ObservedRole::Full, - received_handshake: vec![], - }); + let _ = tester.notification_tx.unbounded_send( + NotificationEvent::NotificationReceived { + peer: receiver_id, + notification: msg.encode(), + }, + ); + }; - // Announce its local set has being on the current set id through a neighbor - // packet, otherwise it won't be eligible to receive the commit - let _ = { - let update = gossip::VersionedNeighborPacket::V1(gossip::NeighborPacket { - round: Round(round), - set_id: SetId(set_id), - commit_finalized_height: 1, - }); - - let msg = gossip::GossipMessage::::Neighbor(update); - - sender.unbounded_send(NetworkEvent::NotificationsReceived { - remote: receiver_id, - messages: vec![( - grandpa_protocol_name::NAME.into(), - msg.encode().into(), - )], - }) - }; - - true - }, - _ => false, - }); + tester + } + .boxed(); // when the commit comes in, we'll tell the callback it was good. let handle_commit = commits_in.into_future().map(|(item, _)| match item.unwrap() { @@ -537,31 +614,32 @@ fn bad_commit_leads_to_report() { let commit_to_send = encoded_commit.clone(); let network_bridge = tester.net_handle.clone(); - // asking for global communication will cause the test network - // to send us an event asking us for a stream. use it to - // send a message. + // `NetworkBridge` will be operational as soon as it's created and it's + // waiting for events from the network. Send it events that inform that + // a notification stream was opened and that a notification was received. 
+ // + // Since each protocol has its own notification stream, events need not be filtered. let sender_id = id; - let send_message = tester.filter_network_events(move |event| match event { - Event::EventStream(sender) => { - let _ = sender.unbounded_send(NetworkEvent::NotificationStreamOpened { - remote: sender_id, - protocol: grandpa_protocol_name::NAME.into(), + + let send_message = async move { + let _ = tester.notification_tx.unbounded_send( + NotificationEvent::NotificationStreamOpened { + peer: sender_id, + direction: Direction::Inbound, negotiated_fallback: None, - role: ObservedRole::Full, - received_handshake: vec![], - }); - let _ = sender.unbounded_send(NetworkEvent::NotificationsReceived { - remote: sender_id, - messages: vec![( - grandpa_protocol_name::NAME.into(), - commit_to_send.clone().into(), - )], - }); + handshake: Roles::FULL.encode(), + }, + ); + let _ = tester.notification_tx.unbounded_send( + NotificationEvent::NotificationReceived { + peer: sender_id, + notification: commit_to_send.clone(), + }, + ); - true - }, - _ => false, - }); + tester + } + .boxed(); // when the commit comes in, we'll tell the callback it was bad. let handle_commit = commits_in.into_future().map(|(item, _)| match item.unwrap() { diff --git a/substrate/client/consensus/grandpa/src/lib.rs b/substrate/client/consensus/grandpa/src/lib.rs index a4584e6fc80..b7cfc9f5b60 100644 --- a/substrate/client/consensus/grandpa/src/lib.rs +++ b/substrate/client/consensus/grandpa/src/lib.rs @@ -67,7 +67,7 @@ use sc_client_api::{ BlockchainEvents, CallExecutor, ExecutorProvider, Finalizer, LockImportRun, StorageProvider, }; use sc_consensus::BlockImport; -use sc_network::types::ProtocolName; +use sc_network::{types::ProtocolName, NotificationService}; use sc_telemetry::{telemetry, TelemetryHandle, CONSENSUS_DEBUG, CONSENSUS_INFO}; use sc_transaction_pool_api::OffchainTransactionPoolFactory; use sc_utils::mpsc::{tracing_unbounded, TracingUnboundedReceiver}; @@ -687,6 +687,8 @@ pub struct GrandpaParams { pub network: N, /// Event stream for syncing-related events. pub sync: S, + /// Handle for interacting with `Notifications`. + pub notification_service: Box, /// A voting rule used to potentially restrict target votes. pub voting_rule: VR, /// The prometheus metrics registry. @@ -707,21 +709,21 @@ pub struct GrandpaParams { /// For standard protocol name see [`crate::protocol_standard_name`]. pub fn grandpa_peers_set_config( protocol_name: ProtocolName, -) -> sc_network::config::NonDefaultSetConfig { +) -> (sc_network::config::NonDefaultSetConfig, Box) { use communication::grandpa_protocol_name; - sc_network::config::NonDefaultSetConfig { - notifications_protocol: protocol_name, - fallback_names: grandpa_protocol_name::LEGACY_NAMES.iter().map(|&n| n.into()).collect(), + sc_network::config::NonDefaultSetConfig::new( + protocol_name, + grandpa_protocol_name::LEGACY_NAMES.iter().map(|&n| n.into()).collect(), // Notifications reach ~256kiB in size at the time of writing on Kusama and Polkadot. - max_notification_size: 1024 * 1024, - handshake: None, - set_config: sc_network::config::SetConfig { + 1024 * 1024, + None, + sc_network::config::SetConfig { in_peers: 0, out_peers: 0, reserved_nodes: Vec::new(), non_reserved_mode: sc_network::config::NonReservedPeerMode::Deny, }, - } + ) } /// Run a GRANDPA voter as a task. 
Provide configuration and a link to a @@ -744,6 +746,7 @@ where link, network, sync, + notification_service, voting_rule, prometheus_registry, shared_voter_state, @@ -770,6 +773,7 @@ where let network = NetworkBridge::new( network, sync, + notification_service, config.clone(), persistent_data.set_state.clone(), prometheus_registry.as_ref(), diff --git a/substrate/client/consensus/grandpa/src/observer.rs b/substrate/client/consensus/grandpa/src/observer.rs index 8541baa822b..608ff5e46a0 100644 --- a/substrate/client/consensus/grandpa/src/observer.rs +++ b/substrate/client/consensus/grandpa/src/observer.rs @@ -28,6 +28,7 @@ use futures::prelude::*; use log::{debug, info, warn}; use sc_client_api::backend::Backend; +use sc_network::NotificationService; use sc_telemetry::TelemetryHandle; use sc_utils::mpsc::TracingUnboundedReceiver; use sp_blockchain::HeaderMetadata; @@ -168,6 +169,7 @@ pub fn run_grandpa_observer( link: LinkHalf, network: N, sync: S, + notification_service: Box, ) -> sp_blockchain::Result + Send> where BE: Backend + Unpin + 'static, @@ -189,6 +191,7 @@ where let network = NetworkBridge::new( network, sync, + notification_service, config.clone(), persistent_data.set_state.clone(), None, @@ -414,14 +417,14 @@ mod tests { use futures::executor; - /// Ensure `Future` implementation of `ObserverWork` is polling its `NetworkBridge`. Regression - /// test for bug introduced in d4fbb897c and fixed in b7af8b339. + /// Ensure `Future` implementation of `ObserverWork` is polling its `NetworkBridge`. + /// Regression test for bug introduced in d4fbb897c and fixed in b7af8b339. /// - /// When polled, `NetworkBridge` forwards reputation change requests from the `GossipValidator` - /// to the underlying `dyn Network`. This test triggers a reputation change by calling - /// `GossipValidator::validate` with an invalid gossip message. After polling the `ObserverWork` - /// which should poll the `NetworkBridge`, the reputation change should be forwarded to the test - /// network. + /// When polled, `NetworkBridge` forwards reputation change requests from the + /// `GossipValidator` to the underlying `dyn Network`. This test triggers a reputation change + /// by calling `GossipValidator::validate` with an invalid gossip message. After polling the + /// `ObserverWork` which should poll the `NetworkBridge`, the reputation change should be + /// forwarded to the test network. #[test] fn observer_work_polls_underlying_network_bridge() { // Create a test network. @@ -463,12 +466,6 @@ mod tests { // validator to the test network. assert!(observer.now_or_never().is_none()); - // Ignore initial event stream request by gossip engine. 
- match tester.events.next().now_or_never() { - Some(Some(Event::EventStream(_))) => {}, - _ => panic!("expected event stream request"), - }; - assert_matches!(tester.events.next().now_or_never(), Some(Some(Event::Report(_, _)))); }); } diff --git a/substrate/client/consensus/grandpa/src/tests.rs b/substrate/client/consensus/grandpa/src/tests.rs index 644befe9885..7e42c2d45c7 100644 --- a/substrate/client/consensus/grandpa/src/tests.rs +++ b/substrate/client/consensus/grandpa/src/tests.rs @@ -317,6 +317,9 @@ fn initialize_grandpa( (net.peers[peer_id].network_service().clone(), link) }; let sync = net.peers[peer_id].sync_service().clone(); + let notification_service = net.peers[peer_id] + .take_notification_service(&grandpa_protocol_name::NAME.into()) + .unwrap(); let grandpa_params = GrandpaParams { config: Config { @@ -332,6 +335,7 @@ fn initialize_grandpa( link, network: net_service, sync, + notification_service, voting_rule: (), prometheus_registry: None, shared_voter_state: SharedVoterState::empty(), @@ -472,6 +476,9 @@ async fn finalize_3_voters_1_full_observer() { let net_service = net.peers[peer_id].network_service().clone(); let sync = net.peers[peer_id].sync_service().clone(); let link = net.peers[peer_id].data.lock().take().expect("link initialized at startup; qed"); + let notification_service = net.peers[peer_id] + .take_notification_service(&grandpa_protocol_name::NAME.into()) + .unwrap(); let grandpa_params = GrandpaParams { config: Config { @@ -487,6 +494,7 @@ async fn finalize_3_voters_1_full_observer() { link, network: net_service, sync, + notification_service, voting_rule: (), prometheus_registry: None, shared_voter_state: SharedVoterState::empty(), @@ -557,14 +565,17 @@ async fn transition_3_voters_twice_1_full_observer() { for (peer_id, local_key) in all_peers.clone().into_iter().enumerate() { let keystore = create_keystore(local_key); - let (net_service, link, sync) = { - let net = net.lock(); + let (net_service, link, sync, notification_service) = { + let mut net = net.lock(); let link = net.peers[peer_id].data.lock().take().expect("link initialized at startup; qed"); ( net.peers[peer_id].network_service().clone(), link, net.peers[peer_id].sync_service().clone(), + net.peers[peer_id] + .take_notification_service(&grandpa_protocol_name::NAME.into()) + .unwrap(), ) }; @@ -582,6 +593,7 @@ async fn transition_3_voters_twice_1_full_observer() { link, network: net_service, sync, + notification_service, voting_rule: (), prometheus_registry: None, shared_voter_state: SharedVoterState::empty(), @@ -1025,6 +1037,9 @@ async fn voter_persists_its_votes() { communication::NetworkBridge::new( net.peers[1].network_service().clone(), net.peers[1].sync_service().clone(), + net.peers[1] + .take_notification_service(&grandpa_protocol_name::NAME.into()) + .unwrap(), config.clone(), set_state, None, @@ -1043,6 +1058,9 @@ async fn voter_persists_its_votes() { (net.peers[0].network_service().clone(), link) }; let sync = net.peers[0].sync_service().clone(); + let notification_service = net.peers[0] + .take_notification_service(&grandpa_protocol_name::NAME.into()) + .unwrap(); let grandpa_params = GrandpaParams { config: Config { @@ -1058,6 +1076,7 @@ async fn voter_persists_its_votes() { link, network: net_service, sync, + notification_service, voting_rule: VotingRulesBuilder::default().build(), prometheus_registry: None, shared_voter_state: SharedVoterState::empty(), @@ -1082,6 +1101,9 @@ async fn voter_persists_its_votes() { net.add_authority_peer(); let net_service = 
net.peers[2].network_service().clone(); let sync = net.peers[2].sync_service().clone(); + let notification_service = net.peers[2] + .take_notification_service(&grandpa_protocol_name::NAME.into()) + .unwrap(); // but we'll reuse the client from the first peer (alice_voter1) // since we want to share the same database, so that we can // read the persisted state after aborting alice_voter1. @@ -1104,6 +1126,7 @@ async fn voter_persists_its_votes() { link, network: net_service, sync, + notification_service, voting_rule: VotingRulesBuilder::default().build(), prometheus_registry: None, shared_voter_state: SharedVoterState::empty(), @@ -1255,6 +1278,9 @@ async fn finalize_3_voters_1_light_observer() { let mut net = GrandpaTestNet::new(TestApi::new(voters), 3, 1); let voters = initialize_grandpa(&mut net, authorities); + let notification_service = net.peers[3] + .take_notification_service(&grandpa_protocol_name::NAME.into()) + .unwrap(); let observer = observer::run_grandpa_observer( Config { gossip_duration: TEST_GOSSIP_DURATION, @@ -1269,6 +1295,7 @@ async fn finalize_3_voters_1_light_observer() { net.peers[3].data.lock().take().expect("link initialized at startup; qed"), net.peers[3].network_service().clone(), net.peers[3].sync_service().clone(), + notification_service, ) .unwrap(); net.peer(0).push_blocks(20, false); @@ -1317,6 +1344,10 @@ async fn voter_catches_up_to_latest_round_when_behind() { link, network: net.peer(peer_id).network_service().clone(), sync: net.peer(peer_id).sync_service().clone(), + notification_service: net + .peer(peer_id) + .take_notification_service(&grandpa_protocol_name::NAME.into()) + .unwrap(), voting_rule: (), prometheus_registry: None, shared_voter_state: SharedVoterState::empty(), @@ -1409,6 +1440,7 @@ fn test_environment_with_select_chain( keystore: Option, network_service: N, sync_service: S, + notification_service: Box, select_chain: SC, voting_rule: VR, ) -> TestEnvironment @@ -1433,6 +1465,7 @@ where let network = NetworkBridge::new( network_service.clone(), sync_service, + notification_service, config.clone(), set_state.clone(), None, @@ -1462,6 +1495,7 @@ fn test_environment( keystore: Option, network_service: N, sync_service: S, + notification_service: Box, voting_rule: VR, ) -> TestEnvironment, VR> where @@ -1474,6 +1508,7 @@ where keystore, network_service, sync_service, + notification_service, link.select_chain.clone(), voting_rule, ) @@ -1490,14 +1525,22 @@ async fn grandpa_environment_respects_voting_rules() { let peer = net.peer(0); let network_service = peer.network_service().clone(); let sync_service = peer.sync_service().clone(); + let mut notification_service = + peer.take_notification_service(&grandpa_protocol_name::NAME.into()).unwrap(); let link = peer.data.lock().take().unwrap(); // add 21 blocks let hashes = peer.push_blocks(21, false); // create an environment with no voting rule restrictions - let unrestricted_env = - test_environment(&link, None, network_service.clone(), sync_service.clone(), ()); + let unrestricted_env = test_environment( + &link, + None, + network_service.clone(), + sync_service.clone(), + notification_service.clone().unwrap(), + (), + ); // another with 3/4 unfinalized chain voting rule restriction let three_quarters_env = test_environment( @@ -1505,6 +1548,7 @@ async fn grandpa_environment_respects_voting_rules() { None, network_service.clone(), sync_service.clone(), + notification_service.clone().unwrap(), voting_rule::ThreeQuartersOfTheUnfinalizedChain, ); @@ -1515,6 +1559,7 @@ async fn 
grandpa_environment_respects_voting_rules() { None, network_service.clone(), sync_service, + notification_service, VotingRulesBuilder::default().build(), ); @@ -1608,6 +1653,8 @@ async fn grandpa_environment_passes_actual_best_block_to_voting_rules() { let peer = net.peer(0); let network_service = peer.network_service().clone(); let sync_service = peer.sync_service().clone(); + let notification_service = + peer.take_notification_service(&grandpa_protocol_name::NAME.into()).unwrap(); let link = peer.data.lock().take().unwrap(); let client = peer.client().as_client().clone(); let select_chain = MockSelectChain::default(); @@ -1622,6 +1669,7 @@ async fn grandpa_environment_passes_actual_best_block_to_voting_rules() { None, network_service.clone(), sync_service, + notification_service, select_chain.clone(), voting_rule::BeforeBestBlockBy(5), ); @@ -1669,6 +1717,8 @@ async fn grandpa_environment_checks_if_best_block_is_descendent_of_finality_targ let peer = net.peer(0); let network_service = peer.network_service().clone(); let sync_service = peer.sync_service().clone(); + let notification_service = + peer.take_notification_service(&grandpa_protocol_name::NAME.into()).unwrap(); let link = peer.data.lock().take().unwrap(); let client = peer.client().as_client().clone(); let select_chain = MockSelectChain::default(); @@ -1678,6 +1728,7 @@ async fn grandpa_environment_checks_if_best_block_is_descendent_of_finality_targ None, network_service.clone(), sync_service.clone(), + notification_service, select_chain.clone(), voting_rule.clone(), ); @@ -1780,11 +1831,19 @@ async fn grandpa_environment_never_overwrites_round_voter_state() { let peer = net.peer(0); let network_service = peer.network_service().clone(); let sync_service = peer.sync_service().clone(); + let notification_service = + peer.take_notification_service(&grandpa_protocol_name::NAME.into()).unwrap(); let link = peer.data.lock().take().unwrap(); let keystore = create_keystore(peers[0]); - let environment = - test_environment(&link, Some(keystore), network_service.clone(), sync_service, ()); + let environment = test_environment( + &link, + Some(keystore), + network_service.clone(), + sync_service, + notification_service, + (), + ); let round_state = || finality_grandpa::round::State::genesis(Default::default()); let base = || Default::default(); @@ -2012,9 +2071,18 @@ async fn grandpa_environment_doesnt_send_equivocation_reports_for_itself() { let peer = net.peer(0); let network_service = peer.network_service().clone(); let sync_service = peer.sync_service().clone(); + let notification_service = + peer.take_notification_service(&grandpa_protocol_name::NAME.into()).unwrap(); let link = peer.data.lock().take().unwrap(); let keystore = create_keystore(alice); - test_environment(&link, Some(keystore), network_service.clone(), sync_service, ()) + test_environment( + &link, + Some(keystore), + network_service.clone(), + sync_service, + notification_service, + (), + ) }; let signed_prevote = { diff --git a/substrate/client/executor/wasmtime/src/tests.rs b/substrate/client/executor/wasmtime/src/tests.rs index e185754b076..1c06da1e3c1 100644 --- a/substrate/client/executor/wasmtime/src/tests.rs +++ b/substrate/client/executor/wasmtime/src/tests.rs @@ -384,7 +384,9 @@ fn test_max_memory_pages( ) (i32.const -1) ) - (unreachable) + (then + (unreachable) + ) ) (i64.const 0) @@ -421,7 +423,9 @@ fn test_max_memory_pages( ) (i32.const -1) ) - (unreachable) + (then + (unreachable) + ) ) (i64.const 0) diff --git a/substrate/client/mixnet/Cargo.toml 
b/substrate/client/mixnet/Cargo.toml index 86c5a37754a..d11cb1805ff 100644 --- a/substrate/client/mixnet/Cargo.toml +++ b/substrate/client/mixnet/Cargo.toml @@ -16,6 +16,7 @@ targets = ["x86_64-unknown-linux-gnu"] array-bytes = "4.1" arrayvec = "0.7.2" blake2 = "0.10.4" +bytes = "1" codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } futures = "0.3.25" futures-timer = "3.0.2" diff --git a/substrate/client/mixnet/src/packet_dispatcher.rs b/substrate/client/mixnet/src/packet_dispatcher.rs index 856208ecb34..420e0c68847 100644 --- a/substrate/client/mixnet/src/packet_dispatcher.rs +++ b/substrate/client/mixnet/src/packet_dispatcher.rs @@ -24,7 +24,7 @@ use libp2p_identity::PeerId; use log::{debug, warn}; use mixnet::core::{AddressedPacket, NetworkStatus, Packet, PeerId as CorePeerId}; use parking_lot::Mutex; -use sc_network::{NetworkNotification, ProtocolName}; +use sc_network::NotificationService; use std::{collections::HashMap, future::Future, sync::Arc}; const LOG_TARGET: &str = "mixnet"; @@ -77,41 +77,37 @@ pub struct ReadyPeer { } impl ReadyPeer { - /// If a future is returned, and if that future returns `Some`, this function should be called - /// again to send the next packet queued for the peer; `self` is placed in the `Some` to make - /// this straightforward. Otherwise, we have either sent or dropped all packets queued for the - /// peer, and it can be forgotten about for the time being. + /// If a future is returned, and if that future returns `Some`, this function should be + /// called again to send the next packet queued for the peer; `self` is placed in the `Some` + /// to make this straightforward. Otherwise, we have either sent or dropped all packets + /// queued for the peer, and it can be forgotten about for the time being. 
pub fn send_packet( self, - network: &impl NetworkNotification, - protocol_name: ProtocolName, + notification_service: &Box, ) -> Option>> { - match network.notification_sender(self.id, protocol_name) { - Err(err) => { + match notification_service.message_sink(&self.id) { + None => { debug!( target: LOG_TARGET, - "Failed to get notification sender for peer ID {}: {err}", self.id + "Failed to get message sink for peer ID {}", self.id, ); self.queue.clear(); None }, - Ok(sender) => Some(async move { - match sender.ready().await.and_then(|mut ready| { - let (packet, more_packets) = self.queue.pop(); - let packet = - packet.expect("Should only be called if there is a packet to send"); - ready.send((packet as Box<[_]>).into())?; - Ok(more_packets) - }) { + Some(sink) => Some(async move { + let (packet, more_packets) = self.queue.pop(); + let packet = packet.expect("Should only be called if there is a packet to send"); + + match sink.send_async_notification((packet as Box<[_]>).into()).await { + Ok(_) => more_packets.then_some(self), Err(err) => { debug!( target: LOG_TARGET, - "Notification sender for peer ID {} failed: {err}", self.id + "Failed to send packet to peer ID {}: {err}", self.id, ); self.queue.clear(); None }, - Ok(more_packets) => more_packets.then(|| self), } }), } diff --git a/substrate/client/mixnet/src/protocol.rs b/substrate/client/mixnet/src/protocol.rs index 555c267b86e..955502a4856 100644 --- a/substrate/client/mixnet/src/protocol.rs +++ b/substrate/client/mixnet/src/protocol.rs @@ -18,7 +18,10 @@ use super::config::Config; use mixnet::core::PACKET_SIZE; -use sc_network::{config::NonDefaultSetConfig, ProtocolName}; +use sc_network::{ + config::{NonDefaultSetConfig, NonReservedPeerMode, SetConfig}, + NotificationService, ProtocolName, +}; /// Returns the protocol name to use for the mixnet controlled by the given chain. pub fn protocol_name(genesis_hash: &[u8], fork_id: Option<&str>) -> ProtocolName { @@ -31,12 +34,26 @@ pub fn protocol_name(genesis_hash: &[u8], fork_id: Option<&str>) -> ProtocolName } /// Returns the peers set configuration for the mixnet protocol. 
-pub fn peers_set_config(name: ProtocolName, config: &Config) -> NonDefaultSetConfig { - let mut set_config = NonDefaultSetConfig::new(name, PACKET_SIZE as u64); +pub fn peers_set_config( + name: ProtocolName, + config: &Config, +) -> (NonDefaultSetConfig, Box) { + let (mut set_config, service) = NonDefaultSetConfig::new( + name, + Vec::new(), + PACKET_SIZE as u64, + None, + SetConfig { + in_peers: 0, + out_peers: 0, + reserved_nodes: Vec::new(), + non_reserved_mode: NonReservedPeerMode::Deny, + }, + ); if config.substrate.num_gateway_slots != 0 { // out_peers is always 0; we are only interested in connecting to mixnodes, which we do by // setting them as reserved nodes set_config.allow_non_reserved(config.substrate.num_gateway_slots, 0); } - set_config + (set_config, service) } diff --git a/substrate/client/mixnet/src/run.rs b/substrate/client/mixnet/src/run.rs index 09020469d5e..14d188df097 100644 --- a/substrate/client/mixnet/src/run.rs +++ b/substrate/client/mixnet/src/run.rs @@ -29,11 +29,12 @@ use super::{ request::{extrinsic_delay, Request, SUBMIT_EXTRINSIC}, sync_with_runtime::sync_with_runtime, }; +use bytes::Bytes; use codec::{Decode, DecodeAll, Encode}; use futures::{ future::{pending, Either}, stream::FuturesUnordered, - StreamExt, + FutureExt, StreamExt, }; use log::{debug, error, trace, warn}; use mixnet::{ @@ -43,8 +44,8 @@ use mixnet::{ }; use sc_client_api::{BlockchainEvents, HeaderBackend}; use sc_network::{ - Event::{NotificationStreamClosed, NotificationStreamOpened, NotificationsReceived}, - NetworkEventStream, NetworkNotification, NetworkPeers, NetworkStateInfo, ProtocolName, + service::traits::{NotificationEvent, ValidationResult}, + NetworkNotification, NetworkPeers, NetworkStateInfo, NotificationService, ProtocolName, }; use sc_transaction_pool_api::{ LocalTransactionPool, OffchainTransactionPoolFactory, TransactionPool, @@ -154,12 +155,13 @@ pub async fn run( protocol_name: ProtocolName, transaction_pool: Arc
<P>
, keystore: Option, + mut notification_service: Box, ) where B: Block, C: BlockchainEvents + ProvideRuntimeApi + HeaderBackend, C::Api: MixnetApi, S: SyncOracle, - N: NetworkStateInfo + NetworkEventStream + NetworkNotification + NetworkPeers, + N: NetworkStateInfo + NetworkNotification + NetworkPeers, P: TransactionPool + LocalTransactionPool + 'static, { let local_peer_id = network.local_peer_id(); @@ -189,7 +191,6 @@ pub async fn run( } else { None }; - let mut network_events = network.event_stream("mixnet").fuse(); let mut next_forward_packet_delay = MaybeInfDelay::new(None); let mut next_authored_packet_delay = MaybeInfDelay::new(None); let mut ready_peers = FuturesUnordered::new(); @@ -248,33 +249,36 @@ pub async fn run( } } - event = network_events.select_next_some() => match event { - NotificationStreamOpened { remote, protocol, .. } - if protocol == protocol_name => packet_dispatcher.add_peer(&remote), - NotificationStreamClosed { remote, protocol } - if protocol == protocol_name => packet_dispatcher.remove_peer(&remote), - NotificationsReceived { remote, messages } => { - for message in messages { - if message.0 == protocol_name { - match message.1.as_ref().try_into() { - Ok(packet) => handle_packet(packet, - &mut mixnet, &mut request_manager, &mut reply_manager, - &mut extrinsic_queue, &config.substrate), - Err(_) => debug!(target: LOG_TARGET, - "Dropped incorrectly sized packet ({} bytes) from {remote}", - message.1.len(), - ), - } - } + event = notification_service.next_event().fuse() => match event { + None => todo!(), + Some(NotificationEvent::ValidateInboundSubstream { result_tx, .. }) => { + let _ = result_tx.send(ValidationResult::Accept); + }, + Some(NotificationEvent::NotificationStreamOpened { peer, .. }) => { + packet_dispatcher.add_peer(&peer); + }, + Some(NotificationEvent::NotificationStreamClosed { peer }) => { + packet_dispatcher.remove_peer(&peer); + }, + Some(NotificationEvent::NotificationReceived { peer, notification }) => { + let notification: Bytes = notification.into(); + + match notification.as_ref().try_into() { + Ok(packet) => handle_packet(packet, + &mut mixnet, &mut request_manager, &mut reply_manager, + &mut extrinsic_queue, &config.substrate), + Err(_) => debug!(target: LOG_TARGET, + "Dropped incorrectly sized packet ({} bytes) from {peer}", + notification.len(), + ), } - } - _ => () + }, }, _ = next_forward_packet_delay => { if let Some(packet) = mixnet.pop_next_forward_packet() { if let Some(ready_peer) = packet_dispatcher.dispatch(packet) { - if let Some(fut) = ready_peer.send_packet(&*network, protocol_name.clone()) { + if let Some(fut) = ready_peer.send_packet(¬ification_service) { ready_peers.push(fut); } } @@ -288,7 +292,7 @@ pub async fn run( _ = next_authored_packet_delay => { if let Some(packet) = mixnet.pop_next_authored_packet(&packet_dispatcher) { if let Some(ready_peer) = packet_dispatcher.dispatch(packet) { - if let Some(fut) = ready_peer.send_packet(&*network, protocol_name.clone()) { + if let Some(fut) = ready_peer.send_packet(¬ification_service) { ready_peers.push(fut); } } @@ -297,7 +301,7 @@ pub async fn run( ready_peer = ready_peers.select_next_some() => { if let Some(ready_peer) = ready_peer { - if let Some(fut) = ready_peer.send_packet(&*network, protocol_name.clone()) { + if let Some(fut) = ready_peer.send_packet(¬ification_service) { ready_peers.push(fut); } } diff --git a/substrate/client/mixnet/src/sync_with_runtime.rs b/substrate/client/mixnet/src/sync_with_runtime.rs index 4a80b3c75f4..f3be9602541 100644 --- 
a/substrate/client/mixnet/src/sync_with_runtime.rs +++ b/substrate/client/mixnet/src/sync_with_runtime.rs @@ -196,6 +196,7 @@ where #[cfg(test)] mod tests { use super::*; + use multiaddr::multiaddr; #[test] fn fixup_empty_external_addresses() { diff --git a/substrate/client/network-gossip/Cargo.toml b/substrate/client/network-gossip/Cargo.toml index 95e26a232c1..5006d5d0e3e 100644 --- a/substrate/client/network-gossip/Cargo.toml +++ b/substrate/client/network-gossip/Cargo.toml @@ -29,5 +29,7 @@ sp-runtime = { path = "../../primitives/runtime" } [dev-dependencies] tokio = "1.22.0" +async-trait = "0.1.73" +codec = { package = "parity-scale-codec", version = "3.0.0", features = [ "derive" ] } quickcheck = { version = "1.0.3", default-features = false } substrate-test-runtime-client = { path = "../../test-utils/runtime/client" } diff --git a/substrate/client/network-gossip/src/bridge.rs b/substrate/client/network-gossip/src/bridge.rs index 8f7d490757b..c1bc414c3a3 100644 --- a/substrate/client/network-gossip/src/bridge.rs +++ b/substrate/client/network-gossip/src/bridge.rs @@ -21,7 +21,11 @@ use crate::{ Network, Syncing, Validator, }; -use sc_network::{event::Event, types::ProtocolName, ReputationChange}; +use sc_network::{ + service::traits::{NotificationEvent, ValidationResult}, + types::ProtocolName, + NotificationService, ReputationChange, +}; use sc_network_sync::SyncEvent; use futures::{ @@ -48,10 +52,10 @@ pub struct GossipEngine { periodic_maintenance_interval: futures_timer::Delay, protocol: ProtocolName, - /// Incoming events from the network. - network_event_stream: Pin + Send>>, /// Incoming events from the syncing service. sync_event_stream: Pin + Send>>, + /// Handle for polling notification-related events. + notification_service: Box, /// Outgoing events to the consumer. message_sinks: HashMap>>, /// Buffered messages (see [`ForwardingState`]). @@ -81,6 +85,7 @@ impl GossipEngine { pub fn new( network: N, sync: S, + notification_service: Box, protocol: impl Into, validator: Arc>, metrics_registry: Option<&Registry>, @@ -91,17 +96,16 @@ impl GossipEngine { S: Syncing + Send + Clone + 'static, { let protocol = protocol.into(); - let network_event_stream = network.event_stream("network-gossip"); let sync_event_stream = sync.event_stream("network-gossip"); GossipEngine { state_machine: ConsensusGossip::new(validator, protocol.clone(), metrics_registry), network: Box::new(network), sync: Box::new(sync), + notification_service, periodic_maintenance_interval: futures_timer::Delay::new(PERIODIC_MAINTENANCE_INTERVAL), protocol, - network_event_stream, sync_event_stream, message_sinks: HashMap::new(), forwarding_state: ForwardingState::Idle, @@ -125,7 +129,7 @@ impl GossipEngine { /// Broadcast all messages with given topic. pub fn broadcast_topic(&mut self, topic: B::Hash, force: bool) { - self.state_machine.broadcast_topic(&mut *self.network, topic, force); + self.state_machine.broadcast_topic(&mut self.notification_service, topic, force); } /// Get data of valid, incoming messages for a topic (but might have expired meanwhile). @@ -150,19 +154,21 @@ impl GossipEngine { /// Send all messages with given topic to a peer. pub fn send_topic(&mut self, who: &PeerId, topic: B::Hash, force: bool) { - self.state_machine.send_topic(&mut *self.network, who, topic, force) + self.state_machine.send_topic(&mut self.notification_service, who, topic, force) } /// Multicast a message to all peers. 
pub fn gossip_message(&mut self, topic: B::Hash, message: Vec, force: bool) { - self.state_machine.multicast(&mut *self.network, topic, message, force) + self.state_machine + .multicast(&mut self.notification_service, topic, message, force) } /// Send addressed message to the given peers. The message is not kept or multicast /// later on. pub fn send_message(&mut self, who: Vec, data: Vec) { for who in &who { - self.state_machine.send_message(&mut *self.network, who, data.clone()); + self.state_machine + .send_message(&mut self.notification_service, who, data.clone()); } } @@ -173,6 +179,11 @@ impl GossipEngine { pub fn announce(&self, block: B::Hash, associated_data: Option>) { self.sync.announce_block(block, associated_data); } + + /// Consume [`GossipEngine`] and return the notification service. + pub fn take_notification_service(self) -> Box { + self.notification_service + } } impl Future for GossipEngine { @@ -184,46 +195,56 @@ impl Future for GossipEngine { 'outer: loop { match &mut this.forwarding_state { ForwardingState::Idle => { - let net_event_stream = this.network_event_stream.poll_next_unpin(cx); + let next_notification_event = + this.notification_service.next_event().poll_unpin(cx); let sync_event_stream = this.sync_event_stream.poll_next_unpin(cx); - if net_event_stream.is_pending() && sync_event_stream.is_pending() { + if next_notification_event.is_pending() && sync_event_stream.is_pending() { break } - match net_event_stream { + match next_notification_event { Poll::Ready(Some(event)) => match event { - Event::NotificationStreamOpened { remote, protocol, role, .. } => - if protocol == this.protocol { - this.state_machine.new_peer(&mut *this.network, remote, role); - }, - Event::NotificationStreamClosed { remote, protocol } => { - if protocol == this.protocol { - this.state_machine - .peer_disconnected(&mut *this.network, remote); - } + NotificationEvent::ValidateInboundSubstream { + peer, + handshake, + result_tx, + .. + } => { + // only accept peers whose role can be determined + let result = this + .network + .peer_role(peer, handshake) + .map_or(ValidationResult::Reject, |_| ValidationResult::Accept); + let _ = result_tx.send(result); }, - Event::NotificationsReceived { remote, messages } => { - let messages = messages - .into_iter() - .filter_map(|(engine, data)| { - if engine == this.protocol { - Some(data.to_vec()) - } else { - None - } - }) - .collect(); - + NotificationEvent::NotificationStreamOpened { + peer, handshake, .. + } => { + let Some(role) = this.network.peer_role(peer, handshake) else { + log::debug!(target: "gossip", "role for {peer} couldn't be determined"); + continue + }; + + this.state_machine.new_peer( + &mut this.notification_service, + peer, + role, + ); + }, + NotificationEvent::NotificationStreamClosed { peer } => { + this.state_machine + .peer_disconnected(&mut this.notification_service, peer); + }, + NotificationEvent::NotificationReceived { peer, notification } => { let to_forward = this.state_machine.on_incoming( &mut *this.network, - remote, - messages, + &mut this.notification_service, + peer, + vec![notification], ); - this.forwarding_state = ForwardingState::Busy(to_forward.into()); }, - Event::Dht(_) => {}, }, // The network event stream closed. Do the same for [`GossipValidator`]. 
Poll::Ready(None) => { @@ -306,7 +327,7 @@ impl Future for GossipEngine { while let Poll::Ready(()) = this.periodic_maintenance_interval.poll_unpin(cx) { this.periodic_maintenance_interval.reset(PERIODIC_MAINTENANCE_INTERVAL); - this.state_machine.tick(&mut *this.network); + this.state_machine.tick(&mut this.notification_service); this.message_sinks.retain(|_, sinks| { sinks.retain(|sink| !sink.is_closed()); @@ -328,15 +349,19 @@ impl futures::future::FusedFuture for GossipEngine { mod tests { use super::*; use crate::{multiaddr::Multiaddr, ValidationResult, ValidatorContext}; + use codec::{DecodeAll, Encode}; use futures::{ - channel::mpsc::{unbounded, UnboundedSender}, + channel::mpsc::{unbounded, UnboundedReceiver, UnboundedSender}, executor::{block_on, block_on_stream}, future::poll_fn, }; use quickcheck::{Arbitrary, Gen, QuickCheck}; use sc_network::{ - config::MultiaddrWithPeerId, NetworkBlock, NetworkEventStream, NetworkNotification, - NetworkPeers, NotificationSenderError, NotificationSenderT as NotificationSender, + config::MultiaddrWithPeerId, + service::traits::{Direction, MessageSink, NotificationEvent}, + Event, NetworkBlock, NetworkEventStream, NetworkNotification, NetworkPeers, + NotificationSenderError, NotificationSenderT as NotificationSender, NotificationService, + Roles, }; use sc_network_common::role::ObservedRole; use sc_network_sync::SyncEventStream; @@ -351,14 +376,10 @@ mod tests { use substrate_test_runtime_client::runtime::Block; #[derive(Clone, Default)] - struct TestNetwork { - inner: Arc>, - } + struct TestNetwork {} #[derive(Clone, Default)] - struct TestNetworkInner { - event_senders: Vec>, - } + struct TestNetworkInner {} impl NetworkPeers for TestNetwork { fn set_authorized_peers(&self, _peers: HashSet) { @@ -422,14 +443,17 @@ mod tests { fn sync_num_connected(&self) -> usize { unimplemented!(); } + + fn peer_role(&self, _peer_id: PeerId, handshake: Vec) -> Option { + Roles::decode_all(&mut &handshake[..]) + .ok() + .and_then(|role| Some(ObservedRole::from(role))) + } } impl NetworkEventStream for TestNetwork { fn event_stream(&self, _name: &'static str) -> Pin + Send>> { - let (tx, rx) = unbounded(); - self.inner.lock().unwrap().event_senders.push(tx); - - Box::pin(rx) + unimplemented!(); } } @@ -501,6 +525,58 @@ mod tests { } } + #[derive(Debug)] + pub(crate) struct TestNotificationService { + rx: UnboundedReceiver, + } + + #[async_trait::async_trait] + impl sc_network::service::traits::NotificationService for TestNotificationService { + async fn open_substream(&mut self, _peer: PeerId) -> Result<(), ()> { + unimplemented!(); + } + + async fn close_substream(&mut self, _peer: PeerId) -> Result<(), ()> { + unimplemented!(); + } + + fn send_sync_notification(&self, _peer: &PeerId, _notification: Vec) { + unimplemented!(); + } + + async fn send_async_notification( + &self, + _peer: &PeerId, + _notification: Vec, + ) -> Result<(), sc_network::error::Error> { + unimplemented!(); + } + + async fn set_handshake(&mut self, _handshake: Vec) -> Result<(), ()> { + unimplemented!(); + } + + fn try_set_handshake(&mut self, _handshake: Vec) -> Result<(), ()> { + unimplemented!(); + } + + async fn next_event(&mut self) -> Option { + self.rx.next().await + } + + fn clone(&mut self) -> Result, ()> { + unimplemented!(); + } + + fn protocol(&self) -> &ProtocolName { + unimplemented!(); + } + + fn message_sink(&self, _peer: &PeerId) -> Option> { + unimplemented!(); + } + } + struct AllowAll; impl Validator for AllowAll { fn validate( @@ -521,16 +597,19 @@ mod tests { fn 
returns_when_network_event_stream_closes() { let network = TestNetwork::default(); let sync = Arc::new(TestSync::default()); + let (tx, rx) = unbounded(); + let notification_service = Box::new(TestNotificationService { rx }); let mut gossip_engine = GossipEngine::::new( network.clone(), sync, + notification_service, "/my_protocol", Arc::new(AllowAll {}), None, ); - // Drop network event stream sender side. - drop(network.inner.lock().unwrap().event_senders.pop()); + // drop notification service sender side. + drop(tx); block_on(poll_fn(move |ctx| { if let Poll::Pending = gossip_engine.poll_unpin(ctx) { @@ -550,42 +629,37 @@ mod tests { let remote_peer = PeerId::random(); let network = TestNetwork::default(); let sync = Arc::new(TestSync::default()); + let (mut tx, rx) = unbounded(); + let notification_service = Box::new(TestNotificationService { rx }); let mut gossip_engine = GossipEngine::::new( network.clone(), sync.clone(), + notification_service, protocol.clone(), Arc::new(AllowAll {}), None, ); - let mut event_sender = network.inner.lock().unwrap().event_senders.pop().unwrap(); - // Register the remote peer. - event_sender - .start_send(Event::NotificationStreamOpened { - remote: remote_peer, - protocol: protocol.clone(), - negotiated_fallback: None, - role: ObservedRole::Authority, - received_handshake: vec![], - }) - .expect("Event stream is unbounded; qed."); + tx.send(NotificationEvent::NotificationStreamOpened { + peer: remote_peer, + direction: Direction::Inbound, + negotiated_fallback: None, + handshake: Roles::FULL.encode(), + }) + .await + .unwrap(); let messages = vec![vec![1], vec![2]]; - let events = messages - .iter() - .cloned() - .map(|m| Event::NotificationsReceived { - remote: remote_peer, - messages: vec![(protocol.clone(), m.into())], - }) - .collect::>(); // Send first event before subscribing. - event_sender - .start_send(events[0].clone()) - .expect("Event stream is unbounded; qed."); + tx.send(NotificationEvent::NotificationReceived { + peer: remote_peer, + notification: messages[0].clone().into(), + }) + .await + .unwrap(); let mut subscribers = vec![]; for _ in 0..2 { @@ -593,9 +667,12 @@ mod tests { } // Send second event after subscribing. - event_sender - .start_send(events[1].clone()) - .expect("Event stream is unbounded; qed."); + tx.send(NotificationEvent::NotificationReceived { + peer: remote_peer, + notification: messages[1].clone().into(), + }) + .await + .unwrap(); tokio::spawn(gossip_engine); @@ -672,6 +749,8 @@ mod tests { let remote_peer = PeerId::random(); let network = TestNetwork::default(); let sync = Arc::new(TestSync::default()); + let (mut tx, rx) = unbounded(); + let notification_service = Box::new(TestNotificationService { rx }); let num_channels_per_topic = channels.iter().fold( HashMap::new(), @@ -699,6 +778,7 @@ mod tests { let mut gossip_engine = GossipEngine::::new( network.clone(), sync.clone(), + notification_service, protocol.clone(), Arc::new(TestValidator {}), None, @@ -724,22 +804,18 @@ mod tests { } } - let mut event_sender = network.inner.lock().unwrap().event_senders.pop().unwrap(); - // Register the remote peer. 
- event_sender - .start_send(Event::NotificationStreamOpened { - remote: remote_peer, - protocol: protocol.clone(), - negotiated_fallback: None, - role: ObservedRole::Authority, - received_handshake: vec![], - }) - .expect("Event stream is unbounded; qed."); + tx.start_send(NotificationEvent::NotificationStreamOpened { + peer: remote_peer, + direction: Direction::Inbound, + negotiated_fallback: None, + handshake: Roles::FULL.encode(), + }) + .unwrap(); // Send messages into the network event stream. for (i_notification, messages) in notifications.iter().enumerate() { - let messages = messages + let messages: Vec> = messages .into_iter() .enumerate() .map(|(i_message, Message { topic })| { @@ -752,13 +828,17 @@ mod tests { message.push(i_notification.try_into().unwrap()); message.push(i_message.try_into().unwrap()); - (protocol.clone(), message.into()) + message.into() }) .collect(); - event_sender - .start_send(Event::NotificationsReceived { remote: remote_peer, messages }) - .expect("Event stream is unbounded; qed."); + for message in messages { + tx.start_send(NotificationEvent::NotificationReceived { + peer: remote_peer, + notification: message, + }) + .unwrap(); + } } let mut received_msgs_per_topic_all_chan = HashMap::::new(); diff --git a/substrate/client/network-gossip/src/state_machine.rs b/substrate/client/network-gossip/src/state_machine.rs index 4bfb5a7d37f..91b56b0f097 100644 --- a/substrate/client/network-gossip/src/state_machine.rs +++ b/substrate/client/network-gossip/src/state_machine.rs @@ -23,7 +23,7 @@ use libp2p::PeerId; use schnellru::{ByLength, LruMap}; use prometheus_endpoint::{register, Counter, PrometheusError, Registry, U64}; -use sc_network::types::ProtocolName; +use sc_network::{types::ProtocolName, NotificationService}; use sc_network_common::role::ObservedRole; use sp_runtime::traits::{Block as BlockT, Hash, HashingFor}; use std::{collections::HashMap, iter, sync::Arc, time, time::Instant}; @@ -74,33 +74,33 @@ struct MessageEntry { /// Local implementation of `ValidatorContext`. struct NetworkContext<'g, 'p, B: BlockT> { gossip: &'g mut ConsensusGossip, - network: &'p mut dyn Network, + notification_service: &'p mut Box, } impl<'g, 'p, B: BlockT> ValidatorContext for NetworkContext<'g, 'p, B> { /// Broadcast all messages with given topic to peers that do not have it yet. fn broadcast_topic(&mut self, topic: B::Hash, force: bool) { - self.gossip.broadcast_topic(self.network, topic, force); + self.gossip.broadcast_topic(self.notification_service, topic, force); } /// Broadcast a message to all peers that have not received it previously. fn broadcast_message(&mut self, topic: B::Hash, message: Vec, force: bool) { - self.gossip.multicast(self.network, topic, message, force); + self.gossip.multicast(self.notification_service, topic, message, force); } /// Send addressed message to a peer. fn send_message(&mut self, who: &PeerId, message: Vec) { - self.network.write_notification(*who, self.gossip.protocol.clone(), message); + self.notification_service.send_sync_notification(who, message); } /// Send all messages with given topic to a peer. 
fn send_topic(&mut self, who: &PeerId, topic: B::Hash, force: bool) { - self.gossip.send_topic(self.network, who, topic, force); + self.gossip.send_topic(self.notification_service, who, topic, force); } } fn propagate<'a, B: BlockT, I>( - network: &mut dyn Network, + notification_service: &mut Box, protocol: ProtocolName, messages: I, intent: MessageIntent, @@ -147,7 +147,7 @@ where ?message, "Propagating message", ); - network.write_notification(*id, protocol.clone(), message.clone()); + notification_service.send_sync_notification(id, message.clone()); } } } @@ -191,7 +191,12 @@ impl ConsensusGossip { } /// Handle new connected peer. - pub fn new_peer(&mut self, network: &mut dyn Network, who: PeerId, role: ObservedRole) { + pub fn new_peer( + &mut self, + notification_service: &mut Box, + who: PeerId, + role: ObservedRole, + ) { tracing::trace!( target:"gossip", %who, @@ -202,7 +207,7 @@ impl ConsensusGossip { self.peers.insert(who, PeerConsensus { known_messages: Default::default() }); let validator = self.validator.clone(); - let mut context = NetworkContext { gossip: self, network }; + let mut context = NetworkContext { gossip: self, notification_service }; validator.new_peer(&mut context, &who, role); } @@ -233,30 +238,35 @@ impl ConsensusGossip { } /// Call when a peer has been disconnected to stop tracking gossip status. - pub fn peer_disconnected(&mut self, network: &mut dyn Network, who: PeerId) { + pub fn peer_disconnected( + &mut self, + notification_service: &mut Box, + who: PeerId, + ) { let validator = self.validator.clone(); - let mut context = NetworkContext { gossip: self, network }; + let mut context = NetworkContext { gossip: self, notification_service }; validator.peer_disconnected(&mut context, &who); self.peers.remove(&who); } /// Perform periodic maintenance - pub fn tick(&mut self, network: &mut dyn Network) { + pub fn tick(&mut self, notification_service: &mut Box) { self.collect_garbage(); if Instant::now() >= self.next_broadcast { - self.rebroadcast(network); + self.rebroadcast(notification_service); self.next_broadcast = Instant::now() + REBROADCAST_INTERVAL; } } /// Rebroadcast all messages to all peers. - fn rebroadcast(&mut self, network: &mut dyn Network) { + fn rebroadcast(&mut self, notification_service: &mut Box) { let messages = self .messages .iter() .map(|entry| (&entry.message_hash, &entry.topic, &entry.message)); + propagate( - network, + notification_service, self.protocol.clone(), messages, MessageIntent::PeriodicRebroadcast, @@ -266,7 +276,12 @@ impl ConsensusGossip { } /// Broadcast all messages with given topic. 
- pub fn broadcast_topic(&mut self, network: &mut dyn Network, topic: B::Hash, force: bool) { + pub fn broadcast_topic( + &mut self, + notification_service: &mut Box, + topic: B::Hash, + force: bool, + ) { let messages = self.messages.iter().filter_map(|entry| { if entry.topic == topic { Some((&entry.message_hash, &entry.topic, &entry.message)) @@ -276,7 +291,7 @@ impl ConsensusGossip { }); let intent = if force { MessageIntent::ForcedBroadcast } else { MessageIntent::Broadcast }; propagate( - network, + notification_service, self.protocol.clone(), messages, intent, @@ -327,6 +342,7 @@ impl ConsensusGossip { pub fn on_incoming( &mut self, network: &mut dyn Network, + notification_service: &mut Box, who: PeerId, messages: Vec>, ) -> Vec<(B::Hash, TopicNotification)> { @@ -367,7 +383,7 @@ impl ConsensusGossip { // validate the message let validation = { let validator = self.validator.clone(); - let mut context = NetworkContext { gossip: self, network }; + let mut context = NetworkContext { gossip: self, notification_service }; validator.validate(&mut context, &who, &message) }; @@ -414,7 +430,7 @@ impl ConsensusGossip { /// Send all messages with given topic to a peer. pub fn send_topic( &mut self, - network: &mut dyn Network, + notification_service: &mut Box, who: &PeerId, topic: B::Hash, force: bool, @@ -443,7 +459,7 @@ impl ConsensusGossip { ?entry.message, "Sending topic message", ); - network.write_notification(*who, self.protocol.clone(), entry.message.clone()); + notification_service.send_sync_notification(who, entry.message.clone()); } } } @@ -451,7 +467,7 @@ impl ConsensusGossip { /// Multicast a message to all peers. pub fn multicast( &mut self, - network: &mut dyn Network, + notification_service: &mut Box, topic: B::Hash, message: Vec, force: bool, @@ -460,7 +476,7 @@ impl ConsensusGossip { self.register_message_hashed(message_hash, topic, message.clone(), None); let intent = if force { MessageIntent::ForcedBroadcast } else { MessageIntent::Broadcast }; propagate( - network, + notification_service, self.protocol.clone(), iter::once((&message_hash, &topic, &message)), intent, @@ -471,7 +487,12 @@ impl ConsensusGossip { /// Send addressed message to a peer. The message is not kept or multicast /// later on. 
- pub fn send_message(&mut self, network: &mut dyn Network, who: &PeerId, message: Vec) { + pub fn send_message( + &mut self, + notification_service: &mut Box, + who: &PeerId, + message: Vec, + ) { let peer = match self.peers.get_mut(who) { None => return, Some(peer) => peer, @@ -488,7 +509,7 @@ impl ConsensusGossip { ); peer.known_messages.insert(message_hash); - network.write_notification(*who, self.protocol.clone(), message); + notification_service.send_sync_notification(who, message) } } @@ -524,9 +545,9 @@ mod tests { use crate::multiaddr::Multiaddr; use futures::prelude::*; use sc_network::{ - config::MultiaddrWithPeerId, event::Event, NetworkBlock, NetworkEventStream, - NetworkNotification, NetworkPeers, NotificationSenderError, - NotificationSenderT as NotificationSender, ReputationChange, + config::MultiaddrWithPeerId, event::Event, service::traits::NotificationEvent, MessageSink, + NetworkBlock, NetworkEventStream, NetworkNotification, NetworkPeers, + NotificationSenderError, NotificationSenderT as NotificationSender, ReputationChange, }; use sp_runtime::{ testing::{Block as RawBlock, ExtrinsicWrapper, H256}, @@ -651,6 +672,10 @@ mod tests { fn sync_num_connected(&self) -> usize { unimplemented!(); } + + fn peer_role(&self, _peer_id: PeerId, _handshake: Vec) -> Option { + None + } } impl NetworkEventStream for NoOpNetwork { @@ -691,6 +716,62 @@ mod tests { } } + #[derive(Debug, Default)] + struct NoOpNotificationService {} + + #[async_trait::async_trait] + impl NotificationService for NoOpNotificationService { + /// Instruct `Notifications` to open a new substream for `peer`. + async fn open_substream(&mut self, _peer: PeerId) -> Result<(), ()> { + unimplemented!(); + } + + /// Instruct `Notifications` to close substream for `peer`. + async fn close_substream(&mut self, _peer: PeerId) -> Result<(), ()> { + unimplemented!(); + } + + /// Send synchronous `notification` to `peer`. + fn send_sync_notification(&self, _peer: &PeerId, _notification: Vec) { + unimplemented!(); + } + + /// Send asynchronous `notification` to `peer`, allowing sender to exercise backpressure. + async fn send_async_notification( + &self, + _peer: &PeerId, + _notification: Vec, + ) -> Result<(), sc_network::error::Error> { + unimplemented!(); + } + + /// Set handshake for the notification protocol replacing the old handshake. + async fn set_handshake(&mut self, _handshake: Vec) -> Result<(), ()> { + unimplemented!(); + } + + fn try_set_handshake(&mut self, _handshake: Vec) -> Result<(), ()> { + unimplemented!(); + } + + /// Get next event from the `Notifications` event stream. 
+ async fn next_event(&mut self) -> Option { + None + } + + fn clone(&mut self) -> Result, ()> { + unimplemented!(); + } + + fn protocol(&self) -> &ProtocolName { + unimplemented!(); + } + + fn message_sink(&self, _peer: &PeerId) -> Option> { + unimplemented!(); + } + } + #[test] fn collects_garbage() { struct AllowOne; @@ -773,20 +854,28 @@ mod tests { fn peer_is_removed_on_disconnect() { let mut consensus = ConsensusGossip::::new(Arc::new(AllowAll), "/foo".into(), None); - let mut network = NoOpNetwork::default(); + let mut notification_service: Box = + Box::new(NoOpNotificationService::default()); let peer_id = PeerId::random(); - consensus.new_peer(&mut network, peer_id, ObservedRole::Full); + consensus.new_peer(&mut notification_service, peer_id, ObservedRole::Full); assert!(consensus.peers.contains_key(&peer_id)); - consensus.peer_disconnected(&mut network, peer_id); + consensus.peer_disconnected(&mut notification_service, peer_id); assert!(!consensus.peers.contains_key(&peer_id)); } #[test] fn on_incoming_ignores_discarded_messages() { + let mut notification_service: Box = + Box::new(NoOpNotificationService::default()); let to_forward = ConsensusGossip::::new(Arc::new(DiscardAll), "/foo".into(), None) - .on_incoming(&mut NoOpNetwork::default(), PeerId::random(), vec![vec![1, 2, 3]]); + .on_incoming( + &mut NoOpNetwork::default(), + &mut notification_service, + PeerId::random(), + vec![vec![1, 2, 3]], + ); assert!( to_forward.is_empty(), @@ -798,11 +887,14 @@ mod tests { #[test] fn on_incoming_ignores_unregistered_peer() { let mut network = NoOpNetwork::default(); + let mut notification_service: Box = + Box::new(NoOpNotificationService::default()); let remote = PeerId::random(); let to_forward = ConsensusGossip::::new(Arc::new(AllowAll), "/foo".into(), None) .on_incoming( &mut network, + &mut notification_service, // Unregistered peer. 
remote, vec![vec![1, 2, 3]], @@ -822,18 +914,20 @@ mod tests { let mut consensus = ConsensusGossip::::new(Arc::new(AllowAll), "/foo".into(), None); let mut network = NoOpNetwork::default(); + let mut notification_service: Box = + Box::new(NoOpNotificationService::default()); let peer_id = PeerId::random(); - consensus.new_peer(&mut network, peer_id, ObservedRole::Full); + consensus.new_peer(&mut notification_service, peer_id, ObservedRole::Full); assert!(consensus.peers.contains_key(&peer_id)); let peer_id2 = PeerId::random(); - consensus.new_peer(&mut network, peer_id2, ObservedRole::Full); + consensus.new_peer(&mut notification_service, peer_id2, ObservedRole::Full); assert!(consensus.peers.contains_key(&peer_id2)); let message = vec![vec![1, 2, 3]]; - consensus.on_incoming(&mut network, peer_id, message.clone()); - consensus.on_incoming(&mut network, peer_id2, message.clone()); + consensus.on_incoming(&mut network, &mut notification_service, peer_id, message.clone()); + consensus.on_incoming(&mut network, &mut notification_service, peer_id2, message.clone()); assert_eq!( vec![(peer_id, rep::GOSSIP_SUCCESS)], diff --git a/substrate/client/network/Cargo.toml b/substrate/client/network/Cargo.toml index 7b0536addda..8b599f058f7 100644 --- a/substrate/client/network/Cargo.toml +++ b/substrate/client/network/Cargo.toml @@ -37,6 +37,8 @@ serde = { version = "1.0.188", features = ["derive"] } serde_json = "1.0.108" smallvec = "1.11.0" thiserror = "1.0" +tokio = { version = "1.22.0", features = ["macros", "sync"] } +tokio-stream = "0.1.7" unsigned-varint = { version = "0.7.1", features = ["futures", "asynchronous_codec"] } zeroize = "1.4.3" prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../utils/prometheus" } diff --git a/substrate/client/network/common/src/role.rs b/substrate/client/network/common/src/role.rs index fd02c00e232..11b7a7924c4 100644 --- a/substrate/client/network/common/src/role.rs +++ b/substrate/client/network/common/src/role.rs @@ -28,7 +28,7 @@ use codec::{self, Encode, EncodeLike, Input, Output}; /// > **Note**: This enum is different from the `Role` enum. The `Role` enum indicates what a /// > node says about itself, while `ObservedRole` is a `Role` merged with the /// > information known locally about that node. -#[derive(Debug, Clone)] +#[derive(Debug, Copy, Clone, PartialEq, Eq)] pub enum ObservedRole { /// Full node. Full, @@ -45,6 +45,18 @@ impl ObservedRole { } } +impl From for ObservedRole { + fn from(roles: Roles) -> Self { + if roles.is_authority() { + ObservedRole::Authority + } else if roles.is_full() { + ObservedRole::Full + } else { + ObservedRole::Light + } + } +} + /// Role of the local node. 
#[derive(Debug, Clone)] pub enum Role { diff --git a/substrate/client/network/src/behaviour.rs b/substrate/client/network/src/behaviour.rs index 0aa724818e0..9f770bc3ba7 100644 --- a/substrate/client/network/src/behaviour.rs +++ b/substrate/client/network/src/behaviour.rs @@ -22,12 +22,13 @@ use crate::{ peer_info, peer_store::PeerStoreHandle, protocol::{CustomMessageOutcome, NotificationsSink, Protocol}, + protocol_controller::SetId, request_responses::{self, IfDisconnected, ProtocolConfig, RequestFailure}, + service::traits::Direction, types::ProtocolName, ReputationChange, }; -use bytes::Bytes; use futures::channel::oneshot; use libp2p::{ core::Multiaddr, identify::Info as IdentifyInfo, identity::PublicKey, kad::RecordKey, @@ -35,7 +36,6 @@ use libp2p::{ }; use parking_lot::Mutex; -use sc_network_common::role::{ObservedRole, Roles}; use sp_runtime::traits::Block as BlockT; use std::{collections::HashSet, sync::Arc, time::Duration}; @@ -97,8 +97,10 @@ pub enum BehaviourOut { NotificationStreamOpened { /// Node we opened the substream with. remote: PeerId, - /// The concerned protocol. Each protocol uses a different substream. - protocol: ProtocolName, + /// Set ID. + set_id: SetId, + /// Direction of the stream. + direction: Direction, /// If the negotiation didn't use the main name of the protocol (the one in /// `notifications_protocol`), then this field contains which name has actually been /// used. @@ -106,8 +108,6 @@ pub enum BehaviourOut { negotiated_fallback: Option, /// Object that permits sending notifications to the peer. notifications_sink: NotificationsSink, - /// Role of the remote. - role: ObservedRole, /// Received handshake. received_handshake: Vec, }, @@ -120,8 +120,8 @@ pub enum BehaviourOut { NotificationStreamReplaced { /// Id of the peer we are connected to. remote: PeerId, - /// The concerned protocol. Each protocol uses a different substream. - protocol: ProtocolName, + /// Set ID. + set_id: SetId, /// Replacement for the previous [`NotificationsSink`]. notifications_sink: NotificationsSink, }, @@ -131,16 +131,18 @@ pub enum BehaviourOut { NotificationStreamClosed { /// Node we closed the substream with. remote: PeerId, - /// The concerned protocol. Each protocol uses a different substream. - protocol: ProtocolName, + /// Set ID. + set_id: SetId, }, /// Received one or more messages from the given node using the given protocol. NotificationsReceived { /// Node we received the message from. remote: PeerId, + /// Set ID. + set_id: SetId, /// Concerned protocol and associated message. 
- messages: Vec<(ProtocolName, Bytes)>, + notification: Vec, }, /// We have obtained identity information from a peer, including the addresses it is listening @@ -272,44 +274,33 @@ impl Behaviour { } } -fn reported_roles_to_observed_role(roles: Roles) -> ObservedRole { - if roles.is_authority() { - ObservedRole::Authority - } else if roles.is_full() { - ObservedRole::Full - } else { - ObservedRole::Light - } -} - impl From for BehaviourOut { fn from(event: CustomMessageOutcome) -> Self { match event { CustomMessageOutcome::NotificationStreamOpened { remote, - protocol, + set_id, + direction, negotiated_fallback, - roles, received_handshake, notifications_sink, } => BehaviourOut::NotificationStreamOpened { remote, - protocol, + set_id, + direction, negotiated_fallback, - role: reported_roles_to_observed_role(roles), received_handshake, notifications_sink, }, CustomMessageOutcome::NotificationStreamReplaced { remote, - protocol, + set_id, notifications_sink, - } => BehaviourOut::NotificationStreamReplaced { remote, protocol, notifications_sink }, - CustomMessageOutcome::NotificationStreamClosed { remote, protocol } => - BehaviourOut::NotificationStreamClosed { remote, protocol }, - CustomMessageOutcome::NotificationsReceived { remote, messages } => - BehaviourOut::NotificationsReceived { remote, messages }, - CustomMessageOutcome::None => BehaviourOut::None, + } => BehaviourOut::NotificationStreamReplaced { remote, set_id, notifications_sink }, + CustomMessageOutcome::NotificationStreamClosed { remote, set_id } => + BehaviourOut::NotificationStreamClosed { remote, set_id }, + CustomMessageOutcome::NotificationsReceived { remote, set_id, notification } => + BehaviourOut::NotificationsReceived { remote, set_id, notification }, } } } diff --git a/substrate/client/network/src/config.rs b/substrate/client/network/src/config.rs index 124d73a74db..24e96843c32 100644 --- a/substrate/client/network/src/config.rs +++ b/substrate/client/network/src/config.rs @@ -23,10 +23,11 @@ pub use crate::{ discovery::DEFAULT_KADEMLIA_REPLICATION_FACTOR, - protocol::NotificationsSink, + protocol::{notification_service, NotificationsSink, ProtocolHandlePair}, request_responses::{ IncomingRequest, OutgoingResponse, ProtocolConfig as RequestResponseConfig, }, + service::traits::NotificationService, types::ProtocolName, }; @@ -47,7 +48,6 @@ pub use sc_network_common::{ ExHashT, }; -use sc_utils::mpsc::TracingUnboundedSender; use sp_runtime::traits::Block as BlockT; use std::{ @@ -454,14 +454,14 @@ impl Default for SetConfig { /// /// > **Note**: As new fields might be added in the future, please consider using the `new` method /// > and modifiers instead of creating this struct manually. -#[derive(Clone, Debug)] +#[derive(Debug)] pub struct NonDefaultSetConfig { /// Name of the notifications protocols of this set. A substream on this set will be /// considered established once this protocol is open. /// /// > **Note**: This field isn't present for the default set, as this is handled internally /// > by the networking code. 
- pub notifications_protocol: ProtocolName, + protocol_name: ProtocolName, /// If the remote reports that it doesn't support the protocol indicated in the /// `notifications_protocol` field, then each of these fallback names will be tried one by @@ -469,37 +469,84 @@ pub struct NonDefaultSetConfig { /// /// If a fallback is used, it will be reported in /// `sc_network::protocol::event::Event::NotificationStreamOpened::negotiated_fallback` - pub fallback_names: Vec, + fallback_names: Vec, /// Handshake of the protocol /// /// NOTE: Currently custom handshakes are not fully supported. See issue #5685 for more /// details. This field is temporarily used to allow moving the hardcoded block announcement /// protocol out of `protocol.rs`. - pub handshake: Option, + handshake: Option, /// Maximum allowed size of single notifications. - pub max_notification_size: u64, + max_notification_size: u64, /// Base configuration. - pub set_config: SetConfig, + set_config: SetConfig, + + /// Notification handle. + /// + /// Notification handle is created during `NonDefaultSetConfig` creation and its other half, + /// `Box` is given to the protocol created the config and + /// `ProtocolHandle` is given to `Notifications` when it initializes itself. This handle allows + /// `Notifications ` to communicate with the protocol directly without relaying events through + /// `sc-network.` + protocol_handle_pair: ProtocolHandlePair, } impl NonDefaultSetConfig { /// Creates a new [`NonDefaultSetConfig`]. Zero slots and accepts only reserved nodes. - pub fn new(notifications_protocol: ProtocolName, max_notification_size: u64) -> Self { - Self { - notifications_protocol, - max_notification_size, - fallback_names: Vec::new(), - handshake: None, - set_config: SetConfig { - in_peers: 0, - out_peers: 0, - reserved_nodes: Vec::new(), - non_reserved_mode: NonReservedPeerMode::Deny, + /// Also returns an object which allows the protocol to communicate with `Notifications`. + pub fn new( + protocol_name: ProtocolName, + fallback_names: Vec, + max_notification_size: u64, + handshake: Option, + set_config: SetConfig, + ) -> (Self, Box) { + let (protocol_handle_pair, notification_service) = + notification_service(protocol_name.clone()); + ( + Self { + protocol_name, + max_notification_size, + fallback_names, + handshake, + set_config, + protocol_handle_pair, }, - } + notification_service, + ) + } + + /// Get reference to protocol name. + pub fn protocol_name(&self) -> &ProtocolName { + &self.protocol_name + } + + /// Get reference to fallback protocol names. + pub fn fallback_names(&self) -> impl Iterator { + self.fallback_names.iter() + } + + /// Get reference to handshake. + pub fn handshake(&self) -> &Option { + &self.handshake + } + + /// Get maximum notification size. + pub fn max_notification_size(&self) -> u64 { + self.max_notification_size + } + + /// Get reference to `SetConfig`. + pub fn set_config(&self) -> &SetConfig { + &self.set_config + } + + /// Take `ProtocolHandlePair` from `NonDefaultSetConfig` + pub fn take_protocol_handle(self) -> ProtocolHandlePair { + self.protocol_handle_pair } /// Modifies the configuration to allow non-reserved nodes. @@ -703,9 +750,6 @@ pub struct Params { /// Block announce protocol configuration pub block_announce_config: NonDefaultSetConfig, - - /// TX channel for direct communication with `SyncingEngine` and `Protocol`. - pub tx: TracingUnboundedSender>, } /// Full network configuration. 
diff --git a/substrate/client/network/src/error.rs b/substrate/client/network/src/error.rs index f0828fb821f..01e8356fb55 100644 --- a/substrate/client/network/src/error.rs +++ b/substrate/client/network/src/error.rs @@ -68,6 +68,15 @@ pub enum Error { /// Name of the protocol registered multiple times. protocol: ProtocolName, }, + /// Peer does not exist. + #[error("Peer `{0}` does not exist.")] + PeerDoesntExist(PeerId), + /// Channel closed. + #[error("Channel closed")] + ChannelClosed, + /// Connection closed. + #[error("Connection closed")] + ConnectionClosed, } // Make `Debug` use the `Display` implementation. diff --git a/substrate/client/network/src/event.rs b/substrate/client/network/src/event.rs index 2913f0b5522..dc4fd53a49a 100644 --- a/substrate/client/network/src/event.rs +++ b/substrate/client/network/src/event.rs @@ -19,14 +19,12 @@ //! Network event types. These are are not the part of the protocol, but rather //! events that happen on the network like DHT get/put results received. -use crate::{types::ProtocolName, NotificationsSink}; +use crate::types::ProtocolName; use bytes::Bytes; -use futures::channel::oneshot; use libp2p::{kad::record::Key, PeerId}; -use sc_network_common::{role::ObservedRole, sync::message::BlockAnnouncesHandshake}; -use sp_runtime::traits::Block as BlockT; +use sc_network_common::role::ObservedRole; /// Events generated by DHT as a response to get_value and put_value requests. #[derive(Debug, Clone)] @@ -92,46 +90,3 @@ pub enum Event { messages: Vec<(ProtocolName, Bytes)>, }, } - -/// Event sent to `SyncingEngine` -// TODO: remove once `NotificationService` is implemented. -pub enum SyncEvent { - /// Opened a substream with the given node with the given notifications protocol. - /// - /// The protocol is always one of the notification protocols that have been registered. - NotificationStreamOpened { - /// Node we opened the substream with. - remote: PeerId, - /// Received handshake. - received_handshake: BlockAnnouncesHandshake, - /// Notification sink. - sink: NotificationsSink, - /// Is the connection inbound. - inbound: bool, - /// Channel for reporting accept/reject of the substream. - tx: oneshot::Sender, - }, - - /// Closed a substream with the given node. Always matches a corresponding previous - /// `NotificationStreamOpened` message. - NotificationStreamClosed { - /// Node we closed the substream with. - remote: PeerId, - }, - - /// Notification sink was replaced. - NotificationSinkReplaced { - /// Node we closed the substream with. - remote: PeerId, - /// Notification sink. - sink: NotificationsSink, - }, - - /// Received one or more messages from the given node using the given protocol. - NotificationsReceived { - /// Node we received the message from. - remote: PeerId, - /// Concerned protocol and associated message. 
- messages: Vec, - }, -} diff --git a/substrate/client/network/src/lib.rs b/substrate/client/network/src/lib.rs index 4dc9bdb4cc1..4c39c57e8df 100644 --- a/substrate/client/network/src/lib.rs +++ b/substrate/client/network/src/lib.rs @@ -244,7 +244,6 @@ mod behaviour; mod protocol; -mod service; #[cfg(test)] mod mock; @@ -258,25 +257,30 @@ pub mod peer_info; pub mod peer_store; pub mod protocol_controller; pub mod request_responses; +pub mod service; pub mod transport; pub mod types; pub mod utils; -pub use event::{DhtEvent, Event, SyncEvent}; +pub use event::{DhtEvent, Event}; #[doc(inline)] pub use libp2p::{multiaddr, Multiaddr, PeerId}; pub use request_responses::{Config, IfDisconnected, RequestFailure}; -pub use sc_network_common::{role::ObservedRole, types::ReputationChange}; +pub use sc_network_common::{ + role::{ObservedRole, Roles}, + types::ReputationChange, +}; pub use service::{ signature::Signature, traits::{ - KademliaKey, NetworkBlock, NetworkDHTProvider, NetworkEventStream, NetworkNotification, - NetworkPeers, NetworkRequest, NetworkSigner, NetworkStateInfo, NetworkStatus, - NetworkStatusProvider, NetworkSyncForkRequest, NotificationSender as NotificationSenderT, - NotificationSenderError, NotificationSenderReady, + KademliaKey, MessageSink, NetworkBlock, NetworkDHTProvider, NetworkEventStream, + NetworkNotification, NetworkPeers, NetworkRequest, NetworkSigner, NetworkStateInfo, + NetworkStatus, NetworkStatusProvider, NetworkSyncForkRequest, + NotificationSender as NotificationSenderT, NotificationSenderError, + NotificationSenderReady, NotificationService, }, - DecodingError, Keypair, NetworkService, NetworkWorker, NotificationSender, NotificationsSink, - OutboundFailure, PublicKey, + DecodingError, Keypair, NetworkService, NetworkWorker, NotificationSender, OutboundFailure, + PublicKey, }; pub use types::ProtocolName; diff --git a/substrate/client/network/src/mock.rs b/substrate/client/network/src/mock.rs index bc596b0fa57..534b8118970 100644 --- a/substrate/client/network/src/mock.rs +++ b/substrate/client/network/src/mock.rs @@ -20,6 +20,7 @@ use crate::{peer_store::PeerStoreProvider, protocol_controller::ProtocolHandle, ReputationChange}; use libp2p::PeerId; +use sc_network_common::role::ObservedRole; use std::collections::HashSet; /// No-op `PeerStore`. @@ -49,6 +50,14 @@ impl PeerStoreProvider for MockPeerStore { 0 } + fn peer_role(&self, _peer_id: &PeerId) -> Option { + None + } + + fn set_peer_role(&mut self, _peer_id: &PeerId, _role: ObservedRole) { + unimplemented!(); + } + fn outgoing_candidates(&self, _count: usize, _ignored: HashSet<&PeerId>) -> Vec { unimplemented!() } diff --git a/substrate/client/network/src/peer_store.rs b/substrate/client/network/src/peer_store.rs index 35d17e588cb..4b28b8e7544 100644 --- a/substrate/client/network/src/peer_store.rs +++ b/substrate/client/network/src/peer_store.rs @@ -23,7 +23,7 @@ use libp2p::PeerId; use log::trace; use parking_lot::Mutex; use partial_sort::PartialSort; -use sc_network_common::types::ReputationChange; +use sc_network_common::{role::ObservedRole, types::ReputationChange}; use std::{ cmp::{Ord, Ordering, PartialOrd}, collections::{hash_map::Entry, HashMap, HashSet}, @@ -66,9 +66,15 @@ pub trait PeerStoreProvider: Debug + Send { /// Adjust peer reputation. fn report_peer(&mut self, peer_id: PeerId, change: ReputationChange); + /// Set peer role. + fn set_peer_role(&mut self, peer_id: &PeerId, role: ObservedRole); + /// Get peer reputation. 
fn peer_reputation(&self, peer_id: &PeerId) -> i32; + /// Get peer role, if available. + fn peer_role(&self, peer_id: &PeerId) -> Option; + /// Get candidates with highest reputations for initiating outgoing connections. fn outgoing_candidates(&self, count: usize, ignored: HashSet<&PeerId>) -> Vec; } @@ -96,10 +102,18 @@ impl PeerStoreProvider for PeerStoreHandle { self.inner.lock().report_peer(peer_id, change) } + fn set_peer_role(&mut self, peer_id: &PeerId, role: ObservedRole) { + self.inner.lock().set_peer_role(peer_id, role) + } + fn peer_reputation(&self, peer_id: &PeerId) -> i32 { self.inner.lock().peer_reputation(peer_id) } + fn peer_role(&self, peer_id: &PeerId) -> Option { + self.inner.lock().peer_role(peer_id) + } + fn outgoing_candidates(&self, count: usize, ignored: HashSet<&PeerId>) -> Vec { self.inner.lock().outgoing_candidates(count, ignored) } @@ -122,13 +136,19 @@ impl PeerStoreHandle { #[derive(Debug, Clone, Copy)] struct PeerInfo { + /// Reputation of the peer. reputation: i32, + + /// Instant when the peer was last updated. last_updated: Instant, + + /// Role of the peer, if known. + role: Option, } impl Default for PeerInfo { fn default() -> Self { - Self { reputation: 0, last_updated: Instant::now() } + Self { reputation: 0, last_updated: Instant::now(), role: None } } } @@ -242,10 +262,27 @@ impl PeerStoreInner { } } + fn set_peer_role(&mut self, peer_id: &PeerId, role: ObservedRole) { + log::trace!(target: LOG_TARGET, "Set {peer_id} role to {role:?}"); + + match self.peers.entry(*peer_id) { + Entry::Occupied(mut entry) => { + entry.get_mut().role = Some(role); + }, + Entry::Vacant(entry) => { + entry.insert(PeerInfo { role: Some(role), ..Default::default() }); + }, + } + } + fn peer_reputation(&self, peer_id: &PeerId) -> i32 { self.peers.get(peer_id).map_or(0, |info| info.reputation) } + fn peer_role(&self, peer_id: &PeerId) -> Option { + self.peers.get(peer_id).map_or(None, |info| info.role) + } + fn outgoing_candidates(&self, count: usize, ignored: HashSet<&PeerId>) -> Vec { let mut candidates = self .peers diff --git a/substrate/client/network/src/protocol.rs b/substrate/client/network/src/protocol.rs index 9b94f288352..ea7977cc9ae 100644 --- a/substrate/client/network/src/protocol.rs +++ b/substrate/client/network/src/protocol.rs @@ -20,12 +20,11 @@ use crate::{ config, error, peer_store::{PeerStoreHandle, PeerStoreProvider}, protocol_controller::{self, SetId}, + service::traits::Direction, types::ProtocolName, }; -use bytes::Bytes; -use codec::{DecodeAll, Encode}; -use futures::{channel::oneshot, stream::FuturesUnordered, StreamExt}; +use codec::Encode; use libp2p::{ core::Endpoint, swarm::{ @@ -34,24 +33,23 @@ use libp2p::{ }, Multiaddr, PeerId, }; -use log::{debug, error, warn}; +use log::warn; -use sc_network_common::{role::Roles, sync::message::BlockAnnouncesHandshake}; -use sc_utils::mpsc::{TracingUnboundedReceiver, TracingUnboundedSender}; +use codec::DecodeAll; +use prometheus_endpoint::Registry; +use sc_network_common::role::Roles; +use sc_utils::mpsc::TracingUnboundedReceiver; use sp_runtime::traits::Block as BlockT; -use std::{ - collections::{HashMap, HashSet}, - future::Future, - iter, - pin::Pin, - task::Poll, -}; +use std::{collections::HashSet, iter, task::Poll}; -use message::{generic::Message as GenericMessage, Message}; use notifications::{Notifications, NotificationsOut}; -pub use notifications::{NotificationsSink, NotifsHandlerError, Ready}; +pub(crate) use notifications::ProtocolHandle; + +pub use notifications::{ + notification_service, 
NotificationsSink, NotifsHandlerError, ProtocolHandlePair, Ready, +}; mod notifications; @@ -64,85 +62,93 @@ pub(crate) const BLOCK_ANNOUNCES_TRANSACTIONS_SUBSTREAM_SIZE: u64 = 16 * 1024 * /// Identifier of the peerset for the block announces protocol. const HARDCODED_PEERSETS_SYNC: SetId = SetId::from(0); -mod rep { - use crate::ReputationChange as Rep; - /// We received a message that failed to decode. - pub const BAD_MESSAGE: Rep = Rep::new(-(1 << 12), "Bad message"); -} - -type PendingSyncSubstreamValidation = - Pin> + Send>>; - // Lock must always be taken in order declared here. pub struct Protocol { - /// Used to report reputation changes. - peer_store_handle: PeerStoreHandle, /// Handles opening the unique substream and sending and receiving raw messages. behaviour: Notifications, /// List of notifications protocols that have been registered. notification_protocols: Vec, - /// If we receive a new "substream open" event that contains an invalid handshake, we ask the - /// inner layer to force-close the substream. Force-closing the substream will generate a - /// "substream closed" event. This is a problem: since we can't propagate the "substream open" - /// event to the outer layers, we also shouldn't propagate this "substream closed" event. To - /// solve this, an entry is added to this map whenever an invalid handshake is received. - /// Entries are removed when the corresponding "substream closed" is later received. - bad_handshake_substreams: HashSet<(PeerId, SetId)>, - /// Connected peers on sync protocol. - peers: HashMap, - sync_substream_validations: FuturesUnordered, - tx: TracingUnboundedSender>, + /// Handle to `PeerStore`. + peer_store_handle: PeerStoreHandle, + /// Streams for peers whose handshake couldn't be determined. + bad_handshake_streams: HashSet, + sync_handle: ProtocolHandle, _marker: std::marker::PhantomData, } impl Protocol { /// Create a new instance. - pub fn new( + pub(crate) fn new( roles: Roles, + registry: &Option, notification_protocols: Vec, block_announces_protocol: config::NonDefaultSetConfig, peer_store_handle: PeerStoreHandle, protocol_controller_handles: Vec, from_protocol_controllers: TracingUnboundedReceiver, - tx: TracingUnboundedSender>, - ) -> error::Result { - let behaviour = { - Notifications::new( - protocol_controller_handles, - from_protocol_controllers, - // NOTE: Block announcement protocol is still very much hardcoded into `Protocol`. - // This protocol must be the first notification protocol given to - // `Notifications` - iter::once(notifications::ProtocolConfig { - name: block_announces_protocol.notifications_protocol.clone(), - fallback_names: block_announces_protocol.fallback_names.clone(), - handshake: block_announces_protocol.handshake.as_ref().unwrap().to_vec(), - max_notification_size: block_announces_protocol.max_notification_size, - }) - .chain(notification_protocols.iter().map(|s| notifications::ProtocolConfig { - name: s.notifications_protocol.clone(), - fallback_names: s.fallback_names.clone(), - handshake: s.handshake.as_ref().map_or(roles.encode(), |h| (*h).to_vec()), - max_notification_size: s.max_notification_size, - })), + ) -> error::Result<(Self, Vec)> { + let (behaviour, notification_protocols, handles) = { + let installed_protocols = iter::once(block_announces_protocol.protocol_name().clone()) + .chain(notification_protocols.iter().map(|p| p.protocol_name().clone())) + .collect::>(); + + // NOTE: Block announcement protocol is still very much hardcoded into + // `Protocol`. 
This protocol must be the first notification protocol given to + // `Notifications` + let (protocol_configs, handles): (Vec<_>, Vec<_>) = iter::once({ + let config = notifications::ProtocolConfig { + name: block_announces_protocol.protocol_name().clone(), + fallback_names: block_announces_protocol.fallback_names().cloned().collect(), + handshake: block_announces_protocol.handshake().as_ref().unwrap().to_vec(), + max_notification_size: block_announces_protocol.max_notification_size(), + }; + + let (handle, command_stream) = + block_announces_protocol.take_protocol_handle().split(); + + ((config, handle.clone(), command_stream), handle) + }) + .chain(notification_protocols.into_iter().map(|s| { + let config = notifications::ProtocolConfig { + name: s.protocol_name().clone(), + fallback_names: s.fallback_names().cloned().collect(), + handshake: s.handshake().as_ref().map_or(roles.encode(), |h| (*h).to_vec()), + max_notification_size: s.max_notification_size(), + }; + + let (handle, command_stream) = s.take_protocol_handle().split(); + + ((config, handle.clone(), command_stream), handle) + })) + .unzip(); + + ( + Notifications::new( + protocol_controller_handles, + from_protocol_controllers, + registry, + protocol_configs.into_iter(), + ), + installed_protocols, + handles, ) }; let protocol = Self { - peer_store_handle, behaviour, - notification_protocols: iter::once(block_announces_protocol.notifications_protocol) - .chain(notification_protocols.iter().map(|s| s.notifications_protocol.clone())) - .collect(), - bad_handshake_substreams: Default::default(), - peers: HashMap::new(), - sync_substream_validations: FuturesUnordered::new(), - tx, + sync_handle: handles[0].clone(), + peer_store_handle, + notification_protocols, + bad_handshake_streams: HashSet::new(), // TODO: remove when `BlockAnnouncesHandshake` is moved away from `Protocol` _marker: Default::default(), }; - Ok(protocol) + Ok((protocol, handles)) + } + + pub fn num_sync_peers(&self) -> usize { + self.sync_handle.num_peers() } /// Returns the list of all the peers we have an open channel to. @@ -163,21 +169,12 @@ impl Protocol { } } - /// Returns the number of peers we're connected to on sync protocol. - pub fn num_connected_peers(&self) -> usize { - self.peers.len() - } - - /// Set handshake for the notification protocol. - pub fn set_notification_handshake(&mut self, protocol: ProtocolName, handshake: Vec) { - if let Some(index) = self.notification_protocols.iter().position(|p| *p == protocol) { - self.behaviour.set_notif_protocol_handshake(SetId::from(index), handshake); - } else { - error!( - target: "sub-libp2p", - "set_notification_handshake with unknown protocol: {}", - protocol - ); + /// Check if role is available for `peer_id` by attempt to decode the handshake to roles and if + /// that fails, check if the role has been registered to `PeerStore`. + fn role_available(&self, peer_id: &PeerId, handshake: &Vec) -> bool { + match Roles::decode_all(&mut &handshake[..]) { + Ok(_) => true, + Err(_) => self.peer_store_handle.peer_role(&peer_id).is_some(), } } } @@ -189,25 +186,42 @@ pub enum CustomMessageOutcome { /// Notification protocols have been opened with a remote. NotificationStreamOpened { remote: PeerId, - protocol: ProtocolName, + // protocol: ProtocolName, + set_id: SetId, + /// Direction of the stream. + direction: Direction, /// See [`crate::Event::NotificationStreamOpened::negotiated_fallback`]. negotiated_fallback: Option, - roles: Roles, + /// Received handshake. received_handshake: Vec, + /// Notification sink. 
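+ /// Sink that can be used to send notifications to the remote peer on this substream.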
notifications_sink: NotificationsSink, }, /// The [`NotificationsSink`] of some notification protocols need an update. NotificationStreamReplaced { + // Peer ID. remote: PeerId, - protocol: ProtocolName, + /// Set ID. + set_id: SetId, + /// New notification sink. notifications_sink: NotificationsSink, }, /// Notification protocols have been closed with a remote. - NotificationStreamClosed { remote: PeerId, protocol: ProtocolName }, + NotificationStreamClosed { + // Peer ID. + remote: PeerId, + /// Set ID. + set_id: SetId, + }, /// Messages have been received on one or more notifications protocols. - NotificationsReceived { remote: PeerId, messages: Vec<(ProtocolName, Bytes)> }, - /// Now connected to a new peer for syncing purposes. - None, + NotificationsReceived { + // Peer ID. + remote: PeerId, + /// Set ID. + set_id: SetId, + /// Received notification. + notification: Vec, + }, } impl NetworkBehaviour for Protocol { @@ -274,23 +288,6 @@ impl NetworkBehaviour for Protocol { cx: &mut std::task::Context, params: &mut impl PollParameters, ) -> Poll>> { - while let Poll::Ready(Some(validation_result)) = - self.sync_substream_validations.poll_next_unpin(cx) - { - match validation_result { - Ok((peer, roles)) => { - self.peers.insert(peer, roles); - }, - Err(peer) => { - log::debug!( - target: "sub-libp2p", - "`SyncingEngine` rejected stream" - ); - self.behaviour.disconnect_peer(&peer, HARDCODED_PEERSETS_SYNC); - }, - } - } - let event = match self.behaviour.poll(cx, params) { Poll::Pending => return Poll::Pending, Poll::Ready(ToSwarm::GenerateEvent(ev)) => ev, @@ -307,204 +304,86 @@ impl NetworkBehaviour for Protocol { NotificationsOut::CustomProtocolOpen { peer_id, set_id, + direction, received_handshake, notifications_sink, negotiated_fallback, - inbound, - } => { - // Set number 0 is hardcoded the default set of peers we sync from. + .. + } => if set_id == HARDCODED_PEERSETS_SYNC { - // `received_handshake` can be either a `Status` message if received from the - // legacy substream ,or a `BlockAnnouncesHandshake` if received from the block - // announces substream. 
- match as DecodeAll>::decode_all(&mut &received_handshake[..]) { - Ok(GenericMessage::Status(handshake)) => { - let roles = handshake.roles; - let handshake = BlockAnnouncesHandshake:: { - roles: handshake.roles, - best_number: handshake.best_number, - best_hash: handshake.best_hash, - genesis_hash: handshake.genesis_hash, - }; - - let (tx, rx) = oneshot::channel(); - let _ = self.tx.unbounded_send( - crate::SyncEvent::NotificationStreamOpened { - inbound, - remote: peer_id, - received_handshake: handshake, - sink: notifications_sink, - tx, - }, - ); - self.sync_substream_validations.push(Box::pin(async move { - match rx.await { - Ok(accepted) => - if accepted { - Ok((peer_id, roles)) - } else { - Err(peer_id) - }, - Err(_) => Err(peer_id), - } - })); - - CustomMessageOutcome::None - }, - Ok(msg) => { - debug!( - target: "sync", - "Expected Status message from {}, but got {:?}", - peer_id, - msg, - ); - self.peer_store_handle.report_peer(peer_id, rep::BAD_MESSAGE); - CustomMessageOutcome::None - }, - Err(err) => { - match as DecodeAll>::decode_all( - &mut &received_handshake[..], - ) { - Ok(handshake) => { - let roles = handshake.roles; - - let (tx, rx) = oneshot::channel(); - let _ = self.tx.unbounded_send( - crate::SyncEvent::NotificationStreamOpened { - inbound, - remote: peer_id, - received_handshake: handshake, - sink: notifications_sink, - tx, - }, - ); - self.sync_substream_validations.push(Box::pin(async move { - match rx.await { - Ok(accepted) => - if accepted { - Ok((peer_id, roles)) - } else { - Err(peer_id) - }, - Err(_) => Err(peer_id), - } - })); - CustomMessageOutcome::None - }, - Err(err2) => { - log::debug!( - target: "sync", - "Couldn't decode handshake sent by {}: {:?}: {} & {}", - peer_id, - received_handshake, - err, - err2, - ); - self.peer_store_handle.report_peer(peer_id, rep::BAD_MESSAGE); - CustomMessageOutcome::None - }, - } - }, - } + let _ = self.sync_handle.report_substream_opened( + peer_id, + direction, + received_handshake, + negotiated_fallback, + notifications_sink, + ); + None } else { - match ( - Roles::decode_all(&mut &received_handshake[..]), - self.peers.get(&peer_id), - ) { - (Ok(roles), _) => CustomMessageOutcome::NotificationStreamOpened { + match self.role_available(&peer_id, &received_handshake) { + true => Some(CustomMessageOutcome::NotificationStreamOpened { remote: peer_id, - protocol: self.notification_protocols[usize::from(set_id)].clone(), + set_id, + direction, negotiated_fallback, - roles, received_handshake, notifications_sink, - }, - (Err(_), Some(roles)) if received_handshake.is_empty() => { - // As a convenience, we allow opening substreams for "external" - // notification protocols with an empty handshake. This fetches the - // roles from the locally-known roles. 
- // TODO: remove this after https://github.com/paritytech/substrate/issues/5685 - CustomMessageOutcome::NotificationStreamOpened { - remote: peer_id, - protocol: self.notification_protocols[usize::from(set_id)].clone(), - negotiated_fallback, - roles: *roles, - received_handshake, - notifications_sink, - } - }, - (Err(err), _) => { - debug!(target: "sync", "Failed to parse remote handshake: {}", err); - self.bad_handshake_substreams.insert((peer_id, set_id)); - self.behaviour.disconnect_peer(&peer_id, set_id); - self.peer_store_handle.report_peer(peer_id, rep::BAD_MESSAGE); - CustomMessageOutcome::None + }), + false => { + self.bad_handshake_streams.insert(peer_id); + None }, } - } - }, + }, NotificationsOut::CustomProtocolReplaced { peer_id, notifications_sink, set_id } => - if self.bad_handshake_substreams.contains(&(peer_id, set_id)) { - CustomMessageOutcome::None - } else if set_id == HARDCODED_PEERSETS_SYNC { - let _ = self.tx.unbounded_send(crate::SyncEvent::NotificationSinkReplaced { - remote: peer_id, - sink: notifications_sink, - }); - CustomMessageOutcome::None + if set_id == HARDCODED_PEERSETS_SYNC { + let _ = self + .sync_handle + .report_notification_sink_replaced(peer_id, notifications_sink); + None } else { - CustomMessageOutcome::NotificationStreamReplaced { - remote: peer_id, - protocol: self.notification_protocols[usize::from(set_id)].clone(), - notifications_sink, - } + (!self.bad_handshake_streams.contains(&peer_id)).then_some( + CustomMessageOutcome::NotificationStreamReplaced { + remote: peer_id, + set_id, + notifications_sink, + }, + ) }, NotificationsOut::CustomProtocolClosed { peer_id, set_id } => { - if self.bad_handshake_substreams.remove(&(peer_id, set_id)) { - // The substream that has just been closed had been opened with a bad - // handshake. The outer layers have never received an opening event about this - // substream, and consequently shouldn't receive a closing event either. 
- CustomMessageOutcome::None - } else if set_id == HARDCODED_PEERSETS_SYNC { - let _ = self.tx.unbounded_send(crate::SyncEvent::NotificationStreamClosed { - remote: peer_id, - }); - self.peers.remove(&peer_id); - CustomMessageOutcome::None + if set_id == HARDCODED_PEERSETS_SYNC { + let _ = self.sync_handle.report_substream_closed(peer_id); + None } else { - CustomMessageOutcome::NotificationStreamClosed { - remote: peer_id, - protocol: self.notification_protocols[usize::from(set_id)].clone(), - } + (!self.bad_handshake_streams.remove(&peer_id)).then_some( + CustomMessageOutcome::NotificationStreamClosed { remote: peer_id, set_id }, + ) } }, NotificationsOut::Notification { peer_id, set_id, message } => { - if self.bad_handshake_substreams.contains(&(peer_id, set_id)) { - CustomMessageOutcome::None - } else if set_id == HARDCODED_PEERSETS_SYNC { - let _ = self.tx.unbounded_send(crate::SyncEvent::NotificationsReceived { - remote: peer_id, - messages: vec![message.freeze()], - }); - CustomMessageOutcome::None + if set_id == HARDCODED_PEERSETS_SYNC { + let _ = self + .sync_handle + .report_notification_received(peer_id, message.freeze().into()); + None } else { - let protocol_name = self.notification_protocols[usize::from(set_id)].clone(); - CustomMessageOutcome::NotificationsReceived { - remote: peer_id, - messages: vec![(protocol_name, message.freeze())], - } + (!self.bad_handshake_streams.contains(&peer_id)).then_some( + CustomMessageOutcome::NotificationsReceived { + remote: peer_id, + set_id, + notification: message.freeze().into(), + }, + ) } }, }; - if !matches!(outcome, CustomMessageOutcome::None) { - return Poll::Ready(ToSwarm::GenerateEvent(outcome)) + match outcome { + Some(event) => Poll::Ready(ToSwarm::GenerateEvent(event)), + None => { + cx.waker().wake_by_ref(); + Poll::Pending + }, } - - // This block can only be reached if an event was pulled from the behaviour and that - // resulted in `CustomMessageOutcome::None`. Since there might be another pending - // message from the behaviour, the task is scheduled again. - cx.waker().wake_by_ref(); - Poll::Pending } } diff --git a/substrate/client/network/src/protocol/message.rs b/substrate/client/network/src/protocol/message.rs index 66dca297537..247580083f9 100644 --- a/substrate/client/network/src/protocol/message.rs +++ b/substrate/client/network/src/protocol/message.rs @@ -29,6 +29,7 @@ use sc_network_common::message::RequestId; use sp_runtime::traits::{Block as BlockT, Header as HeaderT}; /// Type alias for using the message type using block type parameters. 
+#[allow(unused)] pub type Message = generic::Message< ::Header, ::Hash, diff --git a/substrate/client/network/src/protocol/notifications.rs b/substrate/client/network/src/protocol/notifications.rs index aa49cfcf9d4..10fa329097d 100644 --- a/substrate/client/network/src/protocol/notifications.rs +++ b/substrate/client/network/src/protocol/notifications.rs @@ -22,9 +22,13 @@ pub use self::{ behaviour::{Notifications, NotificationsOut, ProtocolConfig}, handler::{NotificationsSink, NotifsHandlerError, Ready}, + service::{notification_service, ProtocolHandlePair}, }; +pub(crate) use self::service::ProtocolHandle; + mod behaviour; mod handler; +mod service; mod tests; mod upgrade; diff --git a/substrate/client/network/src/protocol/notifications/behaviour.rs b/substrate/client/network/src/protocol/notifications/behaviour.rs index b78f15f8529..ef0c6540eee 100644 --- a/substrate/client/network/src/protocol/notifications/behaviour.rs +++ b/substrate/client/network/src/protocol/notifications/behaviour.rs @@ -17,16 +17,18 @@ // along with this program. If not, see . use crate::{ - protocol::notifications::handler::{ - self, NotificationsSink, NotifsHandler, NotifsHandlerIn, NotifsHandlerOut, + protocol::notifications::{ + handler::{self, NotificationsSink, NotifsHandler, NotifsHandlerIn, NotifsHandlerOut}, + service::{metrics, NotificationCommand, ProtocolHandle, ValidationCallResult}, }, protocol_controller::{self, IncomingIndex, Message, SetId}, + service::traits::{Direction, ValidationResult}, types::ProtocolName, }; use bytes::BytesMut; use fnv::FnvHashMap; -use futures::prelude::*; +use futures::{future::BoxFuture, prelude::*, stream::FuturesUnordered}; use libp2p::{ core::{ConnectedPoint, Endpoint, Multiaddr}, swarm::{ @@ -36,11 +38,15 @@ use libp2p::{ }, PeerId, }; -use log::{debug, error, info, trace, warn}; +use log::{debug, error, trace, warn}; use parking_lot::RwLock; +use prometheus_endpoint::Registry; use rand::distributions::{Distribution as _, Uniform}; use sc_utils::mpsc::TracingUnboundedReceiver; use smallvec::SmallVec; +use tokio::sync::oneshot::error::RecvError; +use tokio_stream::StreamMap; + use std::{ cmp, collections::{hash_map::Entry, VecDeque}, @@ -51,6 +57,13 @@ use std::{ time::{Duration, Instant}, }; +/// Type representing a pending substream validation. +type PendingInboundValidation = + BoxFuture<'static, (Result, IncomingIndex)>; + +/// Logging target for the file. +const LOG_TARGET: &str = "sub-libp2p"; + /// Network behaviour that handles opening substreams for custom protocols with other peers. /// /// # How it works @@ -106,6 +119,12 @@ pub struct Notifications { /// Notification protocols. Entries never change after initialization. notif_protocols: Vec, + /// Protocol handles. + protocol_handles: Vec, + + // Command streams. + command_streams: StreamMap + Send + Unpin>>, + /// Protocol controllers are responsible for peer connections management. protocol_controller_handles: Vec, @@ -138,6 +157,18 @@ pub struct Notifications { /// Events to produce from `poll()`. events: VecDeque>, + + /// Pending inbound substream validations. + // + // NOTE: it's possible to read a stale response from `pending_inbound_validations` + // as the substream may get closed by the remote peer before the protocol has had + // a chance to validate it. [`Notifications`] must compare the `crate::peerset::IncomingIndex` + // returned by the completed future against the `crate::peerset::IncomingIndex` stored in + // `PeerState::Incoming` to check whether the completed future is stale or not. 
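+ // For example (hypothetical indices, illustration only): if the stored state is + // `PeerState::Incoming { incoming_index: IncomingIndex(5), .. }` and a completed future + // yields `IncomingIndex(3)`, the result refers to a substream that was already replaced + // and must be discarded rather than acted upon.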
+ pending_inbound_validations: FuturesUnordered, + + /// Metrics for notifications. + metrics: Option, } /// Configuration for a notifications protocol. @@ -235,6 +266,9 @@ enum PeerState { /// Incoming index tracking this connection. incoming_index: IncomingIndex, + /// Peerset has signaled it wants the substream closed. + peerset_rejected: bool, + /// List of connections with this peer, and their state. connections: SmallVec<[(ConnectionId, ConnectionState); crate::MAX_CONNECTIONS_PER_PEER]>, }, @@ -303,6 +337,8 @@ struct IncomingPeer { alive: bool, /// Id that the we sent to the peerset. incoming_id: IncomingIndex, + /// Received handshake. + handshake: Vec, } /// Event that can be emitted by the `Notifications`. @@ -314,6 +350,8 @@ pub enum NotificationsOut { peer_id: PeerId, /// Peerset set ID the substream is tied to. set_id: SetId, + /// Direction of the stream. + direction: Direction, /// If `Some`, a fallback protocol name has been used rather the main protocol name. /// Always matches one of the fallback names passed at initialization. negotiated_fallback: Option, @@ -364,24 +402,52 @@ pub enum NotificationsOut { impl Notifications { /// Creates a `CustomProtos`. - pub fn new( + pub(crate) fn new( protocol_controller_handles: Vec, from_protocol_controllers: TracingUnboundedReceiver, - notif_protocols: impl Iterator, + registry: &Option, + notif_protocols: impl Iterator< + Item = ( + ProtocolConfig, + ProtocolHandle, + Box + Send + Unpin>, + ), + >, ) -> Self { - let notif_protocols = notif_protocols - .map(|cfg| handler::ProtocolConfig { - name: cfg.name, - fallback_names: cfg.fallback_names, - handshake: Arc::new(RwLock::new(cfg.handshake)), - max_notification_size: cfg.max_notification_size, + let (notif_protocols, protocol_handles): (Vec<_>, Vec<_>) = notif_protocols + .map(|(cfg, protocol_handle, command_stream)| { + ( + handler::ProtocolConfig { + name: cfg.name, + fallback_names: cfg.fallback_names, + handshake: Arc::new(RwLock::new(cfg.handshake)), + max_notification_size: cfg.max_notification_size, + }, + (protocol_handle, command_stream), + ) }) - .collect::>(); - + .unzip(); assert!(!notif_protocols.is_empty()); + let metrics = registry.as_ref().and_then(|registry| metrics::register(®istry).ok()); + let (mut protocol_handles, command_streams): (Vec<_>, Vec<_>) = protocol_handles + .into_iter() + .enumerate() + .map(|(set_id, (mut protocol_handle, command_stream))| { + protocol_handle.set_metrics(metrics.clone()); + + (protocol_handle, (set_id, command_stream)) + }) + .unzip(); + + protocol_handles.iter_mut().skip(1).for_each(|handle| { + handle.delegate_to_peerset(true); + }); + Self { notif_protocols, + protocol_handles, + command_streams: StreamMap::from_iter(command_streams.into_iter()), protocol_controller_handles, from_protocol_controllers, peers: FnvHashMap::default(), @@ -390,6 +456,8 @@ impl Notifications { incoming: SmallVec::new(), next_incoming_index: IncomingIndex(0), events: VecDeque::new(), + pending_inbound_validations: FuturesUnordered::new(), + metrics, } } @@ -807,14 +875,21 @@ impl Notifications { *entry.into_mut() = PeerState::Backoff { timer, timer_deadline } }, - // Invalid state transitions. - st @ PeerState::Incoming { .. } => { - info!( + // `ProtocolController` disconnected peer while it was still being validated by the + // protocol, mark the connection as rejected and once the validation is received from + // the protocol, reject the substream + PeerState::Incoming { backoff_until, connections, incoming_index, .. 
} => { + debug!( target: "sub-libp2p", "PSM => Drop({}, {:?}): Ignoring obsolete disconnect, we are awaiting accept/reject.", entry.key().0, set_id, ); - *entry.into_mut() = st; + *entry.into_mut() = PeerState::Incoming { + backoff_until, + connections, + incoming_index, + peerset_rejected: true, + }; }, PeerState::Poisoned => { error!(target: "sub-libp2p", "State of {:?} is poisoned", entry.key()); @@ -823,20 +898,71 @@ impl Notifications { } } + /// Substream has been accepted by the `ProtocolController` and must now be sent + /// to the protocol for validation. + fn peerset_report_preaccept(&mut self, index: IncomingIndex) { + let Some(pos) = self.incoming.iter().position(|i| i.incoming_id == index) else { + error!(target: LOG_TARGET, "PSM => Preaccept({:?}): Invalid index", index); + return + }; + + trace!( + target: LOG_TARGET, + "PSM => Preaccept({:?}): Sent to protocol for validation", + index + ); + let incoming = &self.incoming[pos]; + + match self.protocol_handles[usize::from(incoming.set_id)] + .report_incoming_substream(incoming.peer_id, incoming.handshake.clone()) + { + Ok(ValidationCallResult::Delegated) => { + self.protocol_report_accept(index); + }, + Ok(ValidationCallResult::WaitForValidation(rx)) => { + self.pending_inbound_validations + .push(Box::pin(async move { (rx.await, index) })); + }, + Err(err) => { + // parachain collators enable the syncing protocol but `NotificationService` for + // `SyncingEngine` is not created which causes `report_incoming_substream()` to + // fail. This is not a fatal error and should be ignored even though in typical + // cases the `NotificationService` not existing is a fatal error and indicates that + // the protocol has exited. Until the parachain collator issue is fixed, just report + // and error and reject the peer. + debug!(target: LOG_TARGET, "protocol has exited: {err:?} {:?}", incoming.set_id); + + self.protocol_report_reject(index); + }, + } + } + /// Function that is called when the peerset wants us to accept a connection /// request from a peer. - fn peerset_report_accept(&mut self, index: IncomingIndex) { - let incoming = if let Some(pos) = self.incoming.iter().position(|i| i.incoming_id == index) - { - self.incoming.remove(pos) - } else { - error!(target: "sub-libp2p", "PSM => Accept({:?}): Invalid index", index); - return + fn protocol_report_accept(&mut self, index: IncomingIndex) { + let (pos, incoming) = + if let Some(pos) = self.incoming.iter().position(|i| i.incoming_id == index) { + (pos, self.incoming.get(pos)) + } else { + error!(target: "sub-libp2p", "PSM => Accept({:?}): Invalid index", index); + return + }; + + let Some(incoming) = incoming else { + error!(target: "sub-libp2p", "Incoming connection ({:?}) doesn't exist", index); + debug_assert!(false); + return; }; if !incoming.alive { - trace!(target: "sub-libp2p", "PSM => Accept({:?}, {}, {:?}): Obsolete incoming", - index, incoming.peer_id, incoming.set_id); + trace!( + target: "sub-libp2p", + "PSM => Accept({:?}, {}, {:?}): Obsolete incoming", + index, + incoming.peer_id, + incoming.set_id, + ); + match self.peers.get_mut(&(incoming.peer_id, incoming.set_id)) { Some(PeerState::DisabledPendingEnable { .. }) | Some(PeerState::Enabled { .. 
}) => { }, @@ -847,26 +973,42 @@ impl Notifications { .dropped(incoming.peer_id); }, } + + self.incoming.remove(pos); return } let state = match self.peers.get_mut(&(incoming.peer_id, incoming.set_id)) { Some(s) => s, None => { - debug_assert!(false); + log::debug!( + target: "sub-libp2p", + "Connection to {:?} closed, ({:?} {:?}), ignoring accept", + incoming.peer_id, + incoming.set_id, + index, + ); + self.incoming.remove(pos); return }, }; match mem::replace(state, PeerState::Poisoned) { // Incoming => Enabled - PeerState::Incoming { mut connections, incoming_index, .. } => { + PeerState::Incoming { + mut connections, + incoming_index, + peerset_rejected, + backoff_until, + } => { if index < incoming_index { warn!( target: "sub-libp2p", "PSM => Accept({:?}, {}, {:?}): Ignoring obsolete incoming index, we are already awaiting {:?}.", index, incoming.peer_id, incoming.set_id, incoming_index ); + + self.incoming.remove(pos); return } else if index > incoming_index { error!( @@ -874,12 +1016,39 @@ impl Notifications { "PSM => Accept({:?}, {}, {:?}): Ignoring incoming index from the future, we are awaiting {:?}.", index, incoming.peer_id, incoming.set_id, incoming_index ); + + self.incoming.remove(pos); debug_assert!(false); return } - trace!(target: "sub-libp2p", "PSM => Accept({:?}, {}, {:?}): Enabling connections.", - index, incoming.peer_id, incoming.set_id); + // while the substream was being validated by the protocol, `Peerset` had request + // for the it to be closed so reject the substream now + if peerset_rejected { + trace!( + target: "sub-libp2p", + "Protocol accepted ({:?} {:?} {:?}) but Peerset had request disconnection, rejecting", + index, + incoming.peer_id, + incoming.set_id + ); + + *state = PeerState::Incoming { + connections, + backoff_until, + peerset_rejected, + incoming_index, + }; + return self.report_reject(index).map_or((), |_| ()); + } + + trace!( + target: "sub-libp2p", + "PSM => Accept({:?}, {}, {:?}): Enabling connections.", + index, + incoming.peer_id, + incoming.set_id + ); debug_assert!(connections .iter() @@ -898,53 +1067,85 @@ impl Notifications { *connec_state = ConnectionState::Opening; } + self.incoming.remove(pos); *state = PeerState::Enabled { connections }; }, - + st @ PeerState::Disabled { .. } | st @ PeerState::Backoff { .. } => { + self.incoming.remove(pos); + *state = st; + }, // Any state other than `Incoming` is invalid. peer => { - error!(target: "sub-libp2p", + error!( + target: "sub-libp2p", "State mismatch in libp2p: Expected alive incoming. Got {:?}.", - peer); + peer + ); + + self.incoming.remove(pos); debug_assert!(false); }, } } - /// Function that is called when the peerset wants us to reject an incoming peer. + /// Function that is called when `ProtocolController` wants us to reject an incoming peer. fn peerset_report_reject(&mut self, index: IncomingIndex) { + let _ = self.report_reject(index); + } + + /// Function that is called when the protocol wants us to reject an incoming peer. + fn protocol_report_reject(&mut self, index: IncomingIndex) { + if let Some((set_id, peer_id)) = self.report_reject(index) { + self.protocol_controller_handles[usize::from(set_id)].dropped(peer_id) + } + } + + /// Function that is called when the peerset wants us to reject an incoming peer. 
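+ /// Returns the `(SetId, PeerId)` of the rejected peer when the rejection was applied, so + /// the caller can notify the matching `ProtocolController`; returns `None` if the entry + /// was obsolete or the connection to the peer was already closed.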
+ fn report_reject(&mut self, index: IncomingIndex) -> Option<(SetId, PeerId)> { let incoming = if let Some(pos) = self.incoming.iter().position(|i| i.incoming_id == index) { self.incoming.remove(pos) } else { error!(target: "sub-libp2p", "PSM => Reject({:?}): Invalid index", index); - return + return None }; if !incoming.alive { - trace!(target: "sub-libp2p", "PSM => Reject({:?}, {}, {:?}): Obsolete incoming, \ - ignoring", index, incoming.peer_id, incoming.set_id); - return + trace!( + target: "sub-libp2p", + "PSM => Reject({:?}, {}, {:?}): Obsolete incoming, ignoring", + index, + incoming.peer_id, + incoming.set_id, + ); + + return None } let state = match self.peers.get_mut(&(incoming.peer_id, incoming.set_id)) { Some(s) => s, None => { - debug_assert!(false); - return + log::debug!( + target: "sub-libp2p", + "Connection to {:?} closed, ({:?} {:?}), ignoring accept", + incoming.peer_id, + incoming.set_id, + index, + ); + return None }, }; match mem::replace(state, PeerState::Poisoned) { // Incoming => Disabled - PeerState::Incoming { mut connections, backoff_until, incoming_index } => { + PeerState::Incoming { mut connections, backoff_until, incoming_index, .. } => { if index < incoming_index { warn!( target: "sub-libp2p", "PSM => Reject({:?}, {}, {:?}): Ignoring obsolete incoming index, we are already awaiting {:?}.", index, incoming.peer_id, incoming.set_id, incoming_index ); - return + return None } else if index > incoming_index { error!( target: "sub-libp2p", @@ -952,7 +1153,7 @@ impl Notifications { index, incoming.peer_id, incoming.set_id, incoming_index ); debug_assert!(false); - return + return None } trace!(target: "sub-libp2p", "PSM => Reject({:?}, {}, {:?}): Rejecting connections.", @@ -976,10 +1177,20 @@ impl Notifications { } *state = PeerState::Disabled { connections, backoff_until }; + Some((incoming.set_id, incoming.peer_id)) + }, + // connection to peer may have been closed already + st @ PeerState::Disabled { .. } | st @ PeerState::Backoff { .. } => { + *state = st; + None + }, + peer => { + error!( + target: LOG_TARGET, + "State mismatch in libp2p: Expected alive incoming. Got {peer:?}.", + ); + None }, - peer => error!(target: "sub-libp2p", - "State mismatch in libp2p: Expected alive incoming. 
Got {:?}.", - peer), } } } @@ -1021,6 +1232,7 @@ impl NetworkBehaviour for Notifications { send_back_addr: remote_addr.clone(), }, self.notif_protocols.clone(), + self.metrics.clone(), )) } @@ -1035,6 +1247,7 @@ impl NetworkBehaviour for Notifications { peer, ConnectedPoint::Dialer { address: addr.clone(), role_override }, self.notif_protocols.clone(), + self.metrics.clone(), )) } @@ -1195,7 +1408,12 @@ impl NetworkBehaviour for Notifications { }, // Incoming => Incoming | Disabled | Backoff | Ø - PeerState::Incoming { mut connections, backoff_until, incoming_index } => { + PeerState::Incoming { + mut connections, + backoff_until, + incoming_index, + peerset_rejected, + } => { trace!( target: "sub-libp2p", "Libp2p => Disconnected({}, {:?}, {:?}): OpenDesiredByRemote.", @@ -1274,6 +1492,7 @@ impl NetworkBehaviour for Notifications { connections, backoff_until, incoming_index, + peerset_rejected, }; } }, @@ -1313,7 +1532,7 @@ impl NetworkBehaviour for Notifications { let event = NotificationsOut::CustomProtocolReplaced { peer_id, set_id, - notifications_sink: replacement_sink, + notifications_sink: replacement_sink.clone(), }; self.events.push_back(ToSwarm::GenerateEvent(event)); } @@ -1474,7 +1693,7 @@ impl NetworkBehaviour for Notifications { event: THandlerOutEvent, ) { match event { - NotifsHandlerOut::OpenDesiredByRemote { protocol_index } => { + NotifsHandlerOut::OpenDesiredByRemote { protocol_index, handshake } => { let set_id = SetId::from(protocol_index); trace!(target: "sub-libp2p", @@ -1495,7 +1714,12 @@ impl NetworkBehaviour for Notifications { match mem::replace(entry.get_mut(), PeerState::Poisoned) { // Incoming => Incoming - PeerState::Incoming { mut connections, backoff_until, incoming_index } => { + PeerState::Incoming { + mut connections, + backoff_until, + incoming_index, + peerset_rejected, + } => { debug_assert!(connections .iter() .any(|(_, s)| matches!(s, ConnectionState::OpenDesiredByRemote))); @@ -1523,8 +1747,12 @@ impl NetworkBehaviour for Notifications { debug_assert!(false); } - *entry.into_mut() = - PeerState::Incoming { connections, backoff_until, incoming_index }; + *entry.into_mut() = PeerState::Incoming { + connections, + backoff_until, + incoming_index, + peerset_rejected, + }; }, PeerState::Enabled { mut connections } => { @@ -1588,11 +1816,13 @@ impl NetworkBehaviour for Notifications { set_id, alive: true, incoming_id, + handshake, }); *entry.into_mut() = PeerState::Incoming { connections, backoff_until, + peerset_rejected: false, incoming_index: incoming_id, }; } else { @@ -1725,7 +1955,7 @@ impl NetworkBehaviour for Notifications { let event = NotificationsOut::CustomProtocolReplaced { peer_id, set_id, - notifications_sink: replacement_sink, + notifications_sink: replacement_sink.clone(), }; self.events.push_back(ToSwarm::GenerateEvent(event)); } @@ -1830,8 +2060,13 @@ impl NetworkBehaviour for Notifications { peer_id, set_id, inbound, - negotiated_fallback, - received_handshake, + direction: if inbound { + Direction::Inbound + } else { + Direction::Outbound + }, + received_handshake: received_handshake.clone(), + negotiated_fallback: negotiated_fallback.clone(), notifications_sink: notifications_sink.clone(), }; self.events.push_back(ToSwarm::GenerateEvent(event)); @@ -1979,8 +2214,11 @@ impl NetworkBehaviour for Notifications { peer_id, set_id, ); - let event = NotificationsOut::Notification { peer_id, set_id, message }; - + let event = NotificationsOut::Notification { + peer_id, + set_id, + message: message.clone(), + }; 
self.events.push_back(ToSwarm::GenerateEvent(event)); } else { trace!( @@ -2009,10 +2247,10 @@ impl NetworkBehaviour for Notifications { loop { match futures::Stream::poll_next(Pin::new(&mut self.from_protocol_controllers), cx) { Poll::Ready(Some(Message::Accept(index))) => { - self.peerset_report_accept(index); + self.peerset_report_preaccept(index); }, Poll::Ready(Some(Message::Reject(index))) => { - self.peerset_report_reject(index); + let _ = self.peerset_report_reject(index); }, Poll::Ready(Some(Message::Connect { peer_id, set_id, .. })) => { self.peerset_report_connect(peer_id, set_id); @@ -2031,6 +2269,43 @@ impl NetworkBehaviour for Notifications { } } + // poll commands from protocols + loop { + match futures::Stream::poll_next(Pin::new(&mut self.command_streams), cx) { + Poll::Ready(Some((set_id, command))) => match command { + NotificationCommand::SetHandshake(handshake) => { + self.set_notif_protocol_handshake(set_id.into(), handshake); + }, + NotificationCommand::OpenSubstream(_peer) | + NotificationCommand::CloseSubstream(_peer) => { + todo!("substream control not implemented"); + }, + }, + Poll::Ready(None) => { + error!(target: LOG_TARGET, "Protocol command streams have been shut down"); + break + }, + Poll::Pending => break, + } + } + + while let Poll::Ready(Some((result, index))) = + self.pending_inbound_validations.poll_next_unpin(cx) + { + match result { + Ok(ValidationResult::Accept) => { + self.protocol_report_accept(index); + }, + Ok(ValidationResult::Reject) => { + self.protocol_report_reject(index); + }, + Err(_) => { + error!(target: LOG_TARGET, "Protocol has shut down"); + break + }, + } + } + while let Poll::Ready(Some((delay_id, peer_id, set_id))) = Pin::new(&mut self.delays).poll_next(cx) { @@ -2153,7 +2428,10 @@ mod tests { } } - fn development_notifs() -> (Notifications, ProtocolController) { + fn development_notifs( + ) -> (Notifications, ProtocolController, Box) { + let (protocol_handle_pair, notif_service) = + crate::protocol::notifications::service::notification_service("/proto/1".into()); let (to_notifications, from_controller) = tracing_unbounded("test_controller_to_notifications", 10_000); @@ -2169,24 +2447,31 @@ mod tests { Box::new(MockPeerStore {}), ); + let (notif_handle, command_stream) = protocol_handle_pair.split(); ( Notifications::new( vec![handle], from_controller, - iter::once(ProtocolConfig { - name: "/foo".into(), - fallback_names: Vec::new(), - handshake: vec![1, 2, 3, 4], - max_notification_size: u64::MAX, - }), + &None, + iter::once(( + ProtocolConfig { + name: "/foo".into(), + fallback_names: Vec::new(), + handshake: vec![1, 2, 3, 4], + max_notification_size: u64::MAX, + }, + notif_handle, + command_stream, + )), ), controller, + notif_service, ) } #[test] fn update_handshake() { - let (mut notif, _controller) = development_notifs(); + let (mut notif, _controller, _notif_service) = development_notifs(); let inner = notif.notif_protocols.get_mut(0).unwrap().handshake.read().clone(); assert_eq!(inner, vec![1, 2, 3, 4]); @@ -2201,14 +2486,14 @@ mod tests { #[should_panic] #[cfg(debug_assertions)] fn update_unknown_handshake() { - let (mut notif, _controller) = development_notifs(); + let (mut notif, _controller, _notif_service) = development_notifs(); notif.set_notif_protocol_handshake(1337.into(), vec![5, 6, 7, 8]); } #[test] fn disconnect_backoff_peer() { - let (mut notif, _controller) = development_notifs(); + let (mut notif, _controller, _notif_service) = development_notifs(); let peer = PeerId::random(); notif.peers.insert( @@ 
-2225,7 +2510,7 @@ mod tests { #[test] fn disconnect_pending_request() { - let (mut notif, _controller) = development_notifs(); + let (mut notif, _controller, _notif_service) = development_notifs(); let peer = PeerId::random(); notif.peers.insert( @@ -2242,7 +2527,7 @@ mod tests { #[test] fn disconnect_requested_peer() { - let (mut notif, _controller) = development_notifs(); + let (mut notif, _controller, _notif_service) = development_notifs(); let peer = PeerId::random(); notif.peers.insert((peer, 0.into()), PeerState::Requested); @@ -2253,7 +2538,7 @@ mod tests { #[test] fn disconnect_disabled_peer() { - let (mut notif, _controller) = development_notifs(); + let (mut notif, _controller, _notif_service) = development_notifs(); let peer = PeerId::random(); notif.peers.insert( (peer, 0.into()), @@ -2269,7 +2554,7 @@ mod tests { #[test] fn remote_opens_connection_and_substream() { - let (mut notif, _controller) = development_notifs(); + let (mut notif, _controller, _notif_service) = development_notifs(); let peer = PeerId::random(); let conn = ConnectionId::new_unchecked(0); let connected = ConnectedPoint::Listener { @@ -2299,7 +2584,10 @@ mod tests { notif.on_connection_handler_event( peer, conn, - NotifsHandlerOut::OpenDesiredByRemote { protocol_index: 0 }, + NotifsHandlerOut::OpenDesiredByRemote { + protocol_index: 0, + handshake: vec![1, 3, 3, 7], + }, ); if let Some(&PeerState::Incoming { ref connections, backoff_until: None, .. }) = @@ -2319,7 +2607,7 @@ mod tests { #[tokio::test] async fn disconnect_remote_substream_before_handled_by_controller() { - let (mut notif, _controller) = development_notifs(); + let (mut notif, _controller, _notif_service) = development_notifs(); let peer = PeerId::random(); let conn = ConnectionId::new_unchecked(0); let connected = ConnectedPoint::Listener { @@ -2339,7 +2627,10 @@ mod tests { notif.on_connection_handler_event( peer, conn, - NotifsHandlerOut::OpenDesiredByRemote { protocol_index: 0 }, + NotifsHandlerOut::OpenDesiredByRemote { + protocol_index: 0, + handshake: vec![1, 3, 3, 7], + }, ); notif.disconnect_peer(&peer, 0.into()); @@ -2355,7 +2646,7 @@ mod tests { #[test] fn peerset_report_connect_backoff() { - let (mut notif, _controller) = development_notifs(); + let (mut notif, _controller, _notif_service) = development_notifs(); let set_id = SetId::from(0); let peer = PeerId::random(); let conn = ConnectionId::new_unchecked(0); @@ -2393,7 +2684,7 @@ mod tests { peer_id: peer, connection_id: conn, endpoint: &connected.clone(), - handler: NotifsHandler::new(peer, connected, vec![]), + handler: NotifsHandler::new(peer, connected, vec![], None), remaining_established: 0usize, }, )); @@ -2420,7 +2711,7 @@ mod tests { #[test] fn peerset_connect_incoming() { - let (mut notif, _controller) = development_notifs(); + let (mut notif, _controller, _notif_service) = development_notifs(); let peer = PeerId::random(); let conn = ConnectionId::new_unchecked(0); let set_id = SetId::from(0); @@ -2444,19 +2735,22 @@ mod tests { notif.on_connection_handler_event( peer, conn, - NotifsHandlerOut::OpenDesiredByRemote { protocol_index: 0 }, + NotifsHandlerOut::OpenDesiredByRemote { + protocol_index: 0, + handshake: vec![1, 3, 3, 7], + }, ); // attempt to connect to the peer and verify that the peer state is `Enabled`; // we rely on implementation detail that incoming indices are counted from 0 // to not mock the `Peerset` - notif.peerset_report_accept(IncomingIndex(0)); + notif.protocol_report_accept(IncomingIndex(0)); assert!(std::matches!(notif.peers.get(&(peer, 
set_id)), Some(&PeerState::Enabled { .. }))); } #[test] fn peerset_disconnect_disable_pending_enable() { - let (mut notif, _controller) = development_notifs(); + let (mut notif, _controller, _notif_service) = development_notifs(); let set_id = SetId::from(0); let peer = PeerId::random(); let conn = ConnectionId::new_unchecked(0); @@ -2503,7 +2797,7 @@ mod tests { #[test] fn peerset_disconnect_enabled() { - let (mut notif, _controller) = development_notifs(); + let (mut notif, _controller, _notif_service) = development_notifs(); let peer = PeerId::random(); let conn = ConnectionId::new_unchecked(0); let set_id = SetId::from(0); @@ -2525,11 +2819,14 @@ mod tests { notif.on_connection_handler_event( peer, conn, - NotifsHandlerOut::OpenDesiredByRemote { protocol_index: 0 }, + NotifsHandlerOut::OpenDesiredByRemote { + protocol_index: 0, + handshake: vec![1, 3, 3, 7], + }, ); // we rely on the implementation detail that incoming indices are counted from 0 // to not mock the `Peerset` - notif.peerset_report_accept(IncomingIndex(0)); + notif.protocol_report_accept(IncomingIndex(0)); assert!(std::matches!(notif.peers.get(&(peer, set_id)), Some(&PeerState::Enabled { .. }))); // disconnect peer and verify that the state is `Disabled` @@ -2539,7 +2836,7 @@ mod tests { #[test] fn peerset_disconnect_requested() { - let (mut notif, _controller) = development_notifs(); + let (mut notif, _controller, _notif_service) = development_notifs(); let peer = PeerId::random(); let set_id = SetId::from(0); @@ -2554,7 +2851,7 @@ mod tests { #[test] fn peerset_disconnect_pending_request() { - let (mut notif, _controller) = development_notifs(); + let (mut notif, _controller, _notif_service) = development_notifs(); let set_id = SetId::from(0); let peer = PeerId::random(); let conn = ConnectionId::new_unchecked(0); @@ -2587,7 +2884,7 @@ mod tests { peer_id: peer, connection_id: conn, endpoint: &connected.clone(), - handler: NotifsHandler::new(peer, connected, vec![]), + handler: NotifsHandler::new(peer, connected, vec![], None), remaining_established: 0usize, }, )); @@ -2607,7 +2904,7 @@ mod tests { #[test] fn peerset_accept_peer_not_alive() { - let (mut notif, _controller) = development_notifs(); + let (mut notif, _controller, _notif_service) = development_notifs(); let peer = PeerId::random(); let conn = ConnectionId::new_unchecked(0); let set_id = SetId::from(0); @@ -2631,7 +2928,10 @@ mod tests { notif.on_connection_handler_event( peer, conn, - NotifsHandlerOut::OpenDesiredByRemote { protocol_index: 0 }, + NotifsHandlerOut::OpenDesiredByRemote { + protocol_index: 0, + handshake: vec![1, 3, 3, 7], + }, ); assert!(std::matches!(notif.peers.get(&(peer, set_id)), Some(&PeerState::Incoming { .. }))); @@ -2647,14 +2947,14 @@ mod tests { IncomingPeer { alive: false, incoming_id: IncomingIndex(0), .. }, )); - notif.peerset_report_accept(IncomingIndex(0)); + notif.protocol_report_accept(IncomingIndex(0)); assert_eq!(notif.incoming.len(), 0); assert!(std::matches!(notif.peers.get(&(peer, set_id)), Some(PeerState::Disabled { .. 
}))); } #[test] fn secondary_connection_peer_state_incoming() { - let (mut notif, _controller) = development_notifs(); + let (mut notif, _controller, _notif_service) = development_notifs(); let peer = PeerId::random(); let conn = ConnectionId::new_unchecked(0); let conn2 = ConnectionId::new_unchecked(1); @@ -2678,7 +2978,10 @@ mod tests { notif.on_connection_handler_event( peer, conn, - NotifsHandlerOut::OpenDesiredByRemote { protocol_index: 0 }, + NotifsHandlerOut::OpenDesiredByRemote { + protocol_index: 0, + handshake: vec![1, 3, 3, 7], + }, ); if let Some(PeerState::Incoming { connections, .. }) = notif.peers.get(&(peer, set_id)) { assert_eq!(connections.len(), 1); @@ -2709,7 +3012,7 @@ mod tests { #[test] fn close_connection_for_disabled_peer() { - let (mut notif, _controller) = development_notifs(); + let (mut notif, _controller, _notif_service) = development_notifs(); let peer = PeerId::random(); let conn = ConnectionId::new_unchecked(0); let set_id = SetId::from(0); @@ -2734,7 +3037,7 @@ mod tests { peer_id: peer, connection_id: conn, endpoint: &connected.clone(), - handler: NotifsHandler::new(peer, connected, vec![]), + handler: NotifsHandler::new(peer, connected, vec![], None), remaining_established: 0usize, }, )); @@ -2743,7 +3046,7 @@ mod tests { #[test] fn close_connection_for_incoming_peer_one_connection() { - let (mut notif, _controller) = development_notifs(); + let (mut notif, _controller, _notif_service) = development_notifs(); let peer = PeerId::random(); let conn = ConnectionId::new_unchecked(0); let set_id = SetId::from(0); @@ -2766,7 +3069,10 @@ mod tests { notif.on_connection_handler_event( peer, conn, - NotifsHandlerOut::OpenDesiredByRemote { protocol_index: 0 }, + NotifsHandlerOut::OpenDesiredByRemote { + protocol_index: 0, + handshake: vec![1, 3, 3, 7], + }, ); assert!(std::matches!(notif.peers.get(&(peer, set_id)), Some(&PeerState::Incoming { .. }))); @@ -2775,7 +3081,7 @@ mod tests { peer_id: peer, connection_id: conn, endpoint: &connected.clone(), - handler: NotifsHandler::new(peer, connected, vec![]), + handler: NotifsHandler::new(peer, connected, vec![], None), remaining_established: 0usize, }, )); @@ -2788,7 +3094,7 @@ mod tests { #[test] fn close_connection_for_incoming_peer_two_connections() { - let (mut notif, _controller) = development_notifs(); + let (mut notif, _controller, _notif_service) = development_notifs(); let peer = PeerId::random(); let conn = ConnectionId::new_unchecked(0); let conn1 = ConnectionId::new_unchecked(1); @@ -2815,7 +3121,10 @@ mod tests { notif.on_connection_handler_event( peer, conn, - NotifsHandlerOut::OpenDesiredByRemote { protocol_index: 0 }, + NotifsHandlerOut::OpenDesiredByRemote { + protocol_index: 0, + handshake: vec![1, 3, 3, 7], + }, ); assert!(std::matches!(notif.peers.get(&(peer, set_id)), Some(&PeerState::Incoming { .. 
}))); @@ -2842,7 +3151,7 @@ mod tests { peer_id: peer, connection_id: conn, endpoint: &connected.clone(), - handler: NotifsHandler::new(peer, connected, vec![]), + handler: NotifsHandler::new(peer, connected, vec![], None), remaining_established: 0usize, }, )); @@ -2857,7 +3166,7 @@ mod tests { #[test] fn connection_and_substream_open() { - let (mut notif, _controller) = development_notifs(); + let (mut notif, _controller, _notif_service) = development_notifs(); let peer = PeerId::random(); let conn = ConnectionId::new_unchecked(0); let set_id = SetId::from(0); @@ -2882,13 +3191,16 @@ mod tests { notif.on_connection_handler_event( peer, conn, - NotifsHandlerOut::OpenDesiredByRemote { protocol_index: 0 }, + NotifsHandlerOut::OpenDesiredByRemote { + protocol_index: 0, + handshake: vec![1, 3, 3, 7], + }, ); assert!(std::matches!(notif.peers.get(&(peer, set_id)), Some(&PeerState::Incoming { .. }))); // We rely on the implementation detail that incoming indices are counted // from 0 to not mock the `Peerset`. - notif.peerset_report_accept(IncomingIndex(0)); + notif.protocol_report_accept(IncomingIndex(0)); assert!(std::matches!(notif.peers.get(&(peer, set_id)), Some(&PeerState::Enabled { .. }))); // open new substream @@ -2911,7 +3223,7 @@ mod tests { #[test] fn connection_closed_sink_replaced() { - let (mut notif, _controller) = development_notifs(); + let (mut notif, _controller, _notif_service) = development_notifs(); let peer = PeerId::random(); let conn1 = ConnectionId::new_unchecked(0); let conn2 = ConnectionId::new_unchecked(1); @@ -2947,7 +3259,10 @@ mod tests { notif.on_connection_handler_event( peer, conn2, - NotifsHandlerOut::OpenDesiredByRemote { protocol_index: 0 }, + NotifsHandlerOut::OpenDesiredByRemote { + protocol_index: 0, + handshake: vec![1, 3, 3, 7], + }, ); if let Some(PeerState::Enabled { connections, .. 
}) = notif.peers.get(&(peer, set_id)) { @@ -2984,7 +3299,7 @@ mod tests { peer_id: peer, connection_id: conn1, endpoint: &connected.clone(), - handler: NotifsHandler::new(peer, connected, vec![]), + handler: NotifsHandler::new(peer, connected, vec![], None), remaining_established: 0usize, }, )); @@ -3005,7 +3320,7 @@ mod tests { #[test] fn dial_failure_for_requested_peer() { - let (mut notif, _controller) = development_notifs(); + let (mut notif, _controller, _notif_service) = development_notifs(); let peer = PeerId::random(); let set_id = SetId::from(0); @@ -3028,7 +3343,7 @@ mod tests { #[tokio::test] async fn write_notification() { - let (mut notif, _controller) = development_notifs(); + let (mut notif, _controller, _notif_service) = development_notifs(); let peer = PeerId::random(); let conn = ConnectionId::new_unchecked(0); let set_id = SetId::from(0); @@ -3077,7 +3392,7 @@ mod tests { #[test] fn peerset_report_connect_backoff_expired() { - let (mut notif, _controller) = development_notifs(); + let (mut notif, _controller, _notif_service) = development_notifs(); let set_id = SetId::from(0); let peer = PeerId::random(); let conn = ConnectionId::new_unchecked(0); @@ -3110,7 +3425,7 @@ mod tests { peer_id: peer, connection_id: conn, endpoint: &connected.clone(), - handler: NotifsHandler::new(peer, connected, vec![]), + handler: NotifsHandler::new(peer, connected, vec![], None), remaining_established: 0usize, }, )); @@ -3125,7 +3440,7 @@ mod tests { #[test] fn peerset_report_disconnect_disabled() { - let (mut notif, _controller) = development_notifs(); + let (mut notif, _controller, _notif_service) = development_notifs(); let peer = PeerId::random(); let set_id = SetId::from(0); let conn = ConnectionId::new_unchecked(0); @@ -3151,7 +3466,7 @@ mod tests { #[test] fn peerset_report_disconnect_backoff() { - let (mut notif, _controller) = development_notifs(); + let (mut notif, _controller, _notif_service) = development_notifs(); let set_id = SetId::from(0); let peer = PeerId::random(); let conn = ConnectionId::new_unchecked(0); @@ -3184,7 +3499,7 @@ mod tests { peer_id: peer, connection_id: conn, endpoint: &connected.clone(), - handler: NotifsHandler::new(peer, connected, vec![]), + handler: NotifsHandler::new(peer, connected, vec![], None), remaining_established: 0usize, }, )); @@ -3197,7 +3512,7 @@ mod tests { #[test] fn peer_is_backed_off_if_both_connections_get_closed_while_peer_is_disabled_with_back_off() { - let (mut notif, _controller) = development_notifs(); + let (mut notif, _controller, _notif_service) = development_notifs(); let set_id = SetId::from(0); let peer = PeerId::random(); let conn1 = ConnectionId::new_unchecked(0); @@ -3247,7 +3562,7 @@ mod tests { peer_id: peer, connection_id: conn1, endpoint: &connected.clone(), - handler: NotifsHandler::new(peer, connected.clone(), vec![]), + handler: NotifsHandler::new(peer, connected.clone(), vec![], None), remaining_established: 0usize, }, )); @@ -3261,7 +3576,7 @@ mod tests { peer_id: peer, connection_id: conn2, endpoint: &connected.clone(), - handler: NotifsHandler::new(peer, connected, vec![]), + handler: NotifsHandler::new(peer, connected, vec![], None), remaining_established: 0usize, }, )); @@ -3270,7 +3585,7 @@ mod tests { #[test] fn inject_connection_closed_incoming_with_backoff() { - let (mut notif, _controller) = development_notifs(); + let (mut notif, _controller, _notif_service) = development_notifs(); let peer = PeerId::random(); let set_id = SetId::from(0); let conn = ConnectionId::new_unchecked(0); @@ -3294,7 +3609,10 
@@ mod tests { notif.on_connection_handler_event( peer, conn, - NotifsHandlerOut::OpenDesiredByRemote { protocol_index: 0 }, + NotifsHandlerOut::OpenDesiredByRemote { + protocol_index: 0, + handshake: vec![1, 3, 3, 7], + }, ); // manually add backoff for the entry @@ -3312,7 +3630,7 @@ mod tests { peer_id: peer, connection_id: conn, endpoint: &connected.clone(), - handler: NotifsHandler::new(peer, connected, vec![]), + handler: NotifsHandler::new(peer, connected, vec![], None), remaining_established: 0usize, }, )); @@ -3321,7 +3639,7 @@ mod tests { #[test] fn two_connections_inactive_connection_gets_closed_peer_state_is_still_incoming() { - let (mut notif, _controller) = development_notifs(); + let (mut notif, _controller, _notif_service) = development_notifs(); let peer = PeerId::random(); let conn1 = ConnectionId::new_unchecked(0); let conn2 = ConnectionId::new_unchecked(1); @@ -3355,7 +3673,10 @@ mod tests { notif.on_connection_handler_event( peer, conn1, - NotifsHandlerOut::OpenDesiredByRemote { protocol_index: 0 }, + NotifsHandlerOut::OpenDesiredByRemote { + protocol_index: 0, + handshake: vec![1, 3, 3, 7], + }, ); assert!(std::matches!( notif.peers.get_mut(&(peer, 0.into())), @@ -3367,7 +3688,7 @@ mod tests { peer_id: peer, connection_id: conn2, endpoint: &connected.clone(), - handler: NotifsHandler::new(peer, connected, vec![]), + handler: NotifsHandler::new(peer, connected, vec![], None), remaining_established: 0usize, }, )); @@ -3376,7 +3697,7 @@ mod tests { #[test] fn two_connections_active_connection_gets_closed_peer_state_is_disabled() { - let (mut notif, _controller) = development_notifs(); + let (mut notif, _controller, _notif_service) = development_notifs(); let peer = PeerId::random(); let conn1 = ConnectionId::new_unchecked(0); let conn2 = ConnectionId::new_unchecked(1); @@ -3413,7 +3734,10 @@ mod tests { notif.on_connection_handler_event( peer, conn1, - NotifsHandlerOut::OpenDesiredByRemote { protocol_index: 0 }, + NotifsHandlerOut::OpenDesiredByRemote { + protocol_index: 0, + handshake: vec![1, 3, 3, 7], + }, ); assert!(std::matches!( notif.peers.get_mut(&(peer, 0.into())), @@ -3425,7 +3749,7 @@ mod tests { peer_id: peer, connection_id: conn1, endpoint: &connected.clone(), - handler: NotifsHandler::new(peer, connected, vec![]), + handler: NotifsHandler::new(peer, connected, vec![], None), remaining_established: 0usize, }, )); @@ -3434,7 +3758,7 @@ mod tests { #[test] fn inject_connection_closed_for_active_connection() { - let (mut notif, _controller) = development_notifs(); + let (mut notif, _controller, _notif_service) = development_notifs(); let peer = PeerId::random(); let conn1 = ConnectionId::new_unchecked(0); let conn2 = ConnectionId::new_unchecked(1); @@ -3494,7 +3818,7 @@ mod tests { peer_id: peer, connection_id: conn1, endpoint: &connected.clone(), - handler: NotifsHandler::new(peer, connected, vec![]), + handler: NotifsHandler::new(peer, connected, vec![], None), remaining_established: 0usize, }, )); @@ -3502,7 +3826,7 @@ mod tests { #[test] fn inject_dial_failure_for_pending_request() { - let (mut notif, _controller) = development_notifs(); + let (mut notif, _controller, _notif_service) = development_notifs(); let set_id = SetId::from(0); let peer = PeerId::random(); let conn = ConnectionId::new_unchecked(0); @@ -3535,7 +3859,7 @@ mod tests { peer_id: peer, connection_id: conn, endpoint: &connected.clone(), - handler: NotifsHandler::new(peer, connected, vec![]), + handler: NotifsHandler::new(peer, connected, vec![], None), remaining_established: 0usize, }, )); 
@@ -3565,7 +3889,7 @@ mod tests { #[test] fn peerstate_incoming_open_desired_by_remote() { - let (mut notif, _controller) = development_notifs(); + let (mut notif, _controller, _notif_service) = development_notifs(); let peer = PeerId::random(); let set_id = SetId::from(0); let conn1 = ConnectionId::new_unchecked(0); @@ -3599,7 +3923,10 @@ mod tests { notif.on_connection_handler_event( peer, conn1, - NotifsHandlerOut::OpenDesiredByRemote { protocol_index: 0 }, + NotifsHandlerOut::OpenDesiredByRemote { + protocol_index: 0, + handshake: vec![1, 3, 3, 7], + }, ); assert!(std::matches!(notif.peers.get(&(peer, set_id)), Some(&PeerState::Incoming { .. }))); @@ -3607,7 +3934,10 @@ mod tests { notif.on_connection_handler_event( peer, conn2, - NotifsHandlerOut::OpenDesiredByRemote { protocol_index: 0 }, + NotifsHandlerOut::OpenDesiredByRemote { + protocol_index: 0, + handshake: vec![1, 3, 3, 7], + }, ); if let Some(PeerState::Incoming { ref connections, .. }) = notif.peers.get(&(peer, set_id)) @@ -3619,7 +3949,7 @@ mod tests { #[tokio::test] async fn remove_backoff_peer_after_timeout() { - let (mut notif, _controller) = development_notifs(); + let (mut notif, _controller, _notif_service) = development_notifs(); let peer = PeerId::random(); let set_id = SetId::from(0); let conn = ConnectionId::new_unchecked(0); @@ -3652,7 +3982,7 @@ mod tests { peer_id: peer, connection_id: conn, endpoint: &connected.clone(), - handler: NotifsHandler::new(peer, connected, vec![]), + handler: NotifsHandler::new(peer, connected, vec![], None), remaining_established: 0usize, }, )); @@ -3697,7 +4027,7 @@ mod tests { #[tokio::test] async fn reschedule_disabled_pending_enable_when_connection_not_closed() { - let (mut notif, _controller) = development_notifs(); + let (mut notif, _controller, _notif_service) = development_notifs(); let peer = PeerId::random(); let conn = ConnectionId::new_unchecked(0); let set_id = SetId::from(0); @@ -3726,13 +4056,16 @@ mod tests { notif.on_connection_handler_event( peer, conn, - NotifsHandlerOut::OpenDesiredByRemote { protocol_index: 0 }, + NotifsHandlerOut::OpenDesiredByRemote { + protocol_index: 0, + handshake: vec![1, 3, 3, 7], + }, ); assert!(std::matches!(notif.peers.get(&(peer, set_id)), Some(&PeerState::Incoming { .. }))); // we rely on the implementation detail that incoming indices are counted from 0 // to not mock the `Peerset` - notif.peerset_report_accept(IncomingIndex(0)); + notif.protocol_report_accept(IncomingIndex(0)); assert!(std::matches!(notif.peers.get(&(peer, set_id)), Some(&PeerState::Enabled { .. }))); let event = conn_yielder.open_substream(peer, 0, connected, vec![1, 2, 3, 4]); @@ -3815,7 +4148,7 @@ mod tests { #[should_panic] #[cfg(debug_assertions)] fn peerset_report_connect_with_enabled_peer() { - let (mut notif, _controller) = development_notifs(); + let (mut notif, _controller, _notif_service) = development_notifs(); let peer = PeerId::random(); let conn = ConnectionId::new_unchecked(0); let set_id = SetId::from(0); @@ -3840,7 +4173,10 @@ mod tests { notif.on_connection_handler_event( peer, conn, - NotifsHandlerOut::OpenDesiredByRemote { protocol_index: 0 }, + NotifsHandlerOut::OpenDesiredByRemote { + protocol_index: 0, + handshake: vec![1, 3, 3, 7], + }, ); assert!(std::matches!(notif.peers.get(&(peer, set_id)), Some(&PeerState::Incoming { .. 
}))); @@ -3865,7 +4201,7 @@ mod tests { #[test] #[cfg(debug_assertions)] fn peerset_report_connect_with_disabled_pending_enable_peer() { - let (mut notif, _controller) = development_notifs(); + let (mut notif, _controller, _notif_service) = development_notifs(); let set_id = SetId::from(0); let peer = PeerId::random(); let conn = ConnectionId::new_unchecked(0); @@ -3911,7 +4247,7 @@ mod tests { #[test] #[cfg(debug_assertions)] fn peerset_report_connect_with_requested_peer() { - let (mut notif, _controller) = development_notifs(); + let (mut notif, _controller, _notif_service) = development_notifs(); let peer = PeerId::random(); let set_id = SetId::from(0); @@ -3927,7 +4263,7 @@ mod tests { #[test] #[cfg(debug_assertions)] fn peerset_report_connect_with_pending_requested() { - let (mut notif, _controller) = development_notifs(); + let (mut notif, _controller, _notif_service) = development_notifs(); let set_id = SetId::from(0); let peer = PeerId::random(); let conn = ConnectionId::new_unchecked(0); @@ -3960,7 +4296,7 @@ mod tests { peer_id: peer, connection_id: conn, endpoint: &connected.clone(), - handler: NotifsHandler::new(peer, connected, vec![]), + handler: NotifsHandler::new(peer, connected, vec![], None), remaining_established: 0usize, }, )); @@ -3984,7 +4320,7 @@ mod tests { #[test] #[cfg(debug_assertions)] fn peerset_report_connect_with_incoming_peer() { - let (mut notif, _controller) = development_notifs(); + let (mut notif, _controller, _notif_service) = development_notifs(); let peer = PeerId::random(); let set_id = SetId::from(0); let conn = ConnectionId::new_unchecked(0); @@ -4008,7 +4344,10 @@ mod tests { notif.on_connection_handler_event( peer, conn, - NotifsHandlerOut::OpenDesiredByRemote { protocol_index: 0 }, + NotifsHandlerOut::OpenDesiredByRemote { + protocol_index: 0, + handshake: vec![1, 3, 3, 7], + }, ); assert!(std::matches!(notif.peers.get(&(peer, set_id)), Some(&PeerState::Incoming { .. }))); @@ -4019,7 +4358,7 @@ mod tests { #[test] #[cfg(debug_assertions)] fn peerset_report_disconnect_with_incoming_peer() { - let (mut notif, _controller) = development_notifs(); + let (mut notif, _controller, _notif_service) = development_notifs(); let peer = PeerId::random(); let set_id = SetId::from(0); let conn = ConnectionId::new_unchecked(0); @@ -4043,7 +4382,10 @@ mod tests { notif.on_connection_handler_event( peer, conn, - NotifsHandlerOut::OpenDesiredByRemote { protocol_index: 0 }, + NotifsHandlerOut::OpenDesiredByRemote { + protocol_index: 0, + handshake: vec![1, 3, 3, 7], + }, ); assert!(std::matches!(notif.peers.get(&(peer, set_id)), Some(&PeerState::Incoming { .. }))); @@ -4052,13 +4394,68 @@ mod tests { } #[test] - #[should_panic] #[cfg(debug_assertions)] - fn peerset_report_accept_incoming_peer() { - let (mut notif, _controller) = development_notifs(); + fn peerset_report_disconnect_with_incoming_peer_protocol_accepts() { + let (mut notif, _controller, _notif_service) = development_notifs(); let peer = PeerId::random(); + let set_id = SetId::from(0); let conn = ConnectionId::new_unchecked(0); + let connected = ConnectedPoint::Listener { + local_addr: Multiaddr::empty(), + send_back_addr: Multiaddr::empty(), + }; + + notif.on_swarm_event(FromSwarm::ConnectionEstablished( + libp2p::swarm::behaviour::ConnectionEstablished { + peer_id: peer, + connection_id: conn, + endpoint: &connected, + failed_addresses: &[], + other_established: 0usize, + }, + )); + assert!(std::matches!(notif.peers.get(&(peer, set_id)), Some(&PeerState::Disabled { .. 
}))); + + // remote opens a substream, verify that peer state is updated to `Incoming` + notif.on_connection_handler_event( + peer, + conn, + NotifsHandlerOut::OpenDesiredByRemote { + protocol_index: 0, + handshake: vec![1, 3, 3, 7], + }, + ); + assert!(std::matches!(notif.peers.get(&(peer, set_id)), Some(&PeerState::Incoming { .. }))); + + // `Peerset` wants to disconnect the peer but since it's still under validation, + // it won't be disabled automatically + notif.peerset_report_disconnect(peer, set_id); + + let incoming_index = match notif.peers.get(&(peer, set_id)) { + Some(&PeerState::Incoming { peerset_rejected, incoming_index, .. }) => { + assert!(peerset_rejected); + incoming_index + }, + state => panic!("invalid state: {state:?}"), + }; + + // protocol accepted peer but since `Peerset` wanted to disconnect it, the peer will be + // disabled + notif.protocol_report_accept(incoming_index); + + match notif.peers.get(&(peer, set_id)) { + Some(&PeerState::Disabled { .. }) => {}, + state => panic!("invalid state: {state:?}"), + }; + } + + #[test] + #[cfg(debug_assertions)] + fn peer_disconnected_protocol_accepts() { + let (mut notif, _controller, _notif_service) = development_notifs(); + let peer = PeerId::random(); let set_id = SetId::from(0); + let conn = ConnectionId::new_unchecked(0); let connected = ConnectedPoint::Listener { local_addr: Multiaddr::empty(), send_back_addr: Multiaddr::empty(), @@ -4079,24 +4476,188 @@ mod tests { notif.on_connection_handler_event( peer, conn, - NotifsHandlerOut::OpenDesiredByRemote { protocol_index: 0 }, + NotifsHandlerOut::OpenDesiredByRemote { + protocol_index: 0, + handshake: vec![1, 3, 3, 7], + }, ); assert!(std::matches!(notif.peers.get(&(peer, set_id)), Some(&PeerState::Incoming { .. }))); - assert!(std::matches!( - notif.incoming[0], - IncomingPeer { alive: true, incoming_id: IncomingIndex(0), .. }, + assert!(notif.incoming.iter().any(|entry| entry.incoming_id == IncomingIndex(0))); + notif.disconnect_peer(&peer, set_id); + + // since the connection was closed, nothing happens for the peer state because + // there is nothing actionable + notif.protocol_report_accept(IncomingIndex(0)); + + match notif.peers.get(&(peer, set_id)) { + Some(&PeerState::Disabled { .. }) => {}, + state => panic!("invalid state: {state:?}"), + }; + + assert!(!notif.incoming.iter().any(|entry| entry.incoming_id == IncomingIndex(0))); + } + + #[test] + #[cfg(debug_assertions)] + fn connection_closed_protocol_accepts() { + let (mut notif, _controller, _notif_service) = development_notifs(); + let peer = PeerId::random(); + let set_id = SetId::from(0); + let conn = ConnectionId::new_unchecked(0); + let connected = ConnectedPoint::Listener { + local_addr: Multiaddr::empty(), + send_back_addr: Multiaddr::empty(), + }; + + notif.on_swarm_event(FromSwarm::ConnectionEstablished( + libp2p::swarm::behaviour::ConnectionEstablished { + peer_id: peer, + connection_id: conn, + endpoint: &connected, + failed_addresses: &[], + other_established: 0usize, + }, + )); + assert!(std::matches!(notif.peers.get(&(peer, set_id)), Some(&PeerState::Disabled { .. }))); + + // remote opens a substream, verify that peer state is updated to `Incoming` + notif.on_connection_handler_event( + peer, + conn, + NotifsHandlerOut::OpenDesiredByRemote { + protocol_index: 0, + handshake: vec![1, 3, 3, 7], + }, + ); + assert!(std::matches!(notif.peers.get(&(peer, set_id)), Some(&PeerState::Incoming { .. 
}))); + + notif.on_swarm_event(FromSwarm::ConnectionClosed( + libp2p::swarm::behaviour::ConnectionClosed { + peer_id: peer, + connection_id: ConnectionId::new_unchecked(0), + endpoint: &connected.clone(), + handler: NotifsHandler::new(peer, connected, vec![], None), + remaining_established: 0usize, + }, )); - notif.peers.remove(&(peer, set_id)); - notif.peerset_report_accept(IncomingIndex(0)); + // connection closed, nothing to do + notif.protocol_report_accept(IncomingIndex(0)); + + match notif.peers.get(&(peer, set_id)) { + None => {}, + state => panic!("invalid state: {state:?}"), + }; + } + + #[test] + #[cfg(debug_assertions)] + fn peer_disconnected_protocol_reject() { + let (mut notif, _controller, _notif_service) = development_notifs(); + let peer = PeerId::random(); + let set_id = SetId::from(0); + let conn = ConnectionId::new_unchecked(0); + let connected = ConnectedPoint::Listener { + local_addr: Multiaddr::empty(), + send_back_addr: Multiaddr::empty(), + }; + + notif.on_swarm_event(FromSwarm::ConnectionEstablished( + libp2p::swarm::behaviour::ConnectionEstablished { + peer_id: peer, + connection_id: conn, + endpoint: &connected, + failed_addresses: &[], + other_established: 0usize, + }, + )); + assert!(std::matches!(notif.peers.get(&(peer, set_id)), Some(&PeerState::Disabled { .. }))); + + // remote opens a substream, verify that peer state is updated to `Incoming` + notif.on_connection_handler_event( + peer, + conn, + NotifsHandlerOut::OpenDesiredByRemote { + protocol_index: 0, + handshake: vec![1, 3, 3, 7], + }, + ); + assert!(std::matches!(notif.peers.get(&(peer, set_id)), Some(&PeerState::Incoming { .. }))); + + assert!(notif.incoming.iter().any(|entry| entry.incoming_id == IncomingIndex(0))); + notif.disconnect_peer(&peer, set_id); + + // since the connection was closed, nothing happens for the peer state because + // there is nothing actionable + notif.protocol_report_reject(IncomingIndex(0)); + + match notif.peers.get(&(peer, set_id)) { + Some(&PeerState::Disabled { .. }) => {}, + state => panic!("invalid state: {state:?}"), + }; + + assert!(!notif.incoming.iter().any(|entry| entry.incoming_id == IncomingIndex(0))); + } + + #[test] + #[cfg(debug_assertions)] + fn connection_closed_protocol_rejects() { + let (mut notif, _controller, _notif_service) = development_notifs(); + let peer = PeerId::random(); + let set_id = SetId::from(0); + let conn = ConnectionId::new_unchecked(0); + let connected = ConnectedPoint::Listener { + local_addr: Multiaddr::empty(), + send_back_addr: Multiaddr::empty(), + }; + + notif.on_swarm_event(FromSwarm::ConnectionEstablished( + libp2p::swarm::behaviour::ConnectionEstablished { + peer_id: peer, + connection_id: conn, + endpoint: &connected, + failed_addresses: &[], + other_established: 0usize, + }, + )); + assert!(std::matches!(notif.peers.get(&(peer, set_id)), Some(&PeerState::Disabled { .. }))); + + // remote opens a substream, verify that peer state is updated to `Incoming` + notif.on_connection_handler_event( + peer, + conn, + NotifsHandlerOut::OpenDesiredByRemote { + protocol_index: 0, + handshake: vec![1, 3, 3, 7], + }, + ); + assert!(std::matches!(notif.peers.get(&(peer, set_id)), Some(&PeerState::Incoming { .. 
}))); + + notif.on_swarm_event(FromSwarm::ConnectionClosed( + libp2p::swarm::behaviour::ConnectionClosed { + peer_id: peer, + connection_id: ConnectionId::new_unchecked(0), + endpoint: &connected.clone(), + handler: NotifsHandler::new(peer, connected, vec![], None), + remaining_established: 0usize, + }, + )); + + // connection closed, nothing to do + notif.protocol_report_reject(IncomingIndex(0)); + + match notif.peers.get(&(peer, set_id)) { + None => {}, + state => panic!("invalid state: {state:?}"), + }; } #[test] #[should_panic] #[cfg(debug_assertions)] - fn peerset_report_accept_not_incoming_peer() { - let (mut notif, _controller) = development_notifs(); + fn protocol_report_accept_not_incoming_peer() { + let (mut notif, _controller, _notif_service) = development_notifs(); let peer = PeerId::random(); let conn = ConnectionId::new_unchecked(0); let set_id = SetId::from(0); @@ -4121,7 +4682,10 @@ mod tests { notif.on_connection_handler_event( peer, conn, - NotifsHandlerOut::OpenDesiredByRemote { protocol_index: 0 }, + NotifsHandlerOut::OpenDesiredByRemote { + protocol_index: 0, + handshake: vec![1, 3, 3, 7], + }, ); assert!(std::matches!(notif.peers.get(&(peer, set_id)), Some(&PeerState::Incoming { .. }))); @@ -4138,14 +4702,14 @@ mod tests { assert!(std::matches!(notif.peers.get(&(peer, set_id)), Some(&PeerState::Enabled { .. }))); notif.incoming[0].alive = true; - notif.peerset_report_accept(IncomingIndex(0)); + notif.protocol_report_accept(IncomingIndex(0)); } #[test] #[should_panic] #[cfg(debug_assertions)] fn inject_connection_closed_non_existent_peer() { - let (mut notif, _controller) = development_notifs(); + let (mut notif, _controller, _notif_service) = development_notifs(); let peer = PeerId::random(); let endpoint = ConnectedPoint::Listener { local_addr: Multiaddr::empty(), @@ -4157,7 +4721,7 @@ mod tests { peer_id: peer, connection_id: ConnectionId::new_unchecked(0), endpoint: &endpoint.clone(), - handler: NotifsHandler::new(peer, endpoint, vec![]), + handler: NotifsHandler::new(peer, endpoint, vec![], None), remaining_established: 0usize, }, )); @@ -4165,7 +4729,7 @@ mod tests { #[test] fn disconnect_non_existent_peer() { - let (mut notif, _controller) = development_notifs(); + let (mut notif, _controller, _notif_service) = development_notifs(); let peer = PeerId::random(); let set_id = SetId::from(0); @@ -4177,9 +4741,9 @@ mod tests { #[test] fn accept_non_existent_connection() { - let (mut notif, _controller) = development_notifs(); + let (mut notif, _controller, _notif_service) = development_notifs(); - notif.peerset_report_accept(0.into()); + notif.protocol_report_accept(0.into()); assert!(notif.peers.is_empty()); assert!(notif.incoming.is_empty()); @@ -4187,9 +4751,9 @@ mod tests { #[test] fn reject_non_existent_connection() { - let (mut notif, _controller) = development_notifs(); + let (mut notif, _controller, _notif_service) = development_notifs(); - notif.peerset_report_reject(0.into()); + notif.protocol_report_reject(0.into()); assert!(notif.peers.is_empty()); assert!(notif.incoming.is_empty()); @@ -4197,7 +4761,7 @@ mod tests { #[test] fn reject_non_active_connection() { - let (mut notif, _controller) = development_notifs(); + let (mut notif, _controller, _notif_service) = development_notifs(); let peer = PeerId::random(); let conn = ConnectionId::new_unchecked(0); let set_id = SetId::from(0); @@ -4221,61 +4785,24 @@ mod tests { notif.on_connection_handler_event( peer, conn, - NotifsHandlerOut::OpenDesiredByRemote { protocol_index: 0 }, + 
NotifsHandlerOut::OpenDesiredByRemote { + protocol_index: 0, + handshake: vec![1, 3, 3, 7], + }, ); assert!(std::matches!(notif.peers.get(&(peer, set_id)), Some(&PeerState::Incoming { .. }))); notif.incoming[0].alive = false; - notif.peerset_report_reject(0.into()); - - assert!(std::matches!(notif.peers.get(&(peer, set_id)), Some(&PeerState::Incoming { .. }))); - } - - #[test] - #[should_panic] - #[cfg(debug_assertions)] - fn reject_non_existent_peer_but_alive_connection() { - let (mut notif, _controller) = development_notifs(); - let peer = PeerId::random(); - let conn = ConnectionId::new_unchecked(0); - let set_id = SetId::from(0); - let connected = ConnectedPoint::Listener { - local_addr: Multiaddr::empty(), - send_back_addr: Multiaddr::empty(), - }; - - notif.on_swarm_event(FromSwarm::ConnectionEstablished( - libp2p::swarm::behaviour::ConnectionEstablished { - peer_id: peer, - connection_id: conn, - endpoint: &connected, - failed_addresses: &[], - other_established: 0usize, - }, - )); - assert!(std::matches!(notif.peers.get(&(peer, set_id)), Some(&PeerState::Disabled { .. }))); + notif.protocol_report_reject(0.into()); - // remote opens a substream, verify that peer state is updated to `Incoming` - notif.on_connection_handler_event( - peer, - conn, - NotifsHandlerOut::OpenDesiredByRemote { protocol_index: 0 }, - ); assert!(std::matches!(notif.peers.get(&(peer, set_id)), Some(&PeerState::Incoming { .. }))); - assert!(std::matches!( - notif.incoming[0], - IncomingPeer { alive: true, incoming_id: IncomingIndex(0), .. }, - )); - - notif.peers.remove(&(peer, set_id)); - notif.peerset_report_reject(0.into()); } #[test] #[should_panic] #[cfg(debug_assertions)] fn inject_non_existent_connection_closed_for_incoming_peer() { - let (mut notif, _controller) = development_notifs(); + let (mut notif, _controller, _notif_service) = development_notifs(); let peer = PeerId::random(); let conn = ConnectionId::new_unchecked(0); let set_id = SetId::from(0); @@ -4299,7 +4826,10 @@ mod tests { notif.on_connection_handler_event( peer, conn, - NotifsHandlerOut::OpenDesiredByRemote { protocol_index: 0 }, + NotifsHandlerOut::OpenDesiredByRemote { + protocol_index: 0, + handshake: vec![1, 3, 3, 7], + }, ); assert!(std::matches!(notif.peers.get(&(peer, set_id)), Some(&PeerState::Incoming { .. 
}))); @@ -4308,7 +4838,7 @@ mod tests { peer_id: peer, connection_id: ConnectionId::new_unchecked(1337), endpoint: &connected.clone(), - handler: NotifsHandler::new(peer, connected, vec![]), + handler: NotifsHandler::new(peer, connected, vec![], None), remaining_established: 0usize, }, )); @@ -4318,7 +4848,7 @@ mod tests { #[should_panic] #[cfg(debug_assertions)] fn inject_non_existent_connection_closed_for_disabled_peer() { - let (mut notif, _controller) = development_notifs(); + let (mut notif, _controller, _notif_service) = development_notifs(); let set_id = SetId::from(0); let peer = PeerId::random(); let conn = ConnectionId::new_unchecked(0); @@ -4343,7 +4873,7 @@ mod tests { peer_id: peer, connection_id: ConnectionId::new_unchecked(1337), endpoint: &connected.clone(), - handler: NotifsHandler::new(peer, connected, vec![]), + handler: NotifsHandler::new(peer, connected, vec![], None), remaining_established: 0usize, }, )); @@ -4353,7 +4883,7 @@ mod tests { #[should_panic] #[cfg(debug_assertions)] fn inject_non_existent_connection_closed_for_disabled_pending_enable() { - let (mut notif, _controller) = development_notifs(); + let (mut notif, _controller, _notif_service) = development_notifs(); let set_id = SetId::from(0); let peer = PeerId::random(); let conn = ConnectionId::new_unchecked(0); @@ -4394,7 +4924,7 @@ mod tests { peer_id: peer, connection_id: ConnectionId::new_unchecked(1337), endpoint: &connected.clone(), - handler: NotifsHandler::new(peer, connected, vec![]), + handler: NotifsHandler::new(peer, connected, vec![], None), remaining_established: 0usize, }, )); @@ -4404,7 +4934,7 @@ mod tests { #[should_panic] #[cfg(debug_assertions)] fn inject_connection_closed_for_incoming_peer_state_mismatch() { - let (mut notif, _controller) = development_notifs(); + let (mut notif, _controller, _notif_service) = development_notifs(); let peer = PeerId::random(); let conn = ConnectionId::new_unchecked(0); let set_id = SetId::from(0); @@ -4428,7 +4958,10 @@ mod tests { notif.on_connection_handler_event( peer, conn, - NotifsHandlerOut::OpenDesiredByRemote { protocol_index: 0 }, + NotifsHandlerOut::OpenDesiredByRemote { + protocol_index: 0, + handshake: vec![1, 3, 3, 7], + }, ); assert!(std::matches!(notif.peers.get(&(peer, set_id)), Some(&PeerState::Incoming { .. }))); notif.incoming[0].alive = false; @@ -4438,7 +4971,7 @@ mod tests { peer_id: peer, connection_id: conn, endpoint: &connected.clone(), - handler: NotifsHandler::new(peer, connected, vec![]), + handler: NotifsHandler::new(peer, connected, vec![], None), remaining_established: 0usize, }, )); @@ -4448,7 +4981,7 @@ mod tests { #[should_panic] #[cfg(debug_assertions)] fn inject_connection_closed_for_enabled_state_mismatch() { - let (mut notif, _controller) = development_notifs(); + let (mut notif, _controller, _notif_service) = development_notifs(); let peer = PeerId::random(); let conn = ConnectionId::new_unchecked(0); let set_id = SetId::from(0); @@ -4472,7 +5005,10 @@ mod tests { notif.on_connection_handler_event( peer, conn, - NotifsHandlerOut::OpenDesiredByRemote { protocol_index: 0 }, + NotifsHandlerOut::OpenDesiredByRemote { + protocol_index: 0, + handshake: vec![1, 3, 3, 7], + }, ); assert!(std::matches!(notif.peers.get(&(peer, set_id)), Some(&PeerState::Incoming { .. 
}))); @@ -4485,7 +5021,7 @@ mod tests { peer_id: peer, connection_id: ConnectionId::new_unchecked(1337), endpoint: &connected.clone(), - handler: NotifsHandler::new(peer, connected, vec![]), + handler: NotifsHandler::new(peer, connected, vec![], None), remaining_established: 0usize, }, )); @@ -4495,7 +5031,7 @@ mod tests { #[should_panic] #[cfg(debug_assertions)] fn inject_connection_closed_for_backoff_peer() { - let (mut notif, _controller) = development_notifs(); + let (mut notif, _controller, _notif_service) = development_notifs(); let set_id = SetId::from(0); let peer = PeerId::random(); let conn = ConnectionId::new_unchecked(0); @@ -4528,7 +5064,7 @@ mod tests { peer_id: peer, connection_id: conn, endpoint: &connected.clone(), - handler: NotifsHandler::new(peer, connected.clone(), vec![]), + handler: NotifsHandler::new(peer, connected.clone(), vec![], None), remaining_established: 0usize, }, )); @@ -4539,7 +5075,7 @@ mod tests { peer_id: peer, connection_id: conn, endpoint: &connected.clone(), - handler: NotifsHandler::new(peer, connected, vec![]), + handler: NotifsHandler::new(peer, connected, vec![], None), remaining_established: 0usize, }, )); @@ -4549,7 +5085,7 @@ mod tests { #[should_panic] #[cfg(debug_assertions)] fn open_result_ok_non_existent_peer() { - let (mut notif, _controller) = development_notifs(); + let (mut notif, _controller, _notif_service) = development_notifs(); let conn = ConnectionId::new_unchecked(0); let connected = ConnectedPoint::Listener { local_addr: Multiaddr::empty(), diff --git a/substrate/client/network/src/protocol/notifications/handler.rs b/substrate/client/network/src/protocol/notifications/handler.rs index cffdec7d71e..28662be29fe 100644 --- a/substrate/client/network/src/protocol/notifications/handler.rs +++ b/substrate/client/network/src/protocol/notifications/handler.rs @@ -58,9 +58,12 @@ //! [`NotifsHandlerIn::Open`] has gotten an answer. use crate::{ - protocol::notifications::upgrade::{ - NotificationsIn, NotificationsInSubstream, NotificationsOut, NotificationsOutSubstream, - UpgradeCollec, + protocol::notifications::{ + service::metrics, + upgrade::{ + NotificationsIn, NotificationsInSubstream, NotificationsOut, NotificationsOutSubstream, + UpgradeCollec, + }, }, types::ProtocolName, }; @@ -92,7 +95,7 @@ use std::{ /// Number of pending notifications in asynchronous contexts. /// See [`NotificationsSink::reserve_notification`] for context. -const ASYNC_NOTIFICATIONS_BUFFER_SIZE: usize = 8; +pub(crate) const ASYNC_NOTIFICATIONS_BUFFER_SIZE: usize = 8; /// Number of pending notifications in synchronous contexts. const SYNC_NOTIFICATIONS_BUFFER_SIZE: usize = 2048; @@ -126,11 +129,19 @@ pub struct NotifsHandler { events_queue: VecDeque< ConnectionHandlerEvent, >, + + /// Metrics. + metrics: Option>, } impl NotifsHandler { /// Creates new [`NotifsHandler`]. - pub fn new(peer_id: PeerId, endpoint: ConnectedPoint, protocols: Vec) -> Self { + pub fn new( + peer_id: PeerId, + endpoint: ConnectedPoint, + protocols: Vec, + metrics: Option, + ) -> Self { Self { protocols: protocols .into_iter() @@ -148,6 +159,7 @@ impl NotifsHandler { endpoint, when_connection_open: Instant::now(), events_queue: VecDeque::with_capacity(16), + metrics: metrics.map_or(None, |metrics| Some(Arc::new(metrics))), } } } @@ -303,6 +315,8 @@ pub enum NotifsHandlerOut { OpenDesiredByRemote { /// Index of the protocol in the list of protocols passed at initialization. protocol_index: usize, + /// Received handshake. 
+ handshake: Vec, }, /// The remote would like the substreams to be closed. Send a [`NotifsHandlerIn::Close`] in @@ -331,6 +345,36 @@ pub enum NotifsHandlerOut { #[derive(Debug, Clone)] pub struct NotificationsSink { inner: Arc, + metrics: Option>, +} + +impl NotificationsSink { + /// Create new [`NotificationsSink`]. + /// NOTE: only used for testing but must be `pub` as other crates in `client/network` use this. + pub fn new( + peer_id: PeerId, + ) -> (Self, mpsc::Receiver, mpsc::Receiver) + { + let (async_tx, async_rx) = mpsc::channel(ASYNC_NOTIFICATIONS_BUFFER_SIZE); + let (sync_tx, sync_rx) = mpsc::channel(SYNC_NOTIFICATIONS_BUFFER_SIZE); + ( + NotificationsSink { + inner: Arc::new(NotificationsSinkInner { + peer_id, + async_channel: FuturesMutex::new(async_tx), + sync_channel: Mutex::new(Some(sync_tx)), + }), + metrics: None, + }, + async_rx, + sync_rx, + ) + } + + /// Get reference to metrics. + pub fn metrics(&self) -> &Option> { + &self.metrics + } } #[derive(Debug)] @@ -350,8 +394,8 @@ struct NotificationsSinkInner { /// Message emitted through the [`NotificationsSink`] and processed by the background task /// dedicated to the peer. -#[derive(Debug)] -enum NotificationsSinkMessage { +#[derive(Debug, PartialEq, Eq)] +pub enum NotificationsSinkMessage { /// Message emitted by [`NotificationsSink::reserve_notification`] and /// [`NotificationsSink::write_notification_now`]. Notification { message: Vec }, @@ -379,8 +423,8 @@ impl NotificationsSink { let mut lock = self.inner.sync_channel.lock(); if let Some(tx) = lock.as_mut() { - let result = - tx.try_send(NotificationsSinkMessage::Notification { message: message.into() }); + let message = message.into(); + let result = tx.try_send(NotificationsSinkMessage::Notification { message }); if result.is_err() { // Cloning the `mpsc::Sender` guarantees the allocation of an extra spot in the @@ -476,7 +520,10 @@ impl ConnectionHandler for NotifsHandler { match protocol_info.state { State::Closed { pending_opening } => { self.events_queue.push_back(ConnectionHandlerEvent::Custom( - NotifsHandlerOut::OpenDesiredByRemote { protocol_index }, + NotifsHandlerOut::OpenDesiredByRemote { + protocol_index, + handshake: in_substream_open.handshake, + }, )); protocol_info.state = State::OpenDesiredByRemote { @@ -531,6 +578,7 @@ impl ConnectionHandler for NotifsHandler { async_channel: FuturesMutex::new(async_tx), sync_channel: Mutex::new(Some(sync_tx)), }), + metrics: self.metrics.clone(), }; self.protocols[protocol_index].state = State::Open { @@ -881,6 +929,7 @@ pub mod tests { async_channel: FuturesMutex::new(async_tx), sync_channel: Mutex::new(Some(sync_tx)), }), + metrics: None, }; let (in_substream, out_substream) = MockSubstream::new(); @@ -1040,6 +1089,7 @@ pub mod tests { }, peer_id: PeerId::random(), events_queue: VecDeque::new(), + metrics: None, } } @@ -1545,6 +1595,7 @@ pub mod tests { async_channel: FuturesMutex::new(async_tx), sync_channel: Mutex::new(Some(sync_tx)), }), + metrics: None, }; handler.protocols[0].state = State::Open { @@ -1597,7 +1648,7 @@ pub mod tests { assert!(std::matches!( handler.poll(cx), Poll::Ready(ConnectionHandlerEvent::Custom( - NotifsHandlerOut::OpenDesiredByRemote { protocol_index: 0 }, + NotifsHandlerOut::OpenDesiredByRemote { protocol_index: 0, .. 
}, )) )); assert!(std::matches!( diff --git a/substrate/client/network/src/protocol/notifications/service/metrics.rs b/substrate/client/network/src/protocol/notifications/service/metrics.rs new file mode 100644 index 00000000000..2a57d57c175 --- /dev/null +++ b/substrate/client/network/src/protocol/notifications/service/metrics.rs @@ -0,0 +1,130 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +use crate::types::ProtocolName; + +use prometheus_endpoint::{ + self as prometheus, CounterVec, HistogramOpts, HistogramVec, Opts, PrometheusError, Registry, + U64, +}; + +use std::sync::Arc; + +/// Notification metrics. +#[derive(Debug, Clone)] +pub struct Metrics { + // Total number of opened substreams. + pub notifications_streams_opened_total: CounterVec, + + /// Total number of closed substreams. + pub notifications_streams_closed_total: CounterVec, + + /// In/outbound notification sizes. + pub notifications_sizes: HistogramVec, +} + +impl Metrics { + fn register(registry: &Registry) -> Result { + Ok(Self { + notifications_sizes: prometheus::register( + HistogramVec::new( + HistogramOpts { + common_opts: Opts::new( + "substrate_sub_libp2p_notifications_sizes", + "Sizes of the notifications send to and received from all nodes", + ), + buckets: prometheus::exponential_buckets(64.0, 4.0, 8) + .expect("parameters are always valid values; qed"), + }, + &["direction", "protocol"], + )?, + registry, + )?, + notifications_streams_closed_total: prometheus::register( + CounterVec::new( + Opts::new( + "substrate_sub_libp2p_notifications_streams_closed_total", + "Total number of notification substreams that have been closed", + ), + &["protocol"], + )?, + registry, + )?, + notifications_streams_opened_total: prometheus::register( + CounterVec::new( + Opts::new( + "substrate_sub_libp2p_notifications_streams_opened_total", + "Total number of notification substreams that have been opened", + ), + &["protocol"], + )?, + registry, + )?, + }) + } +} + +/// Register metrics. +pub fn register(registry: &Registry) -> Result { + Metrics::register(registry) +} + +/// Register opened substream to Prometheus. +pub fn register_substream_opened(metrics: &Option, protocol: &ProtocolName) { + if let Some(metrics) = metrics { + metrics.notifications_streams_opened_total.with_label_values(&[&protocol]).inc(); + } +} + +/// Register closed substream to Prometheus. +pub fn register_substream_closed(metrics: &Option, protocol: &ProtocolName) { + if let Some(metrics) = metrics { + metrics + .notifications_streams_closed_total + .with_label_values(&[&protocol[..]]) + .inc(); + } +} + +/// Register sent notification to Prometheus. 
+pub fn register_notification_sent(
+	metrics: &Option<Arc<Metrics>>,
+	protocol: &ProtocolName,
+	size: usize,
+) {
+	if let Some(metrics) = metrics {
+		metrics
+			.notifications_sizes
+			.with_label_values(&["out", protocol])
+			.observe(size as f64);
+	}
+}
+
+/// Register received notification to Prometheus.
+pub fn register_notification_received(
+	metrics: &Option<Metrics>,
+	protocol: &ProtocolName,
+	size: usize,
+) {
+	if let Some(metrics) = metrics {
+		metrics
+			.notifications_sizes
+			.with_label_values(&["in", protocol])
+			.observe(size as f64);
+	}
+}
diff --git a/substrate/client/network/src/protocol/notifications/service/mod.rs b/substrate/client/network/src/protocol/notifications/service/mod.rs
new file mode 100644
index 00000000000..62e6d88a3d5
--- /dev/null
+++ b/substrate/client/network/src/protocol/notifications/service/mod.rs
@@ -0,0 +1,634 @@
+// This file is part of Substrate.
+
+// Copyright (C) Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
+
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with this program. If not, see <https://www.gnu.org/licenses/>.
+
+//! Notification service implementation.
+
+use crate::{
+	error,
+	protocol::notifications::handler::NotificationsSink,
+	service::traits::{
+		Direction, MessageSink, NotificationEvent, NotificationService, ValidationResult,
+	},
+	types::ProtocolName,
+};
+
+use futures::{
+	stream::{FuturesUnordered, Stream},
+	StreamExt,
+};
+use libp2p::PeerId;
+use parking_lot::Mutex;
+use tokio::sync::{mpsc, oneshot};
+use tokio_stream::wrappers::ReceiverStream;
+
+use sc_utils::mpsc::{tracing_unbounded, TracingUnboundedReceiver, TracingUnboundedSender};
+
+use std::{collections::HashMap, fmt::Debug, sync::Arc};
+
+pub(crate) mod metrics;
+
+#[cfg(test)]
+mod tests;
+
+/// Logging target for the file.
+const LOG_TARGET: &str = "sub-libp2p";
+
+/// Default command queue size.
+const COMMAND_QUEUE_SIZE: usize = 64;
+
+/// Type representing subscribers of a notification protocol.
+type Subscribers = Arc<Mutex<Vec<TracingUnboundedSender<InnerNotificationEvent>>>>;
+
+/// Type representing a distributable message sink.
+/// The detached message sink must carry the protocol name for registering metrics.
+///
+/// See documentation for [`PeerContext`] for more details.
+type NotificationSink = Arc<Mutex<(NotificationsSink, ProtocolName)>>;
+
+#[async_trait::async_trait]
+impl MessageSink for NotificationSink {
+	/// Send synchronous `notification` to the peer associated with this [`MessageSink`].
+	fn send_sync_notification(&self, notification: Vec<u8>) {
+		let sink = self.lock();
+
+		metrics::register_notification_sent(&sink.0.metrics(), &sink.1, notification.len());
+		sink.0.send_sync_notification(notification);
+	}
+
+	/// Send an asynchronous `notification` to the peer associated with this [`MessageSink`],
+	/// allowing sender to exercise backpressure.
+	///
+	/// Returns an error if the peer does not exist.
+	async fn send_async_notification(&self, notification: Vec<u8>) -> Result<(), error::Error> {
+		// The notification sink must be cloned because the lock cannot be held across `.await`.
+		// This makes the implementation less efficient, but not prohibitively so, as the same
+		// method is also used by `NetworkService` when sending notifications.
+		let notification_len = notification.len();
+		let sink = self.lock().clone();
+		let permit = sink
+			.0
+			.reserve_notification()
+			.await
+			.map_err(|_| error::Error::ConnectionClosed)?;
+
+		permit.send(notification).map_err(|_| error::Error::ChannelClosed).map(|res| {
+			metrics::register_notification_sent(&sink.0.metrics(), &sink.1, notification_len);
+			res
+		})
+	}
+}
+
+/// Inner notification event to deal with `NotificationsSinks` without exposing that
+/// implementation detail to [`NotificationService`] consumers.
+#[derive(Debug)]
+enum InnerNotificationEvent {
+	/// Validate inbound substream.
+	ValidateInboundSubstream {
+		/// Peer ID.
+		peer: PeerId,
+
+		/// Received handshake.
+		handshake: Vec<u8>,
+
+		/// `oneshot::Sender` for sending validation result back to `Notifications`.
+		result_tx: oneshot::Sender<ValidationResult>,
+	},
+
+	/// Notification substream open to `peer`.
+	NotificationStreamOpened {
+		/// Peer ID.
+		peer: PeerId,
+
+		/// Direction of the substream.
+		direction: Direction,
+
+		/// Received handshake.
+		handshake: Vec<u8>,
+
+		/// Negotiated fallback.
+		negotiated_fallback: Option<ProtocolName>,
+
+		/// Notification sink.
+		sink: NotificationsSink,
+	},
+
+	/// Substream was closed.
+	NotificationStreamClosed {
+		/// Peer ID.
+		peer: PeerId,
+	},
+
+	/// Notification was received from the substream.
+	NotificationReceived {
+		/// Peer ID.
+		peer: PeerId,
+
+		/// Received notification.
+		notification: Vec<u8>,
+	},
+
+	/// Notification sink has been replaced.
+	NotificationSinkReplaced {
+		/// Peer ID.
+		peer: PeerId,
+
+		/// Notification sink.
+		sink: NotificationsSink,
+	},
+}
+
+/// Notification commands.
+///
+/// Sent by the installed protocols to `Notifications` to open/close/modify substreams.
+#[derive(Debug)]
+pub enum NotificationCommand {
+	/// Instruct `Notifications` to open a substream to peer.
+	#[allow(unused)]
+	OpenSubstream(PeerId),
+
+	/// Instruct `Notifications` to close the substream to peer.
+	#[allow(unused)]
+	CloseSubstream(PeerId),
+
+	/// Set handshake for the notifications protocol.
+	SetHandshake(Vec<u8>),
+}
+
+/// Context assigned to each peer.
+///
+/// Contains `NotificationsSink` used by [`NotificationService`] to send notifications
+/// and an additional, distributable `NotificationsSink` which the protocol may acquire
+/// if it wishes to send notifications through `NotificationsSink` directly.
+///
+/// The distributable `NotificationsSink` is wrapped in an
+/// `Arc<Mutex<(NotificationsSink, ProtocolName)>>` to allow `NotificationService` to swap
+/// the underlying sink in case it's replaced.
+#[derive(Debug, Clone)]
+struct PeerContext {
+	/// Sink for sending notifications.
+	sink: NotificationsSink,
+
+	/// Distributable notification sink.
+	shared_sink: NotificationSink,
+}
+
+/// Handle that is passed on to the notifications protocol.
+#[derive(Debug)]
+pub struct NotificationHandle {
+	/// Protocol name.
+	protocol: ProtocolName,
+
+	/// TX channel for sending commands to `Notifications`.
+	tx: mpsc::Sender<NotificationCommand>,
+
+	/// RX channel for receiving events from `Notifications`.
+	rx: TracingUnboundedReceiver<InnerNotificationEvent>,
+
+	/// All subscribers of `NotificationEvent`s.
+	subscribers: Subscribers,
+
+	/// Connected peers.
+ peers: HashMap, +} + +impl NotificationHandle { + /// Create new [`NotificationHandle`]. + fn new( + protocol: ProtocolName, + tx: mpsc::Sender, + rx: TracingUnboundedReceiver, + subscribers: Arc>>>, + ) -> Self { + Self { protocol, tx, rx, subscribers, peers: HashMap::new() } + } +} + +#[async_trait::async_trait] +impl NotificationService for NotificationHandle { + /// Instruct `Notifications` to open a new substream for `peer`. + async fn open_substream(&mut self, _peer: PeerId) -> Result<(), ()> { + todo!("support for opening substreams not implemented yet"); + } + + /// Instruct `Notifications` to close substream for `peer`. + async fn close_substream(&mut self, _peer: PeerId) -> Result<(), ()> { + todo!("support for closing substreams not implemented yet, call `NetworkService::disconnect_peer()` instead"); + } + + /// Send synchronous `notification` to `peer`. + fn send_sync_notification(&self, peer: &PeerId, notification: Vec) { + if let Some(info) = self.peers.get(&peer) { + metrics::register_notification_sent( + &info.sink.metrics(), + &self.protocol, + notification.len(), + ); + + let _ = info.sink.send_sync_notification(notification); + } + } + + /// Send asynchronous `notification` to `peer`, allowing sender to exercise backpressure. + async fn send_async_notification( + &self, + peer: &PeerId, + notification: Vec, + ) -> Result<(), error::Error> { + let notification_len = notification.len(); + let sink = &self.peers.get(&peer).ok_or_else(|| error::Error::PeerDoesntExist(*peer))?.sink; + + sink.reserve_notification() + .await + .map_err(|_| error::Error::ConnectionClosed)? + .send(notification) + .map_err(|_| error::Error::ChannelClosed) + .map(|res| { + metrics::register_notification_sent( + &sink.metrics(), + &self.protocol, + notification_len, + ); + res + }) + } + + /// Set handshake for the notification protocol replacing the old handshake. + async fn set_handshake(&mut self, handshake: Vec) -> Result<(), ()> { + log::trace!(target: LOG_TARGET, "{}: set handshake to {handshake:?}", self.protocol); + + self.tx.send(NotificationCommand::SetHandshake(handshake)).await.map_err(|_| ()) + } + + /// Non-blocking variant of `set_handshake()` that attempts to update the handshake + /// and returns an error if the channel is blocked. + /// + /// Technically the function can return an error if the channel to `Notifications` is closed + /// but that doesn't happen under normal operation. + fn try_set_handshake(&mut self, handshake: Vec) -> Result<(), ()> { + self.tx.try_send(NotificationCommand::SetHandshake(handshake)).map_err(|_| ()) + } + + /// Get next event from the `Notifications` event stream. + async fn next_event(&mut self) -> Option { + loop { + match self.rx.next().await? 
{ + InnerNotificationEvent::ValidateInboundSubstream { peer, handshake, result_tx } => + return Some(NotificationEvent::ValidateInboundSubstream { + peer, + handshake, + result_tx, + }), + InnerNotificationEvent::NotificationStreamOpened { + peer, + handshake, + negotiated_fallback, + direction, + sink, + } => { + self.peers.insert( + peer, + PeerContext { + sink: sink.clone(), + shared_sink: Arc::new(Mutex::new((sink, self.protocol.clone()))), + }, + ); + return Some(NotificationEvent::NotificationStreamOpened { + peer, + handshake, + direction, + negotiated_fallback, + }) + }, + InnerNotificationEvent::NotificationStreamClosed { peer } => { + self.peers.remove(&peer); + return Some(NotificationEvent::NotificationStreamClosed { peer }) + }, + InnerNotificationEvent::NotificationReceived { peer, notification } => + return Some(NotificationEvent::NotificationReceived { peer, notification }), + InnerNotificationEvent::NotificationSinkReplaced { peer, sink } => { + match self.peers.get_mut(&peer) { + None => log::error!( + "{}: notification sink replaced for {peer} but peer does not exist", + self.protocol + ), + Some(context) => { + context.sink = sink.clone(); + *context.shared_sink.lock() = (sink.clone(), self.protocol.clone()); + }, + } + }, + } + } + } + + // Clone [`NotificationService`] + fn clone(&mut self) -> Result, ()> { + let mut subscribers = self.subscribers.lock(); + let (event_tx, event_rx) = tracing_unbounded("mpsc-notification-to-protocol", 100_000); + subscribers.push(event_tx); + + Ok(Box::new(NotificationHandle { + protocol: self.protocol.clone(), + tx: self.tx.clone(), + rx: event_rx, + peers: self.peers.clone(), + subscribers: self.subscribers.clone(), + })) + } + + /// Get protocol name. + fn protocol(&self) -> &ProtocolName { + &self.protocol + } + + /// Get message sink of the peer. + fn message_sink(&self, peer: &PeerId) -> Option> { + match self.peers.get(peer) { + Some(context) => Some(Box::new(context.shared_sink.clone())), + None => None, + } + } +} + +/// Channel pair which allows `Notifications` to interact with a protocol. +#[derive(Debug)] +pub struct ProtocolHandlePair { + /// Protocol name. + protocol: ProtocolName, + + /// Subscribers of the notification protocol events. + subscribers: Subscribers, + + // Receiver for notification commands received from the protocol implementation. + rx: mpsc::Receiver, +} + +impl ProtocolHandlePair { + /// Create new [`ProtocolHandlePair`]. + fn new( + protocol: ProtocolName, + subscribers: Subscribers, + rx: mpsc::Receiver, + ) -> Self { + Self { protocol, subscribers, rx } + } + + /// Consume `self` and split [`ProtocolHandlePair`] into a handle which allows it to send events + /// to the protocol and a stream of commands received from the protocol. + pub(crate) fn split( + self, + ) -> (ProtocolHandle, Box + Send + Unpin>) { + ( + ProtocolHandle::new(self.protocol, self.subscribers), + Box::new(ReceiverStream::new(self.rx)), + ) + } +} + +/// Handle that is passed on to `Notifications` and allows it to directly communicate +/// with the protocol. +#[derive(Debug, Clone)] +pub(crate) struct ProtocolHandle { + /// Protocol name. + protocol: ProtocolName, + + /// Subscribers of the notification protocol. + subscribers: Subscribers, + + /// Number of connected peers. + num_peers: usize, + + /// Delegate validation to `Peerset`. + delegate_to_peerset: bool, + + /// Prometheus metrics. 
+ metrics: Option, +} + +pub(crate) enum ValidationCallResult { + WaitForValidation(oneshot::Receiver), + Delegated, +} + +impl ProtocolHandle { + /// Create new [`ProtocolHandle`]. + fn new(protocol: ProtocolName, subscribers: Subscribers) -> Self { + Self { protocol, subscribers, num_peers: 0usize, metrics: None, delegate_to_peerset: false } + } + + /// Set metrics. + pub fn set_metrics(&mut self, metrics: Option) { + self.metrics = metrics; + } + + /// Delegate validation to `Peerset`. + /// + /// Protocols that do not do any validation themselves and only rely on `Peerset` handling + /// validation can disable protocol-side validation entirely by delegating all validation to + /// `Peerset`. + pub fn delegate_to_peerset(&mut self, delegate: bool) { + self.delegate_to_peerset = delegate; + } + + /// Report to the protocol that a substream has been opened and it must be validated by the + /// protocol. + /// + /// Return `oneshot::Receiver` which allows `Notifications` to poll for the validation result + /// from protocol. + pub fn report_incoming_substream( + &self, + peer: PeerId, + handshake: Vec, + ) -> Result { + let subscribers = self.subscribers.lock(); + + log::trace!( + target: LOG_TARGET, + "{}: report incoming substream for {peer}, handshake {handshake:?}", + self.protocol + ); + + if self.delegate_to_peerset { + return Ok(ValidationCallResult::Delegated) + } + + // if there is only one subscriber, `Notifications` can wait directly on the + // `oneshot::channel()`'s RX half without indirection + if subscribers.len() == 1 { + let (result_tx, rx) = oneshot::channel(); + return subscribers[0] + .unbounded_send(InnerNotificationEvent::ValidateInboundSubstream { + peer, + handshake, + result_tx, + }) + .map(|_| ValidationCallResult::WaitForValidation(rx)) + .map_err(|_| ()) + } + + // if there are multiple subscribers, create a task which waits for all of the + // validations to finish and returns the combined result to `Notifications` + let mut results: FuturesUnordered<_> = subscribers + .iter() + .filter_map(|subscriber| { + let (result_tx, rx) = oneshot::channel(); + + subscriber + .unbounded_send(InnerNotificationEvent::ValidateInboundSubstream { + peer, + handshake: handshake.clone(), + result_tx, + }) + .is_ok() + .then_some(rx) + }) + .collect(); + + let (tx, rx) = oneshot::channel(); + tokio::spawn(async move { + while let Some(event) = results.next().await { + match event { + Err(_) | Ok(ValidationResult::Reject) => + return tx.send(ValidationResult::Reject), + Ok(ValidationResult::Accept) => {}, + } + } + + return tx.send(ValidationResult::Accept) + }); + + Ok(ValidationCallResult::WaitForValidation(rx)) + } + + /// Report to the protocol that a substream has been opened and that it can now use the handle + /// to send notifications to the remote peer. + pub fn report_substream_opened( + &mut self, + peer: PeerId, + direction: Direction, + handshake: Vec, + negotiated_fallback: Option, + sink: NotificationsSink, + ) -> Result<(), ()> { + metrics::register_substream_opened(&self.metrics, &self.protocol); + + let mut subscribers = self.subscribers.lock(); + log::trace!(target: LOG_TARGET, "{}: substream opened for {peer:?}", self.protocol); + + subscribers.retain(|subscriber| { + subscriber + .unbounded_send(InnerNotificationEvent::NotificationStreamOpened { + peer, + direction, + handshake: handshake.clone(), + negotiated_fallback: negotiated_fallback.clone(), + sink: sink.clone(), + }) + .is_ok() + }); + self.num_peers += 1; + + Ok(()) + } + + /// Substream was closed. 
+ pub fn report_substream_closed(&mut self, peer: PeerId) -> Result<(), ()> { + metrics::register_substream_closed(&self.metrics, &self.protocol); + + let mut subscribers = self.subscribers.lock(); + log::trace!(target: LOG_TARGET, "{}: substream closed for {peer:?}", self.protocol); + + subscribers.retain(|subscriber| { + subscriber + .unbounded_send(InnerNotificationEvent::NotificationStreamClosed { peer }) + .is_ok() + }); + self.num_peers -= 1; + + Ok(()) + } + + /// Notification was received from the substream. + pub fn report_notification_received( + &mut self, + peer: PeerId, + notification: Vec, + ) -> Result<(), ()> { + metrics::register_notification_received(&self.metrics, &self.protocol, notification.len()); + + let mut subscribers = self.subscribers.lock(); + log::trace!(target: LOG_TARGET, "{}: notification received from {peer:?}", self.protocol); + + subscribers.retain(|subscriber| { + subscriber + .unbounded_send(InnerNotificationEvent::NotificationReceived { + peer, + notification: notification.clone(), + }) + .is_ok() + }); + + Ok(()) + } + + /// Notification sink was replaced. + pub fn report_notification_sink_replaced( + &mut self, + peer: PeerId, + sink: NotificationsSink, + ) -> Result<(), ()> { + let mut subscribers = self.subscribers.lock(); + + log::trace!( + target: LOG_TARGET, + "{}: notification sink replaced for {peer:?}", + self.protocol + ); + + subscribers.retain(|subscriber| { + subscriber + .unbounded_send(InnerNotificationEvent::NotificationSinkReplaced { + peer, + sink: sink.clone(), + }) + .is_ok() + }); + + Ok(()) + } + + /// Get the number of connected peers. + pub fn num_peers(&self) -> usize { + self.num_peers + } +} + +/// Create new (protocol, notification) handle pair. +/// +/// Handle pair allows `Notifications` and the protocol to communicate with each other directly. +pub fn notification_service( + protocol: ProtocolName, +) -> (ProtocolHandlePair, Box) { + let (cmd_tx, cmd_rx) = mpsc::channel(COMMAND_QUEUE_SIZE); + let (event_tx, event_rx) = tracing_unbounded("mpsc-notification-to-protocol", 100_000); + let subscribers = Arc::new(Mutex::new(vec![event_tx])); + + ( + ProtocolHandlePair::new(protocol.clone(), subscribers.clone(), cmd_rx), + Box::new(NotificationHandle::new(protocol.clone(), cmd_tx, event_rx, subscribers)), + ) +} diff --git a/substrate/client/network/src/protocol/notifications/service/tests.rs b/substrate/client/network/src/protocol/notifications/service/tests.rs new file mode 100644 index 00000000000..02ba9e1711c --- /dev/null +++ b/substrate/client/network/src/protocol/notifications/service/tests.rs @@ -0,0 +1,839 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . 
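The unit tests that follow exercise the handle returned by `notification_service()`. As a rough sketch of how a protocol sitting on top of this API might drive it (illustrative only; the trait methods and event variants are the ones defined in `mod.rs` above, while the protocol loop and payloads are placeholder assumptions):

```rust
// Illustrative sketch, not part of the patch. Types come from `crate::service::traits`.
use crate::service::traits::{NotificationEvent, NotificationService, ValidationResult};

async fn run_protocol(mut service: Box<dyn NotificationService>) {
	while let Some(event) = service.next_event().await {
		match event {
			// An inbound substream is waiting for validation; here it is accepted
			// unconditionally, a real protocol would inspect the handshake first.
			NotificationEvent::ValidateInboundSubstream { result_tx, .. } => {
				let _ = result_tx.send(ValidationResult::Accept);
			},
			// The substream is open; notifications can now be sent to `peer`,
			// with `send_async_notification` providing backpressure.
			NotificationEvent::NotificationStreamOpened { peer, .. } => {
				let _ = service.send_async_notification(&peer, b"hello".to_vec()).await;
			},
			// A notification arrived from the remote peer.
			NotificationEvent::NotificationReceived { peer: _, notification: _ } => {},
			// The substream was closed; per-peer state would be dropped here.
			NotificationEvent::NotificationStreamClosed { peer: _ } => {},
		}
	}
}
```

The tests below follow the same sequence, but drive both ends explicitly through `ProtocolHandle` so that each step (validation, stream open, send, close) can be asserted in isolation.
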
+ +use super::*; +use crate::protocol::notifications::handler::{ + NotificationsSinkMessage, ASYNC_NOTIFICATIONS_BUFFER_SIZE, +}; + +use std::future::Future; + +#[tokio::test] +async fn validate_and_accept_substream() { + let (proto, mut notif) = notification_service("/proto/1".into()); + let (handle, _stream) = proto.split(); + + let peer_id = PeerId::random(); + let ValidationCallResult::WaitForValidation(result_rx) = + handle.report_incoming_substream(peer_id, vec![1, 3, 3, 7]).unwrap() + else { + panic!("peerset not enabled"); + }; + + if let Some(NotificationEvent::ValidateInboundSubstream { peer, handshake, result_tx }) = + notif.next_event().await + { + assert_eq!(peer_id, peer); + assert_eq!(handshake, vec![1, 3, 3, 7]); + let _ = result_tx.send(ValidationResult::Accept).unwrap(); + } else { + panic!("invalid event received"); + } + + assert_eq!(result_rx.await.unwrap(), ValidationResult::Accept); +} + +#[tokio::test] +async fn substream_opened() { + let (proto, mut notif) = notification_service("/proto/1".into()); + let (sink, _, _) = NotificationsSink::new(PeerId::random()); + let (mut handle, _stream) = proto.split(); + + let peer_id = PeerId::random(); + handle + .report_substream_opened(peer_id, Direction::Inbound, vec![1, 3, 3, 7], None, sink) + .unwrap(); + + if let Some(NotificationEvent::NotificationStreamOpened { + peer, + negotiated_fallback, + handshake, + direction, + }) = notif.next_event().await + { + assert_eq!(peer_id, peer); + assert_eq!(negotiated_fallback, None); + assert_eq!(handshake, vec![1, 3, 3, 7]); + assert_eq!(direction, Direction::Inbound); + } else { + panic!("invalid event received"); + } +} + +#[tokio::test] +async fn send_sync_notification() { + let (proto, mut notif) = notification_service("/proto/1".into()); + let (sink, _, mut sync_rx) = NotificationsSink::new(PeerId::random()); + let (mut handle, _stream) = proto.split(); + let peer_id = PeerId::random(); + + // validate inbound substream + let ValidationCallResult::WaitForValidation(result_rx) = + handle.report_incoming_substream(peer_id, vec![1, 3, 3, 7]).unwrap() + else { + panic!("peerset not enabled"); + }; + + if let Some(NotificationEvent::ValidateInboundSubstream { peer, handshake, result_tx }) = + notif.next_event().await + { + assert_eq!(peer_id, peer); + assert_eq!(handshake, vec![1, 3, 3, 7]); + let _ = result_tx.send(ValidationResult::Accept).unwrap(); + } else { + panic!("invalid event received"); + } + assert_eq!(result_rx.await.unwrap(), ValidationResult::Accept); + + // report that a substream has been opened + handle + .report_substream_opened(peer_id, Direction::Inbound, vec![1, 3, 3, 7], None, sink) + .unwrap(); + + if let Some(NotificationEvent::NotificationStreamOpened { + peer, + negotiated_fallback, + handshake, + direction, + }) = notif.next_event().await + { + assert_eq!(peer_id, peer); + assert_eq!(negotiated_fallback, None); + assert_eq!(handshake, vec![1, 3, 3, 7]); + assert_eq!(direction, Direction::Inbound); + } else { + panic!("invalid event received"); + } + + notif.send_sync_notification(&peer_id, vec![1, 3, 3, 8]); + assert_eq!( + sync_rx.next().await, + Some(NotificationsSinkMessage::Notification { message: vec![1, 3, 3, 8] }) + ); +} + +#[tokio::test] +async fn send_async_notification() { + let (proto, mut notif) = notification_service("/proto/1".into()); + let (sink, mut async_rx, _) = NotificationsSink::new(PeerId::random()); + let (mut handle, _stream) = proto.split(); + let peer_id = PeerId::random(); + + // validate inbound substream + let 
ValidationCallResult::WaitForValidation(result_rx) = + handle.report_incoming_substream(peer_id, vec![1, 3, 3, 7]).unwrap() + else { + panic!("peerset not enabled"); + }; + + if let Some(NotificationEvent::ValidateInboundSubstream { peer, handshake, result_tx }) = + notif.next_event().await + { + assert_eq!(peer_id, peer); + assert_eq!(handshake, vec![1, 3, 3, 7]); + let _ = result_tx.send(ValidationResult::Accept).unwrap(); + } else { + panic!("invalid event received"); + } + assert_eq!(result_rx.await.unwrap(), ValidationResult::Accept); + + // report that a substream has been opened + handle + .report_substream_opened(peer_id, Direction::Inbound, vec![1, 3, 3, 7], None, sink) + .unwrap(); + + if let Some(NotificationEvent::NotificationStreamOpened { + peer, + negotiated_fallback, + handshake, + direction, + }) = notif.next_event().await + { + assert_eq!(peer_id, peer); + assert_eq!(negotiated_fallback, None); + assert_eq!(handshake, vec![1, 3, 3, 7]); + assert_eq!(direction, Direction::Inbound); + } else { + panic!("invalid event received"); + } + + notif.send_async_notification(&peer_id, vec![1, 3, 3, 9]).await.unwrap(); + assert_eq!( + async_rx.next().await, + Some(NotificationsSinkMessage::Notification { message: vec![1, 3, 3, 9] }) + ); +} + +#[tokio::test] +async fn send_sync_notification_to_non_existent_peer() { + let (proto, notif) = notification_service("/proto/1".into()); + let (_sink, _, _sync_rx) = NotificationsSink::new(PeerId::random()); + let (_handle, _stream) = proto.split(); + let peer = PeerId::random(); + + // as per the original implementation, the call doesn't fail + notif.send_sync_notification(&peer, vec![1, 3, 3, 7]) +} + +#[tokio::test] +async fn send_async_notification_to_non_existent_peer() { + let (proto, notif) = notification_service("/proto/1".into()); + let (_sink, _, _sync_rx) = NotificationsSink::new(PeerId::random()); + let (_handle, _stream) = proto.split(); + let peer = PeerId::random(); + + if let Err(error::Error::PeerDoesntExist(peer_id)) = + notif.send_async_notification(&peer, vec![1, 3, 3, 7]).await + { + assert_eq!(peer, peer_id); + } else { + panic!("invalid error received from `send_async_notification()`"); + } +} + +#[tokio::test] +async fn receive_notification() { + let (proto, mut notif) = notification_service("/proto/1".into()); + let (sink, _, _sync_rx) = NotificationsSink::new(PeerId::random()); + let (mut handle, _stream) = proto.split(); + let peer_id = PeerId::random(); + + // validate inbound substream + let ValidationCallResult::WaitForValidation(result_rx) = + handle.report_incoming_substream(peer_id, vec![1, 3, 3, 7]).unwrap() + else { + panic!("peerset not enabled"); + }; + + if let Some(NotificationEvent::ValidateInboundSubstream { peer, handshake, result_tx }) = + notif.next_event().await + { + assert_eq!(peer_id, peer); + assert_eq!(handshake, vec![1, 3, 3, 7]); + let _ = result_tx.send(ValidationResult::Accept).unwrap(); + } else { + panic!("invalid event received"); + } + assert_eq!(result_rx.await.unwrap(), ValidationResult::Accept); + + // report that a substream has been opened + handle + .report_substream_opened(peer_id, Direction::Inbound, vec![1, 3, 3, 7], None, sink) + .unwrap(); + + if let Some(NotificationEvent::NotificationStreamOpened { + peer, + negotiated_fallback, + handshake, + direction, + }) = notif.next_event().await + { + assert_eq!(peer_id, peer); + assert_eq!(negotiated_fallback, None); + assert_eq!(handshake, vec![1, 3, 3, 7]); + assert_eq!(direction, Direction::Inbound); + } else { + panic!("invalid 
event received"); + } + + // notification is received + handle.report_notification_received(peer_id, vec![1, 3, 3, 8]).unwrap(); + + if let Some(NotificationEvent::NotificationReceived { peer, notification }) = + notif.next_event().await + { + assert_eq!(peer_id, peer); + assert_eq!(notification, vec![1, 3, 3, 8]); + } else { + panic!("invalid event received"); + } +} + +#[tokio::test] +async fn backpressure_works() { + let (proto, mut notif) = notification_service("/proto/1".into()); + let (sink, mut async_rx, _) = NotificationsSink::new(PeerId::random()); + let (mut handle, _stream) = proto.split(); + let peer_id = PeerId::random(); + + // validate inbound substream + let ValidationCallResult::WaitForValidation(result_rx) = + handle.report_incoming_substream(peer_id, vec![1, 3, 3, 7]).unwrap() + else { + panic!("peerset not enabled"); + }; + + if let Some(NotificationEvent::ValidateInboundSubstream { peer, handshake, result_tx }) = + notif.next_event().await + { + assert_eq!(peer_id, peer); + assert_eq!(handshake, vec![1, 3, 3, 7]); + let _ = result_tx.send(ValidationResult::Accept).unwrap(); + } else { + panic!("invalid event received"); + } + assert_eq!(result_rx.await.unwrap(), ValidationResult::Accept); + + // report that a substream has been opened + handle + .report_substream_opened(peer_id, Direction::Inbound, vec![1, 3, 3, 7], None, sink) + .unwrap(); + + if let Some(NotificationEvent::NotificationStreamOpened { + peer, + negotiated_fallback, + handshake, + direction, + }) = notif.next_event().await + { + assert_eq!(peer_id, peer); + assert_eq!(negotiated_fallback, None); + assert_eq!(handshake, vec![1, 3, 3, 7]); + assert_eq!(direction, Direction::Inbound); + } else { + panic!("invalid event received"); + } + + // fill the message buffer with messages + for i in 0..=ASYNC_NOTIFICATIONS_BUFFER_SIZE { + assert!(futures::poll!(notif.send_async_notification(&peer_id, vec![1, 3, 3, i as u8])) + .is_ready()); + } + + // try to send one more message and verify that the call blocks + assert!(futures::poll!(notif.send_async_notification(&peer_id, vec![1, 3, 3, 9])).is_pending()); + + // release one slot from the buffer for new message + assert_eq!( + async_rx.next().await, + Some(NotificationsSinkMessage::Notification { message: vec![1, 3, 3, 0] }) + ); + + // verify that a message can be sent + assert!(futures::poll!(notif.send_async_notification(&peer_id, vec![1, 3, 3, 9])).is_ready()); +} + +#[tokio::test] +async fn peer_disconnects_then_sync_notification_is_sent() { + let (proto, mut notif) = notification_service("/proto/1".into()); + let (sink, _, sync_rx) = NotificationsSink::new(PeerId::random()); + let (mut handle, _stream) = proto.split(); + let peer_id = PeerId::random(); + + // validate inbound substream + let ValidationCallResult::WaitForValidation(result_rx) = + handle.report_incoming_substream(peer_id, vec![1, 3, 3, 7]).unwrap() + else { + panic!("peerset not enabled"); + }; + + if let Some(NotificationEvent::ValidateInboundSubstream { peer, handshake, result_tx }) = + notif.next_event().await + { + assert_eq!(peer_id, peer); + assert_eq!(handshake, vec![1, 3, 3, 7]); + let _ = result_tx.send(ValidationResult::Accept).unwrap(); + } else { + panic!("invalid event received"); + } + assert_eq!(result_rx.await.unwrap(), ValidationResult::Accept); + + // report that a substream has been opened + handle + .report_substream_opened(peer_id, Direction::Inbound, vec![1, 3, 3, 7], None, sink) + .unwrap(); + + if let Some(NotificationEvent::NotificationStreamOpened { + peer, + 
negotiated_fallback,
+ handshake,
+ direction,
+ }) = notif.next_event().await
+ {
+ assert_eq!(peer_id, peer);
+ assert_eq!(negotiated_fallback, None);
+ assert_eq!(handshake, vec![1, 3, 3, 7]);
+ assert_eq!(direction, Direction::Inbound);
+ } else {
+ panic!("invalid event received");
+ }
+
+ // report that a substream has been closed but don't poll `notif` to receive this
+ // information
+ handle.report_substream_closed(peer_id).unwrap();
+ drop(sync_rx);
+
+ // as per documentation, error is not reported but the notification is silently dropped
+ notif.send_sync_notification(&peer_id, vec![1, 3, 3, 7]);
+}
+
+#[tokio::test]
+async fn peer_disconnects_then_async_notification_is_sent() {
+ let (proto, mut notif) = notification_service("/proto/1".into());
+ let (sink, async_rx, _) = NotificationsSink::new(PeerId::random());
+ let (mut handle, _stream) = proto.split();
+ let peer_id = PeerId::random();
+
+ // validate inbound substream
+ let ValidationCallResult::WaitForValidation(result_rx) =
+ handle.report_incoming_substream(peer_id, vec![1, 3, 3, 7]).unwrap()
+ else {
+ panic!("peerset not enabled");
+ };
+
+ if let Some(NotificationEvent::ValidateInboundSubstream { peer, handshake, result_tx }) =
+ notif.next_event().await
+ {
+ assert_eq!(peer_id, peer);
+ assert_eq!(handshake, vec![1, 3, 3, 7]);
+ let _ = result_tx.send(ValidationResult::Accept).unwrap();
+ } else {
+ panic!("invalid event received");
+ }
+ assert_eq!(result_rx.await.unwrap(), ValidationResult::Accept);
+
+ // report that a substream has been opened
+ handle
+ .report_substream_opened(peer_id, Direction::Inbound, vec![1, 3, 3, 7], None, sink)
+ .unwrap();
+
+ if let Some(NotificationEvent::NotificationStreamOpened {
+ peer,
+ negotiated_fallback,
+ handshake,
+ direction,
+ }) = notif.next_event().await
+ {
+ assert_eq!(peer_id, peer);
+ assert_eq!(negotiated_fallback, None);
+ assert_eq!(handshake, vec![1, 3, 3, 7]);
+ assert_eq!(direction, Direction::Inbound);
+ } else {
+ panic!("invalid event received");
+ }
+
+ // report that a substream has been closed but don't poll `notif` to receive this
+ // information
+ handle.report_substream_closed(peer_id).unwrap();
+ drop(async_rx);
+
+ // unlike the sync case, the closed connection is reported back to the caller as an error
+ if let Err(error::Error::ConnectionClosed) =
+ notif.send_async_notification(&peer_id, vec![1, 3, 3, 7]).await
+ {
+ } else {
+ panic!("invalid state after calling `send_async_notification()` on closed connection")
+ }
+}
+
+#[tokio::test]
+async fn cloned_service_opening_substream_works() {
+ let (proto, mut notif1) = notification_service("/proto/1".into());
+ let (_sink, _async_rx, _) = NotificationsSink::new(PeerId::random());
+ let (handle, _stream) = proto.split();
+ let mut notif2 = notif1.clone().unwrap();
+ let peer_id = PeerId::random();
+
+ // validate inbound substream
+ let ValidationCallResult::WaitForValidation(mut result_rx) =
+ handle.report_incoming_substream(peer_id, vec![1, 3, 3, 7]).unwrap()
+ else {
+ panic!("peerset not enabled");
+ };
+
+ // verify that `notif1` gets the event
+ if let Some(NotificationEvent::ValidateInboundSubstream { peer, handshake, result_tx }) =
+ notif1.next_event().await
+ {
+ assert_eq!(peer_id, peer);
+ assert_eq!(handshake, vec![1, 3, 3, 7]);
+ let _ = result_tx.send(ValidationResult::Accept).unwrap();
+ } else {
+ panic!("invalid event received");
+ }
+
+ // verify that because only one listener has thus far sent its result, the result is
+ // pending
+ assert!(result_rx.try_recv().is_err());
+
+ // verify that `notif2` also gets the event
+ if let Some(NotificationEvent::ValidateInboundSubstream { peer, handshake, result_tx }) =
+ notif2.next_event().await
+ {
+ assert_eq!(peer_id, peer);
+ assert_eq!(handshake, vec![1, 3, 3, 7]);
+ result_tx.send(ValidationResult::Accept).unwrap();
+ } else {
+ panic!("invalid event received");
+ }
+
+ assert_eq!(result_rx.await.unwrap(), ValidationResult::Accept);
+}
+
+#[tokio::test]
+async fn cloned_service_one_service_rejects_substream() {
+ let (proto, mut notif1) = notification_service("/proto/1".into());
+ let (_sink, _async_rx, _) = NotificationsSink::new(PeerId::random());
+ let (handle, _stream) = proto.split();
+ let mut notif2 = notif1.clone().unwrap();
+ let mut notif3 = notif2.clone().unwrap();
+ let peer_id = PeerId::random();
+
+ // validate inbound substream
+ let ValidationCallResult::WaitForValidation(mut result_rx) =
+ handle.report_incoming_substream(peer_id, vec![1, 3, 3, 7]).unwrap()
+ else {
+ panic!("peerset not enabled");
+ };
+
+ for notif in vec![&mut notif1, &mut notif2] {
+ if let Some(NotificationEvent::ValidateInboundSubstream { peer, handshake, result_tx }) =
+ notif.next_event().await
+ {
+ assert_eq!(peer_id, peer);
+ assert_eq!(handshake, vec![1, 3, 3, 7]);
+ let _ = result_tx.send(ValidationResult::Accept).unwrap();
+ } else {
+ panic!("invalid event received");
+ }
+ }
+
+ // `notif3` has not yet sent its validation result
+ assert!(result_rx.try_recv().is_err());
+
+ if let Some(NotificationEvent::ValidateInboundSubstream { peer, handshake, result_tx }) =
+ notif3.next_event().await
+ {
+ assert_eq!(peer_id, peer);
+ assert_eq!(handshake, vec![1, 3, 3, 7]);
+ let _ = result_tx.send(ValidationResult::Reject).unwrap();
+ } else {
+ panic!("invalid event received");
+ }
+ assert_eq!(result_rx.await.unwrap(), ValidationResult::Reject);
+}
+
+#[tokio::test]
+async fn cloned_service_opening_substream_sending_and_receiving_notifications_work() {
+ let (proto, mut notif1) = notification_service("/proto/1".into());
+ let (sink, _, mut sync_rx) = NotificationsSink::new(PeerId::random());
+ let (mut handle, _stream) = proto.split();
+ let mut notif2 = notif1.clone().unwrap();
+ let mut notif3 = notif1.clone().unwrap();
+ let peer_id = PeerId::random();
+
+ // validate inbound substream
+ let ValidationCallResult::WaitForValidation(result_rx) =
+ handle.report_incoming_substream(peer_id, vec![1, 3, 3, 7]).unwrap()
+ else {
+ panic!("peerset not enabled");
+ };
+
+ for notif in vec![&mut notif1, &mut notif2, &mut notif3] {
+ // accept the inbound substream for all services
+ if let Some(NotificationEvent::ValidateInboundSubstream { peer, handshake, result_tx }) =
+ notif.next_event().await
+ {
+ assert_eq!(peer_id, peer);
+ assert_eq!(handshake, vec![1, 3, 3, 7]);
+ let _ = result_tx.send(ValidationResult::Accept).unwrap();
+ } else {
+ panic!("invalid event received");
+ }
+ }
+ assert_eq!(result_rx.await.unwrap(), ValidationResult::Accept);
+
+ // report that the notification stream has been opened
+ handle
+ .report_substream_opened(peer_id, Direction::Inbound, vec![1, 3, 3, 7], None, sink)
+ .unwrap();
+
+ for notif in vec![&mut notif1, &mut notif2, &mut notif3] {
+ if let Some(NotificationEvent::NotificationStreamOpened {
+ peer,
+ negotiated_fallback,
+ handshake,
+ direction,
+ }) = notif.next_event().await
+ {
+ assert_eq!(peer_id, peer);
+ assert_eq!(negotiated_fallback, None);
+ assert_eq!(handshake, vec![1, 3, 3, 7]);
+ assert_eq!(direction, Direction::Inbound);
+ } else {
+ panic!("invalid event
received"); + } + } + // receive a notification from peer and verify all services receive it + handle.report_notification_received(peer_id, vec![1, 3, 3, 8]).unwrap(); + + for notif in vec![&mut notif1, &mut notif2, &mut notif3] { + if let Some(NotificationEvent::NotificationReceived { peer, notification }) = + notif.next_event().await + { + assert_eq!(peer_id, peer); + assert_eq!(notification, vec![1, 3, 3, 8]); + } else { + panic!("invalid event received"); + } + } + + for (i, notif) in vec![&mut notif1, &mut notif2, &mut notif3].iter().enumerate() { + // send notification from each service and verify peer receives it + notif.send_sync_notification(&peer_id, vec![1, 3, 3, i as u8]); + assert_eq!( + sync_rx.next().await, + Some(NotificationsSinkMessage::Notification { message: vec![1, 3, 3, i as u8] }) + ); + } + + // close the substream for peer and verify all services receive the event + handle.report_substream_closed(peer_id).unwrap(); + + for notif in vec![&mut notif1, &mut notif2, &mut notif3] { + if let Some(NotificationEvent::NotificationStreamClosed { peer }) = notif.next_event().await + { + assert_eq!(peer_id, peer); + } else { + panic!("invalid event received"); + } + } +} + +#[tokio::test] +async fn sending_notifications_using_notifications_sink_works() { + let (proto, mut notif) = notification_service("/proto/1".into()); + let (sink, mut async_rx, mut sync_rx) = NotificationsSink::new(PeerId::random()); + let (mut handle, _stream) = proto.split(); + let peer_id = PeerId::random(); + + // validate inbound substream + let ValidationCallResult::WaitForValidation(result_rx) = + handle.report_incoming_substream(peer_id, vec![1, 3, 3, 7]).unwrap() + else { + panic!("peerset not enabled"); + }; + + if let Some(NotificationEvent::ValidateInboundSubstream { peer, handshake, result_tx }) = + notif.next_event().await + { + assert_eq!(peer_id, peer); + assert_eq!(handshake, vec![1, 3, 3, 7]); + let _ = result_tx.send(ValidationResult::Accept).unwrap(); + } else { + panic!("invalid event received"); + } + assert_eq!(result_rx.await.unwrap(), ValidationResult::Accept); + + // report that a substream has been opened + handle + .report_substream_opened(peer_id, Direction::Inbound, vec![1, 3, 3, 7], None, sink) + .unwrap(); + + if let Some(NotificationEvent::NotificationStreamOpened { + peer, + negotiated_fallback, + handshake, + direction, + }) = notif.next_event().await + { + assert_eq!(peer_id, peer); + assert_eq!(negotiated_fallback, None); + assert_eq!(handshake, vec![1, 3, 3, 7]); + assert_eq!(direction, Direction::Inbound); + } else { + panic!("invalid event received"); + } + + // get a copy of the notification sink and send a synchronous notification using. + let sink = notif.message_sink(&peer_id).unwrap(); + sink.send_sync_notification(vec![1, 3, 3, 6]); + + // send an asynchronous notification using the acquired notifications sink. + let _ = sink.send_async_notification(vec![1, 3, 3, 7]).await.unwrap(); + + assert_eq!( + sync_rx.next().await, + Some(NotificationsSinkMessage::Notification { message: vec![1, 3, 3, 6] }), + ); + assert_eq!( + async_rx.next().await, + Some(NotificationsSinkMessage::Notification { message: vec![1, 3, 3, 7] }), + ); + + // send notifications using the stored notification sink as well. 
+ notif.send_sync_notification(&peer_id, vec![1, 3, 3, 8]); + notif.send_async_notification(&peer_id, vec![1, 3, 3, 9]).await.unwrap(); + + assert_eq!( + sync_rx.next().await, + Some(NotificationsSinkMessage::Notification { message: vec![1, 3, 3, 8] }), + ); + assert_eq!( + async_rx.next().await, + Some(NotificationsSinkMessage::Notification { message: vec![1, 3, 3, 9] }), + ); +} + +#[test] +fn try_to_get_notifications_sink_for_non_existent_peer() { + let (_proto, notif) = notification_service("/proto/1".into()); + assert!(notif.message_sink(&PeerId::random()).is_none()); +} + +#[tokio::test] +async fn notification_sink_replaced() { + let (proto, mut notif) = notification_service("/proto/1".into()); + let (sink, mut async_rx, mut sync_rx) = NotificationsSink::new(PeerId::random()); + let (mut handle, _stream) = proto.split(); + let peer_id = PeerId::random(); + + // validate inbound substream + let ValidationCallResult::WaitForValidation(result_rx) = + handle.report_incoming_substream(peer_id, vec![1, 3, 3, 7]).unwrap() + else { + panic!("peerset not enabled"); + }; + + if let Some(NotificationEvent::ValidateInboundSubstream { peer, handshake, result_tx }) = + notif.next_event().await + { + assert_eq!(peer_id, peer); + assert_eq!(handshake, vec![1, 3, 3, 7]); + let _ = result_tx.send(ValidationResult::Accept).unwrap(); + } else { + panic!("invalid event received"); + } + assert_eq!(result_rx.await.unwrap(), ValidationResult::Accept); + + // report that a substream has been opened + handle + .report_substream_opened(peer_id, Direction::Inbound, vec![1, 3, 3, 7], None, sink) + .unwrap(); + + if let Some(NotificationEvent::NotificationStreamOpened { + peer, + negotiated_fallback, + handshake, + direction, + }) = notif.next_event().await + { + assert_eq!(peer_id, peer); + assert_eq!(negotiated_fallback, None); + assert_eq!(handshake, vec![1, 3, 3, 7]); + assert_eq!(direction, Direction::Inbound); + } else { + panic!("invalid event received"); + } + + // get a copy of the notification sink and send a synchronous notification using. + let sink = notif.message_sink(&peer_id).unwrap(); + sink.send_sync_notification(vec![1, 3, 3, 6]); + + // send an asynchronous notification using the acquired notifications sink. + let _ = sink.send_async_notification(vec![1, 3, 3, 7]).await.unwrap(); + + assert_eq!( + sync_rx.next().await, + Some(NotificationsSinkMessage::Notification { message: vec![1, 3, 3, 6] }), + ); + assert_eq!( + async_rx.next().await, + Some(NotificationsSinkMessage::Notification { message: vec![1, 3, 3, 7] }), + ); + + // send notifications using the stored notification sink as well. + notif.send_sync_notification(&peer_id, vec![1, 3, 3, 8]); + notif.send_async_notification(&peer_id, vec![1, 3, 3, 9]).await.unwrap(); + + assert_eq!( + sync_rx.next().await, + Some(NotificationsSinkMessage::Notification { message: vec![1, 3, 3, 8] }), + ); + assert_eq!( + async_rx.next().await, + Some(NotificationsSinkMessage::Notification { message: vec![1, 3, 3, 9] }), + ); + + // the initial connection was closed and `Notifications` switched to secondary connection + // and emitted `CustomProtocolReplaced` which informs the local `NotificationService` that + // the notification sink was replaced. 
+ let (new_sink, mut new_async_rx, mut new_sync_rx) = NotificationsSink::new(PeerId::random()); + handle.report_notification_sink_replaced(peer_id, new_sink).unwrap(); + + // drop the old sinks and poll `notif` once to register the sink replacement + drop(sync_rx); + drop(async_rx); + + futures::future::poll_fn(|cx| { + let _ = std::pin::Pin::new(&mut notif.next_event()).poll(cx); + std::task::Poll::Ready(()) + }) + .await; + + // verify that using the `NotificationService` API automatically results in using the correct + // sink + notif.send_sync_notification(&peer_id, vec![1, 3, 3, 8]); + notif.send_async_notification(&peer_id, vec![1, 3, 3, 9]).await.unwrap(); + + assert_eq!( + new_sync_rx.next().await, + Some(NotificationsSinkMessage::Notification { message: vec![1, 3, 3, 8] }), + ); + assert_eq!( + new_async_rx.next().await, + Some(NotificationsSinkMessage::Notification { message: vec![1, 3, 3, 9] }), + ); + + // now send two notifications using the acquired message sink and verify that + // it's also updated + sink.send_sync_notification(vec![1, 3, 3, 6]); + + // send an asynchronous notification using the acquired notifications sink. + let _ = sink.send_async_notification(vec![1, 3, 3, 7]).await.unwrap(); + + assert_eq!( + new_sync_rx.next().await, + Some(NotificationsSinkMessage::Notification { message: vec![1, 3, 3, 6] }), + ); + assert_eq!( + new_async_rx.next().await, + Some(NotificationsSinkMessage::Notification { message: vec![1, 3, 3, 7] }), + ); +} + +#[tokio::test] +async fn set_handshake() { + let (proto, mut notif) = notification_service("/proto/1".into()); + let (_handle, mut stream) = proto.split(); + + assert!(notif.try_set_handshake(vec![1, 3, 3, 7]).is_ok()); + + match stream.next().await { + Some(NotificationCommand::SetHandshake(handshake)) => { + assert_eq!(handshake, vec![1, 3, 3, 7]); + }, + _ => panic!("invalid event received"), + } + + for _ in 0..COMMAND_QUEUE_SIZE { + assert!(notif.try_set_handshake(vec![1, 3, 3, 7]).is_ok()); + } + + assert!(notif.try_set_handshake(vec![1, 3, 3, 7]).is_err()); +} diff --git a/substrate/client/network/src/protocol/notifications/tests.rs b/substrate/client/network/src/protocol/notifications/tests.rs index d57c24144f5..92d269f89c3 100644 --- a/substrate/client/network/src/protocol/notifications/tests.rs +++ b/substrate/client/network/src/protocol/notifications/tests.rs @@ -22,6 +22,7 @@ use crate::{ peer_store::PeerStore, protocol::notifications::{Notifications, NotificationsOut, ProtocolConfig}, protocol_controller::{ProtoSetConfig, ProtocolController, SetId}, + service::traits::{NotificationEvent, ValidationResult}, }; use futures::{future::BoxFuture, prelude::*}; @@ -70,6 +71,8 @@ fn build_nodes() -> (Swarm, Swarm) { .timeout(Duration::from_secs(20)) .boxed(); + let (protocol_handle_pair, mut notif_service) = + crate::protocol::notifications::service::notification_service("/foo".into()); let peer_store = PeerStore::new(if index == 0 { keypairs.iter().skip(1).map(|keypair| keypair.public().to_peer_id()).collect() } else { @@ -91,16 +94,22 @@ fn build_nodes() -> (Swarm, Swarm) { Box::new(peer_store.handle()), ); + let (notif_handle, command_stream) = protocol_handle_pair.split(); let behaviour = CustomProtoWithAddr { inner: Notifications::new( vec![controller_handle], from_controller, - iter::once(ProtocolConfig { - name: "/foo".into(), - fallback_names: Vec::new(), - handshake: Vec::new(), - max_notification_size: 1024 * 1024, - }), + &None, + iter::once(( + ProtocolConfig { + name: "/foo".into(), + fallback_names: 
Vec::new(), + handshake: Vec::new(), + max_notification_size: 1024 * 1024, + }, + notif_handle, + command_stream, + )), ), peer_store_future: peer_store.run().boxed(), protocol_controller_future: controller.run().boxed(), @@ -118,6 +127,16 @@ fn build_nodes() -> (Swarm, Swarm) { }; let runtime = tokio::runtime::Runtime::new().unwrap(); + runtime.spawn(async move { + loop { + if let NotificationEvent::ValidateInboundSubstream { result_tx, .. } = + notif_service.next_event().await.unwrap() + { + result_tx.send(ValidationResult::Accept).unwrap(); + } + } + }); + let mut swarm = SwarmBuilder::with_executor( transport, behaviour, diff --git a/substrate/client/network/src/protocol_controller.rs b/substrate/client/network/src/protocol_controller.rs index 3a305011ded..4c8f119baa2 100644 --- a/substrate/client/network/src/protocol_controller.rs +++ b/substrate/client/network/src/protocol_controller.rs @@ -847,6 +847,7 @@ mod tests { use super::*; use crate::{peer_store::PeerStoreProvider, ReputationChange}; use libp2p::PeerId; + use sc_network_common::role::ObservedRole; use sc_utils::mpsc::{tracing_unbounded, TryRecvError}; use std::collections::HashSet; @@ -858,8 +859,10 @@ mod tests { fn is_banned(&self, peer_id: &PeerId) -> bool; fn register_protocol(&self, protocol_handle: ProtocolHandle); fn report_disconnect(&mut self, peer_id: PeerId); + fn set_peer_role(&mut self, peer_id: &PeerId, role: ObservedRole); fn report_peer(&mut self, peer_id: PeerId, change: ReputationChange); fn peer_reputation(&self, peer_id: &PeerId) -> i32; + fn peer_role(&self, peer_id: &PeerId) -> Option; fn outgoing_candidates<'a>(&self, count: usize, ignored: HashSet<&'a PeerId>) -> Vec; } } diff --git a/substrate/client/network/src/service.rs b/substrate/client/network/src/service.rs index c1df48ad785..43a3ab09115 100644 --- a/substrate/client/network/src/service.rs +++ b/substrate/client/network/src/service.rs @@ -54,6 +54,7 @@ use crate::{ ReputationChange, }; +use codec::DecodeAll; use either::Either; use futures::{channel::oneshot, prelude::*}; #[allow(deprecated)] @@ -71,10 +72,13 @@ use libp2p::{ Multiaddr, PeerId, }; use log::{debug, error, info, trace, warn}; -use metrics::{Histogram, HistogramVec, MetricSources, Metrics}; +use metrics::{Histogram, MetricSources, Metrics}; use parking_lot::Mutex; -use sc_network_common::ExHashT; +use sc_network_common::{ + role::{ObservedRole, Roles}, + ExHashT, +}; use sc_utils::mpsc::{tracing_unbounded, TracingUnboundedReceiver, TracingUnboundedSender}; use sp_runtime::traits::Block as BlockT; @@ -118,12 +122,6 @@ pub struct NetworkService { bandwidth: Arc, /// Channel that sends messages to the actual worker. to_worker: TracingUnboundedSender, - /// For each peer and protocol combination, an object that allows sending notifications to - /// that peer. Updated by the [`NetworkWorker`]. - peers_notifications_sinks: Arc>>, - /// Field extracted from the [`Metrics`] struct and necessary to report the - /// notifications-related metrics. - notifications_sizes_metric: Option, /// Protocol name -> `SetId` mapping for notification protocols. The map never changes after /// initialization. notification_protocol_ids: HashMap, @@ -132,6 +130,8 @@ pub struct NetworkService { protocol_handles: Vec, /// Shortcut to sync protocol handle (`protocol_handles[0]`). sync_protocol_handle: protocol_controller::ProtocolHandle, + /// Handle to `PeerStore`. + peer_store_handle: PeerStoreHandle, /// Marker to pin the `H` generic. Serves no purpose except to not break backwards /// compatibility. 
_marker: PhantomData, @@ -199,7 +199,7 @@ where )?; for notification_protocol in ¬ification_protocols { ensure_addresses_consistent_with_transport( - notification_protocol.set_config.reserved_nodes.iter().map(|x| &x.multiaddr), + notification_protocol.set_config().reserved_nodes.iter().map(|x| &x.multiaddr), &network_config.transport, )?; } @@ -241,7 +241,7 @@ where .map(|cfg| usize::try_from(cfg.max_response_size).unwrap_or(usize::MAX)); let notifs_max = notification_protocols .iter() - .map(|cfg| usize::try_from(cfg.max_notification_size).unwrap_or(usize::MAX)); + .map(|cfg| usize::try_from(cfg.max_notification_size()).unwrap_or(usize::MAX)); // A "default" max is added to cover all the other protocols: ping, identify, // kademlia, block announces, and transactions. @@ -273,7 +273,7 @@ where // We must prepend a hardcoded default peer set to notification protocols. let all_peer_sets_iter = iter::once(&network_config.default_peers_set) - .chain(notification_protocols.iter().map(|protocol| &protocol.set_config)); + .chain(notification_protocols.iter().map(|protocol| protocol.set_config())); let (protocol_handles, protocol_controllers): (Vec<_>, Vec<_>) = all_peer_sets_iter .enumerate() @@ -312,21 +312,9 @@ where iter::once(¶ms.block_announce_config) .chain(notification_protocols.iter()) .enumerate() - .map(|(index, protocol)| { - (protocol.notifications_protocol.clone(), SetId::from(index)) - }) + .map(|(index, protocol)| (protocol.protocol_name().clone(), SetId::from(index))) .collect(); - let protocol = Protocol::new( - From::from(¶ms.role), - notification_protocols.clone(), - params.block_announce_config, - params.peer_store.clone(), - protocol_handles.clone(), - from_protocol_controllers, - params.tx, - )?; - let known_addresses = { // Collect all reserved nodes and bootnodes addresses. let mut addresses: Vec<_> = network_config @@ -336,7 +324,7 @@ where .map(|reserved| (reserved.peer_id, reserved.multiaddr.clone())) .chain(notification_protocols.iter().flat_map(|protocol| { protocol - .set_config + .set_config() .reserved_nodes .iter() .map(|reserved| (reserved.peer_id, reserved.multiaddr.clone())) @@ -389,6 +377,16 @@ where let num_connected = Arc::new(AtomicUsize::new(0)); let external_addresses = Arc::new(Mutex::new(HashSet::new())); + let (protocol, notif_protocol_handles) = Protocol::new( + From::from(¶ms.role), + ¶ms.metrics_registry, + notification_protocols, + params.block_announce_config, + params.peer_store.clone(), + protocol_handles.clone(), + from_protocol_controllers, + )?; + // Build the swarm. 
let (mut swarm, bandwidth): (Swarm>, _) = { let user_agent = @@ -508,7 +506,6 @@ where } let listen_addresses = Arc::new(Mutex::new(HashSet::new())); - let peers_notifications_sinks = Arc::new(Mutex::new(HashMap::new())); let service = Arc::new(NetworkService { bandwidth, @@ -518,13 +515,10 @@ where local_peer_id, local_identity, to_worker, - peers_notifications_sinks: peers_notifications_sinks.clone(), - notifications_sizes_metric: metrics - .as_ref() - .map(|metrics| metrics.notifications_sizes.clone()), notification_protocol_ids, protocol_handles, sync_protocol_handle, + peer_store_handle: params.peer_store.clone(), _marker: PhantomData, _block: Default::default(), }); @@ -539,8 +533,8 @@ where metrics, boot_node_ids, reported_invalid_boot_nodes: Default::default(), - peers_notifications_sinks, peer_store_handle: params.peer_store, + notif_protocol_handles, _marker: Default::default(), _block: Default::default(), }) @@ -567,7 +561,7 @@ where /// Returns the number of peers we're connected to. pub fn num_connected_peers(&self) -> usize { - self.network_service.behaviour().user_protocol().num_connected_peers() + self.network_service.behaviour().user_protocol().num_sync_peers() } /// Adds an address for a node. @@ -991,6 +985,16 @@ where fn sync_num_connected(&self) -> usize { self.num_connected.load(Ordering::Relaxed) } + + fn peer_role(&self, peer_id: PeerId, handshake: Vec) -> Option { + match Roles::decode_all(&mut &handshake[..]) { + Ok(role) => Some(role.into()), + Err(_) => { + log::debug!(target: "sub-libp2p", "handshake doesn't contain peer role: {handshake:?}"); + self.peer_store_handle.peer_role(&peer_id) + }, + } + } } impl NetworkEventStream for NetworkService @@ -1010,68 +1014,20 @@ where B: BlockT + 'static, H: ExHashT, { - fn write_notification(&self, target: PeerId, protocol: ProtocolName, message: Vec) { - // We clone the `NotificationsSink` in order to be able to unlock the network-wide - // `peers_notifications_sinks` mutex as soon as possible. - let sink = { - let peers_notifications_sinks = self.peers_notifications_sinks.lock(); - if let Some(sink) = peers_notifications_sinks.get(&(target, protocol.clone())) { - sink.clone() - } else { - // Notification silently discarded, as documented. - debug!( - target: "sub-libp2p", - "Attempted to send notification on missing or closed substream: {}, {:?}", - target, protocol, - ); - return - } - }; - - if let Some(notifications_sizes_metric) = self.notifications_sizes_metric.as_ref() { - notifications_sizes_metric - .with_label_values(&["out", &protocol]) - .observe(message.len() as f64); - } - - // Sending is communicated to the `NotificationsSink`. - trace!( - target: "sub-libp2p", - "External API => Notification({:?}, {:?}, {} bytes)", - target, protocol, message.len() - ); - trace!(target: "sub-libp2p", "Handler({:?}) <= Sync notification", target); - sink.send_sync_notification(message); + fn write_notification(&self, _target: PeerId, _protocol: ProtocolName, _message: Vec) { + unimplemented!(); } fn notification_sender( &self, - target: PeerId, - protocol: ProtocolName, + _target: PeerId, + _protocol: ProtocolName, ) -> Result, NotificationSenderError> { - // We clone the `NotificationsSink` in order to be able to unlock the network-wide - // `peers_notifications_sinks` mutex as soon as possible. 
- let sink = { - let peers_notifications_sinks = self.peers_notifications_sinks.lock(); - if let Some(sink) = peers_notifications_sinks.get(&(target, protocol.clone())) { - sink.clone() - } else { - return Err(NotificationSenderError::Closed) - } - }; - - let notification_size_metric = self - .notifications_sizes_metric - .as_ref() - .map(|histogram| histogram.with_label_values(&["out", &protocol])); - - Ok(Box::new(NotificationSender { sink, protocol_name: protocol, notification_size_metric })) + unimplemented!(); } - fn set_notification_handshake(&self, protocol: ProtocolName, handshake: Vec) { - let _ = self - .to_worker - .unbounded_send(ServiceToWorkerMsg::SetNotificationHandshake(protocol, handshake)); + fn set_notification_handshake(&self, _protocol: ProtocolName, _handshake: Vec) { + unimplemented!(); } } @@ -1209,7 +1165,6 @@ enum ServiceToWorkerMsg { pending_response: oneshot::Sender>, }, DisconnectPeer(PeerId, ProtocolName), - SetNotificationHandshake(ProtocolName, Vec), } /// Main network worker. Must be polled in order for the network to advance. @@ -1239,11 +1194,10 @@ where boot_node_ids: Arc>>, /// Boot nodes that we already have reported as invalid. reported_invalid_boot_nodes: HashSet, - /// For each peer and protocol combination, an object that allows sending notifications to - /// that peer. Shared with the [`NetworkService`]. - peers_notifications_sinks: Arc>>, /// Peer reputation store handle. peer_store_handle: PeerStoreHandle, + /// Notification protocol handles. + notif_protocol_handles: Vec, /// Marker to pin the `H` generic. Serves no purpose except to not break backwards /// compatibility. _marker: PhantomData, @@ -1282,8 +1236,7 @@ where }; // Update the `num_connected` count shared with the `NetworkService`. - let num_connected_peers = - self.network_service.behaviour_mut().user_protocol_mut().num_connected_peers(); + let num_connected_peers = self.network_service.behaviour().user_protocol().num_sync_peers(); self.num_connected.store(num_connected_peers, Ordering::Relaxed); if let Some(metrics) = self.metrics.as_ref() { @@ -1353,11 +1306,6 @@ where .behaviour_mut() .user_protocol_mut() .disconnect_peer(&who, protocol_name), - ServiceToWorkerMsg::SetNotificationHandshake(protocol, handshake) => self - .network_service - .behaviour_mut() - .user_protocol_mut() - .set_notification_handshake(protocol, handshake), } } @@ -1472,47 +1420,27 @@ where }, SwarmEvent::Behaviour(BehaviourOut::NotificationStreamOpened { remote, - protocol, + set_id, + direction, negotiated_fallback, notifications_sink, - role, received_handshake, }) => { - if let Some(metrics) = self.metrics.as_ref() { - metrics - .notifications_streams_opened_total - .with_label_values(&[&protocol]) - .inc(); - } - { - let mut peers_notifications_sinks = self.peers_notifications_sinks.lock(); - let _previous_value = peers_notifications_sinks - .insert((remote, protocol.clone()), notifications_sink); - debug_assert!(_previous_value.is_none()); - } - self.event_streams.send(Event::NotificationStreamOpened { + let _ = self.notif_protocol_handles[usize::from(set_id)].report_substream_opened( remote, - protocol, - negotiated_fallback, - role, + direction, received_handshake, - }); + negotiated_fallback, + notifications_sink, + ); }, SwarmEvent::Behaviour(BehaviourOut::NotificationStreamReplaced { remote, - protocol, + set_id, notifications_sink, }) => { - let mut peers_notifications_sinks = self.peers_notifications_sinks.lock(); - if let Some(s) = peers_notifications_sinks.get_mut(&(remote, protocol)) { - *s = 
notifications_sink; - } else { - error!( - target: "sub-libp2p", - "NotificationStreamReplaced for non-existing substream" - ); - debug_assert!(false); - } + let _ = self.notif_protocol_handles[usize::from(set_id)] + .report_notification_sink_replaced(remote, notifications_sink); // TODO: Notifications might have been lost as a result of the previous // connection being dropped, and as a result it would be preferable to notify @@ -1535,31 +1463,17 @@ where // role, // }); }, - SwarmEvent::Behaviour(BehaviourOut::NotificationStreamClosed { remote, protocol }) => { - if let Some(metrics) = self.metrics.as_ref() { - metrics - .notifications_streams_closed_total - .with_label_values(&[&protocol[..]]) - .inc(); - } - self.event_streams - .send(Event::NotificationStreamClosed { remote, protocol: protocol.clone() }); - { - let mut peers_notifications_sinks = self.peers_notifications_sinks.lock(); - let _previous_value = peers_notifications_sinks.remove(&(remote, protocol)); - debug_assert!(_previous_value.is_some()); - } + SwarmEvent::Behaviour(BehaviourOut::NotificationStreamClosed { remote, set_id }) => { + let _ = self.notif_protocol_handles[usize::from(set_id)] + .report_substream_closed(remote); }, - SwarmEvent::Behaviour(BehaviourOut::NotificationsReceived { remote, messages }) => { - if let Some(metrics) = self.metrics.as_ref() { - for (protocol, message) in &messages { - metrics - .notifications_sizes - .with_label_values(&["in", protocol]) - .observe(message.len() as f64); - } - } - self.event_streams.send(Event::NotificationsReceived { remote, messages }); + SwarmEvent::Behaviour(BehaviourOut::NotificationsReceived { + remote, + set_id, + notification, + }) => { + let _ = self.notif_protocol_handles[usize::from(set_id)] + .report_notification_received(remote, notification); }, SwarmEvent::Behaviour(BehaviourOut::Dht(event, duration)) => { if let Some(metrics) = self.metrics.as_ref() { diff --git a/substrate/client/network/src/service/metrics.rs b/substrate/client/network/src/service/metrics.rs index 13bc4b4e7af..c349fd98c76 100644 --- a/substrate/client/network/src/service/metrics.rs +++ b/substrate/client/network/src/service/metrics.rs @@ -61,9 +61,6 @@ pub struct Metrics { pub kbuckets_num_nodes: GaugeVec, pub listeners_local_addresses: Gauge, pub listeners_errors_total: Counter, - pub notifications_sizes: HistogramVec, - pub notifications_streams_closed_total: CounterVec, - pub notifications_streams_opened_total: CounterVec, pub peerset_num_discovered: Gauge, pub pending_connections: Gauge, pub pending_connections_errors_total: CounterVec, @@ -153,31 +150,6 @@ impl Metrics { "substrate_sub_libp2p_listeners_errors_total", "Total number of non-fatal errors reported by a listener" )?, registry)?, - notifications_sizes: prometheus::register(HistogramVec::new( - HistogramOpts { - common_opts: Opts::new( - "substrate_sub_libp2p_notifications_sizes", - "Sizes of the notifications send to and received from all nodes" - ), - buckets: prometheus::exponential_buckets(64.0, 4.0, 8) - .expect("parameters are always valid values; qed"), - }, - &["direction", "protocol"] - )?, registry)?, - notifications_streams_closed_total: prometheus::register(CounterVec::new( - Opts::new( - "substrate_sub_libp2p_notifications_streams_closed_total", - "Total number of notification substreams that have been closed" - ), - &["protocol"] - )?, registry)?, - notifications_streams_opened_total: prometheus::register(CounterVec::new( - Opts::new( - "substrate_sub_libp2p_notifications_streams_opened_total", - "Total 
number of notification substreams that have been opened" - ), - &["protocol"] - )?, registry)?, peerset_num_discovered: prometheus::register(Gauge::new( "substrate_sub_libp2p_peerset_num_discovered", "Number of nodes stored in the peerset manager", diff --git a/substrate/client/network/src/service/signature.rs b/substrate/client/network/src/service/signature.rs index 024f60e4c46..5b2ba6be8cf 100644 --- a/substrate/client/network/src/service/signature.rs +++ b/substrate/client/network/src/service/signature.rs @@ -18,6 +18,8 @@ // // If you read this, you are very thorough, congratulations. +//! Signature-related code + use libp2p::{ identity::{Keypair, PublicKey}, PeerId, diff --git a/substrate/client/network/src/service/traits.rs b/substrate/client/network/src/service/traits.rs index bed325ede4a..f66e810be11 100644 --- a/substrate/client/network/src/service/traits.rs +++ b/substrate/client/network/src/service/traits.rs @@ -18,8 +18,11 @@ // // If you read this, you are very thorough, congratulations. +//! Traits defined by `sc-network`. + use crate::{ config::MultiaddrWithPeerId, + error, event::Event, request_responses::{IfDisconnected, RequestFailure}, service::signature::Signature, @@ -30,7 +33,9 @@ use crate::{ use futures::{channel::oneshot, Stream}; use libp2p::{Multiaddr, PeerId}; -use std::{collections::HashSet, future::Future, pin::Pin, sync::Arc}; +use sc_network_common::role::ObservedRole; + +use std::{collections::HashSet, fmt::Debug, future::Future, pin::Pin, sync::Arc}; pub use libp2p::{identity::SigningError, kad::record::Key as KademliaKey}; @@ -221,6 +226,14 @@ pub trait NetworkPeers { /// Returns the number of peers in the sync peer set we're connected to. fn sync_num_connected(&self) -> usize; + + /// Attempt to get peer role. + /// + /// Right now the peer role is decoded from the received handshake for all protocols + /// (`/block-announces/1` has other information as well). If the handshake cannot be + /// decoded into a role, the role queried from `PeerStore` and if the role is not stored + /// there either, `None` is returned and the peer should be discarded. + fn peer_role(&self, peer_id: PeerId, handshake: Vec) -> Option; } // Manual implementation to avoid extra boxing here @@ -296,6 +309,10 @@ where fn sync_num_connected(&self) -> usize { T::sync_num_connected(self) } + + fn peer_role(&self, peer_id: PeerId, handshake: Vec) -> Option { + T::peer_role(self, peer_id, handshake) + } } /// Provides access to network-level event stream. @@ -611,3 +628,189 @@ where T::new_best_block_imported(self, hash, number) } } + +/// Substream acceptance result. +#[derive(Debug, PartialEq, Eq)] +pub enum ValidationResult { + /// Accept inbound substream. + Accept, + + /// Reject inbound substream. + Reject, +} + +/// Substream direction. +#[derive(Debug, Copy, Clone, PartialEq, Eq)] +pub enum Direction { + /// Substream opened by the remote node. + Inbound, + + /// Substream opened by the local node. + Outbound, +} + +impl Direction { + /// Is the direction inbound. + pub fn is_inbound(&self) -> bool { + std::matches!(self, Direction::Inbound) + } +} + +/// Events received by the protocol from `Notifications`. +#[derive(Debug)] +pub enum NotificationEvent { + /// Validate inbound substream. + ValidateInboundSubstream { + /// Peer ID. + peer: PeerId, + + /// Received handshake. 
+ handshake: Vec, + + /// `oneshot::Sender` for sending validation result back to `Notifications` + result_tx: tokio::sync::oneshot::Sender, + }, + + /// Remote identified by `PeerId` opened a substream and sent `Handshake`. + /// Validate `Handshake` and report status (accept/reject) to `Notifications`. + NotificationStreamOpened { + /// Peer ID. + peer: PeerId, + + /// Is the substream inbound or outbound. + direction: Direction, + + /// Received handshake. + handshake: Vec, + + /// Negotiated fallback. + negotiated_fallback: Option, + }, + + /// Substream was closed. + NotificationStreamClosed { + /// Peer Id. + peer: PeerId, + }, + + /// Notification was received from the substream. + NotificationReceived { + /// Peer ID. + peer: PeerId, + + /// Received notification. + notification: Vec, + }, +} + +/// Notification service +/// +/// Defines behaviors that both the protocol implementations and `Notifications` can expect from +/// each other. +/// +/// `Notifications` can send two different kinds of information to protocol: +/// * substream-related information +/// * notification-related information +/// +/// When an unvalidated, inbound substream is received by `Notifications`, it sends the inbound +/// stream information (peer ID, handshake) to protocol for validation. Protocol must then verify +/// that the handshake is valid (and in the future that it has a slot it can allocate for the peer) +/// and then report back the `ValidationResult` which is either `Accept` or `Reject`. +/// +/// After the validation result has been received by `Notifications`, it prepares the +/// substream for communication by initializing the necessary sinks and emits +/// `NotificationStreamOpened` which informs the protocol that the remote peer is ready to receive +/// notifications. +/// +/// Two different flavors of sending options are provided: +/// * synchronous sending ([`NotificationService::send_sync_notification()`]) +/// * asynchronous sending ([`NotificationService::send_async_notification()`]) +/// +/// The former is used by the protocols not ready to exercise backpressure and the latter by the +/// protocols that can do it. +/// +/// Both local and remote peer can close the substream at any time. Local peer can do so by calling +/// [`NotificationService::close_substream()`] which instructs `Notifications` to close the +/// substream. Remote closing the substream is indicated to the local peer by receiving +/// [`NotificationEvent::NotificationStreamClosed`] event. +/// +/// In case the protocol must update its handshake while it's operating (such as updating the best +/// block information), it can do so by calling [`NotificationService::set_handshake()`] +/// which instructs `Notifications` to update the handshake it stored during protocol +/// initialization. +/// +/// All peer events are multiplexed on the same incoming event stream from `Notifications` and thus +/// each event carries a `PeerId` so the protocol knows whose information to update when receiving +/// an event. +#[async_trait::async_trait] +pub trait NotificationService: Debug + Send { + /// Instruct `Notifications` to open a new substream for `peer`. + /// + /// `dial_if_disconnected` informs `Notifications` whether to dial + // the peer if there is currently no active connection to it. + // + // NOTE: not offered by the current implementation + async fn open_substream(&mut self, peer: PeerId) -> Result<(), ()>; + + /// Instruct `Notifications` to close substream for `peer`. 
+
+ //
+ // NOTE: not offered by the current implementation
+ async fn close_substream(&mut self, peer: PeerId) -> Result<(), ()>;
+
+ /// Send synchronous `notification` to `peer`.
+ fn send_sync_notification(&self, peer: &PeerId, notification: Vec<u8>);
+
+ /// Send asynchronous `notification` to `peer`, allowing sender to exercise backpressure.
+ ///
+ /// Returns an error if the peer doesn't exist.
+ async fn send_async_notification(
+ &self,
+ peer: &PeerId,
+ notification: Vec<u8>,
+ ) -> Result<(), error::Error>;
+
+ /// Set handshake for the notification protocol replacing the old handshake.
+ async fn set_handshake(&mut self, handshake: Vec<u8>) -> Result<(), ()>;
+
+ /// Non-blocking variant of `set_handshake()` that attempts to update the handshake
+ /// and returns an error if the channel is blocked.
+ ///
+ /// Technically the function can return an error if the channel to `Notifications` is closed
+ /// but that doesn't happen under normal operation.
+ fn try_set_handshake(&mut self, handshake: Vec<u8>) -> Result<(), ()>;
+
+ /// Get next event from the `Notifications` event stream.
+ async fn next_event(&mut self) -> Option<NotificationEvent>;
+
+ /// Make a copy of the object so it can be shared between protocol components
+ /// that wish to have access to the same underlying notification protocol.
+ fn clone(&mut self) -> Result<Box<dyn NotificationService>, ()>;
+
+ /// Get protocol name of the `NotificationService`.
+ fn protocol(&self) -> &ProtocolName;
+
+ /// Get message sink of the peer.
+ fn message_sink(&self, peer: &PeerId) -> Option<Box<dyn MessageSink>>;
+}
+
+/// Message sink for peers.
+///
+/// If protocol cannot use [`NotificationService`] to send notifications to peers and requires,
+/// e.g., notifications to be sent in another task, the protocol may acquire a [`MessageSink`]
+/// object for each peer by calling [`NotificationService::message_sink()`]. Calling this
+/// function returns an object which allows the protocol to send notifications to the remote peer.
+///
+/// Use of this API is discouraged as it's not as performant as sending notifications through
+/// [`NotificationService`] due to synchronization required to keep the underlying notification
+/// sink up to date with possible sink replacement events.
+#[async_trait::async_trait]
+pub trait MessageSink: Send + Sync {
+ /// Send synchronous `notification` to the peer associated with this [`MessageSink`].
+ fn send_sync_notification(&self, notification: Vec<u8>);
+
+ /// Send an asynchronous `notification` to the peer associated with this [`MessageSink`],
+ /// allowing sender to exercise backpressure.
+ ///
+ /// Returns an error if the peer does not exist.
+ async fn send_async_notification(&self, notification: Vec<u8>) -> Result<(), error::Error>;
+}
diff --git a/substrate/client/network/statement/src/lib.rs b/substrate/client/network/statement/src/lib.rs
index 69d4faa13ef..5187e681d83 100644
--- a/substrate/client/network/statement/src/lib.rs
+++ b/substrate/client/network/statement/src/lib.rs
@@ -21,12 +21,13 @@
 //! Usage:
 //!
 //! - Use [`StatementHandlerPrototype::new`] to create a prototype.
-//! - Pass the return value of [`StatementHandlerPrototype::set_config`] to the network
-//! configuration as an extra peers set.
+//! - Pass the `NonDefaultSetConfig` returned from [`StatementHandlerPrototype::new`] to the network
+//! configuration as an extra peers set.
 //! - Use [`StatementHandlerPrototype::build`] then [`StatementHandler::run`] to obtain a
 //! `Future` that processes statements.
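Taken together, the `NotificationService` trait defined above and the statement-handler usage notes describe the same pattern: the protocol receives a `NotificationService` handle and drives it in an event loop. The following is a minimal sketch of that loop, not part of the patch: the handler logic (accept every inbound substream, greet the peer, log received notifications) is a placeholder, and only the trait methods and event variants introduced above are used.

```rust
use sc_network::service::traits::{NotificationEvent, NotificationService, ValidationResult};

/// Hypothetical event loop for a protocol built on `NotificationService` (sketch only).
async fn run_protocol(mut service: Box<dyn NotificationService>) {
    while let Some(event) = service.next_event().await {
        match event {
            NotificationEvent::ValidateInboundSubstream { result_tx, .. } => {
                // A real protocol would inspect the handshake; this sketch accepts everything.
                let _ = result_tx.send(ValidationResult::Accept);
            },
            NotificationEvent::NotificationStreamOpened { peer, .. } => {
                // The substream is ready; notifications can now be sent to `peer`.
                service.send_sync_notification(&peer, b"hello".to_vec());
            },
            NotificationEvent::NotificationReceived { peer, notification } => {
                log::trace!("received {} bytes from {peer}", notification.len());
            },
            NotificationEvent::NotificationStreamClosed { peer } => {
                log::trace!("substream to {peer} closed");
            },
        }
    }
}
```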
use crate::config::*; + use codec::{Decode, Encode}; use futures::{channel::oneshot, prelude::*, stream::FuturesUnordered, FutureExt}; use libp2p::{multiaddr, PeerId}; @@ -34,7 +35,7 @@ use prometheus_endpoint::{register, Counter, PrometheusError, Registry, U64}; use sc_network::{ config::{NonDefaultSetConfig, NonReservedPeerMode, SetConfig}, error, - event::Event, + service::traits::{NotificationEvent, NotificationService, ValidationResult}, types::ProtocolName, utils::{interval, LruHashSet}, NetworkEventStream, NetworkNotification, NetworkPeers, @@ -101,35 +102,35 @@ impl Metrics { /// Prototype for a [`StatementHandler`]. pub struct StatementHandlerPrototype { protocol_name: ProtocolName, + notification_service: Box, } impl StatementHandlerPrototype { /// Create a new instance. - pub fn new>(genesis_hash: Hash, fork_id: Option<&str>) -> Self { + pub fn new>( + genesis_hash: Hash, + fork_id: Option<&str>, + ) -> (Self, NonDefaultSetConfig) { let genesis_hash = genesis_hash.as_ref(); let protocol_name = if let Some(fork_id) = fork_id { format!("/{}/{}/statement/1", array_bytes::bytes2hex("", genesis_hash), fork_id) } else { format!("/{}/statement/1", array_bytes::bytes2hex("", genesis_hash)) }; - - Self { protocol_name: protocol_name.into() } - } - - /// Returns the configuration of the set to put in the network configuration. - pub fn set_config(&self) -> NonDefaultSetConfig { - NonDefaultSetConfig { - notifications_protocol: self.protocol_name.clone(), - fallback_names: Vec::new(), - max_notification_size: MAX_STATEMENT_SIZE, - handshake: None, - set_config: SetConfig { + let (config, notification_service) = NonDefaultSetConfig::new( + protocol_name.clone().into(), + Vec::new(), + MAX_STATEMENT_SIZE, + None, + SetConfig { in_peers: 0, out_peers: 0, reserved_nodes: Vec::new(), non_reserved_mode: NonReservedPeerMode::Deny, }, - } + ); + + (Self { protocol_name: protocol_name.into(), notification_service }, config) } /// Turns the prototype into the actual handler. @@ -147,7 +148,6 @@ impl StatementHandlerPrototype { metrics_registry: Option<&Registry>, executor: impl Fn(Pin + Send>>) + Send, ) -> error::Result> { - let net_event_stream = network.event_stream("statement-handler-net"); let sync_event_stream = sync.event_stream("statement-handler-sync"); let (queue_sender, mut queue_receiver) = async_channel::bounded(100_000); @@ -176,6 +176,7 @@ impl StatementHandlerPrototype { let handler = StatementHandler { protocol_name: self.protocol_name, + notification_service: self.notification_service, propagate_timeout: (Box::pin(interval(PROPAGATE_TIMEOUT)) as Pin + Send>>) .fuse(), @@ -183,7 +184,6 @@ impl StatementHandlerPrototype { pending_statements_peers: HashMap::new(), network, sync, - net_event_stream: net_event_stream.fuse(), sync_event_stream: sync_event_stream.fuse(), peers: HashMap::new(), statement_store, @@ -219,10 +219,10 @@ pub struct StatementHandler< network: N, /// Syncing service. sync: S, - /// Stream of networking events. - net_event_stream: stream::Fuse + Send>>>, /// Receiver for syncing-related events. sync_event_stream: stream::Fuse + Send>>>, + /// Notification service. + notification_service: Box, // All connected peers peers: HashMap, statement_store: Arc, @@ -261,14 +261,6 @@ where log::warn!(target: LOG_TARGET, "Inconsistent state, no peers for pending statement!"); } }, - network_event = self.net_event_stream.next() => { - if let Some(network_event) = network_event { - self.handle_network_event(network_event).await; - } else { - // Networking has seemingly closed. 
Closing as well. - return; - } - }, sync_event = self.sync_event_stream.next() => { if let Some(sync_event) = sync_event { self.handle_sync_event(sync_event); @@ -277,6 +269,14 @@ where return; } } + event = self.notification_service.next_event().fuse() => { + if let Some(event) = event { + self.handle_notification_event(event) + } else { + // `Notifications` has seemingly closed. Closing as well. + return + } + } } } } @@ -306,14 +306,24 @@ where } } - async fn handle_network_event(&mut self, event: Event) { + fn handle_notification_event(&mut self, event: NotificationEvent) { match event { - Event::Dht(_) => {}, - Event::NotificationStreamOpened { remote, protocol, role, .. } - if protocol == self.protocol_name => - { + NotificationEvent::ValidateInboundSubstream { peer, handshake, result_tx, .. } => { + // only accept peers whose role can be determined + let result = self + .network + .peer_role(peer, handshake) + .map_or(ValidationResult::Reject, |_| ValidationResult::Accept); + let _ = result_tx.send(result); + }, + NotificationEvent::NotificationStreamOpened { peer, handshake, .. } => { + let Some(role) = self.network.peer_role(peer, handshake) else { + log::debug!(target: LOG_TARGET, "role for {peer} couldn't be determined"); + return + }; + let _was_in = self.peers.insert( - remote, + peer, Peer { known_statements: LruHashSet::new( NonZeroUsize::new(MAX_KNOWN_STATEMENTS).expect("Constant is nonzero"), @@ -323,39 +333,26 @@ where ); debug_assert!(_was_in.is_none()); }, - Event::NotificationStreamClosed { remote, protocol } - if protocol == self.protocol_name => - { - let _peer = self.peers.remove(&remote); + NotificationEvent::NotificationStreamClosed { peer } => { + let _peer = self.peers.remove(&peer); debug_assert!(_peer.is_some()); }, + NotificationEvent::NotificationReceived { peer, notification } => { + // Accept statements only when node is not major syncing + if self.sync.is_major_syncing() { + log::trace!( + target: LOG_TARGET, + "{peer}: Ignoring statements while major syncing or offline" + ); + return + } - Event::NotificationsReceived { remote, messages } => { - for (protocol, message) in messages { - if protocol != self.protocol_name { - continue - } - // Accept statements only when node is not major syncing - if self.sync.is_major_syncing() { - log::trace!( - target: LOG_TARGET, - "{remote}: Ignoring statements while major syncing or offline" - ); - continue - } - if let Ok(statements) = ::decode(&mut message.as_ref()) { - self.on_statements(remote, statements); - } else { - log::debug!( - target: LOG_TARGET, - "Failed to decode statement list from {remote}" - ); - } + if let Ok(statements) = ::decode(&mut notification.as_ref()) { + self.on_statements(peer, statements); + } else { + log::debug!(target: LOG_TARGET, "Failed to decode statement list from {peer}"); } }, - - // Not our concern. - Event::NotificationStreamOpened { .. } | Event::NotificationStreamClosed { .. 
} => {}, } } diff --git a/substrate/client/network/sync/src/engine.rs b/substrate/client/network/sync/src/engine.rs index 2cb8eab22f7..d7b024cd801 100644 --- a/substrate/client/network/sync/src/engine.rs +++ b/substrate/client/network/sync/src/engine.rs @@ -38,7 +38,7 @@ use crate::{ warp::{EncodedProof, WarpProofRequest, WarpSyncParams}, }; -use codec::{Decode, Encode}; +use codec::{Decode, DecodeAll, Encode}; use futures::{ channel::oneshot, future::{BoxFuture, Fuse}, @@ -61,9 +61,12 @@ use sc_network::{ FullNetworkConfiguration, NonDefaultSetConfig, NonReservedPeerMode, NotificationHandshake, ProtocolId, SetConfig, }, + peer_store::{PeerStoreHandle, PeerStoreProvider}, request_responses::{IfDisconnected, RequestFailure}, + service::traits::{Direction, NotificationEvent, ValidationResult}, + types::ProtocolName, utils::LruHashSet, - NotificationsSink, ProtocolName, ReputationChange, + NotificationService, ReputationChange, }; use sc_network_common::{ role::Roles, @@ -88,15 +91,15 @@ use std::{ time::{Duration, Instant}, }; -/// Log target for this file. -const LOG_TARGET: &'static str = "sync"; - /// Interval at which we perform time based maintenance const TICK_TIMEOUT: std::time::Duration = std::time::Duration::from_millis(1100); /// Maximum number of known block hashes to keep for a peer. const MAX_KNOWN_BLOCKS: usize = 1024; // ~32kb per peer + LruHashSet overhead +/// Logging target for the file. +const LOG_TARGET: &str = "sync"; + /// If the block announces stream to peer has been inactive for 30 seconds meaning local node /// has not sent or received block announcements to/from the peer, report the node for inactivity, /// disconnect it and attempt to establish connection to some other peer. @@ -226,8 +229,6 @@ pub struct Peer { pub info: ExtendedPeerInfo, /// Holds a set of blocks known to this peer. pub known_blocks: LruHashSet, - /// Notification sink. - sink: NotificationsSink, /// Is the peer inbound. inbound: bool, } @@ -252,9 +253,6 @@ pub struct SyncingEngine { /// Channel for receiving service commands service_rx: TracingUnboundedReceiver>, - /// Channel for receiving inbound connections from `Protocol`. - sync_events_rx: sc_utils::mpsc::TracingUnboundedReceiver>, - /// Assigned roles. roles: Roles, @@ -312,12 +310,18 @@ pub struct SyncingEngine { /// Prometheus metrics. metrics: Option, + /// Handle that is used to communicate with `sc_network::Notifications`. + notification_service: Box, + /// When the syncing was started. /// /// Stored as an `Option` so once the initial wait has passed, `SyncingEngine` /// can reset the peer timers and continue with the normal eviction process. syncing_started: Option, + /// Handle to `PeerStore`. + peer_store_handle: PeerStoreHandle, + /// Instant when the last notification was sent or received. 
last_notification_io: Instant, @@ -362,7 +366,7 @@ where block_downloader: Arc>, state_request_protocol_name: ProtocolName, warp_sync_protocol_name: Option, - sync_events_rx: sc_utils::mpsc::TracingUnboundedReceiver>, + peer_store_handle: PeerStoreHandle, ) -> Result<(Self, SyncingService, NonDefaultSetConfig), ClientError> { let mode = net_config.network_config.sync_mode; let max_parallel_downloads = net_config.network_config.max_parallel_downloads; @@ -387,7 +391,7 @@ where } for config in net_config.notification_protocols() { let peer_ids = config - .set_config + .set_config() .reserved_nodes .iter() .map(|info| info.peer_id) @@ -438,7 +442,7 @@ where let warp_sync_target_block_header_rx_fused = warp_sync_target_block_header_rx .map_or(futures::future::pending().boxed().fuse(), |rx| rx.boxed().fuse()); - let block_announce_config = Self::get_block_announce_proto_config( + let (block_announce_config, notification_service) = Self::get_block_announce_proto_config( protocol_id, fork_id, roles, @@ -450,7 +454,6 @@ where .flatten() .expect("Genesis block exists; qed"), ); - let block_announce_protocol_name = block_announce_config.notifications_protocol.clone(); let chain_sync = ChainSync::new( mode, @@ -460,6 +463,7 @@ where warp_sync_config, )?; + let block_announce_protocol_name = block_announce_config.protocol_name().clone(); let (tx, service_rx) = tracing_unbounded("mpsc_chain_sync", 100_000); let num_connected = Arc::new(AtomicUsize::new(0)); let is_major_syncing = Arc::new(AtomicBool::new(false)); @@ -496,7 +500,6 @@ where num_connected: num_connected.clone(), is_major_syncing: is_major_syncing.clone(), service_rx, - sync_events_rx, genesis_hash, important_peers, default_peers_set_no_slot_connected_peers: HashSet::new(), @@ -508,8 +511,10 @@ where num_in_peers: 0usize, max_in_peers, event_streams: Vec::new(), + notification_service, tick_timeout, syncing_started: None, + peer_store_handle, last_notification_io: Instant::now(), metrics: if let Some(r) = metrics_registry { match Metrics::register(r, is_major_syncing.clone()) { @@ -673,23 +678,11 @@ where }; self.last_notification_io = Instant::now(); - peer.sink.send_sync_notification(message.encode()); + let _ = self.notification_service.send_sync_notification(peer_id, message.encode()); } } } - /// Inform sync about new best imported block. 
- pub fn new_best_block_imported(&mut self, hash: B::Hash, number: NumberFor) { - log::debug!(target: LOG_TARGET, "New best block imported {hash:?}/#{number}"); - - self.chain_sync.update_chain_info(&hash, number); - self.network_service.set_notification_handshake( - self.block_announce_protocol_name.clone(), - BlockAnnouncesHandshake::::build(self.roles, number, hash, self.genesis_hash) - .encode(), - ) - } - pub async fn run(mut self) { self.syncing_started = Some(Instant::now()); @@ -698,8 +691,10 @@ where _ = self.tick_timeout.tick() => self.perform_periodic_actions(), command = self.service_rx.select_next_some() => self.process_service_command(command), - sync_event = self.sync_events_rx.select_next_some() => - self.process_sync_event(sync_event), + notification_event = self.notification_service.next_event() => match notification_event { + Some(event) => self.process_notification_event(event), + None => return, + }, warp_target_block_header = &mut self.warp_sync_target_block_header_rx_fused => self.pass_warp_sync_target_block_header(warp_target_block_header), response_event = self.pending_responses.select_next_some() => @@ -853,8 +848,20 @@ where } }, ToServiceCommand::AnnounceBlock(hash, data) => self.announce_block(hash, data), - ToServiceCommand::NewBestBlockImported(hash, number) => - self.new_best_block_imported(hash, number), + ToServiceCommand::NewBestBlockImported(hash, number) => { + log::debug!(target: "sync", "New best block imported {:?}/#{}", hash, number); + + self.chain_sync.update_chain_info(&hash, number); + let _ = self.notification_service.try_set_handshake( + BlockAnnouncesHandshake::::build( + self.roles, + number, + hash, + self.genesis_hash, + ) + .encode(), + ); + }, ToServiceCommand::Status(tx) => { let mut status = self.chain_sync.status(); status.num_connected_peers = self.peers.len() as u32; @@ -894,56 +901,60 @@ where } } - fn process_sync_event(&mut self, event: sc_network::SyncEvent) { + fn process_notification_event(&mut self, event: NotificationEvent) { match event { - sc_network::SyncEvent::NotificationStreamOpened { - remote, - received_handshake, - sink, - inbound, - tx, - } => match self.on_sync_peer_connected(remote, &received_handshake, sink, inbound) { - Ok(()) => { - let _ = tx.send(true); - }, - Err(()) => { - log::debug!( - target: LOG_TARGET, - "Failed to register peer {remote:?}: {received_handshake:?}", - ); - let _ = tx.send(false); - }, - }, - sc_network::SyncEvent::NotificationStreamClosed { remote } => { - if self.on_sync_peer_disconnected(remote).is_err() { - log::trace!( - target: LOG_TARGET, - "Disconnected peer which had earlier been refused by on_sync_peer_connected {}", - remote - ); - } + NotificationEvent::ValidateInboundSubstream { peer, handshake, result_tx } => { + let validation_result = self + .validate_connection(&peer, handshake, Direction::Inbound) + .map_or(ValidationResult::Reject, |_| ValidationResult::Accept); + + let _ = result_tx.send(validation_result); }, - sc_network::SyncEvent::NotificationsReceived { remote, messages } => { - for message in messages { - if self.peers.contains_key(&remote) { - if let Ok(announce) = BlockAnnounce::decode(&mut message.as_ref()) { - self.last_notification_io = Instant::now(); - self.push_block_announce_validation(remote, announce); - } else { - log::warn!(target: "sub-libp2p", "Failed to decode block announce"); + NotificationEvent::NotificationStreamOpened { peer, handshake, direction, .. 
} => { + log::debug!( + target: LOG_TARGET, + "Substream opened for {peer}, handshake {handshake:?}" + ); + + match self.validate_connection(&peer, handshake, direction) { + Ok(handshake) => { + if self.on_sync_peer_connected(peer, &handshake, direction).is_err() { + log::debug!(target: LOG_TARGET, "Failed to register peer {peer}"); + self.network_service + .disconnect_peer(peer, self.block_announce_protocol_name.clone()); } - } else { - log::trace!( - target: LOG_TARGET, - "Received sync for peer earlier refused by sync layer: {remote}", - ); - } + }, + Err(wrong_genesis) => { + log::debug!(target: LOG_TARGET, "`SyncingEngine` rejected {peer}"); + + if wrong_genesis { + self.peer_store_handle.report_peer(peer, rep::GENESIS_MISMATCH); + } + + self.network_service + .disconnect_peer(peer, self.block_announce_protocol_name.clone()); + }, } }, - sc_network::SyncEvent::NotificationSinkReplaced { remote, sink } => { - if let Some(peer) = self.peers.get_mut(&remote) { - peer.sink = sink; + NotificationEvent::NotificationStreamClosed { peer } => { + self.on_sync_peer_disconnected(peer); + }, + NotificationEvent::NotificationReceived { peer, notification } => { + if !self.peers.contains_key(&peer) { + log::error!( + target: LOG_TARGET, + "received notification from {peer} who had been earlier refused by `SyncingEngine`", + ); + return } + + let Ok(announce) = BlockAnnounce::decode(&mut notification.as_ref()) else { + log::warn!(target: LOG_TARGET, "failed to decode block announce"); + return + }; + + self.last_notification_io = Instant::now(); + self.push_block_announce_validation(peer, announce); }, } } @@ -965,129 +976,167 @@ where /// Called by peer when it is disconnecting. /// /// Returns a result if the handshake of this peer was indeed accepted. - fn on_sync_peer_disconnected(&mut self, peer_id: PeerId) -> Result<(), ()> { - if let Some(info) = self.peers.remove(&peer_id) { - if self.important_peers.contains(&peer_id) { - log::warn!(target: LOG_TARGET, "Reserved peer {peer_id} disconnected"); - } else { - log::debug!(target: LOG_TARGET, "{peer_id} disconnected"); - } - - if !self.default_peers_set_no_slot_connected_peers.remove(&peer_id) && - info.inbound && info.info.roles.is_full() - { - match self.num_in_peers.checked_sub(1) { - Some(value) => { - self.num_in_peers = value; - }, - None => { - log::error!( - target: LOG_TARGET, - "trying to disconnect an inbound node which is not counted as inbound" - ); - debug_assert!(false); - }, - } - } - - self.chain_sync.peer_disconnected(&peer_id); + fn on_sync_peer_disconnected(&mut self, peer_id: PeerId) { + let Some(info) = self.peers.remove(&peer_id) else { + log::debug!(target: LOG_TARGET, "{peer_id} does not exist in `SyncingEngine`"); + return + }; - self.pending_responses.remove(&peer_id); - self.event_streams.retain(|stream| { - stream.unbounded_send(SyncEvent::PeerDisconnected(peer_id)).is_ok() - }); - Ok(()) + if self.important_peers.contains(&peer_id) { + log::warn!(target: LOG_TARGET, "Reserved peer {peer_id} disconnected"); } else { - Err(()) + log::debug!(target: LOG_TARGET, "{peer_id} disconnected"); } - } - /// Called on the first connection between two peers on the default set, after their exchange - /// of handshake. - /// - /// Returns `Ok` if the handshake is accepted and the peer added to the list of peers we sync - /// from. 
- fn on_sync_peer_connected( - &mut self, - peer_id: PeerId, - status: &BlockAnnouncesHandshake, - sink: NotificationsSink, - inbound: bool, - ) -> Result<(), ()> { - log::trace!(target: LOG_TARGET, "New peer {peer_id} {status:?}"); - - if self.peers.contains_key(&peer_id) { - log::error!( - target: LOG_TARGET, - "Called on_sync_peer_connected with already connected peer {peer_id}", - ); - debug_assert!(false); - return Err(()) + if !self.default_peers_set_no_slot_connected_peers.remove(&peer_id) && + info.inbound && info.info.roles.is_full() + { + match self.num_in_peers.checked_sub(1) { + Some(value) => { + self.num_in_peers = value; + }, + None => { + log::error!( + target: LOG_TARGET, + "trying to disconnect an inbound node which is not counted as inbound" + ); + debug_assert!(false); + }, + } } - if status.genesis_hash != self.genesis_hash { - self.network_service.report_peer(peer_id, rep::GENESIS_MISMATCH); + self.chain_sync.peer_disconnected(&peer_id); + self.pending_responses.remove(&peer_id); + self.event_streams + .retain(|stream| stream.unbounded_send(SyncEvent::PeerDisconnected(peer_id)).is_ok()); + } + /// Validate received handshake. + fn validate_handshake( + &mut self, + peer_id: &PeerId, + handshake: Vec, + ) -> Result, bool> { + log::trace!(target: LOG_TARGET, "Validate handshake for {peer_id}"); + + let handshake = as DecodeAll>::decode_all(&mut &handshake[..]) + .map_err(|error| { + log::debug!(target: LOG_TARGET, "Failed to decode handshake for {peer_id}: {error:?}"); + false + })?; + + if handshake.genesis_hash != self.genesis_hash { if self.important_peers.contains(&peer_id) { log::error!( target: LOG_TARGET, - "Reserved peer id `{}` is on a different chain (our genesis: {} theirs: {})", - peer_id, + "Reserved peer id `{peer_id}` is on a different chain (our genesis: {} theirs: {})", self.genesis_hash, - status.genesis_hash, + handshake.genesis_hash, ); } else if self.boot_node_ids.contains(&peer_id) { log::error!( target: LOG_TARGET, - "Bootnode with peer id `{}` is on a different chain (our genesis: {} theirs: {})", - peer_id, + "Bootnode with peer id `{peer_id}` is on a different chain (our genesis: {} theirs: {})", self.genesis_hash, - status.genesis_hash, + handshake.genesis_hash, ); } else { log::debug!( target: LOG_TARGET, "Peer is on different chain (our genesis: {} theirs: {})", - self.genesis_hash, status.genesis_hash + self.genesis_hash, + handshake.genesis_hash ); } - return Err(()) + return Err(true) } - let no_slot_peer = self.default_peers_set_no_slot_peers.contains(&peer_id); - let this_peer_reserved_slot: usize = if no_slot_peer { 1 } else { 0 }; + Ok(handshake) + } - // make sure to accept no more than `--in-peers` many full nodes - if !no_slot_peer && - status.roles.is_full() && - inbound && self.num_in_peers == self.max_in_peers - { - log::debug!( + /// Validate connection. + // NOTE Returning `Err(bool)` is a really ugly hack to work around the issue + // that `ProtocolController` thinks the peer is connected when in fact it can + // still be under validation. If the peer has different genesis than the + // local node the validation fails but the peer cannot be reported in + // `validate_connection()` as that is also called by + // `ValidateInboundSubstream` which means that the peer is still being + // validated and banning the peer when handling that event would + // result in peer getting dropped twice.
+ // + // The proper way to fix this is to integrate `ProtocolController` more + // tightly with `NotificationService` or add an additional API call for + // banning pre-accepted peers (which is not desirable) + fn validate_connection( + &mut self, + peer_id: &PeerId, + handshake: Vec, + direction: Direction, + ) -> Result, bool> { + log::trace!(target: LOG_TARGET, "New peer {peer_id} {handshake:?}"); + + let handshake = self.validate_handshake(peer_id, handshake)?; + + if self.peers.contains_key(&peer_id) { + log::error!( target: LOG_TARGET, - "All inbound slots have been consumed, rejecting {peer_id}", + "Called `validate_connection()` with already connected peer {peer_id}", ); - return Err(()) + debug_assert!(false); + return Err(false) } - if status.roles.is_full() && + let no_slot_peer = self.default_peers_set_no_slot_peers.contains(&peer_id); + let this_peer_reserved_slot: usize = if no_slot_peer { 1 } else { 0 }; + + if handshake.roles.is_full() && self.chain_sync.num_peers() >= self.default_peers_set_num_full + self.default_peers_set_no_slot_connected_peers.len() + this_peer_reserved_slot { log::debug!(target: LOG_TARGET, "Too many full nodes, rejecting {peer_id}"); - return Err(()) + return Err(false) } - if status.roles.is_light() && + // make sure to accept no more than `--in-peers` many full nodes + if !no_slot_peer && + handshake.roles.is_full() && + direction.is_inbound() && + self.num_in_peers == self.max_in_peers + { + log::debug!(target: LOG_TARGET, "All inbound slots have been consumed, rejecting {peer_id}"); + return Err(false) + } + + // make sure that all slots are not occupied by light peers + // + // `ChainSync` only accepts full peers whereas `SyncingEngine` accepts both full and light + // peers. Verify that there is a slot in `SyncingEngine` for the inbound light peer + if handshake.roles.is_light() && (self.peers.len() - self.chain_sync.num_peers()) >= self.default_peers_set_num_light { - // Make sure that not all slots are occupied by light clients. log::debug!(target: LOG_TARGET, "Too many light nodes, rejecting {peer_id}"); - return Err(()) + return Err(false) } + Ok(handshake) + } + + /// Called on the first connection between two peers on the default set, after their exchange + /// of handshake. + /// + /// Returns `Ok` if the handshake is accepted and the peer added to the list of peers we sync + /// from. 
+ fn on_sync_peer_connected( + &mut self, + peer_id: PeerId, + status: &BlockAnnouncesHandshake, + direction: Direction, + ) -> Result<(), ()> { + log::trace!(target: LOG_TARGET, "New peer {peer_id} {status:?}"); + let peer = Peer { info: ExtendedPeerInfo { roles: status.roles, @@ -1097,8 +1146,7 @@ where known_blocks: LruHashSet::new( NonZeroUsize::new(MAX_KNOWN_BLOCKS).expect("Constant is nonzero"), ), - sink, - inbound, + inbound: direction.is_inbound(), }; self.chain_sync.new_peer(peer_id, peer.info.best_hash, peer.info.best_number); @@ -1106,10 +1154,11 @@ where log::debug!(target: LOG_TARGET, "Connected {peer_id}"); self.peers.insert(peer_id, peer); + self.peer_store_handle.set_peer_role(&peer_id, status.roles.into()); - if no_slot_peer { + if self.default_peers_set_no_slot_peers.contains(&peer_id) { self.default_peers_set_no_slot_connected_peers.insert(peer_id); - } else if inbound && status.roles.is_full() { + } else if direction.is_inbound() && status.roles.is_full() { self.num_in_peers += 1; } @@ -1333,7 +1382,7 @@ where best_number: NumberFor, best_hash: B::Hash, genesis_hash: B::Hash, - ) -> NonDefaultSetConfig { + ) -> (NonDefaultSetConfig, Box) { let block_announces_protocol = { let genesis_hash = genesis_hash.as_ref(); if let Some(ref fork_id) = fork_id { @@ -1347,14 +1396,11 @@ where } }; - NonDefaultSetConfig { - notifications_protocol: block_announces_protocol.into(), - fallback_names: iter::once( - format!("/{}/block-announces/1", protocol_id.as_ref()).into(), - ) - .collect(), - max_notification_size: MAX_BLOCK_ANNOUNCE_SIZE, - handshake: Some(NotificationHandshake::new(BlockAnnouncesHandshake::::build( + NonDefaultSetConfig::new( + block_announces_protocol.into(), + iter::once(format!("/{}/block-announces/1", protocol_id.as_ref()).into()).collect(), + MAX_BLOCK_ANNOUNCE_SIZE, + Some(NotificationHandshake::new(BlockAnnouncesHandshake::::build( roles, best_number, best_hash, @@ -1362,13 +1408,13 @@ where ))), // NOTE: `set_config` will be ignored by `protocol.rs` as the block announcement // protocol is still hardcoded into the peerset. - set_config: SetConfig { + SetConfig { in_peers: 0, out_peers: 0, reserved_nodes: Vec::new(), non_reserved_mode: NonReservedPeerMode::Deny, }, - } + ) } /// Import blocks. diff --git a/substrate/client/network/sync/src/service/mock.rs b/substrate/client/network/sync/src/service/mock.rs index 885eb1f8da5..47986a71d01 100644 --- a/substrate/client/network/sync/src/service/mock.rs +++ b/substrate/client/network/sync/src/service/mock.rs @@ -27,6 +27,7 @@ use sc_network::{ NetworkNotification, NetworkPeers, NetworkRequest, NetworkSyncForkRequest, NotificationSenderError, NotificationSenderT, ReputationChange, }; +use sc_network_common::role::ObservedRole; use sp_runtime::traits::{Block as BlockT, NumberFor}; use std::collections::HashSet; @@ -105,6 +106,7 @@ mockall::mock! 
{ peers: Vec ) -> Result<(), String>; fn sync_num_connected(&self) -> usize; + fn peer_role(&self, peer_id: PeerId, handshake: Vec) -> Option; } #[async_trait::async_trait] diff --git a/substrate/client/network/test/src/lib.rs b/substrate/client/network/test/src/lib.rs index cfc3cb7af3f..71f13b74a53 100644 --- a/substrate/client/network/test/src/lib.rs +++ b/substrate/client/network/test/src/lib.rs @@ -58,7 +58,7 @@ use sc_network::{ request_responses::ProtocolConfig as RequestResponseConfig, types::ProtocolName, Multiaddr, NetworkBlock, NetworkService, NetworkStateInfo, NetworkSyncForkRequest, - NetworkWorker, + NetworkWorker, NotificationService, }; use sc_network_common::role::Roles; use sc_network_light::light_client_requests::handler::LightClientRequestHandler; @@ -239,6 +239,7 @@ pub struct Peer { imported_blocks_stream: Pin> + Send>>, finality_notification_stream: Pin> + Send>>, listen_addr: Multiaddr, + notification_services: HashMap>, } impl Peer @@ -263,8 +264,8 @@ where } /// Returns the number of peers we're connected to. - pub fn num_peers(&self) -> usize { - self.network.num_connected_peers() + pub async fn num_peers(&self) -> usize { + self.sync_service.status().await.unwrap().num_connected_peers as usize } /// Returns the number of downloaded blocks. @@ -502,10 +503,19 @@ where self.network.service() } + /// Get `SyncingService`. pub fn sync_service(&self) -> &Arc> { &self.sync_service } + /// Take notification handle for enabled protocol. + pub fn take_notification_service( + &mut self, + protocol: &ProtocolName, + ) -> Option> { + self.notification_services.remove(protocol) + } + /// Get a reference to the network worker. pub fn network(&self) -> &NetworkWorker::Hash> { &self.network @@ -778,6 +788,23 @@ pub trait TestNetFactory: Default + Sized + Send { network_config.transport = TransportConfig::MemoryOnly; network_config.listen_addresses = vec![listen_addr.clone()]; network_config.allow_non_globals_in_dht = true; + + let (notif_configs, notif_handles): (Vec<_>, Vec<_>) = config + .notifications_protocols + .into_iter() + .map(|p| { + let (config, handle) = NonDefaultSetConfig::new( + p.clone(), + Vec::new(), + 1024 * 1024, + None, + Default::default(), + ); + + (config, (p, handle)) + }) + .unzip(); + if let Some(connect_to) = config.connect_to_peers { let addrs = connect_to .iter() @@ -849,11 +876,16 @@ pub trait TestNetFactory: Default + Sized + Send { protocol_config }; + let peer_store = PeerStore::new( + network_config.boot_nodes.iter().map(|bootnode| bootnode.peer_id).collect(), + ); + let peer_store_handle = peer_store.handle(); + self.spawn_task(peer_store.run().boxed()); + let block_announce_validator = config .block_announce_validator .unwrap_or_else(|| Box::new(DefaultBlockAnnounceValidator)); - let (tx, rx) = sc_utils::mpsc::tracing_unbounded("mpsc_syncing_engine_protocol", 100_000); let (engine, sync_service, block_announce_config) = sc_network_sync::engine::SyncingEngine::new( Roles::from(if config.is_authority { &Role::Authority } else { &Role::Full }), @@ -869,7 +901,7 @@ pub trait TestNetFactory: Default + Sized + Send { block_relay_params.downloader, state_request_protocol_config.name.clone(), Some(warp_protocol_config.name.clone()), - rx, + peer_store_handle.clone(), ) .unwrap(); let sync_service_import_queue = Box::new(sync_service.clone()); @@ -887,22 +919,10 @@ pub trait TestNetFactory: Default + Sized + Send { full_net_config.add_request_response_protocol(config); } - for protocol in config.notifications_protocols { - 
full_net_config.add_notification_protocol(NonDefaultSetConfig { - notifications_protocol: protocol, - fallback_names: Vec::new(), - max_notification_size: 1024 * 1024, - handshake: None, - set_config: Default::default(), - }); + for config in notif_configs { + full_net_config.add_notification_protocol(config); } - let peer_store = PeerStore::new( - network_config.boot_nodes.iter().map(|bootnode| bootnode.peer_id).collect(), - ); - let peer_store_handle = peer_store.handle(); - self.spawn_task(peer_store.run().boxed()); - let genesis_hash = client.hash(Zero::zero()).ok().flatten().expect("Genesis block exists; qed"); let network = NetworkWorker::new(sc_network::config::Params { @@ -917,7 +937,6 @@ pub trait TestNetFactory: Default + Sized + Send { fork_id, metrics_registry: None, block_announce_config, - tx, }) .unwrap(); @@ -953,6 +972,7 @@ pub trait TestNetFactory: Default + Sized + Send { backend: Some(backend), imported_blocks_stream, finality_notification_stream, + notification_services: HashMap::from_iter(notif_handles.into_iter()), block_import, verifier, network, @@ -967,20 +987,6 @@ pub trait TestNetFactory: Default + Sized + Send { tokio::spawn(f); } - /// Polls the testnet until all peers are connected to each other. - /// - /// Must be executed in a task context. - fn poll_until_connected(&mut self, cx: &mut FutureContext) -> Poll<()> { - self.poll(cx); - - let num_peers = self.peers().len(); - if self.peers().iter().all(|p| p.num_peers() == num_peers - 1) { - return Poll::Ready(()) - } - - Poll::Pending - } - async fn is_in_sync(&mut self) -> bool { let mut highest = None; let peers = self.peers_mut(); @@ -1058,10 +1064,27 @@ pub trait TestNetFactory: Default + Sized + Send { } /// Run the network until all peers are connected to each other. - /// - /// Calls `poll_until_connected` repeatedly with the runtime passed as parameter. async fn run_until_connected(&mut self) { - futures::future::poll_fn::<(), _>(|cx| self.poll_until_connected(cx)).await; + let num_peers = self.peers().len(); + let sync_services = + self.peers().iter().map(|info| info.sync_service.clone()).collect::>(); + + 'outer: loop { + for sync_service in &sync_services { + if sync_service.status().await.unwrap().num_connected_peers as usize != + num_peers - 1 + { + futures::future::poll_fn::<(), _>(|cx| { + self.poll(cx); + Poll::Ready(()) + }) + .await; + continue 'outer + } + } + + break + } } /// Polls the testnet. Processes all the pending actions. 
diff --git a/substrate/client/network/test/src/service.rs b/substrate/client/network/test/src/service.rs index 62d7f9f9d1b..800c0d4369c 100644 --- a/substrate/client/network/test/src/service.rs +++ b/substrate/client/network/test/src/service.rs @@ -24,8 +24,9 @@ use sc_network::{ config::{self, FullNetworkConfiguration, MultiaddrWithPeerId, ProtocolId, TransportConfig}, event::Event, peer_store::PeerStore, - NetworkEventStream, NetworkNotification, NetworkPeers, NetworkService, NetworkStateInfo, - NetworkWorker, + service::traits::{NotificationEvent, ValidationResult}, + NetworkEventStream, NetworkPeers, NetworkService, NetworkStateInfo, NetworkWorker, + NotificationService, }; use sc_network_common::role::Roles; use sc_network_light::light_client_requests::handler::LightClientRequestHandler; @@ -116,7 +117,7 @@ impl TestNetworkBuilder { self } - pub fn build(mut self) -> TestNetwork { + pub fn build(mut self) -> (TestNetwork, Option>) { let client = self.client.as_mut().map_or( Arc::new(TestClientBuilder::with_default_backend().build_with_longest_chain().0), |v| v.clone(), @@ -183,7 +184,12 @@ impl TestNetworkBuilder { protocol_config }; - let (tx, rx) = sc_utils::mpsc::tracing_unbounded("mpsc_syncing_engine_protocol", 100_000); + let peer_store = PeerStore::new( + network_config.boot_nodes.iter().map(|bootnode| bootnode.peer_id).collect(), + ); + let peer_store_handle = peer_store.handle(); + tokio::spawn(peer_store.run().boxed()); + let (engine, chain_sync_service, block_announce_config) = SyncingEngine::new( Roles::from(&config::Role::Full), client.clone(), @@ -198,24 +204,27 @@ impl TestNetworkBuilder { block_relay_params.downloader, state_request_protocol_config.name.clone(), None, - rx, + peer_store_handle.clone(), ) .unwrap(); let mut link = self.link.unwrap_or(Box::new(chain_sync_service.clone())); - if !self.notification_protocols.is_empty() { + let handle = if !self.notification_protocols.is_empty() { for config in self.notification_protocols { full_net_config.add_notification_protocol(config); } + None } else { - full_net_config.add_notification_protocol(config::NonDefaultSetConfig { - notifications_protocol: PROTOCOL_NAME.into(), - fallback_names: Vec::new(), - max_notification_size: 1024 * 1024, - handshake: None, - set_config: self.set_config.unwrap_or_default(), - }); - } + let (config, handle) = config::NonDefaultSetConfig::new( + PROTOCOL_NAME.into(), + Vec::new(), + 1024 * 1024, + None, + self.set_config.unwrap_or_default(), + ); + full_net_config.add_notification_protocol(config); + Some(handle) + }; for config in [ block_relay_params.request_response_config, @@ -225,12 +234,6 @@ impl TestNetworkBuilder { full_net_config.add_request_response_protocol(config); } - let peer_store = PeerStore::new( - network_config.boot_nodes.iter().map(|bootnode| bootnode.peer_id).collect(), - ); - let peer_store_handle = peer_store.handle(); - tokio::spawn(peer_store.run().boxed()); - let genesis_hash = client.hash(Zero::zero()).ok().flatten().expect("Genesis block exists; qed"); let worker = NetworkWorker::< @@ -248,7 +251,6 @@ impl TestNetworkBuilder { protocol_id, fork_id, metrics_registry: None, - tx, }) .unwrap(); @@ -268,7 +270,7 @@ impl TestNetworkBuilder { }); tokio::spawn(engine.run()); - TestNetwork::new(worker) + (TestNetwork::new(worker), handle) } } @@ -276,18 +278,18 @@ impl TestNetworkBuilder { /// The nodes are connected together and have the `PROTOCOL_NAME` protocol registered. 
fn build_nodes_one_proto() -> ( Arc, - impl Stream, + Option>, Arc, - impl Stream, + Option>, ) { let listen_addr = config::build_multiaddr![Memory(rand::random::())]; - let (node1, events_stream1) = TestNetworkBuilder::new() + let (network1, handle1) = TestNetworkBuilder::new() .with_listen_addresses(vec![listen_addr.clone()]) - .build() - .start_network(); + .build(); + let (node1, _) = network1.start_network(); - let (node2, events_stream2) = TestNetworkBuilder::new() + let (network2, handle2) = TestNetworkBuilder::new() .with_set_config(config::SetConfig { reserved_nodes: vec![MultiaddrWithPeerId { multiaddr: listen_addr, @@ -295,10 +297,11 @@ fn build_nodes_one_proto() -> ( }], ..Default::default() }) - .build() - .start_network(); + .build(); + + let (node2, _) = network2.start_network(); - (node1, events_stream1, node2, events_stream2) + (node1, handle1, node2, handle2) } #[tokio::test] @@ -306,22 +309,15 @@ async fn notifications_state_consistent() { // Runs two nodes and ensures that events are propagated out of the API in a consistent // correct order, which means no notification received on a closed substream. - let (node1, mut events_stream1, node2, mut events_stream2) = build_nodes_one_proto(); + let (node1, handle1, node2, handle2) = build_nodes_one_proto(); + let (mut handle1, mut handle2) = (handle1.unwrap(), handle2.unwrap()); // Write some initial notifications that shouldn't get through. for _ in 0..(rand::random::() % 5) { - node1.write_notification( - node2.local_peer_id(), - PROTOCOL_NAME.into(), - b"hello world".to_vec(), - ); + let _ = handle1.send_sync_notification(&node2.local_peer_id(), b"hello world".to_vec()); } for _ in 0..(rand::random::() % 5) { - node2.write_notification( - node1.local_peer_id(), - PROTOCOL_NAME.into(), - b"hello world".to_vec(), - ); + let _ = handle2.send_sync_notification(&node1.local_peer_id(), b"hello world".to_vec()); } // True if we have an active substream from node1 to node2. @@ -343,18 +339,10 @@ async fn notifications_state_consistent() { // Start by sending a notification from node1 to node2 and vice-versa. Part of the // test consists in ensuring that notifications get ignored if the stream isn't open. if rand::random::() % 5 >= 3 { - node1.write_notification( - node2.local_peer_id(), - PROTOCOL_NAME.into(), - b"hello world".to_vec(), - ); + let _ = handle1.send_sync_notification(&node2.local_peer_id(), b"hello world".to_vec()); } if rand::random::() % 5 >= 3 { - node2.write_notification( - node1.local_peer_id(), - PROTOCOL_NAME.into(), - b"hello world".to_vec(), - ); + let _ = handle2.send_sync_notification(&node1.local_peer_id(), b"hello world".to_vec()); } // Also randomly disconnect the two nodes from time to time. @@ -367,8 +355,8 @@ async fn notifications_state_consistent() { // Grab next event from either `events_stream1` or `events_stream2`. let next_event = { - let next1 = events_stream1.next(); - let next2 = events_stream2.next(); + let next1 = handle1.next_event(); + let next2 = handle2.next_event(); // We also await on a small timer, otherwise it is possible for the test to wait // forever while nothing at all happens on the network. let continue_test = futures_timer::Delay::new(Duration::from_millis(20)); @@ -383,58 +371,55 @@ async fn notifications_state_consistent() { }; match next_event { - future::Either::Left(Event::NotificationStreamOpened { remote, protocol, .. 
}) => - if protocol == PROTOCOL_NAME.into() { - something_happened = true; - assert!(!node1_to_node2_open); - node1_to_node2_open = true; - assert_eq!(remote, node2.local_peer_id()); - }, - future::Either::Right(Event::NotificationStreamOpened { remote, protocol, .. }) => - if protocol == PROTOCOL_NAME.into() { - something_happened = true; - assert!(!node2_to_node1_open); - node2_to_node1_open = true; - assert_eq!(remote, node1.local_peer_id()); - }, - future::Either::Left(Event::NotificationStreamClosed { remote, protocol, .. }) => - if protocol == PROTOCOL_NAME.into() { - assert!(node1_to_node2_open); - node1_to_node2_open = false; - assert_eq!(remote, node2.local_peer_id()); - }, - future::Either::Right(Event::NotificationStreamClosed { remote, protocol, .. }) => - if protocol == PROTOCOL_NAME.into() { - assert!(node2_to_node1_open); - node2_to_node1_open = false; - assert_eq!(remote, node1.local_peer_id()); - }, - future::Either::Left(Event::NotificationsReceived { remote, .. }) => { + future::Either::Left(NotificationEvent::ValidateInboundSubstream { + result_tx, .. + }) => { + result_tx.send(ValidationResult::Accept).unwrap(); + }, + future::Either::Right(NotificationEvent::ValidateInboundSubstream { + result_tx, + .. + }) => { + result_tx.send(ValidationResult::Accept).unwrap(); + }, + future::Either::Left(NotificationEvent::NotificationStreamOpened { peer, .. }) => { + something_happened = true; + assert!(!node1_to_node2_open); + node1_to_node2_open = true; + assert_eq!(peer, node2.local_peer_id()); + }, + future::Either::Right(NotificationEvent::NotificationStreamOpened { peer, .. }) => { + something_happened = true; + assert!(!node2_to_node1_open); + node2_to_node1_open = true; + assert_eq!(peer, node1.local_peer_id()); + }, + future::Either::Left(NotificationEvent::NotificationStreamClosed { peer, .. }) => { assert!(node1_to_node2_open); - assert_eq!(remote, node2.local_peer_id()); + node1_to_node2_open = false; + assert_eq!(peer, node2.local_peer_id()); + }, + future::Either::Right(NotificationEvent::NotificationStreamClosed { peer, .. }) => { + assert!(node2_to_node1_open); + node2_to_node1_open = false; + assert_eq!(peer, node1.local_peer_id()); + }, + future::Either::Left(NotificationEvent::NotificationReceived { peer, .. }) => { + assert!(node1_to_node2_open); + assert_eq!(peer, node2.local_peer_id()); if rand::random::() % 5 >= 4 { - node1.write_notification( - node2.local_peer_id(), - PROTOCOL_NAME.into(), - b"hello world".to_vec(), - ); + let _ = handle1 + .send_sync_notification(&node2.local_peer_id(), b"hello world".to_vec()); } }, - future::Either::Right(Event::NotificationsReceived { remote, .. }) => { + future::Either::Right(NotificationEvent::NotificationReceived { peer, .. }) => { assert!(node2_to_node1_open); - assert_eq!(remote, node1.local_peer_id()); + assert_eq!(peer, node1.local_peer_id()); if rand::random::() % 5 >= 4 { - node2.write_notification( - node1.local_peer_id(), - PROTOCOL_NAME.into(), - b"hello world".to_vec(), - ); + let _ = handle2 + .send_sync_notification(&node1.local_peer_id(), b"hello world".to_vec()); } }, - - // Add new events here. 
- future::Either::Left(Event::Dht(_)) => {}, - future::Either::Right(Event::Dht(_)) => {}, }; } } @@ -444,20 +429,29 @@ async fn lots_of_incoming_peers_works() { sp_tracing::try_init_simple(); let listen_addr = config::build_multiaddr![Memory(rand::random::())]; - let (main_node, _) = TestNetworkBuilder::new() + let (main_node, handle1) = TestNetworkBuilder::new() .with_listen_addresses(vec![listen_addr.clone()]) .with_set_config(config::SetConfig { in_peers: u32::MAX, ..Default::default() }) - .build() - .start_network(); + .build(); + let mut handle1 = handle1.unwrap(); + let (main_node, _) = main_node.start_network(); let main_node_peer_id = main_node.local_peer_id(); + tokio::spawn(async move { + while let Some(event) = handle1.next_event().await { + if let NotificationEvent::ValidateInboundSubstream { result_tx, .. } = event { + result_tx.send(ValidationResult::Accept).unwrap(); + } + } + }); + // We spawn background tasks and push them in this `Vec`. They will all be waited upon before // this test ends. let mut background_tasks_to_wait = Vec::new(); for _ in 0..32 { - let (_dialing_node, event_stream) = TestNetworkBuilder::new() + let (dialing_node, handle) = TestNetworkBuilder::new() .with_set_config(config::SetConfig { reserved_nodes: vec![MultiaddrWithPeerId { multiaddr: listen_addr.clone(), @@ -465,8 +459,9 @@ async fn lots_of_incoming_peers_works() { }], ..Default::default() }) - .build() - .start_network(); + .build(); + let mut handle = handle.unwrap(); + let (_, _) = dialing_node.start_network(); background_tasks_to_wait.push(tokio::spawn(async move { // Create a dummy timer that will "never" fire, and that will be overwritten when we @@ -474,34 +469,23 @@ async fn lots_of_incoming_peers_works() { // make the code below way more complicated. let mut timer = futures_timer::Delay::new(Duration::from_secs(3600 * 24 * 7)).fuse(); - let mut event_stream = event_stream.fuse(); - let mut sync_protocol_name = None; loop { futures::select! { _ = timer => { // Test succeeds when timer fires. return; } - ev = event_stream.next() => { - match ev.unwrap() { - Event::NotificationStreamOpened { protocol, remote, .. } => { - if let None = sync_protocol_name { - sync_protocol_name = Some(protocol.clone()); - } - - assert_eq!(remote, main_node_peer_id); - // Test succeeds after 5 seconds. This timer is here in order to - // detect a potential problem after opening. - timer = futures_timer::Delay::new(Duration::from_secs(5)).fuse(); - } - Event::NotificationStreamClosed { protocol, .. } => { - if Some(protocol) != sync_protocol_name { - // Test failed. - panic!(); - } - } - _ => {} + ev = handle.next_event().fuse() => match ev.unwrap() { + NotificationEvent::ValidateInboundSubstream { result_tx, .. } => { + result_tx.send(ValidationResult::Accept).unwrap(); } + NotificationEvent::NotificationStreamOpened { peer, .. } => { + assert_eq!(peer, main_node_peer_id); + // Test succeeds after 5 seconds. This timer is here in order to + // detect a potential problem after opening. 
+ timer = futures_timer::Delay::new(Duration::from_secs(5)).fuse(); + } + _ => {} } } } @@ -518,33 +502,27 @@ async fn notifications_back_pressure() { const TOTAL_NOTIFS: usize = 10_000; - let (node1, mut events_stream1, node2, mut events_stream2) = build_nodes_one_proto(); + let (_node1, handle1, node2, handle2) = build_nodes_one_proto(); + let (mut handle1, mut handle2) = (handle1.unwrap(), handle2.unwrap()); let node2_id = node2.local_peer_id(); let receiver = tokio::spawn(async move { let mut received_notifications = 0; - let mut sync_protocol_name = None; while received_notifications < TOTAL_NOTIFS { - match events_stream2.next().await.unwrap() { - Event::NotificationStreamOpened { protocol, .. } => { - if let None = sync_protocol_name { - sync_protocol_name = Some(protocol); - } + match handle2.next_event().await.unwrap() { + NotificationEvent::ValidateInboundSubstream { result_tx, .. } => { + result_tx.send(ValidationResult::Accept).unwrap(); }, - Event::NotificationStreamClosed { protocol, .. } => { - if Some(&protocol) != sync_protocol_name.as_ref() { - panic!() - } + NotificationEvent::NotificationReceived { notification, .. } => { + assert_eq!( + notification, + format!("hello #{}", received_notifications).into_bytes() + ); + received_notifications += 1; }, - Event::NotificationsReceived { messages, .. } => - for message in messages { - assert_eq!(message.0, PROTOCOL_NAME.into()); - assert_eq!(message.1, format!("hello #{}", received_notifications)); - received_notifications += 1; - }, _ => {}, - }; + } if rand::random::() < 2 { tokio::time::sleep(Duration::from_millis(rand::random::() % 750)).await; @@ -554,20 +532,20 @@ async fn notifications_back_pressure() { // Wait for the `NotificationStreamOpened`. loop { - match events_stream1.next().await.unwrap() { - Event::NotificationStreamOpened { .. } => break, + match handle1.next_event().await.unwrap() { + NotificationEvent::ValidateInboundSubstream { result_tx, .. } => { + result_tx.send(ValidationResult::Accept).unwrap(); + }, + NotificationEvent::NotificationStreamOpened { .. } => break, _ => {}, }; } // Sending! for num in 0..TOTAL_NOTIFS { - let notif = node1.notification_sender(node2_id, PROTOCOL_NAME.into()).unwrap(); - notif - .ready() + handle1 + .send_async_notification(&node2_id, format!("hello #{}", num).into_bytes()) .await - .unwrap() - .send(format!("hello #{}", num).into_bytes()) .unwrap(); } @@ -576,28 +554,31 @@ async fn notifications_back_pressure() { #[tokio::test] async fn fallback_name_working() { + sp_tracing::try_init_simple(); // Node 1 supports the protocols "new" and "old". Node 2 only supports "old". Checks whether // they can connect. 
const NEW_PROTOCOL_NAME: &str = "/new-shiny-protocol-that-isnt-PROTOCOL_NAME"; let listen_addr = config::build_multiaddr![Memory(rand::random::())]; - let (node1, mut events_stream1) = TestNetworkBuilder::new() - .with_notification_protocol(config::NonDefaultSetConfig { - notifications_protocol: NEW_PROTOCOL_NAME.into(), - fallback_names: vec![PROTOCOL_NAME.into()], - max_notification_size: 1024 * 1024, - handshake: None, - set_config: Default::default(), - }) + let (config, mut handle1) = config::NonDefaultSetConfig::new( + NEW_PROTOCOL_NAME.into(), + vec![PROTOCOL_NAME.into()], + 1024 * 1024, + None, + Default::default(), + ); + let (network1, _) = TestNetworkBuilder::new() + .with_notification_protocol(config) .with_config(config::NetworkConfiguration { listen_addresses: vec![listen_addr.clone()], transport: TransportConfig::MemoryOnly, ..config::NetworkConfiguration::new_local() }) - .build() - .start_network(); + .build(); - let (_, mut events_stream2) = TestNetworkBuilder::new() + let (node1, _) = network1.start_network(); + + let (network2, handle2) = TestNetworkBuilder::new() .with_set_config(config::SetConfig { reserved_nodes: vec![MultiaddrWithPeerId { multiaddr: listen_addr, @@ -605,34 +586,38 @@ async fn fallback_name_working() { }], ..Default::default() }) - .build() - .start_network(); + .build(); + let mut handle2 = handle2.unwrap(); + let _ = network2.start_network(); let receiver = tokio::spawn(async move { // Wait for the `NotificationStreamOpened`. loop { - match events_stream2.next().await.unwrap() { - Event::NotificationStreamOpened { protocol, negotiated_fallback, .. } => { - assert_eq!(protocol, PROTOCOL_NAME.into()); + match handle2.next_event().await.unwrap() { + NotificationEvent::ValidateInboundSubstream { result_tx, .. } => { + result_tx.send(ValidationResult::Accept).unwrap(); + }, + NotificationEvent::NotificationStreamOpened { negotiated_fallback, .. } => { assert_eq!(negotiated_fallback, None); break }, _ => {}, - }; + } } }); // Wait for the `NotificationStreamOpened`. loop { - match events_stream1.next().await.unwrap() { - Event::NotificationStreamOpened { protocol, negotiated_fallback, .. } - if protocol == NEW_PROTOCOL_NAME.into() => - { + match handle1.next_event().await.unwrap() { + NotificationEvent::ValidateInboundSubstream { result_tx, .. } => { + result_tx.send(ValidationResult::Accept).unwrap(); + }, + NotificationEvent::NotificationStreamOpened { negotiated_fallback, .. 
} => { assert_eq!(negotiated_fallback, Some(PROTOCOL_NAME.into())); break }, _ => {}, - }; + } } receiver.await.unwrap(); @@ -655,6 +640,7 @@ async fn ensure_listen_addresses_consistent_with_transport_memory() { ) }) .build() + .0 .start_network(); } @@ -674,6 +660,7 @@ async fn ensure_listen_addresses_consistent_with_transport_not_memory() { ) }) .build() + .0 .start_network(); } @@ -699,6 +686,7 @@ async fn ensure_boot_node_addresses_consistent_with_transport_memory() { ) }) .build() + .0 .start_network(); } @@ -723,6 +711,7 @@ async fn ensure_boot_node_addresses_consistent_with_transport_not_memory() { ) }) .build() + .0 .start_network(); } @@ -751,6 +740,7 @@ async fn ensure_reserved_node_addresses_consistent_with_transport_memory() { ) }) .build() + .0 .start_network(); } @@ -778,6 +768,7 @@ async fn ensure_reserved_node_addresses_consistent_with_transport_not_memory() { ) }) .build() + .0 .start_network(); } @@ -800,6 +791,7 @@ async fn ensure_public_addresses_consistent_with_transport_memory() { ) }) .build() + .0 .start_network(); } @@ -821,5 +813,6 @@ async fn ensure_public_addresses_consistent_with_transport_not_memory() { ) }) .build() + .0 .start_network(); } diff --git a/substrate/client/network/test/src/sync.rs b/substrate/client/network/test/src/sync.rs index 389177b4aaf..f2be662ada1 100644 --- a/substrate/client/network/test/src/sync.rs +++ b/substrate/client/network/test/src/sync.rs @@ -44,16 +44,16 @@ async fn sync_peers_works() { sp_tracing::try_init_simple(); let mut net = TestNet::new(3); - futures::future::poll_fn::<(), _>(|cx| { - net.poll(cx); - for peer in 0..3 { - if net.peer(peer).num_peers() != 2 { - return Poll::Pending - } - } - Poll::Ready(()) - }) - .await; + while net.peer(0).num_peers().await != 2 && + net.peer(1).num_peers().await != 2 && + net.peer(2).num_peers().await != 2 + { + futures::future::poll_fn::<(), _>(|cx| { + net.poll(cx); + Poll::Ready(()) + }) + .await; + } } #[tokio::test(flavor = "multi_thread", worker_threads = 2)] @@ -412,15 +412,13 @@ async fn can_sync_small_non_best_forks() { assert!(net.peer(1).client().header(small_hash).unwrap().is_none()); // poll until the two nodes connect, otherwise announcing the block will not work - futures::future::poll_fn::<(), _>(|cx| { - net.poll(cx); - if net.peer(0).num_peers() == 0 || net.peer(1).num_peers() == 0 { - Poll::Pending - } else { + while net.peer(0).num_peers().await == 0 || net.peer(1).num_peers().await == 0 { + futures::future::poll_fn::<(), _>(|cx| { + net.poll(cx); Poll::Ready(()) - } - }) - .await; + }) + .await; + } // synchronization: 0 synced to longer chain and 1 didn't sync to small chain. @@ -465,6 +463,7 @@ async fn can_sync_forks_ahead_of_the_best_chain() { net.peer(1).push_blocks(1, false); net.run_until_connected().await; + // Peer 0 is on 2-block fork which is announced with is_best=false let fork_hash = net .peer(0) @@ -516,15 +515,13 @@ async fn can_sync_explicit_forks() { assert!(net.peer(1).client().header(small_hash).unwrap().is_none()); // poll until the two nodes connect, otherwise announcing the block will not work - futures::future::poll_fn::<(), _>(|cx| { - net.poll(cx); - if net.peer(0).num_peers() == 0 || net.peer(1).num_peers() == 0 { - Poll::Pending - } else { + while net.peer(0).num_peers().await == 0 || net.peer(1).num_peers().await == 0 { + futures::future::poll_fn::<(), _>(|cx| { + net.poll(cx); Poll::Ready(()) - } - }) - .await; + }) + .await; + } // synchronization: 0 synced to longer chain and 1 didn't sync to small chain. 
@@ -613,15 +610,14 @@ async fn full_sync_requires_block_body() { net.peer(0).push_headers(1); // Wait for nodes to connect - futures::future::poll_fn::<(), _>(|cx| { - net.poll(cx); - if net.peer(0).num_peers() == 0 || net.peer(1).num_peers() == 0 { - Poll::Pending - } else { + while net.peer(0).num_peers().await == 0 || net.peer(1).num_peers().await == 0 { + futures::future::poll_fn::<(), _>(|cx| { + net.poll(cx); Poll::Ready(()) - } - }) - .await; + }) + .await; + } + net.run_until_idle().await; assert_eq!(net.peer(1).client.info().best_number, 0); } @@ -917,18 +913,16 @@ async fn block_announce_data_is_propagated() { }); // Wait until peer 1 is connected to both nodes. - futures::future::poll_fn::<(), _>(|cx| { - net.poll(cx); - if net.peer(1).num_peers() == 2 && - net.peer(0).num_peers() == 1 && - net.peer(2).num_peers() == 1 - { + while net.peer(1).num_peers().await != 2 || + net.peer(0).num_peers().await != 1 || + net.peer(2).num_peers().await != 1 + { + futures::future::poll_fn::<(), _>(|cx| { + net.poll(cx); Poll::Ready(()) - } else { - Poll::Pending - } - }) - .await; + }) + .await; + } let block_hash = net .peer(0) @@ -1010,7 +1004,7 @@ async fn multiple_requests_are_accepted_as_long_as_they_are_not_fulfilled() { tokio::time::sleep(tokio::time::Duration::from_secs(10)).await; net.peer(0).push_blocks(1, false); net.run_until_sync().await; - assert_eq!(1, net.peer(0).num_peers()); + assert_eq!(1, net.peer(0).num_peers().await); } let hashof10 = hashes[9]; diff --git a/substrate/client/network/transactions/src/lib.rs b/substrate/client/network/transactions/src/lib.rs index 1b97d4b96c9..9758ea4c4fc 100644 --- a/substrate/client/network/transactions/src/lib.rs +++ b/substrate/client/network/transactions/src/lib.rs @@ -21,8 +21,8 @@ //! Usage: //! //! - Use [`TransactionsHandlerPrototype::new`] to create a prototype. -//! - Pass the return value of [`TransactionsHandlerPrototype::set_config`] to the network -//! configuration as an extra peers set. +//! - Pass the `NonDefaultSetConfig` returned from [`TransactionsHandlerPrototype::new`] to the +//! network configuration as an extra peers set. //! - Use [`TransactionsHandlerPrototype::build`] then [`TransactionsHandler::run`] to obtain a //! `Future` that processes transactions. @@ -37,7 +37,7 @@ use prometheus_endpoint::{register, Counter, PrometheusError, Registry, U64}; use sc_network::{ config::{NonDefaultSetConfig, NonReservedPeerMode, ProtocolId, SetConfig}, error, - event::Event, + service::traits::{NotificationEvent, NotificationService, ValidationResult}, types::ProtocolName, utils::{interval, LruHashSet}, NetworkEventStream, NetworkNotification, NetworkPeers, @@ -115,8 +115,11 @@ impl Future for PendingTransaction { /// Prototype for a [`TransactionsHandler`]. pub struct TransactionsHandlerPrototype { + /// Name of the transaction protocol. protocol_name: ProtocolName, - fallback_protocol_names: Vec, + + /// Handle that is used to communicate with `sc_network::Notifications`. 
+ notification_service: Box, } impl TransactionsHandlerPrototype { @@ -125,35 +128,28 @@ impl TransactionsHandlerPrototype { protocol_id: ProtocolId, genesis_hash: Hash, fork_id: Option<&str>, - ) -> Self { + ) -> (Self, NonDefaultSetConfig) { let genesis_hash = genesis_hash.as_ref(); - let protocol_name = if let Some(fork_id) = fork_id { + let protocol_name: ProtocolName = if let Some(fork_id) = fork_id { format!("/{}/{}/transactions/1", array_bytes::bytes2hex("", genesis_hash), fork_id) } else { format!("/{}/transactions/1", array_bytes::bytes2hex("", genesis_hash)) - }; - let legacy_protocol_name = format!("/{}/transactions/1", protocol_id.as_ref()); - - Self { - protocol_name: protocol_name.into(), - fallback_protocol_names: iter::once(legacy_protocol_name.into()).collect(), } - } - - /// Returns the configuration of the set to put in the network configuration. - pub fn set_config(&self) -> NonDefaultSetConfig { - NonDefaultSetConfig { - notifications_protocol: self.protocol_name.clone(), - fallback_names: self.fallback_protocol_names.clone(), - max_notification_size: MAX_TRANSACTIONS_SIZE, - handshake: None, - set_config: SetConfig { + .into(); + let (config, notification_service) = NonDefaultSetConfig::new( + protocol_name.clone(), + vec![format!("/{}/transactions/1", protocol_id.as_ref()).into()], + MAX_TRANSACTIONS_SIZE, + None, + SetConfig { in_peers: 0, out_peers: 0, reserved_nodes: Vec::new(), non_reserved_mode: NonReservedPeerMode::Deny, }, - } + ); + + (Self { protocol_name, notification_service }, config) } /// Turns the prototype into the actual handler. Returns a controller that allows controlling @@ -173,12 +169,12 @@ impl TransactionsHandlerPrototype { transaction_pool: Arc>, metrics_registry: Option<&Registry>, ) -> error::Result<(TransactionsHandler, TransactionsHandlerController)> { - let net_event_stream = network.event_stream("transactions-handler-net"); let sync_event_stream = sync.event_stream("transactions-handler-sync"); let (to_handler, from_controller) = tracing_unbounded("mpsc_transactions_handler", 100_000); let handler = TransactionsHandler { protocol_name: self.protocol_name, + notification_service: self.notification_service, propagate_timeout: (Box::pin(interval(PROPAGATE_TIMEOUT)) as Pin + Send>>) .fuse(), @@ -186,7 +182,6 @@ impl TransactionsHandlerPrototype { pending_transactions_peers: HashMap::new(), network, sync, - net_event_stream: net_event_stream.fuse(), sync_event_stream: sync_event_stream.fuse(), peers: HashMap::new(), transaction_pool, @@ -253,8 +248,6 @@ pub struct TransactionsHandler< network: N, /// Syncing service. sync: S, - /// Stream of networking events. - net_event_stream: stream::Fuse + Send>>>, /// Receiver for syncing-related events. sync_event_stream: stream::Fuse + Send>>>, // All connected peers @@ -263,6 +256,8 @@ pub struct TransactionsHandler< from_controller: TracingUnboundedReceiver>, /// Prometheus metrics. metrics: Option, + /// Handle that is used to communicate with `sc_network::Notifications`. + notification_service: Box, } /// Peer information @@ -295,14 +290,6 @@ where warn!(target: "sub-libp2p", "Inconsistent state, no peers for pending transaction!"); } }, - network_event = self.net_event_stream.next() => { - if let Some(network_event) = network_event { - self.handle_network_event(network_event).await; - } else { - // Networking has seemingly closed. Closing as well. 
- return; - } - }, sync_event = self.sync_event_stream.next() => { if let Some(sync_event) = sync_event { self.handle_sync_event(sync_event); @@ -317,10 +304,61 @@ where ToHandler::PropagateTransactions => self.propagate_transactions(), } }, + event = self.notification_service.next_event().fuse() => { + if let Some(event) = event { + self.handle_notification_event(event) + } else { + // `Notifications` has seemingly closed. Closing as well. + return + } + } } } } + fn handle_notification_event(&mut self, event: NotificationEvent) { + match event { + NotificationEvent::ValidateInboundSubstream { peer, handshake, result_tx, .. } => { + // only accept peers whose role can be determined + let result = self + .network + .peer_role(peer, handshake) + .map_or(ValidationResult::Reject, |_| ValidationResult::Accept); + let _ = result_tx.send(result); + }, + NotificationEvent::NotificationStreamOpened { peer, handshake, .. } => { + let Some(role) = self.network.peer_role(peer, handshake) else { + log::debug!(target: "sub-libp2p", "role for {peer} couldn't be determined"); + return + }; + + let _was_in = self.peers.insert( + peer, + Peer { + known_transactions: LruHashSet::new( + NonZeroUsize::new(MAX_KNOWN_TRANSACTIONS).expect("Constant is nonzero"), + ), + role, + }, + ); + debug_assert!(_was_in.is_none()); + }, + NotificationEvent::NotificationStreamClosed { peer } => { + let _peer = self.peers.remove(&peer); + debug_assert!(_peer.is_some()); + }, + NotificationEvent::NotificationReceived { peer, notification } => { + if let Ok(m) = + as Decode>::decode(&mut notification.as_ref()) + { + self.on_transactions(peer, m); + } else { + warn!(target: "sub-libp2p", "Failed to decode transactions list"); + } + }, + } + } + fn handle_sync_event(&mut self, event: SyncEvent) { match event { SyncEvent::PeerConnected(remote) => { @@ -346,51 +384,6 @@ where } } - async fn handle_network_event(&mut self, event: Event) { - match event { - Event::Dht(_) => {}, - Event::NotificationStreamOpened { remote, protocol, role, .. } - if protocol == self.protocol_name => - { - let _was_in = self.peers.insert( - remote, - Peer { - known_transactions: LruHashSet::new( - NonZeroUsize::new(MAX_KNOWN_TRANSACTIONS).expect("Constant is nonzero"), - ), - role, - }, - ); - debug_assert!(_was_in.is_none()); - }, - Event::NotificationStreamClosed { remote, protocol } - if protocol == self.protocol_name => - { - let _peer = self.peers.remove(&remote); - debug_assert!(_peer.is_some()); - }, - - Event::NotificationsReceived { remote, messages } => { - for (protocol, message) in messages { - if protocol != self.protocol_name { - continue - } - - if let Ok(m) = - as Decode>::decode(&mut message.as_ref()) - { - self.on_transactions(remote, m); - } else { - warn!(target: "sub-libp2p", "Failed to decode transactions list"); - } - } - }, - - // Not our concern. - Event::NotificationStreamOpened { .. } | Event::NotificationStreamClosed { .. 
} => {}, - } - } - /// Called when peer sends us new transactions fn on_transactions(&mut self, who: PeerId, transactions: Transactions) { // Accept transactions only when node is not major syncing @@ -482,8 +475,7 @@ where propagated_to.entry(hash).or_default().push(who.to_base58()); } trace!(target: "sync", "Sending {} transactions to {}", to_send.len(), who); - self.network - .write_notification(*who, self.protocol_name.clone(), to_send.encode()); + let _ = self.notification_service.send_sync_notification(who, to_send.encode()); } } diff --git a/substrate/client/offchain/src/api.rs b/substrate/client/offchain/src/api.rs index c7df5784d32..2901bab2f26 100644 --- a/substrate/client/offchain/src/api.rs +++ b/substrate/client/offchain/src/api.rs @@ -223,7 +223,7 @@ mod tests { use sc_client_db::offchain::LocalStorage; use sc_network::{ config::MultiaddrWithPeerId, types::ProtocolName, NetworkPeers, NetworkStateInfo, - ReputationChange, + ObservedRole, ReputationChange, }; use sp_core::offchain::{storage::OffchainDb, DbExternalities, Externalities, StorageKind}; use std::time::SystemTime; @@ -294,6 +294,10 @@ mod tests { fn sync_num_connected(&self) -> usize { unimplemented!(); } + + fn peer_role(&self, _peer_id: PeerId, _handshake: Vec) -> Option { + None + } } impl NetworkStateInfo for TestNetwork { diff --git a/substrate/client/offchain/src/lib.rs b/substrate/client/offchain/src/lib.rs index 756ab77ff94..8bcfa66a5af 100644 --- a/substrate/client/offchain/src/lib.rs +++ b/substrate/client/offchain/src/lib.rs @@ -330,7 +330,9 @@ mod tests { use libp2p::{Multiaddr, PeerId}; use sc_block_builder::BlockBuilderBuilder; use sc_client_api::Backend as _; - use sc_network::{config::MultiaddrWithPeerId, types::ProtocolName, ReputationChange}; + use sc_network::{ + config::MultiaddrWithPeerId, types::ProtocolName, ObservedRole, ReputationChange, + }; use sc_transaction_pool::BasicPool; use sc_transaction_pool_api::{InPoolTransaction, TransactionPool}; use sp_consensus::BlockOrigin; @@ -423,6 +425,10 @@ mod tests { fn sync_num_connected(&self) -> usize { unimplemented!(); } + + fn peer_role(&self, _peer_id: PeerId, _handshake: Vec) -> Option { + None + } } #[test] diff --git a/substrate/client/service/src/builder.rs b/substrate/client/service/src/builder.rs index 25f998385ba..d078f44f198 100644 --- a/substrate/client/service/src/builder.rs +++ b/substrate/client/service/src/builder.rs @@ -753,6 +753,11 @@ where } let protocol_id = config.protocol_id(); + let genesis_hash = client + .block_hash(0u32.into()) + .ok() + .flatten() + .expect("Genesis block exists; qed"); let block_announce_validator = if let Some(f) = block_announce_validator_builder { f(client.clone()) @@ -802,11 +807,7 @@ where // Allow both outgoing and incoming requests. 
let (handler, protocol_config) = WarpSyncRequestHandler::new( protocol_id.clone(), - client - .block_hash(0u32.into()) - .ok() - .flatten() - .expect("Genesis block exists; qed"), + genesis_hash, config.chain_spec.fork_id(), warp_with_provider.clone(), ); @@ -845,17 +846,13 @@ where } // create transactions protocol and add it to the list of supported protocols of - // `network_params` - let transactions_handler_proto = sc_network_transactions::TransactionsHandlerPrototype::new( - protocol_id.clone(), - client - .block_hash(0u32.into()) - .ok() - .flatten() - .expect("Genesis block exists; qed"), - config.chain_spec.fork_id(), - ); - net_config.add_notification_protocol(transactions_handler_proto.set_config()); + let (transactions_handler_proto, transactions_config) = + sc_network_transactions::TransactionsHandlerPrototype::new( + protocol_id.clone(), + genesis_hash, + config.chain_spec.fork_id(), + ); + net_config.add_notification_protocol(transactions_config); // Create `PeerStore` and initialize it with bootnode peer ids. let peer_store = PeerStore::new( @@ -869,7 +866,6 @@ where let peer_store_handle = peer_store.handle(); spawn_handle.spawn("peer-store", Some("networking"), peer_store.run()); - let (tx, rx) = sc_utils::mpsc::tracing_unbounded("mpsc_syncing_engine_protocol", 100_000); let (engine, sync_service, block_announce_config) = SyncingEngine::new( Roles::from(&config.role), client.clone(), @@ -884,7 +880,7 @@ where block_downloader, state_request_protocol_name, warp_request_protocol_name, - rx, + peer_store_handle.clone(), )?; let sync_service_import_queue = sync_service.clone(); let sync_service = Arc::new(sync_service); @@ -905,7 +901,6 @@ where fork_id: config.chain_spec.fork_id().map(ToOwned::to_owned), metrics_registry: config.prometheus_config.as_ref().map(|config| config.registry.clone()), block_announce_config, - tx, }; let has_bootnodes = !network_params.network_config.network_config.boot_nodes.is_empty(); diff --git a/substrate/frame/contracts/fixtures/data/account_reentrance_count_call.wat b/substrate/frame/contracts/fixtures/data/account_reentrance_count_call.wat index ab678906648..e6d6ba8bb81 100644 --- a/substrate/frame/contracts/fixtures/data/account_reentrance_count_call.wat +++ b/substrate/frame/contracts/fixtures/data/account_reentrance_count_call.wat @@ -14,7 +14,7 @@ (func $assert (param i32) (block $ok (br_if $ok - (get_local 0) + (local.get 0) ) (unreachable) ) diff --git a/substrate/frame/contracts/fixtures/data/add_remove_delegate_dependency.wat b/substrate/frame/contracts/fixtures/data/add_remove_delegate_dependency.wat index ef456b6d620..dac7736244d 100644 --- a/substrate/frame/contracts/fixtures/data/add_remove_delegate_dependency.wat +++ b/substrate/frame/contracts/fixtures/data/add_remove_delegate_dependency.wat @@ -16,7 +16,7 @@ (func $assert (param i32) (block $ok (br_if $ok - (get_local 0) + (local.get 0) ) (unreachable) ) @@ -44,8 +44,8 @@ ;; [0..4) - size of the call ;; [4..8) - action to perform ;; [8..42) - code hash of the callee - (set_local $action (i32.load (i32.const 4))) - (set_local $code_hash_ptr (i32.const 8)) + (local.set $action (i32.load (i32.const 4))) + (local.set $code_hash_ptr (i32.const 8)) ;; Assert input size == 36 (4 for action + 32 for code_hash). (call $assert @@ -56,25 +56,25 @@ ) ;; Call add_delegate_dependency when action == 1. 
- (if (i32.eq (get_local $action) (i32.const 1)) + (if (i32.eq (local.get $action) (i32.const 1)) (then - (call $add_delegate_dependency (get_local $code_hash_ptr)) + (call $add_delegate_dependency (local.get $code_hash_ptr)) ) (else) ) ;; Call remove_delegate_dependency when action == 2. - (if (i32.eq (get_local $action) (i32.const 2)) + (if (i32.eq (local.get $action) (i32.const 2)) (then (call $remove_delegate_dependency - (get_local $code_hash_ptr) + (local.get $code_hash_ptr) ) ) (else) ) ;; Call terminate when action == 3. - (if (i32.eq (get_local $action) (i32.const 3)) + (if (i32.eq (local.get $action) (i32.const 3)) (then (call $terminate (i32.const 100) ;; Pointer to beneficiary address diff --git a/substrate/frame/contracts/fixtures/data/balance.wat b/substrate/frame/contracts/fixtures/data/balance.wat index d86d5c4b1c6..d7970c92e41 100644 --- a/substrate/frame/contracts/fixtures/data/balance.wat +++ b/substrate/frame/contracts/fixtures/data/balance.wat @@ -12,7 +12,7 @@ (func $assert (param i32) (block $ok (br_if $ok - (get_local 0) + (local.get 0) ) (unreachable) ) diff --git a/substrate/frame/contracts/fixtures/data/call.wat b/substrate/frame/contracts/fixtures/data/call.wat index 4558b2c6409..43b32049c88 100644 --- a/substrate/frame/contracts/fixtures/data/call.wat +++ b/substrate/frame/contracts/fixtures/data/call.wat @@ -7,7 +7,7 @@ (func $assert (param i32) (block $ok (br_if $ok - (get_local 0) + (local.get 0) ) (unreachable) ) diff --git a/substrate/frame/contracts/fixtures/data/call_runtime_and_call.wat b/substrate/frame/contracts/fixtures/data/call_runtime_and_call.wat index 3320922d9e2..5d76e19a74c 100644 --- a/substrate/frame/contracts/fixtures/data/call_runtime_and_call.wat +++ b/substrate/frame/contracts/fixtures/data/call_runtime_and_call.wat @@ -7,7 +7,7 @@ (func $assert (param i32) (block $ok - (br_if $ok (get_local 0)) + (br_if $ok (local.get 0)) (unreachable) ) ) diff --git a/substrate/frame/contracts/fixtures/data/caller_contract.wat b/substrate/frame/contracts/fixtures/data/caller_contract.wat index 929171b9a26..43eb8ccfd54 100644 --- a/substrate/frame/contracts/fixtures/data/caller_contract.wat +++ b/substrate/frame/contracts/fixtures/data/caller_contract.wat @@ -10,7 +10,7 @@ (func $assert (param i32) (block $ok (br_if $ok - (get_local 0) + (local.get 0) ) (unreachable) ) @@ -37,10 +37,10 @@ ) ;; Read current balance into local variable. - (set_local $sp (i32.const 1024)) + (local.set $sp (i32.const 1024)) ;; Fail to deploy the contract since it returns a non-zero exit status. - (set_local $exit_code + (local.set $exit_code (call $seal_instantiate (i32.const 24) ;; Pointer to the code hash. (i64.const 0) ;; How much ref_time weight to devote for the execution. 0 = all. @@ -60,11 +60,11 @@ ;; Check non-zero exit status. (call $assert - (i32.eq (get_local $exit_code) (i32.const 2)) ;; ReturnCode::CalleeReverted + (i32.eq (local.get $exit_code) (i32.const 2)) ;; ReturnCode::CalleeReverted ) ;; Fail to deploy the contract due to insufficient ref_time weight. - (set_local $exit_code + (local.set $exit_code (call $seal_instantiate (i32.const 24) ;; Pointer to the code hash. (i64.const 1) ;; Supply too little ref_time weight @@ -85,11 +85,11 @@ ;; Check for special trap exit status. (call $assert - (i32.eq (get_local $exit_code) (i32.const 1)) ;; ReturnCode::CalleeTrapped + (i32.eq (local.get $exit_code) (i32.const 1)) ;; ReturnCode::CalleeTrapped ) ;; Fail to deploy the contract due to insufficient ref_time weight. 
- (set_local $exit_code + (local.set $exit_code (call $seal_instantiate (i32.const 24) ;; Pointer to the code hash. (i64.const 0) ;; How much ref_time weight to devote for the execution. 0 = all. @@ -110,17 +110,17 @@ ;; Check for special trap exit status. (call $assert - (i32.eq (get_local $exit_code) (i32.const 1)) ;; ReturnCode::CalleeTrapped + (i32.eq (local.get $exit_code) (i32.const 1)) ;; ReturnCode::CalleeTrapped ) ;; Length of the output buffer (i32.store - (i32.sub (get_local $sp) (i32.const 4)) + (i32.sub (local.get $sp) (i32.const 4)) (i32.const 256) ) ;; Deploy the contract successfully. - (set_local $exit_code + (local.set $exit_code (call $seal_instantiate (i32.const 24) ;; Pointer to the code hash. (i64.const 0) ;; How much ref_time weight to devote for the execution. 0 = all. @@ -130,7 +130,7 @@ (i32.const 8) ;; Pointer to input data buffer address (i32.const 8) ;; Length of input data buffer (i32.const 16) ;; Pointer to the address output buffer - (i32.sub (get_local $sp) (i32.const 4)) ;; Pointer to the address buffer length + (i32.sub (local.get $sp) (i32.const 4)) ;; Pointer to the address buffer length (i32.const 4294967295) ;; u32 max sentinel value: do not copy output (i32.const 0) ;; Length is ignored in this case (i32.const 0) ;; salt_ptr @@ -141,28 +141,28 @@ ;; Check for success exit status. (call $assert - (i32.eq (get_local $exit_code) (i32.const 0)) ;; ReturnCode::Success + (i32.eq (local.get $exit_code) (i32.const 0)) ;; ReturnCode::Success ) ;; Check that address has the expected length (call $assert - (i32.eq (i32.load (i32.sub (get_local $sp) (i32.const 4))) (i32.const 32)) + (i32.eq (i32.load (i32.sub (local.get $sp) (i32.const 4))) (i32.const 32)) ) ;; Zero out destination buffer of output (i32.store - (i32.sub (get_local $sp) (i32.const 4)) + (i32.sub (local.get $sp) (i32.const 4)) (i32.const 0) ) ;; Length of the output buffer (i32.store - (i32.sub (get_local $sp) (i32.const 8)) + (i32.sub (local.get $sp) (i32.const 8)) (i32.const 4) ) ;; Call the new contract and expect it to return failing exit code. - (set_local $exit_code + (local.set $exit_code (call $seal_call (i32.const 0) ;; Set no flag (i32.const 16) ;; Pointer to "callee" address. @@ -172,29 +172,29 @@ (i32.const 0) ;; Pointer to the buffer with value to transfer (i32.const 9) ;; Pointer to input data buffer address (i32.const 7) ;; Length of input data buffer - (i32.sub (get_local $sp) (i32.const 4)) ;; Ptr to output buffer - (i32.sub (get_local $sp) (i32.const 8)) ;; Ptr to output buffer len + (i32.sub (local.get $sp) (i32.const 4)) ;; Ptr to output buffer + (i32.sub (local.get $sp) (i32.const 8)) ;; Ptr to output buffer len ) ) ;; Check non-zero exit status. (call $assert - (i32.eq (get_local $exit_code) (i32.const 2)) ;; ReturnCode::CalleeReverted + (i32.eq (local.get $exit_code) (i32.const 2)) ;; ReturnCode::CalleeReverted ) ;; Check that output buffer contains the expected return data. (call $assert - (i32.eq (i32.load (i32.sub (get_local $sp) (i32.const 8))) (i32.const 3)) + (i32.eq (i32.load (i32.sub (local.get $sp) (i32.const 8))) (i32.const 3)) ) (call $assert (i32.eq - (i32.load (i32.sub (get_local $sp) (i32.const 4))) + (i32.load (i32.sub (local.get $sp) (i32.const 4))) (i32.const 0x00776655) ) ) ;; Fail to call the contract due to insufficient ref_time weight. - (set_local $exit_code + (local.set $exit_code (call $seal_call (i32.const 0) ;; Set no flag (i32.const 16) ;; Pointer to "callee" address. @@ -211,11 +211,11 @@ ;; Check for special trap exit status. 
(call $assert - (i32.eq (get_local $exit_code) (i32.const 1)) ;; ReturnCode::CalleeTrapped + (i32.eq (local.get $exit_code) (i32.const 1)) ;; ReturnCode::CalleeTrapped ) ;; Fail to call the contract due to insufficient proof_size weight. - (set_local $exit_code + (local.set $exit_code (call $seal_call (i32.const 0) ;; Set no flag (i32.const 16) ;; Pointer to "callee" address. @@ -232,23 +232,23 @@ ;; Check for special trap exit status. (call $assert - (i32.eq (get_local $exit_code) (i32.const 1)) ;; ReturnCode::CalleeTrapped + (i32.eq (local.get $exit_code) (i32.const 1)) ;; ReturnCode::CalleeTrapped ) ;; Zero out destination buffer of output (i32.store - (i32.sub (get_local $sp) (i32.const 4)) + (i32.sub (local.get $sp) (i32.const 4)) (i32.const 0) ) ;; Length of the output buffer (i32.store - (i32.sub (get_local $sp) (i32.const 8)) + (i32.sub (local.get $sp) (i32.const 8)) (i32.const 4) ) ;; Call the contract successfully. - (set_local $exit_code + (local.set $exit_code (call $seal_call (i32.const 0) ;; Set no flag (i32.const 16) ;; Pointer to "callee" address. @@ -258,23 +258,23 @@ (i32.const 0) ;; Pointer to the buffer with value to transfer (i32.const 8) ;; Pointer to input data buffer address (i32.const 8) ;; Length of input data buffer - (i32.sub (get_local $sp) (i32.const 4)) ;; Ptr to output buffer - (i32.sub (get_local $sp) (i32.const 8)) ;; Ptr to output buffer len + (i32.sub (local.get $sp) (i32.const 4)) ;; Ptr to output buffer + (i32.sub (local.get $sp) (i32.const 8)) ;; Ptr to output buffer len ) ) ;; Check for success exit status. (call $assert - (i32.eq (get_local $exit_code) (i32.const 0)) ;; ReturnCode::Success + (i32.eq (local.get $exit_code) (i32.const 0)) ;; ReturnCode::Success ) ;; Check that the output buffer contains the expected return data. 
(call $assert - (i32.eq (i32.load (i32.sub (get_local $sp) (i32.const 8))) (i32.const 4)) + (i32.eq (i32.load (i32.sub (local.get $sp) (i32.const 8))) (i32.const 4)) ) (call $assert (i32.eq - (i32.load (i32.sub (get_local $sp) (i32.const 4))) + (i32.load (i32.sub (local.get $sp) (i32.const 4))) (i32.const 0x77665544) ) ) diff --git a/substrate/frame/contracts/fixtures/data/chain_extension.wat b/substrate/frame/contracts/fixtures/data/chain_extension.wat index 670f8e70172..c24ca286ff8 100644 --- a/substrate/frame/contracts/fixtures/data/chain_extension.wat +++ b/substrate/frame/contracts/fixtures/data/chain_extension.wat @@ -9,7 +9,7 @@ (func $assert (param i32) (block $ok - (br_if $ok (get_local 0)) + (br_if $ok (local.get 0)) (unreachable) ) ) diff --git a/substrate/frame/contracts/fixtures/data/chain_extension_temp_storage.wat b/substrate/frame/contracts/fixtures/data/chain_extension_temp_storage.wat index b481abb5bc7..504646df1b0 100644 --- a/substrate/frame/contracts/fixtures/data/chain_extension_temp_storage.wat +++ b/substrate/frame/contracts/fixtures/data/chain_extension_temp_storage.wat @@ -11,7 +11,7 @@ (func $assert (param i32) (block $ok - (br_if $ok (get_local 0)) + (br_if $ok (local.get 0)) (unreachable) ) ) diff --git a/substrate/frame/contracts/fixtures/data/create_storage_and_call.wat b/substrate/frame/contracts/fixtures/data/create_storage_and_call.wat index 5592e7e96a9..2bff53b638f 100644 --- a/substrate/frame/contracts/fixtures/data/create_storage_and_call.wat +++ b/substrate/frame/contracts/fixtures/data/create_storage_and_call.wat @@ -8,7 +8,7 @@ (func $assert (param i32) (block $ok (br_if $ok - (get_local 0) + (local.get 0) ) (unreachable) ) diff --git a/substrate/frame/contracts/fixtures/data/create_storage_and_instantiate.wat b/substrate/frame/contracts/fixtures/data/create_storage_and_instantiate.wat index cd720247843..00c9a657f39 100644 --- a/substrate/frame/contracts/fixtures/data/create_storage_and_instantiate.wat +++ b/substrate/frame/contracts/fixtures/data/create_storage_and_instantiate.wat @@ -14,7 +14,7 @@ (func $assert (param i32) (block $ok (br_if $ok - (get_local 0) + (local.get 0) ) (unreachable) ) diff --git a/substrate/frame/contracts/fixtures/data/crypto_hashes.wat b/substrate/frame/contracts/fixtures/data/crypto_hashes.wat index c2b4d6b81ed..9d86b02f419 100644 --- a/substrate/frame/contracts/fixtures/data/crypto_hashes.wat +++ b/substrate/frame/contracts/fixtures/data/crypto_hashes.wat @@ -59,8 +59,10 @@ (call $seal_input (local.get $input_ptr) (local.get $input_len_ptr)) (local.set $chosen_hash_fn (i32.load8_u (local.get $input_ptr))) (if (i32.gt_u (local.get $chosen_hash_fn) (i32.const 7)) - ;; We check that the chosen hash fn identifier is within bounds: [0,7] - (unreachable) + (then + ;; We check that the chosen hash fn identifier is within bounds: [0,7] + (unreachable) + ) ) (local.set $input_ptr (i32.add (local.get $input_ptr) (i32.const 1))) (local.set $input_len (i32.sub (i32.load (local.get $input_len_ptr)) (i32.const 1))) diff --git a/substrate/frame/contracts/fixtures/data/debug_message_invalid_utf8.wat b/substrate/frame/contracts/fixtures/data/debug_message_invalid_utf8.wat index e8c447b42fc..dae0de88418 100644 --- a/substrate/frame/contracts/fixtures/data/debug_message_invalid_utf8.wat +++ b/substrate/frame/contracts/fixtures/data/debug_message_invalid_utf8.wat @@ -8,7 +8,7 @@ (func $assert_eq (param i32 i32) (block $ok (br_if $ok - (i32.eq (get_local 0) (get_local 1)) + (i32.eq (local.get 0) (local.get 1)) ) (unreachable) ) diff --git 
a/substrate/frame/contracts/fixtures/data/debug_message_logging_disabled.wat b/substrate/frame/contracts/fixtures/data/debug_message_logging_disabled.wat index fc6ee72df8b..e9ce20ba42b 100644 --- a/substrate/frame/contracts/fixtures/data/debug_message_logging_disabled.wat +++ b/substrate/frame/contracts/fixtures/data/debug_message_logging_disabled.wat @@ -8,7 +8,7 @@ (func $assert_eq (param i32 i32) (block $ok (br_if $ok - (i32.eq (get_local 0) (get_local 1)) + (i32.eq (local.get 0) (local.get 1)) ) (unreachable) ) diff --git a/substrate/frame/contracts/fixtures/data/debug_message_works.wat b/substrate/frame/contracts/fixtures/data/debug_message_works.wat index 61933c23296..44a7b6db1be 100644 --- a/substrate/frame/contracts/fixtures/data/debug_message_works.wat +++ b/substrate/frame/contracts/fixtures/data/debug_message_works.wat @@ -8,7 +8,7 @@ (func $assert_eq (param i32 i32) (block $ok (br_if $ok - (i32.eq (get_local 0) (get_local 1)) + (i32.eq (local.get 0) (local.get 1)) ) (unreachable) ) diff --git a/substrate/frame/contracts/fixtures/data/delegate_call.wat b/substrate/frame/contracts/fixtures/data/delegate_call.wat index 7fe422af455..b8d4f0d47f0 100644 --- a/substrate/frame/contracts/fixtures/data/delegate_call.wat +++ b/substrate/frame/contracts/fixtures/data/delegate_call.wat @@ -24,7 +24,7 @@ (func $assert (param i32) (block $ok (br_if $ok - (get_local 0) + (local.get 0) ) (unreachable) ) @@ -70,7 +70,7 @@ ) ;; Call deployed library contract code. - (set_local $exit_code + (local.set $exit_code (call $seal_delegate_call (i32.const 0) ;; Set no call flags (i32.const 64) ;; Pointer to "callee" code_hash. @@ -83,7 +83,7 @@ ;; Check for success exit status. (call $assert - (i32.eq (get_local $exit_code) (i32.const 0)) ;; ReturnCode::Success + (i32.eq (local.get $exit_code) (i32.const 0)) ;; ReturnCode::Success ) (call $assert diff --git a/substrate/frame/contracts/fixtures/data/delegate_call_lib.wat b/substrate/frame/contracts/fixtures/data/delegate_call_lib.wat index 340b9699f87..62eea32800a 100644 --- a/substrate/frame/contracts/fixtures/data/delegate_call_lib.wat +++ b/substrate/frame/contracts/fixtures/data/delegate_call_lib.wat @@ -20,7 +20,7 @@ (func $assert (param i32) (block $ok (br_if $ok - (get_local 0) + (local.get 0) ) (unreachable) ) diff --git a/substrate/frame/contracts/fixtures/data/delegate_call_simple.wat b/substrate/frame/contracts/fixtures/data/delegate_call_simple.wat index 24ae5a13e33..ba0a8fcc8ae 100644 --- a/substrate/frame/contracts/fixtures/data/delegate_call_simple.wat +++ b/substrate/frame/contracts/fixtures/data/delegate_call_simple.wat @@ -12,7 +12,7 @@ (func $assert (param i32) (block $ok (br_if $ok - (get_local 0) + (local.get 0) ) (unreachable) ) diff --git a/substrate/frame/contracts/fixtures/data/destroy_and_transfer.wat b/substrate/frame/contracts/fixtures/data/destroy_and_transfer.wat index 25554795552..2afd3b2fbac 100644 --- a/substrate/frame/contracts/fixtures/data/destroy_and_transfer.wat +++ b/substrate/frame/contracts/fixtures/data/destroy_and_transfer.wat @@ -33,7 +33,7 @@ (func $assert (param i32) (block $ok (br_if $ok - (get_local 0) + (local.get 0) ) (unreachable) ) diff --git a/substrate/frame/contracts/fixtures/data/drain.wat b/substrate/frame/contracts/fixtures/data/drain.wat index cb8ff0aed61..18a21cca803 100644 --- a/substrate/frame/contracts/fixtures/data/drain.wat +++ b/substrate/frame/contracts/fixtures/data/drain.wat @@ -19,7 +19,7 @@ (func $assert (param i32) (block $ok (br_if $ok - (get_local 0) + (local.get 0) ) (unreachable) 
) diff --git a/substrate/frame/contracts/fixtures/data/ecdsa_recover.wat b/substrate/frame/contracts/fixtures/data/ecdsa_recover.wat index d694b3215e8..4910e706069 100644 --- a/substrate/frame/contracts/fixtures/data/ecdsa_recover.wat +++ b/substrate/frame/contracts/fixtures/data/ecdsa_recover.wat @@ -12,7 +12,7 @@ (func $assert (param i32) (block $ok (br_if $ok - (get_local 0) + (local.get 0) ) (unreachable) ) diff --git a/substrate/frame/contracts/fixtures/data/event_size.wat b/substrate/frame/contracts/fixtures/data/event_size.wat index 4bd6158d72f..1c1f34b24d7 100644 --- a/substrate/frame/contracts/fixtures/data/event_size.wat +++ b/substrate/frame/contracts/fixtures/data/event_size.wat @@ -9,7 +9,7 @@ (func $assert (param i32) (block $ok (br_if $ok - (get_local 0) + (local.get 0) ) (unreachable) ) diff --git a/substrate/frame/contracts/fixtures/data/multi_store.wat b/substrate/frame/contracts/fixtures/data/multi_store.wat index 2592baf6183..c334ed54c4e 100644 --- a/substrate/frame/contracts/fixtures/data/multi_store.wat +++ b/substrate/frame/contracts/fixtures/data/multi_store.wat @@ -19,7 +19,7 @@ (func $assert (param i32) (block $ok (br_if $ok - (get_local 0) + (local.get 0) ) (unreachable) ) diff --git a/substrate/frame/contracts/fixtures/data/reentrance_count_call.wat b/substrate/frame/contracts/fixtures/data/reentrance_count_call.wat index c6b529e2aff..44db8d041b1 100644 --- a/substrate/frame/contracts/fixtures/data/reentrance_count_call.wat +++ b/substrate/frame/contracts/fixtures/data/reentrance_count_call.wat @@ -20,7 +20,7 @@ (func $assert (param i32) (block $ok (br_if $ok - (get_local 0) + (local.get 0) ) (unreachable) ) @@ -36,11 +36,11 @@ (call $seal_input (i32.const 32) (i32.const 36)) ;; reading manually passed reentrant count - (set_local $expected_reentrance_count (i32.load (i32.const 32))) + (local.set $expected_reentrance_count (i32.load (i32.const 32))) ;; reentrance count is calculated correctly (call $assert - (i32.eq (call $reentrance_count) (get_local $expected_reentrance_count)) + (i32.eq (call $reentrance_count) (local.get $expected_reentrance_count)) ) ;; re-enter 5 times in a row and assert that the reentrant counter works as expected @@ -52,7 +52,7 @@ (i32.store (i32.const 32) (i32.add (i32.load (i32.const 32)) (i32.const 1))) ;; Call to itself - (set_local $seal_call_exit_code + (local.set $seal_call_exit_code (call $seal_call (i32.const 8) ;; Allow reentrancy flag set (i32.const 0) ;; Pointer to "callee" address @@ -66,7 +66,7 @@ ) (call $assert - (i32.eq (get_local $seal_call_exit_code) (i32.const 0)) + (i32.eq (local.get $seal_call_exit_code) (i32.const 0)) ) ) ) diff --git a/substrate/frame/contracts/fixtures/data/reentrance_count_delegated_call.wat b/substrate/frame/contracts/fixtures/data/reentrance_count_delegated_call.wat index b8219a8462e..49e0193bcdb 100644 --- a/substrate/frame/contracts/fixtures/data/reentrance_count_delegated_call.wat +++ b/substrate/frame/contracts/fixtures/data/reentrance_count_delegated_call.wat @@ -17,7 +17,7 @@ (func $assert (param i32) (block $ok (br_if $ok - (get_local 0) + (local.get 0) ) (unreachable) ) @@ -30,7 +30,7 @@ (call $seal_input (i32.const 0) (i32.const 36)) ;; reading passed callstack height - (set_local $callstack_height (i32.load (i32.const 32))) + (local.set $callstack_height (i32.load (i32.const 32))) ;; incrementing callstack height (i32.store (i32.const 32) (i32.add (i32.load (i32.const 32)) (i32.const 1))) @@ -40,12 +40,12 @@ (i32.eq (call $reentrance_count) (i32.const 0)) ) - (i32.eq (get_local 
$callstack_height) (i32.const 5)) + (i32.eq (local.get $callstack_height) (i32.const 5)) (if (then) ;; exit recursion case (else ;; Call to itself - (set_local $delegate_call_exit_code + (local.set $delegate_call_exit_code (call $seal_delegate_call (i32.const 0) ;; Set no call flags (i32.const 0) ;; Pointer to "callee" code_hash. @@ -57,13 +57,13 @@ ) (call $assert - (i32.eq (get_local $delegate_call_exit_code) (i32.const 0)) + (i32.eq (local.get $delegate_call_exit_code) (i32.const 0)) ) ) ) (call $assert - (i32.le_s (get_local $callstack_height) (i32.const 5)) + (i32.le_s (local.get $callstack_height) (i32.const 5)) ) ) diff --git a/substrate/frame/contracts/fixtures/data/self_destruct.wat b/substrate/frame/contracts/fixtures/data/self_destruct.wat index b8a37306e20..00c3895fdde 100644 --- a/substrate/frame/contracts/fixtures/data/self_destruct.wat +++ b/substrate/frame/contracts/fixtures/data/self_destruct.wat @@ -26,7 +26,7 @@ (func $assert (param i32) (block $ok (br_if $ok - (get_local 0) + (local.get 0) ) (unreachable) ) diff --git a/substrate/frame/contracts/fixtures/data/self_destructing_constructor.wat b/substrate/frame/contracts/fixtures/data/self_destructing_constructor.wat index 85fce511e21..628f283a19f 100644 --- a/substrate/frame/contracts/fixtures/data/self_destructing_constructor.wat +++ b/substrate/frame/contracts/fixtures/data/self_destructing_constructor.wat @@ -5,7 +5,7 @@ (func $assert (param i32) (block $ok (br_if $ok - (get_local 0) + (local.get 0) ) (unreachable) ) diff --git a/substrate/frame/contracts/fixtures/data/set_code_hash.wat b/substrate/frame/contracts/fixtures/data/set_code_hash.wat index b4df1b13318..c0a9557b4d0 100644 --- a/substrate/frame/contracts/fixtures/data/set_code_hash.wat +++ b/substrate/frame/contracts/fixtures/data/set_code_hash.wat @@ -16,7 +16,7 @@ (func $assert (param i32) (block $ok (br_if $ok - (get_local 0) + (local.get 0) ) (unreachable) ) @@ -27,11 +27,11 @@ (call $seal_input (i32.const 0) (i32.const 32)) - (set_local $exit_code + (local.set $exit_code (call $seal_set_code_hash (i32.const 0)) ;; Pointer to the input data. 
) (call $assert - (i32.eq (get_local $exit_code) (i32.const 0)) ;; ReturnCode::Success + (i32.eq (local.get $exit_code) (i32.const 0)) ;; ReturnCode::Success ) ;; we return 1 after setting new code_hash diff --git a/substrate/frame/contracts/fixtures/data/storage_size.wat b/substrate/frame/contracts/fixtures/data/storage_size.wat index 293a656d4f6..728bb4fcf3c 100644 --- a/substrate/frame/contracts/fixtures/data/storage_size.wat +++ b/substrate/frame/contracts/fixtures/data/storage_size.wat @@ -20,7 +20,7 @@ (func $assert (param i32) (block $ok (br_if $ok - (get_local 0) + (local.get 0) ) (unreachable) ) diff --git a/substrate/frame/contracts/fixtures/data/store_call.wat b/substrate/frame/contracts/fixtures/data/store_call.wat index 9e090d31801..746b7a48b55 100644 --- a/substrate/frame/contracts/fixtures/data/store_call.wat +++ b/substrate/frame/contracts/fixtures/data/store_call.wat @@ -15,7 +15,7 @@ (func $assert (param i32) (block $ok (br_if $ok - (get_local 0) + (local.get 0) ) (unreachable) ) diff --git a/substrate/frame/contracts/fixtures/data/store_deploy.wat b/substrate/frame/contracts/fixtures/data/store_deploy.wat index cc428e9623b..7f115cba977 100644 --- a/substrate/frame/contracts/fixtures/data/store_deploy.wat +++ b/substrate/frame/contracts/fixtures/data/store_deploy.wat @@ -15,7 +15,7 @@ (func $assert (param i32) (block $ok (br_if $ok - (get_local 0) + (local.get 0) ) (unreachable) ) diff --git a/substrate/frame/contracts/fixtures/data/xcm_execute.wat b/substrate/frame/contracts/fixtures/data/xcm_execute.wat index b3459996a2e..72ef14ed82c 100644 --- a/substrate/frame/contracts/fixtures/data/xcm_execute.wat +++ b/substrate/frame/contracts/fixtures/data/xcm_execute.wat @@ -12,7 +12,7 @@ (func $assert (param i32) (block $ok (br_if $ok - (get_local 0) + (local.get 0) ) (unreachable) ) diff --git a/substrate/frame/contracts/fixtures/data/xcm_send.wat b/substrate/frame/contracts/fixtures/data/xcm_send.wat index 9eec6388de9..fe29ddf0f14 100644 --- a/substrate/frame/contracts/fixtures/data/xcm_send.wat +++ b/substrate/frame/contracts/fixtures/data/xcm_send.wat @@ -12,7 +12,7 @@ (func $assert (param i32) (block $ok (br_if $ok - (get_local 0) + (local.get 0) ) (unreachable) ) diff --git a/substrate/frame/contracts/src/wasm/mod.rs b/substrate/frame/contracts/src/wasm/mod.rs index 77e94b16777..f73655e9920 100644 --- a/substrate/frame/contracts/src/wasm/mod.rs +++ b/substrate/frame/contracts/src/wasm/mod.rs @@ -1506,7 +1506,7 @@ mod tests { (func $assert (param i32) (block $ok (br_if $ok - (get_local 0) + (local.get 0) ) (unreachable) ) @@ -1531,7 +1531,7 @@ mod tests { ) ;; Find out the size of the buffer - (set_local $buf_size + (local.set $buf_size (i32.load (i32.const 32)) ) @@ -1539,7 +1539,7 @@ mod tests { (call $seal_return (i32.const 0) (i32.const 36) - (get_local $buf_size) + (local.get $buf_size) ) ;; env:seal_return doesn't return, so this is effectively unreachable. 
@@ -1575,7 +1575,7 @@ mod tests { (func $assert (param i32) (block $ok (br_if $ok - (get_local 0) + (local.get 0) ) (unreachable) ) @@ -1633,7 +1633,7 @@ mod tests { (func $assert (param i32) (block $ok (br_if $ok - (get_local 0) + (local.get 0) ) (unreachable) ) @@ -1680,7 +1680,7 @@ mod tests { (func $assert (param i32) (block $ok (br_if $ok - (get_local 0) + (local.get 0) ) (unreachable) ) @@ -1726,7 +1726,7 @@ mod tests { (func $assert (param i32) (block $ok (br_if $ok - (get_local 0) + (local.get 0) ) (unreachable) ) @@ -1773,7 +1773,7 @@ mod tests { (func $assert (param i32) (block $ok (br_if $ok - (get_local 0) + (local.get 0) ) (unreachable) ) @@ -1836,7 +1836,7 @@ mod tests { (func $assert (param i32) (block $ok (br_if $ok - (get_local 0) + (local.get 0) ) (unreachable) ) @@ -1925,7 +1925,7 @@ mod tests { (func $assert (param i32) (block $ok (br_if $ok - (get_local 0) + (local.get 0) ) (unreachable) ) @@ -1966,7 +1966,7 @@ mod tests { (func $assert (param i32) (block $ok (br_if $ok - (get_local 0) + (local.get 0) ) (unreachable) ) @@ -2013,7 +2013,7 @@ mod tests { (func $assert (param i32) (block $ok (br_if $ok - (get_local 0) + (local.get 0) ) (unreachable) ) @@ -2067,7 +2067,7 @@ mod tests { (func $assert (param i32) (block $ok (br_if $ok - (get_local 0) + (local.get 0) ) (unreachable) ) @@ -2137,7 +2137,7 @@ mod tests { (func $assert (param i32) (block $ok (br_if $ok - (get_local 0) + (local.get 0) ) (unreachable) ) @@ -2327,7 +2327,7 @@ mod tests { (func $assert (param i32) (block $ok (br_if $ok - (get_local 0) + (local.get 0) ) (unreachable) ) @@ -2995,7 +2995,7 @@ mod tests { (func $assert (param i32) (block $ok (br_if $ok - (get_local 0) + (local.get 0) ) (unreachable) ) @@ -3047,7 +3047,7 @@ mod tests { (func $assert (param i32) (block $ok (br_if $ok - (get_local 0) + (local.get 0) ) (unreachable) ) @@ -3162,18 +3162,18 @@ mod tests { (func $assert (param i32) (block $ok (br_if $ok - (get_local 0) + (local.get 0) ) (unreachable) ) ) (func (export "call") (local $exit_code i32) - (set_local $exit_code + (local.set $exit_code (call $seal_set_code_hash (i32.const 0)) ) (call $assert - (i32.eq (get_local $exit_code) (i32.const 0)) ;; ReturnCode::Success + (i32.eq (local.get $exit_code) (i32.const 0)) ;; ReturnCode::Success ) ) @@ -3202,18 +3202,18 @@ mod tests { (func $assert (param i32) (block $ok (br_if $ok - (get_local 0) + (local.get 0) ) (unreachable) ) ) (func (export "call") (local $return_val i32) - (set_local $return_val + (local.set $return_val (call $reentrance_count) ) (call $assert - (i32.eq (get_local $return_val) (i32.const 12)) + (i32.eq (local.get $return_val) (i32.const 12)) ) ) @@ -3234,18 +3234,18 @@ mod tests { (func $assert (param i32) (block $ok (br_if $ok - (get_local 0) + (local.get 0) ) (unreachable) ) ) (func (export "call") (local $return_val i32) - (set_local $return_val + (local.set $return_val (call $account_reentrance_count (i32.const 0)) ) (call $assert - (i32.eq (get_local $return_val) (i32.const 12)) + (i32.eq (local.get $return_val) (i32.const 12)) ) ) @@ -3267,7 +3267,7 @@ mod tests { (func $assert (param i32) (block $ok (br_if $ok - (get_local 0) + (local.get 0) ) (unreachable) ) -- GitLab From aada961da9400d6f1e1fcccecd5aac627dd230c4 Mon Sep 17 00:00:00 2001 From: Alexander Samusev <41779041+alvicsam@users.noreply.github.com> Date: Tue, 28 Nov 2023 21:07:40 +0100 Subject: [PATCH 148/199] [ci] Run gitspiegel trigger with merge conflicts (#2531) Currently gitspiegel trigger won't run if there is merge conflict. This PR fixes it. 
close https://github.com/paritytech/gitspiegel/issues/183 --- .github/workflows/gitspiegel-trigger.yml | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/.github/workflows/gitspiegel-trigger.yml b/.github/workflows/gitspiegel-trigger.yml index 59347fad6d6..b338f7a3f62 100644 --- a/.github/workflows/gitspiegel-trigger.yml +++ b/.github/workflows/gitspiegel-trigger.yml @@ -13,8 +13,19 @@ on: - unlocked - ready_for_review - reopened + # the job doesn't check out any code, so it is relatively safe to run it on any event + pull_request_target: + types: + - opened + - synchronize + - unlocked + - ready_for_review + - reopened merge_group: +# drop all permissions for GITHUB_TOKEN +permissions: {} + jobs: sync: runs-on: ubuntu-latest -- GitLab From 820179348c1c56bbe00236be6115a7bab451d1fd Mon Sep 17 00:00:00 2001 From: Sebastian Kunert Date: Tue, 28 Nov 2023 23:13:36 +0100 Subject: [PATCH 149/199] Remove long deprecated `AllPalletsWithoutSystemReversed` (#2509) Remove deprecated `AllPalletsXY` types. They have been deprecated for nearly 1.5 years now, I think its fine to remove them. If anyone feels like we should first put a date on the deprecation as stated in the deprecation guideline, feel free to speak up. To me it looks like this has been forgotten and can be directly removed. --- substrate/frame/executive/README.md | 6 +- .../procedural/src/construct_runtime/mod.rs | 54 -------- substrate/frame/support/test/tests/pallet.rs | 117 ------------------ 3 files changed, 2 insertions(+), 175 deletions(-) diff --git a/substrate/frame/executive/README.md b/substrate/frame/executive/README.md index 96a412a4537..6151232ecaf 100644 --- a/substrate/frame/executive/README.md +++ b/substrate/frame/executive/README.md @@ -34,14 +34,13 @@ The default Substrate node template declares the `Executive` type declaration from the node template. ```rust -# /// Executive: handles dispatch to the various modules. pub type Executive = executive::Executive< Runtime, Block, Context, Runtime, - AllPallets, + AllPalletsWithSystem, >; ``` @@ -51,7 +50,6 @@ You can add custom logic that should be called in your runtime on a runtime upgr generic parameter. The custom logic will be called before the on runtime upgrade logic of all modules is called. ```rust -# struct CustomOnRuntimeUpgrade; impl frame_support::traits::OnRuntimeUpgrade for CustomOnRuntimeUpgrade { fn on_runtime_upgrade() -> frame_support::weights::Weight { @@ -65,7 +63,7 @@ pub type Executive = executive::Executive< Block, Context, Runtime, - AllPallets, + AllPalletsWithSystem, CustomOnRuntimeUpgrade, >; ``` diff --git a/substrate/frame/support/procedural/src/construct_runtime/mod.rs b/substrate/frame/support/procedural/src/construct_runtime/mod.rs index ce34694275b..d1d6040d33a 100644 --- a/substrate/frame/support/procedural/src/construct_runtime/mod.rs +++ b/substrate/frame/support/procedural/src/construct_runtime/mod.rs @@ -600,66 +600,12 @@ fn decl_all_pallets<'a>( } }); - let all_pallets_without_system_reversed = attribute_to_names.iter().map(|(attr, names)| { - let names = names.iter().filter(|n| **n != SYSTEM_PALLET_NAME).rev(); - quote! { - #attr - /// All pallets included in the runtime as a nested tuple of types in reversed order. - /// Excludes the System pallet. - #[deprecated(note = "Using reverse pallet orders is deprecated. 
use only \ - `AllPalletsWithSystem or AllPalletsWithoutSystem`")] - pub type AllPalletsWithoutSystemReversed = ( #(#names,)* ); - } - }); - - let all_pallets_with_system_reversed = attribute_to_names.iter().map(|(attr, names)| { - let names = names.iter().rev(); - quote! { - #attr - /// All pallets included in the runtime as a nested tuple of types in reversed order. - #[deprecated(note = "Using reverse pallet orders is deprecated. use only \ - `AllPalletsWithSystem or AllPalletsWithoutSystem`")] - pub type AllPalletsWithSystemReversed = ( #(#names,)* ); - } - }); - - let all_pallets_reversed_with_system_first = attribute_to_names.iter().map(|(attr, names)| { - let system = quote::format_ident!("{}", SYSTEM_PALLET_NAME); - let names = std::iter::once(&system) - .chain(names.iter().rev().filter(|n| **n != SYSTEM_PALLET_NAME).cloned()); - quote! { - #attr - /// All pallets included in the runtime as a nested tuple of types in reversed order. - /// With the system pallet first. - #[deprecated(note = "Using reverse pallet orders is deprecated. use only \ - `AllPalletsWithSystem or AllPalletsWithoutSystem`")] - pub type AllPalletsReversedWithSystemFirst = ( #(#names,)* ); - } - }); - quote!( #types - /// All pallets included in the runtime as a nested tuple of types. - #[deprecated(note = "The type definition has changed from representing all pallets \ - excluding system, in reversed order to become the representation of all pallets \ - including system pallet in regular order. For this reason it is encouraged to use \ - explicitly one of `AllPalletsWithSystem`, `AllPalletsWithoutSystem`, \ - `AllPalletsWithSystemReversed`, `AllPalletsWithoutSystemReversed`. \ - Note that the type `frame_executive::Executive` expects one of `AllPalletsWithSystem` \ - , `AllPalletsWithSystemReversed`, `AllPalletsReversedWithSystemFirst`. 
More details in \ - https://github.com/paritytech/substrate/pull/10043")] - pub type AllPallets = AllPalletsWithSystem; - #( #all_pallets_with_system )* #( #all_pallets_without_system )* - - #( #all_pallets_with_system_reversed )* - - #( #all_pallets_without_system_reversed )* - - #( #all_pallets_reversed_with_system_first )* ) } fn decl_pallet_runtime_setup( diff --git a/substrate/frame/support/test/tests/pallet.rs b/substrate/frame/support/test/tests/pallet.rs index 1afb8fdf497..d2e5b185163 100644 --- a/substrate/frame/support/test/tests/pallet.rs +++ b/substrate/frame/support/test/tests/pallet.rs @@ -1271,52 +1271,6 @@ fn pallet_hooks_expand() { }) } -#[test] -fn all_pallets_type_reversed_order_is_correct() { - TestExternalities::default().execute_with(|| { - frame_system::Pallet::::set_block_number(1); - - #[allow(deprecated)] - { - assert_eq!( - AllPalletsWithoutSystemReversed::on_initialize(1), - Weight::from_parts(10, 0) - ); - AllPalletsWithoutSystemReversed::on_finalize(1); - - assert_eq!( - AllPalletsWithoutSystemReversed::on_runtime_upgrade(), - Weight::from_parts(30, 0) - ); - } - - assert_eq!( - frame_system::Pallet::::events()[0].event, - RuntimeEvent::Example2(pallet2::Event::Something(11)), - ); - assert_eq!( - frame_system::Pallet::::events()[1].event, - RuntimeEvent::Example(pallet::Event::Something(10)), - ); - assert_eq!( - frame_system::Pallet::::events()[2].event, - RuntimeEvent::Example2(pallet2::Event::Something(21)), - ); - assert_eq!( - frame_system::Pallet::::events()[3].event, - RuntimeEvent::Example(pallet::Event::Something(20)), - ); - assert_eq!( - frame_system::Pallet::::events()[4].event, - RuntimeEvent::Example2(pallet2::Event::Something(31)), - ); - assert_eq!( - frame_system::Pallet::::events()[5].event, - RuntimeEvent::Example(pallet::Event::Something(30)), - ); - }) -} - #[test] fn pallet_on_genesis() { TestExternalities::default().execute_with(|| { @@ -2186,31 +2140,6 @@ fn test_storage_info() { ); } -#[test] -fn assert_type_all_pallets_reversed_with_system_first_is_correct() { - // Just ensure the 2 types are same. - #[allow(deprecated)] - fn _a(_t: AllPalletsReversedWithSystemFirst) {} - #[cfg(all(not(feature = "frame-feature-testing"), not(feature = "frame-feature-testing-2")))] - fn _b(t: (System, Example4, Example2, Example)) { - _a(t) - } - #[cfg(all(feature = "frame-feature-testing", not(feature = "frame-feature-testing-2")))] - fn _b(t: (System, Example4, Example3, Example2, Example)) { - _a(t) - } - - #[cfg(all(not(feature = "frame-feature-testing"), feature = "frame-feature-testing-2"))] - fn _b(t: (System, Example5, Example4, Example2, Example)) { - _a(t) - } - - #[cfg(all(feature = "frame-feature-testing", feature = "frame-feature-testing-2"))] - fn _b(t: (System, Example5, Example4, Example3, Example2, Example)) { - _a(t) - } -} - #[test] fn assert_type_all_pallets_with_system_is_correct() { // Just ensure the 2 types are same. @@ -2255,52 +2184,6 @@ fn assert_type_all_pallets_without_system_is_correct() { } } -#[test] -fn assert_type_all_pallets_with_system_reversed_is_correct() { - // Just ensure the 2 types are same. 
- #[allow(deprecated)] - fn _a(_t: AllPalletsWithSystemReversed) {} - #[cfg(all(not(feature = "frame-feature-testing"), not(feature = "frame-feature-testing-2")))] - fn _b(t: (Example4, Example2, Example, System)) { - _a(t) - } - #[cfg(all(feature = "frame-feature-testing", not(feature = "frame-feature-testing-2")))] - fn _b(t: (Example4, Example3, Example2, Example, System)) { - _a(t) - } - #[cfg(all(not(feature = "frame-feature-testing"), feature = "frame-feature-testing-2"))] - fn _b(t: (Example5, Example4, Example2, Example, System)) { - _a(t) - } - #[cfg(all(feature = "frame-feature-testing", feature = "frame-feature-testing-2"))] - fn _b(t: (Example5, Example4, Example3, Example2, Example, System)) { - _a(t) - } -} - -#[test] -fn assert_type_all_pallets_without_system_reversed_is_correct() { - // Just ensure the 2 types are same. - #[allow(deprecated)] - fn _a(_t: AllPalletsWithoutSystemReversed) {} - #[cfg(all(not(feature = "frame-feature-testing"), not(feature = "frame-feature-testing-2")))] - fn _b(t: (Example4, Example2, Example)) { - _a(t) - } - #[cfg(all(feature = "frame-feature-testing", not(feature = "frame-feature-testing-2")))] - fn _b(t: (Example4, Example3, Example2, Example)) { - _a(t) - } - #[cfg(all(not(feature = "frame-feature-testing"), feature = "frame-feature-testing-2"))] - fn _b(t: (Example5, Example4, Example2, Example)) { - _a(t) - } - #[cfg(all(feature = "frame-feature-testing", feature = "frame-feature-testing-2"))] - fn _b(t: (Example5, Example4, Example3, Example2, Example)) { - _a(t) - } -} - #[test] fn test_storage_alias() { use frame_support::Twox64Concat; -- GitLab From a9aa2d1f1c7b73ddfa69f8fb61f3035d091018d2 Mon Sep 17 00:00:00 2001 From: Liam Aharon Date: Wed, 29 Nov 2023 10:44:44 +0400 Subject: [PATCH 150/199] Remove `dmp_queue` pallet from Westend SP runtimes (#2516) Westend SP dmp queue pallet removal is complete. 
Screenshot 2023-11-28 at 08 31 27 Screenshot 2023-11-28 at 08 32 08 Screenshot 2023-11-28 at 08 31 45 --- Cargo.lock | 3 - .../assets/asset-hub-westend/Cargo.toml | 96 ++++++------- .../assets/asset-hub-westend/src/lib.rs | 8 -- .../src/weights/cumulus_pallet_dmp_queue.rs | 131 ------------------ .../asset-hub-westend/src/weights/mod.rs | 1 - .../bridge-hubs/bridge-hub-westend/Cargo.toml | 84 ++++++----- .../bridge-hubs/bridge-hub-westend/src/lib.rs | 7 - .../src/weights/cumulus_pallet_dmp_queue.rs | 131 ------------------ .../bridge-hub-westend/src/weights/mod.rs | 1 - .../collectives-westend/Cargo.toml | 8 +- .../collectives-westend/src/lib.rs | 8 -- .../src/weights/cumulus_pallet_dmp_queue.rs | 131 ------------------ .../collectives-westend/src/weights/mod.rs | 1 - 13 files changed, 87 insertions(+), 523 deletions(-) delete mode 100644 cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/cumulus_pallet_dmp_queue.rs delete mode 100644 cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/cumulus_pallet_dmp_queue.rs delete mode 100644 cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/cumulus_pallet_dmp_queue.rs diff --git a/Cargo.lock b/Cargo.lock index 4fa9e83a173..871d46ea844 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1050,7 +1050,6 @@ dependencies = [ "bp-bridge-hub-rococo", "bp-bridge-hub-westend", "cumulus-pallet-aura-ext", - "cumulus-pallet-dmp-queue", "cumulus-pallet-parachain-system", "cumulus-pallet-session-benchmarking", "cumulus-pallet-xcm", @@ -2321,7 +2320,6 @@ dependencies = [ "bridge-hub-test-utils", "bridge-runtime-common", "cumulus-pallet-aura-ext", - "cumulus-pallet-dmp-queue", "cumulus-pallet-parachain-system", "cumulus-pallet-session-benchmarking", "cumulus-pallet-xcm", @@ -2940,7 +2938,6 @@ name = "collectives-westend-runtime" version = "1.0.0" dependencies = [ "cumulus-pallet-aura-ext", - "cumulus-pallet-dmp-queue", "cumulus-pallet-parachain-system", "cumulus-pallet-session-benchmarking", "cumulus-pallet-xcm", diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/Cargo.toml b/cumulus/parachains/runtimes/assets/asset-hub-westend/Cargo.toml index 49b80b067cf..bc06c6fe9a1 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-westend/Cargo.toml +++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/Cargo.toml @@ -14,63 +14,62 @@ scale-info = { version = "2.10.0", default-features = false, features = ["derive smallvec = "1.11.0" # Substrate -frame-benchmarking = { path = "../../../../../substrate/frame/benchmarking", default-features = false, optional = true} -frame-executive = { path = "../../../../../substrate/frame/executive", default-features = false} -frame-support = { path = "../../../../../substrate/frame/support", default-features = false} -frame-system = { path = "../../../../../substrate/frame/system", default-features = false} -frame-system-benchmarking = { path = "../../../../../substrate/frame/system/benchmarking", default-features = false, optional = true} -frame-system-rpc-runtime-api = { path = "../../../../../substrate/frame/system/rpc/runtime-api", default-features = false} -frame-try-runtime = { path = "../../../../../substrate/frame/try-runtime", default-features = false, optional = true} -pallet-asset-conversion-tx-payment = { path = "../../../../../substrate/frame/transaction-payment/asset-conversion-tx-payment", default-features = false} -pallet-assets = { path = "../../../../../substrate/frame/assets", default-features = false} -pallet-asset-conversion = { path = 
"../../../../../substrate/frame/asset-conversion", default-features = false} -pallet-aura = { path = "../../../../../substrate/frame/aura", default-features = false} -pallet-authorship = { path = "../../../../../substrate/frame/authorship", default-features = false} -pallet-balances = { path = "../../../../../substrate/frame/balances", default-features = false} -pallet-multisig = { path = "../../../../../substrate/frame/multisig", default-features = false} -pallet-nft-fractionalization = { path = "../../../../../substrate/frame/nft-fractionalization", default-features = false} -pallet-nfts = { path = "../../../../../substrate/frame/nfts", default-features = false} -pallet-nfts-runtime-api = { path = "../../../../../substrate/frame/nfts/runtime-api", default-features = false} -pallet-proxy = { path = "../../../../../substrate/frame/proxy", default-features = false} -pallet-session = { path = "../../../../../substrate/frame/session", default-features = false} -pallet-timestamp = { path = "../../../../../substrate/frame/timestamp", default-features = false} -pallet-transaction-payment = { path = "../../../../../substrate/frame/transaction-payment", default-features = false} -pallet-transaction-payment-rpc-runtime-api = { path = "../../../../../substrate/frame/transaction-payment/rpc/runtime-api", default-features = false} -pallet-uniques = { path = "../../../../../substrate/frame/uniques", default-features = false} -pallet-utility = { path = "../../../../../substrate/frame/utility", default-features = false} -sp-api = { path = "../../../../../substrate/primitives/api", default-features = false} -sp-block-builder = { path = "../../../../../substrate/primitives/block-builder", default-features = false} -sp-consensus-aura = { path = "../../../../../substrate/primitives/consensus/aura", default-features = false} -sp-core = { path = "../../../../../substrate/primitives/core", default-features = false} +frame-benchmarking = { path = "../../../../../substrate/frame/benchmarking", default-features = false, optional = true } +frame-executive = { path = "../../../../../substrate/frame/executive", default-features = false } +frame-support = { path = "../../../../../substrate/frame/support", default-features = false } +frame-system = { path = "../../../../../substrate/frame/system", default-features = false } +frame-system-benchmarking = { path = "../../../../../substrate/frame/system/benchmarking", default-features = false, optional = true } +frame-system-rpc-runtime-api = { path = "../../../../../substrate/frame/system/rpc/runtime-api", default-features = false } +frame-try-runtime = { path = "../../../../../substrate/frame/try-runtime", default-features = false, optional = true } +pallet-asset-conversion-tx-payment = { path = "../../../../../substrate/frame/transaction-payment/asset-conversion-tx-payment", default-features = false } +pallet-assets = { path = "../../../../../substrate/frame/assets", default-features = false } +pallet-asset-conversion = { path = "../../../../../substrate/frame/asset-conversion", default-features = false } +pallet-aura = { path = "../../../../../substrate/frame/aura", default-features = false } +pallet-authorship = { path = "../../../../../substrate/frame/authorship", default-features = false } +pallet-balances = { path = "../../../../../substrate/frame/balances", default-features = false } +pallet-multisig = { path = "../../../../../substrate/frame/multisig", default-features = false } +pallet-nft-fractionalization = { path = 
"../../../../../substrate/frame/nft-fractionalization", default-features = false } +pallet-nfts = { path = "../../../../../substrate/frame/nfts", default-features = false } +pallet-nfts-runtime-api = { path = "../../../../../substrate/frame/nfts/runtime-api", default-features = false } +pallet-proxy = { path = "../../../../../substrate/frame/proxy", default-features = false } +pallet-session = { path = "../../../../../substrate/frame/session", default-features = false } +pallet-timestamp = { path = "../../../../../substrate/frame/timestamp", default-features = false } +pallet-transaction-payment = { path = "../../../../../substrate/frame/transaction-payment", default-features = false } +pallet-transaction-payment-rpc-runtime-api = { path = "../../../../../substrate/frame/transaction-payment/rpc/runtime-api", default-features = false } +pallet-uniques = { path = "../../../../../substrate/frame/uniques", default-features = false } +pallet-utility = { path = "../../../../../substrate/frame/utility", default-features = false } +sp-api = { path = "../../../../../substrate/primitives/api", default-features = false } +sp-block-builder = { path = "../../../../../substrate/primitives/block-builder", default-features = false } +sp-consensus-aura = { path = "../../../../../substrate/primitives/consensus/aura", default-features = false } +sp-core = { path = "../../../../../substrate/primitives/core", default-features = false } sp-genesis-builder = { path = "../../../../../substrate/primitives/genesis-builder", default-features = false } -sp-inherents = { path = "../../../../../substrate/primitives/inherents", default-features = false} -sp-offchain = { path = "../../../../../substrate/primitives/offchain", default-features = false} -sp-runtime = { path = "../../../../../substrate/primitives/runtime", default-features = false} -sp-session = { path = "../../../../../substrate/primitives/session", default-features = false} -sp-std = { path = "../../../../../substrate/primitives/std", default-features = false} -sp-storage = { path = "../../../../../substrate/primitives/storage", default-features = false} -sp-transaction-pool = { path = "../../../../../substrate/primitives/transaction-pool", default-features = false} -sp-version = { path = "../../../../../substrate/primitives/version", default-features = false} +sp-inherents = { path = "../../../../../substrate/primitives/inherents", default-features = false } +sp-offchain = { path = "../../../../../substrate/primitives/offchain", default-features = false } +sp-runtime = { path = "../../../../../substrate/primitives/runtime", default-features = false } +sp-session = { path = "../../../../../substrate/primitives/session", default-features = false } +sp-std = { path = "../../../../../substrate/primitives/std", default-features = false } +sp-storage = { path = "../../../../../substrate/primitives/storage", default-features = false } +sp-transaction-pool = { path = "../../../../../substrate/primitives/transaction-pool", default-features = false } +sp-version = { path = "../../../../../substrate/primitives/version", default-features = false } # num-traits feature needed for dex integer sq root: primitive-types = { version = "0.12.1", default-features = false, features = ["codec", "scale-info", "num-traits"] } # Polkadot -pallet-xcm = { path = "../../../../../polkadot/xcm/pallet-xcm", default-features = false} +pallet-xcm = { path = "../../../../../polkadot/xcm/pallet-xcm", default-features = false } pallet-xcm-benchmarks = { path = 
"../../../../../polkadot/xcm/pallet-xcm-benchmarks", default-features = false, optional = true } -polkadot-core-primitives = { path = "../../../../../polkadot/core-primitives", default-features = false} -polkadot-parachain-primitives = { path = "../../../../../polkadot/parachain", default-features = false} -polkadot-runtime-common = { path = "../../../../../polkadot/runtime/common", default-features = false} -westend-runtime-constants = { path = "../../../../../polkadot/runtime/westend/constants", default-features = false} -xcm = { package = "staging-xcm", path = "../../../../../polkadot/xcm", default-features = false} -xcm-builder = { package = "staging-xcm-builder", path = "../../../../../polkadot/xcm/xcm-builder", default-features = false} -xcm-executor = { package = "staging-xcm-executor", path = "../../../../../polkadot/xcm/xcm-executor", default-features = false} +polkadot-core-primitives = { path = "../../../../../polkadot/core-primitives", default-features = false } +polkadot-parachain-primitives = { path = "../../../../../polkadot/parachain", default-features = false } +polkadot-runtime-common = { path = "../../../../../polkadot/runtime/common", default-features = false } +westend-runtime-constants = { path = "../../../../../polkadot/runtime/westend/constants", default-features = false } +xcm = { package = "staging-xcm", path = "../../../../../polkadot/xcm", default-features = false } +xcm-builder = { package = "staging-xcm-builder", path = "../../../../../polkadot/xcm/xcm-builder", default-features = false } +xcm-executor = { package = "staging-xcm-executor", path = "../../../../../polkadot/xcm/xcm-executor", default-features = false } # Cumulus cumulus-pallet-aura-ext = { path = "../../../../pallets/aura-ext", default-features = false } pallet-message-queue = { path = "../../../../../substrate/frame/message-queue", default-features = false } -cumulus-pallet-dmp-queue = { path = "../../../../pallets/dmp-queue", default-features = false } -cumulus-pallet-parachain-system = { path = "../../../../pallets/parachain-system", default-features = false, features = ["parameterized-consensus-hook",] } -cumulus-pallet-session-benchmarking = { path = "../../../../pallets/session-benchmarking", default-features = false} +cumulus-pallet-parachain-system = { path = "../../../../pallets/parachain-system", default-features = false, features = ["parameterized-consensus-hook"] } +cumulus-pallet-session-benchmarking = { path = "../../../../pallets/session-benchmarking", default-features = false } cumulus-pallet-xcm = { path = "../../../../pallets/xcm", default-features = false } cumulus-pallet-xcmp-queue = { path = "../../../../pallets/xcmp-queue", default-features = false, features = ["bridging"] } cumulus-primitives-core = { path = "../../../../primitives/core", default-features = false } @@ -98,7 +97,6 @@ substrate-wasm-builder = { path = "../../../../../substrate/utils/wasm-builder", default = [ "std" ] runtime-benchmarks = [ "assets-common/runtime-benchmarks", - "cumulus-pallet-dmp-queue/runtime-benchmarks", "cumulus-pallet-parachain-system/runtime-benchmarks", "cumulus-pallet-session-benchmarking/runtime-benchmarks", "cumulus-pallet-xcmp-queue/runtime-benchmarks", @@ -133,7 +131,6 @@ runtime-benchmarks = [ ] try-runtime = [ "cumulus-pallet-aura-ext/try-runtime", - "cumulus-pallet-dmp-queue/try-runtime", "cumulus-pallet-parachain-system/try-runtime", "cumulus-pallet-xcm/try-runtime", "cumulus-pallet-xcmp-queue/try-runtime", @@ -172,7 +169,6 @@ std = [ "bp-bridge-hub-westend/std", "codec/std", 
"cumulus-pallet-aura-ext/std", - "cumulus-pallet-dmp-queue/std", "cumulus-pallet-parachain-system/std", "cumulus-pallet-session-benchmarking/std", "cumulus-pallet-xcm/std", diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs index a0d5a528c3f..4eb3e2471ce 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs @@ -667,12 +667,6 @@ parameter_types! { pub const RelayOrigin: AggregateMessageOrigin = AggregateMessageOrigin::Parent; } -impl cumulus_pallet_dmp_queue::Config for Runtime { - type WeightInfo = weights::cumulus_pallet_dmp_queue::WeightInfo; - type RuntimeEvent = RuntimeEvent; - type DmpSink = frame_support::traits::EnqueueWithOrigin; -} - parameter_types! { pub const Period: u32 = 6 * HOURS; pub const Offset: u32 = 0; @@ -890,7 +884,6 @@ construct_runtime!( XcmpQueue: cumulus_pallet_xcmp_queue::{Pallet, Call, Storage, Event} = 30, PolkadotXcm: pallet_xcm::{Pallet, Call, Storage, Event, Origin, Config} = 31, CumulusXcm: cumulus_pallet_xcm::{Pallet, Event, Origin} = 32, - DmpQueue: cumulus_pallet_dmp_queue::{Pallet, Call, Storage, Event} = 33, // Bridge utilities. ToRococoXcmRouter: pallet_xcm_bridge_hub_router::::{Pallet, Storage, Call} = 34, MessageQueue: pallet_message_queue::{Pallet, Call, Storage, Event} = 35, @@ -1082,7 +1075,6 @@ mod benches { [pallet_timestamp, Timestamp] [pallet_collator_selection, CollatorSelection] [cumulus_pallet_xcmp_queue, XcmpQueue] - [cumulus_pallet_dmp_queue, DmpQueue] [pallet_xcm_bridge_hub_router, ToRococo] // XCM [pallet_xcm, PalletXcmExtrinsicsBenchmark::] diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/cumulus_pallet_dmp_queue.rs b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/cumulus_pallet_dmp_queue.rs deleted file mode 100644 index cc41dcd6cbb..00000000000 --- a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/cumulus_pallet_dmp_queue.rs +++ /dev/null @@ -1,131 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Cumulus. - -// Cumulus is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Cumulus is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Cumulus. If not, see . - -//! Autogenerated weights for `cumulus_pallet_dmp_queue` -//! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-10-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-yprdrvc7-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! 
WASM-EXECUTION: `Compiled`, CHAIN: `Some("asset-hub-kusama-dev")`, DB CACHE: 1024 - -// Executed Command: -// target/production/polkadot-parachain -// benchmark -// pallet -// --steps=50 -// --repeat=20 -// --extrinsic=* -// --wasm-execution=compiled -// --heap-pages=4096 -// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json -// --pallet=cumulus_pallet_dmp_queue -// --chain=asset-hub-kusama-dev -// --header=./cumulus/file_header.txt -// --output=./cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/ - -#![cfg_attr(rustfmt, rustfmt_skip)] -#![allow(unused_parens)] -#![allow(unused_imports)] -#![allow(missing_docs)] - -use frame_support::{traits::Get, weights::Weight}; -use core::marker::PhantomData; - -/// Weight functions for `cumulus_pallet_dmp_queue`. -pub struct WeightInfo(PhantomData); -impl cumulus_pallet_dmp_queue::WeightInfo for WeightInfo { - /// Storage: `DmpQueue::MigrationStatus` (r:1 w:1) - /// Proof: `DmpQueue::MigrationStatus` (`max_values`: Some(1), `max_size`: Some(1028), added: 1523, mode: `MaxEncodedLen`) - /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca754904d6d8c6fe06c4e5965f9b8397421` (r:1 w:0) - /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca754904d6d8c6fe06c4e5965f9b8397421` (r:1 w:0) - /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca7d95d3e948effbeccff2de2c182672836` (r:1 w:1) - /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca7d95d3e948effbeccff2de2c182672836` (r:1 w:1) - /// Storage: `MessageQueue::BookStateFor` (r:1 w:1) - /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) - /// Storage: `MessageQueue::ServiceHead` (r:1 w:1) - /// Proof: `MessageQueue::ServiceHead` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `MaxEncodedLen`) - /// Storage: `MessageQueue::Pages` (r:0 w:1) - /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(65585), added: 68060, mode: `MaxEncodedLen`) - fn on_idle_good_msg() -> Weight { - // Proof Size summary in bytes: - // Measured: `65696` - // Estimated: `69161` - // Minimum execution time: 124_651_000 picoseconds. - Weight::from_parts(127_857_000, 0) - .saturating_add(Weight::from_parts(0, 69161)) - .saturating_add(T::DbWeight::get().reads(5)) - .saturating_add(T::DbWeight::get().writes(5)) - } - /// Storage: `DmpQueue::MigrationStatus` (r:1 w:1) - /// Proof: `DmpQueue::MigrationStatus` (`max_values`: Some(1), `max_size`: Some(1028), added: 1523, mode: `MaxEncodedLen`) - /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca754904d6d8c6fe06c4e5965f9b8397421` (r:1 w:0) - /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca754904d6d8c6fe06c4e5965f9b8397421` (r:1 w:0) - /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca7d95d3e948effbeccff2de2c182672836` (r:1 w:1) - /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca7d95d3e948effbeccff2de2c182672836` (r:1 w:1) - fn on_idle_large_msg() -> Weight { - // Proof Size summary in bytes: - // Measured: `65659` - // Estimated: `69124` - // Minimum execution time: 65_684_000 picoseconds. 
- Weight::from_parts(68_039_000, 0) - .saturating_add(Weight::from_parts(0, 69124)) - .saturating_add(T::DbWeight::get().reads(3)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `DmpQueue::MigrationStatus` (r:1 w:1) - /// Proof: `DmpQueue::MigrationStatus` (`max_values`: Some(1), `max_size`: Some(1028), added: 1523, mode: `MaxEncodedLen`) - /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca754904d6d8c6fe06c4e5965f9b8397421` (r:1 w:0) - /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca754904d6d8c6fe06c4e5965f9b8397421` (r:1 w:0) - /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca70f923ef3252d0166429d36d20ed665a8` (r:1 w:1) - /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca70f923ef3252d0166429d36d20ed665a8` (r:1 w:1) - /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca772275f64c354954352b71eea39cfaca2` (r:1 w:1) - /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca772275f64c354954352b71eea39cfaca2` (r:1 w:1) - /// Storage: `MessageQueue::BookStateFor` (r:1 w:1) - /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) - /// Storage: `MessageQueue::ServiceHead` (r:1 w:1) - /// Proof: `MessageQueue::ServiceHead` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `MaxEncodedLen`) - /// Storage: `MessageQueue::Pages` (r:0 w:1) - /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(65585), added: 68060, mode: `MaxEncodedLen`) - fn on_idle_overweight_good_msg() -> Weight { - // Proof Size summary in bytes: - // Measured: `65726` - // Estimated: `69191` - // Minimum execution time: 117_657_000 picoseconds. - Weight::from_parts(122_035_000, 0) - .saturating_add(Weight::from_parts(0, 69191)) - .saturating_add(T::DbWeight::get().reads(6)) - .saturating_add(T::DbWeight::get().writes(6)) - } - /// Storage: `DmpQueue::MigrationStatus` (r:1 w:1) - /// Proof: `DmpQueue::MigrationStatus` (`max_values`: Some(1), `max_size`: Some(1028), added: 1523, mode: `MaxEncodedLen`) - /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca754904d6d8c6fe06c4e5965f9b8397421` (r:1 w:0) - /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca754904d6d8c6fe06c4e5965f9b8397421` (r:1 w:0) - /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca70f923ef3252d0166429d36d20ed665a8` (r:1 w:1) - /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca70f923ef3252d0166429d36d20ed665a8` (r:1 w:1) - /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca772275f64c354954352b71eea39cfaca2` (r:1 w:1) - /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca772275f64c354954352b71eea39cfaca2` (r:1 w:1) - fn on_idle_overweight_large_msg() -> Weight { - // Proof Size summary in bytes: - // Measured: `65689` - // Estimated: `69154` - // Minimum execution time: 59_799_000 picoseconds. - Weight::from_parts(61_354_000, 0) - .saturating_add(Weight::from_parts(0, 69154)) - .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(3)) - } -} diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/mod.rs b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/mod.rs index 1646c00989d..2462138b04a 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/mod.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/mod.rs @@ -14,7 +14,6 @@ // limitations under the License. 
pub mod block_weights; -pub mod cumulus_pallet_dmp_queue; pub mod cumulus_pallet_parachain_system; pub mod cumulus_pallet_xcmp_queue; pub mod extrinsic_weights; diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/Cargo.toml b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/Cargo.toml index 7e384126ab6..4d2e60d971d 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/Cargo.toml +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/Cargo.toml @@ -18,54 +18,53 @@ serde = { version = "1.0.188", optional = true, features = ["derive"] } smallvec = "1.11.0" # Substrate -frame-benchmarking = { path = "../../../../../substrate/frame/benchmarking", default-features = false, optional = true} -frame-executive = { path = "../../../../../substrate/frame/executive", default-features = false} -frame-support = { path = "../../../../../substrate/frame/support", default-features = false} -frame-system = { path = "../../../../../substrate/frame/system", default-features = false} -frame-system-benchmarking = { path = "../../../../../substrate/frame/system/benchmarking", default-features = false, optional = true} -frame-system-rpc-runtime-api = { path = "../../../../../substrate/frame/system/rpc/runtime-api", default-features = false} -frame-try-runtime = { path = "../../../../../substrate/frame/try-runtime", default-features = false, optional = true} -pallet-aura = { path = "../../../../../substrate/frame/aura", default-features = false} -pallet-authorship = { path = "../../../../../substrate/frame/authorship", default-features = false} -pallet-balances = { path = "../../../../../substrate/frame/balances", default-features = false} -pallet-session = { path = "../../../../../substrate/frame/session", default-features = false} -pallet-message-queue = { path = "../../../../../substrate/frame/message-queue", default-features = false} -pallet-multisig = { path = "../../../../../substrate/frame/multisig", default-features = false} -pallet-timestamp = { path = "../../../../../substrate/frame/timestamp", default-features = false} -pallet-transaction-payment = { path = "../../../../../substrate/frame/transaction-payment", default-features = false} -pallet-transaction-payment-rpc-runtime-api = { path = "../../../../../substrate/frame/transaction-payment/rpc/runtime-api", default-features = false} -pallet-utility = { path = "../../../../../substrate/frame/utility", default-features = false} -sp-api = { path = "../../../../../substrate/primitives/api", default-features = false} -sp-block-builder = { path = "../../../../../substrate/primitives/block-builder", default-features = false} -sp-consensus-aura = { path = "../../../../../substrate/primitives/consensus/aura", default-features = false} -sp-core = { path = "../../../../../substrate/primitives/core", default-features = false} +frame-benchmarking = { path = "../../../../../substrate/frame/benchmarking", default-features = false, optional = true } +frame-executive = { path = "../../../../../substrate/frame/executive", default-features = false } +frame-support = { path = "../../../../../substrate/frame/support", default-features = false } +frame-system = { path = "../../../../../substrate/frame/system", default-features = false } +frame-system-benchmarking = { path = "../../../../../substrate/frame/system/benchmarking", default-features = false, optional = true } +frame-system-rpc-runtime-api = { path = "../../../../../substrate/frame/system/rpc/runtime-api", default-features = false } +frame-try-runtime = { 
path = "../../../../../substrate/frame/try-runtime", default-features = false, optional = true } +pallet-aura = { path = "../../../../../substrate/frame/aura", default-features = false } +pallet-authorship = { path = "../../../../../substrate/frame/authorship", default-features = false } +pallet-balances = { path = "../../../../../substrate/frame/balances", default-features = false } +pallet-session = { path = "../../../../../substrate/frame/session", default-features = false } +pallet-message-queue = { path = "../../../../../substrate/frame/message-queue", default-features = false } +pallet-multisig = { path = "../../../../../substrate/frame/multisig", default-features = false } +pallet-timestamp = { path = "../../../../../substrate/frame/timestamp", default-features = false } +pallet-transaction-payment = { path = "../../../../../substrate/frame/transaction-payment", default-features = false } +pallet-transaction-payment-rpc-runtime-api = { path = "../../../../../substrate/frame/transaction-payment/rpc/runtime-api", default-features = false } +pallet-utility = { path = "../../../../../substrate/frame/utility", default-features = false } +sp-api = { path = "../../../../../substrate/primitives/api", default-features = false } +sp-block-builder = { path = "../../../../../substrate/primitives/block-builder", default-features = false } +sp-consensus-aura = { path = "../../../../../substrate/primitives/consensus/aura", default-features = false } +sp-core = { path = "../../../../../substrate/primitives/core", default-features = false } sp-genesis-builder = { path = "../../../../../substrate/primitives/genesis-builder", default-features = false } -sp-inherents = { path = "../../../../../substrate/primitives/inherents", default-features = false} -sp-io = { path = "../../../../../substrate/primitives/io", default-features = false} -sp-offchain = { path = "../../../../../substrate/primitives/offchain", default-features = false} -sp-runtime = { path = "../../../../../substrate/primitives/runtime", default-features = false} -sp-session = { path = "../../../../../substrate/primitives/session", default-features = false} -sp-std = { path = "../../../../../substrate/primitives/std", default-features = false} -sp-storage = { path = "../../../../../substrate/primitives/storage", default-features = false} -sp-transaction-pool = { path = "../../../../../substrate/primitives/transaction-pool", default-features = false} -sp-version = { path = "../../../../../substrate/primitives/version", default-features = false} +sp-inherents = { path = "../../../../../substrate/primitives/inherents", default-features = false } +sp-io = { path = "../../../../../substrate/primitives/io", default-features = false } +sp-offchain = { path = "../../../../../substrate/primitives/offchain", default-features = false } +sp-runtime = { path = "../../../../../substrate/primitives/runtime", default-features = false } +sp-session = { path = "../../../../../substrate/primitives/session", default-features = false } +sp-std = { path = "../../../../../substrate/primitives/std", default-features = false } +sp-storage = { path = "../../../../../substrate/primitives/storage", default-features = false } +sp-transaction-pool = { path = "../../../../../substrate/primitives/transaction-pool", default-features = false } +sp-version = { path = "../../../../../substrate/primitives/version", default-features = false } # Polkadot -westend-runtime-constants = { path = "../../../../../polkadot/runtime/westend/constants", default-features = false} 
-pallet-xcm = { path = "../../../../../polkadot/xcm/pallet-xcm", default-features = false} +westend-runtime-constants = { path = "../../../../../polkadot/runtime/westend/constants", default-features = false } +pallet-xcm = { path = "../../../../../polkadot/xcm/pallet-xcm", default-features = false } pallet-xcm-benchmarks = { path = "../../../../../polkadot/xcm/pallet-xcm-benchmarks", default-features = false, optional = true } -polkadot-core-primitives = { path = "../../../../../polkadot/core-primitives", default-features = false} -polkadot-parachain-primitives = { path = "../../../../../polkadot/parachain", default-features = false} -polkadot-runtime-common = { path = "../../../../../polkadot/runtime/common", default-features = false} -xcm = { package = "staging-xcm", path = "../../../../../polkadot/xcm", default-features = false} -xcm-builder = { package = "staging-xcm-builder", path = "../../../../../polkadot/xcm/xcm-builder", default-features = false} -xcm-executor = { package = "staging-xcm-executor", path = "../../../../../polkadot/xcm/xcm-executor", default-features = false} +polkadot-core-primitives = { path = "../../../../../polkadot/core-primitives", default-features = false } +polkadot-parachain-primitives = { path = "../../../../../polkadot/parachain", default-features = false } +polkadot-runtime-common = { path = "../../../../../polkadot/runtime/common", default-features = false } +xcm = { package = "staging-xcm", path = "../../../../../polkadot/xcm", default-features = false } +xcm-builder = { package = "staging-xcm-builder", path = "../../../../../polkadot/xcm/xcm-builder", default-features = false } +xcm-executor = { package = "staging-xcm-executor", path = "../../../../../polkadot/xcm/xcm-executor", default-features = false } # Cumulus cumulus-pallet-aura-ext = { path = "../../../../pallets/aura-ext", default-features = false } -cumulus-pallet-dmp-queue = { path = "../../../../pallets/dmp-queue", default-features = false } -cumulus-pallet-parachain-system = { path = "../../../../pallets/parachain-system", default-features = false, features = ["parameterized-consensus-hook",] } -cumulus-pallet-session-benchmarking = { path = "../../../../pallets/session-benchmarking", default-features = false} +cumulus-pallet-parachain-system = { path = "../../../../pallets/parachain-system", default-features = false, features = ["parameterized-consensus-hook"] } +cumulus-pallet-session-benchmarking = { path = "../../../../pallets/session-benchmarking", default-features = false } cumulus-pallet-xcm = { path = "../../../../pallets/xcm", default-features = false } cumulus-pallet-xcmp-queue = { path = "../../../../pallets/xcmp-queue", default-features = false, features = ["bridging"] } cumulus-primitives-core = { path = "../../../../primitives/core", default-features = false } @@ -115,7 +114,6 @@ std = [ "bridge-runtime-common/std", "codec/std", "cumulus-pallet-aura-ext/std", - "cumulus-pallet-dmp-queue/std", "cumulus-pallet-parachain-system/std", "cumulus-pallet-session-benchmarking/std", "cumulus-pallet-xcm/std", @@ -177,7 +175,6 @@ std = [ runtime-benchmarks = [ "bridge-runtime-common/runtime-benchmarks", - "cumulus-pallet-dmp-queue/runtime-benchmarks", "cumulus-pallet-parachain-system/runtime-benchmarks", "cumulus-pallet-session-benchmarking/runtime-benchmarks", "cumulus-pallet-xcmp-queue/runtime-benchmarks", @@ -209,7 +206,6 @@ runtime-benchmarks = [ try-runtime = [ "cumulus-pallet-aura-ext/try-runtime", - "cumulus-pallet-dmp-queue/try-runtime", 
"cumulus-pallet-parachain-system/try-runtime", "cumulus-pallet-xcm/try-runtime", "cumulus-pallet-xcmp-queue/try-runtime", diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs index 764b9825d4e..7b8ea1b7734 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs @@ -393,12 +393,6 @@ parameter_types! { pub const RelayOrigin: AggregateMessageOrigin = AggregateMessageOrigin::Parent; } -impl cumulus_pallet_dmp_queue::Config for Runtime { - type WeightInfo = weights::cumulus_pallet_dmp_queue::WeightInfo; - type RuntimeEvent = RuntimeEvent; - type DmpSink = frame_support::traits::EnqueueWithOrigin; -} - pub const PERIOD: u32 = 6 * HOURS; pub const OFFSET: u32 = 0; @@ -499,7 +493,6 @@ construct_runtime!( XcmpQueue: cumulus_pallet_xcmp_queue::{Pallet, Call, Storage, Event} = 30, PolkadotXcm: pallet_xcm::{Pallet, Call, Storage, Event, Origin, Config} = 31, CumulusXcm: cumulus_pallet_xcm::{Pallet, Event, Origin} = 32, - DmpQueue: cumulus_pallet_dmp_queue::{Pallet, Call, Storage, Event} = 33, // Handy utilities. Utility: pallet_utility::{Pallet, Call, Event} = 40, diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/cumulus_pallet_dmp_queue.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/cumulus_pallet_dmp_queue.rs deleted file mode 100644 index cc41dcd6cbb..00000000000 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/cumulus_pallet_dmp_queue.rs +++ /dev/null @@ -1,131 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Cumulus. - -// Cumulus is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Cumulus is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Cumulus. If not, see . - -//! Autogenerated weights for `cumulus_pallet_dmp_queue` -//! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-10-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-yprdrvc7-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("asset-hub-kusama-dev")`, DB CACHE: 1024 - -// Executed Command: -// target/production/polkadot-parachain -// benchmark -// pallet -// --steps=50 -// --repeat=20 -// --extrinsic=* -// --wasm-execution=compiled -// --heap-pages=4096 -// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json -// --pallet=cumulus_pallet_dmp_queue -// --chain=asset-hub-kusama-dev -// --header=./cumulus/file_header.txt -// --output=./cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/ - -#![cfg_attr(rustfmt, rustfmt_skip)] -#![allow(unused_parens)] -#![allow(unused_imports)] -#![allow(missing_docs)] - -use frame_support::{traits::Get, weights::Weight}; -use core::marker::PhantomData; - -/// Weight functions for `cumulus_pallet_dmp_queue`. 
-pub struct WeightInfo(PhantomData); -impl cumulus_pallet_dmp_queue::WeightInfo for WeightInfo { - /// Storage: `DmpQueue::MigrationStatus` (r:1 w:1) - /// Proof: `DmpQueue::MigrationStatus` (`max_values`: Some(1), `max_size`: Some(1028), added: 1523, mode: `MaxEncodedLen`) - /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca754904d6d8c6fe06c4e5965f9b8397421` (r:1 w:0) - /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca754904d6d8c6fe06c4e5965f9b8397421` (r:1 w:0) - /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca7d95d3e948effbeccff2de2c182672836` (r:1 w:1) - /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca7d95d3e948effbeccff2de2c182672836` (r:1 w:1) - /// Storage: `MessageQueue::BookStateFor` (r:1 w:1) - /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) - /// Storage: `MessageQueue::ServiceHead` (r:1 w:1) - /// Proof: `MessageQueue::ServiceHead` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `MaxEncodedLen`) - /// Storage: `MessageQueue::Pages` (r:0 w:1) - /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(65585), added: 68060, mode: `MaxEncodedLen`) - fn on_idle_good_msg() -> Weight { - // Proof Size summary in bytes: - // Measured: `65696` - // Estimated: `69161` - // Minimum execution time: 124_651_000 picoseconds. - Weight::from_parts(127_857_000, 0) - .saturating_add(Weight::from_parts(0, 69161)) - .saturating_add(T::DbWeight::get().reads(5)) - .saturating_add(T::DbWeight::get().writes(5)) - } - /// Storage: `DmpQueue::MigrationStatus` (r:1 w:1) - /// Proof: `DmpQueue::MigrationStatus` (`max_values`: Some(1), `max_size`: Some(1028), added: 1523, mode: `MaxEncodedLen`) - /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca754904d6d8c6fe06c4e5965f9b8397421` (r:1 w:0) - /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca754904d6d8c6fe06c4e5965f9b8397421` (r:1 w:0) - /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca7d95d3e948effbeccff2de2c182672836` (r:1 w:1) - /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca7d95d3e948effbeccff2de2c182672836` (r:1 w:1) - fn on_idle_large_msg() -> Weight { - // Proof Size summary in bytes: - // Measured: `65659` - // Estimated: `69124` - // Minimum execution time: 65_684_000 picoseconds. 
- Weight::from_parts(68_039_000, 0) - .saturating_add(Weight::from_parts(0, 69124)) - .saturating_add(T::DbWeight::get().reads(3)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `DmpQueue::MigrationStatus` (r:1 w:1) - /// Proof: `DmpQueue::MigrationStatus` (`max_values`: Some(1), `max_size`: Some(1028), added: 1523, mode: `MaxEncodedLen`) - /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca754904d6d8c6fe06c4e5965f9b8397421` (r:1 w:0) - /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca754904d6d8c6fe06c4e5965f9b8397421` (r:1 w:0) - /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca70f923ef3252d0166429d36d20ed665a8` (r:1 w:1) - /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca70f923ef3252d0166429d36d20ed665a8` (r:1 w:1) - /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca772275f64c354954352b71eea39cfaca2` (r:1 w:1) - /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca772275f64c354954352b71eea39cfaca2` (r:1 w:1) - /// Storage: `MessageQueue::BookStateFor` (r:1 w:1) - /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) - /// Storage: `MessageQueue::ServiceHead` (r:1 w:1) - /// Proof: `MessageQueue::ServiceHead` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `MaxEncodedLen`) - /// Storage: `MessageQueue::Pages` (r:0 w:1) - /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(65585), added: 68060, mode: `MaxEncodedLen`) - fn on_idle_overweight_good_msg() -> Weight { - // Proof Size summary in bytes: - // Measured: `65726` - // Estimated: `69191` - // Minimum execution time: 117_657_000 picoseconds. - Weight::from_parts(122_035_000, 0) - .saturating_add(Weight::from_parts(0, 69191)) - .saturating_add(T::DbWeight::get().reads(6)) - .saturating_add(T::DbWeight::get().writes(6)) - } - /// Storage: `DmpQueue::MigrationStatus` (r:1 w:1) - /// Proof: `DmpQueue::MigrationStatus` (`max_values`: Some(1), `max_size`: Some(1028), added: 1523, mode: `MaxEncodedLen`) - /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca754904d6d8c6fe06c4e5965f9b8397421` (r:1 w:0) - /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca754904d6d8c6fe06c4e5965f9b8397421` (r:1 w:0) - /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca70f923ef3252d0166429d36d20ed665a8` (r:1 w:1) - /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca70f923ef3252d0166429d36d20ed665a8` (r:1 w:1) - /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca772275f64c354954352b71eea39cfaca2` (r:1 w:1) - /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca772275f64c354954352b71eea39cfaca2` (r:1 w:1) - fn on_idle_overweight_large_msg() -> Weight { - // Proof Size summary in bytes: - // Measured: `65689` - // Estimated: `69154` - // Minimum execution time: 59_799_000 picoseconds. 
- Weight::from_parts(61_354_000, 0) - .saturating_add(Weight::from_parts(0, 69154)) - .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(3)) - } -} diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/mod.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/mod.rs index 833944ebfa5..ee49c72ea5f 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/mod.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/mod.rs @@ -21,7 +21,6 @@ use ::pallet_bridge_messages::WeightInfoExt as MessagesWeightInfoExt; use ::pallet_bridge_parachains::WeightInfoExt as ParachainsWeightInfoExt; pub mod block_weights; -pub mod cumulus_pallet_dmp_queue; pub mod cumulus_pallet_parachain_system; pub mod cumulus_pallet_xcmp_queue; pub mod extrinsic_weights; diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/Cargo.toml b/cumulus/parachains/runtimes/collectives/collectives-westend/Cargo.toml index 79d6c697b5f..1801488eeed 100644 --- a/cumulus/parachains/runtimes/collectives/collectives-westend/Cargo.toml +++ b/cumulus/parachains/runtimes/collectives/collectives-westend/Cargo.toml @@ -72,10 +72,7 @@ westend-runtime-constants = { path = "../../../../../polkadot/runtime/westend/co # Cumulus cumulus-pallet-aura-ext = { path = "../../../../pallets/aura-ext", default-features = false } pallet-message-queue = { path = "../../../../../substrate/frame/message-queue", default-features = false } -cumulus-pallet-dmp-queue = { path = "../../../../pallets/dmp-queue", default-features = false } -cumulus-pallet-parachain-system = { path = "../../../../pallets/parachain-system", default-features = false, features = [ - "parameterized-consensus-hook", -] } +cumulus-pallet-parachain-system = { path = "../../../../pallets/parachain-system", default-features = false, features = ["parameterized-consensus-hook"] } cumulus-pallet-session-benchmarking = { path = "../../../../pallets/session-benchmarking", default-features = false } cumulus-pallet-xcm = { path = "../../../../pallets/xcm", default-features = false } cumulus-pallet-xcmp-queue = { path = "../../../../pallets/xcmp-queue", default-features = false } @@ -96,7 +93,6 @@ sp-io = { path = "../../../../../substrate/primitives/io", features = ["std"] } [features] default = [ "std" ] runtime-benchmarks = [ - "cumulus-pallet-dmp-queue/runtime-benchmarks", "cumulus-pallet-parachain-system/runtime-benchmarks", "cumulus-pallet-session-benchmarking/runtime-benchmarks", "cumulus-pallet-xcmp-queue/runtime-benchmarks", @@ -133,7 +129,6 @@ runtime-benchmarks = [ ] try-runtime = [ "cumulus-pallet-aura-ext/try-runtime", - "cumulus-pallet-dmp-queue/try-runtime", "cumulus-pallet-parachain-system/try-runtime", "cumulus-pallet-xcm/try-runtime", "cumulus-pallet-xcmp-queue/try-runtime", @@ -169,7 +164,6 @@ try-runtime = [ std = [ "codec/std", "cumulus-pallet-aura-ext/std", - "cumulus-pallet-dmp-queue/std", "cumulus-pallet-parachain-system/std", "cumulus-pallet-session-benchmarking/std", "cumulus-pallet-xcm/std", diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/src/lib.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/lib.rs index 54cb898fd6c..1017ac30499 100644 --- a/cumulus/parachains/runtimes/collectives/collectives-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/collectives/collectives-westend/src/lib.rs @@ -456,12 +456,6 @@ parameter_types! 
{ pub const RelayOrigin: AggregateMessageOrigin = AggregateMessageOrigin::Parent; } -impl cumulus_pallet_dmp_queue::Config for Runtime { - type WeightInfo = weights::cumulus_pallet_dmp_queue::WeightInfo; - type RuntimeEvent = RuntimeEvent; - type DmpSink = frame_support::traits::EnqueueWithOrigin; -} - pub const PERIOD: u32 = 6 * HOURS; pub const OFFSET: u32 = 0; @@ -651,7 +645,6 @@ construct_runtime!( XcmpQueue: cumulus_pallet_xcmp_queue::{Pallet, Call, Storage, Event} = 30, PolkadotXcm: pallet_xcm::{Pallet, Call, Storage, Event, Origin, Config} = 31, CumulusXcm: cumulus_pallet_xcm::{Pallet, Event, Origin} = 32, - DmpQueue: cumulus_pallet_dmp_queue::{Pallet, Call, Storage, Event} = 33, MessageQueue: pallet_message_queue::{Pallet, Call, Storage, Event} = 34, // Handy utilities. @@ -740,7 +733,6 @@ mod benches { [pallet_collator_selection, CollatorSelection] [cumulus_pallet_parachain_system, ParachainSystem] [cumulus_pallet_xcmp_queue, XcmpQueue] - [cumulus_pallet_dmp_queue, DmpQueue] [pallet_alliance, Alliance] [pallet_collective, AllianceMotion] [pallet_xcm, PalletXcmExtrinsicsBenchmark::] diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/cumulus_pallet_dmp_queue.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/cumulus_pallet_dmp_queue.rs deleted file mode 100644 index cc41dcd6cbb..00000000000 --- a/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/cumulus_pallet_dmp_queue.rs +++ /dev/null @@ -1,131 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Cumulus. - -// Cumulus is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Cumulus is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Cumulus. If not, see . - -//! Autogenerated weights for `cumulus_pallet_dmp_queue` -//! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-10-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-yprdrvc7-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("asset-hub-kusama-dev")`, DB CACHE: 1024 - -// Executed Command: -// target/production/polkadot-parachain -// benchmark -// pallet -// --steps=50 -// --repeat=20 -// --extrinsic=* -// --wasm-execution=compiled -// --heap-pages=4096 -// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json -// --pallet=cumulus_pallet_dmp_queue -// --chain=asset-hub-kusama-dev -// --header=./cumulus/file_header.txt -// --output=./cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/ - -#![cfg_attr(rustfmt, rustfmt_skip)] -#![allow(unused_parens)] -#![allow(unused_imports)] -#![allow(missing_docs)] - -use frame_support::{traits::Get, weights::Weight}; -use core::marker::PhantomData; - -/// Weight functions for `cumulus_pallet_dmp_queue`. 
-pub struct WeightInfo(PhantomData); -impl cumulus_pallet_dmp_queue::WeightInfo for WeightInfo { - /// Storage: `DmpQueue::MigrationStatus` (r:1 w:1) - /// Proof: `DmpQueue::MigrationStatus` (`max_values`: Some(1), `max_size`: Some(1028), added: 1523, mode: `MaxEncodedLen`) - /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca754904d6d8c6fe06c4e5965f9b8397421` (r:1 w:0) - /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca754904d6d8c6fe06c4e5965f9b8397421` (r:1 w:0) - /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca7d95d3e948effbeccff2de2c182672836` (r:1 w:1) - /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca7d95d3e948effbeccff2de2c182672836` (r:1 w:1) - /// Storage: `MessageQueue::BookStateFor` (r:1 w:1) - /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) - /// Storage: `MessageQueue::ServiceHead` (r:1 w:1) - /// Proof: `MessageQueue::ServiceHead` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `MaxEncodedLen`) - /// Storage: `MessageQueue::Pages` (r:0 w:1) - /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(65585), added: 68060, mode: `MaxEncodedLen`) - fn on_idle_good_msg() -> Weight { - // Proof Size summary in bytes: - // Measured: `65696` - // Estimated: `69161` - // Minimum execution time: 124_651_000 picoseconds. - Weight::from_parts(127_857_000, 0) - .saturating_add(Weight::from_parts(0, 69161)) - .saturating_add(T::DbWeight::get().reads(5)) - .saturating_add(T::DbWeight::get().writes(5)) - } - /// Storage: `DmpQueue::MigrationStatus` (r:1 w:1) - /// Proof: `DmpQueue::MigrationStatus` (`max_values`: Some(1), `max_size`: Some(1028), added: 1523, mode: `MaxEncodedLen`) - /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca754904d6d8c6fe06c4e5965f9b8397421` (r:1 w:0) - /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca754904d6d8c6fe06c4e5965f9b8397421` (r:1 w:0) - /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca7d95d3e948effbeccff2de2c182672836` (r:1 w:1) - /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca7d95d3e948effbeccff2de2c182672836` (r:1 w:1) - fn on_idle_large_msg() -> Weight { - // Proof Size summary in bytes: - // Measured: `65659` - // Estimated: `69124` - // Minimum execution time: 65_684_000 picoseconds. 
- Weight::from_parts(68_039_000, 0) - .saturating_add(Weight::from_parts(0, 69124)) - .saturating_add(T::DbWeight::get().reads(3)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `DmpQueue::MigrationStatus` (r:1 w:1) - /// Proof: `DmpQueue::MigrationStatus` (`max_values`: Some(1), `max_size`: Some(1028), added: 1523, mode: `MaxEncodedLen`) - /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca754904d6d8c6fe06c4e5965f9b8397421` (r:1 w:0) - /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca754904d6d8c6fe06c4e5965f9b8397421` (r:1 w:0) - /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca70f923ef3252d0166429d36d20ed665a8` (r:1 w:1) - /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca70f923ef3252d0166429d36d20ed665a8` (r:1 w:1) - /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca772275f64c354954352b71eea39cfaca2` (r:1 w:1) - /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca772275f64c354954352b71eea39cfaca2` (r:1 w:1) - /// Storage: `MessageQueue::BookStateFor` (r:1 w:1) - /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) - /// Storage: `MessageQueue::ServiceHead` (r:1 w:1) - /// Proof: `MessageQueue::ServiceHead` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `MaxEncodedLen`) - /// Storage: `MessageQueue::Pages` (r:0 w:1) - /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(65585), added: 68060, mode: `MaxEncodedLen`) - fn on_idle_overweight_good_msg() -> Weight { - // Proof Size summary in bytes: - // Measured: `65726` - // Estimated: `69191` - // Minimum execution time: 117_657_000 picoseconds. - Weight::from_parts(122_035_000, 0) - .saturating_add(Weight::from_parts(0, 69191)) - .saturating_add(T::DbWeight::get().reads(6)) - .saturating_add(T::DbWeight::get().writes(6)) - } - /// Storage: `DmpQueue::MigrationStatus` (r:1 w:1) - /// Proof: `DmpQueue::MigrationStatus` (`max_values`: Some(1), `max_size`: Some(1028), added: 1523, mode: `MaxEncodedLen`) - /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca754904d6d8c6fe06c4e5965f9b8397421` (r:1 w:0) - /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca754904d6d8c6fe06c4e5965f9b8397421` (r:1 w:0) - /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca70f923ef3252d0166429d36d20ed665a8` (r:1 w:1) - /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca70f923ef3252d0166429d36d20ed665a8` (r:1 w:1) - /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca772275f64c354954352b71eea39cfaca2` (r:1 w:1) - /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca772275f64c354954352b71eea39cfaca2` (r:1 w:1) - fn on_idle_overweight_large_msg() -> Weight { - // Proof Size summary in bytes: - // Measured: `65689` - // Estimated: `69154` - // Minimum execution time: 59_799_000 picoseconds. - Weight::from_parts(61_354_000, 0) - .saturating_add(Weight::from_parts(0, 69154)) - .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(3)) - } -} diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/mod.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/mod.rs index 1d877fdbd2b..d49a2905e7f 100644 --- a/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/mod.rs +++ b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/mod.rs @@ -14,7 +14,6 @@ // limitations under the License. 
pub mod block_weights; -pub mod cumulus_pallet_dmp_queue; pub mod cumulus_pallet_parachain_system; pub mod cumulus_pallet_xcmp_queue; pub mod extrinsic_weights; -- GitLab From 1d5d4a484021ede73152bf71af37718fa38bb72b Mon Sep 17 00:00:00 2001 From: Julian Eager Date: Wed, 29 Nov 2023 14:46:10 +0800 Subject: [PATCH 151/199] Enable parallel key scraping (#1985) closes #174 --------- Co-authored-by: Liam Aharon Co-authored-by: Oliver Tale-Yazdi --- .../frame/remote-externalities/src/lib.rs | 193 ++++++++++++++---- 1 file changed, 156 insertions(+), 37 deletions(-) diff --git a/substrate/utils/frame/remote-externalities/src/lib.rs b/substrate/utils/frame/remote-externalities/src/lib.rs index 71e9320ebee..5c7a36867ff 100644 --- a/substrate/utils/frame/remote-externalities/src/lib.rs +++ b/substrate/utils/frame/remote-externalities/src/lib.rs @@ -47,6 +47,7 @@ use std::{ fs, ops::{Deref, DerefMut}, path::{Path, PathBuf}, + sync::Arc, time::{Duration, Instant}, }; use substrate_rpc_client::{rpc_params, BatchRequestBuilder, ChainApi, ClientT, StateApi}; @@ -298,6 +299,7 @@ impl Default for SnapshotConfig { } /// Builder for remote-externalities. +#[derive(Clone)] pub struct Builder { /// Custom key-pairs to be injected into the final externalities. The *hashed* keys and values /// must be given. @@ -400,41 +402,134 @@ where }) } - /// Get all the keys at `prefix` at `hash` using the paged, safe RPC methods. - async fn rpc_get_keys_paged( + /// Get keys with `prefix` at `block` in a parallel manner. + async fn rpc_get_keys_parallel( &self, - prefix: StorageKey, - at: B::Hash, + prefix: &StorageKey, + block: B::Hash, + parallel: usize, + ) -> Result, &'static str> { + /// Divide the workload and return the start key of each chunks. Guaranteed to return a + /// non-empty list. 
+ fn gen_start_keys(prefix: &StorageKey) -> Vec { + let mut prefix = prefix.as_ref().to_vec(); + let scale = 32usize.saturating_sub(prefix.len()); + + // no need to divide workload + if scale < 9 { + prefix.extend(vec![0; scale]); + return vec![StorageKey(prefix)] + } + + let chunks = 16; + let step = 0x10000 / chunks; + let ext = scale - 2; + + (0..chunks) + .map(|i| { + let mut key = prefix.clone(); + let start = i * step; + key.extend(vec![(start >> 8) as u8, (start & 0xff) as u8]); + key.extend(vec![0; ext]); + StorageKey(key) + }) + .collect() + } + + let start_keys = gen_start_keys(&prefix); + let start_keys: Vec> = start_keys.iter().map(Some).collect(); + let mut end_keys: Vec> = start_keys[1..].to_vec(); + end_keys.push(None); + + // use a semaphore to limit max scraping tasks + let parallel = Arc::new(tokio::sync::Semaphore::new(parallel)); + let builder = Arc::new(self.clone()); + let mut handles = vec![]; + + for (start_key, end_key) in start_keys.into_iter().zip(end_keys) { + let permit = parallel + .clone() + .acquire_owned() + .await + .expect("semaphore is not closed until the end of loop"); + + let builder = builder.clone(); + let prefix = prefix.clone(); + let start_key = start_key.cloned(); + let end_key = end_key.cloned(); + + let handle = tokio::spawn(async move { + let res = builder + .rpc_get_keys_in_range(&prefix, block, start_key.as_ref(), end_key.as_ref()) + .await; + drop(permit); + res + }); + + handles.push(handle); + } + + parallel.close(); + + let keys = futures::future::join_all(handles) + .await + .into_iter() + .filter_map(|res| match res { + Ok(Ok(keys)) => Some(keys), + _ => None, + }) + .flatten() + .collect::>(); + + Ok(keys) + } + + /// Get all keys with `prefix` within the given range at `block`. + /// Both `start_key` and `end_key` are optional if you want an open-ended range. + async fn rpc_get_keys_in_range( + &self, + prefix: &StorageKey, + block: B::Hash, + start_key: Option<&StorageKey>, + end_key: Option<&StorageKey>, ) -> Result, &'static str> { - let mut last_key: Option = None; - let mut all_keys: Vec = vec![]; - let keys = loop { + let mut last_key: Option<&StorageKey> = start_key; + let mut keys: Vec = vec![]; + + loop { // This loop can hit the node with very rapid requests, occasionally causing it to // error out in CI (https://github.com/paritytech/substrate/issues/14129), so we retry. 
let retry_strategy = FixedInterval::new(Self::KEYS_PAGE_RETRY_INTERVAL).take(Self::MAX_RETRIES); let get_page_closure = - || self.get_keys_single_page(Some(prefix.clone()), last_key.clone(), at); - let page = Retry::spawn(retry_strategy, get_page_closure).await?; - let page_len = page.len(); + || self.get_keys_single_page(Some(prefix.clone()), last_key.cloned(), block); + let mut page = Retry::spawn(retry_strategy, get_page_closure).await?; - all_keys.extend(page); + // avoid duplicated keys across workloads + if let (Some(last), Some(end)) = (page.last(), end_key) { + if last >= end { + page.retain(|key| key < end); + } + } + let page_len = page.len(); + keys.extend(page); + last_key = keys.last(); + + // scraping out of range or no more matches, + // we are done either way if page_len < Self::DEFAULT_KEY_DOWNLOAD_PAGE as usize { log::debug!(target: LOG_TARGET, "last page received: {}", page_len); - break all_keys - } else { - let new_last_key = - all_keys.last().expect("all_keys is populated; has .last(); qed"); - log::debug!( - target: LOG_TARGET, - "new total = {}, full page received: {}", - all_keys.len(), - HexDisplay::from(new_last_key) - ); - last_key = Some(new_last_key.clone()); - }; - }; + break + } + + log::debug!( + target: LOG_TARGET, + "new total = {}, full page received: {}", + keys.len(), + HexDisplay::from(last_key.expect("full page received, cannot be None")) + ); + } Ok(keys) } @@ -529,7 +624,7 @@ where "Batch request failed ({}/{} retries). Error: {}", retries, Self::MAX_RETRIES, - e.to_string() + e ); // after 2 subsequent failures something very wrong is happening. log a warning // and reset the batch size down to 1. @@ -590,7 +685,7 @@ where /// map them to values one by one. /// /// This can work with public nodes. But, expect it to be darn slow. - pub(crate) async fn rpc_get_pairs_paged( + pub(crate) async fn rpc_get_pairs( &self, prefix: StorageKey, at: B::Hash, @@ -598,8 +693,10 @@ where ) -> Result, &'static str> { let start = Instant::now(); let mut sp = Spinner::with_timer(Spinners::Dots, "Scraping keys...".into()); + // TODO We could start downloading when having collected the first batch of keys + // https://github.com/paritytech/polkadot-sdk/issues/2494 let keys = self - .rpc_get_keys_paged(prefix.clone(), at) + .rpc_get_keys_parallel(&prefix, at, Self::PARALLEL_REQUESTS) .await? .into_iter() .collect::>(); @@ -628,9 +725,9 @@ where .unwrap() .progress_chars("=>-"), ); - let payloads_chunked = payloads.chunks((&payloads.len() / Self::PARALLEL_REQUESTS).max(1)); + let payloads_chunked = payloads.chunks((payloads.len() / Self::PARALLEL_REQUESTS).max(1)); let requests = payloads_chunked.map(|payload_chunk| { - Self::get_storage_data_dynamic_batch_size(&client, payload_chunk.to_vec(), &bar) + Self::get_storage_data_dynamic_batch_size(client, payload_chunk.to_vec(), &bar) }); // Execute the requests and move the Result outside. let storage_data_result: Result, _> = @@ -644,7 +741,7 @@ where }, }; bar.finish_with_message("✅ Downloaded key values"); - print!("\n"); + println!(); // Check if we got responses for all submitted requests. 
assert_eq!(keys.len(), storage_data.len()); @@ -778,8 +875,9 @@ where pending_ext: &mut TestExternalities>, ) -> Result { let child_roots = top_kv - .into_iter() - .filter_map(|(k, _)| is_default_child_storage_key(k.as_ref()).then(|| k.clone())) + .iter() + .filter(|(k, _)| is_default_child_storage_key(k.as_ref())) + .map(|(k, _)| k.clone()) .collect::>(); if child_roots.is_empty() { @@ -799,11 +897,10 @@ where let mut child_kv = vec![]; for prefixed_top_key in child_roots { let child_keys = - Self::rpc_child_get_keys(&client, &prefixed_top_key, StorageKey(vec![]), at) - .await?; + Self::rpc_child_get_keys(client, &prefixed_top_key, StorageKey(vec![]), at).await?; let child_kv_inner = - Self::rpc_child_get_storage_paged(&client, &prefixed_top_key, child_keys, at) + Self::rpc_child_get_storage_paged(client, &prefixed_top_key, child_keys, at) .await?; let prefixed_top_key = PrefixedStorageKey::new(prefixed_top_key.clone().0); @@ -846,7 +943,7 @@ where for prefix in &config.hashed_prefixes { let now = std::time::Instant::now(); let additional_key_values = - self.rpc_get_pairs_paged(StorageKey(prefix.to_vec()), at, pending_ext).await?; + self.rpc_get_pairs(StorageKey(prefix.to_vec()), at, pending_ext).await?; let elapsed = now.elapsed(); log::info!( target: LOG_TARGET, @@ -1110,7 +1207,7 @@ mod test_prelude { pub(crate) type Block = RawBlock>; pub(crate) fn init_logger() { - let _ = sp_tracing::try_init_simple(); + sp_tracing::try_init_simple(); } } @@ -1440,4 +1537,26 @@ mod remote_tests { .unwrap() .execute_with(|| {}); } + + #[tokio::test] + async fn can_fetch_in_parallel() { + init_logger(); + + let uri = String::from("wss://kusama-bridge-hub-rpc.polkadot.io:443"); + let mut builder = Builder::::new() + .mode(Mode::Online(OnlineConfig { transport: uri.into(), ..Default::default() })); + builder.init_remote_client().await.unwrap(); + + let at = builder.as_online().at.unwrap(); + + let prefix = StorageKey(vec![13]); + let paged = builder.rpc_get_keys_in_range(&prefix, at, None, None).await.unwrap(); + let para = builder.rpc_get_keys_parallel(&prefix, at, 4).await.unwrap(); + assert_eq!(paged, para); + + let prefix = StorageKey(vec![]); + let paged = builder.rpc_get_keys_in_range(&prefix, at, None, None).await.unwrap(); + let para = builder.rpc_get_keys_parallel(&prefix, at, 8).await.unwrap(); + assert_eq!(paged, para); + } } -- GitLab From 39d6c95c0daa4e42c30836919150a67317f0e408 Mon Sep 17 00:00:00 2001 From: Michal Kucharczyk <1728078+michalkucharczyk@users.noreply.github.com> Date: Wed, 29 Nov 2023 10:30:09 +0100 Subject: [PATCH 152/199] substrate-node: `NativeElseWasmExecutor` is no longer used (#2521) This PR removes `NativeElseWasmExecutor` usage from substrate node. Instead [`WasmExecutor<(sp_io::SubstrateHostFunctions, sp_statement_store::runtime_api::HostFunctions)>`](https://github.com/paritytech/polkadot-sdk/blob/49a41ab3bb3f630c20e5b24cec8d92382404631c/substrate/bin/node/executor/src/lib.rs#L26) is used. Related to #2358. 
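
For reference, a minimal sketch of the new wasm-only executor wiring. The host-function tuple is the one linked above; everything else here is illustrative, and the node itself builds its executor from the `Configuration` via `sc_service::new_wasm_executor`, as the service diffs below do:

```rust
use sc_executor::WasmExecutor;

// Host functions the kitchensink runtime needs on top of the default Substrate set.
type HostFunctions =
    (sp_io::SubstrateHostFunctions, sp_statement_store::runtime_api::HostFunctions);

// Wasm-only executor; this replaces `NativeElseWasmExecutor<ExecutorDispatch>`.
type RuntimeExecutor = WasmExecutor<HostFunctions>;

fn main() {
    // Default builder settings are enough for tests and benchmarks; a real node
    // would instead call `sc_service::new_wasm_executor::<HostFunctions>(&config)`
    // so heap pages, execution method, etc. come from the node configuration.
    let _executor: RuntimeExecutor = RuntimeExecutor::builder().build();
}
```
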
--------- Co-authored-by: Davide Galassi --- Cargo.lock | 57 +++------ Cargo.toml | 1 - .../bin/node-template/node/src/service.rs | 30 +---- substrate/bin/node/cli/Cargo.toml | 40 ++++++- .../bench.rs => cli/benches/executor.rs} | 109 +++++++----------- substrate/bin/node/cli/src/command.rs | 3 +- substrate/bin/node/cli/src/service.rs | 24 +++- .../bin/node/{executor => cli}/tests/basic.rs | 0 .../node/{executor => cli}/tests/common.rs | 9 +- .../bin/node/{executor => cli}/tests/fees.rs | 0 .../tests/res/default_genesis_config.json | 0 .../tests/submit_transaction.rs | 0 substrate/bin/node/executor/Cargo.toml | 57 --------- substrate/bin/node/executor/src/lib.rs | 40 ------- substrate/bin/node/inspect/Cargo.toml | 8 ++ substrate/bin/node/inspect/src/command.rs | 10 +- substrate/bin/node/testing/Cargo.toml | 2 +- substrate/bin/node/testing/src/bench.rs | 14 +-- substrate/bin/node/testing/src/client.rs | 19 ++- 19 files changed, 164 insertions(+), 259 deletions(-) rename substrate/bin/node/{executor/benches/bench.rs => cli/benches/executor.rs} (71%) rename substrate/bin/node/{executor => cli}/tests/basic.rs (100%) rename substrate/bin/node/{executor => cli}/tests/common.rs (95%) rename substrate/bin/node/{executor => cli}/tests/fees.rs (100%) rename substrate/bin/node/{executor => cli}/tests/res/default_genesis_config.json (100%) rename substrate/bin/node/{executor => cli}/tests/submit_transaction.rs (100%) delete mode 100644 substrate/bin/node/executor/Cargo.toml delete mode 100644 substrate/bin/node/executor/src/lib.rs diff --git a/Cargo.lock b/Cargo.lock index 871d46ea844..bf4efa60ad7 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -8928,7 +8928,7 @@ dependencies = [ "sp-keyring", "sp-runtime", "sp-timestamp", - "staging-node-executor", + "staging-node-cli", "substrate-test-client", "tempfile", ] @@ -18358,7 +18358,9 @@ dependencies = [ "clap 4.4.6", "clap_complete", "criterion 0.4.0", + "frame-benchmarking", "frame-benchmarking-cli", + "frame-support", "frame-system", "frame-system-rpc-runtime-api", "futures", @@ -18368,13 +18370,20 @@ dependencies = [ "nix 0.26.2", "node-primitives", "node-rpc", + "node-testing", "pallet-asset-conversion-tx-payment", "pallet-asset-tx-payment", "pallet-assets", "pallet-balances", + "pallet-contracts", + "pallet-glutton", "pallet-im-online", + "pallet-root-testing", "pallet-skip-feeless-payment", + "pallet-sudo", "pallet-timestamp", + "pallet-transaction-payment", + "pallet-treasury", "parity-scale-codec", "platforms", "rand 0.8.5", @@ -18409,27 +18418,31 @@ dependencies = [ "sc-telemetry", "sc-transaction-pool", "sc-transaction-pool-api", + "scale-info", "serde", "serde_json", "soketto", "sp-api", + "sp-application-crypto", "sp-authority-discovery", "sp-blockchain", "sp-consensus", "sp-consensus-babe", "sp-consensus-grandpa", "sp-core", + "sp-externalities 0.19.0", "sp-inherents", "sp-io", "sp-keyring", "sp-keystore", "sp-mixnet", "sp-runtime", + "sp-state-machine", "sp-statement-store", "sp-timestamp", "sp-tracing 10.0.0", "sp-transaction-storage-proof", - "staging-node-executor", + "sp-trie", "staging-node-inspect", "substrate-build-script-utils", "substrate-cli-test-utils", @@ -18440,44 +18453,6 @@ dependencies = [ "tokio-util", "try-runtime-cli", "wait-timeout", -] - -[[package]] -name = "staging-node-executor" -version = "3.0.0-dev" -dependencies = [ - "criterion 0.4.0", - "frame-benchmarking", - "frame-support", - "frame-system", - "futures", - "kitchensink-runtime", - "node-primitives", - "node-testing", - "pallet-balances", - "pallet-contracts", - 
"pallet-glutton", - "pallet-im-online", - "pallet-root-testing", - "pallet-sudo", - "pallet-timestamp", - "pallet-transaction-payment", - "pallet-treasury", - "parity-scale-codec", - "sc-executor", - "scale-info", - "serde_json", - "sp-application-crypto", - "sp-consensus-babe", - "sp-core", - "sp-externalities 0.19.0", - "sp-keyring", - "sp-keystore", - "sp-runtime", - "sp-state-machine", - "sp-statement-store", - "sp-tracing 10.0.0", - "sp-trie", "wat", ] @@ -18492,7 +18467,9 @@ dependencies = [ "sc-service", "sp-blockchain", "sp-core", + "sp-io", "sp-runtime", + "sp-statement-store", "thiserror", ] diff --git a/Cargo.toml b/Cargo.toml index 6af3ea4c3cb..f16c8393183 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -200,7 +200,6 @@ members = [ "substrate/bin/node-template/runtime", "substrate/bin/node/bench", "substrate/bin/node/cli", - "substrate/bin/node/executor", "substrate/bin/node/inspect", "substrate/bin/node/primitives", "substrate/bin/node/rpc", diff --git a/substrate/bin/node-template/node/src/service.rs b/substrate/bin/node-template/node/src/service.rs index e69428d8190..c4a2b2f39d2 100644 --- a/substrate/bin/node-template/node/src/service.rs +++ b/substrate/bin/node-template/node/src/service.rs @@ -5,35 +5,17 @@ use node_template_runtime::{self, opaque::Block, RuntimeApi}; use sc_client_api::{Backend, BlockBackend}; use sc_consensus_aura::{ImportQueueParams, SlotProportion, StartAuraParams}; use sc_consensus_grandpa::SharedVoterState; -pub use sc_executor::NativeElseWasmExecutor; use sc_service::{error::Error as ServiceError, Configuration, TaskManager, WarpSyncParams}; use sc_telemetry::{Telemetry, TelemetryWorker}; use sc_transaction_pool_api::OffchainTransactionPoolFactory; use sp_consensus_aura::sr25519::AuthorityPair as AuraPair; use std::{sync::Arc, time::Duration}; -// Our native executor instance. -pub struct ExecutorDispatch; - -impl sc_executor::NativeExecutionDispatch for ExecutorDispatch { - /// Only enable the benchmarking host functions when we actually want to benchmark. - #[cfg(feature = "runtime-benchmarks")] - type ExtendHostFunctions = frame_benchmarking::benchmarking::HostFunctions; - /// Otherwise we only use the default Substrate host functions. 
- #[cfg(not(feature = "runtime-benchmarks"))] - type ExtendHostFunctions = (); - - fn dispatch(method: &str, data: &[u8]) -> Option> { - node_template_runtime::api::dispatch(method, data) - } - - fn native_version() -> sc_executor::NativeVersion { - node_template_runtime::native_version() - } -} - -pub(crate) type FullClient = - sc_service::TFullClient>; +pub(crate) type FullClient = sc_service::TFullClient< + Block, + RuntimeApi, + sc_executor::WasmExecutor, +>; type FullBackend = sc_service::TFullBackend; type FullSelectChain = sc_consensus::LongestChain; @@ -75,7 +57,7 @@ pub fn new_partial( }) .transpose()?; - let executor = sc_service::new_native_or_wasm_executor(config); + let executor = sc_service::new_wasm_executor::(config); let (client, backend, keystore_container, task_manager) = sc_service::new_full_parts::( config, diff --git a/substrate/bin/node/cli/Cargo.toml b/substrate/bin/node/cli/Cargo.toml index 8f3c2185deb..656d6f86505 100644 --- a/substrate/bin/node/cli/Cargo.toml +++ b/substrate/bin/node/cli/Cargo.toml @@ -90,6 +90,7 @@ sc-storage-monitor = { path = "../../../client/storage-monitor" } sc-offchain = { path = "../../../client/offchain" } # frame dependencies +frame-benchmarking = { path = "../../../frame/benchmarking" } frame-system = { path = "../../../frame/system" } frame-system-rpc-runtime-api = { path = "../../../frame/system/rpc/runtime-api" } pallet-assets = { path = "../../../frame/assets" } @@ -102,7 +103,6 @@ pallet-skip-feeless-payment = { path = "../../../frame/transaction-payment/skip- kitchensink-runtime = { path = "../runtime" } node-rpc = { path = "../rpc" } node-primitives = { path = "../primitives" } -node-executor = { package = "staging-node-executor", path = "../executor" } # CLI-specific dependencies sc-cli = { path = "../../../client/cli", optional = true} @@ -136,6 +136,26 @@ substrate-rpc-client = { path = "../../../utils/frame/rpc/client" } pallet-timestamp = { path = "../../../frame/timestamp" } substrate-cli-test-utils = { path = "../../../test-utils/cli" } +wat = "1.0" +frame-support = { path = "../../../frame/support" } +node-testing = { path = "../testing" } +pallet-balances = { path = "../../../frame/balances" } +pallet-contracts = { path = "../../../frame/contracts" } +pallet-glutton = { path = "../../../frame/glutton" } +pallet-sudo = { path = "../../../frame/sudo" } +pallet-treasury = { path = "../../../frame/treasury" } +pallet-transaction-payment = { path = "../../../frame/transaction-payment" } +sp-application-crypto = { path = "../../../primitives/application-crypto" } +pallet-root-testing = { path = "../../../frame/root-testing" } +sp-consensus-babe = { path = "../../../primitives/consensus/babe" } +sp-externalities = { path = "../../../primitives/externalities" } +sp-keyring = { path = "../../../primitives/keyring" } +sp-runtime = { path = "../../../primitives/runtime" } +serde_json = "1.0.108" +scale-info = { version = "2.10.0", features = ["derive", "serde"] } +sp-trie = { path = "../../../primitives/trie" } +sp-state-machine = { path = "../../../primitives/state-machine" } + [build-dependencies] clap = { version = "4.4.6", optional = true } clap_complete = { version = "4.0.2", optional = true } @@ -163,14 +183,21 @@ cli = [ ] runtime-benchmarks = [ "frame-benchmarking-cli/runtime-benchmarks", + "frame-benchmarking/runtime-benchmarks", + "frame-support/runtime-benchmarks", "frame-system/runtime-benchmarks", "kitchensink-runtime/runtime-benchmarks", + "node-inspect?/runtime-benchmarks", 
"pallet-asset-tx-payment/runtime-benchmarks", "pallet-assets/runtime-benchmarks", "pallet-balances/runtime-benchmarks", + "pallet-contracts/runtime-benchmarks", + "pallet-glutton/runtime-benchmarks", "pallet-im-online/runtime-benchmarks", "pallet-skip-feeless-payment/runtime-benchmarks", + "pallet-sudo/runtime-benchmarks", "pallet-timestamp/runtime-benchmarks", + "pallet-treasury/runtime-benchmarks", "sc-client-db/runtime-benchmarks", "sc-service/runtime-benchmarks", "sp-runtime/runtime-benchmarks", @@ -178,15 +205,22 @@ runtime-benchmarks = [ # Enable features that allow the runtime to be tried and debugged. Name might be subject to change # in the near future. try-runtime = [ + "frame-support/try-runtime", "frame-system/try-runtime", "kitchensink-runtime/try-runtime", "pallet-asset-conversion-tx-payment/try-runtime", "pallet-asset-tx-payment/try-runtime", "pallet-assets/try-runtime", "pallet-balances/try-runtime", + "pallet-contracts/try-runtime", + "pallet-glutton/try-runtime", "pallet-im-online/try-runtime", + "pallet-root-testing/try-runtime", "pallet-skip-feeless-payment/try-runtime", + "pallet-sudo/try-runtime", "pallet-timestamp/try-runtime", + "pallet-transaction-payment/try-runtime", + "pallet-treasury/try-runtime", "sp-runtime/try-runtime", "substrate-cli-test-utils/try-runtime", "try-runtime-cli/try-runtime", @@ -199,3 +233,7 @@ harness = false [[bench]] name = "block_production" harness = false + +[[bench]] +name = "executor" +harness = false diff --git a/substrate/bin/node/executor/benches/bench.rs b/substrate/bin/node/cli/benches/executor.rs similarity index 71% rename from substrate/bin/node/executor/benches/bench.rs rename to substrate/bin/node/cli/benches/executor.rs index 587e76af867..d654646904b 100644 --- a/substrate/bin/node/executor/benches/bench.rs +++ b/substrate/bin/node/cli/benches/executor.rs @@ -22,20 +22,16 @@ use kitchensink_runtime::{ constants::currency::*, Block, BuildStorage, CheckedExtrinsic, Header, RuntimeCall, RuntimeGenesisConfig, UncheckedExtrinsic, }; -use node_executor::ExecutorDispatch; use node_primitives::{BlockNumber, Hash}; use node_testing::keyring::*; -use sc_executor::{ - Externalities, NativeElseWasmExecutor, RuntimeVersionOf, WasmExecutionMethod, WasmExecutor, - WasmtimeInstantiationStrategy, -}; +use sc_executor::{Externalities, RuntimeVersionOf}; use sp_core::{ storage::well_known_keys, traits::{CallContext, CodeExecutor, RuntimeCode}, }; use sp_runtime::traits::BlakeTwo256; use sp_state_machine::TestExternalities as CoreTestExternalities; -use staging_node_executor as node_executor; +use staging_node_cli::service::RuntimeExecutor; criterion_group!(benches, bench_execute_block); criterion_main!(benches); @@ -58,12 +54,6 @@ const HEAP_PAGES: u64 = 20; type TestExternalities = CoreTestExternalities; -#[derive(Debug)] -enum ExecutionMethod { - Native, - Wasm(WasmExecutionMethod), -} - fn sign(xt: CheckedExtrinsic) -> UncheckedExtrinsic { node_testing::keyring::sign(xt, SPEC_VERSION, TRANSACTION_VERSION, GENESIS_HASH) } @@ -80,7 +70,7 @@ fn new_test_ext(genesis_config: &RuntimeGenesisConfig) -> TestExternalities( - executor: &NativeElseWasmExecutor, + executor: &RuntimeExecutor, ext: &mut E, number: BlockNumber, parent_hash: Hash, @@ -159,7 +149,7 @@ fn construct_block( fn test_blocks( genesis_config: &RuntimeGenesisConfig, - executor: &NativeElseWasmExecutor, + executor: &RuntimeExecutor, ) -> Vec<(Vec, Hash)> { let mut test_ext = new_test_ext(genesis_config); let mut block1_extrinsics = vec![CheckedExtrinsic { @@ -181,56 +171,43 @@ fn 
test_blocks( fn bench_execute_block(c: &mut Criterion) { let mut group = c.benchmark_group("execute blocks"); - let execution_methods = vec![ - ExecutionMethod::Native, - ExecutionMethod::Wasm(WasmExecutionMethod::Compiled { - instantiation_strategy: WasmtimeInstantiationStrategy::PoolingCopyOnWrite, - }), - ]; - - for strategy in execution_methods { - group.bench_function(format!("{:?}", strategy), |b| { - let genesis_config = node_testing::genesis::config(); - let use_native = match strategy { - ExecutionMethod::Native => true, - ExecutionMethod::Wasm(..) => false, - }; - - let executor = - NativeElseWasmExecutor::new_with_wasm_executor(WasmExecutor::builder().build()); - let runtime_code = RuntimeCode { - code_fetcher: &sp_core::traits::WrappedRuntimeCode(compact_code_unwrap().into()), - hash: vec![1, 2, 3], - heap_pages: None, - }; - - // Get the runtime version to initialize the runtimes cache. - { - let mut test_ext = new_test_ext(&genesis_config); - executor.runtime_version(&mut test_ext.ext(), &runtime_code).unwrap(); - } - - let blocks = test_blocks(&genesis_config, &executor); - - b.iter_batched_ref( - || new_test_ext(&genesis_config), - |test_ext| { - for block in blocks.iter() { - executor - .call( - &mut test_ext.ext(), - &runtime_code, - "Core_execute_block", - &block.0, - use_native, - CallContext::Offchain, - ) - .0 - .unwrap(); - } - }, - BatchSize::LargeInput, - ); - }); - } + + group.bench_function("wasm", |b| { + let genesis_config = node_testing::genesis::config(); + + let executor = RuntimeExecutor::builder().build(); + let runtime_code = RuntimeCode { + code_fetcher: &sp_core::traits::WrappedRuntimeCode(compact_code_unwrap().into()), + hash: vec![1, 2, 3], + heap_pages: None, + }; + + // Get the runtime version to initialize the runtimes cache. 
+ { + let mut test_ext = new_test_ext(&genesis_config); + executor.runtime_version(&mut test_ext.ext(), &runtime_code).unwrap(); + } + + let blocks = test_blocks(&genesis_config, &executor); + + b.iter_batched_ref( + || new_test_ext(&genesis_config), + |test_ext| { + for block in blocks.iter() { + executor + .call( + &mut test_ext.ext(), + &runtime_code, + "Core_execute_block", + &block.0, + false, + CallContext::Offchain, + ) + .0 + .unwrap(); + } + }, + BatchSize::LargeInput, + ); + }); } diff --git a/substrate/bin/node/cli/src/command.rs b/substrate/bin/node/cli/src/command.rs index 16d0415ff26..dc28705c2ae 100644 --- a/substrate/bin/node/cli/src/command.rs +++ b/substrate/bin/node/cli/src/command.rs @@ -24,7 +24,6 @@ use crate::{ }; use frame_benchmarking_cli::*; use kitchensink_runtime::{ExistentialDeposit, RuntimeApi}; -use node_executor::ExecutorDispatch; use node_primitives::Block; use sc_cli::{Result, SubstrateCli}; use sc_service::PartialComponents; @@ -89,7 +88,7 @@ pub fn run() -> Result<()> { Some(Subcommand::Inspect(cmd)) => { let runner = cli.create_runner(cmd)?; - runner.sync_run(|config| cmd.run::(config)) + runner.sync_run(|config| cmd.run::(config)) }, Some(Subcommand::Benchmark(cmd)) => { let runner = cli.create_runner(cmd)?; diff --git a/substrate/bin/node/cli/src/service.rs b/substrate/bin/node/cli/src/service.rs index a746de8de84..4f8c6198cdc 100644 --- a/substrate/bin/node/cli/src/service.rs +++ b/substrate/bin/node/cli/src/service.rs @@ -26,11 +26,9 @@ use frame_benchmarking_cli::SUBSTRATE_REFERENCE_HARDWARE; use frame_system_rpc_runtime_api::AccountNonceApi; use futures::prelude::*; use kitchensink_runtime::RuntimeApi; -use node_executor::ExecutorDispatch; use node_primitives::Block; use sc_client_api::{Backend, BlockBackend}; use sc_consensus_babe::{self, SlotProportion}; -use sc_executor::NativeElseWasmExecutor; use sc_network::{event::Event, NetworkEventStream, NetworkService}; use sc_network_sync::{warp::WarpSyncParams, SyncingService}; use sc_service::{config::Configuration, error::Error as ServiceError, RpcHandlers, TaskManager}; @@ -42,9 +40,25 @@ use sp_core::crypto::Pair; use sp_runtime::{generic, traits::Block as BlockT, SaturatedConversion}; use std::sync::Arc; +/// Host functions required for kitchensink runtime and Substrate node. +#[cfg(not(feature = "runtime-benchmarks"))] +pub type HostFunctions = + (sp_io::SubstrateHostFunctions, sp_statement_store::runtime_api::HostFunctions); + +/// Host functions required for kitchensink runtime and Substrate node. +#[cfg(feature = "runtime-benchmarks")] +pub type HostFunctions = ( + sp_io::SubstrateHostFunctions, + sp_statement_store::runtime_api::HostFunctions, + frame_benchmarking::benchmarking::HostFunctions, +); + +/// A specialized `WasmExecutor` intended to use accross substrate node. It provides all required +/// HostFunctions. +pub type RuntimeExecutor = sc_executor::WasmExecutor; + /// The full client type definition. 
-pub type FullClient = - sc_service::TFullClient>; +pub type FullClient = sc_service::TFullClient; type FullBackend = sc_service::TFullBackend; type FullSelectChain = sc_consensus::LongestChain; type FullGrandpaBlockImport = @@ -174,7 +188,7 @@ pub fn new_partial( }) .transpose()?; - let executor = sc_service::new_native_or_wasm_executor(&config); + let executor = sc_service::new_wasm_executor(&config); let (client, backend, keystore_container, task_manager) = sc_service::new_full_parts::( diff --git a/substrate/bin/node/executor/tests/basic.rs b/substrate/bin/node/cli/tests/basic.rs similarity index 100% rename from substrate/bin/node/executor/tests/basic.rs rename to substrate/bin/node/cli/tests/basic.rs diff --git a/substrate/bin/node/executor/tests/common.rs b/substrate/bin/node/cli/tests/common.rs similarity index 95% rename from substrate/bin/node/executor/tests/common.rs rename to substrate/bin/node/cli/tests/common.rs index 2d68c88db92..7d8f6a9a0e7 100644 --- a/substrate/bin/node/executor/tests/common.rs +++ b/substrate/bin/node/cli/tests/common.rs @@ -18,7 +18,7 @@ use codec::{Decode, Encode}; use frame_support::Hashable; use frame_system::offchain::AppCrypto; -use sc_executor::{error::Result, NativeElseWasmExecutor, WasmExecutor}; +use sc_executor::error::Result; use sp_consensus_babe::{ digests::{PreDigest, SecondaryPlainPreDigest}, Slot, BABE_ENGINE_ID, @@ -38,11 +38,10 @@ use kitchensink_runtime::{ constants::currency::*, Block, BuildStorage, CheckedExtrinsic, Header, Runtime, UncheckedExtrinsic, }; -use node_executor::ExecutorDispatch; use node_primitives::{BlockNumber, Hash}; use node_testing::keyring::*; use sp_externalities::Externalities; -use staging_node_executor as node_executor; +use staging_node_cli::service::RuntimeExecutor; pub const TEST_KEY_TYPE_ID: KeyTypeId = KeyTypeId(*b"test"); @@ -98,8 +97,8 @@ pub fn from_block_number(n: u32) -> Header { Header::new(n, Default::default(), Default::default(), [69; 32].into(), Default::default()) } -pub fn executor() -> NativeElseWasmExecutor { - NativeElseWasmExecutor::new_with_wasm_executor(WasmExecutor::builder().build()) +pub fn executor() -> RuntimeExecutor { + RuntimeExecutor::builder().build() } pub fn executor_call( diff --git a/substrate/bin/node/executor/tests/fees.rs b/substrate/bin/node/cli/tests/fees.rs similarity index 100% rename from substrate/bin/node/executor/tests/fees.rs rename to substrate/bin/node/cli/tests/fees.rs diff --git a/substrate/bin/node/executor/tests/res/default_genesis_config.json b/substrate/bin/node/cli/tests/res/default_genesis_config.json similarity index 100% rename from substrate/bin/node/executor/tests/res/default_genesis_config.json rename to substrate/bin/node/cli/tests/res/default_genesis_config.json diff --git a/substrate/bin/node/executor/tests/submit_transaction.rs b/substrate/bin/node/cli/tests/submit_transaction.rs similarity index 100% rename from substrate/bin/node/executor/tests/submit_transaction.rs rename to substrate/bin/node/cli/tests/submit_transaction.rs diff --git a/substrate/bin/node/executor/Cargo.toml b/substrate/bin/node/executor/Cargo.toml deleted file mode 100644 index 595a313d2cb..00000000000 --- a/substrate/bin/node/executor/Cargo.toml +++ /dev/null @@ -1,57 +0,0 @@ -[package] -name = "staging-node-executor" -version = "3.0.0-dev" -authors.workspace = true -description = "Substrate node implementation in Rust." 
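For downstream node authors following the `service.rs` changes above, here is a minimal sketch of the new wasm-only executor wiring, assuming a node crate with its own `Block` and `RuntimeApi` types; the helper name `new_executor` is illustrative and not part of these patches.

```rust
use sc_service::Configuration;

// Host functions exposed to the wasm runtime, mirroring the `HostFunctions`
// alias added to `service.rs` above (the benchmarking host functions are
// appended behind the `runtime-benchmarks` feature there).
type HostFunctions =
    (sp_io::SubstrateHostFunctions, sp_statement_store::runtime_api::HostFunctions);

// Wasm-only executor that takes the place of the removed
// `NativeElseWasmExecutor<ExecutorDispatch>`.
type RuntimeExecutor = sc_executor::WasmExecutor<HostFunctions>;

// Illustrative helper: builds the executor from the node configuration,
// replacing the old `sc_service::new_native_or_wasm_executor(config)` call.
fn new_executor(config: &Configuration) -> RuntimeExecutor {
    sc_service::new_wasm_executor::<HostFunctions>(config)
}
```

Where no `Configuration` is at hand, for example in the `tests/common.rs` helper and the `executor` benchmark above, the same executor is built directly with `RuntimeExecutor::builder().build()`.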
-edition.workspace = true -license = "Apache-2.0" -homepage = "https://substrate.io" -repository.workspace = true -publish = false - -[package.metadata.docs.rs] -targets = ["x86_64-unknown-linux-gnu"] - -[dependencies] -codec = { package = "parity-scale-codec", version = "3.6.1" } -scale-info = { version = "2.10.0", features = ["derive", "serde"] } -frame-benchmarking = { path = "../../../frame/benchmarking" } -node-primitives = { path = "../primitives" } -kitchensink-runtime = { path = "../runtime" } -sc-executor = { path = "../../../client/executor" } -sp-core = { path = "../../../primitives/core", features=["serde"] } -sp-keystore = { path = "../../../primitives/keystore" } -sp-state-machine = { path = "../../../primitives/state-machine" } -sp-tracing = { path = "../../../primitives/tracing" } -sp-trie = { path = "../../../primitives/trie" } -sp-statement-store = { path = "../../../primitives/statement-store", features=["serde"] } - -[dev-dependencies] -criterion = "0.4.0" -futures = "0.3.21" -wat = "1.0" -frame-support = { path = "../../../frame/support" } -frame-system = { path = "../../../frame/system" } -node-testing = { path = "../testing" } -pallet-balances = { path = "../../../frame/balances" } -pallet-contracts = { path = "../../../frame/contracts" } -pallet-im-online = { path = "../../../frame/im-online" } -pallet-glutton = { path = "../../../frame/glutton" } -pallet-sudo = { path = "../../../frame/sudo" } -pallet-timestamp = { path = "../../../frame/timestamp" } -pallet-treasury = { path = "../../../frame/treasury" } -pallet-transaction-payment = { path = "../../../frame/transaction-payment" } -sp-application-crypto = { path = "../../../primitives/application-crypto" } -pallet-root-testing = { path = "../../../frame/root-testing" } -sp-consensus-babe = { path = "../../../primitives/consensus/babe" } -sp-externalities = { path = "../../../primitives/externalities" } -sp-keyring = { path = "../../../primitives/keyring" } -sp-runtime = { path = "../../../primitives/runtime" } -serde_json = "1.0.108" - -[features] -stress-test = [] - -[[bench]] -name = "bench" -harness = false diff --git a/substrate/bin/node/executor/src/lib.rs b/substrate/bin/node/executor/src/lib.rs deleted file mode 100644 index 3557a16740b..00000000000 --- a/substrate/bin/node/executor/src/lib.rs +++ /dev/null @@ -1,40 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! A `CodeExecutor` specialization which uses natively compiled runtime when the wasm to be -//! executed is equivalent to the natively compiled code. - -pub use sc_executor::NativeElseWasmExecutor; - -// Declare an instance of the native executor named `ExecutorDispatch`. Include the wasm binary as -// the equivalent wasm code. 
-pub struct ExecutorDispatch; - -impl sc_executor::NativeExecutionDispatch for ExecutorDispatch { - type ExtendHostFunctions = ( - frame_benchmarking::benchmarking::HostFunctions, - sp_statement_store::runtime_api::HostFunctions, - ); - - fn dispatch(method: &str, data: &[u8]) -> Option> { - kitchensink_runtime::api::dispatch(method, data) - } - - fn native_version() -> sc_executor::NativeVersion { - kitchensink_runtime::native_version() - } -} diff --git a/substrate/bin/node/inspect/Cargo.toml b/substrate/bin/node/inspect/Cargo.toml index 30cc22b0e8c..cfdec6af5de 100644 --- a/substrate/bin/node/inspect/Cargo.toml +++ b/substrate/bin/node/inspect/Cargo.toml @@ -20,4 +20,12 @@ sc-client-api = { path = "../../../client/api" } sc-service = { path = "../../../client/service", default-features = false} sp-blockchain = { path = "../../../primitives/blockchain" } sp-core = { path = "../../../primitives/core" } +sp-io = { path = "../../../primitives/io" } sp-runtime = { path = "../../../primitives/runtime" } +sp-statement-store = { path = "../../../primitives/statement-store" } + +[features] +runtime-benchmarks = [ + "sc-service/runtime-benchmarks", + "sp-runtime/runtime-benchmarks", +] diff --git a/substrate/bin/node/inspect/src/command.rs b/substrate/bin/node/inspect/src/command.rs index dcecfd78826..e0e25707e31 100644 --- a/substrate/bin/node/inspect/src/command.rs +++ b/substrate/bin/node/inspect/src/command.rs @@ -23,18 +23,20 @@ use crate::{ Inspector, }; use sc_cli::{CliConfiguration, ImportParams, Result, SharedParams}; -use sc_service::{Configuration, NativeExecutionDispatch}; +use sc_service::Configuration; use sp_runtime::traits::Block; +type HostFunctions = + (sp_io::SubstrateHostFunctions, sp_statement_store::runtime_api::HostFunctions); + impl InspectCmd { /// Run the inspect command, passing the inspector. 
- pub fn run(&self, config: Configuration) -> Result<()> + pub fn run(&self, config: Configuration) -> Result<()> where B: Block, RA: Send + Sync + 'static, - D: NativeExecutionDispatch + 'static, { - let executor = sc_service::new_native_or_wasm_executor::(&config); + let executor = sc_service::new_wasm_executor::(&config); let client = sc_service::new_full_client::(&config, None, executor)?; let inspect = Inspector::::new(client); diff --git a/substrate/bin/node/testing/Cargo.toml b/substrate/bin/node/testing/Cargo.toml index e4fb06b5dcd..dba5016fb3e 100644 --- a/substrate/bin/node/testing/Cargo.toml +++ b/substrate/bin/node/testing/Cargo.toml @@ -19,7 +19,7 @@ futures = "0.3.21" log = "0.4.17" tempfile = "3.1.0" frame-system = { path = "../../../frame/system" } -node-executor = { package = "staging-node-executor", path = "../executor" } +node-cli = { package = "staging-node-cli", path = "../cli" } node-primitives = { path = "../primitives" } kitchensink-runtime = { path = "../runtime" } pallet-asset-conversion = { path = "../../../frame/asset-conversion" } diff --git a/substrate/bin/node/testing/src/bench.rs b/substrate/bin/node/testing/src/bench.rs index 89b96c0191c..98d3b968a35 100644 --- a/substrate/bin/node/testing/src/bench.rs +++ b/substrate/bin/node/testing/src/bench.rs @@ -43,7 +43,7 @@ use sc_block_builder::BlockBuilderBuilder; use sc_client_api::{execution_extensions::ExecutionExtensions, UsageProvider}; use sc_client_db::PruningMode; use sc_consensus::{BlockImport, BlockImportParams, ForkChoiceStrategy, ImportResult, ImportedAux}; -use sc_executor::{NativeElseWasmExecutor, WasmExecutionMethod, WasmtimeInstantiationStrategy}; +use sc_executor::{WasmExecutionMethod, WasmtimeInstantiationStrategy}; use sp_api::ProvideRuntimeApi; use sp_block_builder::BlockBuilder; use sp_consensus::BlockOrigin; @@ -388,13 +388,11 @@ impl BenchDb { let task_executor = TaskExecutor::new(); let backend = sc_service::new_db_backend(db_config).expect("Should not fail"); - let executor = NativeElseWasmExecutor::new_with_wasm_executor( - sc_executor::WasmExecutor::builder() - .with_execution_method(WasmExecutionMethod::Compiled { - instantiation_strategy: WasmtimeInstantiationStrategy::PoolingCopyOnWrite, - }) - .build(), - ); + let executor = sc_executor::WasmExecutor::builder() + .with_execution_method(WasmExecutionMethod::Compiled { + instantiation_strategy: WasmtimeInstantiationStrategy::PoolingCopyOnWrite, + }) + .build(); let client_config = sc_service::ClientConfig::default(); let genesis_block_builder = sc_service::GenesisBlockBuilder::new( diff --git a/substrate/bin/node/testing/src/client.rs b/substrate/bin/node/testing/src/client.rs index 22276833fb6..07ba1cdbbfb 100644 --- a/substrate/bin/node/testing/src/client.rs +++ b/substrate/bin/node/testing/src/client.rs @@ -23,7 +23,7 @@ use sp_runtime::BuildStorage; pub use substrate_test_client::*; /// Call executor for `kitchensink-runtime` `TestClient`. -pub type ExecutorDispatch = sc_executor::NativeElseWasmExecutor; +use node_cli::service::RuntimeExecutor; /// Default backend type. pub type Backend = sc_client_db::Backend; @@ -31,7 +31,7 @@ pub type Backend = sc_client_db::Backend; /// Test client type. 
pub type Client = client::Client< Backend, - client::LocalCallExecutor, + client::LocalCallExecutor, node_primitives::Block, kitchensink_runtime::RuntimeApi, >; @@ -63,7 +63,7 @@ pub trait TestClientBuilderExt: Sized { impl TestClientBuilderExt for substrate_test_client::TestClientBuilder< node_primitives::Block, - client::LocalCallExecutor, + client::LocalCallExecutor, Backend, GenesisParameters, > @@ -71,8 +71,17 @@ impl TestClientBuilderExt fn new() -> Self { Self::default() } - fn build(self) -> Client { - self.build_with_native_executor(None).0 + let executor = RuntimeExecutor::builder().build(); + use sc_service::client::LocalCallExecutor; + use std::sync::Arc; + let executor = LocalCallExecutor::new( + self.backend().clone(), + executor.clone(), + Default::default(), + ExecutionExtensions::new(None, Arc::new(executor)), + ) + .expect("Creates LocalCallExecutor"); + self.build_with_executor(executor).0 } } -- GitLab From 63ac2471aa0210f0ac9903bdd7d8f9351f9a635f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?D=C3=B3nal=20Murray?= Date: Wed, 29 Nov 2023 11:07:07 +0000 Subject: [PATCH 153/199] Remove system parachains Polkadot and Kusama runtimes (#1737) Since the Polkadot and Kusama runtimes are no longer in the repo, the relevant systems parachains runtimes also need to be removed. More context [here](https://github.com/paritytech/polkadot-sdk/issues/603) and [here](https://github.com/paritytech/polkadot-sdk/pull/1731). Removes the following: - `asset-hub-kusama` and `asset-hub-polkadot` - `bridge-hub-kusama` and `bridge-hub-polkadot` - `collectives-polkadot` - `glutton-kusama` Partially solves #603 and adds to #1731. --- .gitlab/pipeline/short-benchmarks.yml | 30 - Cargo.lock | 395 ----- Cargo.toml | 6 - .../assets/asset-hub-kusama/0_xcm/0_init.yml | 145 -- .../assets/asset-hub-kusama/0_xcm/1_dmp.yml | 263 --- .../assets/asset-hub-kusama/0_xcm/2_ump.yml | 191 -- .../0_xcm/3_force_hrmp-open-channels.yml | 122 -- .../assets/asset-hub-kusama/0_xcm/4_hrmp.yml | 388 ----- .../e2e/assets/asset-hub-kusama/config.toml | 71 - .../asset-hub-polkadot/0_xcm/0_init.yml | 145 -- .../assets/asset-hub-polkadot/0_xcm/1_dmp.yml | 263 --- .../assets/asset-hub-polkadot/0_xcm/2_ump.yml | 194 --- .../0_xcm/3_force_hrmp-open-channels.yml | 120 -- .../asset-hub-polkadot/0_xcm/4_hrmp.yml | 388 ----- .../e2e/assets/asset-hub-polkadot/config.toml | 72 - .../e2e/collectives/README.md | 26 - .../collectives-polkadot/0_xcm/0_init.yml | 166 -- .../collectives-polkadot/0_xcm/1_teleport.yml | 168 -- .../collectives-polkadot/0_xcm/2_reserve.yml | 53 - .../0_xcm/3_hrmp-open-channels.yml | 69 - .../1_alliance/0_join_alliance_fails.yml | 29 - .../1_alliance/1_init_alliance.yml | 256 --- .../1_alliance/2_join_alliance_fails.yml | 30 - .../1_alliance/3_kick_member.yml | 175 -- .../2_opengov/0_assethub.yml | 149 -- .../3_fellowship/0_init.yml | 209 --- .../3_fellowship/1_whitelist_call.yml | 146 -- .../3_fellowship/2_assethub.yml | 126 -- .../collectives-polkadot/config.toml | 42 - .../assets/asset-hub-kusama/Cargo.toml | 236 --- .../runtimes/assets/asset-hub-kusama/build.rs | 26 - .../assets/asset-hub-kusama/src/lib.rs | 1549 ----------------- .../src/weights/block_weights.rs | 53 - .../src/weights/cumulus_pallet_dmp_queue.rs | 131 -- .../cumulus_pallet_parachain_system.rs | 80 - .../src/weights/cumulus_pallet_xcmp_queue.rs | 148 -- .../src/weights/extrinsic_weights.rs | 53 - .../src/weights/frame_system.rs | 154 -- .../asset-hub-kusama/src/weights/mod.rs | 45 - .../src/weights/pallet_asset_conversion.rs | 154 -- 
.../src/weights/pallet_assets_foreign.rs | 533 ------ .../src/weights/pallet_assets_local.rs | 530 ------ .../src/weights/pallet_assets_pool.rs | 530 ------ .../src/weights/pallet_balances.rs | 152 -- .../src/weights/pallet_collator_selection.rs | 248 --- .../src/weights/pallet_message_queue.rs | 179 -- .../src/weights/pallet_multisig.rs | 164 -- .../weights/pallet_nft_fractionalization.rs | 114 -- .../src/weights/pallet_nfts.rs | 772 -------- .../src/weights/pallet_proxy.rs | 225 --- .../src/weights/pallet_session.rs | 80 - .../src/weights/pallet_timestamp.rs | 74 - .../src/weights/pallet_uniques.rs | 466 ----- .../src/weights/pallet_utility.rs | 101 -- .../src/weights/pallet_xcm.rs | 324 ---- .../src/weights/paritydb_weights.rs | 63 - .../src/weights/rocksdb_weights.rs | 63 - .../asset-hub-kusama/src/weights/xcm/mod.rs | 244 --- .../xcm/pallet_xcm_benchmarks_fungible.rs | 187 -- .../xcm/pallet_xcm_benchmarks_generic.rs | 328 ---- .../assets/asset-hub-kusama/src/xcm_config.rs | 634 ------- .../assets/asset-hub-kusama/tests/tests.rs | 658 ------- .../assets/asset-hub-polkadot/Cargo.toml | 215 --- .../assets/asset-hub-polkadot/build.rs | 26 - .../assets/asset-hub-polkadot/src/lib.rs | 1383 --------------- .../src/weights/block_weights.rs | 53 - .../src/weights/cumulus_pallet_dmp_queue.rs | 131 -- .../cumulus_pallet_parachain_system.rs | 80 - .../src/weights/cumulus_pallet_xcmp_queue.rs | 148 -- .../src/weights/extrinsic_weights.rs | 53 - .../src/weights/frame_system.rs | 154 -- .../asset-hub-polkadot/src/weights/mod.rs | 42 - .../src/weights/pallet_assets_foreign.rs | 532 ------ .../src/weights/pallet_assets_local.rs | 528 ------ .../src/weights/pallet_balances.rs | 152 -- .../src/weights/pallet_collator_selection.rs | 246 --- .../src/weights/pallet_message_queue.rs | 179 -- .../src/weights/pallet_multisig.rs | 164 -- .../src/weights/pallet_nfts.rs | 772 -------- .../src/weights/pallet_proxy.rs | 225 --- .../src/weights/pallet_session.rs | 80 - .../src/weights/pallet_timestamp.rs | 74 - .../src/weights/pallet_uniques.rs | 466 ----- .../src/weights/pallet_utility.rs | 101 -- .../src/weights/pallet_xcm.rs | 324 ---- .../src/weights/paritydb_weights.rs | 63 - .../src/weights/rocksdb_weights.rs | 63 - .../asset-hub-polkadot/src/weights/xcm/mod.rs | 244 --- .../xcm/pallet_xcm_benchmarks_fungible.rs | 187 -- .../xcm/pallet_xcm_benchmarks_generic.rs | 328 ---- .../asset-hub-polkadot/src/xcm_config.rs | 546 ------ .../assets/asset-hub-polkadot/tests/tests.rs | 683 -------- .../bridge-hubs/bridge-hub-kusama/Cargo.toml | 192 -- .../bridge-hubs/bridge-hub-kusama/build.rs | 26 - .../bridge-hubs/bridge-hub-kusama/src/lib.rs | 872 ---------- .../src/weights/block_weights.rs | 53 - .../src/weights/cumulus_pallet_dmp_queue.rs | 131 -- .../cumulus_pallet_parachain_system.rs | 80 - .../src/weights/cumulus_pallet_xcmp_queue.rs | 148 -- .../src/weights/extrinsic_weights.rs | 53 - .../src/weights/frame_system.rs | 154 -- .../bridge-hub-kusama/src/weights/mod.rs | 41 - .../src/weights/pallet_balances.rs | 152 -- .../src/weights/pallet_collator_selection.rs | 248 --- .../src/weights/pallet_message_queue.rs | 179 -- .../src/weights/pallet_multisig.rs | 164 -- .../src/weights/pallet_session.rs | 80 - .../src/weights/pallet_timestamp.rs | 74 - .../src/weights/pallet_utility.rs | 101 -- .../src/weights/pallet_xcm.rs | 323 ---- .../src/weights/paritydb_weights.rs | 63 - .../src/weights/rocksdb_weights.rs | 63 - .../bridge-hub-kusama/src/weights/xcm/mod.rs | 244 --- .../xcm/pallet_xcm_benchmarks_fungible.rs | 187 -- 
.../xcm/pallet_xcm_benchmarks_generic.rs | 328 ---- .../bridge-hub-kusama/src/xcm_config.rs | 280 --- .../bridge-hub-kusama/tests/tests.rs | 51 - .../bridge-hub-polkadot/Cargo.toml | 192 -- .../bridge-hubs/bridge-hub-polkadot/build.rs | 26 - .../bridge-hub-polkadot/src/lib.rs | 873 ---------- .../src/weights/block_weights.rs | 53 - .../src/weights/cumulus_pallet_dmp_queue.rs | 131 -- .../cumulus_pallet_parachain_system.rs | 80 - .../src/weights/cumulus_pallet_xcmp_queue.rs | 148 -- .../src/weights/extrinsic_weights.rs | 53 - .../src/weights/frame_system.rs | 154 -- .../bridge-hub-polkadot/src/weights/mod.rs | 41 - .../src/weights/pallet_balances.rs | 152 -- .../src/weights/pallet_collator_selection.rs | 248 --- .../src/weights/pallet_message_queue.rs | 179 -- .../src/weights/pallet_multisig.rs | 164 -- .../src/weights/pallet_session.rs | 80 - .../src/weights/pallet_timestamp.rs | 74 - .../src/weights/pallet_utility.rs | 101 -- .../src/weights/pallet_xcm.rs | 323 ---- .../src/weights/paritydb_weights.rs | 63 - .../src/weights/rocksdb_weights.rs | 63 - .../src/weights/xcm/mod.rs | 244 --- .../xcm/pallet_xcm_benchmarks_fungible.rs | 187 -- .../xcm/pallet_xcm_benchmarks_generic.rs | 328 ---- .../bridge-hub-polkadot/src/xcm_config.rs | 284 --- .../bridge-hub-polkadot/tests/tests.rs | 51 - .../collectives-polkadot/Cargo.toml | 226 --- .../collectives/collectives-polkadot/build.rs | 26 - .../src/ambassador/mod.rs | 255 --- .../src/ambassador/origins.rs | 135 -- .../src/ambassador/tracks.rs | 282 --- .../src/fellowship/migration.rs | 261 --- .../src/fellowship/mod.rs | 239 --- .../src/fellowship/origins.rs | 247 --- .../src/fellowship/tracks.rs | 532 ------ .../collectives-polkadot/src/impls.rs | 229 --- .../collectives-polkadot/src/lib.rs | 1031 ----------- .../src/weights/block_weights.rs | 53 - .../src/weights/cumulus_pallet_dmp_queue.rs | 131 -- .../cumulus_pallet_parachain_system.rs | 80 - .../src/weights/cumulus_pallet_xcmp_queue.rs | 148 -- .../src/weights/extrinsic_weights.rs | 53 - .../src/weights/frame_system.rs | 154 -- .../collectives-polkadot/src/weights/mod.rs | 50 - .../src/weights/pallet_alliance.rs | 494 ------ .../src/weights/pallet_balances.rs | 152 -- .../src/weights/pallet_collator_selection.rs | 246 --- .../src/weights/pallet_collective.rs | 304 ---- .../src/weights/pallet_collective_content.rs | 93 - .../pallet_core_fellowship_ambassador_core.rs | 223 --- .../pallet_core_fellowship_fellowship_core.rs | 222 --- .../src/weights/pallet_message_queue.rs | 179 -- .../src/weights/pallet_multisig.rs | 164 -- .../src/weights/pallet_preimage.rs | 232 --- .../src/weights/pallet_proxy.rs | 225 --- ...ranked_collective_ambassador_collective.rs | 177 -- ...ranked_collective_fellowship_collective.rs | 176 -- .../pallet_referenda_ambassador_referenda.rs | 536 ------ .../pallet_referenda_fellowship_referenda.rs | 535 ------ .../pallet_salary_ambassador_salary.rs | 190 -- .../pallet_salary_fellowship_salary.rs | 189 -- .../src/weights/pallet_scheduler.rs | 206 --- .../src/weights/pallet_session.rs | 80 - .../src/weights/pallet_timestamp.rs | 74 - .../src/weights/pallet_utility.rs | 101 -- .../src/weights/pallet_xcm.rs | 323 ---- .../src/weights/paritydb_weights.rs | 63 - .../src/weights/rocksdb_weights.rs | 63 - .../collectives-polkadot/src/xcm_config.rs | 331 ---- .../glutton/glutton-kusama/Cargo.toml | 138 -- .../runtimes/glutton/glutton-kusama/build.rs | 24 - .../glutton/glutton-kusama/src/lib.rs | 528 ------ .../cumulus_pallet_parachain_system.rs | 80 - .../src/weights/frame_system.rs | 
154 -- .../glutton/glutton-kusama/src/weights/mod.rs | 19 - .../src/weights/pallet_glutton.rs | 179 -- .../src/weights/pallet_message_queue.rs | 179 -- .../src/weights/pallet_timestamp.rs | 75 - .../glutton/glutton-kusama/src/xcm_config.rs | 92 - cumulus/polkadot-parachain/Cargo.toml | 18 - .../src/chain_spec/asset_hubs.rs | 357 +--- .../src/chain_spec/bridge_hubs.rs | 246 +-- .../src/chain_spec/collectives.rs | 134 -- .../src/chain_spec/glutton.rs | 56 - cumulus/polkadot-parachain/src/command.rs | 58 +- cumulus/polkadot-parachain/src/service.rs | 90 - .../tests/benchmark_storage_works.rs | 2 +- cumulus/scripts/benchmarks.sh | 9 - .../scripts/create_bridge_hub_kusama_spec.sh | 108 -- .../create_bridge_hub_polkadot_spec.sh | 108 -- cumulus/scripts/create_glutton_spec.sh | 2 +- .../scripts/parachains_integration_tests.sh | 32 - ...l => asset_hub_westend_local_network.toml} | 18 +- .../bridge_hub_kusama_local_network.toml | 67 - .../bridge_hub_polkadot_local_network.toml | 67 - cumulus/zombienet/examples/small_network.toml | 2 +- 212 files changed, 25 insertions(+), 43083 deletions(-) delete mode 100644 cumulus/parachains/integration-tests/e2e/assets/asset-hub-kusama/0_xcm/0_init.yml delete mode 100644 cumulus/parachains/integration-tests/e2e/assets/asset-hub-kusama/0_xcm/1_dmp.yml delete mode 100644 cumulus/parachains/integration-tests/e2e/assets/asset-hub-kusama/0_xcm/2_ump.yml delete mode 100644 cumulus/parachains/integration-tests/e2e/assets/asset-hub-kusama/0_xcm/3_force_hrmp-open-channels.yml delete mode 100644 cumulus/parachains/integration-tests/e2e/assets/asset-hub-kusama/0_xcm/4_hrmp.yml delete mode 100644 cumulus/parachains/integration-tests/e2e/assets/asset-hub-kusama/config.toml delete mode 100644 cumulus/parachains/integration-tests/e2e/assets/asset-hub-polkadot/0_xcm/0_init.yml delete mode 100644 cumulus/parachains/integration-tests/e2e/assets/asset-hub-polkadot/0_xcm/1_dmp.yml delete mode 100644 cumulus/parachains/integration-tests/e2e/assets/asset-hub-polkadot/0_xcm/2_ump.yml delete mode 100644 cumulus/parachains/integration-tests/e2e/assets/asset-hub-polkadot/0_xcm/3_force_hrmp-open-channels.yml delete mode 100644 cumulus/parachains/integration-tests/e2e/assets/asset-hub-polkadot/0_xcm/4_hrmp.yml delete mode 100644 cumulus/parachains/integration-tests/e2e/assets/asset-hub-polkadot/config.toml delete mode 100644 cumulus/parachains/integration-tests/e2e/collectives/README.md delete mode 100644 cumulus/parachains/integration-tests/e2e/collectives/collectives-polkadot/0_xcm/0_init.yml delete mode 100644 cumulus/parachains/integration-tests/e2e/collectives/collectives-polkadot/0_xcm/1_teleport.yml delete mode 100644 cumulus/parachains/integration-tests/e2e/collectives/collectives-polkadot/0_xcm/2_reserve.yml delete mode 100644 cumulus/parachains/integration-tests/e2e/collectives/collectives-polkadot/0_xcm/3_hrmp-open-channels.yml delete mode 100644 cumulus/parachains/integration-tests/e2e/collectives/collectives-polkadot/1_alliance/0_join_alliance_fails.yml delete mode 100644 cumulus/parachains/integration-tests/e2e/collectives/collectives-polkadot/1_alliance/1_init_alliance.yml delete mode 100644 cumulus/parachains/integration-tests/e2e/collectives/collectives-polkadot/1_alliance/2_join_alliance_fails.yml delete mode 100644 cumulus/parachains/integration-tests/e2e/collectives/collectives-polkadot/1_alliance/3_kick_member.yml delete mode 100644 cumulus/parachains/integration-tests/e2e/collectives/collectives-polkadot/2_opengov/0_assethub.yml delete mode 100644 
cumulus/parachains/integration-tests/e2e/collectives/collectives-polkadot/3_fellowship/0_init.yml delete mode 100644 cumulus/parachains/integration-tests/e2e/collectives/collectives-polkadot/3_fellowship/1_whitelist_call.yml delete mode 100644 cumulus/parachains/integration-tests/e2e/collectives/collectives-polkadot/3_fellowship/2_assethub.yml delete mode 100644 cumulus/parachains/integration-tests/e2e/collectives/collectives-polkadot/config.toml delete mode 100644 cumulus/parachains/runtimes/assets/asset-hub-kusama/Cargo.toml delete mode 100644 cumulus/parachains/runtimes/assets/asset-hub-kusama/build.rs delete mode 100644 cumulus/parachains/runtimes/assets/asset-hub-kusama/src/lib.rs delete mode 100644 cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/block_weights.rs delete mode 100644 cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/cumulus_pallet_dmp_queue.rs delete mode 100644 cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/cumulus_pallet_parachain_system.rs delete mode 100644 cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/cumulus_pallet_xcmp_queue.rs delete mode 100644 cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/extrinsic_weights.rs delete mode 100644 cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/frame_system.rs delete mode 100644 cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/mod.rs delete mode 100644 cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/pallet_asset_conversion.rs delete mode 100644 cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/pallet_assets_foreign.rs delete mode 100644 cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/pallet_assets_local.rs delete mode 100644 cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/pallet_assets_pool.rs delete mode 100644 cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/pallet_balances.rs delete mode 100644 cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/pallet_collator_selection.rs delete mode 100644 cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/pallet_message_queue.rs delete mode 100644 cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/pallet_multisig.rs delete mode 100644 cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/pallet_nft_fractionalization.rs delete mode 100644 cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/pallet_nfts.rs delete mode 100644 cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/pallet_proxy.rs delete mode 100644 cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/pallet_session.rs delete mode 100644 cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/pallet_timestamp.rs delete mode 100644 cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/pallet_uniques.rs delete mode 100644 cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/pallet_utility.rs delete mode 100644 cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/pallet_xcm.rs delete mode 100644 cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/paritydb_weights.rs delete mode 100644 cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/rocksdb_weights.rs delete mode 100644 cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/xcm/mod.rs delete mode 100644 cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/xcm/pallet_xcm_benchmarks_fungible.rs delete mode 100644 
cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/xcm/pallet_xcm_benchmarks_generic.rs delete mode 100644 cumulus/parachains/runtimes/assets/asset-hub-kusama/src/xcm_config.rs delete mode 100644 cumulus/parachains/runtimes/assets/asset-hub-kusama/tests/tests.rs delete mode 100644 cumulus/parachains/runtimes/assets/asset-hub-polkadot/Cargo.toml delete mode 100644 cumulus/parachains/runtimes/assets/asset-hub-polkadot/build.rs delete mode 100644 cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/lib.rs delete mode 100644 cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/weights/block_weights.rs delete mode 100644 cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/weights/cumulus_pallet_dmp_queue.rs delete mode 100644 cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/weights/cumulus_pallet_parachain_system.rs delete mode 100644 cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/weights/cumulus_pallet_xcmp_queue.rs delete mode 100644 cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/weights/extrinsic_weights.rs delete mode 100644 cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/weights/frame_system.rs delete mode 100644 cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/weights/mod.rs delete mode 100644 cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/weights/pallet_assets_foreign.rs delete mode 100644 cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/weights/pallet_assets_local.rs delete mode 100644 cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/weights/pallet_balances.rs delete mode 100644 cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/weights/pallet_collator_selection.rs delete mode 100644 cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/weights/pallet_message_queue.rs delete mode 100644 cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/weights/pallet_multisig.rs delete mode 100644 cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/weights/pallet_nfts.rs delete mode 100644 cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/weights/pallet_proxy.rs delete mode 100644 cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/weights/pallet_session.rs delete mode 100644 cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/weights/pallet_timestamp.rs delete mode 100644 cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/weights/pallet_uniques.rs delete mode 100644 cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/weights/pallet_utility.rs delete mode 100644 cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/weights/pallet_xcm.rs delete mode 100644 cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/weights/paritydb_weights.rs delete mode 100644 cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/weights/rocksdb_weights.rs delete mode 100644 cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/weights/xcm/mod.rs delete mode 100644 cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/weights/xcm/pallet_xcm_benchmarks_fungible.rs delete mode 100644 cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/weights/xcm/pallet_xcm_benchmarks_generic.rs delete mode 100644 cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/xcm_config.rs delete mode 100644 cumulus/parachains/runtimes/assets/asset-hub-polkadot/tests/tests.rs delete mode 100644 cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/Cargo.toml delete mode 100644 cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/build.rs delete mode 
100644 cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/lib.rs delete mode 100644 cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/weights/block_weights.rs delete mode 100644 cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/weights/cumulus_pallet_dmp_queue.rs delete mode 100644 cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/weights/cumulus_pallet_parachain_system.rs delete mode 100644 cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/weights/cumulus_pallet_xcmp_queue.rs delete mode 100644 cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/weights/extrinsic_weights.rs delete mode 100644 cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/weights/frame_system.rs delete mode 100644 cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/weights/mod.rs delete mode 100644 cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/weights/pallet_balances.rs delete mode 100644 cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/weights/pallet_collator_selection.rs delete mode 100644 cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/weights/pallet_message_queue.rs delete mode 100644 cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/weights/pallet_multisig.rs delete mode 100644 cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/weights/pallet_session.rs delete mode 100644 cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/weights/pallet_timestamp.rs delete mode 100644 cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/weights/pallet_utility.rs delete mode 100644 cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/weights/pallet_xcm.rs delete mode 100644 cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/weights/paritydb_weights.rs delete mode 100644 cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/weights/rocksdb_weights.rs delete mode 100644 cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/weights/xcm/mod.rs delete mode 100644 cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/weights/xcm/pallet_xcm_benchmarks_fungible.rs delete mode 100644 cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/weights/xcm/pallet_xcm_benchmarks_generic.rs delete mode 100644 cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/xcm_config.rs delete mode 100644 cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/tests/tests.rs delete mode 100644 cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/Cargo.toml delete mode 100644 cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/build.rs delete mode 100644 cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/lib.rs delete mode 100644 cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/weights/block_weights.rs delete mode 100644 cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/weights/cumulus_pallet_dmp_queue.rs delete mode 100644 cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/weights/cumulus_pallet_parachain_system.rs delete mode 100644 cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/weights/cumulus_pallet_xcmp_queue.rs delete mode 100644 cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/weights/extrinsic_weights.rs delete mode 100644 cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/weights/frame_system.rs delete mode 100644 cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/weights/mod.rs delete mode 
100644 cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/weights/pallet_balances.rs delete mode 100644 cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/weights/pallet_collator_selection.rs delete mode 100644 cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/weights/pallet_message_queue.rs delete mode 100644 cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/weights/pallet_multisig.rs delete mode 100644 cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/weights/pallet_session.rs delete mode 100644 cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/weights/pallet_timestamp.rs delete mode 100644 cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/weights/pallet_utility.rs delete mode 100644 cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/weights/pallet_xcm.rs delete mode 100644 cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/weights/paritydb_weights.rs delete mode 100644 cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/weights/rocksdb_weights.rs delete mode 100644 cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/weights/xcm/mod.rs delete mode 100644 cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/weights/xcm/pallet_xcm_benchmarks_fungible.rs delete mode 100644 cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/weights/xcm/pallet_xcm_benchmarks_generic.rs delete mode 100644 cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/xcm_config.rs delete mode 100644 cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/tests/tests.rs delete mode 100644 cumulus/parachains/runtimes/collectives/collectives-polkadot/Cargo.toml delete mode 100644 cumulus/parachains/runtimes/collectives/collectives-polkadot/build.rs delete mode 100644 cumulus/parachains/runtimes/collectives/collectives-polkadot/src/ambassador/mod.rs delete mode 100644 cumulus/parachains/runtimes/collectives/collectives-polkadot/src/ambassador/origins.rs delete mode 100644 cumulus/parachains/runtimes/collectives/collectives-polkadot/src/ambassador/tracks.rs delete mode 100644 cumulus/parachains/runtimes/collectives/collectives-polkadot/src/fellowship/migration.rs delete mode 100644 cumulus/parachains/runtimes/collectives/collectives-polkadot/src/fellowship/mod.rs delete mode 100644 cumulus/parachains/runtimes/collectives/collectives-polkadot/src/fellowship/origins.rs delete mode 100644 cumulus/parachains/runtimes/collectives/collectives-polkadot/src/fellowship/tracks.rs delete mode 100644 cumulus/parachains/runtimes/collectives/collectives-polkadot/src/impls.rs delete mode 100644 cumulus/parachains/runtimes/collectives/collectives-polkadot/src/lib.rs delete mode 100644 cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/block_weights.rs delete mode 100644 cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/cumulus_pallet_dmp_queue.rs delete mode 100644 cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/cumulus_pallet_parachain_system.rs delete mode 100644 cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/cumulus_pallet_xcmp_queue.rs delete mode 100644 cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/extrinsic_weights.rs delete mode 100644 cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/frame_system.rs delete mode 100644 cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/mod.rs delete mode 
100644 cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/pallet_alliance.rs delete mode 100644 cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/pallet_balances.rs delete mode 100644 cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/pallet_collator_selection.rs delete mode 100644 cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/pallet_collective.rs delete mode 100644 cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/pallet_collective_content.rs delete mode 100644 cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/pallet_core_fellowship_ambassador_core.rs delete mode 100644 cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/pallet_core_fellowship_fellowship_core.rs delete mode 100644 cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/pallet_message_queue.rs delete mode 100644 cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/pallet_multisig.rs delete mode 100644 cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/pallet_preimage.rs delete mode 100644 cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/pallet_proxy.rs delete mode 100644 cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/pallet_ranked_collective_ambassador_collective.rs delete mode 100644 cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/pallet_ranked_collective_fellowship_collective.rs delete mode 100644 cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/pallet_referenda_ambassador_referenda.rs delete mode 100644 cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/pallet_referenda_fellowship_referenda.rs delete mode 100644 cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/pallet_salary_ambassador_salary.rs delete mode 100644 cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/pallet_salary_fellowship_salary.rs delete mode 100644 cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/pallet_scheduler.rs delete mode 100644 cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/pallet_session.rs delete mode 100644 cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/pallet_timestamp.rs delete mode 100644 cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/pallet_utility.rs delete mode 100644 cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/pallet_xcm.rs delete mode 100644 cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/paritydb_weights.rs delete mode 100644 cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/rocksdb_weights.rs delete mode 100644 cumulus/parachains/runtimes/collectives/collectives-polkadot/src/xcm_config.rs delete mode 100644 cumulus/parachains/runtimes/glutton/glutton-kusama/Cargo.toml delete mode 100644 cumulus/parachains/runtimes/glutton/glutton-kusama/build.rs delete mode 100644 cumulus/parachains/runtimes/glutton/glutton-kusama/src/lib.rs delete mode 100644 cumulus/parachains/runtimes/glutton/glutton-kusama/src/weights/cumulus_pallet_parachain_system.rs delete mode 100644 cumulus/parachains/runtimes/glutton/glutton-kusama/src/weights/frame_system.rs delete mode 100644 cumulus/parachains/runtimes/glutton/glutton-kusama/src/weights/mod.rs delete mode 100644 
cumulus/parachains/runtimes/glutton/glutton-kusama/src/weights/pallet_glutton.rs delete mode 100644 cumulus/parachains/runtimes/glutton/glutton-kusama/src/weights/pallet_message_queue.rs delete mode 100644 cumulus/parachains/runtimes/glutton/glutton-kusama/src/weights/pallet_timestamp.rs delete mode 100644 cumulus/parachains/runtimes/glutton/glutton-kusama/src/xcm_config.rs delete mode 100755 cumulus/scripts/create_bridge_hub_kusama_spec.sh delete mode 100755 cumulus/scripts/create_bridge_hub_polkadot_spec.sh delete mode 100755 cumulus/scripts/parachains_integration_tests.sh rename cumulus/zombienet/examples/{statemine_kusama_local_network.toml => asset_hub_westend_local_network.toml} (71%) delete mode 100644 cumulus/zombienet/examples/bridge_hub_kusama_local_network.toml delete mode 100644 cumulus/zombienet/examples/bridge_hub_polkadot_local_network.toml diff --git a/.gitlab/pipeline/short-benchmarks.yml b/.gitlab/pipeline/short-benchmarks.yml index 0218d3fdac0..97bce479927 100644 --- a/.gitlab/pipeline/short-benchmarks.yml +++ b/.gitlab/pipeline/short-benchmarks.yml @@ -49,16 +49,6 @@ short-benchmark-westend: &short-bench script: - ./artifacts/polkadot-parachain benchmark pallet --chain $RUNTIME_CHAIN --pallet "*" --extrinsic "*" --steps 2 --repeat 1 -short-benchmark-asset-hub-polkadot: - <<: *short-bench-cumulus - variables: - RUNTIME_CHAIN: asset-hub-polkadot-dev - -short-benchmark-asset-hub-kusama: - <<: *short-bench-cumulus - variables: - RUNTIME_CHAIN: asset-hub-kusama-dev - short-benchmark-asset-hub-rococo: <<: *short-bench-cumulus variables: @@ -69,16 +59,6 @@ short-benchmark-asset-hub-westend: variables: RUNTIME_CHAIN: asset-hub-westend-dev -short-benchmark-bridge-hub-polkadot: - <<: *short-bench-cumulus - variables: - RUNTIME_CHAIN: bridge-hub-polkadot-dev - -short-benchmark-bridge-hub-kusama: - <<: *short-bench-cumulus - variables: - RUNTIME_CHAIN: bridge-hub-kusama-dev - short-benchmark-bridge-hub-rococo: <<: *short-bench-cumulus variables: @@ -89,21 +69,11 @@ short-benchmark-bridge-hub-westend: variables: RUNTIME_CHAIN: bridge-hub-westend-dev -short-benchmark-collectives-polkadot: - <<: *short-bench-cumulus - variables: - RUNTIME_CHAIN: collectives-polkadot-dev - short-benchmark-collectives-westend: <<: *short-bench-cumulus variables: RUNTIME_CHAIN: collectives-westend-dev -short-benchmark-glutton-kusama: - <<: *short-bench-cumulus - variables: - RUNTIME_CHAIN: glutton-kusama-dev-1300 - short-benchmark-glutton-westend: <<: *short-bench-cumulus variables: diff --git a/Cargo.lock b/Cargo.lock index bf4efa60ad7..db5717a67ec 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -730,150 +730,6 @@ version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9b34d609dfbaf33d6889b2b7106d3ca345eacad44200913df5ba02bfd31d2ba9" -[[package]] -name = "asset-hub-kusama-runtime" -version = "0.9.420" -dependencies = [ - "asset-test-utils", - "assets-common", - "cumulus-pallet-aura-ext", - "cumulus-pallet-dmp-queue", - "cumulus-pallet-parachain-system", - "cumulus-pallet-session-benchmarking", - "cumulus-pallet-xcm", - "cumulus-pallet-xcmp-queue", - "cumulus-primitives-core", - "cumulus-primitives-utility", - "frame-benchmarking", - "frame-executive", - "frame-support", - "frame-system", - "frame-system-benchmarking", - "frame-system-rpc-runtime-api", - "frame-try-runtime", - "hex-literal", - "log", - "pallet-asset-conversion", - "pallet-asset-conversion-tx-payment", - "pallet-assets", - "pallet-aura", - "pallet-authorship", - "pallet-balances", - 
"pallet-collator-selection", - "pallet-message-queue", - "pallet-multisig", - "pallet-nft-fractionalization", - "pallet-nfts", - "pallet-nfts-runtime-api", - "pallet-proxy", - "pallet-session", - "pallet-state-trie-migration", - "pallet-timestamp", - "pallet-transaction-payment", - "pallet-transaction-payment-rpc-runtime-api", - "pallet-uniques", - "pallet-utility", - "pallet-xcm", - "pallet-xcm-benchmarks", - "parachains-common", - "parity-scale-codec", - "polkadot-core-primitives", - "polkadot-parachain-primitives", - "polkadot-runtime-common", - "primitive-types", - "scale-info", - "smallvec", - "sp-api", - "sp-block-builder", - "sp-consensus-aura", - "sp-core", - "sp-genesis-builder", - "sp-inherents", - "sp-offchain", - "sp-runtime", - "sp-session", - "sp-std 8.0.0", - "sp-storage 13.0.0", - "sp-transaction-pool", - "sp-version", - "sp-weights", - "staging-parachain-info", - "staging-xcm", - "staging-xcm-builder", - "staging-xcm-executor", - "substrate-wasm-builder", -] - -[[package]] -name = "asset-hub-polkadot-runtime" -version = "0.9.420" -dependencies = [ - "asset-test-utils", - "assets-common", - "cumulus-pallet-aura-ext", - "cumulus-pallet-dmp-queue", - "cumulus-pallet-parachain-system", - "cumulus-pallet-session-benchmarking", - "cumulus-pallet-xcm", - "cumulus-pallet-xcmp-queue", - "cumulus-primitives-core", - "cumulus-primitives-utility", - "frame-benchmarking", - "frame-executive", - "frame-support", - "frame-system", - "frame-system-benchmarking", - "frame-system-rpc-runtime-api", - "frame-try-runtime", - "hex-literal", - "log", - "pallet-asset-tx-payment", - "pallet-assets", - "pallet-aura", - "pallet-authorship", - "pallet-balances", - "pallet-collator-selection", - "pallet-message-queue", - "pallet-multisig", - "pallet-nfts", - "pallet-nfts-runtime-api", - "pallet-proxy", - "pallet-session", - "pallet-timestamp", - "pallet-transaction-payment", - "pallet-transaction-payment-rpc-runtime-api", - "pallet-uniques", - "pallet-utility", - "pallet-xcm", - "pallet-xcm-benchmarks", - "parachains-common", - "parity-scale-codec", - "polkadot-core-primitives", - "polkadot-parachain-primitives", - "polkadot-runtime-common", - "scale-info", - "smallvec", - "sp-api", - "sp-block-builder", - "sp-consensus-aura", - "sp-core", - "sp-genesis-builder", - "sp-inherents", - "sp-offchain", - "sp-runtime", - "sp-session", - "sp-std 8.0.0", - "sp-storage 13.0.0", - "sp-transaction-pool", - "sp-version", - "sp-weights", - "staging-parachain-info", - "staging-xcm", - "staging-xcm-builder", - "staging-xcm-executor", - "substrate-wasm-builder", -] - [[package]] name = "asset-hub-rococo-emulated-chain" version = "0.0.0" @@ -1973,134 +1829,6 @@ dependencies = [ "sp-runtime", ] -[[package]] -name = "bridge-hub-kusama-runtime" -version = "0.1.0" -dependencies = [ - "bridge-hub-test-utils", - "cumulus-pallet-aura-ext", - "cumulus-pallet-dmp-queue", - "cumulus-pallet-parachain-system", - "cumulus-pallet-session-benchmarking", - "cumulus-pallet-xcm", - "cumulus-pallet-xcmp-queue", - "cumulus-primitives-core", - "cumulus-primitives-utility", - "frame-benchmarking", - "frame-executive", - "frame-support", - "frame-system", - "frame-system-benchmarking", - "frame-system-rpc-runtime-api", - "frame-try-runtime", - "hex-literal", - "log", - "pallet-aura", - "pallet-authorship", - "pallet-balances", - "pallet-collator-selection", - "pallet-message-queue", - "pallet-multisig", - "pallet-session", - "pallet-timestamp", - "pallet-transaction-payment", - "pallet-transaction-payment-rpc-runtime-api", - 
"pallet-utility", - "pallet-xcm", - "pallet-xcm-benchmarks", - "parachains-common", - "parity-scale-codec", - "polkadot-core-primitives", - "polkadot-parachain-primitives", - "polkadot-runtime-common", - "scale-info", - "serde", - "smallvec", - "sp-api", - "sp-block-builder", - "sp-consensus-aura", - "sp-core", - "sp-genesis-builder", - "sp-inherents", - "sp-io", - "sp-offchain", - "sp-runtime", - "sp-session", - "sp-std 8.0.0", - "sp-storage 13.0.0", - "sp-transaction-pool", - "sp-version", - "staging-parachain-info", - "staging-xcm", - "staging-xcm-builder", - "staging-xcm-executor", - "substrate-wasm-builder", -] - -[[package]] -name = "bridge-hub-polkadot-runtime" -version = "0.1.0" -dependencies = [ - "bridge-hub-test-utils", - "cumulus-pallet-aura-ext", - "cumulus-pallet-dmp-queue", - "cumulus-pallet-parachain-system", - "cumulus-pallet-session-benchmarking", - "cumulus-pallet-xcm", - "cumulus-pallet-xcmp-queue", - "cumulus-primitives-core", - "cumulus-primitives-utility", - "frame-benchmarking", - "frame-executive", - "frame-support", - "frame-system", - "frame-system-benchmarking", - "frame-system-rpc-runtime-api", - "frame-try-runtime", - "hex-literal", - "log", - "pallet-aura", - "pallet-authorship", - "pallet-balances", - "pallet-collator-selection", - "pallet-message-queue", - "pallet-multisig", - "pallet-session", - "pallet-timestamp", - "pallet-transaction-payment", - "pallet-transaction-payment-rpc-runtime-api", - "pallet-utility", - "pallet-xcm", - "pallet-xcm-benchmarks", - "parachains-common", - "parity-scale-codec", - "polkadot-core-primitives", - "polkadot-parachain-primitives", - "polkadot-runtime-common", - "scale-info", - "serde", - "smallvec", - "sp-api", - "sp-block-builder", - "sp-consensus-aura", - "sp-core", - "sp-genesis-builder", - "sp-inherents", - "sp-io", - "sp-offchain", - "sp-runtime", - "sp-session", - "sp-std 8.0.0", - "sp-storage 13.0.0", - "sp-transaction-pool", - "sp-version", - "staging-parachain-info", - "staging-xcm", - "staging-xcm-builder", - "staging-xcm-executor", - "substrate-wasm-builder", -] - [[package]] name = "bridge-hub-rococo-emulated-chain" version = "0.0.0" @@ -2861,78 +2589,6 @@ dependencies = [ "unicode-width", ] -[[package]] -name = "collectives-polkadot-runtime" -version = "1.0.0" -dependencies = [ - "cumulus-pallet-aura-ext", - "cumulus-pallet-dmp-queue", - "cumulus-pallet-parachain-system", - "cumulus-pallet-session-benchmarking", - "cumulus-pallet-xcm", - "cumulus-pallet-xcmp-queue", - "cumulus-primitives-core", - "cumulus-primitives-utility", - "frame-benchmarking", - "frame-executive", - "frame-support", - "frame-system", - "frame-system-benchmarking", - "frame-system-rpc-runtime-api", - "frame-try-runtime", - "hex-literal", - "log", - "pallet-alliance", - "pallet-aura", - "pallet-authorship", - "pallet-balances", - "pallet-collator-selection", - "pallet-collective", - "pallet-collective-content", - "pallet-core-fellowship", - "pallet-message-queue", - "pallet-multisig", - "pallet-preimage", - "pallet-proxy", - "pallet-ranked-collective", - "pallet-referenda", - "pallet-salary", - "pallet-scheduler", - "pallet-session", - "pallet-timestamp", - "pallet-transaction-payment", - "pallet-transaction-payment-rpc-runtime-api", - "pallet-utility", - "pallet-xcm", - "parachains-common", - "parity-scale-codec", - "polkadot-core-primitives", - "polkadot-parachain-primitives", - "polkadot-runtime-common", - "scale-info", - "smallvec", - "sp-api", - "sp-arithmetic", - "sp-block-builder", - "sp-consensus-aura", - "sp-core", - 
"sp-genesis-builder", - "sp-inherents", - "sp-io", - "sp-offchain", - "sp-runtime", - "sp-session", - "sp-std 8.0.0", - "sp-storage 13.0.0", - "sp-transaction-pool", - "sp-version", - "staging-parachain-info", - "staging-xcm", - "staging-xcm-builder", - "staging-xcm-executor", - "substrate-wasm-builder", -] - [[package]] name = "collectives-westend-runtime" version = "1.0.0" @@ -6248,51 +5904,6 @@ dependencies = [ "regex", ] -[[package]] -name = "glutton-runtime" -version = "1.0.0" -dependencies = [ - "cumulus-pallet-aura-ext", - "cumulus-pallet-parachain-system", - "cumulus-pallet-xcm", - "cumulus-primitives-aura", - "cumulus-primitives-core", - "cumulus-primitives-timestamp", - "frame-benchmarking", - "frame-executive", - "frame-support", - "frame-system", - "frame-system-benchmarking", - "frame-system-rpc-runtime-api", - "frame-try-runtime", - "pallet-aura", - "pallet-glutton", - "pallet-message-queue", - "pallet-sudo", - "pallet-timestamp", - "parachains-common", - "parity-scale-codec", - "scale-info", - "sp-api", - "sp-block-builder", - "sp-consensus-aura", - "sp-core", - "sp-genesis-builder", - "sp-inherents", - "sp-offchain", - "sp-runtime", - "sp-session", - "sp-std 8.0.0", - "sp-storage 13.0.0", - "sp-transaction-pool", - "sp-version", - "staging-parachain-info", - "staging-xcm", - "staging-xcm-builder", - "staging-xcm-executor", - "substrate-wasm-builder", -] - [[package]] name = "glutton-westend-runtime" version = "1.0.0" @@ -12939,17 +12550,12 @@ name = "polkadot-parachain-bin" version = "1.1.0" dependencies = [ "assert_cmd", - "asset-hub-kusama-runtime", - "asset-hub-polkadot-runtime", "asset-hub-rococo-runtime", "asset-hub-westend-runtime", "async-trait", - "bridge-hub-kusama-runtime", - "bridge-hub-polkadot-runtime", "bridge-hub-rococo-runtime", "bridge-hub-westend-runtime", "clap 4.4.6", - "collectives-polkadot-runtime", "collectives-westend-runtime", "color-print", "contracts-rococo-runtime", @@ -12970,7 +12576,6 @@ dependencies = [ "frame-system-rpc-runtime-api", "frame-try-runtime", "futures", - "glutton-runtime", "glutton-westend-runtime", "hex-literal", "jsonrpsee", diff --git a/Cargo.toml b/Cargo.toml index f16c8393183..e154c2f3bc8 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -77,21 +77,15 @@ members = [ "cumulus/parachains/pallets/collective-content", "cumulus/parachains/pallets/parachain-info", "cumulus/parachains/pallets/ping", - "cumulus/parachains/runtimes/assets/asset-hub-kusama", - "cumulus/parachains/runtimes/assets/asset-hub-polkadot", "cumulus/parachains/runtimes/assets/asset-hub-rococo", "cumulus/parachains/runtimes/assets/asset-hub-westend", "cumulus/parachains/runtimes/assets/common", "cumulus/parachains/runtimes/assets/test-utils", - "cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama", - "cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot", "cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo", "cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend", "cumulus/parachains/runtimes/bridge-hubs/test-utils", - "cumulus/parachains/runtimes/collectives/collectives-polkadot", "cumulus/parachains/runtimes/collectives/collectives-westend", "cumulus/parachains/runtimes/contracts/contracts-rococo", - "cumulus/parachains/runtimes/glutton/glutton-kusama", "cumulus/parachains/runtimes/glutton/glutton-westend", "cumulus/parachains/runtimes/starters/seedling", "cumulus/parachains/runtimes/starters/shell", diff --git a/cumulus/parachains/integration-tests/e2e/assets/asset-hub-kusama/0_xcm/0_init.yml 
b/cumulus/parachains/integration-tests/e2e/assets/asset-hub-kusama/0_xcm/0_init.yml deleted file mode 100644 index fdc1aa258d4..00000000000 --- a/cumulus/parachains/integration-tests/e2e/assets/asset-hub-kusama/0_xcm/0_init.yml +++ /dev/null @@ -1,145 +0,0 @@ ---- -settings: - chains: - relay_chain: &relay_chain - wsPort: 9900 - assets_parachain: &assets_parachain - wsPort: 9910 - paraId: &ap_id 1000 - penpal_parachain: &penpal_parachain - wsPort: 9920 - paraId: &pp_id 2000 - variables: - common: - xcm_version: &xcm_version 3 - require_weight_at_most: &weight_at_most {refTime: 1000000000, proofSize: 200000} - weight_threshold: &weight_threshold { refTime: [10, 10], proofSize: [10, 10] } - chains: - relay_chain: - signer: &rc_signer //Alice - assets_parachain_destination: &ap_dest { v3: { 0, interior: { x1: { parachain: *ap_id }}}} - penpal_parachain: - signer: &pp_signer //Alice - decodedCalls: - ap_force_xcm_version: - chain: *assets_parachain - pallet: polkadotXcm - call: forceXcmVersion - args: [ - { # location - parents: 1, - interior: Here - }, - *xcm_version # xcmVersion - ] - -tests: - - name: Initialize Chains - its: - - name: XCM supported versions between chains - actions: - - extrinsics: # Relay Chain sets supported version for Asset Parachain - - chain: *relay_chain - sudo: true - signer: *rc_signer - pallet: xcmPallet - call: forceXcmVersion - args: [ - { # location - parents: 0, - interior: { - X1: { - Parachain: *ap_id - } - } - }, - *xcm_version # xcmVersion - ] - events: - - name: sudo.Sudid - result: { sudoResult: Ok } - - name: xcmPallet.SupportedVersionChanged - result: { location: { parents: 0, interior: { X1: { Parachain: *ap_id }}}, version: *xcm_version } - - extrinsics: # Relay Chain sets supported version for Penpal Parachain - - chain: *relay_chain - sudo: true - signer: *rc_signer - pallet: xcmPallet - call: forceXcmVersion - args: [ - { # location - parents: 0, - interior: { - X1: { - Parachain: *pp_id - } - } - }, - *xcm_version # xcmVersion - ] - events: - - name: sudo.Sudid - result: { sudoResult: Ok } - - name: xcmPallet.SupportedVersionChanged - result: { location: { parents: 0, interior: { X1: { Parachain: *pp_id }}}, version: *xcm_version } - - extrinsics: # Asset Parachain sets supported version for Relay Chain through it - - chain: *relay_chain - signer: *rc_signer - sudo: true - pallet: xcmPallet - call: send - args: [ - *ap_dest, # destination - { - v3: [ #message - { - UnpaidExecution: { - weightLimit: { - limited: { - refTime: 2200000000, - proofSize: 200000 - } - } - } - }, - { - Transact: { - originKind: Superuser, - requireWeightAtMost: *weight_at_most, - call: $ap_force_xcm_version - } - } - ] - } - ] - events: - - name: sudo.Sudid - result: { sudoResult: Ok } - - name: xcmPallet.Sent - - name: dmpQueue.ExecutedDownward - chain: *assets_parachain - threshold: *weight_threshold - result: { - outcome: { Complete: { refTime: '1,019,210,000', proofSize: '200,000' }} - } - - name: polkadotXcm.SupportedVersionChanged - chain: *assets_parachain - result: { location: { parents: 1, interior: Here }, version: *xcm_version } - - extrinsics: # Penpal Parachain sets supported version for Relay Chain - - chain: *penpal_parachain - signer: *pp_signer - sudo: true - pallet: polkadotXcm - call: forceXcmVersion - args: [ - { # location - parents: 1, - interior: Here - }, - *xcm_version # xcmVersion - ] - events: - - name: sudo.Sudid - result: { sudoResult: Ok } - - name: polkadotXcm.SupportedVersionChanged - result: { location: { parents: 1, interior: Here }, 
version: *xcm_version } diff --git a/cumulus/parachains/integration-tests/e2e/assets/asset-hub-kusama/0_xcm/1_dmp.yml b/cumulus/parachains/integration-tests/e2e/assets/asset-hub-kusama/0_xcm/1_dmp.yml deleted file mode 100644 index 0e207e632a0..00000000000 --- a/cumulus/parachains/integration-tests/e2e/assets/asset-hub-kusama/0_xcm/1_dmp.yml +++ /dev/null @@ -1,263 +0,0 @@ ---- -settings: - chains: - relay_chain: &relay_chain - wsPort: 9900 - assets_parachain: &assets_parachain - wsPort: 9910 - paraId: &ap_id 1000 - variables: - common: - weight_threshold: &weight_threshold { refTime: [10, 10], proofSize: [10, 10] } - chains: - relay_chain: - signer: &rc_signer //Alice - wallet: &rc_wallet HNZata7iMYWmk5RvZRTiAsSDhV8366zq2YGb3tLH5Upf74F - assets_parachain_destination: &ap_dest { v3: { parents: 0, interior: { x1: { parachain: *ap_id }}}} - assets_parachain_account: &ap_acc '0xd43593c715fdd31c61141abd04a99fd6822c8558854ccde39a5684e7a56da27d' - assets_parachain_beneficiary: &ap_benf { v3: { parents: 0, interior: { x1: { accountId32: { id: *ap_acc }}}}} - ksm: &rc_ksm { concrete: { parents: 0, interior: { here: true }}} - amount: &amount 1000000000000 - ksm_fungible: &rc_ksm_fungible { id: *rc_ksm, fun: { fungible: *amount }} - require_weight_at_most: &rc_weight_at_most { refTime: 1000000000, proofSize: 200000 } - assets_parachain_account: - wallet: &ap_wallet HNZata7iMYWmk5RvZRTiAsSDhV8366zq2YGb3tLH5Upf74F - asset_id: &asset_id 1 - asset_min_balance: &asset_ed 1000 - decodedCalls: - force_create_asset: - chain: *assets_parachain - pallet: assets - call: forceCreate - args: [ - *asset_id, - { Id: *ap_wallet }, # owner - true, # isSufficient - *asset_ed # minBalance - ] - -tests: - - name: DMP - its: [] - describes: - - name: xcmPallet.limitedTeleportAssets - before: &before_get_balances - - name: Get the balances of the Relay Chain's sender & Assets Parachain's receiver - actions: - - queries: - balance_rc_sender_before: - chain: *relay_chain - pallet: system - call: account - args: [ *rc_wallet ] - balance_ap_receiver_before: - chain: *assets_parachain - pallet: system - call: account - args: [ *ap_wallet ] - its: - - name: Should teleport native assets from the Relay Chain to the Assets Parachain - actions: - - extrinsics: - - chain: *relay_chain - signer: *rc_signer - pallet: xcmPallet - call: limitedTeleportAssets - args: [ - *ap_dest, # destination - *ap_benf, # beneficiary - { v3: [ *rc_ksm_fungible ] }, - 0, # feeAssetItem - { unlimited: true } # weightLimit - ] - events: - - name: xcmPallet.Attempted - threshold: *weight_threshold - result: { outcome: { Complete: { refTime: '764,772,000', proofSize: 0 }}} - - name: dmpQueue.ExecutedDownward - chain: *assets_parachain - threshold: *weight_threshold - result: { outcome: { Complete: { refTime: '166,944,000', proofSize: 0 }}} - - queries: - balance_rc_sender_after: - chain: *relay_chain - pallet: system - call: account - args: [ *rc_wallet ] - balance_ap_receiver_after: - chain: *assets_parachain - pallet: system - call: account - args: [ *ap_wallet ] - - - name: Should reduce the balance of the sender - actions: - - asserts: - balanceDecreased: - args: [ - { - balances: { - before: $balance_rc_sender_before, - after: $balance_rc_sender_after, - }, - amount: *amount - } - ] - - - name: Should increase the balance of the receiver - actions: - - asserts: - balanceIncreased: - args: [ - { - balances: { - before: $balance_ap_receiver_before, - after: $balance_ap_receiver_after, - } - } - ] - - - name: xcmPallet.send | Superuser - 
Transact(assets.forceCreate) - its: - - name: Relay Chain Superuser account SHOULD be able to execute a XCM Transact instruction in the Assets Parachain - actions: - - extrinsics: - - chain: *relay_chain - signer: *rc_signer - sudo: true - pallet: xcmPallet - call: send - args: [ - *ap_dest, # destination - { - v3: [ #message - { - UnpaidExecution: { - weightLimit: Unlimited - } - }, - { - Transact: { - originKind: Superuser, - requireWeightAtMost: *rc_weight_at_most, - call: $force_create_asset - } - } - ] - } - ] - events: - - name: xcmPallet.Sent - - name: dmpQueue.ExecutedDownward - chain: *assets_parachain - threshold: *weight_threshold - result: { outcome: { Complete: { refTime: '1,014,103,000', proofSize: '200,000' }}} - - queries: - forced_created_asset: - chain: *assets_parachain - pallet: assets - call: asset - args: [ *asset_id ] - - asserts: - isSome: - args: [ $forced_created_asset ] - - - name: xcmPallet.send | Native - Transact(assets.forceCreate) - its: - - name: Relay Chain Native account SHOULD NOT be able to execute a XCM Transact instruction in the Assets Parachain - actions: - - extrinsics: - - chain: *relay_chain - signer: *rc_signer - pallet: xcmPallet - call: send - args: [ - *ap_dest, # destination - { - v3: [ #message - { - UnpaidExecution: { - weightLimit: Unlimited - } - }, - { - Transact: { - originKind: Native, - requireWeightAtMost: *rc_weight_at_most, - call: $force_create_asset - } - } - ] - } - ] - events: - - name: system.ExtrinsicFailed - result: { dispatchError: BadOrigin } - - - name: xcmPallet.limitedReserveTransferAssets - before: *before_get_balances - its: - - name: SHOULD NOT reserved transfer native assets from the Relay Chain to the Assets Parachain - actions: - - extrinsics: - - chain: *relay_chain - signer: *rc_signer - pallet: xcmPallet - call: limitedReserveTransferAssets - args: [ - *ap_dest, # destination - *ap_benf, # beneficiary - { v3: [ *rc_ksm_fungible ] }, # assets - 0, # feeAssetItem - { unlimited: true } # weightLimit - ] - events: - - name: xcmPallet.Attempted - threshold: *weight_threshold - result: { outcome: { Complete: { refTime: '750,645,000', proofSize: 0 }}} - - name: dmpQueue.ExecutedDownward - chain: *assets_parachain - threshold: *weight_threshold - result: { - outcome: { - Incomplete: [ - { refTime: '1,000,000,000', proofSize: 0 }, - UntrustedReserveLocation - ] - } - } - - queries: - balance_rc_sender_after: - chain: *relay_chain - pallet: system - call: account - args: [ *rc_wallet ] - balance_ap_receiver_after: - chain: *assets_parachain - pallet: system - call: account - args: [ *ap_wallet ] - - - name: Should reduce the balance of the sender - actions: - - asserts: - balanceDecreased: - args: [ - { - balances: { - before: $balance_rc_sender_before, - after: $balance_rc_sender_after, - }, - amount: *amount - } - ] - - - name: Should keep the balance of the receiver - actions: - - asserts: - equal: - args: - [ - $balance_ap_receiver_before, - $balance_ap_receiver_after - ] diff --git a/cumulus/parachains/integration-tests/e2e/assets/asset-hub-kusama/0_xcm/2_ump.yml b/cumulus/parachains/integration-tests/e2e/assets/asset-hub-kusama/0_xcm/2_ump.yml deleted file mode 100644 index 2a0bb88090e..00000000000 --- a/cumulus/parachains/integration-tests/e2e/assets/asset-hub-kusama/0_xcm/2_ump.yml +++ /dev/null @@ -1,191 +0,0 @@ ---- -settings: - chains: - relay_chain: &relay_chain - wsPort: 9900 - assets_parachain: &assets_parachain - wsPort: 9910 - paraId: &ap_id 1000 - variables: - common: - amount: &amount 1000000000000 - 
require_weight_at_most: &weight_at_most {refTime: 1000000000, proofSize: 0} - weight_threshold: &weight_threshold { refTime: [10, 10], proofSize: [10, 10] } - chains: - relay_chain: - signer: &rc_signer //Alice - wallet: &rc_wallet HNZata7iMYWmk5RvZRTiAsSDhV8366zq2YGb3tLH5Upf74F #Alice - assets_parachain_destination: &ap_dest { v3: { 0, interior: { x1: { parachain: *ap_id }}}} - assets_parachain_account: &ap_acc '0xd43593c715fdd31c61141abd04a99fd6822c8558854ccde39a5684e7a56da27d' - assets_parachain_beneficiary: &ap_benf {v3: { parents: 0, interior: { x1: { accountId32: { id: *ap_acc }}}}} - ksm: &rc_ksm { concrete: { 0, interior: { here: true }}} - ksm_fungible: &rc_ksm_fungible { id: *rc_ksm, fun: { fungible: *amount }} - assets_parachain_account: - signer: &ap_signer //Alice - wallet: &ap_wallet HNZata7iMYWmk5RvZRTiAsSDhV8366zq2YGb3tLH5Upf74F - relay_chain_destination: &rc_dest { v3: { parents: 1, interior: { here: true }}} - assets_parachain_account: &rc_acc '0xd43593c715fdd31c61141abd04a99fd6822c8558854ccde39a5684e7a56da27d' #Alice - relay_chain_beneficiary: &rc_benf {v3: { parents: 0, interior: { x1: { accountId32: { id: *rc_acc }}}}} - ksm: &ap_ksm { concrete: { parents: 1, interior: { here: true }}} - ksm_fungible: &ap_ksm_fungible { id: *ap_ksm, fun: { fungible: *amount }} - decodedCalls: - system_remark: - chain: *relay_chain - pallet: system - call: remark - args: [ 0x0011 ] - -tests: - - name: UMP - describes: - - name: polkadotXcm.limitedTeleportAssets - before: - - name: DEPENDENCY | Do a 'limitedTeleportAssets' from the Relay Chain to the Assets Parachain to have funds to send them back - actions: - - extrinsics: - - chain: *relay_chain - signer: *rc_signer - pallet: xcmPallet - call: limitedTeleportAssets - args: [ - *ap_dest, # destination - *ap_benf, # beneficiary - { v3: [ *rc_ksm_fungible ] }, # assets - 0, # feeAssetItem - { unlimited: true } # weightLimit - ] - events: - - name: xcmPallet.Attempted - threshold: *weight_threshold - result: { outcome: { Complete: { refTime: '761,173,000', proofSize: 0 }}} - - name: dmpQueue.ExecutedDownward - chain: *assets_parachain - threshold: *weight_threshold - result: { outcome: { Complete: { refTime: '166,944,000', proofSize: 0 }}} - - - name: Get the balances of the Assets Parachain's sender & Relay Chain's receiver - actions: - - queries: - balance_ap_sender_before: - chain: *assets_parachain - pallet: system - call: account - args: [ *ap_wallet ] - balance_rc_receiver_before: - chain: *relay_chain - pallet: system - call: account - args: [ *rc_wallet ] - its: - - name: Should teleport native assets back from Assets Parachain to the Relay Chain - actions: - - extrinsics: - - chain: *assets_parachain - signer: *ap_signer - pallet: polkadotXcm - call: limitedTeleportAssets - args: [ - *rc_dest, # destination - *rc_benf, # beneficiary - { v3: [ *ap_ksm_fungible ] }, # assets - 0, # feeAssetItem - { unlimited: true } # weightLimit - ] - events: - - name: polkadotXcm.Attempted - threshold: *weight_threshold - result: { outcome: { Complete: { refTime: '539,494,000', proofSize: '7,133' }}} - - name: messageQueue.Processed - chain: *relay_chain - threshold: *weight_threshold - result: { origin: { Ump: { Para: '1,000' } }, weightUsed: { refTime: '298,716,000', proofSize: '0' }, success: true } - - queries: - balance_ap_sender_after: - chain: *assets_parachain - pallet: system - call: account - args: [ *ap_wallet ] - balance_rc_receiver_after: - chain: *relay_chain - pallet: system - call: account - args: [ *rc_wallet ] - - - name: Should 
reduce the balance of the sender - actions: - - asserts: - balanceDecreased: - args: [ - { - balances: { - before: $balance_ap_sender_before, - after: $balance_ap_sender_after, - }, - amount: *amount - } - ] - - - name: Should increase the balance of the receiver - actions: - - asserts: - balanceIncreased: - args: [ - { - balances: { - before: $balance_rc_receiver_before, - after: $balance_rc_receiver_after, - } - } - ] - - - name: polkadotXcm.send | Native - Transact(system.remark) - its: - - name: Assets Parachain SHOULD NOT be able to dispatch 'send' call - actions: - - extrinsics: - - chain: *assets_parachain - signer: *ap_signer - pallet: polkadotXcm - call: send - args: [ - *rc_dest, # destination - { - v3: [ #message - { - UnpaidExecution: { - weightLimit: Unlimited - } - }, - { - Transact: { - originKind: Native, - requireWeightAtMost: *weight_at_most, - call: $system_remark - } - } - ] - } - ] - events: - - name: system.ExtrinsicFailed - result: { dispatchError: BadOrigin } - - - name: polkadotXcm.limitedReserveTransferAssets - its: - - name: Should NOT be able to reserve transfer native assets from the Assets Parachain to the Relay Chain - actions: - - extrinsics: - - chain: *assets_parachain - signer: *ap_signer - pallet: polkadotXcm - call: limitedReserveTransferAssets - args: [ - *rc_dest, # destination - *rc_benf, # beneficiary - { v3: [ *ap_ksm_fungible ] }, # assets - 0, # feeAssetItem - { unlimited: true } # weightLimit - ] - events: - - name: polkadotXcm.Attempted - result: { outcome: { Error: Barrier }} diff --git a/cumulus/parachains/integration-tests/e2e/assets/asset-hub-kusama/0_xcm/3_force_hrmp-open-channels.yml b/cumulus/parachains/integration-tests/e2e/assets/asset-hub-kusama/0_xcm/3_force_hrmp-open-channels.yml deleted file mode 100644 index dfdae028f00..00000000000 --- a/cumulus/parachains/integration-tests/e2e/assets/asset-hub-kusama/0_xcm/3_force_hrmp-open-channels.yml +++ /dev/null @@ -1,122 +0,0 @@ ---- -settings: - chains: - relay_chain: &relay_chain - wsPort: 9900 - assets_parachain: - wsPort: 9910 - paraId: &ap_id 1000 - penpal_parachain: - wsPort: 9920 - paraId: &pp_id 2000 - variables: - common: - amount: &amount 2000000000000 - hrmp_channels: - proposed_max_capacity: &max_capacity 8 - proposed_max_message_size: &max_message_size 8192 - channel: &channel { - maxCapacity: *max_capacity, - maxTotalSize: *max_message_size, - maxMessageSize: *max_message_size, - msgCount: 0, - totalSize: 0, - mqcHead: null, - senderDeposit: 0, - recipientDeposit: 0 - } - chains: - relay_chain: - signer: &rc_signer //Alice - assets_parachain_account: - sovereign_account: &ap_sovereign F7fq1jSNVTPfJmaHaXCMtatT1EZefCUsa7rRiQVNR5efcah - penpal_parachain: - sovereign_account: &pp_sovereign F7fq1jMZkfuCuoMTyiEVAP2DMpMt18WopgBqTJznLihLNbZ - -tests: - - name: HRMP - beforeEach: - - name: DEPENDENCY | Penpal Parachain Sovereign account in the Relay Chain needs to be funded - actions: - - extrinsics: - - chain: *relay_chain - signer: *rc_signer - pallet: balances - call: transfer - args: [ - *pp_sovereign, # destination - *amount, # value - ] - events: - - name: balances.Transfer - - - name: DEPENDENCY | Assets Parachain Sovereign account in the Relay Chain needs to be funded - actions: - - extrinsics: - - chain: *relay_chain - signer: *rc_signer - pallet: balances - call: transfer - args: [ - *ap_sovereign, # destination - *amount, # value - ] - events: - - name: balances.Transfer - describes: - - name: hrmp.forceOpenHrmpChannel (Penpal Parachain → Assets Parachain) - its: - - name: 
Open Penpal Parachain to Assets Parachain - actions: - - extrinsics: - - chain: *relay_chain - signer: *rc_signer - sudo: true - pallet: hrmp - call: forceOpenHrmpChannel - args: [ - *pp_id, - *ap_id, - *max_capacity, - *max_message_size - ] - events: - - name: sudo.Sudid - result: { sudoResult: Ok } - - name: hrmp.HrmpChannelForceOpened - - - name: hrmp.forceOpenHrmpChannel (Assets Parachain → PenPal Parachain) - its: - - name: Open Assets Parachain to PenPal Parachain - actions: - - extrinsics: - - chain: *relay_chain - signer: *rc_signer - sudo: true - pallet: hrmp - call: forceOpenHrmpChannel - args: [ - *ap_id, - *pp_id, - *max_capacity, - *max_message_size - ] - events: - - name: sudo.Sudid - result: { sudoResult: Ok } - - name: hrmp.HrmpChannelForceOpened - - - name: hrmp.forceProcessHrmpOpen (make sure all the channels are open) - its: - - name: Make sure all the pending channels are open - actions: - - extrinsics: - - chain: *relay_chain - signer: *rc_signer - sudo: true - pallet: hrmp - call: forceProcessHrmpOpen - args: [ 2 ] - events: - - name: sudo.Sudid - result: { sudoResult: Ok } diff --git a/cumulus/parachains/integration-tests/e2e/assets/asset-hub-kusama/0_xcm/4_hrmp.yml b/cumulus/parachains/integration-tests/e2e/assets/asset-hub-kusama/0_xcm/4_hrmp.yml deleted file mode 100644 index 02e53da7558..00000000000 --- a/cumulus/parachains/integration-tests/e2e/assets/asset-hub-kusama/0_xcm/4_hrmp.yml +++ /dev/null @@ -1,388 +0,0 @@ ---- -# Note: This tests depends on the 3_hrmp-open-channels.yml for opening channels, otherwise teleports aren't going to -# work. -settings: - chains: - relay_chain: &relay_chain - wsPort: 9900 - assets_parachain: &assets_parachain - wsPort: 9910 - paraId: &ap_id 1000 - penpal_parachain: &penpal_parachain - wsPort: 9920 - paraId: &pp_id 2000 - variables: - common: - mint_amount: &mint_amount 1000000000000 - amount: &amount 100000000000 - require_weight_at_most: &weight_at_most {refTime: 1200000000, proofSize: 20000} - amount_to_send: &amount_to_send 500000000000 - weight_threshold: &weight_threshold { refTime: [10, 10], proofSize: [10, 10] } - chains: - relay_chain: - signer: &rc_signer //Alice - assets_parachain_destination: &ap_dest { v3: { 0, interior: { x1: { parachain: *ap_id }}}} - assets_parachain_dest_routed: &ap_dest_routed { v3: { parents: 1, interior: { x1: { parachain: *ap_id } }}} - assets_parachain_account: - signer: &ap_signer //Alice - wallet: &ap_wallet HNZata7iMYWmk5RvZRTiAsSDhV8366zq2YGb3tLH5Upf74F - asset_id: &asset_id 2 - assets_pallet_id: &assets_pallet_id 50 - asset_min_balance: &asset_ed 1000 - penpal_parachain_destination: &pp_dest { v3: { parents: 1, interior: { x1: { parachain: *pp_id } }}} - ksm: &ap_ksm { concrete: { parents: 1, interior: { here: true }}} - ksm_fungible: &ap_ksm_fungible { id: *ap_ksm, fun: { fungible: *amount }} - suff_asset: &suff_asset { concrete: { parents: 0, interior: { x2: [ { PalletInstance: *assets_pallet_id }, { GeneralIndex: *asset_id } ] }}} - suff_asset_fail: &suff_asset_fail { concrete: { parents: 0, interior: { x2: [ { PalletInstance: *assets_pallet_id }, { GeneralIndex: 3 } ] }}} - suff_asset_fungible_fail: &ap_suff_asset_fungible_fail { id: *suff_asset_fail, fun: { fungible: 200000000000 }} - penpal_parachain: - sovereign_account: &pp_sovereign_sibl FBeL7EAeUroLWXW1yfKboiqTqVfbRBcsUKd6QqVf4kGBySS - signer: &pp_signer //Alice - penpal_parachain_account: &pp_acc '0xd43593c715fdd31c61141abd04a99fd6822c8558854ccde39a5684e7a56da27d' - decodedCalls: - force_create_asset: - chain: 
*assets_parachain - pallet: assets - call: forceCreate - args: [ - *asset_id, - { Id: *ap_wallet }, # owner - true, # isSufficient - *asset_ed # minBalance - ] - force_create_asset2: - chain: *assets_parachain - pallet: assets - call: forceCreate - args: [ - *asset_id, - { Id: *ap_wallet }, # owner - true, # isSufficient - *asset_ed # minBalance - ] - -tests: - - name: HRMP - describes: - - name: polkadotXcm.limitedReserveTransferAssets (Asset) | Assets Parachain -> Penpal Parachain - before: - - name: DEPENDENCY | A sufficient Asset should exist in the Assets Parachain - actions: - - extrinsics: - - chain: *relay_chain - signer: *rc_signer - sudo: true - pallet: xcmPallet - call: send - args: [ - *ap_dest, # destination - { - v3: [ #message - { - UnpaidExecution: { - weightLimit: Unlimited - } - }, - { - SetTopic: '0x0123456789012345678901234567891201234567890123456789012345678912' - }, - { - Transact: { - originKind: Superuser, - requireWeightAtMost: *weight_at_most, - call: $force_create_asset - } - } - ] - } - ] - events: - - name: xcmPallet.Sent - - name: dmpQueue.ExecutedDownward - chain: *assets_parachain - threshold: *weight_threshold - result: { outcome: { Complete: { refTime: '1,216,703,000', proofSize: '20,000' }}} - - queries: - forced_created_asset: - chain: *assets_parachain - pallet: assets - call: asset - args: [ *asset_id ] - - asserts: - isSome: - args: [ $forced_created_asset ] - - - name: DEPENDENCY | Some Assets should be minted for the sender - actions: - - extrinsics: - - chain: *assets_parachain - signer: *ap_signer - pallet: assets - call: mint - args: [ - *asset_id, - *ap_wallet, - *mint_amount - ] - events: - - name: assets.Issued - result: { assetId: *asset_id, owner: *ap_wallet, amount: *mint_amount } - - its: - - name: Assets Parachain should be able to reserve transfer an Asset to Penpal Parachain - actions: - - extrinsics: - - chain: *assets_parachain - signer: *ap_signer - pallet: polkadotXcm - call: limitedReserveTransferAssets - args: [ - *pp_dest, # destination - { # beneficiary - V3: { - parents: 0, - interior: { - X1: { - AccountId32: { - id: *pp_acc - } - } - } - } - }, - { # assets - V3: [ - { - id: { - Concrete: { - parents: 0, - interior: { - X2: [ - { - PalletInstance: *assets_pallet_id - }, - { - GeneralIndex: *asset_id - } - ] - } - } - }, - fun: { - Fungible: *amount_to_send - } - } - ] - }, - 0, # feeAssetItem - Unlimited # weightLimit - ] - events: - - name: polkadotXcm.Attempted - threshold: *weight_threshold - result: { outcome: { Complete: { refTime: '679,150,000', proofSize: '6,196' }}} - - name: assets.Transferred - result: { - assetId: *asset_id, - from: *ap_wallet, - to: *pp_sovereign_sibl, - amount: *amount_to_send - } - - - name: polkadotXcm.limitedReserveTransferAssets (KSM) | Assets Parachain -> Penpal Parachain - its: - - name: Assets Parachain should be able to reserve transfer KSM to Penpal Parachain - actions: - - extrinsics: - - chain: *assets_parachain - signer: *ap_signer - pallet: polkadotXcm - call: limitedReserveTransferAssets - args: [ - *pp_dest, # destination - { # beneficiary - V3: { - parents: 0, - interior: { - X1: { - AccountId32: { - id: *pp_acc - } - } - } - } - }, - { # assets - V3: [ - *ap_ksm_fungible - ] - }, - 0, # feeAssetItem - Unlimited # weightLimit - ] - events: - - name: polkadotXcm.Attempted - threshold: *weight_threshold - result: { outcome: { Complete: { refTime: '679,150,000', proofSize: '6,196' }}} - - name: balances.Endowed - result: { - account: *pp_sovereign_sibl, - freeBalance: *amount - } - - 
- name: polkadotXcm.send( assets.forceCreateAsset ) | Penpal Parachain -> Assets Parachain - before: - - name: Get the asset balance of the Penpal Parachain Sovereign account in Assets Parachain - actions: - - queries: - assets_balance_pp_sovereign_before: - chain: *assets_parachain - pallet: assets - call: account - args: [ - *asset_id, - *pp_sovereign_sibl - ] - its: - - name: Penpal Parachain should be able to send XCM message paying its fee with sufficient asset in Assets Parachain - actions: - - extrinsics: - - chain: *penpal_parachain - signer: *pp_signer - sudo: true - pallet: polkadotXcm - call: send - args: [ - *ap_dest_routed, # destination - { - v3: [ #message - { - WithdrawAsset: [ - { - id: { - concrete: { - parents: 0, - interior: { - X2: [ - { PalletInstance: *assets_pallet_id }, - { GeneralIndex: *asset_id } - ] - } - } - }, - fun: { fungible: *amount }} - ] - }, - { - BuyExecution: { - fees: { id: *suff_asset, fun: { fungible: *amount }}, - weightLimit: Unlimited - } - }, - { - Transact: { - originKind: SovereignAccount, - requireWeightAtMost: *weight_at_most, - call: $force_create_asset2 - } - }, - { - RefundSurplus - }, - { - DepositAsset: { - assets: { Wild: All }, - beneficiary: { - parents: 0, - interior: { - X1: { - AccountId32: { - network: , # None - id: *pp_acc - } - } - }} - } - } - ] - } - ] - events: - - name: sudo.Sudid - result: { sudoResult: Ok } - - name: polkadotXcm.Sent - - name: assets.Burned - chain: *assets_parachain - result: { assetId: *asset_id, owner: *pp_sovereign_sibl } - - name: assets.Issued - chain: *assets_parachain - result: { assetId: *asset_id } - - queries: - assets_balance_pp_sovereign_after: - chain: *assets_parachain - pallet: assets - call: account - args: [ - *asset_id, - *pp_sovereign_sibl - ] - forced_created_asset2: - chain: *assets_parachain - pallet: assets - call: asset - args: [ 3 ] - - asserts: - isSome: - args: [ $forced_created_asset2 ] - - name: Should reduce the assets balance of the Penpal Parachain's SovereignAccount in the Assets Parachain - actions: - - asserts: - assetsDecreased: - args: [ - { - balances: { - before: $assets_balance_pp_sovereign_before, - after: $assets_balance_pp_sovereign_after, - }, - } - ] - - - name: Penpal Parachain SHOULD NOT be able to send XCM message paying its fee with sufficient assets if not enough balance - actions: - - extrinsics: - - chain: *penpal_parachain - signer: *pp_signer - sudo: true - pallet: polkadotXcm - call: send - args: [ - *ap_dest_routed, # destination - { - v3: [ #message - { - WithdrawAsset: [*ap_suff_asset_fungible_fail] - }, - { - BuyExecution: { - fees: *ap_suff_asset_fungible_fail, - weightLimit: Unlimited - } - }, - { - Transact: { - originKind: SovereignAccount, - requireWeightAtMost: *weight_at_most, - call: $force_create_asset2 - } - } - ] - } - ] - events: - - name: xcmpQueue.Fail - chain: *assets_parachain - threshold: *weight_threshold - result: { - error: FailedToTransactAsset, - weight: { refTime: '152,426,000', proofSize: '3,593' } - } diff --git a/cumulus/parachains/integration-tests/e2e/assets/asset-hub-kusama/config.toml b/cumulus/parachains/integration-tests/e2e/assets/asset-hub-kusama/config.toml deleted file mode 100644 index 1ec06b3fa10..00000000000 --- a/cumulus/parachains/integration-tests/e2e/assets/asset-hub-kusama/config.toml +++ /dev/null @@ -1,71 +0,0 @@ -[relaychain] -default_command = "./bin/polkadot" -default_args = [ "-lparachain=debug", "-lxcm=trace" ] -chain = "kusama-local" - - [[relaychain.nodes]] - name = "alice" - ws_port = 9900 
- validator = true - args = ["--state-cache-size=0"] - - [[relaychain.nodes]] - name = "bob" - ws_port = 9901 - validator = true - - [[relaychain.nodes]] - name = "charlie" - ws_port = 9902 - validator = true - - [[relaychain.nodes]] - name = "dave" - ws_port = 9903 - validator = true - -[[parachains]] -id = 1000 -chain = "asset-hub-kusama-local" -cumulus_based = true - - [[parachains.collators]] - name = "collator1" - ws_port = 9910 - command = "./bin/polkadot-parachain" - args = [ "-lxcm=trace", "--state-cache-size=0" ] - - [[parachains.collators]] - name = "collator2" - ws_port = 9911 - command = "./bin/polkadot-parachain" - args = [ "-lxcm=trace" ] - -[[parachains]] -id = 2000 -chain = "penpal-kusama-2000" -cumulus_based = true - - [[parachains.collators]] - name = "collator3" - ws_port = 9920 - command = "./bin/polkadot-parachain" - args = [ "-lxcm=trace", "--state-cache-size=0" ] - - [[parachains.collators]] - name = "collator4" - ws_port = 9921 - command = "./bin/polkadot-parachain" - args = [ "-lxcm=trace" ] - -# [[hrmpChannels]] -# sender = 1000 -# recipient = 2000 -# maxCapacity = 8 -# maxMessageSize = 8192 - -# [[hrmpChannels]] -# sender = 2000 -# recipient = 1000 -# maxCapacity = 8 -# maxMessageSize = 8192 diff --git a/cumulus/parachains/integration-tests/e2e/assets/asset-hub-polkadot/0_xcm/0_init.yml b/cumulus/parachains/integration-tests/e2e/assets/asset-hub-polkadot/0_xcm/0_init.yml deleted file mode 100644 index a6d3fb3ec83..00000000000 --- a/cumulus/parachains/integration-tests/e2e/assets/asset-hub-polkadot/0_xcm/0_init.yml +++ /dev/null @@ -1,145 +0,0 @@ ---- -settings: - chains: - relay_chain: &relay_chain - wsPort: 9800 - assets_parachain: &assets_parachain - wsPort: 9810 - paraId: &ap_id 1000 - penpal_parachain: &penpal_parachain - wsPort: 9820 - paraId: &pp_id 2000 - variables: - common: - xcm_version: &xcm_version '3' - require_weight_at_most: &weight_at_most {refTime: 1000000000, proofSize: 200000} - weight_threshold: &weight_threshold { refTime: [10, 10], proofSize: [10, 10] } - chains: - relay_chain: - signer: &rc_signer //Alice - assets_parachain_destination: &ap_dest { v3: { 0, interior: { x1: { parachain: *ap_id }}}} - penpal_parachain: - signer: &pp_signer //Alice - decodedCalls: - ap_force_xcm_version: - chain: *assets_parachain - pallet: polkadotXcm - call: forceXcmVersion - args: [ - { # location - parents: 1, - interior: Here - }, - *xcm_version # xcmVersion - ] - -tests: - - name: Initialize Chains - its: - - name: XCM supported versions between chains - actions: - - extrinsics: # Relay Chain sets supported version for Asset Parachain - - chain: *relay_chain - sudo: true - signer: *rc_signer - pallet: xcmPallet - call: forceXcmVersion - args: [ - { # location - parents: 0, - interior: { - X1: { - Parachain: *ap_id - } - } - }, - *xcm_version # xcmVersion - ] - events: - - name: sudo.Sudid - result: { sudoResult: Ok } - - name: xcmPallet.SupportedVersionChanged - result: { location: { parents: 0, interior: { X1: { Parachain: *ap_id }}}, version: *xcm_version } - - extrinsics: # Relay Chain sets supported version for Penpal Parachain - - chain: *relay_chain - sudo: true - signer: *rc_signer - pallet: xcmPallet - call: forceXcmVersion - args: [ - { # location - parents: 0, - interior: { - X1: { - Parachain: *pp_id - } - } - }, - *xcm_version # xcmVersion - ] - events: - - name: sudo.Sudid - result: { sudoResult: Ok } - - name: xcmPallet.SupportedVersionChanged - result: { location: { parents: 0, interior: { X1: { Parachain: *pp_id }}}, version: *xcm_version 
} - - extrinsics: # Asset Parachain sets supported version for Relay Chain through it - - chain: *relay_chain - signer: *rc_signer - sudo: true - pallet: xcmPallet - call: send - args: [ - *ap_dest, # destination - { - v3: [ #message - { - UnpaidExecution: { - weightLimit: { - limited: { - refTime: 3200000000, - proofSize: 200000 - } - } - } - }, - { - Transact: { - originKind: Superuser, - requireWeightAtMost: *weight_at_most, - call: $ap_force_xcm_version - } - } - ] - } - ] - events: - - name: sudo.Sudid - result: { sudoResult: Ok } - - name: xcmPallet.Sent - - name: dmpQueue.ExecutedDownward - chain: *assets_parachain - threshold: *weight_threshold - result: { - outcome: { Complete: { refTime: '1,019,210,000', proofSize: '200,000' }} - } - - name: polkadotXcm.SupportedVersionChanged - chain: *assets_parachain - result: { location: { parents: 1, interior: Here }, version: *xcm_version } - - extrinsics: # Penpal Parachain sets supported version for Relay Chain - - chain: *penpal_parachain - signer: *pp_signer - sudo: true - pallet: polkadotXcm - call: forceXcmVersion - args: [ - { # location - parents: 1, - interior: Here - }, - *xcm_version # xcmVersion - ] - events: - - name: sudo.Sudid - result: { sudoResult: Ok } - - name: polkadotXcm.SupportedVersionChanged - result: { location: { parents: 1, interior: Here}, version: *xcm_version } diff --git a/cumulus/parachains/integration-tests/e2e/assets/asset-hub-polkadot/0_xcm/1_dmp.yml b/cumulus/parachains/integration-tests/e2e/assets/asset-hub-polkadot/0_xcm/1_dmp.yml deleted file mode 100644 index 36b296f3eb1..00000000000 --- a/cumulus/parachains/integration-tests/e2e/assets/asset-hub-polkadot/0_xcm/1_dmp.yml +++ /dev/null @@ -1,263 +0,0 @@ ---- -settings: - chains: - relay_chain: &relay_chain - wsPort: 9800 - assets_parachain: &assets_parachain - wsPort: 9810 - paraId: &ap_id 1000 - variables: - common: - weight_threshold: &weight_threshold { refTime: [10, 10], proofSize: [10, 10] } - chains: - relay_chain: - signer: &rc_signer //Alice - wallet: &rc_wallet HNZata7iMYWmk5RvZRTiAsSDhV8366zq2YGb3tLH5Upf74F - assets_parachain_destination: &ap_dest { v3: { parents: 0, interior: { x1: { parachain: *ap_id }}}} - assets_parachain_account: &ap_acc '0xd43593c715fdd31c61141abd04a99fd6822c8558854ccde39a5684e7a56da27d' - assets_parachain_beneficiary: &ap_benf {v3: { parents: 0, interior: { x1: { accountId32: { id: *ap_acc }}}}} - ksm: &rc_ksm { concrete: { parents: 0, interior: { here: true }}} - amount: &amount 1000000000000 - ksm_fungible: &rc_ksm_fungible { id: *rc_ksm, fun: { fungible: *amount }} - require_weight_at_most: &rc_weight_at_most {refTime: 1000000000, proofSize: 200000} - assets_parachain_account: - wallet: &ap_wallet HNZata7iMYWmk5RvZRTiAsSDhV8366zq2YGb3tLH5Upf74F - asset_id: &asset_id 1 - asset_min_balance: &asset_ed 1000 - decodedCalls: - force_create_asset: - chain: *assets_parachain - pallet: assets - call: forceCreate - args: [ - *asset_id, - { Id: *ap_wallet }, # owner - true, # isSufficient - *asset_ed # minBalance - ] - -tests: - - name: DMP - its: [] - describes: - - name: xcmPallet.limitedTeleportAssets - before: &before_get_balances - - name: Get the balances of the Relay Chain's sender & Assets Parachain's receiver - actions: - - queries: - balance_rc_sender_before: - chain: *relay_chain - pallet: system - call: account - args: [ *rc_wallet ] - balance_ap_receiver_before: - chain: *assets_parachain - pallet: system - call: account - args: [ *ap_wallet ] - its: - - name: Should teleport native assets from the Relay Chain to 
the Assets Parachain - actions: - - extrinsics: - - chain: *relay_chain - signer: *rc_signer - pallet: xcmPallet - call: limitedTeleportAssets - args: [ - *ap_dest, # destination - *ap_benf, # beneficiary - { v3: [ *rc_ksm_fungible ] }, # assets - 0, # feeAssetItem - { unlimited: true } # weightLimit - ] - events: - - name: xcmPallet.Attempted - threshold: *weight_threshold - result: { outcome: { Complete: { refTime: '3,000,000,000', proofSize: 0 }}} - - name: dmpQueue.ExecutedDownward - chain: *assets_parachain - threshold: *weight_threshold - result: { outcome: { Complete: { refTime: '166,944,000', proofSize: 0 }}} - - queries: - balance_rc_sender_after: - chain: *relay_chain - pallet: system - call: account - args: [ *rc_wallet ] - balance_ap_receiver_after: - chain: *assets_parachain - pallet: system - call: account - args: [ *ap_wallet ] - - - name: Should reduce the balance of the sender - actions: - - asserts: - balanceDecreased: - args: [ - { - balances: { - before: $balance_rc_sender_before, - after: $balance_rc_sender_after, - }, - amount: *amount - } - ] - - - name: Should increase the balance of the receiver - actions: - - asserts: - balanceIncreased: - args: [ - { - balances: { - before: $balance_ap_receiver_before, - after: $balance_ap_receiver_after, - } - } - ] - - - name: xcmPallet.send | Superuser - Transact(assets.forceCreate) - its: - - name: Relay Chain Superuser account SHOULD be able to execute a XCM Transact instruction in the Assets Parachain - actions: - - extrinsics: - - chain: *relay_chain - signer: *rc_signer - sudo: true - pallet: xcmPallet - call: send - args: [ - *ap_dest, # destination - { - v3: [ #message - { - UnpaidExecution: { - weightLimit: Unlimited - } - }, - { - Transact: { - originType: Superuser, - requireWeightAtMost: *rc_weight_at_most, - call: $force_create_asset - } - } - ] - } - ] - events: - - name: xcmPallet.Sent - - name: dmpQueue.ExecutedDownward - chain: *assets_parachain - threshold: *weight_threshold - result: { outcome: { Complete: { refTime: '1,014,103,000', proofSize: '200,000' }}} - - queries: - forced_created_asset: - chain: *assets_parachain - pallet: assets - call: asset - args: [ *asset_id ] - - asserts: - isSome: - args: [ $forced_created_asset ] - - - name: xcmPallet.send | Native - Transact(assets.forceCreate) - its: - - name: Relay Chain Native account SHOULD NOT be able to execute a XCM Transact instruction in the Assets Parachain - actions: - - extrinsics: - - chain: *relay_chain - signer: *rc_signer - pallet: xcmPallet - call: send - args: [ - *ap_dest, # destination - { - v3: [ #message - { - UnpaidExecution: { - weightLimit: Unlimited - } - }, - { - Transact: { - originType: Native, - requireWeightAtMost: *rc_weight_at_most, - call: $force_create_asset - } - } - ] - } - ] - events: - - name: system.ExtrinsicFailed - result: { dispatchError: BadOrigin } - - - name: xcmPallet.limitedReserveTransferAssets - before: *before_get_balances - its: - - name: SHOULD NOT reserved transfer native assets from the Relay Chain to the Assets Parachain - actions: - - extrinsics: - - chain: *relay_chain - signer: *rc_signer - pallet: xcmPallet - call: limitedReserveTransferAssets - args: [ - *ap_dest, # destination - *ap_benf, # beneficiary - { v3: [ *rc_ksm_fungible ] }, # assets - 0, # feeAssetItem - { unlimited: true } # weightLimit - ] - events: - - name: xcmPallet.Attempted - threshold: *weight_threshold - result: { outcome: { Complete: { refTime: '2,000,000,000', proofSize: 0 }}} - - name: dmpQueue.ExecutedDownward - chain: 
*assets_parachain - threshold: *weight_threshold - result: { - outcome: { - Incomplete: [ - { refTime: '1,000,000,000', proofSize: 0 }, - UntrustedReserveLocation - ] - } - } - - queries: - balance_rc_sender_after: - chain: *relay_chain - pallet: system - call: account - args: [ *rc_wallet ] - balance_ap_receiver_after: - chain: *assets_parachain - pallet: system - call: account - args: [ *ap_wallet ] - - - name: Should reduce the balance of the sender - actions: - - asserts: - balanceDecreased: - args: [ - { - balances: { - before: $balance_rc_sender_before, - after: $balance_rc_sender_after, - }, - amount: *amount - } - ] - - - name: Should keep the balance of the receiver - actions: - - asserts: - equal: - args: - [ - $balance_ap_receiver_before, - $balance_ap_receiver_after - ] diff --git a/cumulus/parachains/integration-tests/e2e/assets/asset-hub-polkadot/0_xcm/2_ump.yml b/cumulus/parachains/integration-tests/e2e/assets/asset-hub-polkadot/0_xcm/2_ump.yml deleted file mode 100644 index fa84d4b006a..00000000000 --- a/cumulus/parachains/integration-tests/e2e/assets/asset-hub-polkadot/0_xcm/2_ump.yml +++ /dev/null @@ -1,194 +0,0 @@ ---- -settings: - chains: - relay_chain: &relay_chain - wsPort: 9800 - assets_parachain: &assets_parachain - wsPort: 9810 - paraId: &ap_id 1000 - variables: - common: - amount: &amount 1000000000000 - require_weight_at_most: &weight_at_most {refTime: 1000000000, proofSize: 0} - weight_threshold: &weight_threshold { refTime: [10, 10], proofSize: [10, 10] } - chains: - relay_chain: - signer: &rc_signer //Alice - wallet: &rc_wallet HNZata7iMYWmk5RvZRTiAsSDhV8366zq2YGb3tLH5Upf74F - assets_parachain_destination: &ap_dest { v3: { 0, interior: { x1: { parachain: *ap_id }}}} - assets_parachain_account: &ap_acc '0xd43593c715fdd31c61141abd04a99fd6822c8558854ccde39a5684e7a56da27d' - assets_parachain_beneficiary: &ap_benf {v3: { parents: 0, interior: { x1: { accountId32: { id: *ap_acc }}}}} - ksm: &rc_ksm { concrete: { 0, interior: { here: true }}} - ksm_fungible: &rc_ksm_fungible { id: *rc_ksm, fun: { fungible: *amount }} - assets_parachain_account: - signer: &ap_signer //Alice - wallet: &ap_wallet HNZata7iMYWmk5RvZRTiAsSDhV8366zq2YGb3tLH5Upf74F - relay_chain_destination: &rc_dest { v3: { parents: 1, interior: { here: true }}} - assets_parachain_account: &rc_acc '0xd43593c715fdd31c61141abd04a99fd6822c8558854ccde39a5684e7a56da27d' - relay_chain_beneficiary: &rc_benf {v3: { parents: 0, interior: { x1: { accountId32: { id: *rc_acc }}}}} - ksm: &ap_ksm { concrete: { parents: 1, interior: { here: true }}} - ksm_fungible: &ap_ksm_fungible { id: *ap_ksm, fun: { fungible: *amount }} - decodedCalls: - system_remark: - chain: *relay_chain - pallet: system - call: remark - args: [ 0x0011 ] - -tests: - - name: UMP - describes: - - name: polkadotXcm.limitedTeleportAssets - before: - - name: DEPENDENCY | Do a 'limitedTeleportAssets' from the Relay Chain to the Assets Parachain to have funds to send them back - actions: - - extrinsics: - - chain: *relay_chain - signer: *rc_signer - pallet: xcmPallet - call: limitedTeleportAssets - args: [ - *ap_dest, # destination - *ap_benf, # beneficiary - { v3: [ *rc_ksm_fungible ] }, # assets - 0, # feeAssetItem - { unlimited: true } # weightLimit - ] - events: - - name: xcmPallet.Attempted - threshold: *weight_threshold - result: { outcome: { Complete: { refTime: '3,000,000,000', proofSize: 0 }}} - - name: dmpQueue.ExecutedDownward - chain: *assets_parachain - threshold: *weight_threshold - result: { outcome: { Complete: { refTime: '166,944,000', 
proofSize: 0 }}} - - - name: Get the balances of the Assets Parachain's sender & Relay Chain's receiver - actions: - - queries: - balance_ap_sender_before: - chain: *assets_parachain - pallet: system - call: account - args: [ *ap_wallet ] - balance_rc_receiver_before: - chain: *relay_chain - pallet: system - call: account - args: [ *rc_wallet ] - - its: - - name: Should be able to teleport native assets back from Assets Parachain to the Relay Chain - actions: - - extrinsics: - - chain: *assets_parachain - signer: *ap_signer - pallet: polkadotXcm - call: limitedTeleportAssets - args: [ - *rc_dest, # destination - *rc_benf, # beneficiary - { v3: [ *ap_ksm_fungible ] }, # assets - 0, # feeAssetItem - { unlimited: true } # weightLimit - ] - events: - - name: polkadotXcm.Attempted - threshold: *weight_threshold - result: { outcome: { Complete: { refTime: '533,283,000', proofSize: '7,096' }}} - - name: messageQueue.Processed - chain: *relay_chain - threshold: *weight_threshold - result: { origin: { Ump: { Para: '1,000' } }, weightUsed: { refTime: '4,000,000,000', proofSize: '0' }, success: true } - - queries: - balance_ap_sender_after: - chain: *assets_parachain - pallet: system - call: account - args: [ *ap_wallet ] - balance_rc_receiver_after: - chain: *relay_chain - pallet: system - call: account - args: [ *rc_wallet ] - - - name: Should reduce the balance of the sender - actions: - - asserts: - balanceDecreased: - args: [ - { - balances: { - before: $balance_ap_sender_before, - after: $balance_ap_sender_after, - }, - amount: *amount - } - ] - - - name: Should increase the balance of the receiver - actions: - - asserts: - balanceIncreased: - args: [ - { - balances: { - before: $balance_rc_receiver_before, - after: $balance_rc_receiver_after, - } - } - ] - - - name: polkadotXcm.send | Native - Transact(system.remark) - its: - - name: Assets Parachain SHOULD NOT be able to dispatch 'send' call - actions: - - extrinsics: - - chain: *assets_parachain - signer: *ap_signer - pallet: polkadotXcm - call: send - args: [ - *rc_dest, # destination - { - v3: [ #message - { - UnpaidExecution: { - weightLimit: Unlimited - } - }, - { - Transact: { - originType: Native, - requireWeightAtMost: *weight_at_most, - call: $system_remark - } - } - ] - } - ] - events: - - name: system.ExtrinsicFailed - attributes: - - type: SpRuntimeDispatchError - value: BadOrigin - - - name: polkadotXcm.limitedReserveTransferAssets - its: - - name: Should NOT be able to reserve transfer native assets from the Assets Parachain to the Relay Chain - actions: - - extrinsics: - - chain: *assets_parachain - signer: *ap_signer - pallet: polkadotXcm - call: limitedReserveTransferAssets - args: [ - *rc_dest, # destination - *rc_benf, # beneficiary - { v3: [ *ap_ksm_fungible ] }, # assets - 0, # feeAssetItem - { unlimited: true } # weightLimit - ] - events: - - name: polkadotXcm.Attempted - result: { outcome: { Error: Barrier }} diff --git a/cumulus/parachains/integration-tests/e2e/assets/asset-hub-polkadot/0_xcm/3_force_hrmp-open-channels.yml b/cumulus/parachains/integration-tests/e2e/assets/asset-hub-polkadot/0_xcm/3_force_hrmp-open-channels.yml deleted file mode 100644 index ecf344a073b..00000000000 --- a/cumulus/parachains/integration-tests/e2e/assets/asset-hub-polkadot/0_xcm/3_force_hrmp-open-channels.yml +++ /dev/null @@ -1,120 +0,0 @@ ---- -settings: - chains: - relay_chain: &relay_chain - wsPort: 9800 - assets_parachain: - wsPort: 9810 - paraId: &ap_id 1000 - penpal_parachain: - wsPort: 9820 - paraId: &pp_id 2000 - variables: - 
common: - amount: &amount 2000000000000 - hrmp_channels: - proposed_max_capacity: &max_capacity 8 - proposed_max_message_size: &max_message_size 8192 - channel: &channel { - maxCapacity: *max_capacity, - maxTotalSize: *max_message_size, - maxMessageSize: *max_message_size, - msgCount: 0, - totalSize: 0, - mqcHead: null, - senderDeposit: 0, - recipientDeposit: 0 - } - chains: - relay_chain: - signer: &rc_signer //Alice - assets_parachain_account: - sovereign_account: &ap_sovereign 5Ec4AhPZk8STuex8Wsi9TwDtJQxKqzPJRCH7348Xtcs9vZLJ - penpal_parachain: - sovereign_account: &pp_sovereign F7fq1jMZkfuCuoMTyiEVAP2DMpMt18WopgBqTJznLihLNbZ - -tests: - - name: HRMP - beforeEach: - - name: DEPENDENCY | Penpal Parachain Sovereign account in the Relay Chain needs to be funded - actions: - - extrinsics: - - chain: *relay_chain - signer: *rc_signer - pallet: balances - call: transfer - args: [ - *pp_sovereign, # destination - *amount, # value - ] - events: - - name: balances.Transfer - - - name: DEPENDENCY | Assets Parachain Sovereign account in the Relay Chain needs to be funded - actions: - - extrinsics: - - chain: *relay_chain - signer: *rc_signer - pallet: balances - call: transfer - args: [ - *ap_sovereign, # destination - *amount, # value - ] - events: - - name: balances.Transfer - describes: - - name: hrmp.hrmpInitOpenChannel (Penpal Parachain → Assets Parachain) - its: - - name: Open Penpal Parachain to Assets Parachain - actions: - - extrinsics: - - chain: *relay_chain - signer: *rc_signer - sudo: true - pallet: hrmp - call: forceOpenHrmpChannel - args: [ - *pp_id, - *ap_id, - *max_capacity, - *max_message_size - ] - events: - - name: sudo.Sudid - result: { sudoResult: Ok } - - name: hrmp.HrmpChannelForceOpened - - name: hrmp.hrmpInitOpenChannel (Assets Parachain → PenPal Parachain) - its: - - name: Open Assets Parachain to PenPal Parachain - actions: - - extrinsics: - - chain: *relay_chain - signer: *rc_signer - sudo: true - pallet: hrmp - call: forceOpenHrmpChannel - args: [ - *ap_id, - *pp_id, - *max_capacity, - *max_message_size - ] - events: - - name: sudo.Sudid - result: { sudoResult: Ok } - - name: hrmp.HrmpChannelForceOpened - - name: hrmp.forceProcessHrmpOpen (make sure all the channels are open) - its: - - name: Make sure all the pending channels are open - actions: - - extrinsics: - - chain: *relay_chain - signer: *rc_signer - sudo: true - pallet: hrmp - call: forceProcessHrmpOpen - args: [ 2 ] - events: - - name: sudo.Sudid - result: { sudoResult: Ok } diff --git a/cumulus/parachains/integration-tests/e2e/assets/asset-hub-polkadot/0_xcm/4_hrmp.yml b/cumulus/parachains/integration-tests/e2e/assets/asset-hub-polkadot/0_xcm/4_hrmp.yml deleted file mode 100644 index 681af698c16..00000000000 --- a/cumulus/parachains/integration-tests/e2e/assets/asset-hub-polkadot/0_xcm/4_hrmp.yml +++ /dev/null @@ -1,388 +0,0 @@ ---- -# Note: This tests depends on the 3_hrmp-open-channels.yml for opening channels, otherwise teleports aren't going to -# work. 
-settings: - chains: - relay_chain: &relay_chain - wsPort: 9800 - assets_parachain: &assets_parachain - wsPort: 9810 - paraId: &ap_id 1000 - penpal_parachain: &penpal_parachain - wsPort: 9820 - paraId: &pp_id 2000 - variables: - common: - mint_amount: &mint_amount 1000000000000 - amount: &amount 1000000000000 - require_weight_at_most: &weight_at_most {refTime: 1200000000, proofSize: 20000} - amount_to_send: &amount_to_send 500000000000 - weight_threshold: &weight_threshold { refTime: [10, 10], proofSize: [10, 10] } - chains: - relay_chain: - signer: &rc_signer //Alice - assets_parachain_destination: &ap_dest { v3: { 0, interior: { x1: { parachain: *ap_id }}}} - assets_parachain_dest_routed: &ap_dest_routed { v3: { parents: 1, interior: { x1: { parachain: *ap_id } }}} - assets_parachain_account: - signer: &ap_signer //Alice - wallet: &ap_wallet 15oF4uVJwmo4TdGW7VfQxNLavjCXviqxT9S1MgbjMNHr6Sp5 - asset_id: &asset_id 2 - assets_pallet_id: &assets_pallet_id 50 - asset_min_balance: &asset_ed 1000 - penpal_parachain_destination: &pp_dest { v3: { parents: 1, interior: { x1: { parachain: *pp_id } }}} - ksm: &ap_ksm { concrete: { parents: 1, interior: { here: true }}} - ksm_fungible: &ap_ksm_fungible { id: *ap_ksm, fun: { fungible: *amount }} - suff_asset: &suff_asset { concrete: { parents: 0, interior: { x2: [ { PalletInstance: *assets_pallet_id }, { GeneralIndex: *asset_id } ] }}} - suff_asset_fail: &suff_asset_fail { concrete: { parents: 0, interior: { x2: [ { PalletInstance: *assets_pallet_id }, { GeneralIndex: 3 } ] }}} - suff_asset_fungible_fail: &ap_suff_asset_fungible_fail { id: *suff_asset_fail, fun: { fungible: 200000000000 }} - penpal_parachain: - sovereign_account: &pp_sovereign_sibl 13cKp89Msu7M2PiaCuuGr1BzAsD5V3vaVbDMs3YtjMZHdGwR - signer: &pp_signer //Alice - penpal_parachain_account: &pp_acc '0xd43593c715fdd31c61141abd04a99fd6822c8558854ccde39a5684e7a56da27d' - decodedCalls: - force_create_asset: - chain: *assets_parachain - pallet: assets - call: forceCreate - args: [ - *asset_id, - { Id: *ap_wallet }, # owner - true, # isSufficient - *asset_ed # minBalance - ] - force_create_asset2: - chain: *assets_parachain - pallet: assets - call: forceCreate - args: [ - *asset_id, - { Id: *ap_wallet }, # owner - true, # isSufficient - *asset_ed # minBalance - ] - -tests: - - name: HRMP - describes: - - name: polkadotXcm.limitedReserveTransferAssets (Asset) | Assets Parachain -> Penpal Parachain - before: - - name: DEPENDENCY | A sufficient Asset should exist in the Assets Parachain - actions: - - extrinsics: - - chain: *relay_chain - signer: *rc_signer - sudo: true - pallet: xcmPallet - call: send - args: [ - *ap_dest, # destination - { - v3: [ #message - { - UnpaidExecution: { - weightLimit: Unlimited - } - }, - { - SetTopic: '0x0123456789012345678901234567891201234567890123456789012345678912' - }, - { - Transact: { - originKind: Superuser, - requireWeightAtMost: *weight_at_most, - call: $force_create_asset - } - } - ] - } - ] - events: - - name: xcmPallet.Sent - - name: dmpQueue.ExecutedDownward - chain: *assets_parachain - threshold: *weight_threshold - result: { outcome: { Complete: { refTime: '1,216,703,000', proofSize: '20,000' }}} - - queries: - forced_created_asset: - chain: *assets_parachain - pallet: assets - call: asset - args: [ *asset_id ] - - asserts: - isSome: - args: [ $forced_created_asset ] - - - name: DEPENDENCY | Some Assets should be minted for the sender - actions: - - extrinsics: - - chain: *assets_parachain - signer: *ap_signer - pallet: assets - call: mint - args: [ - 
*asset_id, - *ap_wallet, - *mint_amount - ] - events: - - name: assets.Issued - result: { assetId: *asset_id, owner: *ap_wallet, amount: *mint_amount } - - its: - - name: Assets Parachain should be able to reserve transfer an Asset to Penpal Parachain - actions: - - extrinsics: - - chain: *assets_parachain - signer: *ap_signer - pallet: polkadotXcm - call: limitedReserveTransferAssets - args: [ - *pp_dest, # destination - { # beneficiary - V3: { - parents: 0, - interior: { - X1: { - AccountId32: { - id: *pp_acc - } - } - } - } - }, - { # assets - V3: [ - { - id: { - Concrete: { - parents: 0, - interior: { - X2: [ - { - PalletInstance: *assets_pallet_id - }, - { - GeneralIndex: *asset_id - } - ] - } - } - }, - fun: { - Fungible: *amount_to_send - } - } - ] - }, - 0, # feeAssetItem - Unlimited # weightLimit - ] - events: - - name: polkadotXcm.Attempted - threshold: *weight_threshold - result: { outcome: { Complete: { refTime: '673,627,000', proofSize: '6,196' }}} - - name: assets.Transferred - result: { - assetId: *asset_id, - from: *ap_wallet, - to: *pp_sovereign_sibl, - amount: *amount_to_send - } - - - name: polkadotXcm.limitedReserveTransferAssets (KSM) | Assets Parachain -> Penpal Parachain - its: - - name: Assets Parachain should be able to reserve transfer KSM to Penpal Parachain - actions: - - extrinsics: - - chain: *assets_parachain - signer: *ap_signer - pallet: polkadotXcm - call: limitedReserveTransferAssets - args: [ - *pp_dest, # destination - { # beneficiary - V3: { - parents: 0, - interior: { - X1: { - AccountId32: { - id: *pp_acc - } - } - } - } - }, - { # assets - V3: [ - *ap_ksm_fungible - ] - }, - 0, # feeAssetItem - Unlimited # weightLimit - ] - events: - - name: polkadotXcm.Attempted - threshold: *weight_threshold - result: { outcome: { Complete: { refTime: '679,150,000', proofSize: '6,196' }}} - - name: balances.Endowed - result: { - account: *pp_sovereign_sibl, - freeBalance: *amount - } - - - name: polkadotXcm.send( assets.forceCreateAsset ) | Penpal Parachain -> Assets Parachain - before: - - name: Get the asset balance of the Penpal Parachain Sovereign account in Assets Parachain - actions: - - queries: - assets_balance_pp_sovereign_before: - chain: *assets_parachain - pallet: assets - call: account - args: [ - *asset_id, - *pp_sovereign_sibl - ] - its: - - name: Penpal Parachain should be able to send XCM message paying its fee with sufficient asset in Assets Parachain - actions: - - extrinsics: - - chain: *penpal_parachain - signer: *pp_signer - sudo: true - pallet: polkadotXcm - call: send - args: [ - *ap_dest_routed, # destination - { - v3: [ #message - { - WithdrawAsset: [ - { - id: { - concrete: { - parents: 0, - interior: { - X2: [ - { PalletInstance: *assets_pallet_id }, - { GeneralIndex: *asset_id } - ] - } - } - }, - fun: { fungible: *amount_to_send }} - ] - }, - { - BuyExecution: { - fees: { id: *suff_asset, fun: { fungible: *amount_to_send }}, - weightLimit: Unlimited - } - }, - { - Transact: { - originKind: SovereignAccount, - requireWeightAtMost: *weight_at_most, - call: $force_create_asset2 - } - }, - { - RefundSurplus - }, - { - DepositAsset: { - assets: { Wild: All }, - beneficiary: { - parents: 0, - interior: { - X1: { - AccountId32: { - network: , # None - id: *pp_acc - } - } - }} - } - } - ] - } - ] - events: - - name: sudo.Sudid - result: { sudoResult: Ok } - - name: polkadotXcm.Sent - - name: assets.Burned - chain: *assets_parachain - result: { assetId: *asset_id, owner: *pp_sovereign_sibl } - - name: assets.Issued - chain: *assets_parachain - 
result: { assetId: *asset_id } - - queries: - assets_balance_pp_sovereign_after: - chain: *assets_parachain - pallet: assets - call: account - args: [ - *asset_id, - *pp_sovereign_sibl - ] - forced_created_asset2: - chain: *assets_parachain - pallet: assets - call: asset - args: [ 3 ] - - asserts: - isSome: - args: [ $forced_created_asset2 ] - - name: Should reduce the assets balance of the Penpal Parachain's SovereignAccount in the Assets Parachain - actions: - - asserts: - assetsDecreased: - args: [ - { - balances: { - before: $assets_balance_pp_sovereign_before, - after: $assets_balance_pp_sovereign_after, - }, - } - ] - - - name: Penpal Parachain SHOULD NOT be able to send XCM message paying its fee with sufficient assets if not enough balance - actions: - - extrinsics: - - chain: *penpal_parachain - signer: *pp_signer - sudo: true - pallet: polkadotXcm - call: send - args: [ - *ap_dest_routed, # destination - { - v3: [ #message - { - WithdrawAsset: [*ap_suff_asset_fungible_fail] - }, - { - BuyExecution: { - fees: *ap_suff_asset_fungible_fail, - weightLimit: Unlimited - } - }, - { - Transact: { - originKind: SovereignAccount, - requireWeightAtMost: *weight_at_most, - call: $force_create_asset2 - } - } - ] - } - ] - events: - - name: xcmpQueue.Fail - chain: *assets_parachain - threshold: *weight_threshold - result: { - error: FailedToTransactAsset, - weight: { refTime: '152,426,000', proofSize: '3,593' } - } diff --git a/cumulus/parachains/integration-tests/e2e/assets/asset-hub-polkadot/config.toml b/cumulus/parachains/integration-tests/e2e/assets/asset-hub-polkadot/config.toml deleted file mode 100644 index da53cd0ad4f..00000000000 --- a/cumulus/parachains/integration-tests/e2e/assets/asset-hub-polkadot/config.toml +++ /dev/null @@ -1,72 +0,0 @@ -[relaychain] -default_command = "./bin/polkadot" -default_args = [ "-lparachain=debug", "-lxcm=trace" ] -chain = "polkadot-local" - - [[relaychain.nodes]] - name = "alice" - ws_port = 9800 - validator = true - args = ["--state-cache-size=0"] - - [[relaychain.nodes]] - name = "bob" - ws_port = 9801 - validator = true - - [[relaychain.nodes]] - name = "charlie" - ws_port = 9802 - validator = true - - [[relaychain.nodes]] - name = "dave" - ws_port = 9803 - validator = true - -[[parachains]] -id = 1000 -chain = "asset-hub-polkadot-local" -cumulus_based = true - - [[parachains.collators]] - name = "collator1" - ws_port = 9810 - command = "./bin/polkadot-parachain" - args = [ "-lxcm=trace", "--state-cache-size=0" ] - - [[parachains.collators]] - name = "collator2" - ws_port = 9811 - command = "./bin/polkadot-parachain" - args = [ "-lxcm=trace" ] - - -[[parachains]] -id = 2000 -chain = "penpal-polkadot-2000" -cumulus_based = true - - [[parachains.collators]] - name = "collator3" - ws_port = 9820 - command = "./bin/polkadot-parachain" - args = [ "-lxcm=trace", "--state-cache-size=0" ] - - [[parachains.collators]] - name = "collator4" - ws_port = 9821 - command = "./bin/polkadot-parachain" - args = [ "-lxcm=trace" ] - -# [[hrmpChannels]] -# sender = 1000 -# recipient = 2000 -# maxCapacity = 8 -# maxMessageSize = 8192 - -# [[hrmpChannels]] -# sender = 2000 -# recipient = 1000 -# maxCapacity = 8 -# maxMessageSize = 8192 diff --git a/cumulus/parachains/integration-tests/e2e/collectives/README.md b/cumulus/parachains/integration-tests/e2e/collectives/README.md deleted file mode 100644 index 9c4efe7c950..00000000000 --- a/cumulus/parachains/integration-tests/e2e/collectives/README.md +++ /dev/null @@ -1,26 +0,0 @@ -E2E tests concerning Polkadot Governance 
and the Collectives Parachain. The tests run by the Parachain Integration Tests -[tool](https://github.com/paritytech/parachains-integration-tests/). - -# Requirements -The tests require some changes to the regular production runtime builds: - -## RelayChain runtime -1. Alice has SUDO -2. Public Referenda `StakingAdmin`, `FellowshipAdmin` tracks settings (see the corresponding keys of the `TRACKS_DATA` - constant in the `governance::tracks` module of the Relay Chain runtime crate): -``` yaml -prepare_period: 5 Block, -decision_period: 1 Block, -confirm_period: 1 Block, -min_enactment_period: 1 Block, -``` - -## Collectives runtime -1. Fellowship Referenda `Fellows` track settings (see the corresponding key of the `TRACKS_DATA` constant in the - `fellowship::tracks` module of the Collectives runtime crate): -``` yaml -prepare_period: 5 Block, -decision_period: 1 Block, -confirm_period: 1 Block, -min_enactment_period: 1 Block, -``` diff --git a/cumulus/parachains/integration-tests/e2e/collectives/collectives-polkadot/0_xcm/0_init.yml b/cumulus/parachains/integration-tests/e2e/collectives/collectives-polkadot/0_xcm/0_init.yml deleted file mode 100644 index 33f4d603e2a..00000000000 --- a/cumulus/parachains/integration-tests/e2e/collectives/collectives-polkadot/0_xcm/0_init.yml +++ /dev/null @@ -1,166 +0,0 @@ ---- -settings: - chains: - relay_chain: &relay_chain - wsPort: 9700 - collectives_parachain: &collectives_parachain - wsPort: 9710 - paraId: &cp_id 1001 - assethub_parachain: &assethub_parachain - wsPort: 9810 - paraId: &sp_id 1000 - variables: - xcm_version: &xcm_version 3 - weight_threshold: &weight_threshold { refTime: [10, 10], proofSize: [10, 10] } - chains: - accounts: - alice_signer: &alice_signer //Alice - decodedCalls: - ap_force_xcm_version: - chain: *collectives_parachain - pallet: polkadotXcm - call: forceXcmVersion - args: [ - { # location - parents: 1, - interior: Here - }, - *xcm_version - ] - -tests: - - name: Initialize Chains - its: - - name: XCM supported versions between chains - actions: - - extrinsics: # Relay Chain sets supported version for Collectives Parachain - - chain: *relay_chain - sudo: true - signer: *alice_signer - pallet: xcmPallet - call: forceXcmVersion - args: [ - { # location - parents: 0, - interior: { - X1: { - Parachain: *cp_id - } - } - }, - *xcm_version - ] - events: - - name: sudo.Sudid - result: { sudoResult: Ok } - - name: xcmPallet.SupportedVersionChanged - result: { location: { parents: 0, interior: { X1: { Parachain: *cp_id }}}, version: *xcm_version } - - extrinsics: # Collectives Parachain sets supported version for Relay Chain through it - - chain: *relay_chain - signer: *alice_signer - sudo: true - pallet: xcmPallet - call: send - args: [ - { v3: { 0, interior: { x1: { parachain: *cp_id }}}}, # destination - { - v3: [ # message - { - UnpaidExecution: { - weightLimit: { - limited: { - refTime: 2200000000, # 2_200_000_000 - proofSize: 200000, # 200_000 - }, - } - } - }, - { - Transact: { - originKind: Superuser, - requireWeightAtMost: { - refTime: 200000000, # 200_000_000 - proofSize: 0, - }, - call: $ap_force_xcm_version - } - } - ] - } - ] - events: - - name: sudo.Sudid - result: { sudoResult: Ok } - - name: xcmPallet.Sent - - name: polkadotXcm.SupportedVersionChanged - chain: *collectives_parachain - result: { location: { parents: 1, interior: Here }, version: *xcm_version } - - name: dmpQueue.ExecutedDownward - chain: *collectives_parachain - threshold: *weight_threshold - result: { outcome: { Complete: { refTime: '2,200,000,000', 
proofSize: 0 }}} - - extrinsics: # Relay Chain sets supported version for AssetHub Parachain - - chain: *relay_chain - sudo: true - signer: *alice_signer - pallet: xcmPallet - call: forceXcmVersion - args: [ - { # location - parents: 0, - interior: { - X1: { - Parachain: *sp_id - } - } - }, - *xcm_version - ] - events: - - name: sudo.Sudid - result: { sudoResult: Ok } - - name: xcmPallet.SupportedVersionChanged - result: { location: { parents: 0, interior: { X1: { Parachain: *sp_id } } }, version: *xcm_version } - - extrinsics: # AssetHub Parachain sets supported version for Relay Chain through it - - chain: *relay_chain - signer: *alice_signer - sudo: true - pallet: xcmPallet - call: send - args: [ - { v3: { 0, interior: { x1: { parachain: *sp_id }}}}, # destination - { - v3: [ # message - { - UnpaidExecution: { - weightLimit: { - limited: { - refTime: 2200000000, # 2_200_000_000 - proofSize: 200000, # 200_000 - }, - } - } - }, - { - Transact: { - originKind: Superuser, - requireWeightAtMost: { - refTime: 200000000, # 200_000_000 - proofSize: 0, - }, - call: $ap_force_xcm_version - } - } - ] - } - ] - events: - - name: sudo.Sudid - result: { sudoResult: Ok } - - name: xcmPallet.Sent - - name: polkadotXcm.SupportedVersionChanged - chain: *assethub_parachain - result: { location: { parents: 1, interior: Here }, version: *xcm_version } - - name: dmpQueue.ExecutedDownward - chain: *assethub_parachain - result: { outcome: { Complete: {} } } diff --git a/cumulus/parachains/integration-tests/e2e/collectives/collectives-polkadot/0_xcm/1_teleport.yml b/cumulus/parachains/integration-tests/e2e/collectives/collectives-polkadot/0_xcm/1_teleport.yml deleted file mode 100644 index cda04859b19..00000000000 --- a/cumulus/parachains/integration-tests/e2e/collectives/collectives-polkadot/0_xcm/1_teleport.yml +++ /dev/null @@ -1,168 +0,0 @@ ---- -settings: - chains: - relay_chain: &relay_chain - wsPort: 9700 - collectives_parachain: &collectives_parachain - wsPort: 9710 - paraId: &cp_id 1001 - variables: - weight_threshold: &weight_threshold { refTime: [10, 10], proofSize: [10, 10] } - accounts: - alice_signer: &acc_alice_signer //Alice - alice_account32: &acc_alice_acc32 '0xd43593c715fdd31c61141abd04a99fd6822c8558854ccde39a5684e7a56da27d' - alice_ss58: &acc_alice_ss58 '15oF4uVJwmo4TdGW7VfQxNLavjCXviqxT9S1MgbjMNHr6Sp5' - checking_account: &checking_account '13UVJyLnbVp9x5XDyJv8g8r3UddNwBrdaH7AADCmw9XQWvYW' - -tests: - - name: Teleport assets from Relay Chain to Collectives Parachain successful. - before: - - name: Get the Alice balances on Relay & Collectives Chains. - actions: - - queries: - balance_rc_alice_1: - chain: *relay_chain - pallet: system - call: account - args: [ *acc_alice_acc32 ] - balance_cp_alice_1: - chain: *collectives_parachain - pallet: system - call: account - args: [ *acc_alice_acc32 ] - its: - - name: Teleport assets from Relay Chain to Collectives Parachain - actions: - - extrinsics: - - chain: *relay_chain - signer: *acc_alice_signer - pallet: xcmPallet - call: teleportAssets - args: [ - { v3: { 0, interior: { x1: { parachain: *cp_id }}}}, # destination - { v3: { parents: 0, interior: { x1: { accountId32: { id: *acc_alice_acc32 }}}}}, # beneficiary - { - v3: [ - # { - # # TODO use a separate Assets to pay a fee, to receive an exact amount of assets on beneficiary account. - # # a call with two assets fails with an error right now. 
- # id: { concrete: { 0, interior: { here: true }}}, - # fun: { fungible: 1000000000000 } # 1_000_000_000_000 - # }, - { - id: { concrete: { 0, interior: { here: true }}}, - fun: { fungible: 20000000000000 } # 20_000_000_000_000 - } - ] - }, # assets - 0, # feeAssetItem - ] - events: - - name: xcmPallet.Attempted - threshold: *weight_threshold - result: { outcome: { Complete: { refTime: '3,000,000,000', proofSize: 0 }}} - - name: dmpQueue.ExecutedDownward - chain: *collectives_parachain - threshold: *weight_threshold - result: { outcome: { Complete: { refTime: '4,000,000,000', proofSize: 0 }}} - - queries: - balance_rc_alice_2: - chain: *relay_chain - pallet: system - call: account - args: [ *acc_alice_acc32 ] - balance_cp_alice_2: - chain: *collectives_parachain - pallet: system - call: account - args: [ *acc_alice_acc32 ] - - name: Alice deposit check, balance decreased on Relay Chain, increased on Collectives. - actions: - - asserts: - balanceDecreased: - args: [ - { - balances: { - before: $balance_rc_alice_1, - after: $balance_rc_alice_2, - } - } - ] - balanceIncreased: - args: [ - { - balances: { - before: $balance_cp_alice_1, - after: $balance_cp_alice_2, - } - } - ] - - - name: Teleport assets from Collectives Parachain to Relay Chain successful - actions: - - extrinsics: - - chain: *collectives_parachain - signer: *acc_alice_signer - pallet: polkadotXcm - call: teleportAssets - args: [ - { v3: { parents: 1, interior: { here: true }}}, # destination - { v3: { parents: 0, interior: { x1: { accountId32: { id: *acc_alice_acc32 }}}}}, # beneficiary - { - v3: [ - { - id: { concrete: { parents: 1, interior: { here: true }}}, - fun: { fungible: 10000000000000 } # 10_000_000_000_000 - } - ] - }, # assets - 0, # feeAssetItem - ] - events: - - name: balances.Withdraw - result: { who: *acc_alice_ss58, amount: 10000000000000 } - - name: polkadotXcm.Attempted - threshold: *weight_threshold - result: { outcome: { Complete: { refTime: '3,000,000,000', proofSize: 0 }}} - - name: balances.Withdraw - chain: *relay_chain - result: { who: *checking_account, amount: 10000000000000 } # amount received and withdrawn from registry account - - name: messageQueue.Processed - chain: *relay_chain - threshold: *weight_threshold - result: { origin: { Ump: { Para: *cp_id } }, weightUsed: { refTime: '4,000,000,000', proofSize: '0' }, success: true } - - queries: - balance_rc_alice_3: - chain: *relay_chain - pallet: system - call: account - args: [ *acc_alice_acc32 ] - balance_cp_alice_3: - chain: *collectives_parachain - pallet: system - call: account - args: [ *acc_alice_acc32 ] - - - name: Alice deposit check, balance decreased on Collectives, increased on Relay Chain. 
- actions: - - asserts: - balanceDecreased: - args: [ - { - balances: { - before: $balance_cp_alice_2, - after: $balance_cp_alice_3, - } - } - ] - balanceIncreased: - args: [ - { - balances: { - before: $balance_rc_alice_2, - after: $balance_rc_alice_3, - } - } - ] -# TODO (P2) assert Alice balance before and after teleport (see example in kick_member test) -# TODO (P1) test: teleport of non relay chain assets fails diff --git a/cumulus/parachains/integration-tests/e2e/collectives/collectives-polkadot/0_xcm/2_reserve.yml b/cumulus/parachains/integration-tests/e2e/collectives/collectives-polkadot/0_xcm/2_reserve.yml deleted file mode 100644 index bd17f07524a..00000000000 --- a/cumulus/parachains/integration-tests/e2e/collectives/collectives-polkadot/0_xcm/2_reserve.yml +++ /dev/null @@ -1,53 +0,0 @@ ---- -settings: - chains: - relay_chain: &relay_chain - wsPort: 9700 - collectives_parachain: &collectives_parachain - wsPort: 9710 - paraId: &cp_id 1001 - variables: - weight_threshold: &weight_threshold { refTime: [10, 10], proofSize: [10, 10] } - chains: - accounts: - alice_signer: &alice_signer //Alice - alice_account32: &alice_acc32 '0xd43593c715fdd31c61141abd04a99fd6822c8558854ccde39a5684e7a56da27d' - -tests: - - name: Reserve assets from Relay Chain to Collectives Parachain fails - its: - - name: Reserve assets from Relay Chain to Collectives Parachain - actions: - - extrinsics: - - chain: *relay_chain - signer: *alice_signer - pallet: xcmPallet - call: reserveTransferAssets - args: [ - { v3: { 0, interior: { x1: { parachain: *cp_id }}}}, # destination - { v3: { parents: 0, interior: { x1: { accountId32: { id: *alice_acc32 }}}}}, # beneficiary - { - v3: [ - { - id: { concrete: { 0, interior: { here: true }}}, - fun: { fungible: 20000000000000 } # 20_000_000_000_000 - } - ] - }, # assets - 0, # feeAssetItem - ] - events: - - name: xcmPallet.Attempted - threshold: *weight_threshold - result: { outcome: { Complete: { refTime: '2,000,000,000', proofSize: 0 }}} - - name: dmpQueue.ExecutedDownward - chain: *collectives_parachain - threshold: *weight_threshold - result: { - outcome: { - Incomplete: [ - { refTime: '1,000,000,000', proofSize: 0 }, - UntrustedReserveLocation - ] - } - } diff --git a/cumulus/parachains/integration-tests/e2e/collectives/collectives-polkadot/0_xcm/3_hrmp-open-channels.yml b/cumulus/parachains/integration-tests/e2e/collectives/collectives-polkadot/0_xcm/3_hrmp-open-channels.yml deleted file mode 100644 index 17a16d9ccd7..00000000000 --- a/cumulus/parachains/integration-tests/e2e/collectives/collectives-polkadot/0_xcm/3_hrmp-open-channels.yml +++ /dev/null @@ -1,69 +0,0 @@ ---- -settings: - chains: - relay_chain: &relay_chain - wsPort: 9700 - collectives_parachain: &collectives_parachain - wsPort: 9710 - paraId: &cp_id 1001 - assethub_parachain: &assethub_parachain - wsPort: 9810 - paraId: &sp_id 1000 - variables: - chains: - accounts: - alice_signer: &alice_signer //Alice - hrmp: - proposed_max_capacity: &hrmp_proposed_max_capacity 8 - proposed_max_message_size: &hrmp_proposed_max_message_size 8192 -tests: - - name: HRMP - describes: - - name: Force Open HRMP Channel From Collectives Parachain → AssetHub Parachain - its: - - name: Alice calls hrmp.forceOpenHrmpChannel - actions: - - extrinsics: - - chain: *relay_chain - signer: *alice_signer - sudo: true - pallet: hrmp - call: forceOpenHrmpChannel - args: [ - *cp_id, # sender - *sp_id, # recipient - *hrmp_proposed_max_capacity, # proposedMaxCapacity - *hrmp_proposed_max_message_size # proposedMaxMessageSize - ] - 
events: - - name: hrmp.HrmpChannelForceOpened - result: { - sender: *cp_id, - recipient: *sp_id, - proposed_max_capacity: *hrmp_proposed_max_capacity, - proposed_max_message_size: *hrmp_proposed_max_message_size - } - - name: Force Open HRMP Channel From AssetHub Parachain → Collectives Parachain - its: - - name: Alice calls hrmp.forceOpenHrmpChannel - actions: - - extrinsics: - - chain: *relay_chain - signer: *alice_signer - sudo: true - pallet: hrmp - call: forceOpenHrmpChannel - args: [ - *sp_id, # sender - *cp_id, # recipient - *hrmp_proposed_max_capacity, # proposedMaxCapacity - *hrmp_proposed_max_message_size # proposedMaxMessageSize - ] - events: - - name: hrmp.HrmpChannelForceOpened - result: { - sender: *sp_id, - recipient: *cp_id, - proposed_max_capacity: *hrmp_proposed_max_capacity, - proposed_max_message_size: *hrmp_proposed_max_message_size - } diff --git a/cumulus/parachains/integration-tests/e2e/collectives/collectives-polkadot/1_alliance/0_join_alliance_fails.yml b/cumulus/parachains/integration-tests/e2e/collectives/collectives-polkadot/1_alliance/0_join_alliance_fails.yml deleted file mode 100644 index 9aff8b1db10..00000000000 --- a/cumulus/parachains/integration-tests/e2e/collectives/collectives-polkadot/1_alliance/0_join_alliance_fails.yml +++ /dev/null @@ -1,29 +0,0 @@ ---- -settings: - chains: - relay_chain: - wsPort: 9700 - collectives_parachain: &collectives_parachain - wsPort: 9710 - variables: - accounts: - alice_signer: &alice_signer //Alice - -tests: - - name: Alice fails to join an the Alliance, since it is not initialized yet. - its: - - name: Alice joins alliance - actions: - - extrinsics: # Relay Chain sets supported version for Asset Parachain - - chain: *collectives_parachain - signer: *alice_signer - pallet: alliance - call: joinAlliance - args: [] - events: - - name: system.ExtrinsicFailed - result: { - dispatchError: { Module: { index: 50, error: '0x00000000' }} - } - # TODO assert with Alliance Error variant - alliance.AllianceNotYetInitialized - # issue - https://github.com/paritytech/parachains-integration-tests/issues/59 diff --git a/cumulus/parachains/integration-tests/e2e/collectives/collectives-polkadot/1_alliance/1_init_alliance.yml b/cumulus/parachains/integration-tests/e2e/collectives/collectives-polkadot/1_alliance/1_init_alliance.yml deleted file mode 100644 index 1e01c701744..00000000000 --- a/cumulus/parachains/integration-tests/e2e/collectives/collectives-polkadot/1_alliance/1_init_alliance.yml +++ /dev/null @@ -1,256 +0,0 @@ ---- -settings: - chains: - relay_chain: &relay_chain - wsPort: 9700 - collectives_parachain: &collectives_parachain - wsPort: 9710 - paraId: &coll_para_id 1001 - variables: - weight_threshold: &weight_threshold { refTime: [10, 10], proofSize: [10, 10] } - accounts: - alice_signer: &acc_alice_signer //Alice - liam_account32: &acc_liam_acc32 "0x3614671a5de540d891eb8c4939c8153a4aa790602b347c18177b86d0fc546221" # //Liam - olivia_account32: &acc_olivia_acc32 "0x24ee8a659c6716fe9f7cb4e9e028602aa12867654ca02737da9171b7ff697d5c" # //Olivia - noah_account32: &acc_noah_acc32 "0x9c6ad3bc3aa2f1b2e837898e6da9980445f7ef8b3eee0b8c8e305f8cfae68517" # //Noah - emma_account32: &acc_emma_acc32 "0x8ac272b333ba1127c8db57fa777ec820b24598a236efa648caf0d26d86f64572" # //Emma - james_account32: &acc_james_acc32 "0x9a52805151a0b5effc084af9264011139872a21a3950cb9ae0b2955c4bf92c18" # //James - ava_account32: &acc_ava_acc32 "0x348ef0b8776adbc09c862ddc29b1d193b9e24738e54eea3b0609c83856dc101c" # //Ava - mia_account32: &acc_mia_acc32 
"0xaebf15374cf7e758d10232514c569a7abf81cc1b8f1e81a73dbc608a0e335264" # //Mia - decodedCalls: - init_alliance_members: - chain: *collectives_parachain - pallet: alliance - call: initMembers - args: [ - [ - *acc_liam_acc32, - *acc_olivia_acc32, - *acc_noah_acc32, - *acc_emma_acc32, - *acc_james_acc32, - *acc_ava_acc32 - ], - [ - *acc_mia_acc32 - ] - ] - init_alliance_voting_members: - chain: *collectives_parachain - pallet: alliance - call: initMembers - args: [ - [ - *acc_liam_acc32, - *acc_olivia_acc32, - *acc_noah_acc32, - *acc_emma_acc32, - *acc_james_acc32, - *acc_ava_acc32, - *acc_mia_acc32 - ], - [] - ] - disband: - chain: *collectives_parachain - pallet: alliance - call: disband - args: [ - { - fellowMembers: 6, - allyMembers: 1 - } - ] - -tests: - - name: Alliance initiated with the root call, second init call fails. Alliance disband and set again. - its: - - name: Alliance initiated, founders and fellows are set. - actions: - - extrinsics: - - chain: *relay_chain - signer: *acc_alice_signer - sudo: true - pallet: xcmPallet - call: send - args: [ - { v3: { parents: 0, interior: { x1: { parachain: *coll_para_id }}}}, # destination - { - v3: [ # message - { - UnpaidExecution: { - weightLimit: { - limited: { - refTime: 3000000000, # 3_000_000_000 - proofSize: 2000000, # 2_000_000 - }, - } - } - }, - { - Transact: { - originKind: Superuser, - requireWeightAtMost: { - refTime: 1000000000, # 1_000_000_000 - proofSize: 1000000, # 1_000_000 - }, - call: $init_alliance_members - } - } - ] - } - ] - events: - - name: sudo.Sudid - result: { sudoResult: Ok } - - name: xcmPallet.Sent - result: { origin: { parents: 0, interior: Here }, destination: { parents: 0, interior: { X1: { Parachain: *coll_para_id }}}} - - name: alliance.MembersInitialized - chain: *collectives_parachain - - name: dmpQueue.ExecutedDownward - chain: *collectives_parachain - threshold: *weight_threshold - result: { outcome: { Complete: { refTime: '3,000,000,000', proofSize: '1,000,000' }}} - - - name: Alliance init call fails. - actions: - - extrinsics: - - chain: *relay_chain - signer: *acc_alice_signer - sudo: true - pallet: xcmPallet - call: send - args: [ - { v3: { parents: 0, interior: { x1: { parachain: *coll_para_id }}}}, # destination - { - v3: [ # message - { - UnpaidExecution: { - weightLimit: { - limited: { - refTime: 3000000000, # 3_000_000_000 - proofSize: 2000000, # 2_000_000 - }, - } - } - }, - { - Transact: { - originKind: Superuser, - requireWeightAtMost: { - refTime: 1000000000, # 1_000_000_000 - proofSize: 1000000, # 1_000_000 - }, - call: $init_alliance_voting_members - } - } - ] - } - ] - events: - # TODO can not currently assert variant AllianceAlreadyInitialized, XCM Transact fails silently - # issue - https://github.com/paritytech/polkadot/issues/4623 - # Next test with a disband call will fail, if this call does not fail, - # since a witness data from a disband call will be invalid. - - name: sudo.Sudid - result: { sudoResult: Ok } - - name: xcmPallet.Sent - result: { origin: { parents: 0, interior: Here }, destination: { parents: 0, interior: { X1: { Parachain: *coll_para_id }}}} - - name: dmpQueue.ExecutedDownward - chain: *collectives_parachain - threshold: *weight_threshold - result: { outcome: { Complete: { refTime: '3,000,000,000', proofSize: '1,000,000' }}} - - - name: Alliance disbanded and initialized again. 
- actions: - - extrinsics: - - chain: *relay_chain - signer: *acc_alice_signer - sudo: true - pallet: xcmPallet - call: send - args: [ - { v3: { parents: 0, interior: { x1: { parachain: *coll_para_id }}}}, # destination - { - v3: [ # message - { - UnpaidExecution: { - weightLimit: { - limited: { - refTime: 5000000000, # 3_000_000_000 - proofSize: 1000000, # 1_000_000 - }, - } - } - }, - { - Transact: { - originKind: Superuser, - requireWeightAtMost: { - refTime: 3000000000, # 3_000_000_000 - proofSize: 200000, # 200_000 - }, - call: $disband - } - } - ] - } - ] - events: - - name: sudo.Sudid - result: { sudoResult: Ok } - - name: xcmPallet.Sent - result: { origin: { parents: 0, interior: Here }, destination: { parents: 0, interior: { X1: { Parachain: *coll_para_id }}}} - - name: alliance.AllianceDisbanded - chain: *collectives_parachain - result: { fellowMembers: 6, allyMembers: 1, unreserved: 0 } - - name: dmpQueue.ExecutedDownward - chain: *collectives_parachain - threshold: *weight_threshold - result: { outcome: { Complete: { refTime: '3,321,495,872', proofSize: '181,779' }}} - - name: Alliance initiated, founders and fellows are set. - actions: - - extrinsics: - - chain: *relay_chain - signer: *acc_alice_signer - sudo: true - pallet: xcmPallet - call: send - args: [ - { v3: { parents: 0, interior: { x1: { parachain: *coll_para_id }}}}, # destination - { - v3: [ # message - { - UnpaidExecution: { - weightLimit: { - limited: { - refTime: 3000000000, # 3_000_000_000 - proofSize: 2000000, # 2_000_000 - }, - } - } - }, - { - Transact: { - originKind: Superuser, - requireWeightAtMost: { - refTime: 1000000000, # 1_000_000_000 - proofSize: 1000000, # 1_000_000 - }, - call: $init_alliance_members - } - } - ] - } - ] - events: - - name: sudo.Sudid - result: { sudoResult: Ok } - - name: xcmPallet.Sent - result: { origin: { parents: 0, interior: Here }, destination: { parents: 0, interior: { X1: { Parachain: *coll_para_id }}}} - - name: alliance.MembersInitialized - chain: *collectives_parachain - - name: dmpQueue.ExecutedDownward - chain: *collectives_parachain - threshold: *weight_threshold - result: { outcome: { Complete: { refTime: '3,000,000,000', proofSize: '1,000,000' }}} diff --git a/cumulus/parachains/integration-tests/e2e/collectives/collectives-polkadot/1_alliance/2_join_alliance_fails.yml b/cumulus/parachains/integration-tests/e2e/collectives/collectives-polkadot/1_alliance/2_join_alliance_fails.yml deleted file mode 100644 index 2afdadae602..00000000000 --- a/cumulus/parachains/integration-tests/e2e/collectives/collectives-polkadot/1_alliance/2_join_alliance_fails.yml +++ /dev/null @@ -1,30 +0,0 @@ ---- -settings: - chains: - relay_chain: - wsPort: 9700 - collectives_parachain: &collectives_parachain - wsPort: 9710 - paraId: 1001 - variables: - accounts: - liam_signer: &acc_liam_signer //Liam - -tests: - - name: Liam fails to join an the Alliance, Liam is already a member. 
- its: - - name: Alice joins alliance - actions: - - extrinsics: # Relay Chain sets supported version for Asset Parachain - - chain: *collectives_parachain - signer: *acc_liam_signer - pallet: alliance - call: joinAlliance - args: [] - events: - - name: system.ExtrinsicFailed - result: { - dispatchError: { Module: { index: 50, error: '0x02000000' }} - } - # TODO assert with Alliance Error variant - alliance.AllianceNotYetInitialized - # issue - https://github.com/paritytech/parachains-integration-tests/issues/59 diff --git a/cumulus/parachains/integration-tests/e2e/collectives/collectives-polkadot/1_alliance/3_kick_member.yml b/cumulus/parachains/integration-tests/e2e/collectives/collectives-polkadot/1_alliance/3_kick_member.yml deleted file mode 100644 index a5941cb4723..00000000000 --- a/cumulus/parachains/integration-tests/e2e/collectives/collectives-polkadot/1_alliance/3_kick_member.yml +++ /dev/null @@ -1,175 +0,0 @@ ---- -settings: - chains: - relay_chain: &relay_chain - wsPort: 9700 - collectives_parachain: &collectives_parachain - wsPort: 9710 - paraId: &cp_id 1001 - variables: - weight_threshold: &weight_threshold { refTime: [10, 10], proofSize: [10, 10] } - init_teleport_amount: &init_teleport_amount 20000000000000 # 20_000_000_000_000 - accounts: - alice_signer: &acc_alice_signer //Alice - treasury_account32: &acc_treasury_acc32 '0x6d6f646c70792f74727372790000000000000000000000000000000000000000' - alice_account32: &acc_alice_acc32 '0xd43593c715fdd31c61141abd04a99fd6822c8558854ccde39a5684e7a56da27d' - alice_ss58: &acc_alice_ss58 '15oF4uVJwmo4TdGW7VfQxNLavjCXviqxT9S1MgbjMNHr6Sp5' - decodedCalls: - alliance_kick_member: - chain: *collectives_parachain - pallet: alliance - call: kickMember - args: [ - {Id: *acc_alice_acc32} - ] - -tests: - - name: Member kicked out, deposited assets slashed and teleported to Relay Chain treasury. 
- before: - - name: DEPENDENCY | Do a 'limitedTeleportAssets' from the Relay Chain to the Collectives Parachain - actions: - - extrinsics: - - chain: *relay_chain - signer: *acc_alice_signer - pallet: xcmPallet - call: limitedTeleportAssets - args: [ - { v3: { 0, interior: { x1: { parachain: *cp_id }}}}, # destination - { v3: { parents: 0, interior: { x1: { accountId32: { id: *acc_alice_acc32 }}}}}, # beneficiary - { v3: [ { id: { concrete: { 0, interior: { here: true }}}, fun: { fungible: *init_teleport_amount }} ] }, # assets - 0, # feeAssetItem - { unlimited: true } # weightLimit - ] - events: - - name: xcmPallet.Attempted - threshold: *weight_threshold - result: { - outcome: { Complete: { refTime: '3,000,000,000', proofSize: 0 }} - } - - name: balances.Deposit - chain: *collectives_parachain - result: { who: *acc_alice_ss58 } - - name: dmpQueue.ExecutedDownward - chain: *collectives_parachain - threshold: *weight_threshold - result: { - outcome: { Complete: { refTime: '4,000,000,000', proofSize: 0 }} - } - - name: Get the balances of the Relay Chain's treasury & Collectives parachain's future alliance member - actions: - - queries: - balance_rc_treasury_before: - chain: *relay_chain - pallet: system - call: account - args: [ *acc_treasury_acc32 ] - balance_cp_alice_before: - chain: *collectives_parachain - pallet: system - call: account - args: [ *acc_alice_acc32 ] - its: - - name: Alice joins alliance - actions: - - extrinsics: - - chain: *collectives_parachain - signer: *acc_alice_signer - pallet: alliance - call: joinAlliance - args: [] - events: - - name: balances.Reserved - chain: *collectives_parachain - result: { who: *acc_alice_ss58, amount: 10000000000000 } - - name: alliance.NewAllyJoined - result: {ally: *acc_alice_ss58, reserved: 10000000000000 } - - queries: - balance_cp_alice_after: - chain: *collectives_parachain - pallet: system - call: account - args: [ *acc_alice_acc32 ] - - name: Alice deposit check, balance decreased - actions: - - asserts: - balanceDecreased: - args: [ - { - balances: { - before: $balance_cp_alice_before, - after: $balance_cp_alice_after, - } - # TODO (P3) set `amount` and `fee` for more strict assert - } - ] - - name: Kick Alice from alliance - actions: - - extrinsics: # Asset Parachain sets supported version for Relay Chain through it - - chain: *relay_chain - signer: *acc_alice_signer - sudo: true - pallet: xcmPallet - call: send - args: [ - { v3: { parents: 0, interior: { x1: { parachain: *cp_id }}}}, # destination - { - v3: [ #message - { - UnpaidExecution: { - weightLimit: { - limited: { - refTime: 4000000000, # 4_000_000_000 - proofSize: 2000000, # 2_000_000 - }, - } - } - }, - { - Transact: { - originKind: Superuser, - requireWeightAtMost: { - refTime: 2000000000, # 2_000_000_000 - proofSize: 1000000, # 1_000_000 - }, - call: $alliance_kick_member - } - } - ] - } - ] - events: - - name: sudo.Sudid - result: { sudoResult: Ok } - - name: xcmPallet.Sent - result: { origin: { parents: 0, interior: Here }, destination: { parents: 0, interior: { X1: { Parachain: *cp_id }}}} - - name: alliance.MemberKicked - chain: *collectives_parachain - result: { member: *acc_alice_ss58, slashed: 10000000000000 } - - name: dmpQueue.ExecutedDownward - chain: *collectives_parachain - threshold: *weight_threshold - result: { - outcome: { Complete: { refTime: '4,000,000,000', proofSize: '1,000,000' }} - } - - name: messageQueue.Processed - result: { origin: { Ump: { Para: *cp_id }}, success: true } - - - queries: - balance_rc_treasury_after: - chain: *relay_chain - 
pallet: system - call: account - args: [ *acc_treasury_acc32 ] - - name: Slashed balance appears on the relay chain treasury account - actions: - - asserts: - balanceIncreased: - args: [ - { - balances: { - before: $balance_rc_treasury_before, - after: $balance_rc_treasury_after, - } - # TODO (P3) set `amount` and `fee` for more strict assert - } - ] diff --git a/cumulus/parachains/integration-tests/e2e/collectives/collectives-polkadot/2_opengov/0_assethub.yml b/cumulus/parachains/integration-tests/e2e/collectives/collectives-polkadot/2_opengov/0_assethub.yml deleted file mode 100644 index c53efff51fb..00000000000 --- a/cumulus/parachains/integration-tests/e2e/collectives/collectives-polkadot/2_opengov/0_assethub.yml +++ /dev/null @@ -1,149 +0,0 @@ ---- -settings: - chains: - relay_chain: &relay_chain - wsPort: 9700 - assethub_parachain: &assethub_parachain - wsPort: 9810 - paraId: &ap_id 1000 - variables: - proposal_index: &proposal_index 0 - chains: - accounts: - alice_signer: &alice_signer //Alice - bob_signer: &bob_signer //Bob - decodedCalls: - set_candidates_ap: - chain: *assethub_parachain - encode: true - pallet: collatorSelection - call: setDesiredCandidates - args: [ - 3 - ] - send_set_candidates_rc: - chain: *relay_chain - encode: false - pallet: xcmPallet - call: send - args: [ - { v3: { parents: 0, interior: { x1: { parachain: *ap_id }}}}, # destination - { - v3: [ #message - { - UnpaidExecution: { - weightLimit: Unlimited - } - }, - { - Transact: { - originKind: Xcm, - requireWeightAtMost: { - refTime: 200000000, # 200_000_000 - proofSize: 100000, # 100_000 - }, - call: $set_candidates_ap - } - } - ] - } - ] -tests: - - name: OpenGov - describes: - - name: Set desired candidates on AssetHub from Relay Chain OpenGov Staking track - its: - - name: Note preimage from xcm send set_desired_candidates call - actions: - - extrinsics: - - chain: *relay_chain - signer: *alice_signer - pallet: preimage - call: notePreimage - args: [ - $send_set_candidates_rc - ] - events: - - name: preimage.Noted - result: {hash_: $send_set_candidates_rc.hash } - - name: Submit a proposal to set desired candidates - actions: - - extrinsics: - - chain: *relay_chain - signer: *alice_signer - pallet: referenda - call: submit - args: [ - { - "Origins": "StakingAdmin", - }, - { - "Lookup": { - "hash_": $send_set_candidates_rc.hash, - "len": $send_set_candidates_rc.len, - }, - }, - { - "After": 1, - }, - ] - events: - - name: referenda.Submitted - result: { - index: *proposal_index, - proposal: { Lookup: { hash_: $send_set_candidates_rc.hash, len: $send_set_candidates_rc.len }} - } - - name: Alice Vote Aye - actions: - - extrinsics: - - chain: *relay_chain - signer: *alice_signer - pallet: convictionVoting - call: vote - args: [ - *proposal_index, - { - "Standard": { - "vote": { - "aye": true, - "conviction": "Locked1x", - }, - "balance": 200000000000000, - } - }, - ] # TODO no event to catch https://github.com/paritytech/substrate/issues/14687 - - name: Bob Vote Aye - actions: - - extrinsics: - - chain: *relay_chain - signer: *bob_signer - pallet: convictionVoting - call: vote - args: [ - *proposal_index, - { - "Standard": { - "vote": { - "aye": true, - "conviction": "Locked1x", - }, - "balance": 200000000000000, - } - }, - ] # TODO no event to catch https://github.com/paritytech/substrate/issues/14687 - - name: Submit the decision deposit - actions: - - extrinsics: - - chain: *relay_chain - signer: *alice_signer - pallet: referenda - call: placeDecisionDeposit - args: [ - *proposal_index, - ] - events: - - 
name: referenda.DecisionDepositPlaced - result: { index: *proposal_index } - - name: collatorSelection.NewDesiredCandidates - chain: *assethub_parachain - result: { desiredCandidates: 3 } diff --git a/cumulus/parachains/integration-tests/e2e/collectives/collectives-polkadot/3_fellowship/0_init.yml b/cumulus/parachains/integration-tests/e2e/collectives/collectives-polkadot/3_fellowship/0_init.yml deleted file mode 100644 index 1e4b2dabe21..00000000000 --- a/cumulus/parachains/integration-tests/e2e/collectives/collectives-polkadot/3_fellowship/0_init.yml +++ /dev/null @@ -1,209 +0,0 @@ ---- -settings: - chains: - relay_chain: &relay_chain - wsPort: 9700 - collectives_parachain: &collectives_parachain - wsPort: 9710 - paraId: &cp_id 1001 - variables: - proposal_index: &proposal_index 1 - chains: - accounts: - alice_signer: &alice_signer //Alice - bob_signer: &bob_signer //Bob - alice_account32: &acc_alice_acc32 '0xd43593c715fdd31c61141abd04a99fd6822c8558854ccde39a5684e7a56da27d' - alice_ss58: &acc_alice_ss58 '15oF4uVJwmo4TdGW7VfQxNLavjCXviqxT9S1MgbjMNHr6Sp5' - decodedCalls: - fellowship_induct_alice_cp: - chain: *collectives_parachain - encode: true - pallet: fellowshipCore - call: induct - args: [ - *acc_alice_acc32 - ] - fellowship_promote_1_alice_cp: - chain: *collectives_parachain - encode: true - pallet: fellowshipCore - call: promote - args: [ - *acc_alice_acc32, - 1 - ] - fellowship_promote_2_alice_cp: - chain: *collectives_parachain - encode: true - pallet: fellowshipCore - call: promote - args: [ - *acc_alice_acc32, - 2 - ] - fellowship_promote_3_alice_cp: - chain: *collectives_parachain - encode: true - pallet: fellowshipCore - call: promote - args: [ - *acc_alice_acc32, - 3 - ] - send_init_fellowship_rc: - chain: *relay_chain - encode: false - pallet: xcmPallet - call: send - args: [ - { v3: { parents: 0, interior: { x1: { parachain: *cp_id }}}}, # destination - { - v3: [ #message - { - UnpaidExecution: { - weightLimit: Unlimited - } - }, - { # since batch_all not yet allowed over xcm, we have to send multiple `Transact`. 
- Transact: { - originKind: Xcm, - requireWeightAtMost: { - refTime: 1500000000, # 1_500_000_000 - proofSize: 10000, # 10_000 - }, - call: $fellowship_induct_alice_cp - } - }, - { - Transact: { - originKind: Xcm, - requireWeightAtMost: { - refTime: 1500000000, # 1_500_000_000 - proofSize: 10000, # 10_000 - }, - call: $fellowship_promote_1_alice_cp - } - }, - { - Transact: { - originKind: Xcm, - requireWeightAtMost: { - refTime: 1500000000, # 1_500_000_000 - proofSize: 10000, # 10_000 - }, - call: $fellowship_promote_2_alice_cp - } - }, - { - Transact: { - originKind: Xcm, - requireWeightAtMost: { - refTime: 1500000000, # 1_500_000_000 - proofSize: 10000, # 10_000 - }, - call: $fellowship_promote_3_alice_cp - } - } - ] - } - ] - -tests: - - name: Fellowship - describes: - - name: Init the Fellowship - its: - - name: Note preimage from init fellowship call - actions: - - extrinsics: - - chain: *relay_chain - signer: *alice_signer - pallet: preimage - call: notePreimage - args: [ - $send_init_fellowship_rc - ] - events: - - name: preimage.Noted - result: { hash_: $send_init_fellowship_rc.hash } - - name: Submit a proposal to init the Fellowship - actions: - - extrinsics: - - chain: *relay_chain - signer: *alice_signer - pallet: referenda - call: submit - args: [ - { - "Origins": "FellowshipAdmin", - }, - { - "Lookup": { - "hash_": $send_init_fellowship_rc.hash, - "len": $send_init_fellowship_rc.len, - }, - }, - { - "After": 1, - }, - ] - events: - - name: referenda.Submitted - result: { - index: *proposal_index, - proposal: { Lookup: { hash_: $send_init_fellowship_rc.hash, len: $send_init_fellowship_rc.len }} - } - - name: Alice Vote Aye - actions: - - extrinsics: - - chain: *relay_chain - signer: *alice_signer - pallet: convictionVoting - call: vote - args: [ - *proposal_index, - { - "Standard": { - "vote": { - "aye": true, - "conviction": "Locked1x", - }, - "balance": 200000000000000, - } - }, - ] # TODO no Aye event to catch https://github.com/paritytech/substrate/issues/14687 - - name: Bob Vote Aye - actions: - - extrinsics: - - chain: *relay_chain - signer: *bob_signer - pallet: convictionVoting - call: vote - args: [ - *proposal_index, - { - "Standard": { - "vote": { - "aye": true, - "conviction": "Locked1x", - }, - "balance": 200000000000000, - } - }, - ] # TODO no Aye event to catch https://github.com/paritytech/substrate/issues/14687 - - name: Submit the decision deposit - actions: - - extrinsics: - - chain: *relay_chain - signer: *alice_signer - pallet: referenda - call: placeDecisionDeposit - args: [ - *proposal_index, - ] - events: - - name: referenda.DecisionDepositPlaced - result: { index: *proposal_index } - - name: fellowshipCollective.MemberAdded - chain: *collectives_parachain - result: { who: *acc_alice_ss58 } diff --git a/cumulus/parachains/integration-tests/e2e/collectives/collectives-polkadot/3_fellowship/1_whitelist_call.yml b/cumulus/parachains/integration-tests/e2e/collectives/collectives-polkadot/3_fellowship/1_whitelist_call.yml deleted file mode 100644 index 5991c7ae2f8..00000000000 --- a/cumulus/parachains/integration-tests/e2e/collectives/collectives-polkadot/3_fellowship/1_whitelist_call.yml +++ /dev/null @@ -1,146 +0,0 @@ ---- -settings: - chains: - relay_chain: &relay_chain - wsPort: 9700 - collectives_parachain: &collectives_parachain - wsPort: 9710 - paraId: &cp_id 1001 - variables: - fellows_proposal_index: &fellows_proposal_index 0 - chains: - accounts: - alice_signer: &alice_signer //Alice - decodedCalls: - remark_rc: - chain: *relay_chain - encode: false 
- pallet: system - call: remark - args: [ - "0x10" - ] - whitelist_remark_rc: - chain: *relay_chain - encode: true - pallet: whitelist - call: whitelistCall - args: [ - $remark_rc.hash - ] - send_whitelist_remark_cp: - chain: *collectives_parachain - encode: false - pallet: polkadotXcm - call: send - args: [ - { v3: { parents: 1, interior: { here: true }}}, # destination - { - v3: [ #message - { - UnpaidExecution: { - weightLimit: Unlimited - } - }, - { - Transact: { - originKind: Xcm, - requireWeightAtMost: { - refTime: 500000000, # 500_000_000 - proofSize: 20000, # 20_000 - }, - call: $whitelist_remark_rc - } - } - ] - } - ] - -tests: - - name: Fellowship - describes: - - name: The Fellowship white list the call - its: - - name: Note preimage from the whitelist call on the Relay Chain - actions: - - extrinsics: - - chain: *relay_chain - signer: *alice_signer - pallet: preimage - call: notePreimage - args: [ - $remark_rc - ] - events: - - name: preimage.Noted - result: { hash_: $remark_rc.hash } - - name: Note preimage from the xcm send call to white list the call above - actions: - - extrinsics: - - chain: *collectives_parachain - signer: *alice_signer - pallet: preimage - call: notePreimage - args: [ - $send_whitelist_remark_cp, - ] - events: - - name: preimage.Noted - result: { hash_: $send_whitelist_remark_cp.hash } - - name: Submit a proposal to while list the call - actions: - - extrinsics: - - chain: *collectives_parachain - signer: *alice_signer - pallet: fellowshipReferenda - call: submit - args: [ - { - "FellowshipOrigins": "Fellows", - }, - { - "Lookup": { - "hash_": $send_whitelist_remark_cp.hash, - "len": $send_whitelist_remark_cp.len, - }, - }, - { - "After": 1, - }, - ] - events: - - name: fellowshipReferenda.Submitted - result: { - index: *fellows_proposal_index, - proposal: { Lookup: { hash_: $send_whitelist_remark_cp.hash, len: $send_whitelist_remark_cp.len}} - } - - name: Vote Aye - actions: - - extrinsics: - - chain: *collectives_parachain - signer: *alice_signer - pallet: fellowshipCollective - call: vote - args: [ - *fellows_proposal_index, - true, - ] - events: - - name: fellowshipCollective.Voted - result: { poll: *fellows_proposal_index, vote: { Aye: 1 } } - - name: Submit the decision deposit - actions: - - extrinsics: - - chain: *collectives_parachain - signer: *alice_signer - pallet: fellowshipReferenda - call: placeDecisionDeposit - args: [ - *fellows_proposal_index, - ] - events: - - name: fellowshipReferenda.DecisionDepositPlaced - result: {index: *fellows_proposal_index} - - name: whitelist.CallWhitelisted - chain: *relay_chain - result: { callHash: $remark_rc.hash } diff --git a/cumulus/parachains/integration-tests/e2e/collectives/collectives-polkadot/3_fellowship/2_assethub.yml b/cumulus/parachains/integration-tests/e2e/collectives/collectives-polkadot/3_fellowship/2_assethub.yml deleted file mode 100644 index c0805594808..00000000000 --- a/cumulus/parachains/integration-tests/e2e/collectives/collectives-polkadot/3_fellowship/2_assethub.yml +++ /dev/null @@ -1,126 +0,0 @@ ---- -settings: - chains: - relay_chain: &relay_chain - wsPort: 9700 - collectives_parachain: &collectives_parachain - wsPort: 9710 - paraId: &cp_id 1001 - assethub_parachain: &assethub_parachain - wsPort: 9810 - paraId: &ap_id 1000 - variables: - fellows_proposal_index: &fellows_proposal_index 1 - chains: - accounts: - alice_signer: &alice_signer //Alice - - decodedCalls: - xcmp_resume_execution_ap: - chain: *assethub_parachain - encode: true - pallet: xcmpQueue - call: 
resumeXcmExecution - args: [] - send_xcmp_resume_execution_cp: - chain: *collectives_parachain - encode: false - pallet: polkadotXcm - call: send - args: [ - { v3: { parents: 1, interior: { x1: { parachain: *ap_id }}}}, # destination - { - v3: [ #message - { - UnpaidExecution: { - weightLimit: Unlimited - } - }, - { - Transact: { - originKind: Xcm, - requireWeightAtMost: { - refTime: 300000000, # 300_000_000 - proofSize: 10000, # 10_000 - }, - call: $xcmp_resume_execution_ap - } - } - ] - } - ] - -tests: - - name: Fellowship - describes: - - name: The Fellowship resume xcm execution for the xcmp queue on AssetHub - its: - - name: Note preimage from the xcm send call to suspend_xcm_execution - actions: - - extrinsics: - - chain: *collectives_parachain - signer: *alice_signer - pallet: preimage - call: notePreimage - args: [ - $send_xcmp_resume_execution_cp - ] - events: - - name: preimage.Noted - result: {hash_: $send_xcmp_resume_execution_cp.hash } - - name: Submit a proposal to resume xcm execution on AssetHub - actions: - - extrinsics: - - chain: *collectives_parachain - signer: *alice_signer - pallet: fellowshipReferenda - call: submit - args: [ - { - "FellowshipOrigins": "Fellows", - }, - { - "Lookup": { - "hash_": $send_xcmp_resume_execution_cp.hash, - "len": $send_xcmp_resume_execution_cp.len, - }, - }, - { - "After": 1, - }, - ] - events: - - name: fellowshipReferenda.Submitted - result: { - index: 1, - proposal: {Lookup: {hash_: $send_xcmp_resume_execution_cp.hash, len: $send_xcmp_resume_execution_cp.len}} - } - - name: Vote Aye - actions: - - extrinsics: - - chain: *collectives_parachain - signer: *alice_signer - pallet: fellowshipCollective - call: vote - args: [ - *fellows_proposal_index, - true, - ] - events: - - name: fellowshipCollective.Voted - result: { poll: *fellows_proposal_index, vote: { Aye: 1 } } - - name: Submit the decision deposit - actions: - - extrinsics: - - chain: *collectives_parachain - signer: *alice_signer - pallet: fellowshipReferenda - call: placeDecisionDeposit - args: [ - *fellows_proposal_index, - ] - events: - - name: fellowshipReferenda.DecisionDepositPlaced - result: {index: *fellows_proposal_index} - - name: xcmpQueue.Success - chain: *assethub_parachain diff --git a/cumulus/parachains/integration-tests/e2e/collectives/collectives-polkadot/config.toml b/cumulus/parachains/integration-tests/e2e/collectives/collectives-polkadot/config.toml deleted file mode 100644 index 20fda92bd08..00000000000 --- a/cumulus/parachains/integration-tests/e2e/collectives/collectives-polkadot/config.toml +++ /dev/null @@ -1,42 +0,0 @@ -[relaychain] -default_command = "./bin/polkadot" -default_args = [ "-lparachain=trace", "-lxcm=trace" ] -chain = "polkadot-local" - - [[relaychain.nodes]] - name = "alice" - ws_port = 9700 - validator = true - args = ["--state-cache-size=0"] - - [[relaychain.nodes]] - name = "bob" - ws_port = 9701 - validator = true - - [[relaychain.nodes]] - name = "charlie" - ws_port = 9702 - validator = true - - [[relaychain.nodes]] - name = "dave" - ws_port = 9703 - validator = true - -[[parachains]] -id = 1001 -chain = "collectives-polkadot-local" -cumulus_based = true - - [[parachains.collators]] - name = "collator1" - ws_port = 9710 - command = "./bin/polkadot-parachain" - args = [ "-lxcm=trace", "--state-cache-size=0" ] - - [[parachains.collators]] - name = "collator2" - ws_port = 9711 - command = "./bin/polkadot-parachain" - args = ["-lxcm=trace"] diff --git a/cumulus/parachains/runtimes/assets/asset-hub-kusama/Cargo.toml 
b/cumulus/parachains/runtimes/assets/asset-hub-kusama/Cargo.toml deleted file mode 100644 index f71499e0c29..00000000000 --- a/cumulus/parachains/runtimes/assets/asset-hub-kusama/Cargo.toml +++ /dev/null @@ -1,236 +0,0 @@ -[package] -name = "asset-hub-kusama-runtime" -version = "0.9.420" -authors.workspace = true -edition.workspace = true -description = "Kusama variant of Asset Hub parachain runtime" -license = "Apache-2.0" - -[dependencies] -codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive", "max-encoded-len"] } -hex-literal = { version = "0.4.1" } -log = { version = "0.4.20", default-features = false } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -smallvec = "1.11.0" - -# Substrate -frame-benchmarking = { path = "../../../../../substrate/frame/benchmarking", default-features = false, optional = true} -frame-executive = { path = "../../../../../substrate/frame/executive", default-features = false} -frame-support = { path = "../../../../../substrate/frame/support", default-features = false} -frame-system = { path = "../../../../../substrate/frame/system", default-features = false} -frame-system-benchmarking = { path = "../../../../../substrate/frame/system/benchmarking", default-features = false, optional = true} -frame-system-rpc-runtime-api = { path = "../../../../../substrate/frame/system/rpc/runtime-api", default-features = false} -frame-try-runtime = { path = "../../../../../substrate/frame/try-runtime", default-features = false, optional = true} -pallet-asset-conversion-tx-payment = { path = "../../../../../substrate/frame/transaction-payment/asset-conversion-tx-payment", default-features = false} -pallet-assets = { path = "../../../../../substrate/frame/assets", default-features = false} -pallet-asset-conversion = { path = "../../../../../substrate/frame/asset-conversion", default-features = false} -pallet-aura = { path = "../../../../../substrate/frame/aura", default-features = false} -pallet-authorship = { path = "../../../../../substrate/frame/authorship", default-features = false} -pallet-balances = { path = "../../../../../substrate/frame/balances", default-features = false} -pallet-multisig = { path = "../../../../../substrate/frame/multisig", default-features = false} -pallet-nft-fractionalization = { path = "../../../../../substrate/frame/nft-fractionalization", default-features = false} -pallet-nfts = { path = "../../../../../substrate/frame/nfts", default-features = false} -pallet-nfts-runtime-api = { path = "../../../../../substrate/frame/nfts/runtime-api", default-features = false} -pallet-proxy = { path = "../../../../../substrate/frame/proxy", default-features = false} -pallet-session = { path = "../../../../../substrate/frame/session", default-features = false} -pallet-state-trie-migration = { path = "../../../../../substrate/frame/state-trie-migration", default-features = false, optional = true } -pallet-timestamp = { path = "../../../../../substrate/frame/timestamp", default-features = false} -pallet-transaction-payment = { path = "../../../../../substrate/frame/transaction-payment", default-features = false} -pallet-transaction-payment-rpc-runtime-api = { path = "../../../../../substrate/frame/transaction-payment/rpc/runtime-api", default-features = false} -pallet-uniques = { path = "../../../../../substrate/frame/uniques", default-features = false} -pallet-utility = { path = "../../../../../substrate/frame/utility", default-features = false} -sp-api = { path = 
"../../../../../substrate/primitives/api", default-features = false} -sp-block-builder = { path = "../../../../../substrate/primitives/block-builder", default-features = false} -sp-consensus-aura = { path = "../../../../../substrate/primitives/consensus/aura", default-features = false} -sp-core = { path = "../../../../../substrate/primitives/core", default-features = false} -sp-genesis-builder = { path = "../../../../../substrate/primitives/genesis-builder", default-features = false } -sp-inherents = { path = "../../../../../substrate/primitives/inherents", default-features = false} -sp-offchain = { path = "../../../../../substrate/primitives/offchain", default-features = false} -sp-runtime = { path = "../../../../../substrate/primitives/runtime", default-features = false} -sp-session = { path = "../../../../../substrate/primitives/session", default-features = false} -sp-std = { path = "../../../../../substrate/primitives/std", default-features = false} -sp-storage = { path = "../../../../../substrate/primitives/storage", default-features = false} -sp-transaction-pool = { path = "../../../../../substrate/primitives/transaction-pool", default-features = false} -sp-version = { path = "../../../../../substrate/primitives/version", default-features = false} -sp-weights = { path = "../../../../../substrate/primitives/weights", default-features = false} -# num-traits feature needed for dex integer sq root: -primitive-types = { version = "0.12.1", default-features = false, features = ["codec", "scale-info", "num-traits"] } - -# Polkadot -pallet-xcm = { path = "../../../../../polkadot/xcm/pallet-xcm", default-features = false} -pallet-xcm-benchmarks = { path = "../../../../../polkadot/xcm/pallet-xcm-benchmarks", default-features = false, optional = true } -polkadot-core-primitives = { path = "../../../../../polkadot/core-primitives", default-features = false} -polkadot-parachain-primitives = { path = "../../../../../polkadot/parachain", default-features = false} -polkadot-runtime-common = { path = "../../../../../polkadot/runtime/common", default-features = false} -xcm = { package = "staging-xcm", path = "../../../../../polkadot/xcm", default-features = false} -xcm-builder = { package = "staging-xcm-builder", path = "../../../../../polkadot/xcm/xcm-builder", default-features = false} -xcm-executor = { package = "staging-xcm-executor", path = "../../../../../polkadot/xcm/xcm-executor", default-features = false} - -# Cumulus -cumulus-pallet-aura-ext = { path = "../../../../pallets/aura-ext", default-features = false } -pallet-message-queue = { path = "../../../../../substrate/frame/message-queue", default-features = false } - -cumulus-pallet-dmp-queue = { path = "../../../../pallets/dmp-queue", default-features = false } -cumulus-pallet-parachain-system = { path = "../../../../pallets/parachain-system", default-features = false, features = ["parameterized-consensus-hook",] } -cumulus-pallet-session-benchmarking = { path = "../../../../pallets/session-benchmarking", default-features = false} -cumulus-pallet-xcm = { path = "../../../../pallets/xcm", default-features = false } -cumulus-pallet-xcmp-queue = { path = "../../../../pallets/xcmp-queue", default-features = false } -cumulus-primitives-core = { path = "../../../../primitives/core", default-features = false } -cumulus-primitives-utility = { path = "../../../../primitives/utility", default-features = false } -pallet-collator-selection = { path = "../../../../pallets/collator-selection", default-features = false } -parachain-info = { package = 
"staging-parachain-info", path = "../../../pallets/parachain-info", default-features = false } -parachains-common = { path = "../../../common", default-features = false } -assets-common = { path = "../common", default-features = false } - -[dev-dependencies] -asset-test-utils = { path = "../test-utils" } - -[build-dependencies] -substrate-wasm-builder = { path = "../../../../../substrate/utils/wasm-builder", optional = true } - -[features] -default = [ "std" ] -# When enabled the `state_version` is set to `1`. -# This means that the chain will start using the new state format. The migration is lazy, so -# it requires to write a storage value to use the new state format. To migrate all the other -# storage values that aren't touched the state migration pallet is added as well. -# This pallet will migrate the entire state, controlled through some account. -# -# This feature should be removed when the main-net will be migrated. -state-trie-version-1 = [ "pallet-state-trie-migration" ] -runtime-benchmarks = [ - "assets-common/runtime-benchmarks", - "cumulus-pallet-dmp-queue/runtime-benchmarks", - "cumulus-pallet-parachain-system/runtime-benchmarks", - "cumulus-pallet-session-benchmarking/runtime-benchmarks", - "cumulus-pallet-xcmp-queue/runtime-benchmarks", - "cumulus-primitives-core/runtime-benchmarks", - "cumulus-primitives-utility/runtime-benchmarks", - "frame-benchmarking/runtime-benchmarks", - "frame-support/runtime-benchmarks", - "frame-system-benchmarking/runtime-benchmarks", - "frame-system/runtime-benchmarks", - "pallet-asset-conversion/runtime-benchmarks", - "pallet-assets/runtime-benchmarks", - "pallet-balances/runtime-benchmarks", - "pallet-collator-selection/runtime-benchmarks", - "pallet-message-queue/runtime-benchmarks", - "pallet-multisig/runtime-benchmarks", - "pallet-nft-fractionalization/runtime-benchmarks", - "pallet-nfts/runtime-benchmarks", - "pallet-proxy/runtime-benchmarks", - "pallet-state-trie-migration/runtime-benchmarks", - "pallet-timestamp/runtime-benchmarks", - "pallet-uniques/runtime-benchmarks", - "pallet-utility/runtime-benchmarks", - "pallet-xcm-benchmarks/runtime-benchmarks", - "pallet-xcm/runtime-benchmarks", - "parachains-common/runtime-benchmarks", - "polkadot-parachain-primitives/runtime-benchmarks", - "polkadot-runtime-common/runtime-benchmarks", - "sp-runtime/runtime-benchmarks", - "xcm-builder/runtime-benchmarks", - "xcm-executor/runtime-benchmarks", -] -try-runtime = [ - "cumulus-pallet-aura-ext/try-runtime", - "cumulus-pallet-dmp-queue/try-runtime", - "cumulus-pallet-parachain-system/try-runtime", - "cumulus-pallet-xcm/try-runtime", - "cumulus-pallet-xcmp-queue/try-runtime", - "frame-executive/try-runtime", - "frame-support/try-runtime", - "frame-system/try-runtime", - "frame-try-runtime/try-runtime", - "pallet-asset-conversion-tx-payment/try-runtime", - "pallet-asset-conversion/try-runtime", - "pallet-assets/try-runtime", - "pallet-aura/try-runtime", - "pallet-authorship/try-runtime", - "pallet-balances/try-runtime", - "pallet-collator-selection/try-runtime", - "pallet-message-queue/try-runtime", - "pallet-multisig/try-runtime", - "pallet-nft-fractionalization/try-runtime", - "pallet-nfts/try-runtime", - "pallet-proxy/try-runtime", - "pallet-session/try-runtime", - "pallet-state-trie-migration/try-runtime", - "pallet-timestamp/try-runtime", - "pallet-transaction-payment/try-runtime", - "pallet-uniques/try-runtime", - "pallet-utility/try-runtime", - "pallet-xcm/try-runtime", - "parachain-info/try-runtime", - "polkadot-runtime-common/try-runtime", - 
"sp-runtime/try-runtime", -] -std = [ - "assets-common/std", - "codec/std", - "cumulus-pallet-aura-ext/std", - "cumulus-pallet-dmp-queue/std", - "cumulus-pallet-parachain-system/std", - "cumulus-pallet-session-benchmarking/std", - "cumulus-pallet-xcm/std", - "cumulus-pallet-xcmp-queue/std", - "cumulus-primitives-core/std", - "cumulus-primitives-utility/std", - "frame-benchmarking?/std", - "frame-executive/std", - "frame-support/std", - "frame-system-benchmarking?/std", - "frame-system-rpc-runtime-api/std", - "frame-system/std", - "frame-try-runtime?/std", - "log/std", - "pallet-asset-conversion-tx-payment/std", - "pallet-asset-conversion/std", - "pallet-assets/std", - "pallet-aura/std", - "pallet-authorship/std", - "pallet-balances/std", - "pallet-collator-selection/std", - "pallet-message-queue/std", - "pallet-multisig/std", - "pallet-nft-fractionalization/std", - "pallet-nfts-runtime-api/std", - "pallet-nfts/std", - "pallet-proxy/std", - "pallet-session/std", - "pallet-state-trie-migration/std", - "pallet-timestamp/std", - "pallet-transaction-payment-rpc-runtime-api/std", - "pallet-transaction-payment/std", - "pallet-uniques/std", - "pallet-utility/std", - "pallet-xcm-benchmarks?/std", - "pallet-xcm/std", - "parachain-info/std", - "parachains-common/std", - "polkadot-core-primitives/std", - "polkadot-parachain-primitives/std", - "polkadot-runtime-common/std", - "primitive-types/std", - "scale-info/std", - "sp-api/std", - "sp-block-builder/std", - "sp-consensus-aura/std", - "sp-core/std", - "sp-genesis-builder/std", - "sp-inherents/std", - "sp-offchain/std", - "sp-runtime/std", - "sp-session/std", - "sp-std/std", - "sp-storage/std", - "sp-transaction-pool/std", - "sp-version/std", - "sp-weights/std", - "substrate-wasm-builder", - "xcm-builder/std", - "xcm-executor/std", - "xcm/std", -] - -experimental = [ "pallet-aura/experimental" ] diff --git a/cumulus/parachains/runtimes/assets/asset-hub-kusama/build.rs b/cumulus/parachains/runtimes/assets/asset-hub-kusama/build.rs deleted file mode 100644 index 60f8a125129..00000000000 --- a/cumulus/parachains/runtimes/assets/asset-hub-kusama/build.rs +++ /dev/null @@ -1,26 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -#[cfg(feature = "std")] -fn main() { - substrate_wasm_builder::WasmBuilder::new() - .with_current_project() - .export_heap_base() - .import_memory() - .build() -} - -#[cfg(not(feature = "std"))] -fn main() {} diff --git a/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/lib.rs b/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/lib.rs deleted file mode 100644 index 4c76262f60a..00000000000 --- a/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/lib.rs +++ /dev/null @@ -1,1549 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! # Asset Hub Kusama Runtime -//! -//! Asset Hub Kusama, formerly known as "Statemine", is the canary network for its Polkadot cousin. - -#![cfg_attr(not(feature = "std"), no_std)] -#![recursion_limit = "256"] - -// Make the WASM binary available. -#[cfg(feature = "std")] -include!(concat!(env!("OUT_DIR"), "/wasm_binary.rs")); - -mod weights; -pub mod xcm_config; - -use assets_common::{ - foreign_creators::ForeignCreators, - local_and_foreign_assets::{LocalAndForeignAssets, MultiLocationConverter}, - matching::FromSiblingParachain, - AssetIdForTrustBackedAssetsConvert, MultiLocationForAssetId, -}; -use cumulus_pallet_parachain_system::RelayNumberStrictlyIncreases; -use polkadot_runtime_common::xcm_sender::NoPriceForMessageDelivery; -use sp_api::impl_runtime_apis; -use sp_core::{crypto::KeyTypeId, OpaqueMetadata}; -use sp_runtime::{ - create_runtime_str, generic, impl_opaque_keys, - traits::{AccountIdConversion, AccountIdLookup, BlakeTwo256, Block as BlockT, Verify}, - transaction_validity::{TransactionSource, TransactionValidity}, - ApplyExtrinsicResult, Perbill, Permill, -}; - -use sp_std::prelude::*; -#[cfg(feature = "std")] -use sp_version::NativeVersion; -use sp_version::RuntimeVersion; - -use codec::{Decode, Encode, MaxEncodedLen}; -use cumulus_primitives_core::{AggregateMessageOrigin, ParaId}; -use frame_support::{ - construct_runtime, - dispatch::DispatchClass, - genesis_builder_helper::{build_config, create_default_config}, - ord_parameter_types, parameter_types, - traits::{ - AsEnsureOriginWithArg, ConstBool, ConstU128, ConstU32, ConstU64, ConstU8, EitherOfDiverse, - InstanceFilter, TransformOrigin, - }, - weights::{ConstantMultiplier, Weight}, - BoundedVec, PalletId, -}; -use frame_system::{ - limits::{BlockLength, BlockWeights}, - EnsureRoot, EnsureSigned, EnsureSignedBy, -}; -use pallet_asset_conversion_tx_payment::AssetConversionAdapter; -use pallet_nfts::PalletFeatures; -pub use parachains_common as common; -use parachains_common::{ - impls::DealWithFees, - kusama::{consensus::*, currency::*, fee::WeightToFee}, - message_queue::{NarrowOriginToSibling, ParaIdToSibling}, - AccountId, AssetIdForTrustBackedAssets, AuraId, Balance, BlockNumber, Hash, Header, Nonce, - Signature, AVERAGE_ON_INITIALIZE_RATIO, DAYS, HOURS, MAXIMUM_BLOCK_WEIGHT, - NORMAL_DISPATCH_RATIO, SLOT_DURATION, -}; -use sp_runtime::RuntimeDebug; -use xcm::opaque::v3::MultiLocation; -use xcm_config::{ - FellowshipLocation, ForeignAssetsConvertedConcreteId, GovernanceLocation, KsmLocation, - PoolAssetsConvertedConcreteId, TrustBackedAssetsConvertedConcreteId, -}; - -#[cfg(any(feature = "std", test))] -pub use sp_runtime::BuildStorage; - -// Polkadot imports -use pallet_xcm::{EnsureXcm, IsVoiceOfBody}; -use polkadot_runtime_common::{BlockHashCount, SlowAdjustingFeeUpdate}; -use xcm::latest::BodyId; - -use crate::xcm_config::{ - ForeignCreatorsSovereignAccountOf, LocalAndForeignAssetsMultiLocationMatcher, - TrustBackedAssetsPalletLocation, -}; -use weights::{BlockExecutionWeight, ExtrinsicBaseWeight, RocksDbWeight}; - -impl_opaque_keys! 
{ - pub struct SessionKeys { - pub aura: Aura, - } -} - -#[cfg(feature = "state-trie-version-1")] -#[sp_version::runtime_version] -pub const VERSION: RuntimeVersion = RuntimeVersion { - // Note: "statemine" is the legacy name for this chain. It has been renamed to - // "asset-hub-kusama". Many wallets/tools depend on the `spec_name`, so it remains "statemine" - // for the time being. Wallets/tools should update to treat "asset-hub-kusama" equally. - spec_name: create_runtime_str!("statemine"), - impl_name: create_runtime_str!("statemine"), - authoring_version: 1, - spec_version: 1_004_000, - impl_version: 0, - apis: RUNTIME_API_VERSIONS, - transaction_version: 13, - state_version: 1, -}; - -#[cfg(not(feature = "state-trie-version-1"))] -#[sp_version::runtime_version] -pub const VERSION: RuntimeVersion = RuntimeVersion { - // Note: "statemine" is the legacy name for this chain. It has been renamed to - // "asset-hub-kusama". Many wallets/tools depend on the `spec_name`, so it remains "statemine" - // for the time being. Wallets/tools should update to treat "asset-hub-kusama" equally. - spec_name: create_runtime_str!("statemine"), - impl_name: create_runtime_str!("statemine"), - authoring_version: 1, - spec_version: 1_004_000, - impl_version: 0, - apis: RUNTIME_API_VERSIONS, - transaction_version: 13, - state_version: 0, -}; - -/// The version information used to identify this runtime when compiled natively. -#[cfg(feature = "std")] -pub fn native_version() -> NativeVersion { - NativeVersion { runtime_version: VERSION, can_author_with: Default::default() } -} - -parameter_types! { - pub const Version: RuntimeVersion = VERSION; - pub RuntimeBlockLength: BlockLength = - BlockLength::max_with_normal_ratio(5 * 1024 * 1024, NORMAL_DISPATCH_RATIO); - pub RuntimeBlockWeights: BlockWeights = BlockWeights::builder() - .base_block(BlockExecutionWeight::get()) - .for_class(DispatchClass::all(), |weights| { - weights.base_extrinsic = ExtrinsicBaseWeight::get(); - }) - .for_class(DispatchClass::Normal, |weights| { - weights.max_total = Some(NORMAL_DISPATCH_RATIO * MAXIMUM_BLOCK_WEIGHT); - }) - .for_class(DispatchClass::Operational, |weights| { - weights.max_total = Some(MAXIMUM_BLOCK_WEIGHT); - // Operational transactions have some extra reserved space, so that they - // are included even if block reached `MAXIMUM_BLOCK_WEIGHT`. - weights.reserved = Some( - MAXIMUM_BLOCK_WEIGHT - NORMAL_DISPATCH_RATIO * MAXIMUM_BLOCK_WEIGHT - ); - }) - .avg_block_initialization(AVERAGE_ON_INITIALIZE_RATIO) - .build_or_panic(); - pub const SS58Prefix: u8 = 2; -} - -// Configure FRAME pallets to include in runtime.
-impl frame_system::Config for Runtime { - type BaseCallFilter = frame_support::traits::Everything; - type BlockWeights = RuntimeBlockWeights; - type BlockLength = RuntimeBlockLength; - type AccountId = AccountId; - type RuntimeCall = RuntimeCall; - type Lookup = AccountIdLookup; - type Nonce = Nonce; - type Hash = Hash; - type Hashing = BlakeTwo256; - type Block = Block; - type RuntimeEvent = RuntimeEvent; - type RuntimeOrigin = RuntimeOrigin; - type BlockHashCount = BlockHashCount; - type DbWeight = RocksDbWeight; - type Version = Version; - type PalletInfo = PalletInfo; - type OnNewAccount = (); - type OnKilledAccount = (); - type AccountData = pallet_balances::AccountData; - type SystemWeightInfo = weights::frame_system::WeightInfo; - type SS58Prefix = SS58Prefix; - type OnSetCode = cumulus_pallet_parachain_system::ParachainSetCode; - type MaxConsumers = frame_support::traits::ConstU32<16>; -} - -impl pallet_timestamp::Config for Runtime { - /// A timestamp: milliseconds since the unix epoch. - type Moment = u64; - type OnTimestampSet = Aura; - type MinimumPeriod = ConstU64<{ SLOT_DURATION / 2 }>; - type WeightInfo = weights::pallet_timestamp::WeightInfo; -} - -impl pallet_authorship::Config for Runtime { - type FindAuthor = pallet_session::FindAccountFromAuthorIndex; - type EventHandler = (CollatorSelection,); -} - -parameter_types! { - pub const ExistentialDeposit: Balance = EXISTENTIAL_DEPOSIT; -} - -impl pallet_balances::Config for Runtime { - type MaxLocks = ConstU32<50>; - /// The type for recording an account's balance. - type Balance = Balance; - /// The ubiquitous event type. - type RuntimeEvent = RuntimeEvent; - type DustRemoval = (); - type ExistentialDeposit = ExistentialDeposit; - type AccountStore = System; - type WeightInfo = weights::pallet_balances::WeightInfo; - type MaxReserves = ConstU32<50>; - type ReserveIdentifier = [u8; 8]; - type RuntimeHoldReason = RuntimeHoldReason; - type RuntimeFreezeReason = RuntimeFreezeReason; - type FreezeIdentifier = (); - // We allow each account to have holds on it from: - // - `NftFractionalization`: 1 - type MaxHolds = ConstU32<1>; - type MaxFreezes = ConstU32<0>; -} - -parameter_types! { - /// Relay Chain `TransactionByteFee` / 10 - pub const TransactionByteFee: Balance = MILLICENTS; -} - -impl pallet_transaction_payment::Config for Runtime { - type RuntimeEvent = RuntimeEvent; - type OnChargeTransaction = - pallet_transaction_payment::CurrencyAdapter>; - type WeightToFee = WeightToFee; - type LengthToFee = ConstantMultiplier; - type FeeMultiplierUpdate = SlowAdjustingFeeUpdate; - type OperationalFeeMultiplier = ConstU8<5>; -} - -parameter_types! { - pub const AssetDeposit: Balance = UNITS / 10; // 1 / 10 UNITS deposit to create asset - pub const AssetAccountDeposit: Balance = deposit(1, 16); - pub const ApprovalDeposit: Balance = EXISTENTIAL_DEPOSIT; - pub const AssetsStringLimit: u32 = 50; - /// Key = 32 bytes, Value = 36 bytes (32+1+1+1+1) - // https://github.com/paritytech/substrate/blob/069917b/frame/assets/src/lib.rs#L257L271 - pub const MetadataDepositBase: Balance = deposit(1, 68); - pub const MetadataDepositPerByte: Balance = deposit(0, 1); -} - -/// We allow root to execute privileged asset operations. -pub type AssetsForceOrigin = EnsureRoot; - -// Called "Trust Backed" assets because these are generally registered by some account, and users of -// the asset assume it has some claimed backing. The pallet is called `Assets` in -// `construct_runtime` to avoid breaking changes on storage reads. 
-pub type TrustBackedAssetsInstance = pallet_assets::Instance1; -type TrustBackedAssetsCall = pallet_assets::Call; -impl pallet_assets::Config for Runtime { - type RuntimeEvent = RuntimeEvent; - type Balance = Balance; - type AssetId = AssetIdForTrustBackedAssets; - type AssetIdParameter = codec::Compact; - type Currency = Balances; - type CreateOrigin = AsEnsureOriginWithArg>; - type ForceOrigin = AssetsForceOrigin; - type AssetDeposit = AssetDeposit; - type MetadataDepositBase = MetadataDepositBase; - type MetadataDepositPerByte = MetadataDepositPerByte; - type ApprovalDeposit = ApprovalDeposit; - type StringLimit = AssetsStringLimit; - type Freezer = (); - type Extra = (); - type WeightInfo = weights::pallet_assets_local::WeightInfo; - type CallbackHandle = (); - type AssetAccountDeposit = AssetAccountDeposit; - type RemoveItemsLimit = frame_support::traits::ConstU32<1000>; - #[cfg(feature = "runtime-benchmarks")] - type BenchmarkHelper = (); -} - -parameter_types! { - pub const AssetConversionPalletId: PalletId = PalletId(*b"py/ascon"); - pub const AllowMultiAssetPools: bool = false; - // should be non-zero if AllowMultiAssetPools is true, otherwise can be zero - pub const LiquidityWithdrawalFee: Permill = Permill::from_percent(0); -} - -ord_parameter_types! { - pub const AssetConversionOrigin: sp_runtime::AccountId32 = - AccountIdConversion::::into_account_truncating(&AssetConversionPalletId::get()); -} - -pub type PoolAssetsInstance = pallet_assets::Instance3; -impl pallet_assets::Config for Runtime { - type RuntimeEvent = RuntimeEvent; - type Balance = Balance; - type RemoveItemsLimit = ConstU32<1000>; - type AssetId = u32; - type AssetIdParameter = u32; - type Currency = Balances; - type CreateOrigin = - AsEnsureOriginWithArg>; - type ForceOrigin = AssetsForceOrigin; - // Deposits are zero because creation/admin is limited to Asset Conversion pallet. - type AssetDeposit = ConstU128<0>; - type AssetAccountDeposit = ConstU128<0>; - type MetadataDepositBase = ConstU128<0>; - type MetadataDepositPerByte = ConstU128<0>; - type ApprovalDeposit = ApprovalDeposit; - type StringLimit = ConstU32<50>; - type Freezer = (); - type Extra = (); - type WeightInfo = weights::pallet_assets_pool::WeightInfo; - type CallbackHandle = (); - #[cfg(feature = "runtime-benchmarks")] - type BenchmarkHelper = (); -} - -impl pallet_asset_conversion::Config for Runtime { - type RuntimeEvent = RuntimeEvent; - type Balance = Balance; - type HigherPrecisionBalance = sp_core::U256; - type Currency = Balances; - type AssetBalance = Balance; - type AssetId = MultiLocation; - type Assets = LocalAndForeignAssets< - Assets, - AssetIdForTrustBackedAssetsConvert, - ForeignAssets, - >; - type PoolAssets = PoolAssets; - type PoolAssetId = u32; - type PoolSetupFee = ConstU128<0>; // Asset class deposit fees are sufficient to prevent spam - type PoolSetupFeeReceiver = AssetConversionOrigin; - // should be non-zero if `AllowMultiAssetPools` is true, otherwise can be zero. - type LiquidityWithdrawalFee = LiquidityWithdrawalFee; - type LPFee = ConstU32<3>; - type PalletId = AssetConversionPalletId; - type AllowMultiAssetPools = AllowMultiAssetPools; - type MaxSwapPathLength = ConstU32<4>; - type MultiAssetId = Box; - type MultiAssetIdConverter = - MultiLocationConverter; - type MintMinLiquidity = ConstU128<100>; - type WeightInfo = weights::pallet_asset_conversion::WeightInfo; - #[cfg(feature = "runtime-benchmarks")] - type BenchmarkHelper = - crate::xcm_config::BenchmarkMultiLocationConverter>; -} - -parameter_types! 
{ - // we just reuse the same deposits - pub const ForeignAssetsAssetDeposit: Balance = AssetDeposit::get(); - pub const ForeignAssetsAssetAccountDeposit: Balance = AssetAccountDeposit::get(); - pub const ForeignAssetsApprovalDeposit: Balance = ApprovalDeposit::get(); - pub const ForeignAssetsAssetsStringLimit: u32 = AssetsStringLimit::get(); - pub const ForeignAssetsMetadataDepositBase: Balance = MetadataDepositBase::get(); - pub const ForeignAssetsMetadataDepositPerByte: Balance = MetadataDepositPerByte::get(); -} - -/// Assets managed by some foreign location. Note: we do not declare a `ForeignAssetsCall` type, as -/// this type is used in proxy definitions. We assume that a foreign location would not want to set -/// an individual, local account as a proxy for the issuance of their assets. This issuance should -/// be managed by the foreign location's governance. -pub type ForeignAssetsInstance = pallet_assets::Instance2; -impl pallet_assets::Config for Runtime { - type RuntimeEvent = RuntimeEvent; - type Balance = Balance; - type AssetId = MultiLocationForAssetId; - type AssetIdParameter = MultiLocationForAssetId; - type Currency = Balances; - type CreateOrigin = ForeignCreators< - (FromSiblingParachain>,), - ForeignCreatorsSovereignAccountOf, - AccountId, - >; - type ForceOrigin = AssetsForceOrigin; - type AssetDeposit = ForeignAssetsAssetDeposit; - type MetadataDepositBase = ForeignAssetsMetadataDepositBase; - type MetadataDepositPerByte = ForeignAssetsMetadataDepositPerByte; - type ApprovalDeposit = ForeignAssetsApprovalDeposit; - type StringLimit = ForeignAssetsAssetsStringLimit; - type Freezer = (); - type Extra = (); - type WeightInfo = weights::pallet_assets_foreign::WeightInfo; - type CallbackHandle = (); - type AssetAccountDeposit = ForeignAssetsAssetAccountDeposit; - type RemoveItemsLimit = frame_support::traits::ConstU32<1000>; - #[cfg(feature = "runtime-benchmarks")] - type BenchmarkHelper = xcm_config::XcmBenchmarkHelper; -} - -parameter_types! { - // One storage item; key size is 32; value is size 4+4+16+32 bytes = 56 bytes. - pub const DepositBase: Balance = deposit(1, 88); - // Additional storage item size of 32 bytes. - pub const DepositFactor: Balance = deposit(0, 32); - pub const MaxSignatories: u32 = 100; -} - -impl pallet_multisig::Config for Runtime { - type RuntimeEvent = RuntimeEvent; - type RuntimeCall = RuntimeCall; - type Currency = Balances; - type DepositBase = DepositBase; - type DepositFactor = DepositFactor; - type MaxSignatories = MaxSignatories; - type WeightInfo = weights::pallet_multisig::WeightInfo; -} - -impl pallet_utility::Config for Runtime { - type RuntimeEvent = RuntimeEvent; - type RuntimeCall = RuntimeCall; - type PalletsOrigin = OriginCaller; - type WeightInfo = weights::pallet_utility::WeightInfo; -} - -parameter_types! { - // One storage item; key size 32, value size 8; . - pub const ProxyDepositBase: Balance = deposit(1, 40); - // Additional storage item size of 33 bytes. - pub const ProxyDepositFactor: Balance = deposit(0, 33); - pub const MaxProxies: u16 = 32; - // One storage item; key size 32, value size 16 - pub const AnnouncementDepositBase: Balance = deposit(1, 48); - pub const AnnouncementDepositFactor: Balance = deposit(0, 66); - pub const MaxPending: u16 = 32; -} - -/// The type used to represent the kinds of proxying allowed. -#[derive( - Copy, - Clone, - Eq, - PartialEq, - Ord, - PartialOrd, - Encode, - Decode, - RuntimeDebug, - MaxEncodedLen, - scale_info::TypeInfo, -)] -pub enum ProxyType { - /// Fully permissioned proxy. 
Can execute any call on behalf of _proxied_. - Any, - /// Can execute any call that does not transfer funds or assets. - NonTransfer, - /// Proxy with the ability to reject time-delay proxy announcements. - CancelProxy, - /// Assets proxy. Can execute any call from `assets`, **including asset transfers**. - Assets, - /// Owner proxy. Can execute calls related to asset ownership. - AssetOwner, - /// Asset manager. Can execute calls related to asset management. - AssetManager, - /// Collator selection proxy. Can execute calls related to collator selection mechanism. - Collator, -} -impl Default for ProxyType { - fn default() -> Self { - Self::Any - } -} - -impl InstanceFilter for ProxyType { - fn filter(&self, c: &RuntimeCall) -> bool { - match self { - ProxyType::Any => true, - ProxyType::NonTransfer => !matches!( - c, - RuntimeCall::Balances { .. } | - RuntimeCall::Assets { .. } | - RuntimeCall::NftFractionalization { .. } | - RuntimeCall::Nfts { .. } | - RuntimeCall::Uniques { .. } - ), - ProxyType::CancelProxy => matches!( - c, - RuntimeCall::Proxy(pallet_proxy::Call::reject_announcement { .. }) | - RuntimeCall::Utility { .. } | - RuntimeCall::Multisig { .. } - ), - ProxyType::Assets => { - matches!( - c, - RuntimeCall::Assets { .. } | - RuntimeCall::Utility { .. } | - RuntimeCall::Multisig { .. } | - RuntimeCall::NftFractionalization { .. } | - RuntimeCall::Nfts { .. } | RuntimeCall::Uniques { .. } - ) - }, - ProxyType::AssetOwner => matches!( - c, - RuntimeCall::Assets(TrustBackedAssetsCall::create { .. }) | - RuntimeCall::Assets(TrustBackedAssetsCall::start_destroy { .. }) | - RuntimeCall::Assets(TrustBackedAssetsCall::destroy_accounts { .. }) | - RuntimeCall::Assets(TrustBackedAssetsCall::destroy_approvals { .. }) | - RuntimeCall::Assets(TrustBackedAssetsCall::finish_destroy { .. }) | - RuntimeCall::Assets(TrustBackedAssetsCall::transfer_ownership { .. }) | - RuntimeCall::Assets(TrustBackedAssetsCall::set_team { .. }) | - RuntimeCall::Assets(TrustBackedAssetsCall::set_metadata { .. }) | - RuntimeCall::Assets(TrustBackedAssetsCall::clear_metadata { .. }) | - RuntimeCall::Assets(TrustBackedAssetsCall::set_min_balance { .. }) | - RuntimeCall::Nfts(pallet_nfts::Call::create { .. }) | - RuntimeCall::Nfts(pallet_nfts::Call::destroy { .. }) | - RuntimeCall::Nfts(pallet_nfts::Call::redeposit { .. }) | - RuntimeCall::Nfts(pallet_nfts::Call::transfer_ownership { .. }) | - RuntimeCall::Nfts(pallet_nfts::Call::set_team { .. }) | - RuntimeCall::Nfts(pallet_nfts::Call::set_collection_max_supply { .. }) | - RuntimeCall::Nfts(pallet_nfts::Call::lock_collection { .. }) | - RuntimeCall::Uniques(pallet_uniques::Call::create { .. }) | - RuntimeCall::Uniques(pallet_uniques::Call::destroy { .. }) | - RuntimeCall::Uniques(pallet_uniques::Call::transfer_ownership { .. }) | - RuntimeCall::Uniques(pallet_uniques::Call::set_team { .. }) | - RuntimeCall::Uniques(pallet_uniques::Call::set_metadata { .. }) | - RuntimeCall::Uniques(pallet_uniques::Call::set_attribute { .. }) | - RuntimeCall::Uniques(pallet_uniques::Call::set_collection_metadata { .. }) | - RuntimeCall::Uniques(pallet_uniques::Call::clear_metadata { .. }) | - RuntimeCall::Uniques(pallet_uniques::Call::clear_attribute { .. }) | - RuntimeCall::Uniques(pallet_uniques::Call::clear_collection_metadata { .. }) | - RuntimeCall::Uniques(pallet_uniques::Call::set_collection_max_supply { .. }) | - RuntimeCall::Utility { .. } | - RuntimeCall::Multisig { .. 
} - ), - ProxyType::AssetManager => matches!( - c, - RuntimeCall::Assets(TrustBackedAssetsCall::mint { .. }) | - RuntimeCall::Assets(TrustBackedAssetsCall::burn { .. }) | - RuntimeCall::Assets(TrustBackedAssetsCall::freeze { .. }) | - RuntimeCall::Assets(TrustBackedAssetsCall::block { .. }) | - RuntimeCall::Assets(TrustBackedAssetsCall::thaw { .. }) | - RuntimeCall::Assets(TrustBackedAssetsCall::freeze_asset { .. }) | - RuntimeCall::Assets(TrustBackedAssetsCall::thaw_asset { .. }) | - RuntimeCall::Assets(TrustBackedAssetsCall::touch_other { .. }) | - RuntimeCall::Assets(TrustBackedAssetsCall::refund_other { .. }) | - RuntimeCall::Nfts(pallet_nfts::Call::force_mint { .. }) | - RuntimeCall::Nfts(pallet_nfts::Call::update_mint_settings { .. }) | - RuntimeCall::Nfts(pallet_nfts::Call::mint_pre_signed { .. }) | - RuntimeCall::Nfts(pallet_nfts::Call::set_attributes_pre_signed { .. }) | - RuntimeCall::Nfts(pallet_nfts::Call::lock_item_transfer { .. }) | - RuntimeCall::Nfts(pallet_nfts::Call::unlock_item_transfer { .. }) | - RuntimeCall::Nfts(pallet_nfts::Call::lock_item_properties { .. }) | - RuntimeCall::Nfts(pallet_nfts::Call::set_metadata { .. }) | - RuntimeCall::Nfts(pallet_nfts::Call::clear_metadata { .. }) | - RuntimeCall::Nfts(pallet_nfts::Call::set_collection_metadata { .. }) | - RuntimeCall::Nfts(pallet_nfts::Call::clear_collection_metadata { .. }) | - RuntimeCall::Uniques(pallet_uniques::Call::mint { .. }) | - RuntimeCall::Uniques(pallet_uniques::Call::burn { .. }) | - RuntimeCall::Uniques(pallet_uniques::Call::freeze { .. }) | - RuntimeCall::Uniques(pallet_uniques::Call::thaw { .. }) | - RuntimeCall::Uniques(pallet_uniques::Call::freeze_collection { .. }) | - RuntimeCall::Uniques(pallet_uniques::Call::thaw_collection { .. }) | - RuntimeCall::Utility { .. } | - RuntimeCall::Multisig { .. } - ), - ProxyType::Collator => matches!( - c, - RuntimeCall::CollatorSelection { .. } | - RuntimeCall::Utility { .. } | - RuntimeCall::Multisig { .. } - ), - } - } - - fn is_superset(&self, o: &Self) -> bool { - match (self, o) { - (x, y) if x == y => true, - (ProxyType::Any, _) => true, - (_, ProxyType::Any) => false, - (ProxyType::Assets, ProxyType::AssetOwner) => true, - (ProxyType::Assets, ProxyType::AssetManager) => true, - (ProxyType::NonTransfer, ProxyType::Collator) => true, - _ => false, - } - } -} - -impl pallet_proxy::Config for Runtime { - type RuntimeEvent = RuntimeEvent; - type RuntimeCall = RuntimeCall; - type Currency = Balances; - type ProxyType = ProxyType; - type ProxyDepositBase = ProxyDepositBase; - type ProxyDepositFactor = ProxyDepositFactor; - type MaxProxies = MaxProxies; - type WeightInfo = weights::pallet_proxy::WeightInfo; - type MaxPending = MaxPending; - type CallHasher = BlakeTwo256; - type AnnouncementDepositBase = AnnouncementDepositBase; - type AnnouncementDepositFactor = AnnouncementDepositFactor; -} - -parameter_types! 
{ - pub const ReservedXcmpWeight: Weight = MAXIMUM_BLOCK_WEIGHT.saturating_div(4); - pub const ReservedDmpWeight: Weight = MAXIMUM_BLOCK_WEIGHT.saturating_div(4); -} - -impl cumulus_pallet_parachain_system::Config for Runtime { - type WeightInfo = weights::cumulus_pallet_parachain_system::WeightInfo; - type RuntimeEvent = RuntimeEvent; - type OnSystemEvent = (); - type SelfParaId = parachain_info::Pallet; - type DmpQueue = frame_support::traits::EnqueueWithOrigin; - type ReservedDmpWeight = ReservedDmpWeight; - type OutboundXcmpMessageSource = XcmpQueue; - type XcmpMessageHandler = XcmpQueue; - type ReservedXcmpWeight = ReservedXcmpWeight; - type CheckAssociatedRelayNumber = RelayNumberStrictlyIncreases; - type ConsensusHook = cumulus_pallet_aura_ext::FixedVelocityConsensusHook< - Runtime, - RELAY_CHAIN_SLOT_DURATION_MILLIS, - BLOCK_PROCESSING_VELOCITY, - UNINCLUDED_SEGMENT_CAPACITY, - >; -} - -impl parachain_info::Config for Runtime {} - -parameter_types! { - pub MessageQueueServiceWeight: Weight = Perbill::from_percent(35) * RuntimeBlockWeights::get().max_block; -} - -impl pallet_message_queue::Config for Runtime { - type RuntimeEvent = RuntimeEvent; - type WeightInfo = weights::pallet_message_queue::WeightInfo; - #[cfg(feature = "runtime-benchmarks")] - type MessageProcessor = pallet_message_queue::mock_helpers::NoopMessageProcessor< - cumulus_primitives_core::AggregateMessageOrigin, - >; - #[cfg(not(feature = "runtime-benchmarks"))] - type MessageProcessor = xcm_builder::ProcessXcmMessage< - AggregateMessageOrigin, - xcm_executor::XcmExecutor, - RuntimeCall, - >; - type Size = u32; - // The XCMP queue pallet is only ever able to handle the `Sibling(ParaId)` origin: - type QueueChangeHandler = NarrowOriginToSibling; - type QueuePausedQuery = NarrowOriginToSibling; - type HeapSize = sp_core::ConstU32<{ 64 * 1024 }>; - type MaxStale = sp_core::ConstU32<8>; - type ServiceWeight = MessageQueueServiceWeight; -} - -impl cumulus_pallet_aura_ext::Config for Runtime {} - -parameter_types! { - // Fellows pluralistic body. - pub const FellowsBodyId: BodyId = BodyId::Technical; -} - -impl cumulus_pallet_xcmp_queue::Config for Runtime { - type RuntimeEvent = RuntimeEvent; - type ChannelInfo = ParachainSystem; - type VersionWrapper = PolkadotXcm; - // Enqueue XCMP messages from siblings for later processing. - type XcmpQueue = TransformOrigin; - type MaxInboundSuspended = sp_core::ConstU32<1_000>; - type ControllerOrigin = EitherOfDiverse< - EnsureRoot, - EnsureXcm>, - >; - type ControllerOriginConverter = xcm_config::XcmOriginToTransactDispatchOrigin; - type WeightInfo = weights::cumulus_pallet_xcmp_queue::WeightInfo; - type PriceForSiblingDelivery = NoPriceForMessageDelivery; -} - -parameter_types! { - pub const RelayOrigin: AggregateMessageOrigin = AggregateMessageOrigin::Parent; -} - -impl cumulus_pallet_dmp_queue::Config for Runtime { - type WeightInfo = weights::cumulus_pallet_dmp_queue::WeightInfo; - type RuntimeEvent = RuntimeEvent; - type DmpSink = frame_support::traits::EnqueueWithOrigin; -} - -parameter_types! { - pub const Period: u32 = 6 * HOURS; - pub const Offset: u32 = 0; -} - -impl pallet_session::Config for Runtime { - type RuntimeEvent = RuntimeEvent; - type ValidatorId = ::AccountId; - // we don't have stash and controller, thus we don't need the convert as well. 
- type ValidatorIdOf = pallet_collator_selection::IdentityCollator; - type ShouldEndSession = pallet_session::PeriodicSessions; - type NextSessionRotation = pallet_session::PeriodicSessions; - type SessionManager = CollatorSelection; - // Essentially just Aura, but let's be pedantic. - type SessionHandler = ::KeyTypeIdProviders; - type Keys = SessionKeys; - type WeightInfo = weights::pallet_session::WeightInfo; -} - -impl pallet_aura::Config for Runtime { - type AuthorityId = AuraId; - type DisabledValidators = (); - type MaxAuthorities = ConstU32<100_000>; - type AllowMultipleBlocksPerSlot = ConstBool; - #[cfg(feature = "experimental")] - type SlotDuration = pallet_aura::MinimumPeriodTimesTwo; -} - -parameter_types! { - pub const PotId: PalletId = PalletId(*b"PotStake"); - pub const SessionLength: BlockNumber = 6 * HOURS; - // StakingAdmin pluralistic body. - pub const StakingAdminBodyId: BodyId = BodyId::Defense; -} - -/// We allow root and the `StakingAdmin` to execute privileged collator selection operations. -pub type CollatorSelectionUpdateOrigin = EitherOfDiverse< - EnsureRoot, - EnsureXcm>, ->; - -impl pallet_collator_selection::Config for Runtime { - type RuntimeEvent = RuntimeEvent; - type Currency = Balances; - type UpdateOrigin = CollatorSelectionUpdateOrigin; - type PotId = PotId; - type MaxCandidates = ConstU32<100>; - type MinEligibleCollators = ConstU32<4>; - type MaxInvulnerables = ConstU32<20>; - // should be a multiple of session or things will get inconsistent - type KickThreshold = Period; - type ValidatorId = ::AccountId; - type ValidatorIdOf = pallet_collator_selection::IdentityCollator; - type ValidatorRegistration = Session; - type WeightInfo = weights::pallet_collator_selection::WeightInfo; -} - -impl pallet_asset_conversion_tx_payment::Config for Runtime { - type RuntimeEvent = RuntimeEvent; - type Fungibles = LocalAndForeignAssets< - Assets, - AssetIdForTrustBackedAssetsConvert, - ForeignAssets, - >; - type OnChargeAssetTransaction = AssetConversionAdapter; -} - -parameter_types! { - pub const UniquesCollectionDeposit: Balance = UNITS / 10; // 1 / 10 UNIT deposit to create a collection - pub const UniquesItemDeposit: Balance = UNITS / 1_000; // 1 / 1000 UNIT deposit to mint an item - pub const UniquesMetadataDepositBase: Balance = deposit(1, 129); - pub const UniquesAttributeDepositBase: Balance = deposit(1, 0); - pub const UniquesDepositPerByte: Balance = deposit(0, 1); -} - -impl pallet_uniques::Config for Runtime { - type RuntimeEvent = RuntimeEvent; - type CollectionId = u32; - type ItemId = u32; - type Currency = Balances; - type ForceOrigin = AssetsForceOrigin; - type CollectionDeposit = UniquesCollectionDeposit; - type ItemDeposit = UniquesItemDeposit; - type MetadataDepositBase = UniquesMetadataDepositBase; - type AttributeDepositBase = UniquesAttributeDepositBase; - type DepositPerByte = UniquesDepositPerByte; - type StringLimit = ConstU32<128>; - type KeyLimit = ConstU32<32>; - type ValueLimit = ConstU32<64>; - type WeightInfo = weights::pallet_uniques::WeightInfo; - #[cfg(feature = "runtime-benchmarks")] - type Helper = (); - type CreateOrigin = AsEnsureOriginWithArg>; - type Locker = (); -} - -parameter_types! 
{ - pub const NftFractionalizationPalletId: PalletId = PalletId(*b"fraction"); - pub NewAssetSymbol: BoundedVec = (*b"FRAC").to_vec().try_into().unwrap(); - pub NewAssetName: BoundedVec = (*b"Frac").to_vec().try_into().unwrap(); -} - -impl pallet_nft_fractionalization::Config for Runtime { - type RuntimeEvent = RuntimeEvent; - type Deposit = AssetDeposit; - type Currency = Balances; - type NewAssetSymbol = NewAssetSymbol; - type NewAssetName = NewAssetName; - type StringLimit = AssetsStringLimit; - type NftCollectionId = ::CollectionId; - type NftId = ::ItemId; - type AssetBalance = ::Balance; - type AssetId = >::AssetId; - type Assets = Assets; - type Nfts = Nfts; - type PalletId = NftFractionalizationPalletId; - type WeightInfo = pallet_nft_fractionalization::weights::SubstrateWeight; - type RuntimeHoldReason = RuntimeHoldReason; - #[cfg(feature = "runtime-benchmarks")] - type BenchmarkHelper = (); -} - -parameter_types! { - pub NftsPalletFeatures: PalletFeatures = PalletFeatures::all_enabled(); - pub const NftsMaxDeadlineDuration: BlockNumber = 12 * 30 * DAYS; - // re-use the Uniques deposits - pub const NftsCollectionDeposit: Balance = UniquesCollectionDeposit::get(); - pub const NftsItemDeposit: Balance = UniquesItemDeposit::get(); - pub const NftsMetadataDepositBase: Balance = UniquesMetadataDepositBase::get(); - pub const NftsAttributeDepositBase: Balance = UniquesAttributeDepositBase::get(); - pub const NftsDepositPerByte: Balance = UniquesDepositPerByte::get(); -} - -impl pallet_nfts::Config for Runtime { - type RuntimeEvent = RuntimeEvent; - type CollectionId = u32; - type ItemId = u32; - type Currency = Balances; - type CreateOrigin = AsEnsureOriginWithArg>; - type ForceOrigin = AssetsForceOrigin; - type Locker = (); - type CollectionDeposit = NftsCollectionDeposit; - type ItemDeposit = NftsItemDeposit; - type MetadataDepositBase = NftsMetadataDepositBase; - type AttributeDepositBase = NftsAttributeDepositBase; - type DepositPerByte = NftsDepositPerByte; - type StringLimit = ConstU32<256>; - type KeyLimit = ConstU32<64>; - type ValueLimit = ConstU32<256>; - type ApprovalsLimit = ConstU32<20>; - type ItemAttributesApprovalsLimit = ConstU32<30>; - type MaxTips = ConstU32<10>; - type MaxDeadlineDuration = NftsMaxDeadlineDuration; - type MaxAttributesPerCall = ConstU32<10>; - type Features = NftsPalletFeatures; - type OffchainSignature = Signature; - type OffchainPublic = ::Signer; - type WeightInfo = weights::pallet_nfts::WeightInfo; - #[cfg(feature = "runtime-benchmarks")] - type Helper = (); -} - -// Create the runtime by composing the FRAME pallets that were previously configured. -construct_runtime!( - pub enum Runtime - { - // System support stuff. - System: frame_system::{Pallet, Call, Config, Storage, Event} = 0, - ParachainSystem: cumulus_pallet_parachain_system::{ - Pallet, Call, Config, Storage, Inherent, Event, ValidateUnsigned, - } = 1, - // RandomnessCollectiveFlip = 2 removed - Timestamp: pallet_timestamp::{Pallet, Call, Storage, Inherent} = 3, - ParachainInfo: parachain_info::{Pallet, Storage, Config} = 4, - - // Monetary stuff. - Balances: pallet_balances::{Pallet, Call, Storage, Config, Event} = 10, - TransactionPayment: pallet_transaction_payment::{Pallet, Storage, Event} = 11, - AssetTxPayment: pallet_asset_conversion_tx_payment::{Pallet, Event} = 13, - - // Collator support. the order of these 5 are important and shall not change. 
- Authorship: pallet_authorship::{Pallet, Storage} = 20, - CollatorSelection: pallet_collator_selection::{Pallet, Call, Storage, Event, Config} = 21, - Session: pallet_session::{Pallet, Call, Storage, Event, Config} = 22, - Aura: pallet_aura::{Pallet, Storage, Config} = 23, - AuraExt: cumulus_pallet_aura_ext::{Pallet, Storage, Config} = 24, - - // XCM helpers. - XcmpQueue: cumulus_pallet_xcmp_queue::{Pallet, Call, Storage, Event} = 30, - PolkadotXcm: pallet_xcm::{Pallet, Call, Storage, Event, Origin, Config} = 31, - CumulusXcm: cumulus_pallet_xcm::{Pallet, Event, Origin} = 32, - // Temporary to migrate the remaining DMP messages: - DmpQueue: cumulus_pallet_dmp_queue::{Pallet, Call, Storage, Event} = 33, - MessageQueue: pallet_message_queue::{Pallet, Call, Storage, Event} = 34, - - // Handy utilities. - Utility: pallet_utility::{Pallet, Call, Event} = 40, - Multisig: pallet_multisig::{Pallet, Call, Storage, Event} = 41, - Proxy: pallet_proxy::{Pallet, Call, Storage, Event} = 42, - - // The main stage. - Assets: pallet_assets::::{Pallet, Call, Storage, Event} = 50, - Uniques: pallet_uniques::{Pallet, Call, Storage, Event} = 51, - Nfts: pallet_nfts::{Pallet, Call, Storage, Event} = 52, - ForeignAssets: pallet_assets::::{Pallet, Call, Storage, Event} = 53, - NftFractionalization: pallet_nft_fractionalization::{Pallet, Call, Storage, Event, HoldReason} = 54, - - PoolAssets: pallet_assets::::{Pallet, Call, Storage, Event} = 55, - AssetConversion: pallet_asset_conversion::{Pallet, Call, Storage, Event} = 56, - - #[cfg(feature = "state-trie-version-1")] - StateTrieMigration: pallet_state_trie_migration = 70, - } -); - -/// The address format for describing accounts. -pub type Address = sp_runtime::MultiAddress; -/// Block type as expected by this runtime. -pub type Block = generic::Block; -/// A Block signed with a Justification -pub type SignedBlock = generic::SignedBlock; -/// BlockId type as expected by this runtime. -pub type BlockId = generic::BlockId; -/// The SignedExtension to the basic transaction logic. -pub type SignedExtra = ( - frame_system::CheckNonZeroSender, - frame_system::CheckSpecVersion, - frame_system::CheckTxVersion, - frame_system::CheckGenesis, - frame_system::CheckEra, - frame_system::CheckNonce, - frame_system::CheckWeight, - pallet_asset_conversion_tx_payment::ChargeAssetTxPayment, -); -/// Unchecked extrinsic type as expected by this runtime. -pub type UncheckedExtrinsic = - generic::UncheckedExtrinsic; -/// Migrations to apply on runtime upgrade. -pub type Migrations = (pallet_collator_selection::migration::v1::MigrateToV1,); - -/// Executive: handles dispatch to the various modules. 
-pub type Executive = frame_executive::Executive< - Runtime, - Block, - frame_system::ChainContext, - Runtime, - AllPalletsWithSystem, - Migrations, ->; - -#[cfg(feature = "runtime-benchmarks")] -mod benches { - frame_benchmarking::define_benchmarks!( - [frame_system, SystemBench::] - [pallet_assets, Local] - [pallet_assets, Foreign] - [pallet_assets, Pool] - [pallet_asset_conversion, AssetConversion] - [pallet_balances, Balances] - [pallet_message_queue, MessageQueue] - [pallet_multisig, Multisig] - [pallet_nft_fractionalization, NftFractionalization] - [pallet_nfts, Nfts] - [pallet_proxy, Proxy] - [pallet_session, SessionBench::] - [pallet_uniques, Uniques] - [pallet_utility, Utility] - [pallet_timestamp, Timestamp] - [pallet_collator_selection, CollatorSelection] - [cumulus_pallet_parachain_system, ParachainSystem] - [cumulus_pallet_xcmp_queue, XcmpQueue] - [cumulus_pallet_dmp_queue, DmpQueue] - // XCM - [pallet_xcm, PalletXcmExtrinsicsBenchmark::] - // NOTE: Make sure you point to the individual modules below. - [pallet_xcm_benchmarks::fungible, XcmBalances] - [pallet_xcm_benchmarks::generic, XcmGeneric] - ); -} - -impl_runtime_apis! { - impl sp_consensus_aura::AuraApi for Runtime { - fn slot_duration() -> sp_consensus_aura::SlotDuration { - sp_consensus_aura::SlotDuration::from_millis(Aura::slot_duration()) - } - - fn authorities() -> Vec { - Aura::authorities().into_inner() - } - } - - impl sp_api::Core for Runtime { - fn version() -> RuntimeVersion { - VERSION - } - - fn execute_block(block: Block) { - Executive::execute_block(block) - } - - fn initialize_block(header: &::Header) { - Executive::initialize_block(header) - } - } - - impl sp_api::Metadata for Runtime { - fn metadata() -> OpaqueMetadata { - OpaqueMetadata::new(Runtime::metadata().into()) - } - - fn metadata_at_version(version: u32) -> Option { - Runtime::metadata_at_version(version) - } - - fn metadata_versions() -> sp_std::vec::Vec { - Runtime::metadata_versions() - } - } - - impl sp_block_builder::BlockBuilder for Runtime { - fn apply_extrinsic(extrinsic: ::Extrinsic) -> ApplyExtrinsicResult { - Executive::apply_extrinsic(extrinsic) - } - - fn finalize_block() -> ::Header { - Executive::finalize_block() - } - - fn inherent_extrinsics(data: sp_inherents::InherentData) -> Vec<::Extrinsic> { - data.create_extrinsics() - } - - fn check_inherents( - block: Block, - data: sp_inherents::InherentData, - ) -> sp_inherents::CheckInherentsResult { - data.check_extrinsics(&block) - } - } - - impl sp_transaction_pool::runtime_api::TaggedTransactionQueue for Runtime { - fn validate_transaction( - source: TransactionSource, - tx: ::Extrinsic, - block_hash: ::Hash, - ) -> TransactionValidity { - Executive::validate_transaction(source, tx, block_hash) - } - } - - impl sp_offchain::OffchainWorkerApi for Runtime { - fn offchain_worker(header: &::Header) { - Executive::offchain_worker(header) - } - } - - impl sp_session::SessionKeys for Runtime { - fn generate_session_keys(seed: Option>) -> Vec { - SessionKeys::generate(seed) - } - - fn decode_session_keys( - encoded: Vec, - ) -> Option, KeyTypeId)>> { - SessionKeys::decode_into_raw_public_keys(&encoded) - } - } - - impl frame_system_rpc_runtime_api::AccountNonceApi for Runtime { - fn account_nonce(account: AccountId) -> Nonce { - System::account_nonce(account) - } - } - - impl pallet_asset_conversion::AssetConversionApi< - Block, - Balance, - u128, - Box, - > for Runtime - { - fn quote_price_exact_tokens_for_tokens(asset1: Box, asset2: Box, amount: u128, include_fee: bool) -> Option { - 
AssetConversion::quote_price_exact_tokens_for_tokens(asset1, asset2, amount, include_fee) - } - fn quote_price_tokens_for_exact_tokens(asset1: Box, asset2: Box, amount: u128, include_fee: bool) -> Option { - AssetConversion::quote_price_tokens_for_exact_tokens(asset1, asset2, amount, include_fee) - } - fn get_reserves(asset1: Box, asset2: Box) -> Option<(Balance, Balance)> { - AssetConversion::get_reserves(&asset1, &asset2).ok() - } - } - - impl pallet_transaction_payment_rpc_runtime_api::TransactionPaymentApi for Runtime { - fn query_info( - uxt: ::Extrinsic, - len: u32, - ) -> pallet_transaction_payment_rpc_runtime_api::RuntimeDispatchInfo { - TransactionPayment::query_info(uxt, len) - } - fn query_fee_details( - uxt: ::Extrinsic, - len: u32, - ) -> pallet_transaction_payment::FeeDetails { - TransactionPayment::query_fee_details(uxt, len) - } - fn query_weight_to_fee(weight: Weight) -> Balance { - TransactionPayment::weight_to_fee(weight) - } - fn query_length_to_fee(length: u32) -> Balance { - TransactionPayment::length_to_fee(length) - } - } - - impl pallet_transaction_payment_rpc_runtime_api::TransactionPaymentCallApi - for Runtime - { - fn query_call_info( - call: RuntimeCall, - len: u32, - ) -> pallet_transaction_payment::RuntimeDispatchInfo { - TransactionPayment::query_call_info(call, len) - } - fn query_call_fee_details( - call: RuntimeCall, - len: u32, - ) -> pallet_transaction_payment::FeeDetails { - TransactionPayment::query_call_fee_details(call, len) - } - fn query_weight_to_fee(weight: Weight) -> Balance { - TransactionPayment::weight_to_fee(weight) - } - fn query_length_to_fee(length: u32) -> Balance { - TransactionPayment::length_to_fee(length) - } - } - - impl assets_common::runtime_api::FungiblesApi< - Block, - AccountId, - > for Runtime - { - fn query_account_balances(account: AccountId) -> Result { - use assets_common::fungible_conversion::{convert, convert_balance}; - Ok([ - // collect pallet_balance - { - let balance = Balances::free_balance(account.clone()); - if balance > 0 { - vec![convert_balance::(balance)?] - } else { - vec![] - } - }, - // collect pallet_assets (TrustBackedAssets) - convert::<_, _, _, _, TrustBackedAssetsConvertedConcreteId>( - Assets::account_balances(account.clone()) - .iter() - .filter(|(_, balance)| balance > &0) - )?, - // collect pallet_assets (ForeignAssets) - convert::<_, _, _, _, ForeignAssetsConvertedConcreteId>( - ForeignAssets::account_balances(account.clone()) - .iter() - .filter(|(_, balance)| balance > &0) - )?, - // collect pallet_assets (PoolAssets) - convert::<_, _, _, _, PoolAssetsConvertedConcreteId>( - PoolAssets::account_balances(account) - .iter() - .filter(|(_, balance)| balance > &0) - )?, - // collect ... e.g. 
other tokens - ].concat().into()) - } - } - - impl cumulus_primitives_core::CollectCollationInfo for Runtime { - fn collect_collation_info(header: &::Header) -> cumulus_primitives_core::CollationInfo { - ParachainSystem::collect_collation_info(header) - } - } - - #[cfg(feature = "try-runtime")] - impl frame_try_runtime::TryRuntime for Runtime { - fn on_runtime_upgrade(checks: frame_try_runtime::UpgradeCheckSelect) -> (Weight, Weight) { - let weight = Executive::try_runtime_upgrade(checks).unwrap(); - (weight, RuntimeBlockWeights::get().max_block) - } - - fn execute_block( - block: Block, - state_root_check: bool, - signature_check: bool, - select: frame_try_runtime::TryStateSelect, - ) -> Weight { - // NOTE: intentional unwrap: we don't want to propagate the error backwards, and want to - // have a backtrace here. - Executive::try_execute_block(block, state_root_check, signature_check, select).unwrap() - } - } - - #[cfg(feature = "runtime-benchmarks")] - impl frame_benchmarking::Benchmark for Runtime { - fn benchmark_metadata(extra: bool) -> ( - Vec, - Vec, - ) { - use frame_benchmarking::{Benchmarking, BenchmarkList}; - use frame_support::traits::StorageInfoTrait; - use frame_system_benchmarking::Pallet as SystemBench; - use cumulus_pallet_session_benchmarking::Pallet as SessionBench; - use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsicsBenchmark; - - // This is defined once again in dispatch_benchmark, because list_benchmarks! - // and add_benchmarks! are macros exported by define_benchmarks! macros and those types - // are referenced in that call. - type XcmBalances = pallet_xcm_benchmarks::fungible::Pallet::; - type XcmGeneric = pallet_xcm_benchmarks::generic::Pallet::; - - // Benchmark files generated for `Assets/ForeignAssets` instances are by default - // `pallet_assets_assets.rs / pallet_assets_foreign_assets`, which is not really nice, - // so with this redefinition we can change names to nicer: - // `pallet_assets_local.rs / pallet_assets_foreign.rs`. - type Local = pallet_assets::Pallet::; - type Foreign = pallet_assets::Pallet::; - type Pool = pallet_assets::Pallet::; - - let mut list = Vec::::new(); - list_benchmarks!(list, extra); - - let storage_info = AllPalletsWithSystem::storage_info(); - (list, storage_info) - } - - fn dispatch_benchmark( - config: frame_benchmarking::BenchmarkConfig - ) -> Result, sp_runtime::RuntimeString> { - use frame_benchmarking::{Benchmarking, BenchmarkBatch, BenchmarkError}; - use sp_storage::TrackedStorageKey; - - use frame_system_benchmarking::Pallet as SystemBench; - impl frame_system_benchmarking::Config for Runtime { - fn setup_set_code_requirements(code: &sp_std::vec::Vec) -> Result<(), BenchmarkError> { - ParachainSystem::initialize_for_set_code_benchmark(code.len() as u32); - Ok(()) - } - - fn verify_set_code() { - System::assert_last_event(cumulus_pallet_parachain_system::Event::::ValidationFunctionStored.into()); - } - } - - use cumulus_pallet_session_benchmarking::Pallet as SessionBench; - impl cumulus_pallet_session_benchmarking::Config for Runtime {} - - use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsicsBenchmark; - impl pallet_xcm::benchmarking::Config for Runtime { - fn reachable_dest() -> Option { - Some(Parent.into()) - } - - fn teleportable_asset_and_dest() -> Option<(MultiAsset, MultiLocation)> { - // Relay/native token can be teleported between AH and Relay. 
- Some(( - MultiAsset { - fun: Fungible(EXISTENTIAL_DEPOSIT), - id: Concrete(Parent.into()) - }, - Parent.into(), - )) - } - - fn reserve_transferable_asset_and_dest() -> Option<(MultiAsset, MultiLocation)> { - // AH can reserve transfer native token to some random parachain. - let random_para_id = 43211234; - ParachainSystem::open_outbound_hrmp_channel_for_benchmarks_or_tests( - random_para_id.into() - ); - Some(( - MultiAsset { - fun: Fungible(EXISTENTIAL_DEPOSIT), - id: Concrete(Parent.into()) - }, - ParentThen(Parachain(random_para_id).into()).into(), - )) - } - } - - use xcm::latest::prelude::*; - use xcm_config::{KsmLocation, MaxAssetsIntoHolding}; - use pallet_xcm_benchmarks::asset_instance_from; - - parameter_types! { - pub ExistentialDepositMultiAsset: Option = Some(( - KsmLocation::get(), - ExistentialDeposit::get() - ).into()); - } - - impl pallet_xcm_benchmarks::Config for Runtime { - type XcmConfig = xcm_config::XcmConfig; - type AccountIdConverter = xcm_config::LocationToAccountId; - type DeliveryHelper = cumulus_primitives_utility::ToParentDeliveryHelper< - xcm_config::XcmConfig, - ExistentialDepositMultiAsset, - xcm_config::PriceForParentDelivery, - >; - fn valid_destination() -> Result { - Ok(KsmLocation::get()) - } - fn worst_case_holding(depositable_count: u32) -> MultiAssets { - // A mix of fungible, non-fungible, and concrete assets. - let holding_non_fungibles = MaxAssetsIntoHolding::get() / 2 - depositable_count; - let holding_fungibles = holding_non_fungibles.saturating_sub(1); - let fungibles_amount: u128 = 100; - let mut assets = (0..holding_fungibles) - .map(|i| { - MultiAsset { - id: Concrete(GeneralIndex(i as u128).into()), - fun: Fungible(fungibles_amount * i as u128), - } - }) - .chain(core::iter::once(MultiAsset { id: Concrete(Here.into()), fun: Fungible(u128::MAX) })) - .chain((0..holding_non_fungibles).map(|i| MultiAsset { - id: Concrete(GeneralIndex(i as u128).into()), - fun: NonFungible(asset_instance_from(i)), - })) - .collect::>(); - - assets.push(MultiAsset { - id: Concrete(KsmLocation::get()), - fun: Fungible(1_000_000 * UNITS), - }); - assets.into() - } - } - - parameter_types! 
{ - pub const TrustedTeleporter: Option<(MultiLocation, MultiAsset)> = Some(( - KsmLocation::get(), - MultiAsset { fun: Fungible(UNITS), id: Concrete(KsmLocation::get()) }, - )); - pub const CheckedAccount: Option<(AccountId, xcm_builder::MintLocation)> = None; - pub const TrustedReserve: Option<(MultiLocation, MultiAsset)> = None; - } - - impl pallet_xcm_benchmarks::fungible::Config for Runtime { - type TransactAsset = Balances; - - type CheckedAccount = CheckedAccount; - type TrustedTeleporter = TrustedTeleporter; - type TrustedReserve = TrustedReserve; - - fn get_multi_asset() -> MultiAsset { - MultiAsset { - id: Concrete(KsmLocation::get()), - fun: Fungible(UNITS), - } - } - } - - impl pallet_xcm_benchmarks::generic::Config for Runtime { - type TransactAsset = Balances; - type RuntimeCall = RuntimeCall; - - fn worst_case_response() -> (u64, Response) { - (0u64, Response::Version(Default::default())) - } - - fn worst_case_asset_exchange() -> Result<(MultiAssets, MultiAssets), BenchmarkError> { - Err(BenchmarkError::Skip) - } - - fn universal_alias() -> Result<(MultiLocation, Junction), BenchmarkError> { - Err(BenchmarkError::Skip) - } - - fn transact_origin_and_runtime_call() -> Result<(MultiLocation, RuntimeCall), BenchmarkError> { - Ok((KsmLocation::get(), frame_system::Call::remark_with_event { remark: vec![] }.into())) - } - - fn subscribe_origin() -> Result { - Ok(KsmLocation::get()) - } - - fn claimable_asset() -> Result<(MultiLocation, MultiLocation, MultiAssets), BenchmarkError> { - let origin = KsmLocation::get(); - let assets: MultiAssets = (Concrete(KsmLocation::get()), 1_000 * UNITS).into(); - let ticket = MultiLocation { parents: 0, interior: Here }; - Ok((origin, ticket, assets)) - } - - fn unlockable_asset() -> Result<(MultiLocation, MultiLocation, MultiAsset), BenchmarkError> { - Err(BenchmarkError::Skip) - } - - fn export_message_origin_and_destination( - ) -> Result<(MultiLocation, NetworkId, InteriorMultiLocation), BenchmarkError> { - Err(BenchmarkError::Skip) - } - - fn alias_origin() -> Result<(MultiLocation, MultiLocation), BenchmarkError> { - Err(BenchmarkError::Skip) - } - } - - type XcmBalances = pallet_xcm_benchmarks::fungible::Pallet::; - type XcmGeneric = pallet_xcm_benchmarks::generic::Pallet::; - - type Local = pallet_assets::Pallet::; - type Foreign = pallet_assets::Pallet::; - type Pool = pallet_assets::Pallet::; - - let whitelist: Vec = vec![ - // Block Number - hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef702a5c1b19ab7a04f536c519aca4983ac").to_vec().into(), - // Total Issuance - hex_literal::hex!("c2261276cc9d1f8598ea4b6a74b15c2f57c875e4cff74148e4628f264b974c80").to_vec().into(), - // Execution Phase - hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef7ff553b5a9862a516939d82b3d3d8661a").to_vec().into(), - // Event Count - hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef70a98fdbe9ce6c55837576c60c7af3850").to_vec().into(), - // System Events - hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef780d41e5e16056765bc8461851072c9d7").to_vec().into(), - //TODO: use from relay_well_known_keys::ACTIVE_CONFIG - hex_literal::hex!("06de3d8a54d27e44a9d5ce189618f22db4b49d95320d9021994c850f25b8e385").to_vec().into(), - ]; - - let mut batches = Vec::::new(); - let params = (&config, &whitelist); - add_benchmarks!(params, batches); - - Ok(batches) - } - } - - impl sp_genesis_builder::GenesisBuilder for Runtime { - fn create_default_config() -> Vec { - create_default_config::() - } - - fn build_config(config: Vec) -> sp_genesis_builder::Result { - 
build_config::<RuntimeGenesisConfig>(config) - } - } -} - -cumulus_pallet_parachain_system::register_validate_block! { - Runtime = Runtime, - BlockExecutor = cumulus_pallet_aura_ext::BlockExecutor::<Runtime>, -} - -#[cfg(feature = "state-trie-version-1")] -parameter_types! { - // The deposit configuration for the signed migration. Especially if you want to allow any signed account to do the migration (see `SignedFilter`), these deposits should be high. - pub const MigrationSignedDepositPerItem: Balance = CENTS; - pub const MigrationSignedDepositBase: Balance = 2_000 * CENTS; - pub const MigrationMaxKeyLen: u32 = 512; -} - -#[cfg(feature = "state-trie-version-1")] -impl pallet_state_trie_migration::Config for Runtime { - type RuntimeEvent = RuntimeEvent; - type Currency = Balances; - type SignedDepositPerItem = MigrationSignedDepositPerItem; - type SignedDepositBase = MigrationSignedDepositBase; - // An origin that can control the whole pallet: should be Root, or a part of your council. - type ControlOrigin = frame_system::EnsureSignedBy<RootMigController, AccountId>; - // specific account for the migration, can trigger the signed migrations. - type SignedFilter = frame_system::EnsureSignedBy<MigController, AccountId>; - - // Replace this with weight based on your runtime. - type WeightInfo = pallet_state_trie_migration::weights::SubstrateWeight<Runtime>; - - type MaxKeyLen = MigrationMaxKeyLen; -} - -#[cfg(feature = "state-trie-version-1")] -frame_support::ord_parameter_types! { - pub const MigController: AccountId = AccountId::from(hex_literal::hex!("8458ed39dc4b6f6c7255f7bc42be50c2967db126357c999d44e12ca7ac80dc52")); - pub const RootMigController: AccountId = AccountId::from(hex_literal::hex!("8458ed39dc4b6f6c7255f7bc42be50c2967db126357c999d44e12ca7ac80dc52")); -} - -#[cfg(feature = "state-trie-version-1")] -#[test] -fn ensure_key_ss58() { - use frame_support::traits::SortedMembers; - use sp_core::crypto::Ss58Codec; - let acc = - AccountId::from_ss58check("5F4EbSkZz18X36xhbsjvDNs6NuZ82HyYtq5UiJ1h9SBHJXZD").unwrap(); - //panic!("{:x?}", acc); - assert_eq!(acc, MigController::sorted_members()[0]); - let acc = - AccountId::from_ss58check("5F4EbSkZz18X36xhbsjvDNs6NuZ82HyYtq5UiJ1h9SBHJXZD").unwrap(); - assert_eq!(acc, RootMigController::sorted_members()[0]); - //panic!("{:x?}", acc); -} - -#[cfg(test)] -mod tests { - use super::*; - use crate::{CENTS, MILLICENTS}; - use parachains_common::kusama::fee; - use sp_runtime::traits::Zero; - use sp_weights::WeightToFee; - - /// We can fit at least 1000 transfers in a block. - #[test] - fn sane_block_weight() { - use pallet_balances::WeightInfo; - let block = RuntimeBlockWeights::get().max_block; - let base = RuntimeBlockWeights::get().get(DispatchClass::Normal).base_extrinsic; - let transfer = - base + weights::pallet_balances::WeightInfo::<Runtime>::transfer_allow_death(); - - let fit = block.checked_div_per_component(&transfer).unwrap_or_default(); - assert!(fit >= 1000, "{} should be at least 1000", fit); - } - - /// The fee for one transfer is at most 1 CENT. - #[test] - fn sane_transfer_fee() { - use pallet_balances::WeightInfo; - let base = RuntimeBlockWeights::get().get(DispatchClass::Normal).base_extrinsic; - let transfer = - base + weights::pallet_balances::WeightInfo::<Runtime>::transfer_allow_death(); - - let fee: Balance = fee::WeightToFee::weight_to_fee(&transfer); - assert!(fee <= CENTS, "{} MILLICENTS should be at most 1000", fee / MILLICENTS); - } - - /// Weight is being charged for both dimensions.
- #[test] - fn weight_charged_for_both_components() { - let fee: Balance = fee::WeightToFee::weight_to_fee(&Weight::from_parts(10_000, 0)); - assert!(!fee.is_zero(), "Charges for ref time"); - - let fee: Balance = fee::WeightToFee::weight_to_fee(&Weight::from_parts(0, 10_000)); - assert_eq!(fee, CENTS, "10kb maps to CENT"); - } - - /// Filling up a block by proof size is at most 30 times more expensive than ref time. - /// - /// This is just a sanity check. - #[test] - fn full_block_fee_ratio() { - let block = RuntimeBlockWeights::get().max_block; - let time_fee: Balance = - fee::WeightToFee::weight_to_fee(&Weight::from_parts(block.ref_time(), 0)); - let proof_fee: Balance = - fee::WeightToFee::weight_to_fee(&Weight::from_parts(0, block.proof_size())); - - let proof_o_time = proof_fee.checked_div(time_fee).unwrap_or_default(); - assert!(proof_o_time <= 30, "{} should be at most 30", proof_o_time); - let time_o_proof = time_fee.checked_div(proof_fee).unwrap_or_default(); - assert!(time_o_proof <= 30, "{} should be at most 30", time_o_proof); - } -} diff --git a/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/block_weights.rs b/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/block_weights.rs deleted file mode 100644 index e7fdb2aae2a..00000000000 --- a/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/block_weights.rs +++ /dev/null @@ -1,53 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -pub mod constants { - use frame_support::{ - parameter_types, - weights::{constants, Weight}, - }; - - parameter_types! { - /// Importing a block with 0 Extrinsics. - pub const BlockExecutionWeight: Weight = - Weight::from_parts(constants::WEIGHT_REF_TIME_PER_NANOS.saturating_mul(5_000_000), 0); - } - - #[cfg(test)] - mod test_weights { - use frame_support::weights::constants; - - /// Checks that the weight exists and is sane. - // NOTE: If this test fails but you are sure that the generated values are fine, - // you can delete it. - #[test] - fn sane() { - let w = super::constants::BlockExecutionWeight::get(); - - // At least 100 µs. - assert!( - w.ref_time() >= 100u64 * constants::WEIGHT_REF_TIME_PER_MICROS, - "Weight should be at least 100 µs." - ); - // At most 50 ms. - assert!( - w.ref_time() <= 50u64 * constants::WEIGHT_REF_TIME_PER_MILLIS, - "Weight should be at most 50 ms." - ); - } - } -} diff --git a/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/cumulus_pallet_dmp_queue.rs b/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/cumulus_pallet_dmp_queue.rs deleted file mode 100644 index cc41dcd6cbb..00000000000 --- a/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/cumulus_pallet_dmp_queue.rs +++ /dev/null @@ -1,131 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Cumulus. 
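The fee sanity tests above treat weight as two-dimensional: `ref_time` (execution time in picoseconds) and `proof_size` (PoV bytes). They assert that both dimensions are charged, and that neither dominates the other by more than a factor of 30 when a block is filled along a single axis. The sketch below is a minimal, self-contained model of such a two-component weight-to-fee mapping; the per-unit prices are invented for illustration and are not the coefficients used by `parachains_common::kusama::fee::WeightToFee`.

```rust
// Simplified model of a two-dimensional weight-to-fee mapping.
// The per-unit prices below are made up for illustration; the real runtime
// uses benchmark-derived polynomials for each dimension.
type Balance = u128;

#[derive(Clone, Copy)]
struct Weight {
    ref_time: u64,   // picoseconds of execution
    proof_size: u64, // bytes of proof (PoV)
}

const FEE_PER_REF_TIME_UNIT: Balance = 1;
const FEE_PER_PROOF_BYTE: Balance = 1_000;

fn weight_to_fee(w: Weight) -> Balance {
    // Both components are priced and the results summed, so neither dimension
    // can ride for free; this is what `weight_charged_for_both_components`
    // checks with the real constants.
    let time_fee = Balance::from(w.ref_time).saturating_mul(FEE_PER_REF_TIME_UNIT);
    let proof_fee = Balance::from(w.proof_size).saturating_mul(FEE_PER_PROOF_BYTE);
    time_fee.saturating_add(proof_fee)
}

fn main() {
    let transfer_sized = Weight { ref_time: 200_000_000, proof_size: 3_500 };
    println!("illustrative fee: {}", weight_to_fee(transfer_sized));
}
```

With the real constants, `full_block_fee_ratio` then bounds the ratio between filling a block purely by `ref_time` and purely by `proof_size`, which keeps fees meaningful for both compute-heavy and state-heavy transactions.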
- -// Cumulus is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Cumulus is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Cumulus. If not, see . - -//! Autogenerated weights for `cumulus_pallet_dmp_queue` -//! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-10-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-yprdrvc7-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("asset-hub-kusama-dev")`, DB CACHE: 1024 - -// Executed Command: -// target/production/polkadot-parachain -// benchmark -// pallet -// --steps=50 -// --repeat=20 -// --extrinsic=* -// --wasm-execution=compiled -// --heap-pages=4096 -// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json -// --pallet=cumulus_pallet_dmp_queue -// --chain=asset-hub-kusama-dev -// --header=./cumulus/file_header.txt -// --output=./cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/ - -#![cfg_attr(rustfmt, rustfmt_skip)] -#![allow(unused_parens)] -#![allow(unused_imports)] -#![allow(missing_docs)] - -use frame_support::{traits::Get, weights::Weight}; -use core::marker::PhantomData; - -/// Weight functions for `cumulus_pallet_dmp_queue`. -pub struct WeightInfo(PhantomData); -impl cumulus_pallet_dmp_queue::WeightInfo for WeightInfo { - /// Storage: `DmpQueue::MigrationStatus` (r:1 w:1) - /// Proof: `DmpQueue::MigrationStatus` (`max_values`: Some(1), `max_size`: Some(1028), added: 1523, mode: `MaxEncodedLen`) - /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca754904d6d8c6fe06c4e5965f9b8397421` (r:1 w:0) - /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca754904d6d8c6fe06c4e5965f9b8397421` (r:1 w:0) - /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca7d95d3e948effbeccff2de2c182672836` (r:1 w:1) - /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca7d95d3e948effbeccff2de2c182672836` (r:1 w:1) - /// Storage: `MessageQueue::BookStateFor` (r:1 w:1) - /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) - /// Storage: `MessageQueue::ServiceHead` (r:1 w:1) - /// Proof: `MessageQueue::ServiceHead` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `MaxEncodedLen`) - /// Storage: `MessageQueue::Pages` (r:0 w:1) - /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(65585), added: 68060, mode: `MaxEncodedLen`) - fn on_idle_good_msg() -> Weight { - // Proof Size summary in bytes: - // Measured: `65696` - // Estimated: `69161` - // Minimum execution time: 124_651_000 picoseconds. 
- Weight::from_parts(127_857_000, 0) - .saturating_add(Weight::from_parts(0, 69161)) - .saturating_add(T::DbWeight::get().reads(5)) - .saturating_add(T::DbWeight::get().writes(5)) - } - /// Storage: `DmpQueue::MigrationStatus` (r:1 w:1) - /// Proof: `DmpQueue::MigrationStatus` (`max_values`: Some(1), `max_size`: Some(1028), added: 1523, mode: `MaxEncodedLen`) - /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca754904d6d8c6fe06c4e5965f9b8397421` (r:1 w:0) - /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca754904d6d8c6fe06c4e5965f9b8397421` (r:1 w:0) - /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca7d95d3e948effbeccff2de2c182672836` (r:1 w:1) - /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca7d95d3e948effbeccff2de2c182672836` (r:1 w:1) - fn on_idle_large_msg() -> Weight { - // Proof Size summary in bytes: - // Measured: `65659` - // Estimated: `69124` - // Minimum execution time: 65_684_000 picoseconds. - Weight::from_parts(68_039_000, 0) - .saturating_add(Weight::from_parts(0, 69124)) - .saturating_add(T::DbWeight::get().reads(3)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `DmpQueue::MigrationStatus` (r:1 w:1) - /// Proof: `DmpQueue::MigrationStatus` (`max_values`: Some(1), `max_size`: Some(1028), added: 1523, mode: `MaxEncodedLen`) - /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca754904d6d8c6fe06c4e5965f9b8397421` (r:1 w:0) - /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca754904d6d8c6fe06c4e5965f9b8397421` (r:1 w:0) - /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca70f923ef3252d0166429d36d20ed665a8` (r:1 w:1) - /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca70f923ef3252d0166429d36d20ed665a8` (r:1 w:1) - /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca772275f64c354954352b71eea39cfaca2` (r:1 w:1) - /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca772275f64c354954352b71eea39cfaca2` (r:1 w:1) - /// Storage: `MessageQueue::BookStateFor` (r:1 w:1) - /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) - /// Storage: `MessageQueue::ServiceHead` (r:1 w:1) - /// Proof: `MessageQueue::ServiceHead` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `MaxEncodedLen`) - /// Storage: `MessageQueue::Pages` (r:0 w:1) - /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(65585), added: 68060, mode: `MaxEncodedLen`) - fn on_idle_overweight_good_msg() -> Weight { - // Proof Size summary in bytes: - // Measured: `65726` - // Estimated: `69191` - // Minimum execution time: 117_657_000 picoseconds. 
- Weight::from_parts(122_035_000, 0) - .saturating_add(Weight::from_parts(0, 69191)) - .saturating_add(T::DbWeight::get().reads(6)) - .saturating_add(T::DbWeight::get().writes(6)) - } - /// Storage: `DmpQueue::MigrationStatus` (r:1 w:1) - /// Proof: `DmpQueue::MigrationStatus` (`max_values`: Some(1), `max_size`: Some(1028), added: 1523, mode: `MaxEncodedLen`) - /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca754904d6d8c6fe06c4e5965f9b8397421` (r:1 w:0) - /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca754904d6d8c6fe06c4e5965f9b8397421` (r:1 w:0) - /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca70f923ef3252d0166429d36d20ed665a8` (r:1 w:1) - /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca70f923ef3252d0166429d36d20ed665a8` (r:1 w:1) - /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca772275f64c354954352b71eea39cfaca2` (r:1 w:1) - /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca772275f64c354954352b71eea39cfaca2` (r:1 w:1) - fn on_idle_overweight_large_msg() -> Weight { - // Proof Size summary in bytes: - // Measured: `65689` - // Estimated: `69154` - // Minimum execution time: 59_799_000 picoseconds. - Weight::from_parts(61_354_000, 0) - .saturating_add(Weight::from_parts(0, 69154)) - .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(3)) - } -} diff --git a/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/cumulus_pallet_parachain_system.rs b/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/cumulus_pallet_parachain_system.rs deleted file mode 100644 index f787aa32701..00000000000 --- a/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/cumulus_pallet_parachain_system.rs +++ /dev/null @@ -1,80 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! Autogenerated weights for `cumulus_pallet_parachain_system` -//! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-03-28, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `i9`, CPU: `13th Gen Intel(R) Core(TM) i9-13900K` -//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("statemint-dev"), DB CACHE: 1024 - -// Executed Command: -// ./target/release/polkadot-parachain -// benchmark -// pallet -// --chain -// statemint-dev -// --pallet -// cumulus_pallet_parachain_system -// --extrinsic -// * -// --execution -// wasm -// --wasm-execution -// compiled -// --output -// parachains/runtimes/assets/statemint/src/weights -// --steps -// 50 -// --repeat -// 20 - -#![cfg_attr(rustfmt, rustfmt_skip)] -#![allow(unused_parens)] -#![allow(unused_imports)] - -use frame_support::{traits::Get, weights::Weight}; -use sp_std::marker::PhantomData; - -/// Weight functions for `cumulus_pallet_parachain_system`. 
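The weight functions in these generated files all follow the same recipe: a measured execution-time base combined with a proof-size allowance via `Weight::from_parts`, plus database read/write charges taken from `T::DbWeight`, and, for benchmarks with a ranged component, an extra per-unit slope multiplied by that component. Below is a minimal, self-contained model of that shape using the `on_idle_good_msg` figures above; `READ` and `WRITE` are illustrative stand-ins, not the runtime's actual per-access database weights.

```rust
// Minimal model of the shape of an autogenerated `WeightInfo` function.
// `READ`/`WRITE` stand in for `T::DbWeight::get().reads(..)` / `.writes(..)`.
#[derive(Clone, Copy, Debug)]
struct Weight {
    ref_time: u64,   // picoseconds of execution
    proof_size: u64, // bytes of proof (PoV)
}

impl Weight {
    const fn from_parts(ref_time: u64, proof_size: u64) -> Self {
        Self { ref_time, proof_size }
    }
    fn saturating_add(self, rhs: Self) -> Self {
        Self {
            ref_time: self.ref_time.saturating_add(rhs.ref_time),
            proof_size: self.proof_size.saturating_add(rhs.proof_size),
        }
    }
    fn saturating_mul(self, n: u64) -> Self {
        Self {
            ref_time: self.ref_time.saturating_mul(n),
            proof_size: self.proof_size.saturating_mul(n),
        }
    }
}

// Illustrative per-access database costs (picoseconds), not the real constants.
const READ: Weight = Weight::from_parts(25_000_000, 0);
const WRITE: Weight = Weight::from_parts(100_000_000, 0);

/// Same structure as the benchmarked `on_idle_good_msg` above:
/// measured base + proof-size bound + 5 storage reads + 5 storage writes.
fn on_idle_good_msg() -> Weight {
    Weight::from_parts(127_857_000, 0)
        .saturating_add(Weight::from_parts(0, 69_161))
        .saturating_add(READ.saturating_mul(5))
        .saturating_add(WRITE.saturating_mul(5))
}

fn main() {
    println!("{:?}", on_idle_good_msg());
}
```

Functions with a ranged component, such as `enqueue_inbound_downward_messages(n)` further down, additionally add a slope term of the form `Weight::from_parts(slope, 0).saturating_mul(n)`.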
-pub struct WeightInfo(PhantomData); -impl cumulus_pallet_parachain_system::WeightInfo for WeightInfo { - /// Storage: ParachainSystem LastDmqMqcHead (r:1 w:1) - /// Proof Skipped: ParachainSystem LastDmqMqcHead (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ParachainSystem ReservedDmpWeightOverride (r:1 w:0) - /// Proof Skipped: ParachainSystem ReservedDmpWeightOverride (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: MessageQueue BookStateFor (r:1 w:1) - /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) - /// Storage: MessageQueue ServiceHead (r:1 w:1) - /// Proof: MessageQueue ServiceHead (max_values: Some(1), max_size: Some(5), added: 500, mode: MaxEncodedLen) - /// Storage: ParachainSystem ProcessedDownwardMessages (r:0 w:1) - /// Proof Skipped: ParachainSystem ProcessedDownwardMessages (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: MessageQueue Pages (r:0 w:16) - /// Proof: MessageQueue Pages (max_values: None, max_size: Some(65585), added: 68060, mode: MaxEncodedLen) - /// The range of component `n` is `[0, 1000]`. - fn enqueue_inbound_downward_messages(n: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `12` - // Estimated: `8013` - // Minimum execution time: 1_660_000 picoseconds. - Weight::from_parts(1_720_000, 0) - .saturating_add(Weight::from_parts(0, 8013)) - // Standard Error: 28_418 - .saturating_add(Weight::from_parts(24_636_963, 0).saturating_mul(n.into())) - .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(4)) - } -} diff --git a/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/cumulus_pallet_xcmp_queue.rs b/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/cumulus_pallet_xcmp_queue.rs deleted file mode 100644 index e394e8b837a..00000000000 --- a/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/cumulus_pallet_xcmp_queue.rs +++ /dev/null @@ -1,148 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! Autogenerated weights for `cumulus_pallet_xcmp_queue` -//! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-09-15, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `Olivers-MacBook-Pro.local`, CPU: `` -//! 
WASM-EXECUTION: `Compiled`, CHAIN: `Some("asset-hub-kusama-dev")`, DB CACHE: 1024 - -// Executed Command: -// ./target/release/polkadot-parachain -// benchmark -// pallet -// --pallet -// cumulus-pallet-xcmp-queue -// --chain -// asset-hub-kusama-dev -// --output -// cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/cumulus_pallet_xcmp_queue.rs -// --extrinsic -// - -#![cfg_attr(rustfmt, rustfmt_skip)] -#![allow(unused_parens)] -#![allow(unused_imports)] -#![allow(missing_docs)] - -use frame_support::{traits::Get, weights::Weight}; -use core::marker::PhantomData; - -/// Weight functions for `cumulus_pallet_xcmp_queue`. -pub struct WeightInfo(PhantomData); -impl cumulus_pallet_xcmp_queue::WeightInfo for WeightInfo { - /// Storage: `XcmpQueue::QueueConfig` (r:1 w:1) - /// Proof: `XcmpQueue::QueueConfig` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - fn set_config_with_u32() -> Weight { - // Proof Size summary in bytes: - // Measured: `76` - // Estimated: `1561` - // Minimum execution time: 6_000_000 picoseconds. - Weight::from_parts(6_000_000, 0) - .saturating_add(Weight::from_parts(0, 1561)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `XcmpQueue::QueueConfig` (r:1 w:0) - /// Proof: `XcmpQueue::QueueConfig` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `MessageQueue::BookStateFor` (r:1 w:1) - /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) - /// Storage: `MessageQueue::ServiceHead` (r:1 w:1) - /// Proof: `MessageQueue::ServiceHead` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `MaxEncodedLen`) - /// Storage: `XcmpQueue::InboundXcmpSuspended` (r:1 w:0) - /// Proof: `XcmpQueue::InboundXcmpSuspended` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `MessageQueue::Pages` (r:0 w:1) - /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(65585), added: 68060, mode: `MaxEncodedLen`) - fn enqueue_xcmp_message() -> Weight { - // Proof Size summary in bytes: - // Measured: `118` - // Estimated: `3517` - // Minimum execution time: 15_000_000 picoseconds. - Weight::from_parts(16_000_000, 0) - .saturating_add(Weight::from_parts(0, 3517)) - .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(3)) - } - /// Storage: `XcmpQueue::OutboundXcmpStatus` (r:1 w:1) - /// Proof: `XcmpQueue::OutboundXcmpStatus` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - fn suspend_channel() -> Weight { - // Proof Size summary in bytes: - // Measured: `76` - // Estimated: `1561` - // Minimum execution time: 3_000_000 picoseconds. - Weight::from_parts(3_000_000, 0) - .saturating_add(Weight::from_parts(0, 1561)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `XcmpQueue::OutboundXcmpStatus` (r:1 w:1) - /// Proof: `XcmpQueue::OutboundXcmpStatus` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - fn resume_channel() -> Weight { - // Proof Size summary in bytes: - // Measured: `111` - // Estimated: `1596` - // Minimum execution time: 4_000_000 picoseconds. 
- Weight::from_parts(5_000_000, 0) - .saturating_add(Weight::from_parts(0, 1596)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - fn take_first_concatenated_xcm() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 44_000_000 picoseconds. - Weight::from_parts(45_000_000, 0) - .saturating_add(Weight::from_parts(0, 0)) - } - /// Storage: UNKNOWN KEY `0x7b3237373ffdfeb1cab4222e3b520d6b345d8e88afa015075c945637c07e8f20` (r:1 w:1) - /// Proof: UNKNOWN KEY `0x7b3237373ffdfeb1cab4222e3b520d6b345d8e88afa015075c945637c07e8f20` (r:1 w:1) - /// Storage: `XcmpQueue::InboundXcmpMessages` (r:1 w:1) - /// Proof: `XcmpQueue::InboundXcmpMessages` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// Storage: `MessageQueue::BookStateFor` (r:1 w:1) - /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) - /// Storage: `MessageQueue::ServiceHead` (r:1 w:1) - /// Proof: `MessageQueue::ServiceHead` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `MaxEncodedLen`) - /// Storage: `XcmpQueue::QueueConfig` (r:1 w:0) - /// Proof: `XcmpQueue::QueueConfig` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `XcmpQueue::InboundXcmpSuspended` (r:1 w:0) - /// Proof: `XcmpQueue::InboundXcmpSuspended` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `MessageQueue::Pages` (r:0 w:1) - /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(65585), added: 68060, mode: `MaxEncodedLen`) - fn on_idle_good_msg() -> Weight { - // Proof Size summary in bytes: - // Measured: `65747` - // Estimated: `69212` - // Minimum execution time: 62_000_000 picoseconds. - Weight::from_parts(66_000_000, 0) - .saturating_add(Weight::from_parts(0, 69212)) - .saturating_add(T::DbWeight::get().reads(6)) - .saturating_add(T::DbWeight::get().writes(5)) - } - /// Storage: UNKNOWN KEY `0x7b3237373ffdfeb1cab4222e3b520d6b345d8e88afa015075c945637c07e8f20` (r:1 w:1) - /// Proof: UNKNOWN KEY `0x7b3237373ffdfeb1cab4222e3b520d6b345d8e88afa015075c945637c07e8f20` (r:1 w:1) - fn on_idle_large_msg() -> Weight { - // Proof Size summary in bytes: - // Measured: `65710` - // Estimated: `69175` - // Minimum execution time: 42_000_000 picoseconds. - Weight::from_parts(43_000_000, 0) - .saturating_add(Weight::from_parts(0, 69175)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(2)) - } -} diff --git a/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/extrinsic_weights.rs b/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/extrinsic_weights.rs deleted file mode 100644 index 1a4adb968bb..00000000000 --- a/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/extrinsic_weights.rs +++ /dev/null @@ -1,53 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -pub mod constants { - use frame_support::{ - parameter_types, - weights::{constants, Weight}, - }; - - parameter_types! { - /// Executing a NO-OP `System::remarks` Extrinsic. - pub const ExtrinsicBaseWeight: Weight = - Weight::from_parts(constants::WEIGHT_REF_TIME_PER_NANOS.saturating_mul(125_000), 0); - } - - #[cfg(test)] - mod test_weights { - use frame_support::weights::constants; - - /// Checks that the weight exists and is sane. - // NOTE: If this test fails but you are sure that the generated values are fine, - // you can delete it. - #[test] - fn sane() { - let w = super::constants::ExtrinsicBaseWeight::get(); - - // At least 10 µs. - assert!( - w.ref_time() >= 10u64 * constants::WEIGHT_REF_TIME_PER_MICROS, - "Weight should be at least 10 µs." - ); - // At most 1 ms. - assert!( - w.ref_time() <= constants::WEIGHT_REF_TIME_PER_MILLIS, - "Weight should be at most 1 ms." - ); - } - } -} diff --git a/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/frame_system.rs b/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/frame_system.rs deleted file mode 100644 index 6304051e6cb..00000000000 --- a/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/frame_system.rs +++ /dev/null @@ -1,154 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! Autogenerated weights for `frame_system` -//! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-07-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-ynta1nyy-project-238-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! EXECUTION: ``, WASM-EXECUTION: `Compiled`, CHAIN: `Some("asset-hub-kusama-dev")`, DB CACHE: 1024 - -// Executed Command: -// ./target/production/polkadot-parachain -// benchmark -// pallet -// --chain=asset-hub-kusama-dev -// --wasm-execution=compiled -// --pallet=frame_system -// --no-storage-info -// --no-median-slopes -// --no-min-squares -// --extrinsic=* -// --steps=50 -// --repeat=20 -// --json -// --header=./file_header.txt -// --output=./parachains/runtimes/assets/asset-hub-kusama/src/weights/ - -#![cfg_attr(rustfmt, rustfmt_skip)] -#![allow(unused_parens)] -#![allow(unused_imports)] -#![allow(missing_docs)] - -use frame_support::{traits::Get, weights::Weight}; -use core::marker::PhantomData; - -/// Weight functions for `frame_system`. -pub struct WeightInfo(PhantomData); -impl frame_system::WeightInfo for WeightInfo { - /// The range of component `b` is `[0, 3932160]`. - fn remark(b: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 2_106_000 picoseconds. 
- Weight::from_parts(1_884_213, 0) - .saturating_add(Weight::from_parts(0, 0)) - // Standard Error: 0 - .saturating_add(Weight::from_parts(388, 0).saturating_mul(b.into())) - } - /// The range of component `b` is `[0, 3932160]`. - fn remark_with_event(b: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 7_528_000 picoseconds. - Weight::from_parts(27_081_927, 0) - .saturating_add(Weight::from_parts(0, 0)) - // Standard Error: 7 - .saturating_add(Weight::from_parts(1_730, 0).saturating_mul(b.into())) - } - /// Storage: `System::Digest` (r:1 w:1) - /// Proof: `System::Digest` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: UNKNOWN KEY `0x3a686561707061676573` (r:0 w:1) - /// Proof: UNKNOWN KEY `0x3a686561707061676573` (r:0 w:1) - fn set_heap_pages() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `1485` - // Minimum execution time: 3_882_000 picoseconds. - Weight::from_parts(4_149_000, 0) - .saturating_add(Weight::from_parts(0, 1485)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `ParachainSystem::ValidationData` (r:1 w:0) - /// Proof: `ParachainSystem::ValidationData` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `ParachainSystem::UpgradeRestrictionSignal` (r:1 w:0) - /// Proof: `ParachainSystem::UpgradeRestrictionSignal` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `ParachainSystem::PendingValidationCode` (r:1 w:1) - /// Proof: `ParachainSystem::PendingValidationCode` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) - /// Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `ParachainSystem::NewValidationCode` (r:0 w:1) - /// Proof: `ParachainSystem::NewValidationCode` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `ParachainSystem::DidSetValidationCode` (r:0 w:1) - /// Proof: `ParachainSystem::DidSetValidationCode` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - fn set_code() -> Weight { - // Proof Size summary in bytes: - // Measured: `119` - // Estimated: `1604` - // Minimum execution time: 103_389_161_000 picoseconds. - Weight::from_parts(106_870_091_000, 0) - .saturating_add(Weight::from_parts(0, 1604)) - .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(3)) - } - /// Storage: `Skipped::Metadata` (r:0 w:0) - /// Proof: `Skipped::Metadata` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// The range of component `i` is `[0, 1000]`. - fn set_storage(i: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 2_236_000 picoseconds. - Weight::from_parts(2_302_000, 0) - .saturating_add(Weight::from_parts(0, 0)) - // Standard Error: 2_045 - .saturating_add(Weight::from_parts(763_456, 0).saturating_mul(i.into())) - .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(i.into()))) - } - /// Storage: `Skipped::Metadata` (r:0 w:0) - /// Proof: `Skipped::Metadata` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// The range of component `i` is `[0, 1000]`. - fn kill_storage(i: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 2_175_000 picoseconds. 
- Weight::from_parts(2_238_000, 0) - .saturating_add(Weight::from_parts(0, 0)) - // Standard Error: 1_040 - .saturating_add(Weight::from_parts(571_397, 0).saturating_mul(i.into())) - .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(i.into()))) - } - /// Storage: `Skipped::Metadata` (r:0 w:0) - /// Proof: `Skipped::Metadata` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// The range of component `p` is `[0, 1000]`. - fn kill_prefix(p: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `84 + p * (69 ±0)` - // Estimated: `80 + p * (70 ±0)` - // Minimum execution time: 3_843_000 picoseconds. - Weight::from_parts(3_947_000, 0) - .saturating_add(Weight::from_parts(0, 80)) - // Standard Error: 2_188 - .saturating_add(Weight::from_parts(1_212_360, 0).saturating_mul(p.into())) - .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(p.into()))) - .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(p.into()))) - .saturating_add(Weight::from_parts(0, 70).saturating_mul(p.into())) - } -} diff --git a/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/mod.rs b/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/mod.rs deleted file mode 100644 index f04081a84fb..00000000000 --- a/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/mod.rs +++ /dev/null @@ -1,45 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -pub mod block_weights; -pub mod cumulus_pallet_dmp_queue; -pub mod cumulus_pallet_parachain_system; -pub mod cumulus_pallet_xcmp_queue; -pub mod extrinsic_weights; -pub mod frame_system; -pub mod pallet_asset_conversion; -pub mod pallet_assets_foreign; -pub mod pallet_assets_local; -pub mod pallet_assets_pool; -pub mod pallet_balances; -pub mod pallet_collator_selection; -pub mod pallet_message_queue; -pub mod pallet_multisig; -pub mod pallet_nft_fractionalization; -pub mod pallet_nfts; -pub mod pallet_proxy; -pub mod pallet_session; -pub mod pallet_timestamp; -pub mod pallet_uniques; -pub mod pallet_utility; -pub mod pallet_xcm; -pub mod paritydb_weights; -pub mod rocksdb_weights; -pub mod xcm; - -pub use block_weights::constants::BlockExecutionWeight; -pub use extrinsic_weights::constants::ExtrinsicBaseWeight; -pub use paritydb_weights::constants::ParityDbWeight; -pub use rocksdb_weights::constants::RocksDbWeight; diff --git a/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/pallet_asset_conversion.rs b/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/pallet_asset_conversion.rs deleted file mode 100644 index 3fcf2f8f4ec..00000000000 --- a/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/pallet_asset_conversion.rs +++ /dev/null @@ -1,154 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. 
-// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! Autogenerated weights for `pallet_asset_conversion` -//! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-07-26, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-ynta1nyy-project-238-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! EXECUTION: ``, WASM-EXECUTION: `Compiled`, CHAIN: `Some("asset-hub-kusama-dev")`, DB CACHE: 1024 - -// Executed Command: -// target/production/polkadot-parachain -// benchmark -// pallet -// --steps=50 -// --repeat=20 -// --extrinsic=* -// --wasm-execution=compiled -// --heap-pages=4096 -// --json-file=/builds/parity/mirrors/cumulus/.git/.artifacts/bench.json -// --pallet=pallet_asset_conversion -// --chain=asset-hub-kusama-dev -// --header=./file_header.txt -// --output=./parachains/runtimes/assets/asset-hub-kusama/src/weights/ - -#![cfg_attr(rustfmt, rustfmt_skip)] -#![allow(unused_parens)] -#![allow(unused_imports)] -#![allow(missing_docs)] - -use frame_support::{traits::Get, weights::Weight}; -use core::marker::PhantomData; - -/// Weight functions for `pallet_asset_conversion`. -pub struct WeightInfo(PhantomData); -impl pallet_asset_conversion::WeightInfo for WeightInfo { - /// Storage: `AssetConversion::Pools` (r:1 w:1) - /// Proof: `AssetConversion::Pools` (`max_values`: None, `max_size`: Some(1224), added: 3699, mode: `MaxEncodedLen`) - /// Storage: UNKNOWN KEY `0x76a2c49709deec21d9c05f96c1f47351` (r:1 w:0) - /// Proof: UNKNOWN KEY `0x76a2c49709deec21d9c05f96c1f47351` (r:1 w:0) - /// Storage: `System::Account` (r:2 w:1) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - /// Storage: `ForeignAssets::Account` (r:1 w:1) - /// Proof: `ForeignAssets::Account` (`max_values`: None, `max_size`: Some(732), added: 3207, mode: `MaxEncodedLen`) - /// Storage: `ForeignAssets::Asset` (r:1 w:1) - /// Proof: `ForeignAssets::Asset` (`max_values`: None, `max_size`: Some(808), added: 3283, mode: `MaxEncodedLen`) - /// Storage: `AssetConversion::NextPoolAssetId` (r:1 w:1) - /// Proof: `AssetConversion::NextPoolAssetId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - /// Storage: `PoolAssets::Asset` (r:1 w:1) - /// Proof: `PoolAssets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) - /// Storage: `PoolAssets::Account` (r:1 w:1) - /// Proof: `PoolAssets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) - fn create_pool() -> Weight { - // Proof Size summary in bytes: - // Measured: `480` - // Estimated: `6196` - // Minimum execution time: 88_484_000 picoseconds. 
- Weight::from_parts(92_964_000, 0) - .saturating_add(Weight::from_parts(0, 6196)) - .saturating_add(T::DbWeight::get().reads(9)) - .saturating_add(T::DbWeight::get().writes(7)) - } - /// Storage: `AssetConversion::Pools` (r:1 w:0) - /// Proof: `AssetConversion::Pools` (`max_values`: None, `max_size`: Some(1224), added: 3699, mode: `MaxEncodedLen`) - /// Storage: `System::Account` (r:1 w:1) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - /// Storage: `ForeignAssets::Asset` (r:1 w:1) - /// Proof: `ForeignAssets::Asset` (`max_values`: None, `max_size`: Some(808), added: 3283, mode: `MaxEncodedLen`) - /// Storage: `ForeignAssets::Account` (r:2 w:2) - /// Proof: `ForeignAssets::Account` (`max_values`: None, `max_size`: Some(732), added: 3207, mode: `MaxEncodedLen`) - /// Storage: `PoolAssets::Asset` (r:1 w:1) - /// Proof: `PoolAssets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) - /// Storage: `PoolAssets::Account` (r:2 w:2) - /// Proof: `PoolAssets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) - fn add_liquidity() -> Weight { - // Proof Size summary in bytes: - // Measured: `1117` - // Estimated: `7404` - // Minimum execution time: 153_015_000 picoseconds. - Weight::from_parts(157_018_000, 0) - .saturating_add(Weight::from_parts(0, 7404)) - .saturating_add(T::DbWeight::get().reads(8)) - .saturating_add(T::DbWeight::get().writes(7)) - } - /// Storage: `AssetConversion::Pools` (r:1 w:0) - /// Proof: `AssetConversion::Pools` (`max_values`: None, `max_size`: Some(1224), added: 3699, mode: `MaxEncodedLen`) - /// Storage: `System::Account` (r:1 w:1) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - /// Storage: `ForeignAssets::Asset` (r:1 w:1) - /// Proof: `ForeignAssets::Asset` (`max_values`: None, `max_size`: Some(808), added: 3283, mode: `MaxEncodedLen`) - /// Storage: `ForeignAssets::Account` (r:2 w:2) - /// Proof: `ForeignAssets::Account` (`max_values`: None, `max_size`: Some(732), added: 3207, mode: `MaxEncodedLen`) - /// Storage: `PoolAssets::Asset` (r:1 w:1) - /// Proof: `PoolAssets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) - /// Storage: UNKNOWN KEY `0x2433d831722b1f4aeb1666953f1c0e77` (r:1 w:0) - /// Proof: UNKNOWN KEY `0x2433d831722b1f4aeb1666953f1c0e77` (r:1 w:0) - /// Storage: `PoolAssets::Account` (r:1 w:1) - /// Proof: `PoolAssets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) - fn remove_liquidity() -> Weight { - // Proof Size summary in bytes: - // Measured: `1106` - // Estimated: `7404` - // Minimum execution time: 141_726_000 picoseconds. 
- Weight::from_parts(147_865_000, 0) - .saturating_add(Weight::from_parts(0, 7404)) - .saturating_add(T::DbWeight::get().reads(8)) - .saturating_add(T::DbWeight::get().writes(6)) - } - /// Storage: `ForeignAssets::Asset` (r:2 w:2) - /// Proof: `ForeignAssets::Asset` (`max_values`: None, `max_size`: Some(808), added: 3283, mode: `MaxEncodedLen`) - /// Storage: `ForeignAssets::Account` (r:4 w:4) - /// Proof: `ForeignAssets::Account` (`max_values`: None, `max_size`: Some(732), added: 3207, mode: `MaxEncodedLen`) - /// Storage: `System::Account` (r:2 w:2) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - fn swap_exact_tokens_for_tokens() -> Weight { - // Proof Size summary in bytes: - // Measured: `1148` - // Estimated: `13818` - // Minimum execution time: 168_619_000 picoseconds. - Weight::from_parts(174_283_000, 0) - .saturating_add(Weight::from_parts(0, 13818)) - .saturating_add(T::DbWeight::get().reads(8)) - .saturating_add(T::DbWeight::get().writes(8)) - } - /// Storage: `System::Account` (r:2 w:2) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - /// Storage: `ForeignAssets::Asset` (r:2 w:2) - /// Proof: `ForeignAssets::Asset` (`max_values`: None, `max_size`: Some(808), added: 3283, mode: `MaxEncodedLen`) - /// Storage: `ForeignAssets::Account` (r:4 w:4) - /// Proof: `ForeignAssets::Account` (`max_values`: None, `max_size`: Some(732), added: 3207, mode: `MaxEncodedLen`) - fn swap_tokens_for_exact_tokens() -> Weight { - // Proof Size summary in bytes: - // Measured: `1148` - // Estimated: `13818` - // Minimum execution time: 171_565_000 picoseconds. - Weight::from_parts(173_702_000, 0) - .saturating_add(Weight::from_parts(0, 13818)) - .saturating_add(T::DbWeight::get().reads(8)) - .saturating_add(T::DbWeight::get().writes(8)) - } -} diff --git a/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/pallet_assets_foreign.rs b/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/pallet_assets_foreign.rs deleted file mode 100644 index c2688d97905..00000000000 --- a/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/pallet_assets_foreign.rs +++ /dev/null @@ -1,533 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! Autogenerated weights for `pallet_assets` -//! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-06-20, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-ynta1nyy-project-238-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! 
EXECUTION: ``, WASM-EXECUTION: `Compiled`, CHAIN: `Some("asset-hub-kusama-dev")`, DB CACHE: 1024 - -// Executed Command: -// ./target/production/polkadot-parachain -// benchmark -// pallet -// --steps=50 -// --repeat=20 -// --extrinsic=* -// --execution=wasm -// --wasm-execution=compiled -// --heap-pages=4096 -// --json-file=/var/lib/gitlab-runner/builds/zyw4fam_/0/parity/mirrors/cumulus/.git/.artifacts/bench.json -// --pallet=pallet_assets -// --no-storage-info -// --no-median-slopes -// --no-min-squares -// --extrinsic=* -// --steps=50 -// --repeat=20 -// --json -// --header=./file_header.txt -// --output=./parachains/runtimes/assets/asset-hub-kusama/src/weights/ - -#![cfg_attr(rustfmt, rustfmt_skip)] -#![allow(unused_parens)] -#![allow(unused_imports)] -#![allow(missing_docs)] - -use frame_support::{traits::Get, weights::Weight}; -use core::marker::PhantomData; - -/// Weight functions for `pallet_assets`. -pub struct WeightInfo(PhantomData); -impl pallet_assets::WeightInfo for WeightInfo { - /// Storage: `ParachainInfo::ParachainId` (r:1 w:0) - /// Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - /// Storage: `ForeignAssets::Asset` (r:1 w:1) - /// Proof: `ForeignAssets::Asset` (`max_values`: None, `max_size`: Some(808), added: 3283, mode: `MaxEncodedLen`) - /// Storage: `System::Account` (r:1 w:1) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - fn create() -> Weight { - // Proof Size summary in bytes: - // Measured: `107` - // Estimated: `4273` - // Minimum execution time: 30_485_000 picoseconds. - Weight::from_parts(31_007_000, 0) - .saturating_add(Weight::from_parts(0, 4273)) - .saturating_add(T::DbWeight::get().reads(3)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `ForeignAssets::Asset` (r:1 w:1) - /// Proof: `ForeignAssets::Asset` (`max_values`: None, `max_size`: Some(808), added: 3283, mode: `MaxEncodedLen`) - fn force_create() -> Weight { - // Proof Size summary in bytes: - // Measured: `4` - // Estimated: `4273` - // Minimum execution time: 12_991_000 picoseconds. - Weight::from_parts(13_304_000, 0) - .saturating_add(Weight::from_parts(0, 4273)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `ForeignAssets::Asset` (r:1 w:1) - /// Proof: `ForeignAssets::Asset` (`max_values`: None, `max_size`: Some(808), added: 3283, mode: `MaxEncodedLen`) - fn start_destroy() -> Weight { - // Proof Size summary in bytes: - // Measured: `276` - // Estimated: `4273` - // Minimum execution time: 15_689_000 picoseconds. - Weight::from_parts(16_063_000, 0) - .saturating_add(Weight::from_parts(0, 4273)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: ForeignAssets Asset (r:1 w:1) - /// Proof: ForeignAssets Asset (max_values: None, max_size: Some(808), added: 3283, mode: MaxEncodedLen) - /// Storage: ForeignAssets Account (r:1001 w:1000) - /// Proof: ForeignAssets Account (max_values: None, max_size: Some(732), added: 3207, mode: MaxEncodedLen) - /// Storage: System Account (r:1000 w:1000) - /// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) - /// The range of component `c` is `[0, 1000]`. - /// The range of component `c` is `[0, 1000]`. 
- fn destroy_accounts(c: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `0 + c * (208 ±0)` - // Estimated: `4273 + c * (3207 ±0)` - // Minimum execution time: 18_533_000 picoseconds. - Weight::from_parts(18_791_000, 0) - .saturating_add(Weight::from_parts(0, 4273)) - // Standard Error: 5_059 - .saturating_add(Weight::from_parts(12_049_659, 0).saturating_mul(c.into())) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().reads((2_u64).saturating_mul(c.into()))) - .saturating_add(T::DbWeight::get().writes(1)) - .saturating_add(T::DbWeight::get().writes((2_u64).saturating_mul(c.into()))) - .saturating_add(Weight::from_parts(0, 3207).saturating_mul(c.into())) - } - /// Storage: ForeignAssets Asset (r:1 w:1) - /// Proof: ForeignAssets Asset (max_values: None, max_size: Some(808), added: 3283, mode: MaxEncodedLen) - /// Storage: ForeignAssets Approvals (r:1001 w:1000) - /// Proof: ForeignAssets Approvals (max_values: None, max_size: Some(746), added: 3221, mode: MaxEncodedLen) - /// The range of component `a` is `[0, 1000]`. - /// The range of component `a` is `[0, 1000]`. - fn destroy_approvals(a: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `413 + a * (86 ±0)` - // Estimated: `4273 + a * (3221 ±0)` - // Minimum execution time: 20_028_000 picoseconds. - Weight::from_parts(20_148_000, 0) - .saturating_add(Weight::from_parts(0, 4273)) - // Standard Error: 3_401 - .saturating_add(Weight::from_parts(13_897_319, 0).saturating_mul(a.into())) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(a.into()))) - .saturating_add(T::DbWeight::get().writes(1)) - .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(a.into()))) - .saturating_add(Weight::from_parts(0, 3221).saturating_mul(a.into())) - } - /// Storage: `ForeignAssets::Asset` (r:1 w:1) - /// Proof: `ForeignAssets::Asset` (`max_values`: None, `max_size`: Some(808), added: 3283, mode: `MaxEncodedLen`) - /// Storage: `ForeignAssets::Metadata` (r:1 w:0) - /// Proof: `ForeignAssets::Metadata` (`max_values`: None, `max_size`: Some(738), added: 3213, mode: `MaxEncodedLen`) - fn finish_destroy() -> Weight { - // Proof Size summary in bytes: - // Measured: `242` - // Estimated: `4273` - // Minimum execution time: 15_949_000 picoseconds. - Weight::from_parts(16_241_000, 0) - .saturating_add(Weight::from_parts(0, 4273)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `ForeignAssets::Asset` (r:1 w:1) - /// Proof: `ForeignAssets::Asset` (`max_values`: None, `max_size`: Some(808), added: 3283, mode: `MaxEncodedLen`) - /// Storage: `ForeignAssets::Account` (r:1 w:1) - /// Proof: `ForeignAssets::Account` (`max_values`: None, `max_size`: Some(732), added: 3207, mode: `MaxEncodedLen`) - fn mint() -> Weight { - // Proof Size summary in bytes: - // Measured: `242` - // Estimated: `4273` - // Minimum execution time: 27_156_000 picoseconds. 
- Weight::from_parts(28_182_000, 0) - .saturating_add(Weight::from_parts(0, 4273)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `ForeignAssets::Asset` (r:1 w:1) - /// Proof: `ForeignAssets::Asset` (`max_values`: None, `max_size`: Some(808), added: 3283, mode: `MaxEncodedLen`) - /// Storage: `ForeignAssets::Account` (r:1 w:1) - /// Proof: `ForeignAssets::Account` (`max_values`: None, `max_size`: Some(732), added: 3207, mode: `MaxEncodedLen`) - fn burn() -> Weight { - // Proof Size summary in bytes: - // Measured: `350` - // Estimated: `4273` - // Minimum execution time: 33_503_000 picoseconds. - Weight::from_parts(33_860_000, 0) - .saturating_add(Weight::from_parts(0, 4273)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `ForeignAssets::Asset` (r:1 w:1) - /// Proof: `ForeignAssets::Asset` (`max_values`: None, `max_size`: Some(808), added: 3283, mode: `MaxEncodedLen`) - /// Storage: `ForeignAssets::Account` (r:2 w:2) - /// Proof: `ForeignAssets::Account` (`max_values`: None, `max_size`: Some(732), added: 3207, mode: `MaxEncodedLen`) - /// Storage: `System::Account` (r:1 w:1) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - fn transfer() -> Weight { - // Proof Size summary in bytes: - // Measured: `350` - // Estimated: `7404` - // Minimum execution time: 45_065_000 picoseconds. - Weight::from_parts(45_856_000, 0) - .saturating_add(Weight::from_parts(0, 7404)) - .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(4)) - } - /// Storage: `ForeignAssets::Asset` (r:1 w:1) - /// Proof: `ForeignAssets::Asset` (`max_values`: None, `max_size`: Some(808), added: 3283, mode: `MaxEncodedLen`) - /// Storage: `ForeignAssets::Account` (r:2 w:2) - /// Proof: `ForeignAssets::Account` (`max_values`: None, `max_size`: Some(732), added: 3207, mode: `MaxEncodedLen`) - /// Storage: `System::Account` (r:1 w:1) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - fn transfer_keep_alive() -> Weight { - // Proof Size summary in bytes: - // Measured: `350` - // Estimated: `7404` - // Minimum execution time: 39_913_000 picoseconds. - Weight::from_parts(40_791_000, 0) - .saturating_add(Weight::from_parts(0, 7404)) - .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(4)) - } - /// Storage: `ForeignAssets::Asset` (r:1 w:1) - /// Proof: `ForeignAssets::Asset` (`max_values`: None, `max_size`: Some(808), added: 3283, mode: `MaxEncodedLen`) - /// Storage: `ForeignAssets::Account` (r:2 w:2) - /// Proof: `ForeignAssets::Account` (`max_values`: None, `max_size`: Some(732), added: 3207, mode: `MaxEncodedLen`) - /// Storage: `System::Account` (r:1 w:1) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - fn force_transfer() -> Weight { - // Proof Size summary in bytes: - // Measured: `350` - // Estimated: `7404` - // Minimum execution time: 45_337_000 picoseconds. 
- Weight::from_parts(45_980_000, 0) - .saturating_add(Weight::from_parts(0, 7404)) - .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(4)) - } - /// Storage: `ForeignAssets::Asset` (r:1 w:0) - /// Proof: `ForeignAssets::Asset` (`max_values`: None, `max_size`: Some(808), added: 3283, mode: `MaxEncodedLen`) - /// Storage: `ForeignAssets::Account` (r:1 w:1) - /// Proof: `ForeignAssets::Account` (`max_values`: None, `max_size`: Some(732), added: 3207, mode: `MaxEncodedLen`) - fn freeze() -> Weight { - // Proof Size summary in bytes: - // Measured: `350` - // Estimated: `4273` - // Minimum execution time: 19_012_000 picoseconds. - Weight::from_parts(19_326_000, 0) - .saturating_add(Weight::from_parts(0, 4273)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `ForeignAssets::Asset` (r:1 w:0) - /// Proof: `ForeignAssets::Asset` (`max_values`: None, `max_size`: Some(808), added: 3283, mode: `MaxEncodedLen`) - /// Storage: `ForeignAssets::Account` (r:1 w:1) - /// Proof: `ForeignAssets::Account` (`max_values`: None, `max_size`: Some(732), added: 3207, mode: `MaxEncodedLen`) - fn thaw() -> Weight { - // Proof Size summary in bytes: - // Measured: `350` - // Estimated: `4273` - // Minimum execution time: 18_656_000 picoseconds. - Weight::from_parts(19_205_000, 0) - .saturating_add(Weight::from_parts(0, 4273)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `ForeignAssets::Asset` (r:1 w:1) - /// Proof: `ForeignAssets::Asset` (`max_values`: None, `max_size`: Some(808), added: 3283, mode: `MaxEncodedLen`) - fn freeze_asset() -> Weight { - // Proof Size summary in bytes: - // Measured: `276` - // Estimated: `4273` - // Minimum execution time: 15_440_000 picoseconds. - Weight::from_parts(15_825_000, 0) - .saturating_add(Weight::from_parts(0, 4273)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `ForeignAssets::Asset` (r:1 w:1) - /// Proof: `ForeignAssets::Asset` (`max_values`: None, `max_size`: Some(808), added: 3283, mode: `MaxEncodedLen`) - fn thaw_asset() -> Weight { - // Proof Size summary in bytes: - // Measured: `276` - // Estimated: `4273` - // Minimum execution time: 15_465_000 picoseconds. - Weight::from_parts(15_769_000, 0) - .saturating_add(Weight::from_parts(0, 4273)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `ForeignAssets::Asset` (r:1 w:1) - /// Proof: `ForeignAssets::Asset` (`max_values`: None, `max_size`: Some(808), added: 3283, mode: `MaxEncodedLen`) - /// Storage: `ForeignAssets::Metadata` (r:1 w:0) - /// Proof: `ForeignAssets::Metadata` (`max_values`: None, `max_size`: Some(738), added: 3213, mode: `MaxEncodedLen`) - fn transfer_ownership() -> Weight { - // Proof Size summary in bytes: - // Measured: `242` - // Estimated: `4273` - // Minimum execution time: 16_579_000 picoseconds. - Weight::from_parts(16_931_000, 0) - .saturating_add(Weight::from_parts(0, 4273)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `ForeignAssets::Asset` (r:1 w:1) - /// Proof: `ForeignAssets::Asset` (`max_values`: None, `max_size`: Some(808), added: 3283, mode: `MaxEncodedLen`) - fn set_team() -> Weight { - // Proof Size summary in bytes: - // Measured: `242` - // Estimated: `4273` - // Minimum execution time: 15_138_000 picoseconds. 
- Weight::from_parts(15_435_000, 0) - .saturating_add(Weight::from_parts(0, 4273)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: ForeignAssets Asset (r:1 w:0) - /// Proof: ForeignAssets Asset (max_values: None, max_size: Some(808), added: 3283, mode: MaxEncodedLen) - /// Storage: ForeignAssets Metadata (r:1 w:1) - /// Proof: ForeignAssets Metadata (max_values: None, max_size: Some(738), added: 3213, mode: MaxEncodedLen) - /// The range of component `n` is `[0, 50]`. - /// The range of component `s` is `[0, 50]`. - /// The range of component `n` is `[0, 50]`. - /// The range of component `s` is `[0, 50]`. - fn set_metadata(_n: u32, _s: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `242` - // Estimated: `4273` - // Minimum execution time: 29_846_000 picoseconds. - Weight::from_parts(31_607_649, 0) - .saturating_add(Weight::from_parts(0, 4273)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `ForeignAssets::Asset` (r:1 w:0) - /// Proof: `ForeignAssets::Asset` (`max_values`: None, `max_size`: Some(808), added: 3283, mode: `MaxEncodedLen`) - /// Storage: `ForeignAssets::Metadata` (r:1 w:1) - /// Proof: `ForeignAssets::Metadata` (`max_values`: None, `max_size`: Some(738), added: 3213, mode: `MaxEncodedLen`) - fn clear_metadata() -> Weight { - // Proof Size summary in bytes: - // Measured: `406` - // Estimated: `4273` - // Minimum execution time: 30_582_000 picoseconds. - Weight::from_parts(31_008_000, 0) - .saturating_add(Weight::from_parts(0, 4273)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: ForeignAssets Asset (r:1 w:0) - /// Proof: ForeignAssets Asset (max_values: None, max_size: Some(808), added: 3283, mode: MaxEncodedLen) - /// Storage: ForeignAssets Metadata (r:1 w:1) - /// Proof: ForeignAssets Metadata (max_values: None, max_size: Some(738), added: 3213, mode: MaxEncodedLen) - /// The range of component `n` is `[0, 50]`. - /// The range of component `s` is `[0, 50]`. - /// The range of component `n` is `[0, 50]`. - /// The range of component `s` is `[0, 50]`. - fn force_set_metadata(_n: u32, s: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `81` - // Estimated: `4273` - // Minimum execution time: 14_186_000 picoseconds. - Weight::from_parts(14_717_332, 0) - .saturating_add(Weight::from_parts(0, 4273)) - // Standard Error: 517 - .saturating_add(Weight::from_parts(2_595, 0).saturating_mul(s.into())) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `ForeignAssets::Asset` (r:1 w:0) - /// Proof: `ForeignAssets::Asset` (`max_values`: None, `max_size`: Some(808), added: 3283, mode: `MaxEncodedLen`) - /// Storage: `ForeignAssets::Metadata` (r:1 w:1) - /// Proof: `ForeignAssets::Metadata` (`max_values`: None, `max_size`: Some(738), added: 3213, mode: `MaxEncodedLen`) - fn force_clear_metadata() -> Weight { - // Proof Size summary in bytes: - // Measured: `406` - // Estimated: `4273` - // Minimum execution time: 29_499_000 picoseconds. 
- Weight::from_parts(29_918_000, 0) - .saturating_add(Weight::from_parts(0, 4273)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `ForeignAssets::Asset` (r:1 w:1) - /// Proof: `ForeignAssets::Asset` (`max_values`: None, `max_size`: Some(808), added: 3283, mode: `MaxEncodedLen`) - fn force_asset_status() -> Weight { - // Proof Size summary in bytes: - // Measured: `242` - // Estimated: `4273` - // Minimum execution time: 13_815_000 picoseconds. - Weight::from_parts(14_138_000, 0) - .saturating_add(Weight::from_parts(0, 4273)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `ForeignAssets::Asset` (r:1 w:1) - /// Proof: `ForeignAssets::Asset` (`max_values`: None, `max_size`: Some(808), added: 3283, mode: `MaxEncodedLen`) - /// Storage: `ForeignAssets::Approvals` (r:1 w:1) - /// Proof: `ForeignAssets::Approvals` (`max_values`: None, `max_size`: Some(746), added: 3221, mode: `MaxEncodedLen`) - fn approve_transfer() -> Weight { - // Proof Size summary in bytes: - // Measured: `276` - // Estimated: `4273` - // Minimum execution time: 33_029_000 picoseconds. - Weight::from_parts(33_524_000, 0) - .saturating_add(Weight::from_parts(0, 4273)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `ForeignAssets::Asset` (r:1 w:1) - /// Proof: `ForeignAssets::Asset` (`max_values`: None, `max_size`: Some(808), added: 3283, mode: `MaxEncodedLen`) - /// Storage: `ForeignAssets::Approvals` (r:1 w:1) - /// Proof: `ForeignAssets::Approvals` (`max_values`: None, `max_size`: Some(746), added: 3221, mode: `MaxEncodedLen`) - /// Storage: `ForeignAssets::Account` (r:2 w:2) - /// Proof: `ForeignAssets::Account` (`max_values`: None, `max_size`: Some(732), added: 3207, mode: `MaxEncodedLen`) - /// Storage: `System::Account` (r:1 w:1) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - fn transfer_approved() -> Weight { - // Proof Size summary in bytes: - // Measured: `520` - // Estimated: `7404` - // Minimum execution time: 63_205_000 picoseconds. - Weight::from_parts(64_078_000, 0) - .saturating_add(Weight::from_parts(0, 7404)) - .saturating_add(T::DbWeight::get().reads(5)) - .saturating_add(T::DbWeight::get().writes(5)) - } - /// Storage: `ForeignAssets::Asset` (r:1 w:1) - /// Proof: `ForeignAssets::Asset` (`max_values`: None, `max_size`: Some(808), added: 3283, mode: `MaxEncodedLen`) - /// Storage: `ForeignAssets::Approvals` (r:1 w:1) - /// Proof: `ForeignAssets::Approvals` (`max_values`: None, `max_size`: Some(746), added: 3221, mode: `MaxEncodedLen`) - fn cancel_approval() -> Weight { - // Proof Size summary in bytes: - // Measured: `446` - // Estimated: `4273` - // Minimum execution time: 34_948_000 picoseconds. 
- Weight::from_parts(35_484_000, 0) - .saturating_add(Weight::from_parts(0, 4273)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `ForeignAssets::Asset` (r:1 w:1) - /// Proof: `ForeignAssets::Asset` (`max_values`: None, `max_size`: Some(808), added: 3283, mode: `MaxEncodedLen`) - /// Storage: `ForeignAssets::Approvals` (r:1 w:1) - /// Proof: `ForeignAssets::Approvals` (`max_values`: None, `max_size`: Some(746), added: 3221, mode: `MaxEncodedLen`) - fn force_cancel_approval() -> Weight { - // Proof Size summary in bytes: - // Measured: `446` - // Estimated: `4273` - // Minimum execution time: 35_722_000 picoseconds. - Weight::from_parts(36_266_000, 0) - .saturating_add(Weight::from_parts(0, 4273)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `ForeignAssets::Asset` (r:1 w:1) - /// Proof: `ForeignAssets::Asset` (`max_values`: None, `max_size`: Some(808), added: 3283, mode: `MaxEncodedLen`) - fn set_min_balance() -> Weight { - // Proof Size summary in bytes: - // Measured: `242` - // Estimated: `4273` - // Minimum execution time: 15_855_000 picoseconds. - Weight::from_parts(16_182_000, 0) - .saturating_add(Weight::from_parts(0, 4273)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `ForeignAssets::Account` (r:1 w:1) - /// Proof: `ForeignAssets::Account` (`max_values`: None, `max_size`: Some(732), added: 3207, mode: `MaxEncodedLen`) - /// Storage: `ForeignAssets::Asset` (r:1 w:1) - /// Proof: `ForeignAssets::Asset` (`max_values`: None, `max_size`: Some(808), added: 3283, mode: `MaxEncodedLen`) - /// Storage: `System::Account` (r:1 w:1) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - fn touch() -> Weight { - // Proof Size summary in bytes: - // Measured: `345` - // Estimated: `4273` - // Minimum execution time: 34_984_000 picoseconds. - Weight::from_parts(35_512_000, 0) - .saturating_add(Weight::from_parts(0, 4273)) - .saturating_add(T::DbWeight::get().reads(3)) - .saturating_add(T::DbWeight::get().writes(3)) - } - /// Storage: `ForeignAssets::Account` (r:1 w:1) - /// Proof: `ForeignAssets::Account` (`max_values`: None, `max_size`: Some(732), added: 3207, mode: `MaxEncodedLen`) - /// Storage: `ForeignAssets::Asset` (r:1 w:1) - /// Proof: `ForeignAssets::Asset` (`max_values`: None, `max_size`: Some(808), added: 3283, mode: `MaxEncodedLen`) - fn touch_other() -> Weight { - // Proof Size summary in bytes: - // Measured: `242` - // Estimated: `4273` - // Minimum execution time: 33_041_000 picoseconds. - Weight::from_parts(34_124_000, 0) - .saturating_add(Weight::from_parts(0, 4273)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `ForeignAssets::Account` (r:1 w:1) - /// Proof: `ForeignAssets::Account` (`max_values`: None, `max_size`: Some(732), added: 3207, mode: `MaxEncodedLen`) - /// Storage: `ForeignAssets::Asset` (r:1 w:1) - /// Proof: `ForeignAssets::Asset` (`max_values`: None, `max_size`: Some(808), added: 3283, mode: `MaxEncodedLen`) - /// Storage: `System::Account` (r:1 w:1) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - fn refund() -> Weight { - // Proof Size summary in bytes: - // Measured: `471` - // Estimated: `4273` - // Minimum execution time: 31_728_000 picoseconds. 
- Weight::from_parts(32_012_000, 0) - .saturating_add(Weight::from_parts(0, 4273)) - .saturating_add(T::DbWeight::get().reads(3)) - .saturating_add(T::DbWeight::get().writes(3)) - } - /// Storage: `ForeignAssets::Account` (r:1 w:1) - /// Proof: `ForeignAssets::Account` (`max_values`: None, `max_size`: Some(732), added: 3207, mode: `MaxEncodedLen`) - /// Storage: `ForeignAssets::Asset` (r:1 w:1) - /// Proof: `ForeignAssets::Asset` (`max_values`: None, `max_size`: Some(808), added: 3283, mode: `MaxEncodedLen`) - fn refund_other() -> Weight { - // Proof Size summary in bytes: - // Measured: `401` - // Estimated: `4273` - // Minimum execution time: 29_432_000 picoseconds. - Weight::from_parts(29_968_000, 0) - .saturating_add(Weight::from_parts(0, 4273)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `ForeignAssets::Asset` (r:1 w:0) - /// Proof: `ForeignAssets::Asset` (`max_values`: None, `max_size`: Some(808), added: 3283, mode: `MaxEncodedLen`) - /// Storage: `ForeignAssets::Account` (r:1 w:1) - /// Proof: `ForeignAssets::Account` (`max_values`: None, `max_size`: Some(732), added: 3207, mode: `MaxEncodedLen`) - fn block() -> Weight { - // Proof Size summary in bytes: - // Measured: `350` - // Estimated: `4273` - // Minimum execution time: 18_827_000 picoseconds. - Weight::from_parts(19_172_000, 0) - .saturating_add(Weight::from_parts(0, 4273)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(1)) - } -} diff --git a/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/pallet_assets_local.rs b/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/pallet_assets_local.rs deleted file mode 100644 index 957e33fcd9e..00000000000 --- a/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/pallet_assets_local.rs +++ /dev/null @@ -1,530 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! Autogenerated weights for `pallet_assets` -//! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-07-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-ynta1nyy-project-238-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! 
EXECUTION: ``, WASM-EXECUTION: `Compiled`, CHAIN: `Some("asset-hub-kusama-dev")`, DB CACHE: 1024 - -// Executed Command: -// ./target/production/polkadot-parachain -// benchmark -// pallet -// --chain=asset-hub-kusama-dev -// --wasm-execution=compiled -// --pallet=pallet_assets -// --no-storage-info -// --no-median-slopes -// --no-min-squares -// --extrinsic=* -// --steps=50 -// --repeat=20 -// --json -// --header=./file_header.txt -// --output=./parachains/runtimes/assets/asset-hub-kusama/src/weights/ - -#![cfg_attr(rustfmt, rustfmt_skip)] -#![allow(unused_parens)] -#![allow(unused_imports)] -#![allow(missing_docs)] - -use frame_support::{traits::Get, weights::Weight}; -use core::marker::PhantomData; - -/// Weight functions for `pallet_assets`. -pub struct WeightInfo(PhantomData); -impl pallet_assets::WeightInfo for WeightInfo { - /// Storage: `Assets::Asset` (r:1 w:1) - /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) - /// Storage: `System::Account` (r:1 w:1) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - fn create() -> Weight { - // Proof Size summary in bytes: - // Measured: `109` - // Estimated: `3675` - // Minimum execution time: 26_510_000 picoseconds. - Weight::from_parts(27_332_000, 0) - .saturating_add(Weight::from_parts(0, 3675)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `Assets::Asset` (r:1 w:1) - /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) - fn force_create() -> Weight { - // Proof Size summary in bytes: - // Measured: `6` - // Estimated: `3675` - // Minimum execution time: 10_899_000 picoseconds. - Weight::from_parts(11_395_000, 0) - .saturating_add(Weight::from_parts(0, 3675)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `Assets::Asset` (r:1 w:1) - /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) - fn start_destroy() -> Weight { - // Proof Size summary in bytes: - // Measured: `277` - // Estimated: `3675` - // Minimum execution time: 13_593_000 picoseconds. - Weight::from_parts(14_108_000, 0) - .saturating_add(Weight::from_parts(0, 3675)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `Assets::Asset` (r:1 w:1) - /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) - /// Storage: `Assets::Account` (r:1001 w:1000) - /// Proof: `Assets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) - /// Storage: `System::Account` (r:1000 w:1000) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - /// The range of component `c` is `[0, 1000]`. - /// The range of component `c` is `[0, 1000]`. - fn destroy_accounts(c: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `0 + c * (208 ±0)` - // Estimated: `3675 + c * (2609 ±0)` - // Minimum execution time: 16_216_000 picoseconds. 
- Weight::from_parts(16_636_000, 0) - .saturating_add(Weight::from_parts(0, 3675)) - // Standard Error: 9_346 - .saturating_add(Weight::from_parts(15_306_152, 0).saturating_mul(c.into())) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().reads((2_u64).saturating_mul(c.into()))) - .saturating_add(T::DbWeight::get().writes(1)) - .saturating_add(T::DbWeight::get().writes((2_u64).saturating_mul(c.into()))) - .saturating_add(Weight::from_parts(0, 2609).saturating_mul(c.into())) - } - /// Storage: `Assets::Asset` (r:1 w:1) - /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) - /// Storage: `Assets::Approvals` (r:1001 w:1000) - /// Proof: `Assets::Approvals` (`max_values`: None, `max_size`: Some(148), added: 2623, mode: `MaxEncodedLen`) - /// The range of component `a` is `[0, 1000]`. - /// The range of component `a` is `[0, 1000]`. - fn destroy_approvals(a: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `414 + a * (86 ±0)` - // Estimated: `3675 + a * (2623 ±0)` - // Minimum execution time: 16_745_000 picoseconds. - Weight::from_parts(17_247_000, 0) - .saturating_add(Weight::from_parts(0, 3675)) - .saturating_add(Weight::from_parts(15_634_963, 0).saturating_mul(a.into())) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(a.into()))) - .saturating_add(T::DbWeight::get().writes(1)) - .saturating_add(Weight::from_parts(0, 2623).saturating_mul(a.into())) - } - /// Storage: `Assets::Asset` (r:1 w:1) - /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) - /// Storage: `Assets::Metadata` (r:1 w:0) - /// Proof: `Assets::Metadata` (`max_values`: None, `max_size`: Some(140), added: 2615, mode: `MaxEncodedLen`) - fn finish_destroy() -> Weight { - // Proof Size summary in bytes: - // Measured: `243` - // Estimated: `3675` - // Minimum execution time: 13_650_000 picoseconds. - Weight::from_parts(14_721_000, 0) - .saturating_add(Weight::from_parts(0, 3675)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `Assets::Asset` (r:1 w:1) - /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) - /// Storage: `Assets::Account` (r:1 w:1) - /// Proof: `Assets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) - fn mint() -> Weight { - // Proof Size summary in bytes: - // Measured: `243` - // Estimated: `3675` - // Minimum execution time: 24_121_000 picoseconds. - Weight::from_parts(25_023_000, 0) - .saturating_add(Weight::from_parts(0, 3675)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `Assets::Asset` (r:1 w:1) - /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) - /// Storage: `Assets::Account` (r:1 w:1) - /// Proof: `Assets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) - fn burn() -> Weight { - // Proof Size summary in bytes: - // Measured: `351` - // Estimated: `3675` - // Minimum execution time: 31_414_000 picoseconds. 
- Weight::from_parts(32_235_000, 0) - .saturating_add(Weight::from_parts(0, 3675)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `Assets::Asset` (r:1 w:1) - /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) - /// Storage: `Assets::Account` (r:2 w:2) - /// Proof: `Assets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) - /// Storage: `System::Account` (r:1 w:1) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - fn transfer() -> Weight { - // Proof Size summary in bytes: - // Measured: `351` - // Estimated: `6208` - // Minimum execution time: 43_114_000 picoseconds. - Weight::from_parts(44_106_000, 0) - .saturating_add(Weight::from_parts(0, 6208)) - .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(4)) - } - /// Storage: `Assets::Asset` (r:1 w:1) - /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) - /// Storage: `Assets::Account` (r:2 w:2) - /// Proof: `Assets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) - /// Storage: `System::Account` (r:1 w:1) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - fn transfer_keep_alive() -> Weight { - // Proof Size summary in bytes: - // Measured: `351` - // Estimated: `6208` - // Minimum execution time: 37_954_000 picoseconds. - Weight::from_parts(38_772_000, 0) - .saturating_add(Weight::from_parts(0, 6208)) - .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(4)) - } - /// Storage: `Assets::Asset` (r:1 w:1) - /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) - /// Storage: `Assets::Account` (r:2 w:2) - /// Proof: `Assets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) - /// Storage: `System::Account` (r:1 w:1) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - fn force_transfer() -> Weight { - // Proof Size summary in bytes: - // Measured: `351` - // Estimated: `6208` - // Minimum execution time: 43_051_000 picoseconds. - Weight::from_parts(44_003_000, 0) - .saturating_add(Weight::from_parts(0, 6208)) - .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(4)) - } - /// Storage: `Assets::Asset` (r:1 w:0) - /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) - /// Storage: `Assets::Account` (r:1 w:1) - /// Proof: `Assets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) - fn freeze() -> Weight { - // Proof Size summary in bytes: - // Measured: `351` - // Estimated: `3675` - // Minimum execution time: 17_048_000 picoseconds. 
- Weight::from_parts(17_614_000, 0) - .saturating_add(Weight::from_parts(0, 3675)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `Assets::Asset` (r:1 w:0) - /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) - /// Storage: `Assets::Account` (r:1 w:1) - /// Proof: `Assets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) - fn thaw() -> Weight { - // Proof Size summary in bytes: - // Measured: `351` - // Estimated: `3675` - // Minimum execution time: 16_705_000 picoseconds. - Weight::from_parts(17_581_000, 0) - .saturating_add(Weight::from_parts(0, 3675)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `Assets::Asset` (r:1 w:1) - /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) - fn freeze_asset() -> Weight { - // Proof Size summary in bytes: - // Measured: `277` - // Estimated: `3675` - // Minimum execution time: 13_284_000 picoseconds. - Weight::from_parts(13_735_000, 0) - .saturating_add(Weight::from_parts(0, 3675)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `Assets::Asset` (r:1 w:1) - /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) - fn thaw_asset() -> Weight { - // Proof Size summary in bytes: - // Measured: `277` - // Estimated: `3675` - // Minimum execution time: 13_030_000 picoseconds. - Weight::from_parts(13_417_000, 0) - .saturating_add(Weight::from_parts(0, 3675)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `Assets::Asset` (r:1 w:1) - /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) - /// Storage: `Assets::Metadata` (r:1 w:0) - /// Proof: `Assets::Metadata` (`max_values`: None, `max_size`: Some(140), added: 2615, mode: `MaxEncodedLen`) - fn transfer_ownership() -> Weight { - // Proof Size summary in bytes: - // Measured: `243` - // Estimated: `3675` - // Minimum execution time: 14_174_000 picoseconds. - Weight::from_parts(14_660_000, 0) - .saturating_add(Weight::from_parts(0, 3675)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `Assets::Asset` (r:1 w:1) - /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) - fn set_team() -> Weight { - // Proof Size summary in bytes: - // Measured: `243` - // Estimated: `3675` - // Minimum execution time: 12_737_000 picoseconds. - Weight::from_parts(13_172_000, 0) - .saturating_add(Weight::from_parts(0, 3675)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `Assets::Asset` (r:1 w:0) - /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) - /// Storage: `Assets::Metadata` (r:1 w:1) - /// Proof: `Assets::Metadata` (`max_values`: None, `max_size`: Some(140), added: 2615, mode: `MaxEncodedLen`) - /// The range of component `n` is `[0, 50]`. - /// The range of component `s` is `[0, 50]`. - /// The range of component `n` is `[0, 50]`. - /// The range of component `s` is `[0, 50]`. 
- fn set_metadata(n: u32, s: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `243` - // Estimated: `3675` - // Minimum execution time: 27_707_000 picoseconds. - Weight::from_parts(29_036_880, 0) - .saturating_add(Weight::from_parts(0, 3675)) - // Standard Error: 688 - .saturating_add(Weight::from_parts(2_426, 0).saturating_mul(n.into())) - // Standard Error: 688 - .saturating_add(Weight::from_parts(776, 0).saturating_mul(s.into())) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `Assets::Asset` (r:1 w:0) - /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) - /// Storage: `Assets::Metadata` (r:1 w:1) - /// Proof: `Assets::Metadata` (`max_values`: None, `max_size`: Some(140), added: 2615, mode: `MaxEncodedLen`) - fn clear_metadata() -> Weight { - // Proof Size summary in bytes: - // Measured: `407` - // Estimated: `3675` - // Minimum execution time: 28_514_000 picoseconds. - Weight::from_parts(29_216_000, 0) - .saturating_add(Weight::from_parts(0, 3675)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `Assets::Asset` (r:1 w:0) - /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) - /// Storage: `Assets::Metadata` (r:1 w:1) - /// Proof: `Assets::Metadata` (`max_values`: None, `max_size`: Some(140), added: 2615, mode: `MaxEncodedLen`) - /// The range of component `n` is `[0, 50]`. - /// The range of component `s` is `[0, 50]`. - /// The range of component `n` is `[0, 50]`. - /// The range of component `s` is `[0, 50]`. - fn force_set_metadata(n: u32, s: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `82` - // Estimated: `3675` - // Minimum execution time: 12_452_000 picoseconds. - Weight::from_parts(13_095_356, 0) - .saturating_add(Weight::from_parts(0, 3675)) - // Standard Error: 275 - .saturating_add(Weight::from_parts(826, 0).saturating_mul(n.into())) - // Standard Error: 275 - .saturating_add(Weight::from_parts(808, 0).saturating_mul(s.into())) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `Assets::Asset` (r:1 w:0) - /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) - /// Storage: `Assets::Metadata` (r:1 w:1) - /// Proof: `Assets::Metadata` (`max_values`: None, `max_size`: Some(140), added: 2615, mode: `MaxEncodedLen`) - fn force_clear_metadata() -> Weight { - // Proof Size summary in bytes: - // Measured: `407` - // Estimated: `3675` - // Minimum execution time: 28_181_000 picoseconds. - Weight::from_parts(29_050_000, 0) - .saturating_add(Weight::from_parts(0, 3675)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `Assets::Asset` (r:1 w:1) - /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) - fn force_asset_status() -> Weight { - // Proof Size summary in bytes: - // Measured: `243` - // Estimated: `3675` - // Minimum execution time: 12_253_000 picoseconds. 
- Weight::from_parts(12_545_000, 0) - .saturating_add(Weight::from_parts(0, 3675)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `Assets::Asset` (r:1 w:1) - /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) - /// Storage: `Assets::Approvals` (r:1 w:1) - /// Proof: `Assets::Approvals` (`max_values`: None, `max_size`: Some(148), added: 2623, mode: `MaxEncodedLen`) - fn approve_transfer() -> Weight { - // Proof Size summary in bytes: - // Measured: `277` - // Estimated: `3675` - // Minimum execution time: 31_084_000 picoseconds. - Weight::from_parts(32_052_000, 0) - .saturating_add(Weight::from_parts(0, 3675)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `Assets::Asset` (r:1 w:1) - /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) - /// Storage: `Assets::Approvals` (r:1 w:1) - /// Proof: `Assets::Approvals` (`max_values`: None, `max_size`: Some(148), added: 2623, mode: `MaxEncodedLen`) - /// Storage: `Assets::Account` (r:2 w:2) - /// Proof: `Assets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) - /// Storage: `System::Account` (r:1 w:1) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - fn transfer_approved() -> Weight { - // Proof Size summary in bytes: - // Measured: `521` - // Estimated: `6208` - // Minimum execution time: 61_756_000 picoseconds. - Weight::from_parts(62_740_000, 0) - .saturating_add(Weight::from_parts(0, 6208)) - .saturating_add(T::DbWeight::get().reads(5)) - .saturating_add(T::DbWeight::get().writes(5)) - } - /// Storage: `Assets::Asset` (r:1 w:1) - /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) - /// Storage: `Assets::Approvals` (r:1 w:1) - /// Proof: `Assets::Approvals` (`max_values`: None, `max_size`: Some(148), added: 2623, mode: `MaxEncodedLen`) - fn cancel_approval() -> Weight { - // Proof Size summary in bytes: - // Measured: `447` - // Estimated: `3675` - // Minimum execution time: 33_370_000 picoseconds. - Weight::from_parts(34_127_000, 0) - .saturating_add(Weight::from_parts(0, 3675)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `Assets::Asset` (r:1 w:1) - /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) - /// Storage: `Assets::Approvals` (r:1 w:1) - /// Proof: `Assets::Approvals` (`max_values`: None, `max_size`: Some(148), added: 2623, mode: `MaxEncodedLen`) - fn force_cancel_approval() -> Weight { - // Proof Size summary in bytes: - // Measured: `447` - // Estimated: `3675` - // Minimum execution time: 33_753_000 picoseconds. - Weight::from_parts(34_613_000, 0) - .saturating_add(Weight::from_parts(0, 3675)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `Assets::Asset` (r:1 w:1) - /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) - fn set_min_balance() -> Weight { - // Proof Size summary in bytes: - // Measured: `243` - // Estimated: `3675` - // Minimum execution time: 13_508_000 picoseconds. 
- Weight::from_parts(13_997_000, 0) - .saturating_add(Weight::from_parts(0, 3675)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `Assets::Account` (r:1 w:1) - /// Proof: `Assets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) - /// Storage: `Assets::Asset` (r:1 w:1) - /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) - /// Storage: `System::Account` (r:1 w:1) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - fn touch() -> Weight { - // Proof Size summary in bytes: - // Measured: `346` - // Estimated: `3675` - // Minimum execution time: 32_578_000 picoseconds. - Weight::from_parts(33_675_000, 0) - .saturating_add(Weight::from_parts(0, 3675)) - .saturating_add(T::DbWeight::get().reads(3)) - .saturating_add(T::DbWeight::get().writes(3)) - } - /// Storage: `Assets::Account` (r:1 w:1) - /// Proof: `Assets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) - /// Storage: `Assets::Asset` (r:1 w:1) - /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) - fn touch_other() -> Weight { - // Proof Size summary in bytes: - // Measured: `243` - // Estimated: `3675` - // Minimum execution time: 30_768_000 picoseconds. - Weight::from_parts(31_710_000, 0) - .saturating_add(Weight::from_parts(0, 3675)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `Assets::Account` (r:1 w:1) - /// Proof: `Assets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) - /// Storage: `Assets::Asset` (r:1 w:1) - /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) - /// Storage: `System::Account` (r:1 w:1) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - fn refund() -> Weight { - // Proof Size summary in bytes: - // Measured: `472` - // Estimated: `3675` - // Minimum execution time: 30_028_000 picoseconds. - Weight::from_parts(30_793_000, 0) - .saturating_add(Weight::from_parts(0, 3675)) - .saturating_add(T::DbWeight::get().reads(3)) - .saturating_add(T::DbWeight::get().writes(3)) - } - /// Storage: `Assets::Account` (r:1 w:1) - /// Proof: `Assets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) - /// Storage: `Assets::Asset` (r:1 w:1) - /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) - fn refund_other() -> Weight { - // Proof Size summary in bytes: - // Measured: `402` - // Estimated: `3675` - // Minimum execution time: 28_354_000 picoseconds. - Weight::from_parts(29_097_000, 0) - .saturating_add(Weight::from_parts(0, 3675)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `Assets::Asset` (r:1 w:0) - /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) - /// Storage: `Assets::Account` (r:1 w:1) - /// Proof: `Assets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) - fn block() -> Weight { - // Proof Size summary in bytes: - // Measured: `351` - // Estimated: `3675` - // Minimum execution time: 16_607_000 picoseconds. 
- Weight::from_parts(17_433_000, 0) - .saturating_add(Weight::from_parts(0, 3675)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(1)) - } -} diff --git a/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/pallet_assets_pool.rs b/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/pallet_assets_pool.rs deleted file mode 100644 index e0b4ff36552..00000000000 --- a/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/pallet_assets_pool.rs +++ /dev/null @@ -1,530 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! Autogenerated weights for `pallet_assets` -//! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-07-27, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-ynta1nyy-project-238-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! EXECUTION: ``, WASM-EXECUTION: `Compiled`, CHAIN: `Some("asset-hub-kusama-dev")`, DB CACHE: 1024 - -// Executed Command: -// target/production/polkadot-parachain -// benchmark -// pallet -// --steps=50 -// --repeat=20 -// --extrinsic=* -// --wasm-execution=compiled -// --heap-pages=4096 -// --json-file=/builds/parity/mirrors/cumulus/.git/.artifacts/bench.json -// --pallet=pallet_assets -// --chain=asset-hub-kusama-dev -// --header=./file_header.txt -// --output=./parachains/runtimes/assets/asset-hub-kusama/src/weights/ - -#![cfg_attr(rustfmt, rustfmt_skip)] -#![allow(unused_parens)] -#![allow(unused_imports)] -#![allow(missing_docs)] - -use frame_support::{traits::Get, weights::Weight}; -use core::marker::PhantomData; - -/// Weight functions for `pallet_assets`. -pub struct WeightInfo(PhantomData); -impl pallet_assets::WeightInfo for WeightInfo { - /// Storage: `PoolAssets::Asset` (r:1 w:1) - /// Proof: `PoolAssets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) - fn create() -> Weight { - // Proof Size summary in bytes: - // Measured: `42` - // Estimated: `3675` - // Minimum execution time: 11_591_000 picoseconds. - Weight::from_parts(11_901_000, 0) - .saturating_add(Weight::from_parts(0, 3675)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `PoolAssets::Asset` (r:1 w:1) - /// Proof: `PoolAssets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) - fn force_create() -> Weight { - // Proof Size summary in bytes: - // Measured: `42` - // Estimated: `3675` - // Minimum execution time: 11_184_000 picoseconds. 
- Weight::from_parts(11_640_000, 0) - .saturating_add(Weight::from_parts(0, 3675)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `PoolAssets::Asset` (r:1 w:1) - /// Proof: `PoolAssets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) - fn start_destroy() -> Weight { - // Proof Size summary in bytes: - // Measured: `314` - // Estimated: `3675` - // Minimum execution time: 13_809_000 picoseconds. - Weight::from_parts(14_226_000, 0) - .saturating_add(Weight::from_parts(0, 3675)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `PoolAssets::Asset` (r:1 w:1) - /// Proof: `PoolAssets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) - /// Storage: `PoolAssets::Account` (r:1001 w:1000) - /// Proof: `PoolAssets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) - /// Storage: `System::Account` (r:1000 w:1000) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - /// The range of component `c` is `[0, 1000]`. - /// The range of component `c` is `[0, 1000]`. - /// The range of component `c` is `[0, 1000]`. - fn destroy_accounts(c: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `0 + c * (208 ±0)` - // Estimated: `3675 + c * (2609 ±0)` - // Minimum execution time: 16_439_000 picoseconds. - Weight::from_parts(16_743_000, 0) - .saturating_add(Weight::from_parts(0, 3675)) - // Standard Error: 4_792 - .saturating_add(Weight::from_parts(14_463_991, 0).saturating_mul(c.into())) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().reads((2_u64).saturating_mul(c.into()))) - .saturating_add(T::DbWeight::get().writes(1)) - .saturating_add(T::DbWeight::get().writes((2_u64).saturating_mul(c.into()))) - .saturating_add(Weight::from_parts(0, 2609).saturating_mul(c.into())) - } - /// Storage: `PoolAssets::Asset` (r:1 w:1) - /// Proof: `PoolAssets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) - /// Storage: `PoolAssets::Approvals` (r:1001 w:1000) - /// Proof: `PoolAssets::Approvals` (`max_values`: None, `max_size`: Some(148), added: 2623, mode: `MaxEncodedLen`) - /// The range of component `a` is `[0, 1000]`. - /// The range of component `a` is `[0, 1000]`. - /// The range of component `a` is `[0, 1000]`. - fn destroy_approvals(a: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `451 + a * (86 ±0)` - // Estimated: `3675 + a * (2623 ±0)` - // Minimum execution time: 17_218_000 picoseconds. 
- Weight::from_parts(17_585_000, 0) - .saturating_add(Weight::from_parts(0, 3675)) - // Standard Error: 2_056 - .saturating_add(Weight::from_parts(5_323_866, 0).saturating_mul(a.into())) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(a.into()))) - .saturating_add(T::DbWeight::get().writes(1)) - .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(a.into()))) - .saturating_add(Weight::from_parts(0, 2623).saturating_mul(a.into())) - } - /// Storage: `PoolAssets::Asset` (r:1 w:1) - /// Proof: `PoolAssets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) - /// Storage: `PoolAssets::Metadata` (r:1 w:0) - /// Proof: `PoolAssets::Metadata` (`max_values`: None, `max_size`: Some(140), added: 2615, mode: `MaxEncodedLen`) - fn finish_destroy() -> Weight { - // Proof Size summary in bytes: - // Measured: `280` - // Estimated: `3675` - // Minimum execution time: 13_848_000 picoseconds. - Weight::from_parts(14_325_000, 0) - .saturating_add(Weight::from_parts(0, 3675)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `PoolAssets::Asset` (r:1 w:1) - /// Proof: `PoolAssets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) - /// Storage: `PoolAssets::Account` (r:1 w:1) - /// Proof: `PoolAssets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) - fn mint() -> Weight { - // Proof Size summary in bytes: - // Measured: `280` - // Estimated: `3675` - // Minimum execution time: 24_904_000 picoseconds. - Weight::from_parts(25_607_000, 0) - .saturating_add(Weight::from_parts(0, 3675)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `PoolAssets::Asset` (r:1 w:1) - /// Proof: `PoolAssets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) - /// Storage: `PoolAssets::Account` (r:1 w:1) - /// Proof: `PoolAssets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) - fn burn() -> Weight { - // Proof Size summary in bytes: - // Measured: `388` - // Estimated: `3675` - // Minimum execution time: 31_477_000 picoseconds. - Weight::from_parts(32_338_000, 0) - .saturating_add(Weight::from_parts(0, 3675)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `PoolAssets::Asset` (r:1 w:1) - /// Proof: `PoolAssets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) - /// Storage: `PoolAssets::Account` (r:2 w:2) - /// Proof: `PoolAssets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) - /// Storage: `System::Account` (r:1 w:1) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - fn transfer() -> Weight { - // Proof Size summary in bytes: - // Measured: `388` - // Estimated: `6208` - // Minimum execution time: 42_994_000 picoseconds. 
- Weight::from_parts(44_041_000, 0) - .saturating_add(Weight::from_parts(0, 6208)) - .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(4)) - } - /// Storage: `PoolAssets::Asset` (r:1 w:1) - /// Proof: `PoolAssets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) - /// Storage: `PoolAssets::Account` (r:2 w:2) - /// Proof: `PoolAssets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) - /// Storage: `System::Account` (r:1 w:1) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - fn transfer_keep_alive() -> Weight { - // Proof Size summary in bytes: - // Measured: `388` - // Estimated: `6208` - // Minimum execution time: 37_551_000 picoseconds. - Weight::from_parts(38_648_000, 0) - .saturating_add(Weight::from_parts(0, 6208)) - .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(4)) - } - /// Storage: `PoolAssets::Asset` (r:1 w:1) - /// Proof: `PoolAssets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) - /// Storage: `PoolAssets::Account` (r:2 w:2) - /// Proof: `PoolAssets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) - /// Storage: `System::Account` (r:1 w:1) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - fn force_transfer() -> Weight { - // Proof Size summary in bytes: - // Measured: `388` - // Estimated: `6208` - // Minimum execution time: 42_829_000 picoseconds. - Weight::from_parts(44_029_000, 0) - .saturating_add(Weight::from_parts(0, 6208)) - .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(4)) - } - /// Storage: `PoolAssets::Asset` (r:1 w:0) - /// Proof: `PoolAssets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) - /// Storage: `PoolAssets::Account` (r:1 w:1) - /// Proof: `PoolAssets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) - fn freeze() -> Weight { - // Proof Size summary in bytes: - // Measured: `388` - // Estimated: `3675` - // Minimum execution time: 17_304_000 picoseconds. - Weight::from_parts(17_782_000, 0) - .saturating_add(Weight::from_parts(0, 3675)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `PoolAssets::Asset` (r:1 w:0) - /// Proof: `PoolAssets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) - /// Storage: `PoolAssets::Account` (r:1 w:1) - /// Proof: `PoolAssets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) - fn thaw() -> Weight { - // Proof Size summary in bytes: - // Measured: `388` - // Estimated: `3675` - // Minimum execution time: 17_040_000 picoseconds. - Weight::from_parts(17_698_000, 0) - .saturating_add(Weight::from_parts(0, 3675)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `PoolAssets::Asset` (r:1 w:1) - /// Proof: `PoolAssets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) - fn freeze_asset() -> Weight { - // Proof Size summary in bytes: - // Measured: `314` - // Estimated: `3675` - // Minimum execution time: 13_238_000 picoseconds. 
- Weight::from_parts(13_810_000, 0) - .saturating_add(Weight::from_parts(0, 3675)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `PoolAssets::Asset` (r:1 w:1) - /// Proof: `PoolAssets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) - fn thaw_asset() -> Weight { - // Proof Size summary in bytes: - // Measured: `314` - // Estimated: `3675` - // Minimum execution time: 13_034_000 picoseconds. - Weight::from_parts(13_603_000, 0) - .saturating_add(Weight::from_parts(0, 3675)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `PoolAssets::Asset` (r:1 w:1) - /// Proof: `PoolAssets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) - /// Storage: `PoolAssets::Metadata` (r:1 w:0) - /// Proof: `PoolAssets::Metadata` (`max_values`: None, `max_size`: Some(140), added: 2615, mode: `MaxEncodedLen`) - fn transfer_ownership() -> Weight { - // Proof Size summary in bytes: - // Measured: `280` - // Estimated: `3675` - // Minimum execution time: 14_357_000 picoseconds. - Weight::from_parts(14_774_000, 0) - .saturating_add(Weight::from_parts(0, 3675)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `PoolAssets::Asset` (r:1 w:1) - /// Proof: `PoolAssets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) - fn set_team() -> Weight { - // Proof Size summary in bytes: - // Measured: `280` - // Estimated: `3675` - // Minimum execution time: 13_040_000 picoseconds. - Weight::from_parts(13_616_000, 0) - .saturating_add(Weight::from_parts(0, 3675)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `PoolAssets::Asset` (r:1 w:0) - /// Proof: `PoolAssets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) - /// Storage: `PoolAssets::Metadata` (r:1 w:1) - /// Proof: `PoolAssets::Metadata` (`max_values`: None, `max_size`: Some(140), added: 2615, mode: `MaxEncodedLen`) - /// The range of component `n` is `[0, 50]`. - /// The range of component `s` is `[0, 50]`. - /// The range of component `n` is `[0, 50]`. - /// The range of component `s` is `[0, 50]`. - /// The range of component `n` is `[0, 50]`. - /// The range of component `s` is `[0, 50]`. - fn set_metadata(n: u32, s: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `280` - // Estimated: `3675` - // Minimum execution time: 15_274_000 picoseconds. - Weight::from_parts(16_096_881, 0) - .saturating_add(Weight::from_parts(0, 3675)) - // Standard Error: 239 - .saturating_add(Weight::from_parts(1_631, 0).saturating_mul(n.into())) - // Standard Error: 239 - .saturating_add(Weight::from_parts(2_334, 0).saturating_mul(s.into())) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `PoolAssets::Asset` (r:1 w:0) - /// Proof: `PoolAssets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) - /// Storage: `PoolAssets::Metadata` (r:1 w:1) - /// Proof: `PoolAssets::Metadata` (`max_values`: None, `max_size`: Some(140), added: 2615, mode: `MaxEncodedLen`) - fn clear_metadata() -> Weight { - // Proof Size summary in bytes: - // Measured: `444` - // Estimated: `3675` - // Minimum execution time: 15_900_000 picoseconds. 
- Weight::from_parts(16_526_000, 0) - .saturating_add(Weight::from_parts(0, 3675)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `PoolAssets::Asset` (r:1 w:0) - /// Proof: `PoolAssets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) - /// Storage: `PoolAssets::Metadata` (r:1 w:1) - /// Proof: `PoolAssets::Metadata` (`max_values`: None, `max_size`: Some(140), added: 2615, mode: `MaxEncodedLen`) - /// The range of component `n` is `[0, 50]`. - /// The range of component `s` is `[0, 50]`. - /// The range of component `n` is `[0, 50]`. - /// The range of component `s` is `[0, 50]`. - /// The range of component `n` is `[0, 50]`. - /// The range of component `s` is `[0, 50]`. - fn force_set_metadata(n: u32, s: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `119` - // Estimated: `3675` - // Minimum execution time: 13_391_000 picoseconds. - Weight::from_parts(14_047_176, 0) - .saturating_add(Weight::from_parts(0, 3675)) - // Standard Error: 172 - .saturating_add(Weight::from_parts(2_617, 0).saturating_mul(n.into())) - // Standard Error: 172 - .saturating_add(Weight::from_parts(2_081, 0).saturating_mul(s.into())) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `PoolAssets::Asset` (r:1 w:0) - /// Proof: `PoolAssets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) - /// Storage: `PoolAssets::Metadata` (r:1 w:1) - /// Proof: `PoolAssets::Metadata` (`max_values`: None, `max_size`: Some(140), added: 2615, mode: `MaxEncodedLen`) - fn force_clear_metadata() -> Weight { - // Proof Size summary in bytes: - // Measured: `444` - // Estimated: `3675` - // Minimum execution time: 15_794_000 picoseconds. - Weight::from_parts(16_279_000, 0) - .saturating_add(Weight::from_parts(0, 3675)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `PoolAssets::Asset` (r:1 w:1) - /// Proof: `PoolAssets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) - fn force_asset_status() -> Weight { - // Proof Size summary in bytes: - // Measured: `280` - // Estimated: `3675` - // Minimum execution time: 12_538_000 picoseconds. - Weight::from_parts(13_080_000, 0) - .saturating_add(Weight::from_parts(0, 3675)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `PoolAssets::Asset` (r:1 w:1) - /// Proof: `PoolAssets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) - /// Storage: `PoolAssets::Approvals` (r:1 w:1) - /// Proof: `PoolAssets::Approvals` (`max_values`: None, `max_size`: Some(148), added: 2623, mode: `MaxEncodedLen`) - fn approve_transfer() -> Weight { - // Proof Size summary in bytes: - // Measured: `314` - // Estimated: `3675` - // Minimum execution time: 18_991_000 picoseconds. 
- Weight::from_parts(19_812_000, 0) - .saturating_add(Weight::from_parts(0, 3675)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `PoolAssets::Asset` (r:1 w:1) - /// Proof: `PoolAssets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) - /// Storage: `PoolAssets::Approvals` (r:1 w:1) - /// Proof: `PoolAssets::Approvals` (`max_values`: None, `max_size`: Some(148), added: 2623, mode: `MaxEncodedLen`) - /// Storage: `PoolAssets::Account` (r:2 w:2) - /// Proof: `PoolAssets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) - /// Storage: `System::Account` (r:1 w:1) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - fn transfer_approved() -> Weight { - // Proof Size summary in bytes: - // Measured: `558` - // Estimated: `6208` - // Minimum execution time: 50_336_000 picoseconds. - Weight::from_parts(51_441_000, 0) - .saturating_add(Weight::from_parts(0, 6208)) - .saturating_add(T::DbWeight::get().reads(5)) - .saturating_add(T::DbWeight::get().writes(5)) - } - /// Storage: `PoolAssets::Asset` (r:1 w:1) - /// Proof: `PoolAssets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) - /// Storage: `PoolAssets::Approvals` (r:1 w:1) - /// Proof: `PoolAssets::Approvals` (`max_values`: None, `max_size`: Some(148), added: 2623, mode: `MaxEncodedLen`) - fn cancel_approval() -> Weight { - // Proof Size summary in bytes: - // Measured: `484` - // Estimated: `3675` - // Minimum execution time: 21_195_000 picoseconds. - Weight::from_parts(21_946_000, 0) - .saturating_add(Weight::from_parts(0, 3675)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `PoolAssets::Asset` (r:1 w:1) - /// Proof: `PoolAssets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) - /// Storage: `PoolAssets::Approvals` (r:1 w:1) - /// Proof: `PoolAssets::Approvals` (`max_values`: None, `max_size`: Some(148), added: 2623, mode: `MaxEncodedLen`) - fn force_cancel_approval() -> Weight { - // Proof Size summary in bytes: - // Measured: `484` - // Estimated: `3675` - // Minimum execution time: 21_568_000 picoseconds. - Weight::from_parts(22_366_000, 0) - .saturating_add(Weight::from_parts(0, 3675)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `PoolAssets::Asset` (r:1 w:1) - /// Proof: `PoolAssets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) - fn set_min_balance() -> Weight { - // Proof Size summary in bytes: - // Measured: `280` - // Estimated: `3675` - // Minimum execution time: 13_690_000 picoseconds. - Weight::from_parts(14_086_000, 0) - .saturating_add(Weight::from_parts(0, 3675)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `PoolAssets::Account` (r:1 w:1) - /// Proof: `PoolAssets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) - /// Storage: `PoolAssets::Asset` (r:1 w:1) - /// Proof: `PoolAssets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) - fn touch() -> Weight { - // Proof Size summary in bytes: - // Measured: `280` - // Estimated: `3675` - // Minimum execution time: 18_240_000 picoseconds. 
- Weight::from_parts(19_000_000, 0) - .saturating_add(Weight::from_parts(0, 3675)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `PoolAssets::Account` (r:1 w:1) - /// Proof: `PoolAssets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) - /// Storage: `PoolAssets::Asset` (r:1 w:1) - /// Proof: `PoolAssets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) - fn touch_other() -> Weight { - // Proof Size summary in bytes: - // Measured: `280` - // Estimated: `3675` - // Minimum execution time: 18_469_000 picoseconds. - Weight::from_parts(19_040_000, 0) - .saturating_add(Weight::from_parts(0, 3675)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `PoolAssets::Account` (r:1 w:1) - /// Proof: `PoolAssets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) - /// Storage: `PoolAssets::Asset` (r:1 w:1) - /// Proof: `PoolAssets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) - fn refund() -> Weight { - // Proof Size summary in bytes: - // Measured: `406` - // Estimated: `3675` - // Minimum execution time: 14_633_000 picoseconds. - Weight::from_parts(15_296_000, 0) - .saturating_add(Weight::from_parts(0, 3675)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `PoolAssets::Account` (r:1 w:1) - /// Proof: `PoolAssets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) - /// Storage: `PoolAssets::Asset` (r:1 w:1) - /// Proof: `PoolAssets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) - fn refund_other() -> Weight { - // Proof Size summary in bytes: - // Measured: `439` - // Estimated: `3675` - // Minimum execution time: 14_751_000 picoseconds. - Weight::from_parts(15_312_000, 0) - .saturating_add(Weight::from_parts(0, 3675)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `PoolAssets::Asset` (r:1 w:0) - /// Proof: `PoolAssets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) - /// Storage: `PoolAssets::Account` (r:1 w:1) - /// Proof: `PoolAssets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) - fn block() -> Weight { - // Proof Size summary in bytes: - // Measured: `388` - // Estimated: `3675` - // Minimum execution time: 16_930_000 picoseconds. - Weight::from_parts(17_653_000, 0) - .saturating_add(Weight::from_parts(0, 3675)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(1)) - } -} diff --git a/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/pallet_balances.rs b/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/pallet_balances.rs deleted file mode 100644 index 79c88f30580..00000000000 --- a/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/pallet_balances.rs +++ /dev/null @@ -1,152 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! Autogenerated weights for `pallet_balances` -//! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-07-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-ynta1nyy-project-238-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! EXECUTION: ``, WASM-EXECUTION: `Compiled`, CHAIN: `Some("asset-hub-kusama-dev")`, DB CACHE: 1024 - -// Executed Command: -// ./target/production/polkadot-parachain -// benchmark -// pallet -// --chain=asset-hub-kusama-dev -// --wasm-execution=compiled -// --pallet=pallet_balances -// --no-storage-info -// --no-median-slopes -// --no-min-squares -// --extrinsic=* -// --steps=50 -// --repeat=20 -// --json -// --header=./file_header.txt -// --output=./parachains/runtimes/assets/asset-hub-kusama/src/weights/ - -#![cfg_attr(rustfmt, rustfmt_skip)] -#![allow(unused_parens)] -#![allow(unused_imports)] -#![allow(missing_docs)] - -use frame_support::{traits::Get, weights::Weight}; -use core::marker::PhantomData; - -/// Weight functions for `pallet_balances`. -pub struct WeightInfo(PhantomData); -impl pallet_balances::WeightInfo for WeightInfo { - /// Storage: `System::Account` (r:1 w:1) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - fn transfer_allow_death() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `3593` - // Minimum execution time: 55_040_000 picoseconds. - Weight::from_parts(56_106_000, 0) - .saturating_add(Weight::from_parts(0, 3593)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `System::Account` (r:1 w:1) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - fn transfer_keep_alive() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `3593` - // Minimum execution time: 41_342_000 picoseconds. - Weight::from_parts(41_890_000, 0) - .saturating_add(Weight::from_parts(0, 3593)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `System::Account` (r:1 w:1) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - fn force_set_balance_creating() -> Weight { - // Proof Size summary in bytes: - // Measured: `103` - // Estimated: `3593` - // Minimum execution time: 14_723_000 picoseconds. - Weight::from_parts(15_182_000, 0) - .saturating_add(Weight::from_parts(0, 3593)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `System::Account` (r:1 w:1) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - fn force_set_balance_killing() -> Weight { - // Proof Size summary in bytes: - // Measured: `103` - // Estimated: `3593` - // Minimum execution time: 22_073_000 picoseconds. 
- Weight::from_parts(22_638_000, 0) - .saturating_add(Weight::from_parts(0, 3593)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `System::Account` (r:2 w:2) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - fn force_transfer() -> Weight { - // Proof Size summary in bytes: - // Measured: `103` - // Estimated: `6196` - // Minimum execution time: 57_265_000 picoseconds. - Weight::from_parts(58_222_000, 0) - .saturating_add(Weight::from_parts(0, 6196)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `System::Account` (r:1 w:1) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - fn transfer_all() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `3593` - // Minimum execution time: 51_485_000 picoseconds. - Weight::from_parts(52_003_000, 0) - .saturating_add(Weight::from_parts(0, 3593)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `System::Account` (r:1 w:1) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - fn force_unreserve() -> Weight { - // Proof Size summary in bytes: - // Measured: `103` - // Estimated: `3593` - // Minimum execution time: 17_460_000 picoseconds. - Weight::from_parts(17_849_000, 0) - .saturating_add(Weight::from_parts(0, 3593)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `System::Account` (r:999 w:999) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - /// The range of component `u` is `[1, 1000]`. - fn upgrade_accounts(u: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `0 + u * (136 ±0)` - // Estimated: `990 + u * (2603 ±0)` - // Minimum execution time: 17_259_000 picoseconds. - Weight::from_parts(17_478_000, 0) - .saturating_add(Weight::from_parts(0, 990)) - // Standard Error: 16_756 - .saturating_add(Weight::from_parts(15_291_954, 0).saturating_mul(u.into())) - .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(u.into()))) - .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(u.into()))) - .saturating_add(Weight::from_parts(0, 2603).saturating_mul(u.into())) - } -} diff --git a/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/pallet_collator_selection.rs b/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/pallet_collator_selection.rs deleted file mode 100644 index c686bd6134a..00000000000 --- a/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/pallet_collator_selection.rs +++ /dev/null @@ -1,248 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -//! Autogenerated weights for `pallet_collator_selection` -//! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-07-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-ynta1nyy-project-238-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! EXECUTION: ``, WASM-EXECUTION: `Compiled`, CHAIN: `Some("asset-hub-kusama-dev")`, DB CACHE: 1024 - -// Executed Command: -// ./target/production/polkadot-parachain -// benchmark -// pallet -// --chain=asset-hub-kusama-dev -// --wasm-execution=compiled -// --pallet=pallet_collator_selection -// --no-storage-info -// --no-median-slopes -// --no-min-squares -// --extrinsic=* -// --steps=50 -// --repeat=20 -// --json -// --header=./file_header.txt -// --output=./parachains/runtimes/assets/asset-hub-kusama/src/weights/ - -#![cfg_attr(rustfmt, rustfmt_skip)] -#![allow(unused_parens)] -#![allow(unused_imports)] -#![allow(missing_docs)] - -use frame_support::{traits::Get, weights::Weight}; -use core::marker::PhantomData; - -/// Weight functions for `pallet_collator_selection`. -pub struct WeightInfo(PhantomData); -impl pallet_collator_selection::WeightInfo for WeightInfo { - /// Storage: `Session::NextKeys` (r:20 w:0) - /// Proof: `Session::NextKeys` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// Storage: `CollatorSelection::Invulnerables` (r:0 w:1) - /// Proof: `CollatorSelection::Invulnerables` (`max_values`: Some(1), `max_size`: Some(641), added: 1136, mode: `MaxEncodedLen`) - /// The range of component `b` is `[1, 20]`. - fn set_invulnerables(b: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `163 + b * (79 ±0)` - // Estimated: `1154 + b * (2555 ±0)` - // Minimum execution time: 15_408_000 picoseconds. - Weight::from_parts(13_068_592, 0) - .saturating_add(Weight::from_parts(0, 1154)) - // Standard Error: 7_395 - .saturating_add(Weight::from_parts(3_219_916, 0).saturating_mul(b.into())) - .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(b.into()))) - .saturating_add(T::DbWeight::get().writes(1)) - .saturating_add(Weight::from_parts(0, 2555).saturating_mul(b.into())) - } - /// Storage: `Session::NextKeys` (r:1 w:0) - /// Proof: `Session::NextKeys` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// Storage: `CollatorSelection::Invulnerables` (r:1 w:1) - /// Proof: `CollatorSelection::Invulnerables` (`max_values`: Some(1), `max_size`: Some(641), added: 1136, mode: `MaxEncodedLen`) - /// Storage: `CollatorSelection::Candidates` (r:1 w:1) - /// Proof: `CollatorSelection::Candidates` (`max_values`: Some(1), `max_size`: Some(4802), added: 5297, mode: `MaxEncodedLen`) - /// Storage: `System::Account` (r:1 w:1) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - /// The range of component `b` is `[1, 19]`. - /// The range of component `c` is `[1, 99]`. - fn add_invulnerable(b: u32, c: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `756 + b * (32 ±0) + c * (53 ±0)` - // Estimated: `6287 + b * (37 ±0) + c * (53 ±0)` - // Minimum execution time: 49_692_000 picoseconds. 
- Weight::from_parts(51_768_986, 0) - .saturating_add(Weight::from_parts(0, 6287)) - // Standard Error: 18_404 - .saturating_add(Weight::from_parts(55_676, 0).saturating_mul(b.into())) - // Standard Error: 3_488 - .saturating_add(Weight::from_parts(184_343, 0).saturating_mul(c.into())) - .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(3)) - .saturating_add(Weight::from_parts(0, 37).saturating_mul(b.into())) - .saturating_add(Weight::from_parts(0, 53).saturating_mul(c.into())) - } - /// Storage: `CollatorSelection::Candidates` (r:1 w:0) - /// Proof: `CollatorSelection::Candidates` (`max_values`: Some(1), `max_size`: Some(4802), added: 5297, mode: `MaxEncodedLen`) - /// Storage: `CollatorSelection::Invulnerables` (r:1 w:1) - /// Proof: `CollatorSelection::Invulnerables` (`max_values`: Some(1), `max_size`: Some(641), added: 1136, mode: `MaxEncodedLen`) - /// The range of component `b` is `[5, 20]`. - fn remove_invulnerable(b: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `119 + b * (32 ±0)` - // Estimated: `6287` - // Minimum execution time: 16_486_000 picoseconds. - Weight::from_parts(16_646_017, 0) - .saturating_add(Weight::from_parts(0, 6287)) - // Standard Error: 3_230 - .saturating_add(Weight::from_parts(148_941, 0).saturating_mul(b.into())) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `CollatorSelection::DesiredCandidates` (r:0 w:1) - /// Proof: `CollatorSelection::DesiredCandidates` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - fn set_desired_candidates() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 7_806_000 picoseconds. - Weight::from_parts(8_002_000, 0) - .saturating_add(Weight::from_parts(0, 0)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `CollatorSelection::CandidacyBond` (r:0 w:1) - /// Proof: `CollatorSelection::CandidacyBond` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) - fn set_candidacy_bond(_c: u32, _k: u32) -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 7_937_000 picoseconds. 
- Weight::from_parts(8_161_000, 0) - .saturating_add(Weight::from_parts(0, 0)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `CollatorSelection::Candidates` (r:1 w:1) - /// Proof: `CollatorSelection::Candidates` (`max_values`: Some(1), `max_size`: Some(4802), added: 5297, mode: `MaxEncodedLen`) - /// Storage: `CollatorSelection::DesiredCandidates` (r:1 w:0) - /// Proof: `CollatorSelection::DesiredCandidates` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - /// Storage: `CollatorSelection::Invulnerables` (r:1 w:0) - /// Proof: `CollatorSelection::Invulnerables` (`max_values`: Some(1), `max_size`: Some(641), added: 1136, mode: `MaxEncodedLen`) - /// Storage: `Session::NextKeys` (r:1 w:0) - /// Proof: `Session::NextKeys` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// Storage: `CollatorSelection::CandidacyBond` (r:1 w:0) - /// Proof: `CollatorSelection::CandidacyBond` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) - /// Storage: `CollatorSelection::LastAuthoredBlock` (r:0 w:1) - /// Proof: `CollatorSelection::LastAuthoredBlock` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) - /// The range of component `c` is `[1, 99]`. - fn register_as_candidate(c: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `736 + c * (52 ±0)` - // Estimated: `6287 + c * (54 ±0)` - // Minimum execution time: 42_805_000 picoseconds. - Weight::from_parts(45_979_502, 0) - .saturating_add(Weight::from_parts(0, 6287)) - // Standard Error: 2_336 - .saturating_add(Weight::from_parts(221_049, 0).saturating_mul(c.into())) - .saturating_add(T::DbWeight::get().reads(5)) - .saturating_add(T::DbWeight::get().writes(2)) - .saturating_add(Weight::from_parts(0, 54).saturating_mul(c.into())) - } - /// Storage: `CollatorSelection::Candidates` (r:1 w:1) - /// Proof: `CollatorSelection::Candidates` (`max_values`: Some(1), `max_size`: Some(4802), added: 5297, mode: `MaxEncodedLen`) - /// Storage: `CollatorSelection::Invulnerables` (r:1 w:0) - /// Proof: `CollatorSelection::Invulnerables` (`max_values`: Some(1), `max_size`: Some(641), added: 1136, mode: `MaxEncodedLen`) - /// Storage: `CollatorSelection::LastAuthoredBlock` (r:0 w:1) - /// Proof: `CollatorSelection::LastAuthoredBlock` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) - /// The range of component `c` is `[4, 100]`. - fn leave_intent(c: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `306 + c * (50 ±0)` - // Estimated: `6287` - // Minimum execution time: 34_814_000 picoseconds. - Weight::from_parts(36_371_520, 0) - .saturating_add(Weight::from_parts(0, 6287)) - // Standard Error: 2_391 - .saturating_add(Weight::from_parts(201_700, 0).saturating_mul(c.into())) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(2)) - } - fn update_bond(c: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `306 + c * (50 ±0)` - // Estimated: `6287` - // Minimum execution time: 34_814_000 picoseconds. 
- Weight::from_parts(36_371_520, 0) - .saturating_add(Weight::from_parts(0, 6287)) - // Standard Error: 2_391 - .saturating_add(Weight::from_parts(201_700, 0).saturating_mul(c.into())) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(2)) - } - fn take_candidate_slot(c: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `306 + c * (50 ±0)` - // Estimated: `6287` - // Minimum execution time: 34_814_000 picoseconds. - Weight::from_parts(36_371_520, 0) - .saturating_add(Weight::from_parts(0, 6287)) - // Standard Error: 2_391 - .saturating_add(Weight::from_parts(201_700, 0).saturating_mul(c.into())) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `System::Account` (r:2 w:2) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - /// Storage: `System::BlockWeight` (r:1 w:1) - /// Proof: `System::BlockWeight` (`max_values`: Some(1), `max_size`: Some(48), added: 543, mode: `MaxEncodedLen`) - /// Storage: `CollatorSelection::LastAuthoredBlock` (r:0 w:1) - /// Proof: `CollatorSelection::LastAuthoredBlock` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) - fn note_author() -> Weight { - // Proof Size summary in bytes: - // Measured: `103` - // Estimated: `6196` - // Minimum execution time: 46_989_000 picoseconds. - Weight::from_parts(48_151_000, 0) - .saturating_add(Weight::from_parts(0, 6196)) - .saturating_add(T::DbWeight::get().reads(3)) - .saturating_add(T::DbWeight::get().writes(4)) - } - /// Storage: `CollatorSelection::Candidates` (r:1 w:0) - /// Proof: `CollatorSelection::Candidates` (`max_values`: Some(1), `max_size`: Some(4802), added: 5297, mode: `MaxEncodedLen`) - /// Storage: `CollatorSelection::LastAuthoredBlock` (r:100 w:0) - /// Proof: `CollatorSelection::LastAuthoredBlock` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) - /// Storage: `CollatorSelection::Invulnerables` (r:1 w:0) - /// Proof: `CollatorSelection::Invulnerables` (`max_values`: Some(1), `max_size`: Some(641), added: 1136, mode: `MaxEncodedLen`) - /// Storage: `System::BlockWeight` (r:1 w:1) - /// Proof: `System::BlockWeight` (`max_values`: Some(1), `max_size`: Some(48), added: 543, mode: `MaxEncodedLen`) - /// Storage: `System::Account` (r:97 w:97) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - /// The range of component `r` is `[1, 100]`. - /// The range of component `c` is `[1, 100]`. - fn new_session(r: u32, c: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `2243 + c * (97 ±0) + r * (112 ±0)` - // Estimated: `6287 + c * (2519 ±0) + r * (2603 ±0)` - // Minimum execution time: 17_547_000 picoseconds. 
- Weight::from_parts(17_854_000, 0) - .saturating_add(Weight::from_parts(0, 6287)) - // Standard Error: 370_637 - .saturating_add(Weight::from_parts(15_798_857, 0).saturating_mul(c.into())) - .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(c.into()))) - .saturating_add(T::DbWeight::get().writes(1)) - .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(c.into()))) - .saturating_add(Weight::from_parts(0, 2519).saturating_mul(c.into())) - .saturating_add(Weight::from_parts(0, 2603).saturating_mul(r.into())) - } -} diff --git a/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/pallet_message_queue.rs b/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/pallet_message_queue.rs deleted file mode 100644 index 792b7d18b67..00000000000 --- a/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/pallet_message_queue.rs +++ /dev/null @@ -1,179 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! Autogenerated weights for `pallet_message_queue` -//! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-03-24, STEPS: `2`, REPEAT: `1`, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `i9`, CPU: `13th Gen Intel(R) Core(TM) i9-13900K` -//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("statemint-dev"), DB CACHE: 1024 - -// Executed Command: -// ./target/release/polkadot-parachain -// benchmark -// pallet -// --chain -// statemint-dev -// --pallet -// pallet_message_queue -// --extrinsic -// * -// --execution -// wasm -// --wasm-execution -// compiled -// --output -// parachains/runtimes/assets/statemint/src/weights - -#![cfg_attr(rustfmt, rustfmt_skip)] -#![allow(unused_parens)] -#![allow(unused_imports)] - -use frame_support::{traits::Get, weights::Weight}; -use sp_std::marker::PhantomData; - -/// Weight functions for `pallet_message_queue`. -pub struct WeightInfo(PhantomData); -impl pallet_message_queue::WeightInfo for WeightInfo { - /// Storage: MessageQueue ServiceHead (r:1 w:0) - /// Proof: MessageQueue ServiceHead (max_values: Some(1), max_size: Some(5), added: 500, mode: MaxEncodedLen) - /// Storage: MessageQueue BookStateFor (r:2 w:2) - /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) - fn ready_ring_knit() -> Weight { - // Proof Size summary in bytes: - // Measured: `189` - // Estimated: `7534` - // Minimum execution time: 18_976_000 picoseconds. 
- Weight::from_parts(18_976_000, 0) - .saturating_add(Weight::from_parts(0, 7534)) - .saturating_add(T::DbWeight::get().reads(3)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: MessageQueue BookStateFor (r:2 w:2) - /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) - /// Storage: MessageQueue ServiceHead (r:1 w:1) - /// Proof: MessageQueue ServiceHead (max_values: Some(1), max_size: Some(5), added: 500, mode: MaxEncodedLen) - fn ready_ring_unknit() -> Weight { - // Proof Size summary in bytes: - // Measured: `184` - // Estimated: `7534` - // Minimum execution time: 12_686_000 picoseconds. - Weight::from_parts(12_686_000, 0) - .saturating_add(Weight::from_parts(0, 7534)) - .saturating_add(T::DbWeight::get().reads(3)) - .saturating_add(T::DbWeight::get().writes(3)) - } - /// Storage: MessageQueue BookStateFor (r:1 w:1) - /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) - fn service_queue_base() -> Weight { - // Proof Size summary in bytes: - // Measured: `6` - // Estimated: `3517` - // Minimum execution time: 4_951_000 picoseconds. - Weight::from_parts(4_951_000, 0) - .saturating_add(Weight::from_parts(0, 3517)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: MessageQueue Pages (r:1 w:1) - /// Proof: MessageQueue Pages (max_values: None, max_size: Some(65585), added: 68060, mode: MaxEncodedLen) - fn service_page_base_completion() -> Weight { - // Proof Size summary in bytes: - // Measured: `72` - // Estimated: `69050` - // Minimum execution time: 6_023_000 picoseconds. - Weight::from_parts(6_023_000, 0) - .saturating_add(Weight::from_parts(0, 69050)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: MessageQueue Pages (r:1 w:1) - /// Proof: MessageQueue Pages (max_values: None, max_size: Some(65585), added: 68060, mode: MaxEncodedLen) - fn service_page_base_no_completion() -> Weight { - // Proof Size summary in bytes: - // Measured: `72` - // Estimated: `69050` - // Minimum execution time: 6_901_000 picoseconds. - Weight::from_parts(6_901_000, 0) - .saturating_add(Weight::from_parts(0, 69050)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - fn service_page_item() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 58_503_000 picoseconds. - Weight::from_parts(58_503_000, 0) - .saturating_add(Weight::from_parts(0, 0)) - } - /// Storage: MessageQueue ServiceHead (r:1 w:1) - /// Proof: MessageQueue ServiceHead (max_values: Some(1), max_size: Some(5), added: 500, mode: MaxEncodedLen) - /// Storage: MessageQueue BookStateFor (r:1 w:0) - /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) - fn bump_service_head() -> Weight { - // Proof Size summary in bytes: - // Measured: `99` - // Estimated: `5007` - // Minimum execution time: 9_318_000 picoseconds. 
- Weight::from_parts(9_318_000, 0) - .saturating_add(Weight::from_parts(0, 5007)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: MessageQueue BookStateFor (r:1 w:1) - /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) - /// Storage: MessageQueue Pages (r:1 w:1) - /// Proof: MessageQueue Pages (max_values: None, max_size: Some(65585), added: 68060, mode: MaxEncodedLen) - fn reap_page() -> Weight { - // Proof Size summary in bytes: - // Measured: `65667` - // Estimated: `72567` - // Minimum execution time: 52_228_000 picoseconds. - Weight::from_parts(52_228_000, 0) - .saturating_add(Weight::from_parts(0, 72567)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: MessageQueue BookStateFor (r:1 w:1) - /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) - /// Storage: MessageQueue Pages (r:1 w:1) - /// Proof: MessageQueue Pages (max_values: None, max_size: Some(65585), added: 68060, mode: MaxEncodedLen) - fn execute_overweight_page_removed() -> Weight { - // Proof Size summary in bytes: - // Measured: `65667` - // Estimated: `72567` - // Minimum execution time: 59_617_000 picoseconds. - Weight::from_parts(59_617_000, 0) - .saturating_add(Weight::from_parts(0, 72567)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: MessageQueue BookStateFor (r:1 w:1) - /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) - /// Storage: MessageQueue Pages (r:1 w:1) - /// Proof: MessageQueue Pages (max_values: None, max_size: Some(65585), added: 68060, mode: MaxEncodedLen) - fn execute_overweight_page_updated() -> Weight { - // Proof Size summary in bytes: - // Measured: `65667` - // Estimated: `72567` - // Minimum execution time: 69_681_000 picoseconds. - Weight::from_parts(69_681_000, 0) - .saturating_add(Weight::from_parts(0, 72567)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(2)) - } -} diff --git a/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/pallet_multisig.rs b/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/pallet_multisig.rs deleted file mode 100644 index d2e0f0ec7f0..00000000000 --- a/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/pallet_multisig.rs +++ /dev/null @@ -1,164 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! Autogenerated weights for `pallet_multisig` -//! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-07-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! WORST CASE MAP SIZE: `1000000` -//! 
HOSTNAME: `runner-ynta1nyy-project-238-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! EXECUTION: ``, WASM-EXECUTION: `Compiled`, CHAIN: `Some("asset-hub-kusama-dev")`, DB CACHE: 1024 - -// Executed Command: -// ./target/production/polkadot-parachain -// benchmark -// pallet -// --chain=asset-hub-kusama-dev -// --wasm-execution=compiled -// --pallet=pallet_multisig -// --no-storage-info -// --no-median-slopes -// --no-min-squares -// --extrinsic=* -// --steps=50 -// --repeat=20 -// --json -// --header=./file_header.txt -// --output=./parachains/runtimes/assets/asset-hub-kusama/src/weights/ - -#![cfg_attr(rustfmt, rustfmt_skip)] -#![allow(unused_parens)] -#![allow(unused_imports)] -#![allow(missing_docs)] - -use frame_support::{traits::Get, weights::Weight}; -use core::marker::PhantomData; - -/// Weight functions for `pallet_multisig`. -pub struct WeightInfo(PhantomData); -impl pallet_multisig::WeightInfo for WeightInfo { - /// The range of component `z` is `[0, 10000]`. - fn as_multi_threshold_1(z: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 13_714_000 picoseconds. - Weight::from_parts(14_440_231, 0) - .saturating_add(Weight::from_parts(0, 0)) - // Standard Error: 5 - .saturating_add(Weight::from_parts(598, 0).saturating_mul(z.into())) - } - /// Storage: `Multisig::Multisigs` (r:1 w:1) - /// Proof: `Multisig::Multisigs` (`max_values`: None, `max_size`: Some(3346), added: 5821, mode: `MaxEncodedLen`) - /// The range of component `s` is `[2, 100]`. - /// The range of component `z` is `[0, 10000]`. - fn as_multi_create(s: u32, z: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `262 + s * (2 ±0)` - // Estimated: `6811` - // Minimum execution time: 44_768_000 picoseconds. - Weight::from_parts(33_662_218, 0) - .saturating_add(Weight::from_parts(0, 6811)) - // Standard Error: 1_633 - .saturating_add(Weight::from_parts(128_927, 0).saturating_mul(s.into())) - // Standard Error: 16 - .saturating_add(Weight::from_parts(1_543, 0).saturating_mul(z.into())) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `Multisig::Multisigs` (r:1 w:1) - /// Proof: `Multisig::Multisigs` (`max_values`: None, `max_size`: Some(3346), added: 5821, mode: `MaxEncodedLen`) - /// The range of component `s` is `[3, 100]`. - /// The range of component `z` is `[0, 10000]`. - fn as_multi_approve(s: u32, z: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `282` - // Estimated: `6811` - // Minimum execution time: 29_745_000 picoseconds. - Weight::from_parts(20_559_891, 0) - .saturating_add(Weight::from_parts(0, 6811)) - // Standard Error: 914 - .saturating_add(Weight::from_parts(103_601, 0).saturating_mul(s.into())) - // Standard Error: 8 - .saturating_add(Weight::from_parts(1_504, 0).saturating_mul(z.into())) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `Multisig::Multisigs` (r:1 w:1) - /// Proof: `Multisig::Multisigs` (`max_values`: None, `max_size`: Some(3346), added: 5821, mode: `MaxEncodedLen`) - /// Storage: `System::Account` (r:1 w:1) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - /// The range of component `s` is `[2, 100]`. - /// The range of component `z` is `[0, 10000]`. 
- fn as_multi_complete(s: u32, z: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `385 + s * (33 ±0)` - // Estimated: `6811` - // Minimum execution time: 51_506_000 picoseconds. - Weight::from_parts(36_510_777, 0) - .saturating_add(Weight::from_parts(0, 6811)) - // Standard Error: 2_183 - .saturating_add(Weight::from_parts(183_764, 0).saturating_mul(s.into())) - // Standard Error: 21 - .saturating_add(Weight::from_parts(1_653, 0).saturating_mul(z.into())) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `Multisig::Multisigs` (r:1 w:1) - /// Proof: `Multisig::Multisigs` (`max_values`: None, `max_size`: Some(3346), added: 5821, mode: `MaxEncodedLen`) - /// The range of component `s` is `[2, 100]`. - fn approve_as_multi_create(s: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `263 + s * (2 ±0)` - // Estimated: `6811` - // Minimum execution time: 31_072_000 picoseconds. - Weight::from_parts(32_408_621, 0) - .saturating_add(Weight::from_parts(0, 6811)) - // Standard Error: 913 - .saturating_add(Weight::from_parts(121_410, 0).saturating_mul(s.into())) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `Multisig::Multisigs` (r:1 w:1) - /// Proof: `Multisig::Multisigs` (`max_values`: None, `max_size`: Some(3346), added: 5821, mode: `MaxEncodedLen`) - /// The range of component `s` is `[2, 100]`. - fn approve_as_multi_approve(s: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `282` - // Estimated: `6811` - // Minimum execution time: 18_301_000 picoseconds. - Weight::from_parts(18_223_547, 0) - .saturating_add(Weight::from_parts(0, 6811)) - // Standard Error: 747 - .saturating_add(Weight::from_parts(114_584, 0).saturating_mul(s.into())) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `Multisig::Multisigs` (r:1 w:1) - /// Proof: `Multisig::Multisigs` (`max_values`: None, `max_size`: Some(3346), added: 5821, mode: `MaxEncodedLen`) - /// The range of component `s` is `[2, 100]`. - fn cancel_as_multi(s: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `454 + s * (1 ±0)` - // Estimated: `6811` - // Minimum execution time: 32_107_000 picoseconds. - Weight::from_parts(33_674_827, 0) - .saturating_add(Weight::from_parts(0, 6811)) - // Standard Error: 1_220 - .saturating_add(Weight::from_parts(122_011, 0).saturating_mul(s.into())) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } -} diff --git a/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/pallet_nft_fractionalization.rs b/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/pallet_nft_fractionalization.rs deleted file mode 100644 index 4becc569514..00000000000 --- a/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/pallet_nft_fractionalization.rs +++ /dev/null @@ -1,114 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! Autogenerated weights for `pallet_nft_fractionalization` -//! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-07-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-ynta1nyy-project-238-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! EXECUTION: ``, WASM-EXECUTION: `Compiled`, CHAIN: `Some("asset-hub-kusama-dev")`, DB CACHE: 1024 - -// Executed Command: -// ./target/production/polkadot-parachain -// benchmark -// pallet -// --chain=asset-hub-kusama-dev -// --wasm-execution=compiled -// --pallet=pallet_nft_fractionalization -// --no-storage-info -// --no-median-slopes -// --no-min-squares -// --extrinsic=* -// --steps=50 -// --repeat=20 -// --json -// --header=./file_header.txt -// --output=./parachains/runtimes/assets/asset-hub-kusama/src/weights/ - -#![cfg_attr(rustfmt, rustfmt_skip)] -#![allow(unused_parens)] -#![allow(unused_imports)] -#![allow(missing_docs)] - -use frame_support::{traits::Get, weights::Weight}; -use core::marker::PhantomData; - -/// Weight functions for `pallet_nft_fractionalization`. -pub struct WeightInfo(PhantomData); -impl pallet_nft_fractionalization::WeightInfo for WeightInfo { - /// Storage: `Nfts::Item` (r:1 w:0) - /// Proof: `Nfts::Item` (`max_values`: None, `max_size`: Some(861), added: 3336, mode: `MaxEncodedLen`) - /// Storage: `Balances::Holds` (r:1 w:1) - /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`) - /// Storage: `Nfts::Attribute` (r:1 w:1) - /// Proof: `Nfts::Attribute` (`max_values`: None, `max_size`: Some(479), added: 2954, mode: `MaxEncodedLen`) - /// Storage: `Nfts::Collection` (r:1 w:1) - /// Proof: `Nfts::Collection` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`) - /// Storage: `Assets::Asset` (r:1 w:1) - /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) - /// Storage: `Assets::Account` (r:1 w:1) - /// Proof: `Assets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) - /// Storage: `System::Account` (r:1 w:1) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - /// Storage: `Assets::Metadata` (r:1 w:1) - /// Proof: `Assets::Metadata` (`max_values`: None, `max_size`: Some(140), added: 2615, mode: `MaxEncodedLen`) - /// Storage: `NftFractionalization::NftToAsset` (r:0 w:1) - /// Proof: `NftFractionalization::NftToAsset` (`max_values`: None, `max_size`: Some(92), added: 2567, mode: `MaxEncodedLen`) - fn fractionalize() -> Weight { - // Proof Size summary in bytes: - // Measured: `462` - // Estimated: `4326` - // Minimum execution time: 178_501_000 picoseconds. 
- Weight::from_parts(180_912_000, 0) - .saturating_add(Weight::from_parts(0, 4326)) - .saturating_add(T::DbWeight::get().reads(8)) - .saturating_add(T::DbWeight::get().writes(8)) - } - /// Storage: `NftFractionalization::NftToAsset` (r:1 w:1) - /// Proof: `NftFractionalization::NftToAsset` (`max_values`: None, `max_size`: Some(92), added: 2567, mode: `MaxEncodedLen`) - /// Storage: `Assets::Asset` (r:1 w:1) - /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) - /// Storage: `Assets::Account` (r:1 w:1) - /// Proof: `Assets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) - /// Storage: `Nfts::Attribute` (r:1 w:1) - /// Proof: `Nfts::Attribute` (`max_values`: None, `max_size`: Some(479), added: 2954, mode: `MaxEncodedLen`) - /// Storage: `Nfts::Collection` (r:1 w:1) - /// Proof: `Nfts::Collection` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`) - /// Storage: `Nfts::CollectionConfigOf` (r:1 w:0) - /// Proof: `Nfts::CollectionConfigOf` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) - /// Storage: `Nfts::ItemConfigOf` (r:1 w:0) - /// Proof: `Nfts::ItemConfigOf` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`) - /// Storage: `Nfts::Item` (r:1 w:1) - /// Proof: `Nfts::Item` (`max_values`: None, `max_size`: Some(861), added: 3336, mode: `MaxEncodedLen`) - /// Storage: `Balances::Holds` (r:1 w:1) - /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(67), added: 2542, mode: `MaxEncodedLen`) - /// Storage: `Nfts::Account` (r:0 w:1) - /// Proof: `Nfts::Account` (`max_values`: None, `max_size`: Some(88), added: 2563, mode: `MaxEncodedLen`) - /// Storage: `Nfts::ItemPriceOf` (r:0 w:1) - /// Proof: `Nfts::ItemPriceOf` (`max_values`: None, `max_size`: Some(89), added: 2564, mode: `MaxEncodedLen`) - /// Storage: `Nfts::PendingSwapOf` (r:0 w:1) - /// Proof: `Nfts::PendingSwapOf` (`max_values`: None, `max_size`: Some(71), added: 2546, mode: `MaxEncodedLen`) - fn unify() -> Weight { - // Proof Size summary in bytes: - // Measured: `1275` - // Estimated: `4326` - // Minimum execution time: 125_253_000 picoseconds. - Weight::from_parts(128_238_000, 0) - .saturating_add(Weight::from_parts(0, 4326)) - .saturating_add(T::DbWeight::get().reads(9)) - .saturating_add(T::DbWeight::get().writes(10)) - } -} diff --git a/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/pallet_nfts.rs b/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/pallet_nfts.rs deleted file mode 100644 index 7a51830799a..00000000000 --- a/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/pallet_nfts.rs +++ /dev/null @@ -1,772 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! Autogenerated weights for `pallet_nfts` -//! -//! 
THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-07-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-ynta1nyy-project-238-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! EXECUTION: ``, WASM-EXECUTION: `Compiled`, CHAIN: `Some("asset-hub-kusama-dev")`, DB CACHE: 1024 - -// Executed Command: -// ./target/production/polkadot-parachain -// benchmark -// pallet -// --chain=asset-hub-kusama-dev -// --wasm-execution=compiled -// --pallet=pallet_nfts -// --no-storage-info -// --no-median-slopes -// --no-min-squares -// --extrinsic=* -// --steps=50 -// --repeat=20 -// --json -// --header=./file_header.txt -// --output=./parachains/runtimes/assets/asset-hub-kusama/src/weights/ - -#![cfg_attr(rustfmt, rustfmt_skip)] -#![allow(unused_parens)] -#![allow(unused_imports)] -#![allow(missing_docs)] - -use frame_support::{traits::Get, weights::Weight}; -use core::marker::PhantomData; - -/// Weight functions for `pallet_nfts`. -pub struct WeightInfo(PhantomData); -impl pallet_nfts::WeightInfo for WeightInfo { - /// Storage: `Nfts::NextCollectionId` (r:1 w:1) - /// Proof: `Nfts::NextCollectionId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - /// Storage: `Nfts::Collection` (r:1 w:1) - /// Proof: `Nfts::Collection` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`) - /// Storage: `Nfts::CollectionRoleOf` (r:0 w:1) - /// Proof: `Nfts::CollectionRoleOf` (`max_values`: None, `max_size`: Some(69), added: 2544, mode: `MaxEncodedLen`) - /// Storage: `Nfts::CollectionConfigOf` (r:0 w:1) - /// Proof: `Nfts::CollectionConfigOf` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) - /// Storage: `Nfts::CollectionAccount` (r:0 w:1) - /// Proof: `Nfts::CollectionAccount` (`max_values`: None, `max_size`: Some(68), added: 2543, mode: `MaxEncodedLen`) - fn create() -> Weight { - // Proof Size summary in bytes: - // Measured: `179` - // Estimated: `3549` - // Minimum execution time: 39_124_000 picoseconds. - Weight::from_parts(39_975_000, 0) - .saturating_add(Weight::from_parts(0, 3549)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(5)) - } - /// Storage: `Nfts::NextCollectionId` (r:1 w:1) - /// Proof: `Nfts::NextCollectionId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - /// Storage: `Nfts::Collection` (r:1 w:1) - /// Proof: `Nfts::Collection` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`) - /// Storage: `Nfts::CollectionRoleOf` (r:0 w:1) - /// Proof: `Nfts::CollectionRoleOf` (`max_values`: None, `max_size`: Some(69), added: 2544, mode: `MaxEncodedLen`) - /// Storage: `Nfts::CollectionConfigOf` (r:0 w:1) - /// Proof: `Nfts::CollectionConfigOf` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) - /// Storage: `Nfts::CollectionAccount` (r:0 w:1) - /// Proof: `Nfts::CollectionAccount` (`max_values`: None, `max_size`: Some(68), added: 2543, mode: `MaxEncodedLen`) - fn force_create() -> Weight { - // Proof Size summary in bytes: - // Measured: `76` - // Estimated: `3549` - // Minimum execution time: 23_444_000 picoseconds. 
- Weight::from_parts(23_857_000, 0) - .saturating_add(Weight::from_parts(0, 3549)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(5)) - } - /// Storage: `Nfts::Collection` (r:1 w:1) - /// Proof: `Nfts::Collection` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`) - /// Storage: `Nfts::ItemMetadataOf` (r:1 w:0) - /// Proof: `Nfts::ItemMetadataOf` (`max_values`: None, `max_size`: Some(347), added: 2822, mode: `MaxEncodedLen`) - /// Storage: `Nfts::CollectionRoleOf` (r:1 w:1) - /// Proof: `Nfts::CollectionRoleOf` (`max_values`: None, `max_size`: Some(69), added: 2544, mode: `MaxEncodedLen`) - /// Storage: `Nfts::Attribute` (r:1001 w:1000) - /// Proof: `Nfts::Attribute` (`max_values`: None, `max_size`: Some(479), added: 2954, mode: `MaxEncodedLen`) - /// Storage: `Nfts::ItemConfigOf` (r:1000 w:1000) - /// Proof: `Nfts::ItemConfigOf` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`) - /// Storage: `Nfts::CollectionMetadataOf` (r:0 w:1) - /// Proof: `Nfts::CollectionMetadataOf` (`max_values`: None, `max_size`: Some(294), added: 2769, mode: `MaxEncodedLen`) - /// Storage: `Nfts::CollectionConfigOf` (r:0 w:1) - /// Proof: `Nfts::CollectionConfigOf` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) - /// Storage: `Nfts::CollectionAccount` (r:0 w:1) - /// Proof: `Nfts::CollectionAccount` (`max_values`: None, `max_size`: Some(68), added: 2543, mode: `MaxEncodedLen`) - /// The range of component `m` is `[0, 1000]`. - /// The range of component `c` is `[0, 1000]`. - /// The range of component `a` is `[0, 1000]`. - fn destroy(_m: u32, _c: u32, a: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `32204 + a * (366 ±0)` - // Estimated: `2523990 + a * (2954 ±0)` - // Minimum execution time: 1_224_365_000 picoseconds. - Weight::from_parts(1_281_136_346, 0) - .saturating_add(Weight::from_parts(0, 2523990)) - // Standard Error: 10_484 - .saturating_add(Weight::from_parts(6_910_740, 0).saturating_mul(a.into())) - .saturating_add(T::DbWeight::get().reads(1004)) - .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(a.into()))) - .saturating_add(T::DbWeight::get().writes(1005)) - .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(a.into()))) - .saturating_add(Weight::from_parts(0, 2954).saturating_mul(a.into())) - } - /// Storage: `Nfts::CollectionConfigOf` (r:1 w:0) - /// Proof: `Nfts::CollectionConfigOf` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) - /// Storage: `Nfts::Item` (r:1 w:1) - /// Proof: `Nfts::Item` (`max_values`: None, `max_size`: Some(861), added: 3336, mode: `MaxEncodedLen`) - /// Storage: `Nfts::Collection` (r:1 w:1) - /// Proof: `Nfts::Collection` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`) - /// Storage: `Nfts::CollectionRoleOf` (r:1 w:0) - /// Proof: `Nfts::CollectionRoleOf` (`max_values`: None, `max_size`: Some(69), added: 2544, mode: `MaxEncodedLen`) - /// Storage: `Nfts::ItemConfigOf` (r:1 w:1) - /// Proof: `Nfts::ItemConfigOf` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`) - /// Storage: `Nfts::Account` (r:0 w:1) - /// Proof: `Nfts::Account` (`max_values`: None, `max_size`: Some(88), added: 2563, mode: `MaxEncodedLen`) - fn mint() -> Weight { - // Proof Size summary in bytes: - // Measured: `455` - // Estimated: `4326` - // Minimum execution time: 50_489_000 picoseconds. 
- Weight::from_parts(51_045_000, 0) - .saturating_add(Weight::from_parts(0, 4326)) - .saturating_add(T::DbWeight::get().reads(5)) - .saturating_add(T::DbWeight::get().writes(4)) - } - /// Storage: `Nfts::CollectionRoleOf` (r:1 w:0) - /// Proof: `Nfts::CollectionRoleOf` (`max_values`: None, `max_size`: Some(69), added: 2544, mode: `MaxEncodedLen`) - /// Storage: `Nfts::Item` (r:1 w:1) - /// Proof: `Nfts::Item` (`max_values`: None, `max_size`: Some(861), added: 3336, mode: `MaxEncodedLen`) - /// Storage: `Nfts::Collection` (r:1 w:1) - /// Proof: `Nfts::Collection` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`) - /// Storage: `Nfts::CollectionConfigOf` (r:1 w:0) - /// Proof: `Nfts::CollectionConfigOf` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) - /// Storage: `Nfts::ItemConfigOf` (r:1 w:1) - /// Proof: `Nfts::ItemConfigOf` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`) - /// Storage: `Nfts::Account` (r:0 w:1) - /// Proof: `Nfts::Account` (`max_values`: None, `max_size`: Some(88), added: 2563, mode: `MaxEncodedLen`) - fn force_mint() -> Weight { - // Proof Size summary in bytes: - // Measured: `455` - // Estimated: `4326` - // Minimum execution time: 49_146_000 picoseconds. - Weight::from_parts(49_756_000, 0) - .saturating_add(Weight::from_parts(0, 4326)) - .saturating_add(T::DbWeight::get().reads(5)) - .saturating_add(T::DbWeight::get().writes(4)) - } - /// Storage: `Nfts::Attribute` (r:1 w:0) - /// Proof: `Nfts::Attribute` (`max_values`: None, `max_size`: Some(479), added: 2954, mode: `MaxEncodedLen`) - /// Storage: `Nfts::ItemConfigOf` (r:1 w:1) - /// Proof: `Nfts::ItemConfigOf` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`) - /// Storage: `Nfts::Collection` (r:1 w:1) - /// Proof: `Nfts::Collection` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`) - /// Storage: `Nfts::Item` (r:1 w:1) - /// Proof: `Nfts::Item` (`max_values`: None, `max_size`: Some(861), added: 3336, mode: `MaxEncodedLen`) - /// Storage: `Nfts::ItemMetadataOf` (r:1 w:0) - /// Proof: `Nfts::ItemMetadataOf` (`max_values`: None, `max_size`: Some(347), added: 2822, mode: `MaxEncodedLen`) - /// Storage: `Nfts::Account` (r:0 w:1) - /// Proof: `Nfts::Account` (`max_values`: None, `max_size`: Some(88), added: 2563, mode: `MaxEncodedLen`) - /// Storage: `Nfts::ItemPriceOf` (r:0 w:1) - /// Proof: `Nfts::ItemPriceOf` (`max_values`: None, `max_size`: Some(89), added: 2564, mode: `MaxEncodedLen`) - /// Storage: `Nfts::ItemAttributesApprovalsOf` (r:0 w:1) - /// Proof: `Nfts::ItemAttributesApprovalsOf` (`max_values`: None, `max_size`: Some(1001), added: 3476, mode: `MaxEncodedLen`) - /// Storage: `Nfts::PendingSwapOf` (r:0 w:1) - /// Proof: `Nfts::PendingSwapOf` (`max_values`: None, `max_size`: Some(71), added: 2546, mode: `MaxEncodedLen`) - fn burn() -> Weight { - // Proof Size summary in bytes: - // Measured: `564` - // Estimated: `4326` - // Minimum execution time: 56_059_000 picoseconds. 
- Weight::from_parts(57_162_000, 0) - .saturating_add(Weight::from_parts(0, 4326)) - .saturating_add(T::DbWeight::get().reads(5)) - .saturating_add(T::DbWeight::get().writes(7)) - } - /// Storage: `Nfts::Collection` (r:1 w:0) - /// Proof: `Nfts::Collection` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`) - /// Storage: `Nfts::Attribute` (r:1 w:0) - /// Proof: `Nfts::Attribute` (`max_values`: None, `max_size`: Some(479), added: 2954, mode: `MaxEncodedLen`) - /// Storage: `Nfts::CollectionConfigOf` (r:1 w:0) - /// Proof: `Nfts::CollectionConfigOf` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) - /// Storage: `Nfts::ItemConfigOf` (r:1 w:0) - /// Proof: `Nfts::ItemConfigOf` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`) - /// Storage: `Nfts::Item` (r:1 w:1) - /// Proof: `Nfts::Item` (`max_values`: None, `max_size`: Some(861), added: 3336, mode: `MaxEncodedLen`) - /// Storage: `Nfts::Account` (r:0 w:2) - /// Proof: `Nfts::Account` (`max_values`: None, `max_size`: Some(88), added: 2563, mode: `MaxEncodedLen`) - /// Storage: `Nfts::ItemPriceOf` (r:0 w:1) - /// Proof: `Nfts::ItemPriceOf` (`max_values`: None, `max_size`: Some(89), added: 2564, mode: `MaxEncodedLen`) - /// Storage: `Nfts::PendingSwapOf` (r:0 w:1) - /// Proof: `Nfts::PendingSwapOf` (`max_values`: None, `max_size`: Some(71), added: 2546, mode: `MaxEncodedLen`) - fn transfer() -> Weight { - // Proof Size summary in bytes: - // Measured: `593` - // Estimated: `4326` - // Minimum execution time: 42_406_000 picoseconds. - Weight::from_parts(43_187_000, 0) - .saturating_add(Weight::from_parts(0, 4326)) - .saturating_add(T::DbWeight::get().reads(5)) - .saturating_add(T::DbWeight::get().writes(5)) - } - /// Storage: `Nfts::Collection` (r:1 w:0) - /// Proof: `Nfts::Collection` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`) - /// Storage: `Nfts::CollectionConfigOf` (r:1 w:0) - /// Proof: `Nfts::CollectionConfigOf` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) - /// Storage: `Nfts::Item` (r:5000 w:5000) - /// Proof: `Nfts::Item` (`max_values`: None, `max_size`: Some(861), added: 3336, mode: `MaxEncodedLen`) - /// The range of component `i` is `[0, 5000]`. - fn redeposit(i: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `763 + i * (108 ±0)` - // Estimated: `3549 + i * (3336 ±0)` - // Minimum execution time: 16_960_000 picoseconds. - Weight::from_parts(17_167_000, 0) - .saturating_add(Weight::from_parts(0, 3549)) - // Standard Error: 24_110 - .saturating_add(Weight::from_parts(18_046_970, 0).saturating_mul(i.into())) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(i.into()))) - .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(i.into()))) - .saturating_add(Weight::from_parts(0, 3336).saturating_mul(i.into())) - } - /// Storage: `Nfts::CollectionRoleOf` (r:1 w:0) - /// Proof: `Nfts::CollectionRoleOf` (`max_values`: None, `max_size`: Some(69), added: 2544, mode: `MaxEncodedLen`) - /// Storage: `Nfts::ItemConfigOf` (r:1 w:1) - /// Proof: `Nfts::ItemConfigOf` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`) - fn lock_item_transfer() -> Weight { - // Proof Size summary in bytes: - // Measured: `435` - // Estimated: `3534` - // Minimum execution time: 21_023_000 picoseconds. 
- Weight::from_parts(21_409_000, 0) - .saturating_add(Weight::from_parts(0, 3534)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `Nfts::CollectionRoleOf` (r:1 w:0) - /// Proof: `Nfts::CollectionRoleOf` (`max_values`: None, `max_size`: Some(69), added: 2544, mode: `MaxEncodedLen`) - /// Storage: `Nfts::ItemConfigOf` (r:1 w:1) - /// Proof: `Nfts::ItemConfigOf` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`) - fn unlock_item_transfer() -> Weight { - // Proof Size summary in bytes: - // Measured: `435` - // Estimated: `3534` - // Minimum execution time: 20_706_000 picoseconds. - Weight::from_parts(21_030_000, 0) - .saturating_add(Weight::from_parts(0, 3534)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `Nfts::Collection` (r:1 w:0) - /// Proof: `Nfts::Collection` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`) - /// Storage: `Nfts::CollectionConfigOf` (r:1 w:1) - /// Proof: `Nfts::CollectionConfigOf` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) - fn lock_collection() -> Weight { - // Proof Size summary in bytes: - // Measured: `340` - // Estimated: `3549` - // Minimum execution time: 17_449_000 picoseconds. - Weight::from_parts(17_804_000, 0) - .saturating_add(Weight::from_parts(0, 3549)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `Nfts::OwnershipAcceptance` (r:1 w:1) - /// Proof: `Nfts::OwnershipAcceptance` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) - /// Storage: `Nfts::Collection` (r:1 w:1) - /// Proof: `Nfts::Collection` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`) - /// Storage: `Nfts::CollectionAccount` (r:0 w:2) - /// Proof: `Nfts::CollectionAccount` (`max_values`: None, `max_size`: Some(68), added: 2543, mode: `MaxEncodedLen`) - fn transfer_ownership() -> Weight { - // Proof Size summary in bytes: - // Measured: `388` - // Estimated: `3549` - // Minimum execution time: 22_958_000 picoseconds. - Weight::from_parts(23_499_000, 0) - .saturating_add(Weight::from_parts(0, 3549)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(4)) - } - /// Storage: `Nfts::Collection` (r:1 w:1) - /// Proof: `Nfts::Collection` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`) - /// Storage: `Nfts::CollectionRoleOf` (r:2 w:4) - /// Proof: `Nfts::CollectionRoleOf` (`max_values`: None, `max_size`: Some(69), added: 2544, mode: `MaxEncodedLen`) - fn set_team() -> Weight { - // Proof Size summary in bytes: - // Measured: `369` - // Estimated: `6078` - // Minimum execution time: 40_105_000 picoseconds. - Weight::from_parts(40_800_000, 0) - .saturating_add(Weight::from_parts(0, 6078)) - .saturating_add(T::DbWeight::get().reads(3)) - .saturating_add(T::DbWeight::get().writes(5)) - } - /// Storage: `Nfts::Collection` (r:1 w:1) - /// Proof: `Nfts::Collection` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`) - /// Storage: `Nfts::CollectionAccount` (r:0 w:2) - /// Proof: `Nfts::CollectionAccount` (`max_values`: None, `max_size`: Some(68), added: 2543, mode: `MaxEncodedLen`) - fn force_collection_owner() -> Weight { - // Proof Size summary in bytes: - // Measured: `311` - // Estimated: `3549` - // Minimum execution time: 17_832_000 picoseconds. 
- Weight::from_parts(18_297_000, 0) - .saturating_add(Weight::from_parts(0, 3549)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(3)) - } - /// Storage: `Nfts::Collection` (r:1 w:0) - /// Proof: `Nfts::Collection` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`) - /// Storage: `Nfts::CollectionConfigOf` (r:0 w:1) - /// Proof: `Nfts::CollectionConfigOf` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) - fn force_collection_config() -> Weight { - // Proof Size summary in bytes: - // Measured: `276` - // Estimated: `3549` - // Minimum execution time: 15_027_000 picoseconds. - Weight::from_parts(15_370_000, 0) - .saturating_add(Weight::from_parts(0, 3549)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `Nfts::CollectionRoleOf` (r:1 w:0) - /// Proof: `Nfts::CollectionRoleOf` (`max_values`: None, `max_size`: Some(69), added: 2544, mode: `MaxEncodedLen`) - /// Storage: `Nfts::ItemConfigOf` (r:1 w:1) - /// Proof: `Nfts::ItemConfigOf` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`) - fn lock_item_properties() -> Weight { - // Proof Size summary in bytes: - // Measured: `435` - // Estimated: `3534` - // Minimum execution time: 19_912_000 picoseconds. - Weight::from_parts(20_258_000, 0) - .saturating_add(Weight::from_parts(0, 3534)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `Nfts::Collection` (r:1 w:1) - /// Proof: `Nfts::Collection` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`) - /// Storage: `Nfts::CollectionRoleOf` (r:1 w:0) - /// Proof: `Nfts::CollectionRoleOf` (`max_values`: None, `max_size`: Some(69), added: 2544, mode: `MaxEncodedLen`) - /// Storage: `Nfts::CollectionConfigOf` (r:1 w:0) - /// Proof: `Nfts::CollectionConfigOf` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) - /// Storage: `Nfts::ItemConfigOf` (r:1 w:0) - /// Proof: `Nfts::ItemConfigOf` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`) - /// Storage: `Nfts::Attribute` (r:1 w:1) - /// Proof: `Nfts::Attribute` (`max_values`: None, `max_size`: Some(479), added: 2954, mode: `MaxEncodedLen`) - fn set_attribute() -> Weight { - // Proof Size summary in bytes: - // Measured: `539` - // Estimated: `3944` - // Minimum execution time: 50_138_000 picoseconds. - Weight::from_parts(50_971_000, 0) - .saturating_add(Weight::from_parts(0, 3944)) - .saturating_add(T::DbWeight::get().reads(5)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `Nfts::Collection` (r:1 w:1) - /// Proof: `Nfts::Collection` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`) - /// Storage: `Nfts::Attribute` (r:1 w:1) - /// Proof: `Nfts::Attribute` (`max_values`: None, `max_size`: Some(479), added: 2954, mode: `MaxEncodedLen`) - fn force_set_attribute() -> Weight { - // Proof Size summary in bytes: - // Measured: `344` - // Estimated: `3944` - // Minimum execution time: 26_385_000 picoseconds. 
- Weight::from_parts(27_086_000, 0) - .saturating_add(Weight::from_parts(0, 3944)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `Nfts::Attribute` (r:1 w:1) - /// Proof: `Nfts::Attribute` (`max_values`: None, `max_size`: Some(479), added: 2954, mode: `MaxEncodedLen`) - /// Storage: `Nfts::CollectionRoleOf` (r:1 w:0) - /// Proof: `Nfts::CollectionRoleOf` (`max_values`: None, `max_size`: Some(69), added: 2544, mode: `MaxEncodedLen`) - /// Storage: `Nfts::ItemConfigOf` (r:1 w:0) - /// Proof: `Nfts::ItemConfigOf` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`) - /// Storage: `Nfts::Collection` (r:1 w:1) - /// Proof: `Nfts::Collection` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`) - fn clear_attribute() -> Weight { - // Proof Size summary in bytes: - // Measured: `983` - // Estimated: `3944` - // Minimum execution time: 45_687_000 picoseconds. - Weight::from_parts(47_107_000, 0) - .saturating_add(Weight::from_parts(0, 3944)) - .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `Nfts::Item` (r:1 w:0) - /// Proof: `Nfts::Item` (`max_values`: None, `max_size`: Some(861), added: 3336, mode: `MaxEncodedLen`) - /// Storage: `Nfts::ItemAttributesApprovalsOf` (r:1 w:1) - /// Proof: `Nfts::ItemAttributesApprovalsOf` (`max_values`: None, `max_size`: Some(1001), added: 3476, mode: `MaxEncodedLen`) - fn approve_item_attributes() -> Weight { - // Proof Size summary in bytes: - // Measured: `381` - // Estimated: `4466` - // Minimum execution time: 18_065_000 picoseconds. - Weight::from_parts(18_371_000, 0) - .saturating_add(Weight::from_parts(0, 4466)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `Nfts::Item` (r:1 w:0) - /// Proof: `Nfts::Item` (`max_values`: None, `max_size`: Some(861), added: 3336, mode: `MaxEncodedLen`) - /// Storage: `Nfts::ItemAttributesApprovalsOf` (r:1 w:1) - /// Proof: `Nfts::ItemAttributesApprovalsOf` (`max_values`: None, `max_size`: Some(1001), added: 3476, mode: `MaxEncodedLen`) - /// Storage: `Nfts::Attribute` (r:1001 w:1000) - /// Proof: `Nfts::Attribute` (`max_values`: None, `max_size`: Some(479), added: 2954, mode: `MaxEncodedLen`) - /// Storage: `System::Account` (r:1 w:1) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - /// The range of component `n` is `[0, 1000]`. - fn cancel_item_attributes_approval(n: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `760 + n * (398 ±0)` - // Estimated: `4466 + n * (2954 ±0)` - // Minimum execution time: 26_680_000 picoseconds. 
- Weight::from_parts(27_010_000, 0) - .saturating_add(Weight::from_parts(0, 4466)) - // Standard Error: 6_351 - .saturating_add(Weight::from_parts(6_584_290, 0).saturating_mul(n.into())) - .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(n.into()))) - .saturating_add(T::DbWeight::get().writes(2)) - .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(n.into()))) - .saturating_add(Weight::from_parts(0, 2954).saturating_mul(n.into())) - } - /// Storage: `Nfts::CollectionRoleOf` (r:1 w:0) - /// Proof: `Nfts::CollectionRoleOf` (`max_values`: None, `max_size`: Some(69), added: 2544, mode: `MaxEncodedLen`) - /// Storage: `Nfts::Collection` (r:1 w:1) - /// Proof: `Nfts::Collection` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`) - /// Storage: `Nfts::ItemConfigOf` (r:1 w:0) - /// Proof: `Nfts::ItemConfigOf` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`) - /// Storage: `Nfts::CollectionConfigOf` (r:1 w:0) - /// Proof: `Nfts::CollectionConfigOf` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) - /// Storage: `Nfts::ItemMetadataOf` (r:1 w:1) - /// Proof: `Nfts::ItemMetadataOf` (`max_values`: None, `max_size`: Some(347), added: 2822, mode: `MaxEncodedLen`) - fn set_metadata() -> Weight { - // Proof Size summary in bytes: - // Measured: `539` - // Estimated: `3812` - // Minimum execution time: 42_038_000 picoseconds. - Weight::from_parts(42_758_000, 0) - .saturating_add(Weight::from_parts(0, 3812)) - .saturating_add(T::DbWeight::get().reads(5)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `Nfts::CollectionRoleOf` (r:1 w:0) - /// Proof: `Nfts::CollectionRoleOf` (`max_values`: None, `max_size`: Some(69), added: 2544, mode: `MaxEncodedLen`) - /// Storage: `Nfts::ItemMetadataOf` (r:1 w:1) - /// Proof: `Nfts::ItemMetadataOf` (`max_values`: None, `max_size`: Some(347), added: 2822, mode: `MaxEncodedLen`) - /// Storage: `Nfts::Collection` (r:1 w:1) - /// Proof: `Nfts::Collection` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`) - /// Storage: `Nfts::ItemConfigOf` (r:1 w:0) - /// Proof: `Nfts::ItemConfigOf` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`) - fn clear_metadata() -> Weight { - // Proof Size summary in bytes: - // Measured: `849` - // Estimated: `3812` - // Minimum execution time: 40_220_000 picoseconds. - Weight::from_parts(41_026_000, 0) - .saturating_add(Weight::from_parts(0, 3812)) - .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `Nfts::CollectionRoleOf` (r:1 w:0) - /// Proof: `Nfts::CollectionRoleOf` (`max_values`: None, `max_size`: Some(69), added: 2544, mode: `MaxEncodedLen`) - /// Storage: `Nfts::CollectionConfigOf` (r:1 w:0) - /// Proof: `Nfts::CollectionConfigOf` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) - /// Storage: `Nfts::Collection` (r:1 w:1) - /// Proof: `Nfts::Collection` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`) - /// Storage: `Nfts::CollectionMetadataOf` (r:1 w:1) - /// Proof: `Nfts::CollectionMetadataOf` (`max_values`: None, `max_size`: Some(294), added: 2769, mode: `MaxEncodedLen`) - fn set_collection_metadata() -> Weight { - // Proof Size summary in bytes: - // Measured: `398` - // Estimated: `3759` - // Minimum execution time: 38_135_000 picoseconds. 
- Weight::from_parts(38_561_000, 0) - .saturating_add(Weight::from_parts(0, 3759)) - .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `Nfts::CollectionRoleOf` (r:1 w:0) - /// Proof: `Nfts::CollectionRoleOf` (`max_values`: None, `max_size`: Some(69), added: 2544, mode: `MaxEncodedLen`) - /// Storage: `Nfts::Collection` (r:1 w:0) - /// Proof: `Nfts::Collection` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`) - /// Storage: `Nfts::CollectionConfigOf` (r:1 w:0) - /// Proof: `Nfts::CollectionConfigOf` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) - /// Storage: `Nfts::CollectionMetadataOf` (r:1 w:1) - /// Proof: `Nfts::CollectionMetadataOf` (`max_values`: None, `max_size`: Some(294), added: 2769, mode: `MaxEncodedLen`) - fn clear_collection_metadata() -> Weight { - // Proof Size summary in bytes: - // Measured: `716` - // Estimated: `3759` - // Minimum execution time: 37_583_000 picoseconds. - Weight::from_parts(38_215_000, 0) - .saturating_add(Weight::from_parts(0, 3759)) - .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `Nfts::Item` (r:1 w:1) - /// Proof: `Nfts::Item` (`max_values`: None, `max_size`: Some(861), added: 3336, mode: `MaxEncodedLen`) - /// Storage: `Nfts::CollectionConfigOf` (r:1 w:0) - /// Proof: `Nfts::CollectionConfigOf` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) - fn approve_transfer() -> Weight { - // Proof Size summary in bytes: - // Measured: `410` - // Estimated: `4326` - // Minimum execution time: 21_405_000 picoseconds. - Weight::from_parts(21_803_000, 0) - .saturating_add(Weight::from_parts(0, 4326)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `Nfts::Item` (r:1 w:1) - /// Proof: `Nfts::Item` (`max_values`: None, `max_size`: Some(861), added: 3336, mode: `MaxEncodedLen`) - fn cancel_approval() -> Weight { - // Proof Size summary in bytes: - // Measured: `418` - // Estimated: `4326` - // Minimum execution time: 18_713_000 picoseconds. - Weight::from_parts(19_185_000, 0) - .saturating_add(Weight::from_parts(0, 4326)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `Nfts::Item` (r:1 w:1) - /// Proof: `Nfts::Item` (`max_values`: None, `max_size`: Some(861), added: 3336, mode: `MaxEncodedLen`) - fn clear_all_transfer_approvals() -> Weight { - // Proof Size summary in bytes: - // Measured: `418` - // Estimated: `4326` - // Minimum execution time: 17_803_000 picoseconds. - Weight::from_parts(18_270_000, 0) - .saturating_add(Weight::from_parts(0, 4326)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `Nfts::OwnershipAcceptance` (r:1 w:1) - /// Proof: `Nfts::OwnershipAcceptance` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) - fn set_accept_ownership() -> Weight { - // Proof Size summary in bytes: - // Measured: `76` - // Estimated: `3517` - // Minimum execution time: 15_982_000 picoseconds. 
- Weight::from_parts(16_700_000, 0) - .saturating_add(Weight::from_parts(0, 3517)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `Nfts::CollectionConfigOf` (r:1 w:1) - /// Proof: `Nfts::CollectionConfigOf` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) - /// Storage: `Nfts::Collection` (r:1 w:0) - /// Proof: `Nfts::Collection` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`) - fn set_collection_max_supply() -> Weight { - // Proof Size summary in bytes: - // Measured: `340` - // Estimated: `3549` - // Minimum execution time: 19_501_000 picoseconds. - Weight::from_parts(19_785_000, 0) - .saturating_add(Weight::from_parts(0, 3549)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `Nfts::CollectionRoleOf` (r:1 w:0) - /// Proof: `Nfts::CollectionRoleOf` (`max_values`: None, `max_size`: Some(69), added: 2544, mode: `MaxEncodedLen`) - /// Storage: `Nfts::CollectionConfigOf` (r:1 w:1) - /// Proof: `Nfts::CollectionConfigOf` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) - fn update_mint_settings() -> Weight { - // Proof Size summary in bytes: - // Measured: `323` - // Estimated: `3538` - // Minimum execution time: 18_914_000 picoseconds. - Weight::from_parts(19_292_000, 0) - .saturating_add(Weight::from_parts(0, 3538)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `Nfts::Item` (r:1 w:0) - /// Proof: `Nfts::Item` (`max_values`: None, `max_size`: Some(861), added: 3336, mode: `MaxEncodedLen`) - /// Storage: `Nfts::CollectionConfigOf` (r:1 w:0) - /// Proof: `Nfts::CollectionConfigOf` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) - /// Storage: `Nfts::ItemConfigOf` (r:1 w:0) - /// Proof: `Nfts::ItemConfigOf` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`) - /// Storage: `Nfts::ItemPriceOf` (r:0 w:1) - /// Proof: `Nfts::ItemPriceOf` (`max_values`: None, `max_size`: Some(89), added: 2564, mode: `MaxEncodedLen`) - fn set_price() -> Weight { - // Proof Size summary in bytes: - // Measured: `518` - // Estimated: `4326` - // Minimum execution time: 24_625_000 picoseconds. 
- Weight::from_parts(25_257_000, 0) - .saturating_add(Weight::from_parts(0, 4326)) - .saturating_add(T::DbWeight::get().reads(3)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `Nfts::Item` (r:1 w:1) - /// Proof: `Nfts::Item` (`max_values`: None, `max_size`: Some(861), added: 3336, mode: `MaxEncodedLen`) - /// Storage: `Nfts::ItemPriceOf` (r:1 w:1) - /// Proof: `Nfts::ItemPriceOf` (`max_values`: None, `max_size`: Some(89), added: 2564, mode: `MaxEncodedLen`) - /// Storage: `Nfts::Collection` (r:1 w:0) - /// Proof: `Nfts::Collection` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`) - /// Storage: `Nfts::Attribute` (r:1 w:0) - /// Proof: `Nfts::Attribute` (`max_values`: None, `max_size`: Some(479), added: 2954, mode: `MaxEncodedLen`) - /// Storage: `Nfts::CollectionConfigOf` (r:1 w:0) - /// Proof: `Nfts::CollectionConfigOf` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) - /// Storage: `Nfts::ItemConfigOf` (r:1 w:0) - /// Proof: `Nfts::ItemConfigOf` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`) - /// Storage: `Nfts::Account` (r:0 w:2) - /// Proof: `Nfts::Account` (`max_values`: None, `max_size`: Some(88), added: 2563, mode: `MaxEncodedLen`) - /// Storage: `Nfts::PendingSwapOf` (r:0 w:1) - /// Proof: `Nfts::PendingSwapOf` (`max_values`: None, `max_size`: Some(71), added: 2546, mode: `MaxEncodedLen`) - fn buy_item() -> Weight { - // Proof Size summary in bytes: - // Measured: `705` - // Estimated: `4326` - // Minimum execution time: 50_833_000 picoseconds. - Weight::from_parts(52_161_000, 0) - .saturating_add(Weight::from_parts(0, 4326)) - .saturating_add(T::DbWeight::get().reads(6)) - .saturating_add(T::DbWeight::get().writes(5)) - } - /// The range of component `n` is `[0, 10]`. - fn pay_tips(n: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 2_220_000 picoseconds. - Weight::from_parts(3_476_001, 0) - .saturating_add(Weight::from_parts(0, 0)) - // Standard Error: 7_084 - .saturating_add(Weight::from_parts(3_844_820, 0).saturating_mul(n.into())) - } - /// Storage: `Nfts::Item` (r:2 w:0) - /// Proof: `Nfts::Item` (`max_values`: None, `max_size`: Some(861), added: 3336, mode: `MaxEncodedLen`) - /// Storage: `Nfts::PendingSwapOf` (r:0 w:1) - /// Proof: `Nfts::PendingSwapOf` (`max_values`: None, `max_size`: Some(71), added: 2546, mode: `MaxEncodedLen`) - fn create_swap() -> Weight { - // Proof Size summary in bytes: - // Measured: `494` - // Estimated: `7662` - // Minimum execution time: 21_983_000 picoseconds. - Weight::from_parts(22_746_000, 0) - .saturating_add(Weight::from_parts(0, 7662)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `Nfts::PendingSwapOf` (r:1 w:1) - /// Proof: `Nfts::PendingSwapOf` (`max_values`: None, `max_size`: Some(71), added: 2546, mode: `MaxEncodedLen`) - /// Storage: `Nfts::Item` (r:1 w:0) - /// Proof: `Nfts::Item` (`max_values`: None, `max_size`: Some(861), added: 3336, mode: `MaxEncodedLen`) - fn cancel_swap() -> Weight { - // Proof Size summary in bytes: - // Measured: `513` - // Estimated: `4326` - // Minimum execution time: 20_875_000 picoseconds. 
- Weight::from_parts(21_465_000, 0) - .saturating_add(Weight::from_parts(0, 4326)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `Nfts::Item` (r:2 w:2) - /// Proof: `Nfts::Item` (`max_values`: None, `max_size`: Some(861), added: 3336, mode: `MaxEncodedLen`) - /// Storage: `Nfts::PendingSwapOf` (r:1 w:2) - /// Proof: `Nfts::PendingSwapOf` (`max_values`: None, `max_size`: Some(71), added: 2546, mode: `MaxEncodedLen`) - /// Storage: `Nfts::Collection` (r:1 w:0) - /// Proof: `Nfts::Collection` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`) - /// Storage: `Nfts::Attribute` (r:2 w:0) - /// Proof: `Nfts::Attribute` (`max_values`: None, `max_size`: Some(479), added: 2954, mode: `MaxEncodedLen`) - /// Storage: `Nfts::CollectionConfigOf` (r:1 w:0) - /// Proof: `Nfts::CollectionConfigOf` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) - /// Storage: `Nfts::ItemConfigOf` (r:2 w:0) - /// Proof: `Nfts::ItemConfigOf` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`) - /// Storage: `Nfts::Account` (r:0 w:4) - /// Proof: `Nfts::Account` (`max_values`: None, `max_size`: Some(88), added: 2563, mode: `MaxEncodedLen`) - /// Storage: `Nfts::ItemPriceOf` (r:0 w:2) - /// Proof: `Nfts::ItemPriceOf` (`max_values`: None, `max_size`: Some(89), added: 2564, mode: `MaxEncodedLen`) - fn claim_swap() -> Weight { - // Proof Size summary in bytes: - // Measured: `834` - // Estimated: `7662` - // Minimum execution time: 84_771_000 picoseconds. - Weight::from_parts(86_078_000, 0) - .saturating_add(Weight::from_parts(0, 7662)) - .saturating_add(T::DbWeight::get().reads(9)) - .saturating_add(T::DbWeight::get().writes(10)) - } - /// Storage: `Nfts::CollectionRoleOf` (r:2 w:0) - /// Proof: `Nfts::CollectionRoleOf` (`max_values`: None, `max_size`: Some(69), added: 2544, mode: `MaxEncodedLen`) - /// Storage: `Nfts::CollectionConfigOf` (r:1 w:0) - /// Proof: `Nfts::CollectionConfigOf` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) - /// Storage: `Nfts::Item` (r:1 w:1) - /// Proof: `Nfts::Item` (`max_values`: None, `max_size`: Some(861), added: 3336, mode: `MaxEncodedLen`) - /// Storage: `Nfts::Collection` (r:1 w:1) - /// Proof: `Nfts::Collection` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`) - /// Storage: `Nfts::ItemConfigOf` (r:1 w:1) - /// Proof: `Nfts::ItemConfigOf` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`) - /// Storage: `System::Account` (r:1 w:1) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - /// Storage: `Nfts::Attribute` (r:10 w:10) - /// Proof: `Nfts::Attribute` (`max_values`: None, `max_size`: Some(479), added: 2954, mode: `MaxEncodedLen`) - /// Storage: `Nfts::ItemMetadataOf` (r:1 w:1) - /// Proof: `Nfts::ItemMetadataOf` (`max_values`: None, `max_size`: Some(347), added: 2822, mode: `MaxEncodedLen`) - /// Storage: `Nfts::Account` (r:0 w:1) - /// Proof: `Nfts::Account` (`max_values`: None, `max_size`: Some(88), added: 2563, mode: `MaxEncodedLen`) - /// The range of component `n` is `[0, 10]`. - fn mint_pre_signed(n: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `558` - // Estimated: `6078 + n * (2954 ±0)` - // Minimum execution time: 143_265_000 picoseconds. 
- Weight::from_parts(150_978_773, 0) - .saturating_add(Weight::from_parts(0, 6078)) - // Standard Error: 49_443 - .saturating_add(Weight::from_parts(31_888_255, 0).saturating_mul(n.into())) - .saturating_add(T::DbWeight::get().reads(8)) - .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(n.into()))) - .saturating_add(T::DbWeight::get().writes(6)) - .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(n.into()))) - .saturating_add(Weight::from_parts(0, 2954).saturating_mul(n.into())) - } - /// Storage: `Nfts::Item` (r:1 w:0) - /// Proof: `Nfts::Item` (`max_values`: None, `max_size`: Some(861), added: 3336, mode: `MaxEncodedLen`) - /// Storage: `Nfts::ItemAttributesApprovalsOf` (r:1 w:1) - /// Proof: `Nfts::ItemAttributesApprovalsOf` (`max_values`: None, `max_size`: Some(1001), added: 3476, mode: `MaxEncodedLen`) - /// Storage: `Nfts::CollectionConfigOf` (r:1 w:0) - /// Proof: `Nfts::CollectionConfigOf` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) - /// Storage: `Nfts::Collection` (r:1 w:1) - /// Proof: `Nfts::Collection` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`) - /// Storage: `Nfts::Attribute` (r:10 w:10) - /// Proof: `Nfts::Attribute` (`max_values`: None, `max_size`: Some(479), added: 2954, mode: `MaxEncodedLen`) - /// Storage: `System::Account` (r:1 w:1) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - /// The range of component `n` is `[0, 10]`. - fn set_attributes_pre_signed(n: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `588` - // Estimated: `4466 + n * (2954 ±0)` - // Minimum execution time: 83_754_000 picoseconds. - Weight::from_parts(96_685_026, 0) - .saturating_add(Weight::from_parts(0, 4466)) - // Standard Error: 72_592 - .saturating_add(Weight::from_parts(30_914_858, 0).saturating_mul(n.into())) - .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(n.into()))) - .saturating_add(T::DbWeight::get().writes(2)) - .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(n.into()))) - .saturating_add(Weight::from_parts(0, 2954).saturating_mul(n.into())) - } -} diff --git a/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/pallet_proxy.rs b/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/pallet_proxy.rs deleted file mode 100644 index 0cdffc653bc..00000000000 --- a/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/pallet_proxy.rs +++ /dev/null @@ -1,225 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! Autogenerated weights for `pallet_proxy` -//! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-07-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! WORST CASE MAP SIZE: `1000000` -//! 
HOSTNAME: `runner-ynta1nyy-project-238-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz`
-//! EXECUTION: ``, WASM-EXECUTION: `Compiled`, CHAIN: `Some("asset-hub-kusama-dev")`, DB CACHE: 1024
-
-// Executed Command:
-// ./target/production/polkadot-parachain
-// benchmark
-// pallet
-// --chain=asset-hub-kusama-dev
-// --wasm-execution=compiled
-// --pallet=pallet_proxy
-// --no-storage-info
-// --no-median-slopes
-// --no-min-squares
-// --extrinsic=*
-// --steps=50
-// --repeat=20
-// --json
-// --header=./file_header.txt
-// --output=./parachains/runtimes/assets/asset-hub-kusama/src/weights/
-
-#![cfg_attr(rustfmt, rustfmt_skip)]
-#![allow(unused_parens)]
-#![allow(unused_imports)]
-#![allow(missing_docs)]
-
-use frame_support::{traits::Get, weights::Weight};
-use core::marker::PhantomData;
-
-/// Weight functions for `pallet_proxy`.
-pub struct WeightInfo<T>(PhantomData<T>);
-impl<T: frame_system::Config> pallet_proxy::WeightInfo for WeightInfo<T> {
- /// Storage: `Proxy::Proxies` (r:1 w:0)
- /// Proof: `Proxy::Proxies` (`max_values`: None, `max_size`: Some(1241), added: 3716, mode: `MaxEncodedLen`)
- /// The range of component `p` is `[1, 31]`.
- fn proxy(p: u32, ) -> Weight {
- // Proof Size summary in bytes:
- // Measured: `127 + p * (37 ±0)`
- // Estimated: `4706`
- // Minimum execution time: 16_417_000 picoseconds.
- Weight::from_parts(17_283_443, 0)
- .saturating_add(Weight::from_parts(0, 4706))
- // Standard Error: 2_409
- .saturating_add(Weight::from_parts(32_123, 0).saturating_mul(p.into()))
- .saturating_add(T::DbWeight::get().reads(1))
- }
- /// Storage: `Proxy::Proxies` (r:1 w:0)
- /// Proof: `Proxy::Proxies` (`max_values`: None, `max_size`: Some(1241), added: 3716, mode: `MaxEncodedLen`)
- /// Storage: `Proxy::Announcements` (r:1 w:1)
- /// Proof: `Proxy::Announcements` (`max_values`: None, `max_size`: Some(2233), added: 4708, mode: `MaxEncodedLen`)
- /// Storage: `System::Account` (r:1 w:1)
- /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`)
- /// The range of component `a` is `[0, 31]`.
- /// The range of component `p` is `[1, 31]`.
- fn proxy_announced(a: u32, p: u32, ) -> Weight {
- // Proof Size summary in bytes:
- // Measured: `454 + a * (68 ±0) + p * (37 ±0)`
- // Estimated: `5698`
- // Minimum execution time: 37_572_000 picoseconds.
- Weight::from_parts(37_045_756, 0)
- .saturating_add(Weight::from_parts(0, 5698))
- // Standard Error: 2_896
- .saturating_add(Weight::from_parts(139_561, 0).saturating_mul(a.into()))
- // Standard Error: 2_993
- .saturating_add(Weight::from_parts(73_270, 0).saturating_mul(p.into()))
- .saturating_add(T::DbWeight::get().reads(3))
- .saturating_add(T::DbWeight::get().writes(2))
- }
- /// Storage: `Proxy::Announcements` (r:1 w:1)
- /// Proof: `Proxy::Announcements` (`max_values`: None, `max_size`: Some(2233), added: 4708, mode: `MaxEncodedLen`)
- /// Storage: `System::Account` (r:1 w:1)
- /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`)
- /// The range of component `a` is `[0, 31]`.
- /// The range of component `p` is `[1, 31]`.
- fn remove_announcement(a: u32, p: u32, ) -> Weight {
- // Proof Size summary in bytes:
- // Measured: `369 + a * (68 ±0)`
- // Estimated: `5698`
- // Minimum execution time: 24_066_000 picoseconds.
- Weight::from_parts(24_711_403, 0) - .saturating_add(Weight::from_parts(0, 5698)) - // Standard Error: 1_626 - .saturating_add(Weight::from_parts(128_391, 0).saturating_mul(a.into())) - // Standard Error: 1_680 - .saturating_add(Weight::from_parts(23_124, 0).saturating_mul(p.into())) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `Proxy::Announcements` (r:1 w:1) - /// Proof: `Proxy::Announcements` (`max_values`: None, `max_size`: Some(2233), added: 4708, mode: `MaxEncodedLen`) - /// Storage: `System::Account` (r:1 w:1) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - /// The range of component `a` is `[0, 31]`. - /// The range of component `p` is `[1, 31]`. - fn reject_announcement(a: u32, p: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `369 + a * (68 ±0)` - // Estimated: `5698` - // Minimum execution time: 24_162_000 picoseconds. - Weight::from_parts(23_928_058, 0) - .saturating_add(Weight::from_parts(0, 5698)) - // Standard Error: 2_072 - .saturating_add(Weight::from_parts(152_299, 0).saturating_mul(a.into())) - // Standard Error: 2_141 - .saturating_add(Weight::from_parts(39_775, 0).saturating_mul(p.into())) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `Proxy::Proxies` (r:1 w:0) - /// Proof: `Proxy::Proxies` (`max_values`: None, `max_size`: Some(1241), added: 3716, mode: `MaxEncodedLen`) - /// Storage: `Proxy::Announcements` (r:1 w:1) - /// Proof: `Proxy::Announcements` (`max_values`: None, `max_size`: Some(2233), added: 4708, mode: `MaxEncodedLen`) - /// Storage: `System::Account` (r:1 w:1) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - /// The range of component `a` is `[0, 31]`. - /// The range of component `p` is `[1, 31]`. - fn announce(a: u32, p: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `386 + a * (68 ±0) + p * (37 ±0)` - // Estimated: `5698` - // Minimum execution time: 33_858_000 picoseconds. - Weight::from_parts(33_568_059, 0) - .saturating_add(Weight::from_parts(0, 5698)) - // Standard Error: 1_816 - .saturating_add(Weight::from_parts(134_400, 0).saturating_mul(a.into())) - // Standard Error: 1_876 - .saturating_add(Weight::from_parts(57_028, 0).saturating_mul(p.into())) - .saturating_add(T::DbWeight::get().reads(3)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `Proxy::Proxies` (r:1 w:1) - /// Proof: `Proxy::Proxies` (`max_values`: None, `max_size`: Some(1241), added: 3716, mode: `MaxEncodedLen`) - /// The range of component `p` is `[1, 31]`. - fn add_proxy(p: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `127 + p * (37 ±0)` - // Estimated: `4706` - // Minimum execution time: 24_947_000 picoseconds. - Weight::from_parts(26_235_199, 0) - .saturating_add(Weight::from_parts(0, 4706)) - // Standard Error: 1_363 - .saturating_add(Weight::from_parts(41_435, 0).saturating_mul(p.into())) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `Proxy::Proxies` (r:1 w:1) - /// Proof: `Proxy::Proxies` (`max_values`: None, `max_size`: Some(1241), added: 3716, mode: `MaxEncodedLen`) - /// The range of component `p` is `[1, 31]`. 
- fn remove_proxy(p: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `127 + p * (37 ±0)` - // Estimated: `4706` - // Minimum execution time: 25_186_000 picoseconds. - Weight::from_parts(26_823_133, 0) - .saturating_add(Weight::from_parts(0, 4706)) - // Standard Error: 2_259 - .saturating_add(Weight::from_parts(34_224, 0).saturating_mul(p.into())) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `Proxy::Proxies` (r:1 w:1) - /// Proof: `Proxy::Proxies` (`max_values`: None, `max_size`: Some(1241), added: 3716, mode: `MaxEncodedLen`) - /// The range of component `p` is `[1, 31]`. - fn remove_proxies(p: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `127 + p * (37 ±0)` - // Estimated: `4706` - // Minimum execution time: 22_156_000 picoseconds. - Weight::from_parts(23_304_060, 0) - .saturating_add(Weight::from_parts(0, 4706)) - // Standard Error: 1_738 - .saturating_add(Weight::from_parts(39_612, 0).saturating_mul(p.into())) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `Proxy::Proxies` (r:1 w:1) - /// Proof: `Proxy::Proxies` (`max_values`: None, `max_size`: Some(1241), added: 3716, mode: `MaxEncodedLen`) - /// The range of component `p` is `[1, 31]`. - fn create_pure(p: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `139` - // Estimated: `4706` - // Minimum execution time: 26_914_000 picoseconds. - Weight::from_parts(28_009_062, 0) - .saturating_add(Weight::from_parts(0, 4706)) - // Standard Error: 1_978 - .saturating_add(Weight::from_parts(12_255, 0).saturating_mul(p.into())) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `Proxy::Proxies` (r:1 w:1) - /// Proof: `Proxy::Proxies` (`max_values`: None, `max_size`: Some(1241), added: 3716, mode: `MaxEncodedLen`) - /// The range of component `p` is `[0, 30]`. - fn kill_pure(p: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `164 + p * (37 ±0)` - // Estimated: `4706` - // Minimum execution time: 23_281_000 picoseconds. - Weight::from_parts(24_392_989, 0) - .saturating_add(Weight::from_parts(0, 4706)) - // Standard Error: 2_943 - .saturating_add(Weight::from_parts(30_287, 0).saturating_mul(p.into())) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } -} diff --git a/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/pallet_session.rs b/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/pallet_session.rs deleted file mode 100644 index 73c3c06945d..00000000000 --- a/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/pallet_session.rs +++ /dev/null @@ -1,80 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! Autogenerated weights for `pallet_session` -//! -//! 
THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev
-//! DATE: 2023-07-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
-//! WORST CASE MAP SIZE: `1000000`
-//! HOSTNAME: `runner-ynta1nyy-project-238-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz`
-//! EXECUTION: ``, WASM-EXECUTION: `Compiled`, CHAIN: `Some("asset-hub-kusama-dev")`, DB CACHE: 1024
-
-// Executed Command:
-// ./target/production/polkadot-parachain
-// benchmark
-// pallet
-// --chain=asset-hub-kusama-dev
-// --wasm-execution=compiled
-// --pallet=pallet_session
-// --no-storage-info
-// --no-median-slopes
-// --no-min-squares
-// --extrinsic=*
-// --steps=50
-// --repeat=20
-// --json
-// --header=./file_header.txt
-// --output=./parachains/runtimes/assets/asset-hub-kusama/src/weights/
-
-#![cfg_attr(rustfmt, rustfmt_skip)]
-#![allow(unused_parens)]
-#![allow(unused_imports)]
-#![allow(missing_docs)]
-
-use frame_support::{traits::Get, weights::Weight};
-use core::marker::PhantomData;
-
-/// Weight functions for `pallet_session`.
-pub struct WeightInfo<T>(PhantomData<T>);
-impl<T: frame_system::Config> pallet_session::WeightInfo for WeightInfo<T> {
- /// Storage: `Session::NextKeys` (r:1 w:1)
- /// Proof: `Session::NextKeys` (`max_values`: None, `max_size`: None, mode: `Measured`)
- /// Storage: `Session::KeyOwner` (r:1 w:1)
- /// Proof: `Session::KeyOwner` (`max_values`: None, `max_size`: None, mode: `Measured`)
- fn set_keys() -> Weight {
- // Proof Size summary in bytes:
- // Measured: `270`
- // Estimated: `3735`
- // Minimum execution time: 16_932_000 picoseconds.
- Weight::from_parts(17_357_000, 0)
- .saturating_add(Weight::from_parts(0, 3735))
- .saturating_add(T::DbWeight::get().reads(2))
- .saturating_add(T::DbWeight::get().writes(2))
- }
- /// Storage: `Session::NextKeys` (r:1 w:1)
- /// Proof: `Session::NextKeys` (`max_values`: None, `max_size`: None, mode: `Measured`)
- /// Storage: `Session::KeyOwner` (r:0 w:1)
- /// Proof: `Session::KeyOwner` (`max_values`: None, `max_size`: None, mode: `Measured`)
- fn purge_keys() -> Weight {
- // Proof Size summary in bytes:
- // Measured: `242`
- // Estimated: `3707`
- // Minimum execution time: 12_157_000 picoseconds.
- Weight::from_parts(12_770_000, 0)
- .saturating_add(Weight::from_parts(0, 3707))
- .saturating_add(T::DbWeight::get().reads(1))
- .saturating_add(T::DbWeight::get().writes(2))
- }
-}
diff --git a/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/pallet_timestamp.rs b/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/pallet_timestamp.rs
deleted file mode 100644
index e27289a49e9..00000000000
--- a/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/pallet_timestamp.rs
+++ /dev/null
@@ -1,74 +0,0 @@
-// Copyright (C) Parity Technologies (UK) Ltd.
-// SPDX-License-Identifier: Apache-2.0
-
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-//! Autogenerated weights for `pallet_timestamp`
-//!
-//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev
-//! DATE: 2023-07-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
-//! WORST CASE MAP SIZE: `1000000`
-//! HOSTNAME: `runner-ynta1nyy-project-238-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz`
-//! EXECUTION: ``, WASM-EXECUTION: `Compiled`, CHAIN: `Some("asset-hub-kusama-dev")`, DB CACHE: 1024
-
-// Executed Command:
-// ./target/production/polkadot-parachain
-// benchmark
-// pallet
-// --chain=asset-hub-kusama-dev
-// --wasm-execution=compiled
-// --pallet=pallet_timestamp
-// --no-storage-info
-// --no-median-slopes
-// --no-min-squares
-// --extrinsic=*
-// --steps=50
-// --repeat=20
-// --json
-// --header=./file_header.txt
-// --output=./parachains/runtimes/assets/asset-hub-kusama/src/weights/
-
-#![cfg_attr(rustfmt, rustfmt_skip)]
-#![allow(unused_parens)]
-#![allow(unused_imports)]
-#![allow(missing_docs)]
-
-use frame_support::{traits::Get, weights::Weight};
-use core::marker::PhantomData;
-
-/// Weight functions for `pallet_timestamp`.
-pub struct WeightInfo<T>(PhantomData<T>);
-impl<T: frame_system::Config> pallet_timestamp::WeightInfo for WeightInfo<T> {
- /// Storage: `Timestamp::Now` (r:1 w:1)
- /// Proof: `Timestamp::Now` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`)
- /// Storage: `Aura::CurrentSlot` (r:1 w:0)
- /// Proof: `Aura::CurrentSlot` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`)
- fn set() -> Weight {
- // Proof Size summary in bytes:
- // Measured: `86`
- // Estimated: `1493`
- // Minimum execution time: 9_313_000 picoseconds.
- Weight::from_parts(9_775_000, 0)
- .saturating_add(Weight::from_parts(0, 1493))
- .saturating_add(T::DbWeight::get().reads(2))
- .saturating_add(T::DbWeight::get().writes(1))
- }
- fn on_finalize() -> Weight {
- // Proof Size summary in bytes:
- // Measured: `57`
- // Estimated: `0`
- // Minimum execution time: 3_322_000 picoseconds.
- Weight::from_parts(3_577_000, 0)
- .saturating_add(Weight::from_parts(0, 0))
- }
-}
diff --git a/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/pallet_uniques.rs b/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/pallet_uniques.rs
deleted file mode 100644
index 69d3e773afb..00000000000
--- a/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/pallet_uniques.rs
+++ /dev/null
@@ -1,466 +0,0 @@
-// Copyright (C) Parity Technologies (UK) Ltd.
-// SPDX-License-Identifier: Apache-2.0
-
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-//! Autogenerated weights for `pallet_uniques`
-//!
-//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev
-//! DATE: 2023-07-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
-//! WORST CASE MAP SIZE: `1000000`
-//! HOSTNAME: `runner-ynta1nyy-project-238-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz`
-//! EXECUTION: ``, WASM-EXECUTION: `Compiled`, CHAIN: `Some("asset-hub-kusama-dev")`, DB CACHE: 1024
-
-// Executed Command:
-// ./target/production/polkadot-parachain
-// benchmark
-// pallet
-// --chain=asset-hub-kusama-dev
-// --wasm-execution=compiled
-// --pallet=pallet_uniques
-// --no-storage-info
-// --no-median-slopes
-// --no-min-squares
-// --extrinsic=*
-// --steps=50
-// --repeat=20
-// --json
-// --header=./file_header.txt
-// --output=./parachains/runtimes/assets/asset-hub-kusama/src/weights/
-
-#![cfg_attr(rustfmt, rustfmt_skip)]
-#![allow(unused_parens)]
-#![allow(unused_imports)]
-#![allow(missing_docs)]
-
-use frame_support::{traits::Get, weights::Weight};
-use core::marker::PhantomData;
-
-/// Weight functions for `pallet_uniques`.
-pub struct WeightInfo<T>(PhantomData<T>);
-impl<T: frame_system::Config> pallet_uniques::WeightInfo for WeightInfo<T> {
- /// Storage: `Uniques::Class` (r:1 w:1)
- /// Proof: `Uniques::Class` (`max_values`: None, `max_size`: Some(178), added: 2653, mode: `MaxEncodedLen`)
- /// Storage: `Uniques::ClassAccount` (r:0 w:1)
- /// Proof: `Uniques::ClassAccount` (`max_values`: None, `max_size`: Some(68), added: 2543, mode: `MaxEncodedLen`)
- fn create() -> Weight {
- // Proof Size summary in bytes:
- // Measured: `145`
- // Estimated: `3643`
- // Minimum execution time: 28_845_000 picoseconds.
- Weight::from_parts(29_675_000, 0)
- .saturating_add(Weight::from_parts(0, 3643))
- .saturating_add(T::DbWeight::get().reads(1))
- .saturating_add(T::DbWeight::get().writes(2))
- }
- /// Storage: `Uniques::Class` (r:1 w:1)
- /// Proof: `Uniques::Class` (`max_values`: None, `max_size`: Some(178), added: 2653, mode: `MaxEncodedLen`)
- /// Storage: `Uniques::ClassAccount` (r:0 w:1)
- /// Proof: `Uniques::ClassAccount` (`max_values`: None, `max_size`: Some(68), added: 2543, mode: `MaxEncodedLen`)
- fn force_create() -> Weight {
- // Proof Size summary in bytes:
- // Measured: `42`
- // Estimated: `3643`
- // Minimum execution time: 13_492_000 picoseconds.
- Weight::from_parts(14_049_000, 0)
- .saturating_add(Weight::from_parts(0, 3643))
- .saturating_add(T::DbWeight::get().reads(1))
- .saturating_add(T::DbWeight::get().writes(2))
- }
- /// Storage: `Uniques::Class` (r:1 w:1)
- /// Proof: `Uniques::Class` (`max_values`: None, `max_size`: Some(178), added: 2653, mode: `MaxEncodedLen`)
- /// Storage: `Uniques::Asset` (r:1001 w:1000)
- /// Proof: `Uniques::Asset` (`max_values`: None, `max_size`: Some(122), added: 2597, mode: `MaxEncodedLen`)
- /// Storage: `Uniques::InstanceMetadataOf` (r:1000 w:1000)
- /// Proof: `Uniques::InstanceMetadataOf` (`max_values`: None, `max_size`: Some(187), added: 2662, mode: `MaxEncodedLen`)
- /// Storage: `Uniques::Attribute` (r:1000 w:1000)
- /// Proof: `Uniques::Attribute` (`max_values`: None, `max_size`: Some(172), added: 2647, mode: `MaxEncodedLen`)
- /// Storage: `Uniques::ClassAccount` (r:0 w:1)
- /// Proof: `Uniques::ClassAccount` (`max_values`: None, `max_size`: Some(68), added: 2543, mode: `MaxEncodedLen`)
- /// Storage: `Uniques::ClassMetadataOf` (r:0 w:1)
- /// Proof: `Uniques::ClassMetadataOf` (`max_values`: None, `max_size`: Some(167), added: 2642, mode: `MaxEncodedLen`)
- /// Storage: `Uniques::Account` (r:0 w:1000)
- /// Proof: `Uniques::Account` (`max_values`: None, `max_size`: Some(88), added: 2563, mode: `MaxEncodedLen`)
- /// Storage: `Uniques::CollectionMaxSupply` (r:0 w:1)
- /// Proof: `Uniques::CollectionMaxSupply` (`max_values`: None, `max_size`: Some(24), added: 2499, mode: `MaxEncodedLen`)
- /// The range of component `n` is `[0, 1000]`.
- /// The range of component `m` is `[0, 1000]`. - /// The range of component `a` is `[0, 1000]`. - fn destroy(n: u32, m: u32, a: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `257 + a * (107 ±0) + m * (56 ±0) + n * (76 ±0)` - // Estimated: `3643 + a * (2647 ±0) + m * (2662 ±0) + n * (2597 ±0)` - // Minimum execution time: 2_920_070_000 picoseconds. - Weight::from_parts(2_983_862_000, 0) - .saturating_add(Weight::from_parts(0, 3643)) - // Standard Error: 36_415 - .saturating_add(Weight::from_parts(7_589_778, 0).saturating_mul(n.into())) - // Standard Error: 36_415 - .saturating_add(Weight::from_parts(479_496, 0).saturating_mul(m.into())) - // Standard Error: 36_415 - .saturating_add(Weight::from_parts(562_056, 0).saturating_mul(a.into())) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(n.into()))) - .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(m.into()))) - .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(a.into()))) - .saturating_add(T::DbWeight::get().writes(4)) - .saturating_add(T::DbWeight::get().writes((2_u64).saturating_mul(n.into()))) - .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(m.into()))) - .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(a.into()))) - .saturating_add(Weight::from_parts(0, 2647).saturating_mul(a.into())) - .saturating_add(Weight::from_parts(0, 2662).saturating_mul(m.into())) - .saturating_add(Weight::from_parts(0, 2597).saturating_mul(n.into())) - } - /// Storage: `Uniques::Asset` (r:1 w:1) - /// Proof: `Uniques::Asset` (`max_values`: None, `max_size`: Some(122), added: 2597, mode: `MaxEncodedLen`) - /// Storage: `Uniques::Class` (r:1 w:1) - /// Proof: `Uniques::Class` (`max_values`: None, `max_size`: Some(178), added: 2653, mode: `MaxEncodedLen`) - /// Storage: `Uniques::CollectionMaxSupply` (r:1 w:0) - /// Proof: `Uniques::CollectionMaxSupply` (`max_values`: None, `max_size`: Some(24), added: 2499, mode: `MaxEncodedLen`) - /// Storage: `Uniques::Account` (r:0 w:1) - /// Proof: `Uniques::Account` (`max_values`: None, `max_size`: Some(88), added: 2563, mode: `MaxEncodedLen`) - fn mint() -> Weight { - // Proof Size summary in bytes: - // Measured: `282` - // Estimated: `3643` - // Minimum execution time: 35_329_000 picoseconds. - Weight::from_parts(36_019_000, 0) - .saturating_add(Weight::from_parts(0, 3643)) - .saturating_add(T::DbWeight::get().reads(3)) - .saturating_add(T::DbWeight::get().writes(3)) - } - /// Storage: `Uniques::Class` (r:1 w:1) - /// Proof: `Uniques::Class` (`max_values`: None, `max_size`: Some(178), added: 2653, mode: `MaxEncodedLen`) - /// Storage: `Uniques::Asset` (r:1 w:1) - /// Proof: `Uniques::Asset` (`max_values`: None, `max_size`: Some(122), added: 2597, mode: `MaxEncodedLen`) - /// Storage: `Uniques::Account` (r:0 w:1) - /// Proof: `Uniques::Account` (`max_values`: None, `max_size`: Some(88), added: 2563, mode: `MaxEncodedLen`) - /// Storage: `Uniques::ItemPriceOf` (r:0 w:1) - /// Proof: `Uniques::ItemPriceOf` (`max_values`: None, `max_size`: Some(89), added: 2564, mode: `MaxEncodedLen`) - fn burn() -> Weight { - // Proof Size summary in bytes: - // Measured: `428` - // Estimated: `3643` - // Minimum execution time: 36_474_000 picoseconds. 
- Weight::from_parts(37_190_000, 0) - .saturating_add(Weight::from_parts(0, 3643)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(4)) - } - /// Storage: `Uniques::Class` (r:1 w:0) - /// Proof: `Uniques::Class` (`max_values`: None, `max_size`: Some(178), added: 2653, mode: `MaxEncodedLen`) - /// Storage: `Uniques::Asset` (r:1 w:1) - /// Proof: `Uniques::Asset` (`max_values`: None, `max_size`: Some(122), added: 2597, mode: `MaxEncodedLen`) - /// Storage: `Uniques::Account` (r:0 w:2) - /// Proof: `Uniques::Account` (`max_values`: None, `max_size`: Some(88), added: 2563, mode: `MaxEncodedLen`) - /// Storage: `Uniques::ItemPriceOf` (r:0 w:1) - /// Proof: `Uniques::ItemPriceOf` (`max_values`: None, `max_size`: Some(89), added: 2564, mode: `MaxEncodedLen`) - fn transfer() -> Weight { - // Proof Size summary in bytes: - // Measured: `428` - // Estimated: `3643` - // Minimum execution time: 26_786_000 picoseconds. - Weight::from_parts(27_400_000, 0) - .saturating_add(Weight::from_parts(0, 3643)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(4)) - } - /// Storage: `Uniques::Class` (r:1 w:1) - /// Proof: `Uniques::Class` (`max_values`: None, `max_size`: Some(178), added: 2653, mode: `MaxEncodedLen`) - /// Storage: `Uniques::Asset` (r:5000 w:5000) - /// Proof: `Uniques::Asset` (`max_values`: None, `max_size`: Some(122), added: 2597, mode: `MaxEncodedLen`) - /// The range of component `i` is `[0, 5000]`. - fn redeposit(i: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `738 + i * (76 ±0)` - // Estimated: `3643 + i * (2597 ±0)` - // Minimum execution time: 14_546_000 picoseconds. - Weight::from_parts(14_831_000, 0) - .saturating_add(Weight::from_parts(0, 3643)) - // Standard Error: 24_362 - .saturating_add(Weight::from_parts(17_972_938, 0).saturating_mul(i.into())) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(i.into()))) - .saturating_add(T::DbWeight::get().writes(1)) - .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(i.into()))) - .saturating_add(Weight::from_parts(0, 2597).saturating_mul(i.into())) - } - /// Storage: `Uniques::Asset` (r:1 w:1) - /// Proof: `Uniques::Asset` (`max_values`: None, `max_size`: Some(122), added: 2597, mode: `MaxEncodedLen`) - /// Storage: `Uniques::Class` (r:1 w:0) - /// Proof: `Uniques::Class` (`max_values`: None, `max_size`: Some(178), added: 2653, mode: `MaxEncodedLen`) - fn freeze() -> Weight { - // Proof Size summary in bytes: - // Measured: `428` - // Estimated: `3643` - // Minimum execution time: 18_919_000 picoseconds. - Weight::from_parts(19_547_000, 0) - .saturating_add(Weight::from_parts(0, 3643)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `Uniques::Asset` (r:1 w:1) - /// Proof: `Uniques::Asset` (`max_values`: None, `max_size`: Some(122), added: 2597, mode: `MaxEncodedLen`) - /// Storage: `Uniques::Class` (r:1 w:0) - /// Proof: `Uniques::Class` (`max_values`: None, `max_size`: Some(178), added: 2653, mode: `MaxEncodedLen`) - fn thaw() -> Weight { - // Proof Size summary in bytes: - // Measured: `428` - // Estimated: `3643` - // Minimum execution time: 18_643_000 picoseconds. 
- Weight::from_parts(19_000_000, 0) - .saturating_add(Weight::from_parts(0, 3643)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `Uniques::Class` (r:1 w:1) - /// Proof: `Uniques::Class` (`max_values`: None, `max_size`: Some(178), added: 2653, mode: `MaxEncodedLen`) - fn freeze_collection() -> Weight { - // Proof Size summary in bytes: - // Measured: `282` - // Estimated: `3643` - // Minimum execution time: 13_530_000 picoseconds. - Weight::from_parts(14_165_000, 0) - .saturating_add(Weight::from_parts(0, 3643)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `Uniques::Class` (r:1 w:1) - /// Proof: `Uniques::Class` (`max_values`: None, `max_size`: Some(178), added: 2653, mode: `MaxEncodedLen`) - fn thaw_collection() -> Weight { - // Proof Size summary in bytes: - // Measured: `282` - // Estimated: `3643` - // Minimum execution time: 13_523_000 picoseconds. - Weight::from_parts(14_055_000, 0) - .saturating_add(Weight::from_parts(0, 3643)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `Uniques::OwnershipAcceptance` (r:1 w:1) - /// Proof: `Uniques::OwnershipAcceptance` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) - /// Storage: `Uniques::Class` (r:1 w:1) - /// Proof: `Uniques::Class` (`max_values`: None, `max_size`: Some(178), added: 2653, mode: `MaxEncodedLen`) - /// Storage: `Uniques::ClassAccount` (r:0 w:2) - /// Proof: `Uniques::ClassAccount` (`max_values`: None, `max_size`: Some(68), added: 2543, mode: `MaxEncodedLen`) - fn transfer_ownership() -> Weight { - // Proof Size summary in bytes: - // Measured: `356` - // Estimated: `3643` - // Minimum execution time: 22_131_000 picoseconds. - Weight::from_parts(22_628_000, 0) - .saturating_add(Weight::from_parts(0, 3643)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(4)) - } - /// Storage: `Uniques::Class` (r:1 w:1) - /// Proof: `Uniques::Class` (`max_values`: None, `max_size`: Some(178), added: 2653, mode: `MaxEncodedLen`) - fn set_team() -> Weight { - // Proof Size summary in bytes: - // Measured: `282` - // Estimated: `3643` - // Minimum execution time: 13_841_000 picoseconds. - Weight::from_parts(14_408_000, 0) - .saturating_add(Weight::from_parts(0, 3643)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `Uniques::Class` (r:1 w:1) - /// Proof: `Uniques::Class` (`max_values`: None, `max_size`: Some(178), added: 2653, mode: `MaxEncodedLen`) - /// Storage: `Uniques::ClassAccount` (r:0 w:1) - /// Proof: `Uniques::ClassAccount` (`max_values`: None, `max_size`: Some(68), added: 2543, mode: `MaxEncodedLen`) - fn force_item_status() -> Weight { - // Proof Size summary in bytes: - // Measured: `282` - // Estimated: `3643` - // Minimum execution time: 16_954_000 picoseconds. 
- Weight::from_parts(17_482_000, 0) - .saturating_add(Weight::from_parts(0, 3643)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `Uniques::Class` (r:1 w:1) - /// Proof: `Uniques::Class` (`max_values`: None, `max_size`: Some(178), added: 2653, mode: `MaxEncodedLen`) - /// Storage: `Uniques::InstanceMetadataOf` (r:1 w:0) - /// Proof: `Uniques::InstanceMetadataOf` (`max_values`: None, `max_size`: Some(187), added: 2662, mode: `MaxEncodedLen`) - /// Storage: `Uniques::Attribute` (r:1 w:1) - /// Proof: `Uniques::Attribute` (`max_values`: None, `max_size`: Some(172), added: 2647, mode: `MaxEncodedLen`) - fn set_attribute() -> Weight { - // Proof Size summary in bytes: - // Measured: `559` - // Estimated: `3652` - // Minimum execution time: 38_493_000 picoseconds. - Weight::from_parts(39_513_000, 0) - .saturating_add(Weight::from_parts(0, 3652)) - .saturating_add(T::DbWeight::get().reads(3)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `Uniques::Class` (r:1 w:1) - /// Proof: `Uniques::Class` (`max_values`: None, `max_size`: Some(178), added: 2653, mode: `MaxEncodedLen`) - /// Storage: `Uniques::InstanceMetadataOf` (r:1 w:0) - /// Proof: `Uniques::InstanceMetadataOf` (`max_values`: None, `max_size`: Some(187), added: 2662, mode: `MaxEncodedLen`) - /// Storage: `Uniques::Attribute` (r:1 w:1) - /// Proof: `Uniques::Attribute` (`max_values`: None, `max_size`: Some(172), added: 2647, mode: `MaxEncodedLen`) - fn clear_attribute() -> Weight { - // Proof Size summary in bytes: - // Measured: `756` - // Estimated: `3652` - // Minimum execution time: 37_918_000 picoseconds. - Weight::from_parts(38_666_000, 0) - .saturating_add(Weight::from_parts(0, 3652)) - .saturating_add(T::DbWeight::get().reads(3)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `Uniques::Class` (r:1 w:1) - /// Proof: `Uniques::Class` (`max_values`: None, `max_size`: Some(178), added: 2653, mode: `MaxEncodedLen`) - /// Storage: `Uniques::InstanceMetadataOf` (r:1 w:1) - /// Proof: `Uniques::InstanceMetadataOf` (`max_values`: None, `max_size`: Some(187), added: 2662, mode: `MaxEncodedLen`) - fn set_metadata() -> Weight { - // Proof Size summary in bytes: - // Measured: `348` - // Estimated: `3652` - // Minimum execution time: 29_810_000 picoseconds. - Weight::from_parts(30_363_000, 0) - .saturating_add(Weight::from_parts(0, 3652)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `Uniques::Class` (r:1 w:1) - /// Proof: `Uniques::Class` (`max_values`: None, `max_size`: Some(178), added: 2653, mode: `MaxEncodedLen`) - /// Storage: `Uniques::InstanceMetadataOf` (r:1 w:1) - /// Proof: `Uniques::InstanceMetadataOf` (`max_values`: None, `max_size`: Some(187), added: 2662, mode: `MaxEncodedLen`) - fn clear_metadata() -> Weight { - // Proof Size summary in bytes: - // Measured: `559` - // Estimated: `3652` - // Minimum execution time: 30_877_000 picoseconds. 
- Weight::from_parts(31_430_000, 0) - .saturating_add(Weight::from_parts(0, 3652)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `Uniques::Class` (r:1 w:1) - /// Proof: `Uniques::Class` (`max_values`: None, `max_size`: Some(178), added: 2653, mode: `MaxEncodedLen`) - /// Storage: `Uniques::ClassMetadataOf` (r:1 w:1) - /// Proof: `Uniques::ClassMetadataOf` (`max_values`: None, `max_size`: Some(167), added: 2642, mode: `MaxEncodedLen`) - fn set_collection_metadata() -> Weight { - // Proof Size summary in bytes: - // Measured: `282` - // Estimated: `3643` - // Minimum execution time: 30_478_000 picoseconds. - Weight::from_parts(31_065_000, 0) - .saturating_add(Weight::from_parts(0, 3643)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `Uniques::Class` (r:1 w:0) - /// Proof: `Uniques::Class` (`max_values`: None, `max_size`: Some(178), added: 2653, mode: `MaxEncodedLen`) - /// Storage: `Uniques::ClassMetadataOf` (r:1 w:1) - /// Proof: `Uniques::ClassMetadataOf` (`max_values`: None, `max_size`: Some(167), added: 2642, mode: `MaxEncodedLen`) - fn clear_collection_metadata() -> Weight { - // Proof Size summary in bytes: - // Measured: `473` - // Estimated: `3643` - // Minimum execution time: 29_582_000 picoseconds. - Weight::from_parts(30_160_000, 0) - .saturating_add(Weight::from_parts(0, 3643)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `Uniques::Class` (r:1 w:0) - /// Proof: `Uniques::Class` (`max_values`: None, `max_size`: Some(178), added: 2653, mode: `MaxEncodedLen`) - /// Storage: `Uniques::Asset` (r:1 w:1) - /// Proof: `Uniques::Asset` (`max_values`: None, `max_size`: Some(122), added: 2597, mode: `MaxEncodedLen`) - fn approve_transfer() -> Weight { - // Proof Size summary in bytes: - // Measured: `428` - // Estimated: `3643` - // Minimum execution time: 19_328_000 picoseconds. - Weight::from_parts(19_866_000, 0) - .saturating_add(Weight::from_parts(0, 3643)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `Uniques::Class` (r:1 w:0) - /// Proof: `Uniques::Class` (`max_values`: None, `max_size`: Some(178), added: 2653, mode: `MaxEncodedLen`) - /// Storage: `Uniques::Asset` (r:1 w:1) - /// Proof: `Uniques::Asset` (`max_values`: None, `max_size`: Some(122), added: 2597, mode: `MaxEncodedLen`) - fn cancel_approval() -> Weight { - // Proof Size summary in bytes: - // Measured: `461` - // Estimated: `3643` - // Minimum execution time: 19_131_000 picoseconds. - Weight::from_parts(19_569_000, 0) - .saturating_add(Weight::from_parts(0, 3643)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `Uniques::OwnershipAcceptance` (r:1 w:1) - /// Proof: `Uniques::OwnershipAcceptance` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) - fn set_accept_ownership() -> Weight { - // Proof Size summary in bytes: - // Measured: `42` - // Estimated: `3517` - // Minimum execution time: 15_212_000 picoseconds. 
- Weight::from_parts(15_691_000, 0) - .saturating_add(Weight::from_parts(0, 3517)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `Uniques::CollectionMaxSupply` (r:1 w:1) - /// Proof: `Uniques::CollectionMaxSupply` (`max_values`: None, `max_size`: Some(24), added: 2499, mode: `MaxEncodedLen`) - /// Storage: `Uniques::Class` (r:1 w:0) - /// Proof: `Uniques::Class` (`max_values`: None, `max_size`: Some(178), added: 2653, mode: `MaxEncodedLen`) - fn set_collection_max_supply() -> Weight { - // Proof Size summary in bytes: - // Measured: `282` - // Estimated: `3643` - // Minimum execution time: 16_290_000 picoseconds. - Weight::from_parts(16_654_000, 0) - .saturating_add(Weight::from_parts(0, 3643)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `Uniques::Asset` (r:1 w:0) - /// Proof: `Uniques::Asset` (`max_values`: None, `max_size`: Some(122), added: 2597, mode: `MaxEncodedLen`) - /// Storage: `Uniques::ItemPriceOf` (r:0 w:1) - /// Proof: `Uniques::ItemPriceOf` (`max_values`: None, `max_size`: Some(89), added: 2564, mode: `MaxEncodedLen`) - fn set_price() -> Weight { - // Proof Size summary in bytes: - // Measured: `259` - // Estimated: `3587` - // Minimum execution time: 16_095_000 picoseconds. - Weight::from_parts(16_555_000, 0) - .saturating_add(Weight::from_parts(0, 3587)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `Uniques::Asset` (r:1 w:1) - /// Proof: `Uniques::Asset` (`max_values`: None, `max_size`: Some(122), added: 2597, mode: `MaxEncodedLen`) - /// Storage: `Uniques::ItemPriceOf` (r:1 w:1) - /// Proof: `Uniques::ItemPriceOf` (`max_values`: None, `max_size`: Some(89), added: 2564, mode: `MaxEncodedLen`) - /// Storage: `Uniques::Class` (r:1 w:0) - /// Proof: `Uniques::Class` (`max_values`: None, `max_size`: Some(178), added: 2653, mode: `MaxEncodedLen`) - /// Storage: `Uniques::Account` (r:0 w:2) - /// Proof: `Uniques::Account` (`max_values`: None, `max_size`: Some(88), added: 2563, mode: `MaxEncodedLen`) - fn buy_item() -> Weight { - // Proof Size summary in bytes: - // Measured: `540` - // Estimated: `3643` - // Minimum execution time: 35_506_000 picoseconds. - Weight::from_parts(36_305_000, 0) - .saturating_add(Weight::from_parts(0, 3643)) - .saturating_add(T::DbWeight::get().reads(3)) - .saturating_add(T::DbWeight::get().writes(4)) - } -} diff --git a/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/pallet_utility.rs b/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/pallet_utility.rs deleted file mode 100644 index e6c3e1295ef..00000000000 --- a/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/pallet_utility.rs +++ /dev/null @@ -1,101 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! 
Autogenerated weights for `pallet_utility` -//! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-07-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-ynta1nyy-project-238-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! EXECUTION: ``, WASM-EXECUTION: `Compiled`, CHAIN: `Some("asset-hub-kusama-dev")`, DB CACHE: 1024 - -// Executed Command: -// ./target/production/polkadot-parachain -// benchmark -// pallet -// --chain=asset-hub-kusama-dev -// --wasm-execution=compiled -// --pallet=pallet_utility -// --no-storage-info -// --no-median-slopes -// --no-min-squares -// --extrinsic=* -// --steps=50 -// --repeat=20 -// --json -// --header=./file_header.txt -// --output=./parachains/runtimes/assets/asset-hub-kusama/src/weights/ - -#![cfg_attr(rustfmt, rustfmt_skip)] -#![allow(unused_parens)] -#![allow(unused_imports)] -#![allow(missing_docs)] - -use frame_support::{traits::Get, weights::Weight}; -use core::marker::PhantomData; - -/// Weight functions for `pallet_utility`. -pub struct WeightInfo(PhantomData); -impl pallet_utility::WeightInfo for WeightInfo { - /// The range of component `c` is `[0, 1000]`. - fn batch(c: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 7_103_000 picoseconds. - Weight::from_parts(7_226_000, 0) - .saturating_add(Weight::from_parts(0, 0)) - // Standard Error: 2_732 - .saturating_add(Weight::from_parts(6_560_347, 0).saturating_mul(c.into())) - } - fn as_derivative() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 5_208_000 picoseconds. - Weight::from_parts(5_480_000, 0) - .saturating_add(Weight::from_parts(0, 0)) - } - /// The range of component `c` is `[0, 1000]`. - fn batch_all(c: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 7_070_000 picoseconds. - Weight::from_parts(1_321_270, 0) - .saturating_add(Weight::from_parts(0, 0)) - // Standard Error: 3_454 - .saturating_add(Weight::from_parts(6_864_640, 0).saturating_mul(c.into())) - } - fn dispatch_as() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 9_255_000 picoseconds. - Weight::from_parts(9_683_000, 0) - .saturating_add(Weight::from_parts(0, 0)) - } - /// The range of component `c` is `[0, 1000]`. - fn force_batch(c: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 6_852_000 picoseconds. - Weight::from_parts(7_007_000, 0) - .saturating_add(Weight::from_parts(0, 0)) - // Standard Error: 1_745 - .saturating_add(Weight::from_parts(6_562_902, 0).saturating_mul(c.into())) - } -} diff --git a/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/pallet_xcm.rs b/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/pallet_xcm.rs deleted file mode 100644 index 1e4a723e10f..00000000000 --- a/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/pallet_xcm.rs +++ /dev/null @@ -1,324 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Cumulus. - -// Cumulus is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. 
- -// Cumulus is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Cumulus. If not, see . - -//! Autogenerated weights for `pallet_xcm` -//! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-11-10, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-yprdrvc7-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("asset-hub-kusama-dev")`, DB CACHE: 1024 - -// Executed Command: -// target/production/polkadot-parachain -// benchmark -// pallet -// --steps=50 -// --repeat=20 -// --extrinsic=* -// --wasm-execution=compiled -// --heap-pages=4096 -// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json -// --pallet=pallet_xcm -// --chain=asset-hub-kusama-dev -// --header=./cumulus/file_header.txt -// --output=./cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/ - -#![cfg_attr(rustfmt, rustfmt_skip)] -#![allow(unused_parens)] -#![allow(unused_imports)] -#![allow(missing_docs)] - -use frame_support::{traits::Get, weights::Weight}; -use core::marker::PhantomData; - -/// Weight functions for `pallet_xcm`. -pub struct WeightInfo(PhantomData); -impl pallet_xcm::WeightInfo for WeightInfo { - /// Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) - /// Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) - /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) - /// Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) - /// Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) - /// Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) - /// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - fn send() -> Weight { - // Proof Size summary in bytes: - // Measured: `145` - // Estimated: `3610` - // Minimum execution time: 25_043_000 picoseconds. - Weight::from_parts(25_670_000, 0) - .saturating_add(Weight::from_parts(0, 3610)) - .saturating_add(T::DbWeight::get().reads(6)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `ParachainInfo::ParachainId` (r:1 w:0) - /// Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - fn teleport_assets() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `1489` - // Minimum execution time: 18_893_000 picoseconds. 
- Weight::from_parts(19_261_000, 0) - .saturating_add(Weight::from_parts(0, 1489)) - .saturating_add(T::DbWeight::get().reads(1)) - } - /// Storage: `ParachainInfo::ParachainId` (r:1 w:0) - /// Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - fn reserve_transfer_assets() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `1489` - // Minimum execution time: 14_107_000 picoseconds. - Weight::from_parts(14_500_000, 0) - .saturating_add(Weight::from_parts(0, 1489)) - .saturating_add(T::DbWeight::get().reads(1)) - } - /// Storage: `Benchmark::Override` (r:0 w:0) - /// Proof: `Benchmark::Override` (`max_values`: None, `max_size`: None, mode: `Measured`) - fn execute() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 18_446_744_073_709_551_000 picoseconds. - Weight::from_parts(18_446_744_073_709_551_000, 0) - .saturating_add(Weight::from_parts(0, 0)) - } - /// Storage: `PolkadotXcm::SupportedVersion` (r:0 w:1) - /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) - fn force_xcm_version() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 7_175_000 picoseconds. - Weight::from_parts(7_493_000, 0) - .saturating_add(Weight::from_parts(0, 0)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `PolkadotXcm::SafeXcmVersion` (r:0 w:1) - /// Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - fn force_default_xcm_version() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 2_162_000 picoseconds. 
- Weight::from_parts(2_278_000, 0) - .saturating_add(Weight::from_parts(0, 0)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `PolkadotXcm::VersionNotifiers` (r:1 w:1) - /// Proof: `PolkadotXcm::VersionNotifiers` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// Storage: `PolkadotXcm::QueryCounter` (r:1 w:1) - /// Proof: `PolkadotXcm::QueryCounter` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) - /// Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) - /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) - /// Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) - /// Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) - /// Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) - /// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `PolkadotXcm::Queries` (r:0 w:1) - /// Proof: `PolkadotXcm::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`) - fn force_subscribe_version_notify() -> Weight { - // Proof Size summary in bytes: - // Measured: `145` - // Estimated: `3610` - // Minimum execution time: 29_144_000 picoseconds. - Weight::from_parts(30_134_000, 0) - .saturating_add(Weight::from_parts(0, 3610)) - .saturating_add(T::DbWeight::get().reads(8)) - .saturating_add(T::DbWeight::get().writes(5)) - } - /// Storage: `PolkadotXcm::VersionNotifiers` (r:1 w:1) - /// Proof: `PolkadotXcm::VersionNotifiers` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) - /// Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) - /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) - /// Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) - /// Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) - /// Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) - /// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `PolkadotXcm::Queries` (r:0 w:1) - /// Proof: `PolkadotXcm::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`) - fn force_unsubscribe_version_notify() -> Weight { - // Proof Size summary in bytes: - // Measured: `363` - // Estimated: `3828` - // Minimum execution time: 31_522_000 picoseconds. 
- Weight::from_parts(32_679_000, 0) - .saturating_add(Weight::from_parts(0, 3828)) - .saturating_add(T::DbWeight::get().reads(7)) - .saturating_add(T::DbWeight::get().writes(4)) - } - /// Storage: `PolkadotXcm::XcmExecutionSuspended` (r:0 w:1) - /// Proof: `PolkadotXcm::XcmExecutionSuspended` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - fn force_suspension() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 2_338_000 picoseconds. - Weight::from_parts(2_494_000, 0) - .saturating_add(Weight::from_parts(0, 0)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `PolkadotXcm::SupportedVersion` (r:4 w:2) - /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) - fn migrate_supported_version() -> Weight { - // Proof Size summary in bytes: - // Measured: `162` - // Estimated: `11052` - // Minimum execution time: 17_315_000 picoseconds. - Weight::from_parts(17_787_000, 0) - .saturating_add(Weight::from_parts(0, 11052)) - .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `PolkadotXcm::VersionNotifiers` (r:4 w:2) - /// Proof: `PolkadotXcm::VersionNotifiers` (`max_values`: None, `max_size`: None, mode: `Measured`) - fn migrate_version_notifiers() -> Weight { - // Proof Size summary in bytes: - // Measured: `166` - // Estimated: `11056` - // Minimum execution time: 17_273_000 picoseconds. - Weight::from_parts(17_712_000, 0) - .saturating_add(Weight::from_parts(0, 11056)) - .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:5 w:0) - /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) - fn already_notified_target() -> Weight { - // Proof Size summary in bytes: - // Measured: `173` - // Estimated: `13538` - // Minimum execution time: 18_395_000 picoseconds. - Weight::from_parts(19_095_000, 0) - .saturating_add(Weight::from_parts(0, 13538)) - .saturating_add(T::DbWeight::get().reads(5)) - } - /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:2 w:1) - /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) - /// Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) - /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) - /// Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) - /// Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) - /// Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) - /// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - fn notify_current_targets() -> Weight { - // Proof Size summary in bytes: - // Measured: `212` - // Estimated: `6152` - // Minimum execution time: 27_343_000 picoseconds. 
- Weight::from_parts(28_068_000, 0) - .saturating_add(Weight::from_parts(0, 6152)) - .saturating_add(T::DbWeight::get().reads(8)) - .saturating_add(T::DbWeight::get().writes(3)) - } - /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:3 w:0) - /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) - fn notify_target_migration_fail() -> Weight { - // Proof Size summary in bytes: - // Measured: `206` - // Estimated: `8621` - // Minimum execution time: 9_156_000 picoseconds. - Weight::from_parts(9_552_000, 0) - .saturating_add(Weight::from_parts(0, 8621)) - .saturating_add(T::DbWeight::get().reads(3)) - } - /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:4 w:2) - /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) - fn migrate_version_notify_targets() -> Weight { - // Proof Size summary in bytes: - // Measured: `173` - // Estimated: `11063` - // Minimum execution time: 17_454_000 picoseconds. - Weight::from_parts(17_831_000, 0) - .saturating_add(Weight::from_parts(0, 11063)) - .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:4 w:2) - /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) - /// Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) - /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) - /// Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) - /// Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) - /// Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) - /// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - fn migrate_and_notify_old_targets() -> Weight { - // Proof Size summary in bytes: - // Measured: `215` - // Estimated: `11105` - // Minimum execution time: 34_299_000 picoseconds. - Weight::from_parts(35_156_000, 0) - .saturating_add(Weight::from_parts(0, 11105)) - .saturating_add(T::DbWeight::get().reads(10)) - .saturating_add(T::DbWeight::get().writes(4)) - } - /// Storage: `PolkadotXcm::QueryCounter` (r:1 w:1) - /// Proof: `PolkadotXcm::QueryCounter` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `PolkadotXcm::Queries` (r:0 w:1) - /// Proof: `PolkadotXcm::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`) - fn new_query() -> Weight { - // Proof Size summary in bytes: - // Measured: `103` - // Estimated: `1588` - // Minimum execution time: 4_508_000 picoseconds. 
- Weight::from_parts(4_702_000, 0) - .saturating_add(Weight::from_parts(0, 1588)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `PolkadotXcm::Queries` (r:1 w:1) - /// Proof: `PolkadotXcm::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`) - fn take_response() -> Weight { - // Proof Size summary in bytes: - // Measured: `7740` - // Estimated: `11205` - // Minimum execution time: 26_557_000 picoseconds. - Weight::from_parts(26_980_000, 0) - .saturating_add(Weight::from_parts(0, 11205)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } -} diff --git a/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/paritydb_weights.rs b/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/paritydb_weights.rs deleted file mode 100644 index 25679703831..00000000000 --- a/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/paritydb_weights.rs +++ /dev/null @@ -1,63 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -pub mod constants { - use frame_support::{ - parameter_types, - weights::{constants, RuntimeDbWeight}, - }; - - parameter_types! { - /// `ParityDB` can be enabled with a feature flag, but is still experimental. These weights - /// are available for brave runtime engineers who may want to try this out as default. - pub const ParityDbWeight: RuntimeDbWeight = RuntimeDbWeight { - read: 8_000 * constants::WEIGHT_REF_TIME_PER_NANOS, - write: 50_000 * constants::WEIGHT_REF_TIME_PER_NANOS, - }; - } - - #[cfg(test)] - mod test_db_weights { - use super::constants::ParityDbWeight as W; - use frame_support::weights::constants; - - /// Checks that all weights exist and have sane values. - // NOTE: If this test fails but you are sure that the generated values are fine, - // you can delete it. - #[test] - fn sane() { - // At least 1 µs. - assert!( - W::get().reads(1).ref_time() >= constants::WEIGHT_REF_TIME_PER_MICROS, - "Read weight should be at least 1 µs." - ); - assert!( - W::get().writes(1).ref_time() >= constants::WEIGHT_REF_TIME_PER_MICROS, - "Write weight should be at least 1 µs." - ); - // At most 1 ms. - assert!( - W::get().reads(1).ref_time() <= constants::WEIGHT_REF_TIME_PER_MILLIS, - "Read weight should be at most 1 ms." - ); - assert!( - W::get().writes(1).ref_time() <= constants::WEIGHT_REF_TIME_PER_MILLIS, - "Write weight should be at most 1 ms." - ); - } - } -} diff --git a/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/rocksdb_weights.rs b/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/rocksdb_weights.rs deleted file mode 100644 index 3dd817aa6f1..00000000000 --- a/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/rocksdb_weights.rs +++ /dev/null @@ -1,63 +0,0 @@ -// This file is part of Substrate. 
- -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -pub mod constants { - use frame_support::{ - parameter_types, - weights::{constants, RuntimeDbWeight}, - }; - - parameter_types! { - /// By default, Substrate uses `RocksDB`, so this will be the weight used throughout - /// the runtime. - pub const RocksDbWeight: RuntimeDbWeight = RuntimeDbWeight { - read: 25_000 * constants::WEIGHT_REF_TIME_PER_NANOS, - write: 100_000 * constants::WEIGHT_REF_TIME_PER_NANOS, - }; - } - - #[cfg(test)] - mod test_db_weights { - use super::constants::RocksDbWeight as W; - use frame_support::weights::constants; - - /// Checks that all weights exist and have sane values. - // NOTE: If this test fails but you are sure that the generated values are fine, - // you can delete it. - #[test] - fn sane() { - // At least 1 µs. - assert!( - W::get().reads(1).ref_time() >= constants::WEIGHT_REF_TIME_PER_MICROS, - "Read weight should be at least 1 µs." - ); - assert!( - W::get().writes(1).ref_time() >= constants::WEIGHT_REF_TIME_PER_MICROS, - "Write weight should be at least 1 µs." - ); - // At most 1 ms. - assert!( - W::get().reads(1).ref_time() <= constants::WEIGHT_REF_TIME_PER_MILLIS, - "Read weight should be at most 1 ms." - ); - assert!( - W::get().writes(1).ref_time() <= constants::WEIGHT_REF_TIME_PER_MILLIS, - "Write weight should be at most 1 ms." - ); - } - } -} diff --git a/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/xcm/mod.rs b/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/xcm/mod.rs deleted file mode 100644 index 405d7c72e55..00000000000 --- a/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/xcm/mod.rs +++ /dev/null @@ -1,244 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
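For context on the `RuntimeDbWeight` constants in the two files removed above: the sketch below is a minimal, hypothetical example (the `ExampleDbWeight` name is not from the deleted code; its values mirror the deleted `RocksDbWeight`) showing, with only the `frame_support` `parameter_types!`/`RuntimeDbWeight` API those files already use, how the per-operation nanosecond figures turn read/write counts into `ref_time`.

```rust
// Hypothetical sketch: how a `RuntimeDbWeight` constant like the removed
// `ParityDbWeight`/`RocksDbWeight` converts operation counts into weight.
// DB weights only contribute `ref_time`; they leave proof size untouched.
use frame_support::{
	parameter_types,
	weights::{constants::WEIGHT_REF_TIME_PER_NANOS, RuntimeDbWeight, Weight},
};

parameter_types! {
	// Values chosen to mirror the deleted `RocksDbWeight` definition.
	pub const ExampleDbWeight: RuntimeDbWeight = RuntimeDbWeight {
		read: 25_000 * WEIGHT_REF_TIME_PER_NANOS,
		write: 100_000 * WEIGHT_REF_TIME_PER_NANOS,
	};
}

fn main() {
	// Two reads and one write, as a benchmarked call might report.
	let w: Weight = ExampleDbWeight::get()
		.reads(2)
		.saturating_add(ExampleDbWeight::get().writes(1));
	// 2 * 25_000 ns + 1 * 100_000 ns of ref_time.
	assert_eq!(w.ref_time(), (2 * 25_000 + 100_000) * WEIGHT_REF_TIME_PER_NANOS);
	assert_eq!(w.proof_size(), 0);
}
```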
- -mod pallet_xcm_benchmarks_fungible; -mod pallet_xcm_benchmarks_generic; - -use crate::{xcm_config::MaxAssetsIntoHolding, Runtime}; -use frame_support::weights::Weight; -use pallet_xcm_benchmarks_fungible::WeightInfo as XcmFungibleWeight; -use pallet_xcm_benchmarks_generic::WeightInfo as XcmGeneric; -use sp_std::prelude::*; -use xcm::{latest::prelude::*, DoubleEncoded}; - -trait WeighMultiAssets { - fn weigh_multi_assets(&self, weight: Weight) -> Weight; -} - -const MAX_ASSETS: u64 = 100; - -impl WeighMultiAssets for MultiAssetFilter { - fn weigh_multi_assets(&self, weight: Weight) -> Weight { - match self { - Self::Definite(assets) => weight.saturating_mul(assets.inner().iter().count() as u64), - Self::Wild(asset) => match asset { - All => weight.saturating_mul(MAX_ASSETS), - AllOf { fun, .. } => match fun { - WildFungibility::Fungible => weight, - // Magic number 2 has to do with the fact that we could have up to 2 times - // MaxAssetsIntoHolding in the worst-case scenario. - WildFungibility::NonFungible => - weight.saturating_mul((MaxAssetsIntoHolding::get() * 2) as u64), - }, - AllCounted(count) => weight.saturating_mul(MAX_ASSETS.min(*count as u64)), - AllOfCounted { count, .. } => weight.saturating_mul(MAX_ASSETS.min(*count as u64)), - }, - } - } -} - -impl WeighMultiAssets for MultiAssets { - fn weigh_multi_assets(&self, weight: Weight) -> Weight { - weight.saturating_mul(self.inner().iter().count() as u64) - } -} - -pub struct AssetHubKusamaXcmWeight<Call>(core::marker::PhantomData<Call>); -impl<Call> XcmWeightInfo<Call> for AssetHubKusamaXcmWeight<Call> { - fn withdraw_asset(assets: &MultiAssets) -> Weight { - assets.weigh_multi_assets(XcmFungibleWeight::<Runtime>::withdraw_asset()) - } - fn reserve_asset_deposited(assets: &MultiAssets) -> Weight { - assets.weigh_multi_assets(XcmFungibleWeight::<Runtime>::reserve_asset_deposited()) - } - fn receive_teleported_asset(assets: &MultiAssets) -> Weight { - assets.weigh_multi_assets(XcmFungibleWeight::<Runtime>::receive_teleported_asset()) - } - fn query_response( - _query_id: &u64, - _response: &Response, - _max_weight: &Weight, - _querier: &Option<MultiLocation>, - ) -> Weight { - XcmGeneric::<Runtime>::query_response() - } - fn transfer_asset(assets: &MultiAssets, _dest: &MultiLocation) -> Weight { - assets.weigh_multi_assets(XcmFungibleWeight::<Runtime>::transfer_asset()) - } - fn transfer_reserve_asset( - assets: &MultiAssets, - _dest: &MultiLocation, - _xcm: &Xcm<()>, - ) -> Weight { - assets.weigh_multi_assets(XcmFungibleWeight::<Runtime>::transfer_reserve_asset()) - } - fn transact( - _origin_type: &OriginKind, - _require_weight_at_most: &Weight, - _call: &DoubleEncoded<Call>, - ) -> Weight { - XcmGeneric::<Runtime>::transact() - } - fn hrmp_new_channel_open_request( - _sender: &u32, - _max_message_size: &u32, - _max_capacity: &u32, - ) -> Weight { - // XCM Executor does not currently support HRMP channel operations - Weight::MAX - } - fn hrmp_channel_accepted(_recipient: &u32) -> Weight { - // XCM Executor does not currently support HRMP channel operations - Weight::MAX - } - fn hrmp_channel_closing(_initiator: &u32, _sender: &u32, _recipient: &u32) -> Weight { - // XCM Executor does not currently support HRMP channel operations - Weight::MAX - } - fn clear_origin() -> Weight { - XcmGeneric::<Runtime>::clear_origin() - } - fn descend_origin(_who: &InteriorMultiLocation) -> Weight { - XcmGeneric::<Runtime>::descend_origin() - } - fn report_error(_query_response_info: &QueryResponseInfo) -> Weight { - XcmGeneric::<Runtime>::report_error() - } - - fn deposit_asset(assets: &MultiAssetFilter, _dest: &MultiLocation) -> Weight { -
assets.weigh_multi_assets(XcmFungibleWeight::<Runtime>::deposit_asset()) - } - fn deposit_reserve_asset( - assets: &MultiAssetFilter, - _dest: &MultiLocation, - _xcm: &Xcm<()>, - ) -> Weight { - assets.weigh_multi_assets(XcmFungibleWeight::<Runtime>::deposit_reserve_asset()) - } - fn exchange_asset(_give: &MultiAssetFilter, _receive: &MultiAssets, _maximal: &bool) -> Weight { - Weight::MAX - } - fn initiate_reserve_withdraw( - assets: &MultiAssetFilter, - _reserve: &MultiLocation, - _xcm: &Xcm<()>, - ) -> Weight { - assets.weigh_multi_assets(XcmFungibleWeight::<Runtime>::initiate_reserve_withdraw()) - } - fn initiate_teleport( - assets: &MultiAssetFilter, - _dest: &MultiLocation, - _xcm: &Xcm<()>, - ) -> Weight { - assets.weigh_multi_assets(XcmFungibleWeight::<Runtime>::initiate_teleport()) - } - fn report_holding(_response_info: &QueryResponseInfo, _assets: &MultiAssetFilter) -> Weight { - XcmGeneric::<Runtime>::report_holding() - } - fn buy_execution(_fees: &MultiAsset, _weight_limit: &WeightLimit) -> Weight { - XcmGeneric::<Runtime>::buy_execution() - } - fn refund_surplus() -> Weight { - XcmGeneric::<Runtime>::refund_surplus() - } - fn set_error_handler(_xcm: &Xcm<Call>) -> Weight { - XcmGeneric::<Runtime>::set_error_handler() - } - fn set_appendix(_xcm: &Xcm<Call>) -> Weight { - XcmGeneric::<Runtime>::set_appendix() - } - fn clear_error() -> Weight { - XcmGeneric::<Runtime>::clear_error() - } - fn claim_asset(_assets: &MultiAssets, _ticket: &MultiLocation) -> Weight { - XcmGeneric::<Runtime>::claim_asset() - } - fn trap(_code: &u64) -> Weight { - XcmGeneric::<Runtime>::trap() - } - fn subscribe_version(_query_id: &QueryId, _max_response_weight: &Weight) -> Weight { - XcmGeneric::<Runtime>::subscribe_version() - } - fn unsubscribe_version() -> Weight { - XcmGeneric::<Runtime>::unsubscribe_version() - } - fn burn_asset(assets: &MultiAssets) -> Weight { - assets.weigh_multi_assets(XcmGeneric::<Runtime>::burn_asset()) - } - fn expect_asset(assets: &MultiAssets) -> Weight { - assets.weigh_multi_assets(XcmGeneric::<Runtime>::expect_asset()) - } - fn expect_origin(_origin: &Option<MultiLocation>) -> Weight { - XcmGeneric::<Runtime>::expect_origin() - } - fn expect_error(_error: &Option<(u32, XcmError)>) -> Weight { - XcmGeneric::<Runtime>::expect_error() - } - fn expect_transact_status(_transact_status: &MaybeErrorCode) -> Weight { - XcmGeneric::<Runtime>::expect_transact_status() - } - fn query_pallet(_module_name: &Vec<u8>, _response_info: &QueryResponseInfo) -> Weight { - XcmGeneric::<Runtime>::query_pallet() - } - fn expect_pallet( - _index: &u32, - _name: &Vec<u8>, - _module_name: &Vec<u8>, - _crate_major: &u32, - _min_crate_minor: &u32, - ) -> Weight { - XcmGeneric::<Runtime>::expect_pallet() - } - fn report_transact_status(_response_info: &QueryResponseInfo) -> Weight { - XcmGeneric::<Runtime>::report_transact_status() - } - fn clear_transact_status() -> Weight { - XcmGeneric::<Runtime>::clear_transact_status() - } - fn universal_origin(_: &Junction) -> Weight { - Weight::MAX - } - fn export_message(_: &NetworkId, _: &Junctions, _: &Xcm<()>) -> Weight { - Weight::MAX - } - fn lock_asset(_: &MultiAsset, _: &MultiLocation) -> Weight { - Weight::MAX - } - fn unlock_asset(_: &MultiAsset, _: &MultiLocation) -> Weight { - Weight::MAX - } - fn note_unlockable(_: &MultiAsset, _: &MultiLocation) -> Weight { - Weight::MAX - } - fn request_unlock(_: &MultiAsset, _: &MultiLocation) -> Weight { - Weight::MAX - } - fn set_fees_mode(_: &bool) -> Weight { - XcmGeneric::<Runtime>::set_fees_mode() - } - fn set_topic(_topic: &[u8; 32]) -> Weight { - XcmGeneric::<Runtime>::set_topic() - } - fn clear_topic() -> Weight { - XcmGeneric::<Runtime>::clear_topic() - } - fn alias_origin(_: &MultiLocation) -> Weight { - // XCM Executor does not currently support alias origin
operations - Weight::MAX - } - fn unpaid_execution(_: &WeightLimit, _: &Option<MultiLocation>) -> Weight { - XcmGeneric::<Runtime>::unpaid_execution() - } -} diff --git a/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/xcm/pallet_xcm_benchmarks_fungible.rs b/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/xcm/pallet_xcm_benchmarks_fungible.rs deleted file mode 100644 index e680c2d5c11..00000000000 --- a/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/xcm/pallet_xcm_benchmarks_fungible.rs +++ /dev/null @@ -1,187 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! Autogenerated weights for `pallet_xcm_benchmarks::fungible` -//! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-09-28, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-nbnwcyh-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! WASM-EXECUTION: Compiled, CHAIN: Some("asset-hub-kusama-dev"), DB CACHE: 1024 - -// Executed Command: -// target/production/polkadot-parachain -// benchmark -// pallet -// --steps=50 -// --repeat=20 -// --extrinsic=* -// --wasm-execution=compiled -// --heap-pages=4096 -// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json -// --pallet=pallet_xcm_benchmarks::fungible -// --chain=asset-hub-kusama-dev -// --header=./cumulus/file_header.txt -// --template=./cumulus/templates/xcm-bench-template.hbs -// --output=./cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/xcm/ - -#![cfg_attr(rustfmt, rustfmt_skip)] -#![allow(unused_parens)] -#![allow(unused_imports)] - -use frame_support::{traits::Get, weights::Weight}; -use sp_std::marker::PhantomData; - -/// Weights for `pallet_xcm_benchmarks::fungible`. -pub struct WeightInfo<T>(PhantomData<T>); -impl<T: frame_system::Config> WeightInfo<T> { - // Storage: `System::Account` (r:1 w:1) - // Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - pub fn withdraw_asset() -> Weight { - // Proof Size summary in bytes: - // Measured: `101` - // Estimated: `3593` - // Minimum execution time: 25_602_000 picoseconds. - Weight::from_parts(26_312_000, 3593) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - // Storage: `System::Account` (r:2 w:2) - // Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - pub fn transfer_asset() -> Weight { - // Proof Size summary in bytes: - // Measured: `101` - // Estimated: `6196` - // Minimum execution time: 51_173_000 picoseconds.
- Weight::from_parts(52_221_000, 6196) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(2)) - } - // Storage: `System::Account` (r:2 w:2) - // Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - // Storage: `ParachainInfo::ParachainId` (r:1 w:0) - // Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - // Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) - // Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) - // Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) - // Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - // Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) - // Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - // Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) - // Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - // Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) - // Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - pub fn transfer_reserve_asset() -> Weight { - // Proof Size summary in bytes: - // Measured: `246` - // Estimated: `6196` - // Minimum execution time: 74_651_000 picoseconds. - Weight::from_parts(76_500_000, 6196) - .saturating_add(T::DbWeight::get().reads(8)) - .saturating_add(T::DbWeight::get().writes(4)) - } - // Storage: `Benchmark::Override` (r:0 w:0) - // Proof: `Benchmark::Override` (`max_values`: None, `max_size`: None, mode: `Measured`) - pub fn reserve_asset_deposited() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 18_446_744_073_709_551_000 picoseconds. - Weight::from_parts(18_446_744_073_709_551_000, 0) - } - // Storage: `ParachainInfo::ParachainId` (r:1 w:0) - // Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - // Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) - // Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) - // Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) - // Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - // Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) - // Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - // Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) - // Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - // Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) - // Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - pub fn initiate_reserve_withdraw() -> Weight { - // Proof Size summary in bytes: - // Measured: `145` - // Estimated: `3610` - // Minimum execution time: 458_666_000 picoseconds. - Weight::from_parts(470_470_000, 3610) - .saturating_add(T::DbWeight::get().reads(6)) - .saturating_add(T::DbWeight::get().writes(2)) - } - pub fn receive_teleported_asset() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 3_701_000 picoseconds. 
- Weight::from_parts(3_887_000, 0) - } - // Storage: `System::Account` (r:1 w:1) - // Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - pub fn deposit_asset() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `3593` - // Minimum execution time: 25_709_000 picoseconds. - Weight::from_parts(26_320_000, 3593) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - // Storage: `System::Account` (r:1 w:1) - // Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - // Storage: `ParachainInfo::ParachainId` (r:1 w:0) - // Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - // Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) - // Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) - // Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) - // Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - // Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) - // Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - // Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) - // Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - // Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) - // Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - pub fn deposit_reserve_asset() -> Weight { - // Proof Size summary in bytes: - // Measured: `145` - // Estimated: `3610` - // Minimum execution time: 51_663_000 picoseconds. - Weight::from_parts(52_538_000, 3610) - .saturating_add(T::DbWeight::get().reads(7)) - .saturating_add(T::DbWeight::get().writes(3)) - } - // Storage: `ParachainInfo::ParachainId` (r:1 w:0) - // Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - // Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) - // Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) - // Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) - // Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - // Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) - // Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - // Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) - // Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - // Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) - // Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - pub fn initiate_teleport() -> Weight { - // Proof Size summary in bytes: - // Measured: `145` - // Estimated: `3610` - // Minimum execution time: 31_972_000 picoseconds. 
- Weight::from_parts(32_834_000, 3610) - .saturating_add(T::DbWeight::get().reads(6)) - .saturating_add(T::DbWeight::get().writes(2)) - } -} diff --git a/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/xcm/pallet_xcm_benchmarks_generic.rs b/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/xcm/pallet_xcm_benchmarks_generic.rs deleted file mode 100644 index 9e8f3bfe75c..00000000000 --- a/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/xcm/pallet_xcm_benchmarks_generic.rs +++ /dev/null @@ -1,328 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! Autogenerated weights for `pallet_xcm_benchmarks::generic` -//! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-07-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-ynta1nyy-project-238-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! EXECUTION: , WASM-EXECUTION: Compiled, CHAIN: Some("asset-hub-kusama-dev"), DB CACHE: 1024 - -// Executed Command: -// ./target/production/polkadot-parachain -// benchmark -// pallet -// --template=./templates/xcm-bench-template.hbs -// --chain=asset-hub-kusama-dev -// --wasm-execution=compiled -// --pallet=pallet_xcm_benchmarks::generic -// --no-storage-info -// --no-median-slopes -// --no-min-squares -// --extrinsic=* -// --steps=50 -// --repeat=20 -// --json -// --header=./file_header.txt -// --output=./parachains/runtimes/assets/asset-hub-kusama/src/weights/xcm/pallet_xcm_benchmarks_generic.rs - -#![cfg_attr(rustfmt, rustfmt_skip)] -#![allow(unused_parens)] -#![allow(unused_imports)] - -use frame_support::{traits::Get, weights::Weight}; -use sp_std::marker::PhantomData; - -/// Weights for `pallet_xcm_benchmarks::generic`. 
-pub struct WeightInfo(PhantomData); -impl WeightInfo { - // Storage: `ParachainInfo::ParachainId` (r:1 w:0) - // Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - // Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) - // Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) - // Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) - // Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - // Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) - // Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - // Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) - // Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - // Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) - // Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - pub fn report_holding() -> Weight { - // Proof Size summary in bytes: - // Measured: `109` - // Estimated: `3574` - // Minimum execution time: 432_196_000 picoseconds. - Weight::from_parts(438_017_000, 3574) - .saturating_add(T::DbWeight::get().reads(6)) - .saturating_add(T::DbWeight::get().writes(2)) - } - pub fn buy_execution() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 4_223_000 picoseconds. - Weight::from_parts(4_412_000, 0) - } - // Storage: `PolkadotXcm::Queries` (r:1 w:0) - // Proof: `PolkadotXcm::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`) - pub fn query_response() -> Weight { - // Proof Size summary in bytes: - // Measured: `103` - // Estimated: `3568` - // Minimum execution time: 11_582_000 picoseconds. - Weight::from_parts(11_830_000, 3568) - .saturating_add(T::DbWeight::get().reads(1)) - } - pub fn transact() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 13_955_000 picoseconds. - Weight::from_parts(14_320_000, 0) - } - pub fn refund_surplus() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 4_423_000 picoseconds. - Weight::from_parts(4_709_000, 0) - } - pub fn set_error_handler() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 3_028_000 picoseconds. - Weight::from_parts(3_151_000, 0) - } - pub fn set_appendix() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 2_966_000 picoseconds. - Weight::from_parts(3_076_000, 0) - } - pub fn clear_error() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 2_971_000 picoseconds. - Weight::from_parts(3_119_000, 0) - } - pub fn descend_origin() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 3_772_000 picoseconds. - Weight::from_parts(3_853_000, 0) - } - pub fn clear_origin() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 2_940_000 picoseconds. 
- Weight::from_parts(3_050_000, 0) - } - // Storage: `ParachainInfo::ParachainId` (r:1 w:0) - // Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - // Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) - // Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) - // Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) - // Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - // Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) - // Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - // Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) - // Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - // Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) - // Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - pub fn report_error() -> Weight { - // Proof Size summary in bytes: - // Measured: `109` - // Estimated: `3574` - // Minimum execution time: 27_734_000 picoseconds. - Weight::from_parts(28_351_000, 3574) - .saturating_add(T::DbWeight::get().reads(6)) - .saturating_add(T::DbWeight::get().writes(2)) - } - // Storage: `PolkadotXcm::AssetTraps` (r:1 w:1) - // Proof: `PolkadotXcm::AssetTraps` (`max_values`: None, `max_size`: None, mode: `Measured`) - pub fn claim_asset() -> Weight { - // Proof Size summary in bytes: - // Measured: `160` - // Estimated: `3625` - // Minimum execution time: 16_456_000 picoseconds. - Weight::from_parts(16_846_000, 3625) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - pub fn trap() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 2_974_000 picoseconds. - Weight::from_parts(3_108_000, 0) - } - // Storage: `PolkadotXcm::VersionNotifyTargets` (r:1 w:1) - // Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) - // Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) - // Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) - // Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) - // Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - // Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) - // Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - // Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) - // Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - // Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) - // Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - pub fn subscribe_version() -> Weight { - // Proof Size summary in bytes: - // Measured: `109` - // Estimated: `3574` - // Minimum execution time: 29_823_000 picoseconds. 
- Weight::from_parts(30_776_000, 3574) - .saturating_add(T::DbWeight::get().reads(6)) - .saturating_add(T::DbWeight::get().writes(3)) - } - // Storage: `PolkadotXcm::VersionNotifyTargets` (r:0 w:1) - // Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) - pub fn unsubscribe_version() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 4_966_000 picoseconds. - Weight::from_parts(5_157_000, 0) - .saturating_add(T::DbWeight::get().writes(1)) - } - pub fn burn_asset() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 141_875_000 picoseconds. - Weight::from_parts(144_925_000, 0) - } - pub fn expect_asset() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 13_147_000 picoseconds. - Weight::from_parts(13_420_000, 0) - } - pub fn expect_origin() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 3_050_000 picoseconds. - Weight::from_parts(3_161_000, 0) - } - pub fn expect_error() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 2_930_000 picoseconds. - Weight::from_parts(3_077_000, 0) - } - pub fn expect_transact_status() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 3_188_000 picoseconds. - Weight::from_parts(3_299_000, 0) - } - // Storage: `ParachainInfo::ParachainId` (r:1 w:0) - // Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - // Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) - // Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) - // Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) - // Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - // Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) - // Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - // Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) - // Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - // Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) - // Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - pub fn query_pallet() -> Weight { - // Proof Size summary in bytes: - // Measured: `109` - // Estimated: `3574` - // Minimum execution time: 31_678_000 picoseconds. - Weight::from_parts(32_462_000, 3574) - .saturating_add(T::DbWeight::get().reads(6)) - .saturating_add(T::DbWeight::get().writes(2)) - } - pub fn expect_pallet() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 5_638_000 picoseconds. 
- Weight::from_parts(5_756_000, 0) - } - // Storage: `ParachainInfo::ParachainId` (r:1 w:0) - // Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - // Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) - // Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) - // Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) - // Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - // Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) - // Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - // Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) - // Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - // Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) - // Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - pub fn report_transact_status() -> Weight { - // Proof Size summary in bytes: - // Measured: `109` - // Estimated: `3574` - // Minimum execution time: 27_556_000 picoseconds. - Weight::from_parts(28_240_000, 3574) - .saturating_add(T::DbWeight::get().reads(6)) - .saturating_add(T::DbWeight::get().writes(2)) - } - pub fn clear_transact_status() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 2_932_000 picoseconds. - Weight::from_parts(3_097_000, 0) - } - pub fn set_topic() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 2_860_000 picoseconds. - Weight::from_parts(2_957_000, 0) - } - pub fn clear_topic() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 2_886_000 picoseconds. - Weight::from_parts(3_015_000, 0) - } - pub fn set_fees_mode() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 2_874_000 picoseconds. - Weight::from_parts(3_060_000, 0) - } - pub fn unpaid_execution() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 3_029_000 picoseconds. - Weight::from_parts(3_158_000, 0) - } -} diff --git a/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/xcm_config.rs b/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/xcm_config.rs deleted file mode 100644 index 05262e07410..00000000000 --- a/cumulus/parachains/runtimes/assets/asset-hub-kusama/src/xcm_config.rs +++ /dev/null @@ -1,634 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
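// -----------------------------------------------------------------------------------------
// Editorial illustration (not part of the original patch): every `WeightInfo` function in the
// XCM instruction weight file removed above follows the same recipe — a benchmarked base cost,
// `Weight::from_parts(ref_time_picoseconds, proof_size_bytes)`, plus the cost of the storage
// accesses listed in the `Storage:` / `Proof:` comments. A minimal standalone sketch of that
// composition (the numbers are invented, and `RocksDbWeight` merely stands in for the
// runtime's configured `T::DbWeight`):
fn example_instruction_weight() -> frame_support::weights::Weight {
    use frame_support::{traits::Get, weights::{constants::RocksDbWeight, Weight}};
    // Base cost measured by the benchmark: ~28 µs of ref time, 3574 bytes of proof size.
    Weight::from_parts(28_000_000, 3574)
        // Plus six storage reads and two writes at the configured database weights.
        .saturating_add(RocksDbWeight::get().reads(6))
        .saturating_add(RocksDbWeight::get().writes(2))
}
// -----------------------------------------------------------------------------------------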
- -use super::{ - AccountId, AllPalletsWithSystem, Assets, Authorship, Balance, Balances, ParachainInfo, - ParachainSystem, PolkadotXcm, PoolAssets, Runtime, RuntimeCall, RuntimeEvent, RuntimeOrigin, - TransactionByteFee, TrustBackedAssetsInstance, WeightToFee, XcmpQueue, -}; -use crate::{ForeignAssets, CENTS}; -use assets_common::{ - local_and_foreign_assets::MatchesLocalAndForeignAssetsMultiLocation, - matching::{FromSiblingParachain, IsForeignConcreteAsset}, -}; -use frame_support::{ - match_types, parameter_types, - traits::{ConstU32, Contains, Everything, Nothing, PalletInfoAccess}, -}; -use frame_system::EnsureRoot; -use pallet_xcm::XcmPassthrough; -use parachains_common::{ - impls::ToStakingPot, - xcm_config::{AssetFeeAsExistentialDepositMultiplier, ConcreteAssetFromSystem}, -}; -use polkadot_parachain_primitives::primitives::Sibling; -use polkadot_runtime_common::xcm_sender::ExponentialPrice; -use sp_runtime::traits::ConvertInto; -use xcm::latest::prelude::*; -use xcm_builder::{ - AccountId32Aliases, AllowExplicitUnpaidExecutionFrom, AllowKnownQueryResponses, - AllowSubscriptionsFrom, AllowTopLevelPaidExecutionFrom, CurrencyAdapter, - DenyReserveTransferToRelayChain, DenyThenTry, DescribeAllTerminal, DescribeFamily, - EnsureXcmOrigin, FungiblesAdapter, HashedDescription, IsConcrete, LocalMint, NoChecking, - ParentAsSuperuser, ParentIsPreset, RelayChainAsNative, SiblingParachainAsNative, - SiblingParachainConvertsVia, SignedAccountId32AsNative, SignedToAccountId32, - SovereignSignedViaLocation, StartsWith, StartsWithExplicitGlobalConsensus, TakeWeightCredit, - TrailingSetTopicAsId, UsingComponents, WeightInfoBounds, WithComputedOrigin, WithUniqueTopic, -}; -use xcm_executor::{traits::WithOriginFilter, XcmExecutor}; - -#[cfg(feature = "runtime-benchmarks")] -use {cumulus_primitives_core::ParaId, sp_core::Get}; - -parameter_types! { - pub const KsmLocation: MultiLocation = MultiLocation::parent(); - pub const RelayNetwork: Option = Some(NetworkId::Kusama); - pub RelayChainOrigin: RuntimeOrigin = cumulus_pallet_xcm::Origin::Relay.into(); - pub UniversalLocation: InteriorMultiLocation = - X2(GlobalConsensus(RelayNetwork::get().unwrap()), Parachain(ParachainInfo::parachain_id().into())); - pub UniversalLocationNetworkId: NetworkId = UniversalLocation::get().global_consensus().unwrap(); - pub TrustBackedAssetsPalletLocation: MultiLocation = - PalletInstance(::index() as u8).into(); - pub ForeignAssetsPalletLocation: MultiLocation = - PalletInstance(::index() as u8).into(); - pub PoolAssetsPalletLocation: MultiLocation = - PalletInstance(::index() as u8).into(); - pub CheckingAccount: AccountId = PolkadotXcm::check_account(); - pub const GovernanceLocation: MultiLocation = MultiLocation::parent(); - pub const FellowshipLocation: MultiLocation = MultiLocation::parent(); -} - -/// Type for specifying how a `MultiLocation` can be converted into an `AccountId`. This is used -/// when determining ownership of accounts for asset transacting and when attempting to use XCM -/// `Transact` in order to determine the dispatch Origin. -pub type LocationToAccountId = ( - // The parent (Relay-chain) origin converts to the parent `AccountId`. - ParentIsPreset, - // Sibling parachain origins convert to AccountId via the `ParaId::into`. - SiblingParachainConvertsVia, - // Straight up local `AccountId32` origins just alias directly to `AccountId`. - AccountId32Aliases, - // Foreign locations alias into accounts according to a hash of their standard description. 
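// (Editorial aside, not part of the original file: `HashedDescription` below derives the
// local account by taking a canonical byte "description" of the remote location — produced
// here via the imported `DescribeFamily<DescribeAllTerminal>` describers — and hashing it
// with blake2_256 into a 32-byte `AccountId`, so every foreign location gets a stable,
// collision-resistant local address.)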
- HashedDescription>, -); - -/// Means for transacting the native currency on this chain. -pub type CurrencyTransactor = CurrencyAdapter< - // Use this currency: - Balances, - // Use this currency when it is a fungible asset matching the given location or name: - IsConcrete, - // Convert an XCM MultiLocation into a local account id: - LocationToAccountId, - // Our chain's account ID type (we can't get away without mentioning it explicitly): - AccountId, - // We don't track any teleports of `Balances`. - (), ->; - -/// `AssetId`/`Balance` converter for `PoolAssets`. -pub type TrustBackedAssetsConvertedConcreteId = - assets_common::TrustBackedAssetsConvertedConcreteId; - -/// Means for transacting assets besides the native currency on this chain. -pub type FungiblesTransactor = FungiblesAdapter< - // Use this fungibles implementation: - Assets, - // Use this currency when it is a fungible asset matching the given location or name: - TrustBackedAssetsConvertedConcreteId, - // Convert an XCM MultiLocation into a local account id: - LocationToAccountId, - // Our chain's account ID type (we can't get away without mentioning it explicitly): - AccountId, - // We only want to allow teleports of known assets. We use non-zero issuance as an indication - // that this asset is known. - LocalMint>, - // The account to use for tracking teleports. - CheckingAccount, ->; - -/// `AssetId/Balance` converter for `TrustBackedAssets` -pub type ForeignAssetsConvertedConcreteId = assets_common::ForeignAssetsConvertedConcreteId< - ( - // Ignore `TrustBackedAssets` explicitly - StartsWith, - // Ignore assets that start explicitly with our `GlobalConsensus(NetworkId)`, means: - // - foreign assets from our consensus should be: `MultiLocation {parents: 1, - // X*(Parachain(xyz), ..)}` - // - foreign assets outside our consensus with the same `GlobalConsensus(NetworkId)` won't - // be accepted here - StartsWithExplicitGlobalConsensus, - ), - Balance, ->; - -/// Means for transacting foreign assets from different global consensus. -pub type ForeignFungiblesTransactor = FungiblesAdapter< - // Use this fungibles implementation: - ForeignAssets, - // Use this currency when it is a fungible asset matching the given location or name: - ForeignAssetsConvertedConcreteId, - // Convert an XCM MultiLocation into a local account id: - LocationToAccountId, - // Our chain's account ID type (we can't get away without mentioning it explicitly): - AccountId, - // We dont need to check teleports here. - NoChecking, - // The account to use for tracking teleports. - CheckingAccount, ->; - -/// `AssetId`/`Balance` converter for `PoolAssets`. -pub type PoolAssetsConvertedConcreteId = - assets_common::PoolAssetsConvertedConcreteId; - -/// Means for transacting asset conversion pool assets on this chain. -pub type PoolFungiblesTransactor = FungiblesAdapter< - // Use this fungibles implementation: - PoolAssets, - // Use this currency when it is a fungible asset matching the given location or name: - PoolAssetsConvertedConcreteId, - // Convert an XCM MultiLocation into a local account id: - LocationToAccountId, - // Our chain's account ID type (we can't get away without mentioning it explicitly): - AccountId, - // We only want to allow teleports of known assets. We use non-zero issuance as an indication - // that this asset is known. - LocalMint>, - // The account to use for tracking teleports. - CheckingAccount, ->; - -/// Means for transacting assets on this chain. 
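// (Editorial aside, not part of the original file: the transactors declared above are grouped
// into the tuple below; the XCM executor tries each element in order, so the native-currency
// adapter gets first look at an asset before the trust-backed, foreign and pool fungibles
// adapters are consulted.)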
-pub type AssetTransactors = - (CurrencyTransactor, FungiblesTransactor, ForeignFungiblesTransactor, PoolFungiblesTransactor); - -/// Simple `MultiLocation` matcher for Local and Foreign asset `MultiLocation`. -pub struct LocalAndForeignAssetsMultiLocationMatcher; -impl MatchesLocalAndForeignAssetsMultiLocation for LocalAndForeignAssetsMultiLocationMatcher { - fn is_local(location: &MultiLocation) -> bool { - use assets_common::fungible_conversion::MatchesMultiLocation; - TrustBackedAssetsConvertedConcreteId::contains(location) - } - fn is_foreign(location: &MultiLocation) -> bool { - use assets_common::fungible_conversion::MatchesMultiLocation; - ForeignAssetsConvertedConcreteId::contains(location) - } -} -impl Contains for LocalAndForeignAssetsMultiLocationMatcher { - fn contains(location: &MultiLocation) -> bool { - Self::is_local(location) || Self::is_foreign(location) - } -} - -/// This is the type we use to convert an (incoming) XCM origin into a local `Origin` instance, -/// ready for dispatching a transaction with Xcm's `Transact`. There is an `OriginKind` which can -/// biases the kind of local `Origin` it will become. -pub type XcmOriginToTransactDispatchOrigin = ( - // Sovereign account converter; this attempts to derive an `AccountId` from the origin location - // using `LocationToAccountId` and then turn that into the usual `Signed` origin. Useful for - // foreign chains who want to have a local sovereign account on this chain which they control. - SovereignSignedViaLocation, - // Native converter for Relay-chain (Parent) location; will convert to a `Relay` origin when - // recognised. - RelayChainAsNative, - // Native converter for sibling Parachains; will convert to a `SiblingPara` origin when - // recognised. - SiblingParachainAsNative, - // Superuser converter for the Relay-chain (Parent) location. This will allow it to issue a - // transaction from the Root origin. - ParentAsSuperuser, - // Native signed account converter; this just converts an `AccountId32` origin into a normal - // `RuntimeOrigin::Signed` origin of the same 32-byte value. - SignedAccountId32AsNative, - // Xcm origins can be represented natively under the Xcm pallet's Xcm origin. - XcmPassthrough, -); - -parameter_types! { - pub const MaxInstructions: u32 = 100; - pub const MaxAssetsIntoHolding: u32 = 64; - pub XcmAssetFeesReceiver: Option = Authorship::author(); -} - -match_types! { - pub type ParentOrParentsPlurality: impl Contains = { - MultiLocation { parents: 1, interior: Here } | - MultiLocation { parents: 1, interior: X1(Plurality { .. }) } - }; - pub type ParentOrSiblings: impl Contains = { - MultiLocation { parents: 1, interior: Here } | - MultiLocation { parents: 1, interior: X1(_) } - }; -} - -/// A call filter for the XCM Transact instruction. This is a temporary measure until we properly -/// account for proof size weights. -/// -/// Calls that are allowed through this filter must: -/// 1. Have a fixed weight; -/// 2. Cannot lead to another call being made; -/// 3. Have a defined proof size weight, e.g. no unbounded vecs in call parameters. -pub struct SafeCallFilter; -impl Contains for SafeCallFilter { - fn contains(call: &RuntimeCall) -> bool { - #[cfg(feature = "runtime-benchmarks")] - { - if matches!(call, RuntimeCall::System(frame_system::Call::remark_with_event { .. })) { - return true - } - } - - matches!( - call, - RuntimeCall::PolkadotXcm(pallet_xcm::Call::force_xcm_version { .. }) | - RuntimeCall::System( - frame_system::Call::set_heap_pages { .. 
} | - frame_system::Call::set_code { .. } | - frame_system::Call::set_code_without_checks { .. } | - frame_system::Call::kill_prefix { .. }, - ) | RuntimeCall::ParachainSystem(..) | - RuntimeCall::Timestamp(..) | - RuntimeCall::Balances(..) | - RuntimeCall::CollatorSelection( - pallet_collator_selection::Call::set_desired_candidates { .. } | - pallet_collator_selection::Call::set_candidacy_bond { .. } | - pallet_collator_selection::Call::register_as_candidate { .. } | - pallet_collator_selection::Call::leave_intent { .. } | - pallet_collator_selection::Call::set_invulnerables { .. } | - pallet_collator_selection::Call::add_invulnerable { .. } | - pallet_collator_selection::Call::remove_invulnerable { .. }, - ) | RuntimeCall::Session(pallet_session::Call::purge_keys { .. }) | - RuntimeCall::XcmpQueue(..) | - RuntimeCall::MessageQueue(..) | - RuntimeCall::Assets( - pallet_assets::Call::create { .. } | - pallet_assets::Call::force_create { .. } | - pallet_assets::Call::start_destroy { .. } | - pallet_assets::Call::destroy_accounts { .. } | - pallet_assets::Call::destroy_approvals { .. } | - pallet_assets::Call::finish_destroy { .. } | - pallet_assets::Call::block { .. } | - pallet_assets::Call::mint { .. } | - pallet_assets::Call::burn { .. } | - pallet_assets::Call::transfer { .. } | - pallet_assets::Call::transfer_keep_alive { .. } | - pallet_assets::Call::force_transfer { .. } | - pallet_assets::Call::freeze { .. } | - pallet_assets::Call::thaw { .. } | - pallet_assets::Call::freeze_asset { .. } | - pallet_assets::Call::thaw_asset { .. } | - pallet_assets::Call::transfer_ownership { .. } | - pallet_assets::Call::set_team { .. } | - pallet_assets::Call::set_metadata { .. } | - pallet_assets::Call::clear_metadata { .. } | - pallet_assets::Call::force_set_metadata { .. } | - pallet_assets::Call::force_clear_metadata { .. } | - pallet_assets::Call::force_asset_status { .. } | - pallet_assets::Call::approve_transfer { .. } | - pallet_assets::Call::cancel_approval { .. } | - pallet_assets::Call::force_cancel_approval { .. } | - pallet_assets::Call::transfer_approved { .. } | - pallet_assets::Call::touch { .. } | - pallet_assets::Call::touch_other { .. } | - pallet_assets::Call::refund { .. } | - pallet_assets::Call::refund_other { .. }, - ) | RuntimeCall::ForeignAssets( - pallet_assets::Call::create { .. } | - pallet_assets::Call::force_create { .. } | - pallet_assets::Call::start_destroy { .. } | - pallet_assets::Call::destroy_accounts { .. } | - pallet_assets::Call::destroy_approvals { .. } | - pallet_assets::Call::finish_destroy { .. } | - pallet_assets::Call::block { .. } | - pallet_assets::Call::mint { .. } | - pallet_assets::Call::burn { .. } | - pallet_assets::Call::transfer { .. } | - pallet_assets::Call::transfer_keep_alive { .. } | - pallet_assets::Call::force_transfer { .. } | - pallet_assets::Call::freeze { .. } | - pallet_assets::Call::thaw { .. } | - pallet_assets::Call::freeze_asset { .. } | - pallet_assets::Call::thaw_asset { .. } | - pallet_assets::Call::transfer_ownership { .. } | - pallet_assets::Call::set_team { .. } | - pallet_assets::Call::set_metadata { .. } | - pallet_assets::Call::clear_metadata { .. } | - pallet_assets::Call::force_set_metadata { .. } | - pallet_assets::Call::force_clear_metadata { .. } | - pallet_assets::Call::force_asset_status { .. } | - pallet_assets::Call::approve_transfer { .. } | - pallet_assets::Call::cancel_approval { .. } | - pallet_assets::Call::force_cancel_approval { .. } | - pallet_assets::Call::transfer_approved { .. 
} | - pallet_assets::Call::touch { .. } | - pallet_assets::Call::touch_other { .. } | - pallet_assets::Call::refund { .. } | - pallet_assets::Call::refund_other { .. }, - ) | RuntimeCall::PoolAssets( - pallet_assets::Call::force_create { .. } | - pallet_assets::Call::block { .. } | - pallet_assets::Call::burn { .. } | - pallet_assets::Call::transfer { .. } | - pallet_assets::Call::transfer_keep_alive { .. } | - pallet_assets::Call::force_transfer { .. } | - pallet_assets::Call::freeze { .. } | - pallet_assets::Call::thaw { .. } | - pallet_assets::Call::freeze_asset { .. } | - pallet_assets::Call::thaw_asset { .. } | - pallet_assets::Call::transfer_ownership { .. } | - pallet_assets::Call::set_team { .. } | - pallet_assets::Call::set_metadata { .. } | - pallet_assets::Call::clear_metadata { .. } | - pallet_assets::Call::force_set_metadata { .. } | - pallet_assets::Call::force_clear_metadata { .. } | - pallet_assets::Call::force_asset_status { .. } | - pallet_assets::Call::approve_transfer { .. } | - pallet_assets::Call::cancel_approval { .. } | - pallet_assets::Call::force_cancel_approval { .. } | - pallet_assets::Call::transfer_approved { .. } | - pallet_assets::Call::touch { .. } | - pallet_assets::Call::touch_other { .. } | - pallet_assets::Call::refund { .. } | - pallet_assets::Call::refund_other { .. }, - ) | RuntimeCall::AssetConversion( - pallet_asset_conversion::Call::create_pool { .. } | - pallet_asset_conversion::Call::add_liquidity { .. } | - pallet_asset_conversion::Call::remove_liquidity { .. } | - pallet_asset_conversion::Call::swap_tokens_for_exact_tokens { .. } | - pallet_asset_conversion::Call::swap_exact_tokens_for_tokens { .. }, - ) | RuntimeCall::NftFractionalization( - pallet_nft_fractionalization::Call::fractionalize { .. } | - pallet_nft_fractionalization::Call::unify { .. }, - ) | RuntimeCall::Nfts( - pallet_nfts::Call::create { .. } | - pallet_nfts::Call::force_create { .. } | - pallet_nfts::Call::destroy { .. } | - pallet_nfts::Call::mint { .. } | - pallet_nfts::Call::force_mint { .. } | - pallet_nfts::Call::burn { .. } | - pallet_nfts::Call::transfer { .. } | - pallet_nfts::Call::lock_item_transfer { .. } | - pallet_nfts::Call::unlock_item_transfer { .. } | - pallet_nfts::Call::lock_collection { .. } | - pallet_nfts::Call::transfer_ownership { .. } | - pallet_nfts::Call::set_team { .. } | - pallet_nfts::Call::force_collection_owner { .. } | - pallet_nfts::Call::force_collection_config { .. } | - pallet_nfts::Call::approve_transfer { .. } | - pallet_nfts::Call::cancel_approval { .. } | - pallet_nfts::Call::clear_all_transfer_approvals { .. } | - pallet_nfts::Call::lock_item_properties { .. } | - pallet_nfts::Call::set_attribute { .. } | - pallet_nfts::Call::force_set_attribute { .. } | - pallet_nfts::Call::clear_attribute { .. } | - pallet_nfts::Call::approve_item_attributes { .. } | - pallet_nfts::Call::cancel_item_attributes_approval { .. } | - pallet_nfts::Call::set_metadata { .. } | - pallet_nfts::Call::clear_metadata { .. } | - pallet_nfts::Call::set_collection_metadata { .. } | - pallet_nfts::Call::clear_collection_metadata { .. } | - pallet_nfts::Call::set_accept_ownership { .. } | - pallet_nfts::Call::set_collection_max_supply { .. } | - pallet_nfts::Call::update_mint_settings { .. } | - pallet_nfts::Call::set_price { .. } | - pallet_nfts::Call::buy_item { .. } | - pallet_nfts::Call::pay_tips { .. } | - pallet_nfts::Call::create_swap { .. } | - pallet_nfts::Call::cancel_swap { .. } | - pallet_nfts::Call::claim_swap { .. 
}, - ) | RuntimeCall::Uniques( - pallet_uniques::Call::create { .. } | - pallet_uniques::Call::force_create { .. } | - pallet_uniques::Call::destroy { .. } | - pallet_uniques::Call::mint { .. } | - pallet_uniques::Call::burn { .. } | - pallet_uniques::Call::transfer { .. } | - pallet_uniques::Call::freeze { .. } | - pallet_uniques::Call::thaw { .. } | - pallet_uniques::Call::freeze_collection { .. } | - pallet_uniques::Call::thaw_collection { .. } | - pallet_uniques::Call::transfer_ownership { .. } | - pallet_uniques::Call::set_team { .. } | - pallet_uniques::Call::approve_transfer { .. } | - pallet_uniques::Call::cancel_approval { .. } | - pallet_uniques::Call::force_item_status { .. } | - pallet_uniques::Call::set_attribute { .. } | - pallet_uniques::Call::clear_attribute { .. } | - pallet_uniques::Call::set_metadata { .. } | - pallet_uniques::Call::clear_metadata { .. } | - pallet_uniques::Call::set_collection_metadata { .. } | - pallet_uniques::Call::clear_collection_metadata { .. } | - pallet_uniques::Call::set_accept_ownership { .. } | - pallet_uniques::Call::set_collection_max_supply { .. } | - pallet_uniques::Call::set_price { .. } | - pallet_uniques::Call::buy_item { .. } - ) - ) - } -} - -pub type Barrier = TrailingSetTopicAsId< - DenyThenTry< - DenyReserveTransferToRelayChain, - ( - TakeWeightCredit, - // Expected responses are OK. - AllowKnownQueryResponses, - // Allow XCMs with some computed origins to pass through. - WithComputedOrigin< - ( - // If the message is one that immediately attempts to pay for execution, then - // allow it. - AllowTopLevelPaidExecutionFrom, - // Parent and its pluralities (i.e. governance bodies) get free execution. - AllowExplicitUnpaidExecutionFrom, - // Subscriptions for version tracking are OK. - AllowSubscriptionsFrom, - ), - UniversalLocation, - ConstU32<8>, - >, - ), - >, ->; - -pub type AssetFeeAsExistentialDepositMultiplierFeeCharger = AssetFeeAsExistentialDepositMultiplier< - Runtime, - WeightToFee, - pallet_assets::BalanceToAssetBalance, - TrustBackedAssetsInstance, ->; - -/// Cases where a remote origin is accepted as trusted Teleporter for a given asset: -/// -/// - KSM with the parent Relay Chain and sibling system parachains; and -/// - Sibling parachains' assets from where they originate (as `ForeignCreators`). -pub type TrustedTeleporters = ( - ConcreteAssetFromSystem, - IsForeignConcreteAsset>>, -); - -pub struct XcmConfig; -impl xcm_executor::Config for XcmConfig { - type RuntimeCall = RuntimeCall; - type XcmSender = XcmRouter; - type AssetTransactor = AssetTransactors; - type OriginConverter = XcmOriginToTransactDispatchOrigin; - // Asset Hub Kusama does not recognize a reserve location for any asset. This does not prevent - // Asset Hub acting _as_ a reserve location for KSM and assets created under `pallet-assets`. - // For KSM, users must use teleport where allowed (e.g. with the Relay Chain). 
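    // (Editorial aside, not part of the original file: `()` below means "treat no remote
    // location as a reserve". A chain that did want to accept reserve-based transfers could
    // plug a filter in here instead — e.g. something like `xcm_builder::NativeAsset`, named
    // purely as a hypothetical — but Asset Hub keeps the set empty and relies on teleports.)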
- type IsReserve = (); - type IsTeleporter = TrustedTeleporters; - type UniversalLocation = UniversalLocation; - type Barrier = Barrier; - type Weigher = WeightInfoBounds< - crate::weights::xcm::AssetHubKusamaXcmWeight, - RuntimeCall, - MaxInstructions, - >; - type Trader = ( - UsingComponents>, - cumulus_primitives_utility::TakeFirstAssetTrader< - AccountId, - AssetFeeAsExistentialDepositMultiplierFeeCharger, - TrustBackedAssetsConvertedConcreteId, - Assets, - cumulus_primitives_utility::XcmFeesTo32ByteAccount< - FungiblesTransactor, - AccountId, - XcmAssetFeesReceiver, - >, - >, - ); - type ResponseHandler = PolkadotXcm; - type AssetTrap = PolkadotXcm; - type AssetClaims = PolkadotXcm; - type SubscriptionService = PolkadotXcm; - type PalletInstancesInfo = AllPalletsWithSystem; - type MaxAssetsIntoHolding = MaxAssetsIntoHolding; - type AssetLocker = (); - type AssetExchanger = (); - type FeeManager = (); - type MessageExporter = (); - type UniversalAliases = Nothing; - type CallDispatcher = WithOriginFilter; - type SafeCallFilter = SafeCallFilter; - type Aliasers = Nothing; -} - -/// Converts a local signed origin into an XCM multilocation. -/// Forms the basis for local origins sending/executing XCMs. -pub type LocalOriginToLocation = SignedToAccountId32; - -parameter_types! { - /// The asset ID for the asset that we use to pay for message delivery fees. - pub FeeAssetId: AssetId = Concrete(KsmLocation::get()); - /// The base fee for the message delivery fees. - pub const BaseDeliveryFee: u128 = CENTS.saturating_mul(3); -} - -pub type PriceForParentDelivery = - ExponentialPrice; - -/// The means for routing XCM messages which are not for local execution into the right message -/// queues. -pub type XcmRouter = WithUniqueTopic<( - // Two routers - use UMP to communicate with the relay chain: - cumulus_primitives_utility::ParentAsUmp, - // ..and XCMP to communicate with the sibling chains. - XcmpQueue, -)>; - -impl pallet_xcm::Config for Runtime { - type RuntimeEvent = RuntimeEvent; - // We want to disallow users sending (arbitrary) XCMs from this chain. - type SendXcmOrigin = EnsureXcmOrigin; - type XcmRouter = XcmRouter; - // We support local origins dispatching XCM executions in principle... - type ExecuteXcmOrigin = EnsureXcmOrigin; - // ... but disallow generic XCM execution. As a result only teleports and reserve transfers are - // allowed. 
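    // (Editorial aside, not part of the original file: `Nothing` rejects every call to
    // `pallet_xcm::execute`, while the `Everything` filters a few lines below keep
    // `teleport_assets` and `reserve_transfer_assets` available to all local origins.)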
- type XcmExecuteFilter = Nothing; - type XcmExecutor = XcmExecutor; - type XcmTeleportFilter = Everything; - type XcmReserveTransferFilter = Everything; - type Weigher = WeightInfoBounds< - crate::weights::xcm::AssetHubKusamaXcmWeight, - RuntimeCall, - MaxInstructions, - >; - type UniversalLocation = UniversalLocation; - type RuntimeOrigin = RuntimeOrigin; - type RuntimeCall = RuntimeCall; - const VERSION_DISCOVERY_QUEUE_SIZE: u32 = 100; - type AdvertisedXcmVersion = pallet_xcm::CurrentXcmVersion; - type Currency = Balances; - type CurrencyMatcher = (); - type TrustedLockers = (); - type SovereignAccountOf = LocationToAccountId; - type MaxLockers = ConstU32<8>; - type WeightInfo = crate::weights::pallet_xcm::WeightInfo; - type AdminOrigin = EnsureRoot; - type MaxRemoteLockConsumers = ConstU32<0>; - type RemoteLockConsumerIdentifier = (); -} - -impl cumulus_pallet_xcm::Config for Runtime { - type RuntimeEvent = RuntimeEvent; - type XcmExecutor = XcmExecutor; -} - -pub type ForeignCreatorsSovereignAccountOf = ( - SiblingParachainConvertsVia, - AccountId32Aliases, - ParentIsPreset, -); - -/// Simple conversion of `u32` into an `AssetId` for use in benchmarking. -pub struct XcmBenchmarkHelper; -#[cfg(feature = "runtime-benchmarks")] -impl pallet_assets::BenchmarkHelper for XcmBenchmarkHelper { - fn create_asset_id_parameter(id: u32) -> MultiLocation { - MultiLocation { parents: 1, interior: X1(Parachain(id)) } - } -} - -#[cfg(feature = "runtime-benchmarks")] -pub struct BenchmarkMultiLocationConverter { - _phantom: sp_std::marker::PhantomData, -} - -#[cfg(feature = "runtime-benchmarks")] -impl - pallet_asset_conversion::BenchmarkHelper> - for BenchmarkMultiLocationConverter -where - SelfParaId: Get, -{ - fn asset_id(asset_id: u32) -> MultiLocation { - MultiLocation { - parents: 1, - interior: X3( - Parachain(SelfParaId::get().into()), - PalletInstance(::index() as u8), - GeneralIndex(asset_id.into()), - ), - } - } - fn multiasset_id(asset_id: u32) -> sp_std::boxed::Box { - sp_std::boxed::Box::new(Self::asset_id(asset_id)) - } -} diff --git a/cumulus/parachains/runtimes/assets/asset-hub-kusama/tests/tests.rs b/cumulus/parachains/runtimes/assets/asset-hub-kusama/tests/tests.rs deleted file mode 100644 index cdd4290770f..00000000000 --- a/cumulus/parachains/runtimes/assets/asset-hub-kusama/tests/tests.rs +++ /dev/null @@ -1,658 +0,0 @@ -// This file is part of Cumulus. - -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! Tests for the Statemine (Kusama Assets Hub) chain. 
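// -----------------------------------------------------------------------------------------
// Editorial illustration (not part of the original patch): several trader tests below follow
// the same arithmetic — buy `bought` weight for `WeightToFee::weight_to_fee(&bought)`, consume
// only part of it, and expect `refund_weight` to hand back the fee of the unused remainder. A
// tiny standalone sketch of that bookkeeping (a 1:1 fee curve is assumed purely for
// illustration; the runtime's real `WeightToFee` is more involved):

fn illustrative_fee(ref_time: u64) -> u64 {
    // Purely illustrative: one fee unit per picosecond of ref time.
    ref_time
}

fn illustrative_author_income(bought: u64, used: u64) -> u64 {
    let paid = illustrative_fee(bought); // charged up front by `buy_weight`
    let refunded = illustrative_fee(bought.saturating_sub(used)); // returned by `refund_weight`
    paid.saturating_sub(refunded) // what the block author ultimately keeps
}
// E.g. buying 4_000_000_000 of ref time and using half of it leaves the author with
// 2_000_000_000 fee units under this toy curve.
// -----------------------------------------------------------------------------------------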
- -use asset_hub_kusama_runtime::xcm_config::{ - AssetFeeAsExistentialDepositMultiplierFeeCharger, KsmLocation, LocationToAccountId, - TrustBackedAssetsPalletLocation, -}; -pub use asset_hub_kusama_runtime::{ - xcm_config::{CheckingAccount, ForeignCreatorsSovereignAccountOf, XcmConfig}, - AllPalletsWithoutSystem, AssetDeposit, Assets, Balances, ExistentialDeposit, ForeignAssets, - ForeignAssetsInstance, MetadataDepositBase, MetadataDepositPerByte, ParachainSystem, Runtime, - RuntimeCall, RuntimeEvent, SessionKeys, System, TrustBackedAssetsInstance, XcmpQueue, -}; -use asset_test_utils::{CollatorSessionKeys, ExtBuilder}; -use codec::{Decode, Encode}; -use cumulus_primitives_utility::ChargeWeightInFungibles; -use frame_support::{ - assert_noop, assert_ok, - traits::fungibles::InspectEnumerable, - weights::{Weight, WeightToFee as WeightToFeeT}, -}; -use parachains_common::{ - kusama::fee::WeightToFee, AccountId, AssetIdForTrustBackedAssets, AuraId, Balance, -}; -use sp_runtime::traits::MaybeEquivalence; -use xcm::latest::prelude::*; -use xcm_executor::traits::{Identity, JustTry, WeightTrader}; - -const ALICE: [u8; 32] = [1u8; 32]; -const SOME_ASSET_ADMIN: [u8; 32] = [5u8; 32]; - -type AssetIdForTrustBackedAssetsConvert = - assets_common::AssetIdForTrustBackedAssetsConvert; - -type RuntimeHelper = asset_test_utils::RuntimeHelper; - -fn collator_session_keys() -> CollatorSessionKeys { - CollatorSessionKeys::new( - AccountId::from(ALICE), - AccountId::from(ALICE), - SessionKeys { aura: AuraId::from(sp_core::sr25519::Public::from_raw(ALICE)) }, - ) -} - -#[test] -fn test_asset_xcm_trader() { - ExtBuilder::::default() - .with_collators(vec![AccountId::from(ALICE)]) - .with_session_keys(vec![( - AccountId::from(ALICE), - AccountId::from(ALICE), - SessionKeys { aura: AuraId::from(sp_core::sr25519::Public::from_raw(ALICE)) }, - )]) - .build() - .execute_with(|| { - // We need root origin to create a sufficient asset - let minimum_asset_balance = 3333333_u128; - let local_asset_id = 1; - assert_ok!(Assets::force_create( - RuntimeHelper::root_origin(), - local_asset_id.into(), - AccountId::from(ALICE).into(), - true, - minimum_asset_balance - )); - - // We first mint enough asset for the account to exist for assets - assert_ok!(Assets::mint( - RuntimeHelper::origin_of(AccountId::from(ALICE)), - local_asset_id.into(), - AccountId::from(ALICE).into(), - minimum_asset_balance - )); - - // get asset id as multilocation - let asset_multilocation = - AssetIdForTrustBackedAssetsConvert::convert_back(&local_asset_id).unwrap(); - - // Set Alice as block author, who will receive fees - RuntimeHelper::run_to_block(2, AccountId::from(ALICE)); - - // We are going to buy 4e9 weight - let bought = Weight::from_parts(4_000_000_000u64, 0); - - // Lets calculate amount needed - let asset_amount_needed = - AssetFeeAsExistentialDepositMultiplierFeeCharger::charge_weight_in_fungibles( - local_asset_id, - bought, - ) - .expect("failed to compute"); - - // Lets pay with: asset_amount_needed + asset_amount_extra - let asset_amount_extra = 100_u128; - let asset: MultiAsset = - (asset_multilocation, asset_amount_needed + asset_amount_extra).into(); - - let mut trader = ::Trader::new(); - let ctx = XcmContext { origin: None, message_id: XcmHash::default(), topic: None }; - - // Lets buy_weight and make sure buy_weight does not return an error - let unused_assets = trader.buy_weight(bought, asset.into(), &ctx).expect("Expected Ok"); - // Check whether a correct amount of unused assets is returned - assert_ok!( - 
unused_assets.ensure_contains(&(asset_multilocation, asset_amount_extra).into()) - ); - - // Drop trader - drop(trader); - - // Make sure author(Alice) has received the amount - assert_eq!( - Assets::balance(local_asset_id, AccountId::from(ALICE)), - minimum_asset_balance + asset_amount_needed - ); - - // We also need to ensure the total supply increased - assert_eq!( - Assets::total_supply(local_asset_id), - minimum_asset_balance + asset_amount_needed - ); - }); -} - -#[test] -fn test_asset_xcm_trader_with_refund() { - ExtBuilder::::default() - .with_collators(vec![AccountId::from(ALICE)]) - .with_session_keys(vec![( - AccountId::from(ALICE), - AccountId::from(ALICE), - SessionKeys { aura: AuraId::from(sp_core::sr25519::Public::from_raw(ALICE)) }, - )]) - .build() - .execute_with(|| { - // We need root origin to create a sufficient asset - // We set existential deposit to be identical to the one for Balances first - assert_ok!(Assets::force_create( - RuntimeHelper::root_origin(), - 1.into(), - AccountId::from(ALICE).into(), - true, - ExistentialDeposit::get() - )); - - // We first mint enough asset for the account to exist for assets - assert_ok!(Assets::mint( - RuntimeHelper::origin_of(AccountId::from(ALICE)), - 1.into(), - AccountId::from(ALICE).into(), - ExistentialDeposit::get() - )); - - let mut trader = ::Trader::new(); - let ctx = XcmContext { origin: None, message_id: XcmHash::default(), topic: None }; - - // Set Alice as block author, who will receive fees - RuntimeHelper::run_to_block(2, AccountId::from(ALICE)); - - // We are going to buy 4e9 weight - let bought = Weight::from_parts(4_000_000_000u64, 0); - - let asset_multilocation = AssetIdForTrustBackedAssetsConvert::convert_back(&1).unwrap(); - - // lets calculate amount needed - let amount_bought = WeightToFee::weight_to_fee(&bought); - - let asset: MultiAsset = (asset_multilocation, amount_bought).into(); - - // Make sure buy_weight does not return an error - assert_ok!(trader.buy_weight(bought, asset.clone().into(), &ctx)); - - // Make sure again buy_weight does return an error - // This assert relies on the fact, that we use `TakeFirstAssetTrader` in `WeightTrader` - // tuple chain, which cannot be called twice - assert_noop!(trader.buy_weight(bought, asset.into(), &ctx), XcmError::TooExpensive); - - // We actually use half of the weight - let weight_used = bought / 2; - - // Make sure refurnd works. 
- let amount_refunded = WeightToFee::weight_to_fee(&(bought - weight_used)); - - assert_eq!( - trader.refund_weight(bought - weight_used, &ctx), - Some((asset_multilocation, amount_refunded).into()) - ); - - // Drop trader - drop(trader); - - // We only should have paid for half of the bought weight - let fees_paid = WeightToFee::weight_to_fee(&weight_used); - - assert_eq!( - Assets::balance(1, AccountId::from(ALICE)), - ExistentialDeposit::get() + fees_paid - ); - - // We also need to ensure the total supply increased - assert_eq!(Assets::total_supply(1), ExistentialDeposit::get() + fees_paid); - }); -} - -#[test] -fn test_asset_xcm_trader_refund_not_possible_since_amount_less_than_ed() { - ExtBuilder::::default() - .with_collators(vec![AccountId::from(ALICE)]) - .with_session_keys(vec![( - AccountId::from(ALICE), - AccountId::from(ALICE), - SessionKeys { aura: AuraId::from(sp_core::sr25519::Public::from_raw(ALICE)) }, - )]) - .build() - .execute_with(|| { - // We need root origin to create a sufficient asset - // We set existential deposit to be identical to the one for Balances first - assert_ok!(Assets::force_create( - RuntimeHelper::root_origin(), - 1.into(), - AccountId::from(ALICE).into(), - true, - ExistentialDeposit::get() - )); - - let mut trader = ::Trader::new(); - let ctx = XcmContext { origin: None, message_id: XcmHash::default(), topic: None }; - - // Set Alice as block author, who will receive fees - RuntimeHelper::run_to_block(2, AccountId::from(ALICE)); - - // We are going to buy small amount - let bought = Weight::from_parts(500_000_000u64, 0); - - let asset_multilocation = AssetIdForTrustBackedAssetsConvert::convert_back(&1).unwrap(); - - let amount_bought = WeightToFee::weight_to_fee(&bought); - - assert!( - amount_bought < ExistentialDeposit::get(), - "we are testing what happens when the amount does not exceed ED" - ); - - let asset: MultiAsset = (asset_multilocation, amount_bought).into(); - - // Buy weight should return an error - assert_noop!(trader.buy_weight(bought, asset.into(), &ctx), XcmError::TooExpensive); - - // not credited since the ED is higher than this value - assert_eq!(Assets::balance(1, AccountId::from(ALICE)), 0); - - // We also need to ensure the total supply did not increase - assert_eq!(Assets::total_supply(1), 0); - }); -} - -#[test] -fn test_that_buying_ed_refund_does_not_refund() { - ExtBuilder::::default() - .with_collators(vec![AccountId::from(ALICE)]) - .with_session_keys(vec![( - AccountId::from(ALICE), - AccountId::from(ALICE), - SessionKeys { aura: AuraId::from(sp_core::sr25519::Public::from_raw(ALICE)) }, - )]) - .build() - .execute_with(|| { - // We need root origin to create a sufficient asset - // We set existential deposit to be identical to the one for Balances first - assert_ok!(Assets::force_create( - RuntimeHelper::root_origin(), - 1.into(), - AccountId::from(ALICE).into(), - true, - ExistentialDeposit::get() - )); - - let mut trader = ::Trader::new(); - let ctx = XcmContext { origin: None, message_id: XcmHash::default(), topic: None }; - - // Set Alice as block author, who will receive fees - RuntimeHelper::run_to_block(2, AccountId::from(ALICE)); - - // We are gonna buy ED - let bought = Weight::from_parts(ExistentialDeposit::get().try_into().unwrap(), 0); - - let asset_multilocation = AssetIdForTrustBackedAssetsConvert::convert_back(&1).unwrap(); - - let amount_bought = WeightToFee::weight_to_fee(&bought); - - assert!( - amount_bought < ExistentialDeposit::get(), - "we are testing what happens when the amount does not 
exceed ED" - ); - - // We know we will have to buy at least ED, so lets make sure first it will - // fail with a payment of less than ED - let asset: MultiAsset = (asset_multilocation, amount_bought).into(); - assert_noop!(trader.buy_weight(bought, asset.into(), &ctx), XcmError::TooExpensive); - - // Now lets buy ED at least - let asset: MultiAsset = (asset_multilocation, ExistentialDeposit::get()).into(); - - // Buy weight should work - assert_ok!(trader.buy_weight(bought, asset.into(), &ctx)); - - // Should return None. We have a specific check making sure we dont go below ED for - // drop payment - assert_eq!(trader.refund_weight(bought, &ctx), None); - - // Drop trader - drop(trader); - - // Make sure author(Alice) has received the amount - assert_eq!(Assets::balance(1, AccountId::from(ALICE)), ExistentialDeposit::get()); - - // We also need to ensure the total supply increased - assert_eq!(Assets::total_supply(1), ExistentialDeposit::get()); - }); -} - -#[test] -fn test_asset_xcm_trader_not_possible_for_non_sufficient_assets() { - ExtBuilder::::default() - .with_collators(vec![AccountId::from(ALICE)]) - .with_session_keys(vec![( - AccountId::from(ALICE), - AccountId::from(ALICE), - SessionKeys { aura: AuraId::from(sp_core::sr25519::Public::from_raw(ALICE)) }, - )]) - .build() - .execute_with(|| { - // Create a non-sufficient asset with specific existential deposit - let minimum_asset_balance = 1_000_000_u128; - assert_ok!(Assets::force_create( - RuntimeHelper::root_origin(), - 1.into(), - AccountId::from(ALICE).into(), - false, - minimum_asset_balance - )); - - // We first mint enough asset for the account to exist for assets - assert_ok!(Assets::mint( - RuntimeHelper::origin_of(AccountId::from(ALICE)), - 1.into(), - AccountId::from(ALICE).into(), - minimum_asset_balance - )); - - let mut trader = ::Trader::new(); - let ctx = XcmContext { origin: None, message_id: XcmHash::default(), topic: None }; - - // Set Alice as block author, who will receive fees - RuntimeHelper::run_to_block(2, AccountId::from(ALICE)); - - // We are going to buy 4e9 weight - let bought = Weight::from_parts(4_000_000_000u64, 0); - - // lets calculate amount needed - let asset_amount_needed = WeightToFee::weight_to_fee(&bought); - - let asset_multilocation = AssetIdForTrustBackedAssetsConvert::convert_back(&1).unwrap(); - - let asset: MultiAsset = (asset_multilocation, asset_amount_needed).into(); - - // Make sure again buy_weight does return an error - assert_noop!(trader.buy_weight(bought, asset.into(), &ctx), XcmError::TooExpensive); - - // Drop trader - drop(trader); - - // Make sure author(Alice) has NOT received the amount - assert_eq!(Assets::balance(1, AccountId::from(ALICE)), minimum_asset_balance); - - // We also need to ensure the total supply NOT increased - assert_eq!(Assets::total_supply(1), minimum_asset_balance); - }); -} - -#[test] -fn test_assets_balances_api_works() { - use assets_common::runtime_api::runtime_decl_for_fungibles_api::FungiblesApi; - - ExtBuilder::::default() - .with_collators(vec![AccountId::from(ALICE)]) - .with_session_keys(vec![( - AccountId::from(ALICE), - AccountId::from(ALICE), - SessionKeys { aura: AuraId::from(sp_core::sr25519::Public::from_raw(ALICE)) }, - )]) - .build() - .execute_with(|| { - let local_asset_id = 1; - let foreign_asset_id_multilocation = - MultiLocation { parents: 1, interior: X2(Parachain(1234), GeneralIndex(12345)) }; - - // check before - assert_eq!(Assets::balance(local_asset_id, AccountId::from(ALICE)), 0); - assert_eq!( - 
ForeignAssets::balance(foreign_asset_id_multilocation, AccountId::from(ALICE)), - 0 - ); - assert_eq!(Balances::free_balance(AccountId::from(ALICE)), 0); - assert!(Runtime::query_account_balances(AccountId::from(ALICE)) - .unwrap() - .try_as::() - .unwrap() - .is_none()); - - // Drip some balance - use frame_support::traits::fungible::Mutate; - let some_currency = ExistentialDeposit::get(); - Balances::mint_into(&AccountId::from(ALICE), some_currency).unwrap(); - - // We need root origin to create a sufficient asset - let minimum_asset_balance = 3333333_u128; - assert_ok!(Assets::force_create( - RuntimeHelper::root_origin(), - local_asset_id.into(), - AccountId::from(ALICE).into(), - true, - minimum_asset_balance - )); - - // We first mint enough asset for the account to exist for assets - assert_ok!(Assets::mint( - RuntimeHelper::origin_of(AccountId::from(ALICE)), - local_asset_id.into(), - AccountId::from(ALICE).into(), - minimum_asset_balance - )); - - // create foreign asset - let foreign_asset_minimum_asset_balance = 3333333_u128; - assert_ok!(ForeignAssets::force_create( - RuntimeHelper::root_origin(), - foreign_asset_id_multilocation, - AccountId::from(SOME_ASSET_ADMIN).into(), - false, - foreign_asset_minimum_asset_balance - )); - - // We first mint enough asset for the account to exist for assets - assert_ok!(ForeignAssets::mint( - RuntimeHelper::origin_of(AccountId::from(SOME_ASSET_ADMIN)), - foreign_asset_id_multilocation, - AccountId::from(ALICE).into(), - 6 * foreign_asset_minimum_asset_balance - )); - - // check after - assert_eq!( - Assets::balance(local_asset_id, AccountId::from(ALICE)), - minimum_asset_balance - ); - assert_eq!( - ForeignAssets::balance(foreign_asset_id_multilocation, AccountId::from(ALICE)), - 6 * minimum_asset_balance - ); - assert_eq!(Balances::free_balance(AccountId::from(ALICE)), some_currency); - - let result: MultiAssets = Runtime::query_account_balances(AccountId::from(ALICE)) - .unwrap() - .try_into() - .unwrap(); - assert_eq!(result.len(), 3); - - // check currency - assert!(result.inner().iter().any(|asset| asset.eq( - &assets_common::fungible_conversion::convert_balance::( - some_currency - ) - .unwrap() - ))); - // check trusted asset - assert!(result.inner().iter().any(|asset| asset.eq(&( - AssetIdForTrustBackedAssetsConvert::convert_back(&local_asset_id).unwrap(), - minimum_asset_balance - ) - .into()))); - // check foreign asset - assert!(result.inner().iter().any(|asset| asset.eq(&( - Identity::convert_back(&foreign_asset_id_multilocation).unwrap(), - 6 * foreign_asset_minimum_asset_balance - ) - .into()))); - }); -} - -asset_test_utils::include_teleports_for_native_asset_works!( - Runtime, - AllPalletsWithoutSystem, - XcmConfig, - CheckingAccount, - WeightToFee, - ParachainSystem, - collator_session_keys(), - ExistentialDeposit::get(), - Box::new(|runtime_event_encoded: Vec| { - match RuntimeEvent::decode(&mut &runtime_event_encoded[..]) { - Ok(RuntimeEvent::PolkadotXcm(event)) => Some(event), - _ => None, - } - }), - 1000 -); - -asset_test_utils::include_teleports_for_foreign_assets_works!( - Runtime, - AllPalletsWithoutSystem, - XcmConfig, - CheckingAccount, - WeightToFee, - ParachainSystem, - ForeignCreatorsSovereignAccountOf, - ForeignAssetsInstance, - collator_session_keys(), - ExistentialDeposit::get(), - Box::new(|runtime_event_encoded: Vec| { - match RuntimeEvent::decode(&mut &runtime_event_encoded[..]) { - Ok(RuntimeEvent::PolkadotXcm(event)) => Some(event), - _ => None, - } - }), - Box::new(|runtime_event_encoded: Vec| { - match 
RuntimeEvent::decode(&mut &runtime_event_encoded[..]) { - Ok(RuntimeEvent::XcmpQueue(event)) => Some(event), - _ => None, - } - }) -); - -asset_test_utils::include_asset_transactor_transfer_with_local_consensus_currency_works!( - Runtime, - XcmConfig, - collator_session_keys(), - ExistentialDeposit::get(), - Box::new(|| { - assert!(Assets::asset_ids().collect::>().is_empty()); - assert!(ForeignAssets::asset_ids().collect::>().is_empty()); - }), - Box::new(|| { - assert!(Assets::asset_ids().collect::>().is_empty()); - assert!(ForeignAssets::asset_ids().collect::>().is_empty()); - }) -); - -asset_test_utils::include_asset_transactor_transfer_with_pallet_assets_instance_works!( - asset_transactor_transfer_with_trust_backed_assets_works, - Runtime, - XcmConfig, - TrustBackedAssetsInstance, - AssetIdForTrustBackedAssets, - AssetIdForTrustBackedAssetsConvert, - collator_session_keys(), - ExistentialDeposit::get(), - 12345, - Box::new(|| { - assert!(ForeignAssets::asset_ids().collect::>().is_empty()); - }), - Box::new(|| { - assert!(ForeignAssets::asset_ids().collect::>().is_empty()); - }) -); - -asset_test_utils::include_asset_transactor_transfer_with_pallet_assets_instance_works!( - asset_transactor_transfer_with_foreign_assets_works, - Runtime, - XcmConfig, - ForeignAssetsInstance, - MultiLocation, - JustTry, - collator_session_keys(), - ExistentialDeposit::get(), - MultiLocation { parents: 1, interior: X2(Parachain(1313), GeneralIndex(12345)) }, - Box::new(|| { - assert!(Assets::asset_ids().collect::>().is_empty()); - }), - Box::new(|| { - assert!(Assets::asset_ids().collect::>().is_empty()); - }) -); - -asset_test_utils::include_create_and_manage_foreign_assets_for_local_consensus_parachain_assets_works!( - Runtime, - XcmConfig, - WeightToFee, - ForeignCreatorsSovereignAccountOf, - ForeignAssetsInstance, - MultiLocation, - JustTry, - collator_session_keys(), - ExistentialDeposit::get(), - AssetDeposit::get(), - MetadataDepositBase::get(), - MetadataDepositPerByte::get(), - Box::new(|pallet_asset_call| RuntimeCall::ForeignAssets(pallet_asset_call).encode()), - Box::new(|runtime_event_encoded: Vec| { - match RuntimeEvent::decode(&mut &runtime_event_encoded[..]) { - Ok(RuntimeEvent::ForeignAssets(pallet_asset_event)) => Some(pallet_asset_event), - _ => None, - } - }), - Box::new(|| { - assert!(Assets::asset_ids().collect::>().is_empty()); - assert!(ForeignAssets::asset_ids().collect::>().is_empty()); - }), - Box::new(|| { - assert!(Assets::asset_ids().collect::>().is_empty()); - assert_eq!(ForeignAssets::asset_ids().collect::>().len(), 1); - }) -); - -#[test] -fn reserve_transfer_native_asset_to_non_teleport_para_works() { - asset_test_utils::test_cases::reserve_transfer_native_asset_to_non_teleport_para_works::< - Runtime, - AllPalletsWithoutSystem, - XcmConfig, - ParachainSystem, - XcmpQueue, - LocationToAccountId, - >( - collator_session_keys(), - ExistentialDeposit::get(), - AccountId::from(ALICE), - Box::new(|runtime_event_encoded: Vec| { - match RuntimeEvent::decode(&mut &runtime_event_encoded[..]) { - Ok(RuntimeEvent::PolkadotXcm(event)) => Some(event), - _ => None, - } - }), - Box::new(|runtime_event_encoded: Vec| { - match RuntimeEvent::decode(&mut &runtime_event_encoded[..]) { - Ok(RuntimeEvent::XcmpQueue(event)) => Some(event), - _ => None, - } - }), - WeightLimit::Unlimited, - ); -} diff --git a/cumulus/parachains/runtimes/assets/asset-hub-polkadot/Cargo.toml b/cumulus/parachains/runtimes/assets/asset-hub-polkadot/Cargo.toml deleted file mode 100644 index b5eff6b63af..00000000000 
--- a/cumulus/parachains/runtimes/assets/asset-hub-polkadot/Cargo.toml +++ /dev/null @@ -1,215 +0,0 @@ -[package] -name = "asset-hub-polkadot-runtime" -version = "0.9.420" -authors.workspace = true -edition.workspace = true -description = "Asset Hub Polkadot parachain runtime" -license = "Apache-2.0" - -[dependencies] -codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive", "max-encoded-len"] } -hex-literal = { version = "0.4.1", optional = true } -log = { version = "0.4.20", default-features = false } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -smallvec = "1.11.0" - -# Substrate -frame-benchmarking = { path = "../../../../../substrate/frame/benchmarking", default-features = false, optional = true} -frame-executive = { path = "../../../../../substrate/frame/executive", default-features = false} -frame-support = { path = "../../../../../substrate/frame/support", default-features = false} -frame-system = { path = "../../../../../substrate/frame/system", default-features = false} -frame-system-benchmarking = { path = "../../../../../substrate/frame/system/benchmarking", default-features = false, optional = true} -frame-system-rpc-runtime-api = { path = "../../../../../substrate/frame/system/rpc/runtime-api", default-features = false} -frame-try-runtime = { path = "../../../../../substrate/frame/try-runtime", default-features = false, optional = true} -pallet-asset-tx-payment = { path = "../../../../../substrate/frame/transaction-payment/asset-tx-payment", default-features = false } -pallet-assets = { path = "../../../../../substrate/frame/assets", default-features = false} -pallet-aura = { path = "../../../../../substrate/frame/aura", default-features = false} -pallet-authorship = { path = "../../../../../substrate/frame/authorship", default-features = false} -pallet-balances = { path = "../../../../../substrate/frame/balances", default-features = false} -pallet-multisig = { path = "../../../../../substrate/frame/multisig", default-features = false} -pallet-nfts = { path = "../../../../../substrate/frame/nfts", default-features = false} -pallet-nfts-runtime-api = { path = "../../../../../substrate/frame/nfts/runtime-api", default-features = false} -pallet-proxy = { path = "../../../../../substrate/frame/proxy", default-features = false} -pallet-session = { path = "../../../../../substrate/frame/session", default-features = false} -pallet-timestamp = { path = "../../../../../substrate/frame/timestamp", default-features = false} -pallet-transaction-payment = { path = "../../../../../substrate/frame/transaction-payment", default-features = false} -pallet-transaction-payment-rpc-runtime-api = { path = "../../../../../substrate/frame/transaction-payment/rpc/runtime-api", default-features = false} -pallet-uniques = { path = "../../../../../substrate/frame/uniques", default-features = false} -pallet-utility = { path = "../../../../../substrate/frame/utility", default-features = false} -sp-api = { path = "../../../../../substrate/primitives/api", default-features = false} -sp-block-builder = { path = "../../../../../substrate/primitives/block-builder", default-features = false} -sp-consensus-aura = { path = "../../../../../substrate/primitives/consensus/aura", default-features = false} -sp-core = { path = "../../../../../substrate/primitives/core", default-features = false} -sp-genesis-builder = { path = "../../../../../substrate/primitives/genesis-builder", default-features = false } -sp-inherents = { 
path = "../../../../../substrate/primitives/inherents", default-features = false} -sp-offchain = { path = "../../../../../substrate/primitives/offchain", default-features = false} -sp-runtime = { path = "../../../../../substrate/primitives/runtime", default-features = false} -sp-session = { path = "../../../../../substrate/primitives/session", default-features = false} -sp-std = { path = "../../../../../substrate/primitives/std", default-features = false} -sp-storage = { path = "../../../../../substrate/primitives/storage", default-features = false} -sp-transaction-pool = { path = "../../../../../substrate/primitives/transaction-pool", default-features = false} -sp-version = { path = "../../../../../substrate/primitives/version", default-features = false} -sp-weights = { path = "../../../../../substrate/primitives/weights", default-features = false} - -# Polkadot -pallet-xcm = { path = "../../../../../polkadot/xcm/pallet-xcm", default-features = false} -pallet-xcm-benchmarks = { path = "../../../../../polkadot/xcm/pallet-xcm-benchmarks", default-features = false, optional = true } -polkadot-core-primitives = { path = "../../../../../polkadot/core-primitives", default-features = false} -polkadot-parachain-primitives = { path = "../../../../../polkadot/parachain", default-features = false} -polkadot-runtime-common = { path = "../../../../../polkadot/runtime/common", default-features = false} -xcm = { package = "staging-xcm", path = "../../../../../polkadot/xcm", default-features = false} -xcm-builder = { package = "staging-xcm-builder", path = "../../../../../polkadot/xcm/xcm-builder", default-features = false} -xcm-executor = { package = "staging-xcm-executor", path = "../../../../../polkadot/xcm/xcm-executor", default-features = false} - -# Cumulus -cumulus-pallet-aura-ext = { path = "../../../../pallets/aura-ext", default-features = false } -pallet-message-queue = { path = "../../../../../substrate/frame/message-queue", default-features = false } -cumulus-pallet-dmp-queue = { path = "../../../../pallets/dmp-queue", default-features = false } -cumulus-pallet-parachain-system = { path = "../../../../pallets/parachain-system", default-features = false, features = ["parameterized-consensus-hook",] } -cumulus-pallet-session-benchmarking = { path = "../../../../pallets/session-benchmarking", default-features = false} -cumulus-pallet-xcm = { path = "../../../../pallets/xcm", default-features = false } -cumulus-pallet-xcmp-queue = { path = "../../../../pallets/xcmp-queue", default-features = false } -cumulus-primitives-core = { path = "../../../../primitives/core", default-features = false } -cumulus-primitives-utility = { path = "../../../../primitives/utility", default-features = false } -pallet-collator-selection = { path = "../../../../pallets/collator-selection", default-features = false } -parachain-info = { package = "staging-parachain-info", path = "../../../pallets/parachain-info", default-features = false } -parachains-common = { path = "../../../common", default-features = false } -assets-common = { path = "../common", default-features = false } - -[dev-dependencies] -hex-literal = "0.4.1" -asset-test-utils = { path = "../test-utils" } - -[build-dependencies] -substrate-wasm-builder = { path = "../../../../../substrate/utils/wasm-builder", optional = true } - -[features] -default = [ "std" ] -runtime-benchmarks = [ - "assets-common/runtime-benchmarks", - "cumulus-pallet-dmp-queue/runtime-benchmarks", - "cumulus-pallet-parachain-system/runtime-benchmarks", - 
"cumulus-pallet-session-benchmarking/runtime-benchmarks", - "cumulus-pallet-xcmp-queue/runtime-benchmarks", - "cumulus-primitives-core/runtime-benchmarks", - "cumulus-primitives-utility/runtime-benchmarks", - "frame-benchmarking/runtime-benchmarks", - "frame-support/runtime-benchmarks", - "frame-system-benchmarking/runtime-benchmarks", - "frame-system/runtime-benchmarks", - "hex-literal", - "pallet-asset-tx-payment/runtime-benchmarks", - "pallet-assets/runtime-benchmarks", - "pallet-balances/runtime-benchmarks", - "pallet-collator-selection/runtime-benchmarks", - "pallet-message-queue/runtime-benchmarks", - "pallet-multisig/runtime-benchmarks", - "pallet-nfts/runtime-benchmarks", - "pallet-proxy/runtime-benchmarks", - "pallet-timestamp/runtime-benchmarks", - "pallet-uniques/runtime-benchmarks", - "pallet-utility/runtime-benchmarks", - "pallet-xcm-benchmarks/runtime-benchmarks", - "pallet-xcm/runtime-benchmarks", - "parachains-common/runtime-benchmarks", - "polkadot-parachain-primitives/runtime-benchmarks", - "polkadot-runtime-common/runtime-benchmarks", - "sp-runtime/runtime-benchmarks", - "xcm-builder/runtime-benchmarks", - "xcm-executor/runtime-benchmarks", -] -try-runtime = [ - "cumulus-pallet-aura-ext/try-runtime", - "cumulus-pallet-dmp-queue/try-runtime", - "cumulus-pallet-parachain-system/try-runtime", - "cumulus-pallet-xcm/try-runtime", - "cumulus-pallet-xcmp-queue/try-runtime", - "frame-executive/try-runtime", - "frame-support/try-runtime", - "frame-system/try-runtime", - "frame-try-runtime/try-runtime", - "pallet-asset-tx-payment/try-runtime", - "pallet-assets/try-runtime", - "pallet-aura/try-runtime", - "pallet-authorship/try-runtime", - "pallet-balances/try-runtime", - "pallet-collator-selection/try-runtime", - "pallet-message-queue/try-runtime", - "pallet-multisig/try-runtime", - "pallet-nfts/try-runtime", - "pallet-proxy/try-runtime", - "pallet-session/try-runtime", - "pallet-timestamp/try-runtime", - "pallet-transaction-payment/try-runtime", - "pallet-uniques/try-runtime", - "pallet-utility/try-runtime", - "pallet-xcm/try-runtime", - "parachain-info/try-runtime", - "polkadot-runtime-common/try-runtime", - "sp-runtime/try-runtime", -] -std = [ - "assets-common/std", - "codec/std", - "cumulus-pallet-aura-ext/std", - "cumulus-pallet-dmp-queue/std", - "cumulus-pallet-parachain-system/std", - "cumulus-pallet-session-benchmarking/std", - "cumulus-pallet-xcm/std", - "cumulus-pallet-xcmp-queue/std", - "cumulus-primitives-core/std", - "cumulus-primitives-utility/std", - "frame-benchmarking?/std", - "frame-executive/std", - "frame-support/std", - "frame-system-benchmarking?/std", - "frame-system-rpc-runtime-api/std", - "frame-system/std", - "frame-try-runtime?/std", - "log/std", - "pallet-asset-tx-payment/std", - "pallet-assets/std", - "pallet-aura/std", - "pallet-authorship/std", - "pallet-balances/std", - "pallet-collator-selection/std", - "pallet-message-queue/std", - "pallet-multisig/std", - "pallet-nfts-runtime-api/std", - "pallet-nfts/std", - "pallet-proxy/std", - "pallet-session/std", - "pallet-timestamp/std", - "pallet-transaction-payment-rpc-runtime-api/std", - "pallet-transaction-payment/std", - "pallet-uniques/std", - "pallet-utility/std", - "pallet-xcm-benchmarks?/std", - "pallet-xcm/std", - "parachain-info/std", - "parachains-common/std", - "polkadot-core-primitives/std", - "polkadot-parachain-primitives/std", - "polkadot-runtime-common/std", - "scale-info/std", - "sp-api/std", - "sp-block-builder/std", - "sp-consensus-aura/std", - "sp-core/std", - 
"sp-genesis-builder/std", - "sp-inherents/std", - "sp-offchain/std", - "sp-runtime/std", - "sp-session/std", - "sp-std/std", - "sp-storage/std", - "sp-transaction-pool/std", - "sp-version/std", - "sp-weights/std", - "substrate-wasm-builder", - "xcm-builder/std", - "xcm-executor/std", - "xcm/std", -] - -experimental = [ "pallet-aura/experimental" ] diff --git a/cumulus/parachains/runtimes/assets/asset-hub-polkadot/build.rs b/cumulus/parachains/runtimes/assets/asset-hub-polkadot/build.rs deleted file mode 100644 index 60f8a125129..00000000000 --- a/cumulus/parachains/runtimes/assets/asset-hub-polkadot/build.rs +++ /dev/null @@ -1,26 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -#[cfg(feature = "std")] -fn main() { - substrate_wasm_builder::WasmBuilder::new() - .with_current_project() - .export_heap_base() - .import_memory() - .build() -} - -#[cfg(not(feature = "std"))] -fn main() {} diff --git a/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/lib.rs b/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/lib.rs deleted file mode 100644 index 78721b194d0..00000000000 --- a/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/lib.rs +++ /dev/null @@ -1,1383 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! # Asset Hub Polkadot Runtime -//! -//! Asset Hub Polkadot is a parachain that provides an interface to create, manage, and use assets. -//! Assets may be fungible or non-fungible. -//! -//! ## Renaming -//! -//! This chain was originally known as "Statemint". You may see references to Statemint, Statemine, -//! and Westmint throughout the codebase. These are synonymous with "Asset Hub Polkadot, Kusama, and -//! Westend", respectively. -//! -//! ## Assets -//! -//! - Fungibles: Configuration of `pallet-assets`. -//! - Non-Fungibles (NFTs): Configuration of `pallet-uniques`. -//! -//! ## Other Functionality -//! -//! ### Native Balances -//! -//! Asset Hub Polkadot uses its parent DOT token as its native asset. -//! -//! ### Governance -//! -//! As a system parachain, Asset Hub defers its governance (namely, its `Root` origin), to its -//! Relay Chain parent, Polkadot. -//! -//! ### Collator Selection -//! -//! Asset Hub uses `pallet-collator-selection`, a simple first-come-first-served registration -//! 
system where collators can reserve a small bond to join the block producer set. There is no -//! slashing. -//! -//! ### XCM -//! -//! Because Asset Hub is fully under the control of the Relay Chain, it is meant to be a -//! `TrustedTeleporter`. It can also serve as a reserve location to other parachains for DOT as well -//! as other local assets. - -#![cfg_attr(not(feature = "std"), no_std)] -#![recursion_limit = "256"] - -// Make the WASM binary available. -#[cfg(feature = "std")] -include!(concat!(env!("OUT_DIR"), "/wasm_binary.rs")); - -mod weights; -pub mod xcm_config; - -use assets_common::{ - foreign_creators::ForeignCreators, matching::FromSiblingParachain, MultiLocationForAssetId, -}; -use cumulus_pallet_parachain_system::RelayNumberStrictlyIncreases; -use cumulus_primitives_core::{AggregateMessageOrigin, ParaId}; -use polkadot_runtime_common::xcm_sender::NoPriceForMessageDelivery; -use sp_api::impl_runtime_apis; -use sp_core::{crypto::KeyTypeId, OpaqueMetadata}; -use sp_runtime::{ - create_runtime_str, generic, impl_opaque_keys, - traits::{AccountIdLookup, BlakeTwo256, Block as BlockT, ConvertInto, Verify}, - transaction_validity::{TransactionSource, TransactionValidity}, - ApplyExtrinsicResult, Perbill, -}; - -use sp_std::prelude::*; -#[cfg(feature = "std")] -use sp_version::NativeVersion; -use sp_version::RuntimeVersion; - -use codec::{Decode, Encode, MaxEncodedLen}; -use frame_support::{ - construct_runtime, - dispatch::DispatchClass, - genesis_builder_helper::{build_config, create_default_config}, - parameter_types, - traits::{ - AsEnsureOriginWithArg, ConstBool, ConstU32, ConstU64, ConstU8, EitherOfDiverse, - InstanceFilter, TransformOrigin, - }, - weights::{ConstantMultiplier, Weight}, - PalletId, -}; -use frame_system::{ - limits::{BlockLength, BlockWeights}, - EnsureRoot, EnsureSigned, -}; -use pallet_nfts::PalletFeatures; -pub use parachains_common as common; -use parachains_common::{ - impls::{AssetsToBlockAuthor, DealWithFees}, - message_queue::*, - polkadot::{consensus::*, currency::*, fee::WeightToFee}, - AccountId, AssetHubPolkadotAuraId as AuraId, AssetIdForTrustBackedAssets, Balance, BlockNumber, - Hash, Header, Nonce, Signature, AVERAGE_ON_INITIALIZE_RATIO, DAYS, HOURS, MAXIMUM_BLOCK_WEIGHT, - NORMAL_DISPATCH_RATIO, SLOT_DURATION, -}; -use sp_runtime::RuntimeDebug; -use xcm_config::{ - DotLocation, FellowshipLocation, ForeignAssetsConvertedConcreteId, GovernanceLocation, - TrustBackedAssetsConvertedConcreteId, XcmOriginToTransactDispatchOrigin, -}; - -#[cfg(any(feature = "std", test))] -pub use sp_runtime::BuildStorage; - -// Polkadot imports -use pallet_xcm::{EnsureXcm, IsVoiceOfBody}; -use polkadot_runtime_common::{BlockHashCount, SlowAdjustingFeeUpdate}; -use xcm::latest::BodyId; - -use crate::xcm_config::ForeignCreatorsSovereignAccountOf; -use weights::{BlockExecutionWeight, ExtrinsicBaseWeight, RocksDbWeight}; - -impl_opaque_keys! { - pub struct SessionKeys { - pub aura: Aura, - } -} - -#[sp_version::runtime_version] -pub const VERSION: RuntimeVersion = RuntimeVersion { - // Note: "statemint" is the legacy name for this chain. It has been renamed to - // "asset-hub-polkadot". Many wallets/tools depend on the `spec_name`, so it remains "statemint" - // for the time being. Wallets/tools should update to treat "asset-hub-polkadot" equally. 
- spec_name: create_runtime_str!("statemint"), - impl_name: create_runtime_str!("statemint"), - authoring_version: 1, - spec_version: 1_004_000, - impl_version: 0, - apis: RUNTIME_API_VERSIONS, - transaction_version: 13, - state_version: 0, -}; - -/// The version information used to identify this runtime when compiled natively. -#[cfg(feature = "std")] -pub fn native_version() -> NativeVersion { - NativeVersion { runtime_version: VERSION, can_author_with: Default::default() } -} - -parameter_types! { - pub const Version: RuntimeVersion = VERSION; - pub RuntimeBlockLength: BlockLength = - BlockLength::max_with_normal_ratio(5 * 1024 * 1024, NORMAL_DISPATCH_RATIO); - pub RuntimeBlockWeights: BlockWeights = BlockWeights::builder() - .base_block(BlockExecutionWeight::get()) - .for_class(DispatchClass::all(), |weights| { - weights.base_extrinsic = ExtrinsicBaseWeight::get(); - }) - .for_class(DispatchClass::Normal, |weights| { - weights.max_total = Some(NORMAL_DISPATCH_RATIO * MAXIMUM_BLOCK_WEIGHT); - }) - .for_class(DispatchClass::Operational, |weights| { - weights.max_total = Some(MAXIMUM_BLOCK_WEIGHT); - // Operational transactions have some extra reserved space, so that they - // are included even if block reached `MAXIMUM_BLOCK_WEIGHT`. - weights.reserved = Some( - MAXIMUM_BLOCK_WEIGHT - NORMAL_DISPATCH_RATIO * MAXIMUM_BLOCK_WEIGHT - ); - }) - .avg_block_initialization(AVERAGE_ON_INITIALIZE_RATIO) - .build_or_panic(); - pub const SS58Prefix: u8 = 0; -} - -// Configure FRAME pallets to include in runtime. -impl frame_system::Config for Runtime { - type BaseCallFilter = frame_support::traits::Everything; - type BlockWeights = RuntimeBlockWeights; - type BlockLength = RuntimeBlockLength; - type AccountId = AccountId; - type RuntimeCall = RuntimeCall; - type Lookup = AccountIdLookup; - type Nonce = Nonce; - type Hash = Hash; - type Hashing = BlakeTwo256; - type Block = Block; - type RuntimeEvent = RuntimeEvent; - type RuntimeOrigin = RuntimeOrigin; - type BlockHashCount = BlockHashCount; - type DbWeight = RocksDbWeight; - type Version = Version; - type PalletInfo = PalletInfo; - type OnNewAccount = (); - type OnKilledAccount = (); - type AccountData = pallet_balances::AccountData; - type SystemWeightInfo = weights::frame_system::WeightInfo; - type SS58Prefix = SS58Prefix; - type OnSetCode = cumulus_pallet_parachain_system::ParachainSetCode; - type MaxConsumers = frame_support::traits::ConstU32<16>; -} - -impl pallet_timestamp::Config for Runtime { - /// A timestamp: milliseconds since the unix epoch. - type Moment = u64; - type OnTimestampSet = Aura; - type MinimumPeriod = ConstU64<{ SLOT_DURATION / 2 }>; - type WeightInfo = weights::pallet_timestamp::WeightInfo; -} - -impl pallet_authorship::Config for Runtime { - type FindAuthor = pallet_session::FindAccountFromAuthorIndex; - type EventHandler = (CollatorSelection,); -} - -parameter_types! { - pub const ExistentialDeposit: Balance = EXISTENTIAL_DEPOSIT; -} - -impl pallet_balances::Config for Runtime { - type MaxLocks = ConstU32<50>; - /// The type for recording an account's balance. - type Balance = Balance; - /// The ubiquitous event type. 
- type RuntimeEvent = RuntimeEvent; - type DustRemoval = (); - type ExistentialDeposit = ExistentialDeposit; - type AccountStore = System; - type WeightInfo = weights::pallet_balances::WeightInfo; - type MaxReserves = ConstU32<50>; - type ReserveIdentifier = [u8; 8]; - type RuntimeHoldReason = RuntimeHoldReason; - type RuntimeFreezeReason = RuntimeFreezeReason; - type FreezeIdentifier = (); - type MaxHolds = ConstU32<0>; - type MaxFreezes = ConstU32<0>; -} - -parameter_types! { - /// Relay Chain `TransactionByteFee` / 10 - pub const TransactionByteFee: Balance = MILLICENTS; -} - -impl pallet_transaction_payment::Config for Runtime { - type RuntimeEvent = RuntimeEvent; - type OnChargeTransaction = - pallet_transaction_payment::CurrencyAdapter>; - type WeightToFee = WeightToFee; - type LengthToFee = ConstantMultiplier; - type FeeMultiplierUpdate = SlowAdjustingFeeUpdate; - type OperationalFeeMultiplier = ConstU8<5>; -} - -parameter_types! { - pub const AssetDeposit: Balance = 10 * UNITS; // 10 UNITS deposit to create fungible asset class - pub const AssetAccountDeposit: Balance = deposit(1, 16); - pub const ApprovalDeposit: Balance = EXISTENTIAL_DEPOSIT; - pub const AssetsStringLimit: u32 = 50; - /// Key = 32 bytes, Value = 36 bytes (32+1+1+1+1) - // https://github.com/paritytech/substrate/blob/069917b/frame/assets/src/lib.rs#L257L271 - pub const MetadataDepositBase: Balance = deposit(1, 68); - pub const MetadataDepositPerByte: Balance = deposit(0, 1); -} - -/// We allow root to execute privileged asset operations. -pub type AssetsForceOrigin = EnsureRoot; - -// Called "Trust Backed" assets because these are generally registered by some account, and users of -// the asset assume it has some claimed backing. The pallet is called `Assets` in -// `construct_runtime` to avoid breaking changes on storage reads. -pub type TrustBackedAssetsInstance = pallet_assets::Instance1; -type TrustBackedAssetsCall = pallet_assets::Call; -impl pallet_assets::Config for Runtime { - type RuntimeEvent = RuntimeEvent; - type Balance = Balance; - type AssetId = AssetIdForTrustBackedAssets; - type AssetIdParameter = codec::Compact; - type Currency = Balances; - type CreateOrigin = AsEnsureOriginWithArg>; - type ForceOrigin = AssetsForceOrigin; - type AssetDeposit = AssetDeposit; - type MetadataDepositBase = MetadataDepositBase; - type MetadataDepositPerByte = MetadataDepositPerByte; - type ApprovalDeposit = ApprovalDeposit; - type StringLimit = AssetsStringLimit; - type Freezer = (); - type Extra = (); - type WeightInfo = weights::pallet_assets_local::WeightInfo; - type CallbackHandle = (); - type AssetAccountDeposit = AssetAccountDeposit; - type RemoveItemsLimit = frame_support::traits::ConstU32<1000>; - #[cfg(feature = "runtime-benchmarks")] - type BenchmarkHelper = (); -} - -parameter_types! { - // we just reuse the same deposits - pub const ForeignAssetsAssetDeposit: Balance = AssetDeposit::get(); - pub const ForeignAssetsAssetAccountDeposit: Balance = AssetAccountDeposit::get(); - pub const ForeignAssetsApprovalDeposit: Balance = ApprovalDeposit::get(); - pub const ForeignAssetsAssetsStringLimit: u32 = AssetsStringLimit::get(); - pub const ForeignAssetsMetadataDepositBase: Balance = MetadataDepositBase::get(); - pub const ForeignAssetsMetadataDepositPerByte: Balance = MetadataDepositPerByte::get(); -} - -/// Assets managed by some foreign location. Note: we do not declare a `ForeignAssetsCall` type, as -/// this type is used in proxy definitions. 
We assume that a foreign location would not want to set -/// an individual, local account as a proxy for the issuance of their assets. This issuance should -/// be managed by the foreign location's governance. -pub type ForeignAssetsInstance = pallet_assets::Instance2; -impl pallet_assets::Config for Runtime { - type RuntimeEvent = RuntimeEvent; - type Balance = Balance; - type AssetId = MultiLocationForAssetId; - type AssetIdParameter = MultiLocationForAssetId; - type Currency = Balances; - type CreateOrigin = ForeignCreators< - (FromSiblingParachain>,), - ForeignCreatorsSovereignAccountOf, - AccountId, - >; - type ForceOrigin = AssetsForceOrigin; - type AssetDeposit = ForeignAssetsAssetDeposit; - type MetadataDepositBase = ForeignAssetsMetadataDepositBase; - type MetadataDepositPerByte = ForeignAssetsMetadataDepositPerByte; - type ApprovalDeposit = ForeignAssetsApprovalDeposit; - type StringLimit = ForeignAssetsAssetsStringLimit; - type Freezer = (); - type Extra = (); - type WeightInfo = weights::pallet_assets_foreign::WeightInfo; - type CallbackHandle = (); - type AssetAccountDeposit = ForeignAssetsAssetAccountDeposit; - type RemoveItemsLimit = frame_support::traits::ConstU32<1000>; - #[cfg(feature = "runtime-benchmarks")] - type BenchmarkHelper = xcm_config::XcmBenchmarkHelper; -} - -parameter_types! { - // One storage item; key size is 32; value is size 4+4+16+32 bytes = 56 bytes. - pub const DepositBase: Balance = deposit(1, 88); - // Additional storage item size of 32 bytes. - pub const DepositFactor: Balance = deposit(0, 32); - pub const MaxSignatories: u32 = 100; -} - -impl pallet_multisig::Config for Runtime { - type RuntimeEvent = RuntimeEvent; - type RuntimeCall = RuntimeCall; - type Currency = Balances; - type DepositBase = DepositBase; - type DepositFactor = DepositFactor; - type MaxSignatories = MaxSignatories; - type WeightInfo = weights::pallet_multisig::WeightInfo; -} - -impl pallet_utility::Config for Runtime { - type RuntimeEvent = RuntimeEvent; - type RuntimeCall = RuntimeCall; - type PalletsOrigin = OriginCaller; - type WeightInfo = weights::pallet_utility::WeightInfo; -} - -parameter_types! { - // One storage item; key size 32, value size 8; . - pub const ProxyDepositBase: Balance = deposit(1, 40); - // Additional storage item size of 33 bytes. - pub const ProxyDepositFactor: Balance = deposit(0, 33); - pub const MaxProxies: u16 = 32; - // One storage item; key size 32, value size 16 - pub const AnnouncementDepositBase: Balance = deposit(1, 48); - pub const AnnouncementDepositFactor: Balance = deposit(0, 66); - pub const MaxPending: u16 = 32; -} - -/// The type used to represent the kinds of proxying allowed. -#[derive( - Copy, - Clone, - Eq, - PartialEq, - Ord, - PartialOrd, - Encode, - Decode, - RuntimeDebug, - MaxEncodedLen, - scale_info::TypeInfo, -)] -pub enum ProxyType { - /// Fully permissioned proxy. Can execute any call on behalf of _proxied_. - Any, - /// Can execute any call that does not transfer funds or assets. - NonTransfer, - /// Proxy with the ability to reject time-delay proxy announcements. - CancelProxy, - /// Assets proxy. Can execute any call from `assets`, **including asset transfers**. - Assets, - /// Owner proxy. Can execute calls related to asset ownership. - AssetOwner, - /// Asset manager. Can execute calls related to asset management. - AssetManager, - /// Collator selection proxy. Can execute calls related to collator selection mechanism. 
- Collator, -} -impl Default for ProxyType { - fn default() -> Self { - Self::Any - } -} - -impl InstanceFilter for ProxyType { - fn filter(&self, c: &RuntimeCall) -> bool { - match self { - ProxyType::Any => true, - ProxyType::NonTransfer => !matches!( - c, - RuntimeCall::Balances { .. } | - RuntimeCall::Assets { .. } | - RuntimeCall::Nfts { .. } | - RuntimeCall::Uniques { .. } - ), - ProxyType::CancelProxy => matches!( - c, - RuntimeCall::Proxy(pallet_proxy::Call::reject_announcement { .. }) | - RuntimeCall::Utility { .. } | - RuntimeCall::Multisig { .. } - ), - ProxyType::Assets => { - matches!( - c, - RuntimeCall::Assets { .. } | - RuntimeCall::Utility { .. } | - RuntimeCall::Multisig { .. } | - RuntimeCall::Nfts { .. } | RuntimeCall::Uniques { .. } - ) - }, - ProxyType::AssetOwner => matches!( - c, - RuntimeCall::Assets(TrustBackedAssetsCall::create { .. }) | - RuntimeCall::Assets(TrustBackedAssetsCall::start_destroy { .. }) | - RuntimeCall::Assets(TrustBackedAssetsCall::destroy_accounts { .. }) | - RuntimeCall::Assets(TrustBackedAssetsCall::destroy_approvals { .. }) | - RuntimeCall::Assets(TrustBackedAssetsCall::finish_destroy { .. }) | - RuntimeCall::Assets(TrustBackedAssetsCall::transfer_ownership { .. }) | - RuntimeCall::Assets(TrustBackedAssetsCall::set_team { .. }) | - RuntimeCall::Assets(TrustBackedAssetsCall::set_metadata { .. }) | - RuntimeCall::Assets(TrustBackedAssetsCall::clear_metadata { .. }) | - RuntimeCall::Assets(TrustBackedAssetsCall::set_min_balance { .. }) | - RuntimeCall::Nfts(pallet_nfts::Call::create { .. }) | - RuntimeCall::Nfts(pallet_nfts::Call::destroy { .. }) | - RuntimeCall::Nfts(pallet_nfts::Call::redeposit { .. }) | - RuntimeCall::Nfts(pallet_nfts::Call::transfer_ownership { .. }) | - RuntimeCall::Nfts(pallet_nfts::Call::set_team { .. }) | - RuntimeCall::Nfts(pallet_nfts::Call::set_collection_max_supply { .. }) | - RuntimeCall::Nfts(pallet_nfts::Call::lock_collection { .. }) | - RuntimeCall::Uniques(pallet_uniques::Call::create { .. }) | - RuntimeCall::Uniques(pallet_uniques::Call::destroy { .. }) | - RuntimeCall::Uniques(pallet_uniques::Call::transfer_ownership { .. }) | - RuntimeCall::Uniques(pallet_uniques::Call::set_team { .. }) | - RuntimeCall::Uniques(pallet_uniques::Call::set_metadata { .. }) | - RuntimeCall::Uniques(pallet_uniques::Call::set_attribute { .. }) | - RuntimeCall::Uniques(pallet_uniques::Call::set_collection_metadata { .. }) | - RuntimeCall::Uniques(pallet_uniques::Call::clear_metadata { .. }) | - RuntimeCall::Uniques(pallet_uniques::Call::clear_attribute { .. }) | - RuntimeCall::Uniques(pallet_uniques::Call::clear_collection_metadata { .. }) | - RuntimeCall::Uniques(pallet_uniques::Call::set_collection_max_supply { .. }) | - RuntimeCall::Utility { .. } | - RuntimeCall::Multisig { .. } - ), - ProxyType::AssetManager => matches!( - c, - RuntimeCall::Assets(TrustBackedAssetsCall::mint { .. }) | - RuntimeCall::Assets(TrustBackedAssetsCall::burn { .. }) | - RuntimeCall::Assets(TrustBackedAssetsCall::freeze { .. }) | - RuntimeCall::Assets(TrustBackedAssetsCall::block { .. }) | - RuntimeCall::Assets(TrustBackedAssetsCall::thaw { .. }) | - RuntimeCall::Assets(TrustBackedAssetsCall::freeze_asset { .. }) | - RuntimeCall::Assets(TrustBackedAssetsCall::thaw_asset { .. }) | - RuntimeCall::Assets(TrustBackedAssetsCall::touch_other { .. }) | - RuntimeCall::Assets(TrustBackedAssetsCall::refund_other { .. }) | - RuntimeCall::Nfts(pallet_nfts::Call::force_mint { .. }) | - RuntimeCall::Nfts(pallet_nfts::Call::update_mint_settings { .. 
}) | - RuntimeCall::Nfts(pallet_nfts::Call::mint_pre_signed { .. }) | - RuntimeCall::Nfts(pallet_nfts::Call::set_attributes_pre_signed { .. }) | - RuntimeCall::Nfts(pallet_nfts::Call::lock_item_transfer { .. }) | - RuntimeCall::Nfts(pallet_nfts::Call::unlock_item_transfer { .. }) | - RuntimeCall::Nfts(pallet_nfts::Call::lock_item_properties { .. }) | - RuntimeCall::Nfts(pallet_nfts::Call::set_metadata { .. }) | - RuntimeCall::Nfts(pallet_nfts::Call::clear_metadata { .. }) | - RuntimeCall::Nfts(pallet_nfts::Call::set_collection_metadata { .. }) | - RuntimeCall::Nfts(pallet_nfts::Call::clear_collection_metadata { .. }) | - RuntimeCall::Uniques(pallet_uniques::Call::mint { .. }) | - RuntimeCall::Uniques(pallet_uniques::Call::burn { .. }) | - RuntimeCall::Uniques(pallet_uniques::Call::freeze { .. }) | - RuntimeCall::Uniques(pallet_uniques::Call::thaw { .. }) | - RuntimeCall::Uniques(pallet_uniques::Call::freeze_collection { .. }) | - RuntimeCall::Uniques(pallet_uniques::Call::thaw_collection { .. }) | - RuntimeCall::Utility { .. } | - RuntimeCall::Multisig { .. } - ), - ProxyType::Collator => matches!( - c, - RuntimeCall::CollatorSelection { .. } | - RuntimeCall::Utility { .. } | - RuntimeCall::Multisig { .. } - ), - } - } - - fn is_superset(&self, o: &Self) -> bool { - match (self, o) { - (x, y) if x == y => true, - (ProxyType::Any, _) => true, - (_, ProxyType::Any) => false, - (ProxyType::Assets, ProxyType::AssetOwner) => true, - (ProxyType::Assets, ProxyType::AssetManager) => true, - (ProxyType::NonTransfer, ProxyType::Collator) => true, - _ => false, - } - } -} - -impl pallet_proxy::Config for Runtime { - type RuntimeEvent = RuntimeEvent; - type RuntimeCall = RuntimeCall; - type Currency = Balances; - type ProxyType = ProxyType; - type ProxyDepositBase = ProxyDepositBase; - type ProxyDepositFactor = ProxyDepositFactor; - type MaxProxies = MaxProxies; - type WeightInfo = weights::pallet_proxy::WeightInfo; - type MaxPending = MaxPending; - type CallHasher = BlakeTwo256; - type AnnouncementDepositBase = AnnouncementDepositBase; - type AnnouncementDepositFactor = AnnouncementDepositFactor; -} - -parameter_types! { - pub const ReservedXcmpWeight: Weight = MAXIMUM_BLOCK_WEIGHT.saturating_div(4); - pub const ReservedDmpWeight: Weight = MAXIMUM_BLOCK_WEIGHT.saturating_div(4); -} - -impl cumulus_pallet_parachain_system::Config for Runtime { - type WeightInfo = weights::cumulus_pallet_parachain_system::WeightInfo; - type RuntimeEvent = RuntimeEvent; - type OnSystemEvent = (); - type SelfParaId = parachain_info::Pallet; - type DmpQueue = frame_support::traits::EnqueueWithOrigin; - type ReservedDmpWeight = ReservedDmpWeight; - type OutboundXcmpMessageSource = XcmpQueue; - type XcmpMessageHandler = XcmpQueue; - type ReservedXcmpWeight = ReservedXcmpWeight; - type CheckAssociatedRelayNumber = RelayNumberStrictlyIncreases; - type ConsensusHook = cumulus_pallet_aura_ext::FixedVelocityConsensusHook< - Runtime, - RELAY_CHAIN_SLOT_DURATION_MILLIS, - BLOCK_PROCESSING_VELOCITY, - UNINCLUDED_SEGMENT_CAPACITY, - >; -} - -parameter_types! 
{ - pub MessageQueueServiceWeight: Weight = Perbill::from_percent(35) * RuntimeBlockWeights::get().max_block; -} - -impl pallet_message_queue::Config for Runtime { - type RuntimeEvent = RuntimeEvent; - type WeightInfo = weights::pallet_message_queue::WeightInfo; - #[cfg(feature = "runtime-benchmarks")] - type MessageProcessor = pallet_message_queue::mock_helpers::NoopMessageProcessor< - cumulus_primitives_core::AggregateMessageOrigin, - >; - #[cfg(not(feature = "runtime-benchmarks"))] - type MessageProcessor = xcm_builder::ProcessXcmMessage< - AggregateMessageOrigin, - xcm_executor::XcmExecutor, - RuntimeCall, - >; - type Size = u32; - // The XCMP queue pallet is only ever able to handle the `Sibling(ParaId)` origin: - type QueueChangeHandler = NarrowOriginToSibling; - type QueuePausedQuery = NarrowOriginToSibling; - type HeapSize = sp_core::ConstU32<{ 64 * 1024 }>; - type MaxStale = sp_core::ConstU32<8>; - type ServiceWeight = MessageQueueServiceWeight; -} - -impl parachain_info::Config for Runtime {} - -impl cumulus_pallet_aura_ext::Config for Runtime {} - -parameter_types! { - // Fellows pluralistic body. - pub const FellowsBodyId: BodyId = BodyId::Technical; -} - -impl cumulus_pallet_xcmp_queue::Config for Runtime { - type WeightInfo = weights::cumulus_pallet_xcmp_queue::WeightInfo; - type RuntimeEvent = RuntimeEvent; - type ChannelInfo = ParachainSystem; - type VersionWrapper = PolkadotXcm; - // Enqueue XCMP messages from siblings for later processing. - type XcmpQueue = TransformOrigin; - type MaxInboundSuspended = sp_core::ConstU32<1_000>; - type ControllerOrigin = EitherOfDiverse< - EnsureRoot, - EnsureXcm>, - >; - type ControllerOriginConverter = XcmOriginToTransactDispatchOrigin; - type PriceForSiblingDelivery = NoPriceForMessageDelivery; -} - -parameter_types! { - pub const RelayOrigin: AggregateMessageOrigin = AggregateMessageOrigin::Parent; -} - -impl cumulus_pallet_dmp_queue::Config for Runtime { - type WeightInfo = weights::cumulus_pallet_dmp_queue::WeightInfo; - type RuntimeEvent = RuntimeEvent; - type DmpSink = frame_support::traits::EnqueueWithOrigin; -} - -parameter_types! { - pub const Period: u32 = 6 * HOURS; - pub const Offset: u32 = 0; -} - -impl pallet_session::Config for Runtime { - type RuntimeEvent = RuntimeEvent; - type ValidatorId = ::AccountId; - // we don't have stash and controller, thus we don't need the convert as well. - type ValidatorIdOf = pallet_collator_selection::IdentityCollator; - type ShouldEndSession = pallet_session::PeriodicSessions; - type NextSessionRotation = pallet_session::PeriodicSessions; - type SessionManager = CollatorSelection; - // Essentially just Aura, but let's be pedantic. - type SessionHandler = ::KeyTypeIdProviders; - type Keys = SessionKeys; - type WeightInfo = weights::pallet_session::WeightInfo; -} - -impl pallet_aura::Config for Runtime { - type AuthorityId = AuraId; - type DisabledValidators = (); - type MaxAuthorities = ConstU32<100_000>; - type AllowMultipleBlocksPerSlot = ConstBool; - #[cfg(feature = "experimental")] - type SlotDuration = pallet_aura::MinimumPeriodTimesTwo; -} - -parameter_types! { - pub const PotId: PalletId = PalletId(*b"PotStake"); - pub const SessionLength: BlockNumber = 6 * HOURS; - // `StakingAdmin` pluralistic body. - pub const StakingAdminBodyId: BodyId = BodyId::Defense; -} - -/// We allow root and the `StakingAdmin` to execute privileged collator selection operations. 
-pub type CollatorSelectionUpdateOrigin = EitherOfDiverse< - EnsureRoot, - EnsureXcm>, ->; - -impl pallet_collator_selection::Config for Runtime { - type RuntimeEvent = RuntimeEvent; - type Currency = Balances; - type UpdateOrigin = CollatorSelectionUpdateOrigin; - type PotId = PotId; - type MaxCandidates = ConstU32<100>; - type MinEligibleCollators = ConstU32<4>; - type MaxInvulnerables = ConstU32<20>; - // should be a multiple of session or things will get inconsistent - type KickThreshold = Period; - type ValidatorId = ::AccountId; - type ValidatorIdOf = pallet_collator_selection::IdentityCollator; - type ValidatorRegistration = Session; - type WeightInfo = weights::pallet_collator_selection::WeightInfo; -} - -impl pallet_asset_tx_payment::Config for Runtime { - type RuntimeEvent = RuntimeEvent; - type Fungibles = Assets; - type OnChargeAssetTransaction = pallet_asset_tx_payment::FungiblesAdapter< - pallet_assets::BalanceToAssetBalance< - Balances, - Runtime, - ConvertInto, - TrustBackedAssetsInstance, - >, - AssetsToBlockAuthor, - >; -} - -parameter_types! { - pub const UniquesCollectionDeposit: Balance = 10 * UNITS; // 10 UNIT deposit to create uniques class - pub const UniquesItemDeposit: Balance = UNITS / 100; // 1 / 100 UNIT deposit to create uniques instance - pub const UniquesMetadataDepositBase: Balance = deposit(1, 129); - pub const UniquesAttributeDepositBase: Balance = deposit(1, 0); - pub const UniquesDepositPerByte: Balance = deposit(0, 1); -} - -impl pallet_uniques::Config for Runtime { - type RuntimeEvent = RuntimeEvent; - type CollectionId = u32; - type ItemId = u32; - type Currency = Balances; - type ForceOrigin = AssetsForceOrigin; - type CollectionDeposit = UniquesCollectionDeposit; - type ItemDeposit = UniquesItemDeposit; - type MetadataDepositBase = UniquesMetadataDepositBase; - type AttributeDepositBase = UniquesAttributeDepositBase; - type DepositPerByte = UniquesDepositPerByte; - type StringLimit = ConstU32<128>; - type KeyLimit = ConstU32<32>; // Max 32 bytes per key - type ValueLimit = ConstU32<64>; // Max 64 bytes per value - type WeightInfo = weights::pallet_uniques::WeightInfo; - #[cfg(feature = "runtime-benchmarks")] - type Helper = (); - type CreateOrigin = AsEnsureOriginWithArg>; - type Locker = (); -} - -parameter_types! 
{ - pub NftsPalletFeatures: PalletFeatures = PalletFeatures::all_enabled(); - pub const NftsMaxDeadlineDuration: BlockNumber = 12 * 30 * DAYS; - // re-use the Uniques deposits - pub const NftsCollectionDeposit: Balance = UniquesCollectionDeposit::get(); - pub const NftsItemDeposit: Balance = UniquesItemDeposit::get(); - pub const NftsMetadataDepositBase: Balance = UniquesMetadataDepositBase::get(); - pub const NftsAttributeDepositBase: Balance = UniquesAttributeDepositBase::get(); - pub const NftsDepositPerByte: Balance = UniquesDepositPerByte::get(); -} - -impl pallet_nfts::Config for Runtime { - type RuntimeEvent = RuntimeEvent; - type CollectionId = u32; - type ItemId = u32; - type Currency = Balances; - type CreateOrigin = AsEnsureOriginWithArg>; - type ForceOrigin = AssetsForceOrigin; - type Locker = (); - type CollectionDeposit = NftsCollectionDeposit; - type ItemDeposit = NftsItemDeposit; - type MetadataDepositBase = NftsMetadataDepositBase; - type AttributeDepositBase = NftsAttributeDepositBase; - type DepositPerByte = NftsDepositPerByte; - type StringLimit = ConstU32<256>; - type KeyLimit = ConstU32<64>; - type ValueLimit = ConstU32<256>; - type ApprovalsLimit = ConstU32<20>; - type ItemAttributesApprovalsLimit = ConstU32<30>; - type MaxTips = ConstU32<10>; - type MaxDeadlineDuration = NftsMaxDeadlineDuration; - type MaxAttributesPerCall = ConstU32<10>; - type Features = NftsPalletFeatures; - type OffchainSignature = Signature; - type OffchainPublic = ::Signer; - type WeightInfo = weights::pallet_nfts::WeightInfo; - #[cfg(feature = "runtime-benchmarks")] - type Helper = (); -} - -// Create the runtime by composing the FRAME pallets that were previously configured. -construct_runtime!( - pub enum Runtime - { - // System support stuff. - System: frame_system::{Pallet, Call, Config, Storage, Event} = 0, - ParachainSystem: cumulus_pallet_parachain_system::{ - Pallet, Call, Config, Storage, Inherent, Event, ValidateUnsigned, - } = 1, - // RandomnessCollectiveFlip = 2 removed - Timestamp: pallet_timestamp::{Pallet, Call, Storage, Inherent} = 3, - ParachainInfo: parachain_info::{Pallet, Storage, Config} = 4, - - // Monetary stuff. - Balances: pallet_balances::{Pallet, Call, Storage, Config, Event} = 10, - TransactionPayment: pallet_transaction_payment::{Pallet, Storage, Event} = 11, - AssetTxPayment: pallet_asset_tx_payment::{Pallet, Event} = 12, - - // Collator support. the order of these 5 are important and shall not change. - Authorship: pallet_authorship::{Pallet, Storage} = 20, - CollatorSelection: pallet_collator_selection::{Pallet, Call, Storage, Event, Config} = 21, - Session: pallet_session::{Pallet, Call, Storage, Event, Config} = 22, - Aura: pallet_aura::{Pallet, Storage, Config} = 23, - AuraExt: cumulus_pallet_aura_ext::{Pallet, Storage, Config} = 24, - - // XCM helpers. - XcmpQueue: cumulus_pallet_xcmp_queue::{Pallet, Call, Storage, Event} = 30, - PolkadotXcm: pallet_xcm::{Pallet, Call, Storage, Event, Origin, Config} = 31, - CumulusXcm: cumulus_pallet_xcm::{Pallet, Event, Origin} = 32, - DmpQueue: cumulus_pallet_dmp_queue::{Pallet, Call, Storage, Event} = 33, - MessageQueue: pallet_message_queue::{Pallet, Call, Storage, Event} = 34, - - // Handy utilities. - Utility: pallet_utility::{Pallet, Call, Event} = 40, - Multisig: pallet_multisig::{Pallet, Call, Storage, Event} = 41, - Proxy: pallet_proxy::{Pallet, Call, Storage, Event} = 42, - - // The main stage. 
- Assets: pallet_assets::::{Pallet, Call, Storage, Event} = 50, - Uniques: pallet_uniques::{Pallet, Call, Storage, Event} = 51, - Nfts: pallet_nfts::{Pallet, Call, Storage, Event} = 52, - ForeignAssets: pallet_assets::::{Pallet, Call, Storage, Event} = 53, - } -); - -/// The address format for describing accounts. -pub type Address = sp_runtime::MultiAddress; -/// Block type as expected by this runtime. -pub type Block = generic::Block; -/// A Block signed with a Justification -pub type SignedBlock = generic::SignedBlock; -/// BlockId type as expected by this runtime. -pub type BlockId = generic::BlockId; -/// The SignedExtension to the basic transaction logic. -pub type SignedExtra = ( - frame_system::CheckNonZeroSender, - frame_system::CheckSpecVersion, - frame_system::CheckTxVersion, - frame_system::CheckGenesis, - frame_system::CheckEra, - frame_system::CheckNonce, - frame_system::CheckWeight, - pallet_asset_tx_payment::ChargeAssetTxPayment, -); -/// Unchecked extrinsic type as expected by this runtime. -pub type UncheckedExtrinsic = - generic::UncheckedExtrinsic; -/// Migrations to apply on runtime upgrade. -pub type Migrations = ( - // unreleased - pallet_collator_selection::migration::v1::MigrateToV1, -); - -/// Executive: handles dispatch to the various modules. -pub type Executive = frame_executive::Executive< - Runtime, - Block, - frame_system::ChainContext, - Runtime, - AllPalletsWithSystem, - Migrations, ->; - -#[cfg(feature = "runtime-benchmarks")] -mod benches { - frame_benchmarking::define_benchmarks!( - [frame_system, SystemBench::] - [pallet_assets, Local] - [pallet_assets, Foreign] - [pallet_balances, Balances] - [pallet_message_queue, MessageQueue] - [pallet_multisig, Multisig] - [pallet_nfts, Nfts] - [pallet_proxy, Proxy] - [pallet_session, SessionBench::] - [pallet_uniques, Uniques] - [pallet_utility, Utility] - [pallet_timestamp, Timestamp] - [pallet_collator_selection, CollatorSelection] - [cumulus_pallet_parachain_system, ParachainSystem] - [cumulus_pallet_xcmp_queue, XcmpQueue] - [cumulus_pallet_dmp_queue, DmpQueue] - // XCM - [pallet_xcm, PalletXcmExtrinsicsBenchmark::] - // NOTE: Make sure you point to the individual modules below. - [pallet_xcm_benchmarks::fungible, XcmBalances] - [pallet_xcm_benchmarks::generic, XcmGeneric] - ); -} - -impl_runtime_apis! 
{ - impl sp_consensus_aura::AuraApi for Runtime { - fn slot_duration() -> sp_consensus_aura::SlotDuration { - sp_consensus_aura::SlotDuration::from_millis(Aura::slot_duration()) - } - - fn authorities() -> Vec { - Aura::authorities().into_inner() - } - } - - impl sp_api::Core for Runtime { - fn version() -> RuntimeVersion { - VERSION - } - - fn execute_block(block: Block) { - Executive::execute_block(block) - } - - fn initialize_block(header: &::Header) { - Executive::initialize_block(header) - } - } - - impl sp_api::Metadata for Runtime { - fn metadata() -> OpaqueMetadata { - OpaqueMetadata::new(Runtime::metadata().into()) - } - - fn metadata_at_version(version: u32) -> Option { - Runtime::metadata_at_version(version) - } - - fn metadata_versions() -> sp_std::vec::Vec { - Runtime::metadata_versions() - } - } - - impl sp_block_builder::BlockBuilder for Runtime { - fn apply_extrinsic(extrinsic: ::Extrinsic) -> ApplyExtrinsicResult { - Executive::apply_extrinsic(extrinsic) - } - - fn finalize_block() -> ::Header { - Executive::finalize_block() - } - - fn inherent_extrinsics(data: sp_inherents::InherentData) -> Vec<::Extrinsic> { - data.create_extrinsics() - } - - fn check_inherents( - block: Block, - data: sp_inherents::InherentData, - ) -> sp_inherents::CheckInherentsResult { - data.check_extrinsics(&block) - } - } - - impl sp_transaction_pool::runtime_api::TaggedTransactionQueue for Runtime { - fn validate_transaction( - source: TransactionSource, - tx: ::Extrinsic, - block_hash: ::Hash, - ) -> TransactionValidity { - Executive::validate_transaction(source, tx, block_hash) - } - } - - impl sp_offchain::OffchainWorkerApi for Runtime { - fn offchain_worker(header: &::Header) { - Executive::offchain_worker(header) - } - } - - impl sp_session::SessionKeys for Runtime { - fn generate_session_keys(seed: Option>) -> Vec { - SessionKeys::generate(seed) - } - - fn decode_session_keys( - encoded: Vec, - ) -> Option, KeyTypeId)>> { - SessionKeys::decode_into_raw_public_keys(&encoded) - } - } - - impl frame_system_rpc_runtime_api::AccountNonceApi for Runtime { - fn account_nonce(account: AccountId) -> Nonce { - System::account_nonce(account) - } - } - - impl pallet_transaction_payment_rpc_runtime_api::TransactionPaymentApi for Runtime { - fn query_info( - uxt: ::Extrinsic, - len: u32, - ) -> pallet_transaction_payment_rpc_runtime_api::RuntimeDispatchInfo { - TransactionPayment::query_info(uxt, len) - } - fn query_fee_details( - uxt: ::Extrinsic, - len: u32, - ) -> pallet_transaction_payment::FeeDetails { - TransactionPayment::query_fee_details(uxt, len) - } - fn query_weight_to_fee(weight: Weight) -> Balance { - TransactionPayment::weight_to_fee(weight) - } - fn query_length_to_fee(length: u32) -> Balance { - TransactionPayment::length_to_fee(length) - } - } - - impl pallet_transaction_payment_rpc_runtime_api::TransactionPaymentCallApi - for Runtime - { - fn query_call_info( - call: RuntimeCall, - len: u32, - ) -> pallet_transaction_payment::RuntimeDispatchInfo { - TransactionPayment::query_call_info(call, len) - } - fn query_call_fee_details( - call: RuntimeCall, - len: u32, - ) -> pallet_transaction_payment::FeeDetails { - TransactionPayment::query_call_fee_details(call, len) - } - fn query_weight_to_fee(weight: Weight) -> Balance { - TransactionPayment::weight_to_fee(weight) - } - fn query_length_to_fee(length: u32) -> Balance { - TransactionPayment::length_to_fee(length) - } - } - - impl assets_common::runtime_api::FungiblesApi< - Block, - AccountId, - > for Runtime - { - fn 
query_account_balances(account: AccountId) -> Result { - use assets_common::fungible_conversion::{convert, convert_balance}; - Ok([ - // collect pallet_balance - { - let balance = Balances::free_balance(account.clone()); - if balance > 0 { - vec![convert_balance::(balance)?] - } else { - vec![] - } - }, - // collect pallet_assets (TrustBackedAssets) - convert::<_, _, _, _, TrustBackedAssetsConvertedConcreteId>( - Assets::account_balances(account.clone()) - .iter() - .filter(|(_, balance)| balance > &0) - )?, - // collect pallet_assets (ForeignAssets) - convert::<_, _, _, _, ForeignAssetsConvertedConcreteId>( - ForeignAssets::account_balances(account) - .iter() - .filter(|(_, balance)| balance > &0) - )?, - // collect ... e.g. other tokens - ].concat().into()) - } - } - - impl cumulus_primitives_core::CollectCollationInfo for Runtime { - fn collect_collation_info(header: &::Header) -> cumulus_primitives_core::CollationInfo { - ParachainSystem::collect_collation_info(header) - } - } - - #[cfg(feature = "try-runtime")] - impl frame_try_runtime::TryRuntime for Runtime { - fn on_runtime_upgrade(checks: frame_try_runtime::UpgradeCheckSelect) -> (Weight, Weight) { - let weight = Executive::try_runtime_upgrade(checks).unwrap(); - (weight, RuntimeBlockWeights::get().max_block) - } - - fn execute_block( - block: Block, - state_root_check: bool, - signature_check: bool, - select: frame_try_runtime::TryStateSelect, - ) -> Weight { - // NOTE: intentional unwrap: we don't want to propagate the error backwards, and want to - // have a backtrace here. - Executive::try_execute_block(block, state_root_check, signature_check, select).unwrap() - } - } - - #[cfg(feature = "runtime-benchmarks")] - impl frame_benchmarking::Benchmark for Runtime { - fn benchmark_metadata(extra: bool) -> ( - Vec, - Vec, - ) { - use frame_benchmarking::{Benchmarking, BenchmarkList}; - use frame_support::traits::StorageInfoTrait; - use frame_system_benchmarking::Pallet as SystemBench; - use cumulus_pallet_session_benchmarking::Pallet as SessionBench; - use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsicsBenchmark; - - // This is defined once again in dispatch_benchmark, because list_benchmarks! - // and add_benchmarks! are macros exported by define_benchmarks! macros and those types - // are referenced in that call. - type XcmBalances = pallet_xcm_benchmarks::fungible::Pallet::; - type XcmGeneric = pallet_xcm_benchmarks::generic::Pallet::; - - // Benchmark files generated for `Assets/ForeignAssets` instances are by default - // `pallet_assets_assets.rs / pallet_assets_foreign_assets`, which is not really nice, - // so with this redefinition we can change names to nicer: - // `pallet_assets_local.rs / pallet_assets_foreign.rs`. 
- type Local = pallet_assets::Pallet::; - type Foreign = pallet_assets::Pallet::; - - let mut list = Vec::::new(); - list_benchmarks!(list, extra); - - let storage_info = AllPalletsWithSystem::storage_info(); - (list, storage_info) - } - - fn dispatch_benchmark( - config: frame_benchmarking::BenchmarkConfig - ) -> Result, sp_runtime::RuntimeString> { - use frame_benchmarking::{Benchmarking, BenchmarkBatch, BenchmarkError}; - use sp_storage::TrackedStorageKey; - - use frame_system_benchmarking::Pallet as SystemBench; - impl frame_system_benchmarking::Config for Runtime { - fn setup_set_code_requirements(code: &sp_std::vec::Vec) -> Result<(), BenchmarkError> { - ParachainSystem::initialize_for_set_code_benchmark(code.len() as u32); - Ok(()) - } - - fn verify_set_code() { - System::assert_last_event(cumulus_pallet_parachain_system::Event::::ValidationFunctionStored.into()); - } - } - - use cumulus_pallet_session_benchmarking::Pallet as SessionBench; - impl cumulus_pallet_session_benchmarking::Config for Runtime {} - - use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsicsBenchmark; - impl pallet_xcm::benchmarking::Config for Runtime { - fn reachable_dest() -> Option { - Some(Parent.into()) - } - - fn teleportable_asset_and_dest() -> Option<(MultiAsset, MultiLocation)> { - // Relay/native token can be teleported between AH and Relay. - Some(( - MultiAsset { - fun: Fungible(EXISTENTIAL_DEPOSIT), - id: Concrete(Parent.into()) - }, - Parent.into(), - )) - } - - fn reserve_transferable_asset_and_dest() -> Option<(MultiAsset, MultiLocation)> { - // AH can reserve transfer native token to some random parachain. - let random_para_id = 43211234; - ParachainSystem::open_outbound_hrmp_channel_for_benchmarks_or_tests( - random_para_id.into() - ); - Some(( - MultiAsset { - fun: Fungible(EXISTENTIAL_DEPOSIT), - id: Concrete(Parent.into()) - }, - ParentThen(Parachain(random_para_id).into()).into(), - )) - } - } - - use xcm::latest::prelude::*; - use xcm_config::{DotLocation, MaxAssetsIntoHolding}; - use pallet_xcm_benchmarks::asset_instance_from; - - parameter_types! { - pub ExistentialDepositMultiAsset: Option = Some(( - xcm_config::DotLocation::get(), - ExistentialDeposit::get() - ).into()); - } - - impl pallet_xcm_benchmarks::Config for Runtime { - type XcmConfig = xcm_config::XcmConfig; - type AccountIdConverter = xcm_config::LocationToAccountId; - type DeliveryHelper = cumulus_primitives_utility::ToParentDeliveryHelper< - xcm_config::XcmConfig, - ExistentialDepositMultiAsset, - xcm_config::PriceForParentDelivery, - >; - fn valid_destination() -> Result { - Ok(DotLocation::get()) - } - fn worst_case_holding(depositable_count: u32) -> MultiAssets { - // A mix of fungible, non-fungible, and concrete assets. - let holding_non_fungibles = MaxAssetsIntoHolding::get() / 2 - depositable_count; - let holding_fungibles = holding_non_fungibles - 1; - let fungibles_amount: u128 = 100; - let mut assets = (0..holding_fungibles) - .map(|i| { - MultiAsset { - id: Concrete(GeneralIndex(i as u128).into()), - fun: Fungible(fungibles_amount * i as u128), - } - }) - .chain(core::iter::once(MultiAsset { id: Concrete(Here.into()), fun: Fungible(u128::MAX) })) - .chain((0..holding_non_fungibles).map(|i| MultiAsset { - id: Concrete(GeneralIndex(i as u128).into()), - fun: NonFungible(asset_instance_from(i)), - })) - .collect::>(); - - assets.push(MultiAsset { - id: Concrete(DotLocation::get()), - fun: Fungible(1_000_000 * UNITS), - }); - assets.into() - } - } - - parameter_types! 
{ - pub const TrustedTeleporter: Option<(MultiLocation, MultiAsset)> = Some(( - DotLocation::get(), - MultiAsset { fun: Fungible(UNITS), id: Concrete(DotLocation::get()) }, - )); - pub const CheckedAccount: Option<(AccountId, xcm_builder::MintLocation)> = None; - pub const TrustedReserve: Option<(MultiLocation, MultiAsset)> = None; - } - - impl pallet_xcm_benchmarks::fungible::Config for Runtime { - type TransactAsset = Balances; - - type CheckedAccount = CheckedAccount; - type TrustedTeleporter = TrustedTeleporter; - type TrustedReserve = TrustedReserve; - - fn get_multi_asset() -> MultiAsset { - MultiAsset { - id: Concrete(DotLocation::get()), - fun: Fungible(UNITS), - } - } - } - - impl pallet_xcm_benchmarks::generic::Config for Runtime { - type TransactAsset = Balances; - type RuntimeCall = RuntimeCall; - - fn worst_case_response() -> (u64, Response) { - (0u64, Response::Version(Default::default())) - } - - fn worst_case_asset_exchange() -> Result<(MultiAssets, MultiAssets), BenchmarkError> { - Err(BenchmarkError::Skip) - } - - fn universal_alias() -> Result<(MultiLocation, Junction), BenchmarkError> { - Err(BenchmarkError::Skip) - } - - fn transact_origin_and_runtime_call() -> Result<(MultiLocation, RuntimeCall), BenchmarkError> { - Ok((DotLocation::get(), frame_system::Call::remark_with_event { remark: vec![] }.into())) - } - - fn subscribe_origin() -> Result { - Ok(DotLocation::get()) - } - - fn claimable_asset() -> Result<(MultiLocation, MultiLocation, MultiAssets), BenchmarkError> { - let origin = DotLocation::get(); - let assets: MultiAssets = (Concrete(DotLocation::get()), 1_000 * UNITS).into(); - let ticket = MultiLocation { parents: 0, interior: Here }; - Ok((origin, ticket, assets)) - } - - fn unlockable_asset() -> Result<(MultiLocation, MultiLocation, MultiAsset), BenchmarkError> { - Err(BenchmarkError::Skip) - } - - fn export_message_origin_and_destination( - ) -> Result<(MultiLocation, NetworkId, InteriorMultiLocation), BenchmarkError> { - Err(BenchmarkError::Skip) - } - - fn alias_origin() -> Result<(MultiLocation, MultiLocation), BenchmarkError> { - Err(BenchmarkError::Skip) - } - } - - type XcmBalances = pallet_xcm_benchmarks::fungible::Pallet::; - type XcmGeneric = pallet_xcm_benchmarks::generic::Pallet::; - - type Local = pallet_assets::Pallet::; - type Foreign = pallet_assets::Pallet::; - - let whitelist: Vec = vec![ - // Block Number - hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef702a5c1b19ab7a04f536c519aca4983ac").to_vec().into(), - // Total Issuance - hex_literal::hex!("c2261276cc9d1f8598ea4b6a74b15c2f57c875e4cff74148e4628f264b974c80").to_vec().into(), - // Execution Phase - hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef7ff553b5a9862a516939d82b3d3d8661a").to_vec().into(), - // Event Count - hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef70a98fdbe9ce6c55837576c60c7af3850").to_vec().into(), - // System Events - hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef780d41e5e16056765bc8461851072c9d7").to_vec().into(), - //TODO: use from relay_well_known_keys::ACTIVE_CONFIG - hex_literal::hex!("06de3d8a54d27e44a9d5ce189618f22db4b49d95320d9021994c850f25b8e385").to_vec().into(), - ]; - - let mut batches = Vec::::new(); - let params = (&config, &whitelist); - add_benchmarks!(params, batches); - - Ok(batches) - } - } - - impl sp_genesis_builder::GenesisBuilder for Runtime { - fn create_default_config() -> Vec { - create_default_config::() - } - - fn build_config(config: Vec) -> sp_genesis_builder::Result { - build_config::(config) - } - } -} - 
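For context on how the `FungiblesApi::query_account_balances` implementation above stitches the native balance together with both `pallet-assets` instances, here is a minimal, self-contained sketch of the same aggregation idea. The function name and the plain `u32`/`u128` types are hypothetical stand-ins, not the runtime's actual asset ids, `Balance` type, or API signature:

```rust
// Hypothetical, simplified types: a `u32` stands in for either a local asset id
// or a foreign asset location; `None` marks the native (DOT) balance.
fn aggregate_account_balances(
    native: u128,
    local_assets: Vec<(u32, u128)>,
    foreign_assets: Vec<(u32, u128)>,
) -> Vec<(Option<u32>, u128)> {
    let mut out = Vec::new();
    // Only report the native balance if it is non-zero, mirroring the filter above.
    if native > 0 {
        out.push((None, native));
    }
    // Drop zero balances from both assets instances before concatenating.
    out.extend(
        local_assets
            .into_iter()
            .chain(foreign_assets)
            .filter(|(_, balance)| *balance > 0)
            .map(|(id, balance)| (Some(id), balance)),
    );
    out
}
```

In the runtime itself each non-native entry is additionally converted into an XCM `MultiAsset` keyed by its location (via the `convert`/`convert_balance` helpers shown above) before the concatenated list is returned.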
-cumulus_pallet_parachain_system::register_validate_block! { - Runtime = Runtime, - BlockExecutor = cumulus_pallet_aura_ext::BlockExecutor::, -} - -#[cfg(test)] -mod tests { - use super::*; - use crate::{CENTS, MILLICENTS}; - use parachains_common::polkadot::fee; - use sp_runtime::traits::Zero; - use sp_weights::WeightToFee; - - /// We can fit at least 1000 transfers in a block. - #[test] - fn sane_block_weight() { - use pallet_balances::WeightInfo; - let block = RuntimeBlockWeights::get().max_block; - let base = RuntimeBlockWeights::get().get(DispatchClass::Normal).base_extrinsic; - let transfer = - base + weights::pallet_balances::WeightInfo::::transfer_allow_death(); - - let fit = block.checked_div_per_component(&transfer).unwrap_or_default(); - assert!(fit >= 1000, "{} should be at least 1000", fit); - } - - /// The fee for one transfer is at most 1 CENT. - #[test] - fn sane_transfer_fee() { - use pallet_balances::WeightInfo; - let base = RuntimeBlockWeights::get().get(DispatchClass::Normal).base_extrinsic; - let transfer = - base + weights::pallet_balances::WeightInfo::::transfer_allow_death(); - - let fee: Balance = fee::WeightToFee::weight_to_fee(&transfer); - assert!(fee <= CENTS, "{} MILLICENTS should be at most 1000", fee / MILLICENTS); - } - - /// Weight is being charged for both dimensions. - #[test] - fn weight_charged_for_both_components() { - let fee: Balance = fee::WeightToFee::weight_to_fee(&Weight::from_parts(10_000, 0)); - assert!(!fee.is_zero(), "Charges for ref time"); - - let fee: Balance = fee::WeightToFee::weight_to_fee(&Weight::from_parts(0, 10_000)); - assert_eq!(fee, CENTS, "10kb maps to CENT"); - } - - /// Filling up a block by proof size is at most 30 times more expensive than ref time. - /// - /// This is just a sanity check. - #[test] - fn full_block_fee_ratio() { - let block = RuntimeBlockWeights::get().max_block; - let time_fee: Balance = - fee::WeightToFee::weight_to_fee(&Weight::from_parts(block.ref_time(), 0)); - let proof_fee: Balance = - fee::WeightToFee::weight_to_fee(&Weight::from_parts(0, block.proof_size())); - - let proof_o_time = proof_fee.checked_div(time_fee).unwrap_or_default(); - assert!(proof_o_time <= 30, "{} should be at most 30", proof_o_time); - let time_o_proof = time_fee.checked_div(proof_fee).unwrap_or_default(); - assert!(time_o_proof <= 30, "{} should be at most 30", time_o_proof); - } -} diff --git a/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/weights/block_weights.rs b/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/weights/block_weights.rs deleted file mode 100644 index e7fdb2aae2a..00000000000 --- a/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/weights/block_weights.rs +++ /dev/null @@ -1,53 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
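The fee sanity tests at the end of the runtime above lean on the two-dimensional weight model: a `Weight` carries both `ref_time` and `proof_size`, and the fee function prices both so that neither dimension can be consumed for free. The sketch below uses made-up linear coefficients rather than the runtime's actual `WeightToFee` curve, and the "charge by the dearer dimension" rule is an assumption about how the production (polynomial) curve behaves:

```rust
/// Hypothetical linear fee curve: both weight dimensions are priced and the
/// dearer one determines the fee (assumption; the real curve is polynomial).
fn two_dimensional_fee(
    ref_time: u64,
    proof_size: u64,
    fee_per_ref_time_unit: u128,
    fee_per_proof_byte: u128,
) -> u128 {
    let time_fee = u128::from(ref_time) * fee_per_ref_time_unit;
    let proof_fee = u128::from(proof_size) * fee_per_proof_byte;
    // Charging by the scarcer resource means neither dimension can be spammed for free.
    time_fee.max(proof_fee)
}

#[test]
fn both_dimensions_are_charged() {
    // Mirrors the spirit of `weight_charged_for_both_components` above.
    assert!(two_dimensional_fee(10_000, 0, 1, 1) > 0);
    assert!(two_dimensional_fee(0, 10_000, 1, 1) > 0);
}
```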
- -pub mod constants { - use frame_support::{ - parameter_types, - weights::{constants, Weight}, - }; - - parameter_types! { - /// Importing a block with 0 Extrinsics. - pub const BlockExecutionWeight: Weight = - Weight::from_parts(constants::WEIGHT_REF_TIME_PER_NANOS.saturating_mul(5_000_000), 0); - } - - #[cfg(test)] - mod test_weights { - use frame_support::weights::constants; - - /// Checks that the weight exists and is sane. - // NOTE: If this test fails but you are sure that the generated values are fine, - // you can delete it. - #[test] - fn sane() { - let w = super::constants::BlockExecutionWeight::get(); - - // At least 100 µs. - assert!( - w.ref_time() >= 100u64 * constants::WEIGHT_REF_TIME_PER_MICROS, - "Weight should be at least 100 µs." - ); - // At most 50 ms. - assert!( - w.ref_time() <= 50u64 * constants::WEIGHT_REF_TIME_PER_MILLIS, - "Weight should be at most 50 ms." - ); - } - } -} diff --git a/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/weights/cumulus_pallet_dmp_queue.rs b/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/weights/cumulus_pallet_dmp_queue.rs deleted file mode 100644 index cc41dcd6cbb..00000000000 --- a/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/weights/cumulus_pallet_dmp_queue.rs +++ /dev/null @@ -1,131 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Cumulus. - -// Cumulus is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Cumulus is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Cumulus. If not, see . - -//! Autogenerated weights for `cumulus_pallet_dmp_queue` -//! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-10-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-yprdrvc7-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("asset-hub-kusama-dev")`, DB CACHE: 1024 - -// Executed Command: -// target/production/polkadot-parachain -// benchmark -// pallet -// --steps=50 -// --repeat=20 -// --extrinsic=* -// --wasm-execution=compiled -// --heap-pages=4096 -// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json -// --pallet=cumulus_pallet_dmp_queue -// --chain=asset-hub-kusama-dev -// --header=./cumulus/file_header.txt -// --output=./cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/ - -#![cfg_attr(rustfmt, rustfmt_skip)] -#![allow(unused_parens)] -#![allow(unused_imports)] -#![allow(missing_docs)] - -use frame_support::{traits::Get, weights::Weight}; -use core::marker::PhantomData; - -/// Weight functions for `cumulus_pallet_dmp_queue`. 
-pub struct WeightInfo(PhantomData); -impl cumulus_pallet_dmp_queue::WeightInfo for WeightInfo { - /// Storage: `DmpQueue::MigrationStatus` (r:1 w:1) - /// Proof: `DmpQueue::MigrationStatus` (`max_values`: Some(1), `max_size`: Some(1028), added: 1523, mode: `MaxEncodedLen`) - /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca754904d6d8c6fe06c4e5965f9b8397421` (r:1 w:0) - /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca754904d6d8c6fe06c4e5965f9b8397421` (r:1 w:0) - /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca7d95d3e948effbeccff2de2c182672836` (r:1 w:1) - /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca7d95d3e948effbeccff2de2c182672836` (r:1 w:1) - /// Storage: `MessageQueue::BookStateFor` (r:1 w:1) - /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) - /// Storage: `MessageQueue::ServiceHead` (r:1 w:1) - /// Proof: `MessageQueue::ServiceHead` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `MaxEncodedLen`) - /// Storage: `MessageQueue::Pages` (r:0 w:1) - /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(65585), added: 68060, mode: `MaxEncodedLen`) - fn on_idle_good_msg() -> Weight { - // Proof Size summary in bytes: - // Measured: `65696` - // Estimated: `69161` - // Minimum execution time: 124_651_000 picoseconds. - Weight::from_parts(127_857_000, 0) - .saturating_add(Weight::from_parts(0, 69161)) - .saturating_add(T::DbWeight::get().reads(5)) - .saturating_add(T::DbWeight::get().writes(5)) - } - /// Storage: `DmpQueue::MigrationStatus` (r:1 w:1) - /// Proof: `DmpQueue::MigrationStatus` (`max_values`: Some(1), `max_size`: Some(1028), added: 1523, mode: `MaxEncodedLen`) - /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca754904d6d8c6fe06c4e5965f9b8397421` (r:1 w:0) - /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca754904d6d8c6fe06c4e5965f9b8397421` (r:1 w:0) - /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca7d95d3e948effbeccff2de2c182672836` (r:1 w:1) - /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca7d95d3e948effbeccff2de2c182672836` (r:1 w:1) - fn on_idle_large_msg() -> Weight { - // Proof Size summary in bytes: - // Measured: `65659` - // Estimated: `69124` - // Minimum execution time: 65_684_000 picoseconds. 
- Weight::from_parts(68_039_000, 0) - .saturating_add(Weight::from_parts(0, 69124)) - .saturating_add(T::DbWeight::get().reads(3)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `DmpQueue::MigrationStatus` (r:1 w:1) - /// Proof: `DmpQueue::MigrationStatus` (`max_values`: Some(1), `max_size`: Some(1028), added: 1523, mode: `MaxEncodedLen`) - /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca754904d6d8c6fe06c4e5965f9b8397421` (r:1 w:0) - /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca754904d6d8c6fe06c4e5965f9b8397421` (r:1 w:0) - /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca70f923ef3252d0166429d36d20ed665a8` (r:1 w:1) - /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca70f923ef3252d0166429d36d20ed665a8` (r:1 w:1) - /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca772275f64c354954352b71eea39cfaca2` (r:1 w:1) - /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca772275f64c354954352b71eea39cfaca2` (r:1 w:1) - /// Storage: `MessageQueue::BookStateFor` (r:1 w:1) - /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) - /// Storage: `MessageQueue::ServiceHead` (r:1 w:1) - /// Proof: `MessageQueue::ServiceHead` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `MaxEncodedLen`) - /// Storage: `MessageQueue::Pages` (r:0 w:1) - /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(65585), added: 68060, mode: `MaxEncodedLen`) - fn on_idle_overweight_good_msg() -> Weight { - // Proof Size summary in bytes: - // Measured: `65726` - // Estimated: `69191` - // Minimum execution time: 117_657_000 picoseconds. - Weight::from_parts(122_035_000, 0) - .saturating_add(Weight::from_parts(0, 69191)) - .saturating_add(T::DbWeight::get().reads(6)) - .saturating_add(T::DbWeight::get().writes(6)) - } - /// Storage: `DmpQueue::MigrationStatus` (r:1 w:1) - /// Proof: `DmpQueue::MigrationStatus` (`max_values`: Some(1), `max_size`: Some(1028), added: 1523, mode: `MaxEncodedLen`) - /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca754904d6d8c6fe06c4e5965f9b8397421` (r:1 w:0) - /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca754904d6d8c6fe06c4e5965f9b8397421` (r:1 w:0) - /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca70f923ef3252d0166429d36d20ed665a8` (r:1 w:1) - /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca70f923ef3252d0166429d36d20ed665a8` (r:1 w:1) - /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca772275f64c354954352b71eea39cfaca2` (r:1 w:1) - /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca772275f64c354954352b71eea39cfaca2` (r:1 w:1) - fn on_idle_overweight_large_msg() -> Weight { - // Proof Size summary in bytes: - // Measured: `65689` - // Estimated: `69154` - // Minimum execution time: 59_799_000 picoseconds. - Weight::from_parts(61_354_000, 0) - .saturating_add(Weight::from_parts(0, 69154)) - .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(3)) - } -} diff --git a/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/weights/cumulus_pallet_parachain_system.rs b/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/weights/cumulus_pallet_parachain_system.rs deleted file mode 100644 index 970534560c6..00000000000 --- a/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/weights/cumulus_pallet_parachain_system.rs +++ /dev/null @@ -1,80 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. 
-// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! Autogenerated weights for `cumulus_pallet_parachain_system` -//! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-03-28, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `i9`, CPU: `13th Gen Intel(R) Core(TM) i9-13900K` -//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("westmint-dev"), DB CACHE: 1024 - -// Executed Command: -// ./target/release/polkadot-parachain -// benchmark -// pallet -// --chain -// westmint-dev -// --pallet -// cumulus_pallet_parachain_system -// --extrinsic -// * -// --execution -// wasm -// --wasm-execution -// compiled -// --output -// parachains/runtimes/assets/westmint/src/weights -// --steps -// 50 -// --repeat -// 20 - -#![cfg_attr(rustfmt, rustfmt_skip)] -#![allow(unused_parens)] -#![allow(unused_imports)] - -use frame_support::{traits::Get, weights::Weight}; -use sp_std::marker::PhantomData; - -/// Weight functions for `cumulus_pallet_parachain_system`. -pub struct WeightInfo(PhantomData); -impl cumulus_pallet_parachain_system::WeightInfo for WeightInfo { - /// Storage: ParachainSystem LastDmqMqcHead (r:1 w:1) - /// Proof Skipped: ParachainSystem LastDmqMqcHead (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ParachainSystem ReservedDmpWeightOverride (r:1 w:0) - /// Proof Skipped: ParachainSystem ReservedDmpWeightOverride (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: MessageQueue BookStateFor (r:1 w:1) - /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) - /// Storage: MessageQueue ServiceHead (r:1 w:1) - /// Proof: MessageQueue ServiceHead (max_values: Some(1), max_size: Some(5), added: 500, mode: MaxEncodedLen) - /// Storage: ParachainSystem ProcessedDownwardMessages (r:0 w:1) - /// Proof Skipped: ParachainSystem ProcessedDownwardMessages (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: MessageQueue Pages (r:0 w:16) - /// Proof: MessageQueue Pages (max_values: None, max_size: Some(65585), added: 68060, mode: MaxEncodedLen) - /// The range of component `n` is `[0, 1000]`. - fn enqueue_inbound_downward_messages(n: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `12` - // Estimated: `8013` - // Minimum execution time: 1_638_000 picoseconds. 
- Weight::from_parts(1_690_000, 0) - .saturating_add(Weight::from_parts(0, 8013)) - // Standard Error: 22_873 - .saturating_add(Weight::from_parts(24_208_496, 0).saturating_mul(n.into())) - .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(4)) - } -} diff --git a/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/weights/cumulus_pallet_xcmp_queue.rs b/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/weights/cumulus_pallet_xcmp_queue.rs deleted file mode 100644 index 89c80d0be62..00000000000 --- a/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/weights/cumulus_pallet_xcmp_queue.rs +++ /dev/null @@ -1,148 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! Autogenerated weights for `cumulus_pallet_xcmp_queue` -//! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-09-15, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `Olivers-MacBook-Pro.local`, CPU: `` -//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("asset-hub-polkadot-dev")`, DB CACHE: 1024 - -// Executed Command: -// ./target/release/polkadot-parachain -// benchmark -// pallet -// --pallet -// cumulus-pallet-xcmp-queue -// --chain -// asset-hub-polkadot-dev -// --output -// cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/weights/cumulus_pallet_xcmp_queue.rs -// --extrinsic -// - -#![cfg_attr(rustfmt, rustfmt_skip)] -#![allow(unused_parens)] -#![allow(unused_imports)] -#![allow(missing_docs)] - -use frame_support::{traits::Get, weights::Weight}; -use core::marker::PhantomData; - -/// Weight functions for `cumulus_pallet_xcmp_queue`. -pub struct WeightInfo(PhantomData); -impl cumulus_pallet_xcmp_queue::WeightInfo for WeightInfo { - /// Storage: `XcmpQueue::QueueConfig` (r:1 w:1) - /// Proof: `XcmpQueue::QueueConfig` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - fn set_config_with_u32() -> Weight { - // Proof Size summary in bytes: - // Measured: `76` - // Estimated: `1561` - // Minimum execution time: 6_000_000 picoseconds. 
- Weight::from_parts(6_000_000, 0) - .saturating_add(Weight::from_parts(0, 1561)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `XcmpQueue::QueueConfig` (r:1 w:0) - /// Proof: `XcmpQueue::QueueConfig` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `MessageQueue::BookStateFor` (r:1 w:1) - /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) - /// Storage: `MessageQueue::ServiceHead` (r:1 w:1) - /// Proof: `MessageQueue::ServiceHead` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `MaxEncodedLen`) - /// Storage: `XcmpQueue::InboundXcmpSuspended` (r:1 w:0) - /// Proof: `XcmpQueue::InboundXcmpSuspended` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `MessageQueue::Pages` (r:0 w:1) - /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(65585), added: 68060, mode: `MaxEncodedLen`) - fn enqueue_xcmp_message() -> Weight { - // Proof Size summary in bytes: - // Measured: `82` - // Estimated: `3517` - // Minimum execution time: 14_000_000 picoseconds. - Weight::from_parts(15_000_000, 0) - .saturating_add(Weight::from_parts(0, 3517)) - .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(3)) - } - /// Storage: `XcmpQueue::OutboundXcmpStatus` (r:1 w:1) - /// Proof: `XcmpQueue::OutboundXcmpStatus` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - fn suspend_channel() -> Weight { - // Proof Size summary in bytes: - // Measured: `76` - // Estimated: `1561` - // Minimum execution time: 3_000_000 picoseconds. - Weight::from_parts(4_000_000, 0) - .saturating_add(Weight::from_parts(0, 1561)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `XcmpQueue::OutboundXcmpStatus` (r:1 w:1) - /// Proof: `XcmpQueue::OutboundXcmpStatus` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - fn resume_channel() -> Weight { - // Proof Size summary in bytes: - // Measured: `111` - // Estimated: `1596` - // Minimum execution time: 4_000_000 picoseconds. - Weight::from_parts(5_000_000, 0) - .saturating_add(Weight::from_parts(0, 1596)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - fn take_first_concatenated_xcm() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 44_000_000 picoseconds. 
- Weight::from_parts(46_000_000, 0) - .saturating_add(Weight::from_parts(0, 0)) - } - /// Storage: UNKNOWN KEY `0x7b3237373ffdfeb1cab4222e3b520d6b345d8e88afa015075c945637c07e8f20` (r:1 w:1) - /// Proof: UNKNOWN KEY `0x7b3237373ffdfeb1cab4222e3b520d6b345d8e88afa015075c945637c07e8f20` (r:1 w:1) - /// Storage: `XcmpQueue::InboundXcmpMessages` (r:1 w:1) - /// Proof: `XcmpQueue::InboundXcmpMessages` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// Storage: `MessageQueue::BookStateFor` (r:1 w:1) - /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) - /// Storage: `MessageQueue::ServiceHead` (r:1 w:1) - /// Proof: `MessageQueue::ServiceHead` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `MaxEncodedLen`) - /// Storage: `XcmpQueue::QueueConfig` (r:1 w:0) - /// Proof: `XcmpQueue::QueueConfig` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `XcmpQueue::InboundXcmpSuspended` (r:1 w:0) - /// Proof: `XcmpQueue::InboundXcmpSuspended` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `MessageQueue::Pages` (r:0 w:1) - /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(65585), added: 68060, mode: `MaxEncodedLen`) - fn on_idle_good_msg() -> Weight { - // Proof Size summary in bytes: - // Measured: `65711` - // Estimated: `69176` - // Minimum execution time: 62_000_000 picoseconds. - Weight::from_parts(68_000_000, 0) - .saturating_add(Weight::from_parts(0, 69176)) - .saturating_add(T::DbWeight::get().reads(6)) - .saturating_add(T::DbWeight::get().writes(5)) - } - /// Storage: UNKNOWN KEY `0x7b3237373ffdfeb1cab4222e3b520d6b345d8e88afa015075c945637c07e8f20` (r:1 w:1) - /// Proof: UNKNOWN KEY `0x7b3237373ffdfeb1cab4222e3b520d6b345d8e88afa015075c945637c07e8f20` (r:1 w:1) - fn on_idle_large_msg() -> Weight { - // Proof Size summary in bytes: - // Measured: `65710` - // Estimated: `69175` - // Minimum execution time: 42_000_000 picoseconds. - Weight::from_parts(45_000_000, 0) - .saturating_add(Weight::from_parts(0, 69175)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(2)) - } -} diff --git a/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/weights/extrinsic_weights.rs b/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/weights/extrinsic_weights.rs deleted file mode 100644 index 1a4adb968bb..00000000000 --- a/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/weights/extrinsic_weights.rs +++ /dev/null @@ -1,53 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -pub mod constants { - use frame_support::{ - parameter_types, - weights::{constants, Weight}, - }; - - parameter_types! { - /// Executing a NO-OP `System::remarks` Extrinsic. 
- pub const ExtrinsicBaseWeight: Weight = - Weight::from_parts(constants::WEIGHT_REF_TIME_PER_NANOS.saturating_mul(125_000), 0); - } - - #[cfg(test)] - mod test_weights { - use frame_support::weights::constants; - - /// Checks that the weight exists and is sane. - // NOTE: If this test fails but you are sure that the generated values are fine, - // you can delete it. - #[test] - fn sane() { - let w = super::constants::ExtrinsicBaseWeight::get(); - - // At least 10 µs. - assert!( - w.ref_time() >= 10u64 * constants::WEIGHT_REF_TIME_PER_MICROS, - "Weight should be at least 10 µs." - ); - // At most 1 ms. - assert!( - w.ref_time() <= constants::WEIGHT_REF_TIME_PER_MILLIS, - "Weight should be at most 1 ms." - ); - } - } -} diff --git a/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/weights/frame_system.rs b/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/weights/frame_system.rs deleted file mode 100644 index 0b988fd0f6f..00000000000 --- a/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/weights/frame_system.rs +++ /dev/null @@ -1,154 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! Autogenerated weights for `frame_system` -//! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-07-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-ynta1nyy-project-238-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! EXECUTION: ``, WASM-EXECUTION: `Compiled`, CHAIN: `Some("asset-hub-polkadot-dev")`, DB CACHE: 1024 - -// Executed Command: -// ./target/production/polkadot-parachain -// benchmark -// pallet -// --chain=asset-hub-polkadot-dev -// --wasm-execution=compiled -// --pallet=frame_system -// --no-storage-info -// --no-median-slopes -// --no-min-squares -// --extrinsic=* -// --steps=50 -// --repeat=20 -// --json -// --header=./file_header.txt -// --output=./parachains/runtimes/assets/asset-hub-polkadot/src/weights/ - -#![cfg_attr(rustfmt, rustfmt_skip)] -#![allow(unused_parens)] -#![allow(unused_imports)] -#![allow(missing_docs)] - -use frame_support::{traits::Get, weights::Weight}; -use core::marker::PhantomData; - -/// Weight functions for `frame_system`. -pub struct WeightInfo(PhantomData); -impl frame_system::WeightInfo for WeightInfo { - /// The range of component `b` is `[0, 3932160]`. - fn remark(b: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 1_956_000 picoseconds. - Weight::from_parts(3_441_280, 0) - .saturating_add(Weight::from_parts(0, 0)) - // Standard Error: 0 - .saturating_add(Weight::from_parts(388, 0).saturating_mul(b.into())) - } - /// The range of component `b` is `[0, 3932160]`. - fn remark_with_event(b: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 7_267_000 picoseconds. 
- Weight::from_parts(7_462_000, 0) - .saturating_add(Weight::from_parts(0, 0)) - // Standard Error: 3 - .saturating_add(Weight::from_parts(1_816, 0).saturating_mul(b.into())) - } - /// Storage: `System::Digest` (r:1 w:1) - /// Proof: `System::Digest` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: UNKNOWN KEY `0x3a686561707061676573` (r:0 w:1) - /// Proof: UNKNOWN KEY `0x3a686561707061676573` (r:0 w:1) - fn set_heap_pages() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `1485` - // Minimum execution time: 3_757_000 picoseconds. - Weight::from_parts(4_021_000, 0) - .saturating_add(Weight::from_parts(0, 1485)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `ParachainSystem::ValidationData` (r:1 w:0) - /// Proof: `ParachainSystem::ValidationData` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `ParachainSystem::UpgradeRestrictionSignal` (r:1 w:0) - /// Proof: `ParachainSystem::UpgradeRestrictionSignal` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `ParachainSystem::PendingValidationCode` (r:1 w:1) - /// Proof: `ParachainSystem::PendingValidationCode` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) - /// Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `ParachainSystem::NewValidationCode` (r:0 w:1) - /// Proof: `ParachainSystem::NewValidationCode` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `ParachainSystem::DidSetValidationCode` (r:0 w:1) - /// Proof: `ParachainSystem::DidSetValidationCode` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - fn set_code() -> Weight { - // Proof Size summary in bytes: - // Measured: `119` - // Estimated: `1604` - // Minimum execution time: 97_958_650_000 picoseconds. - Weight::from_parts(102_129_539_000, 0) - .saturating_add(Weight::from_parts(0, 1604)) - .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(3)) - } - /// Storage: `Skipped::Metadata` (r:0 w:0) - /// Proof: `Skipped::Metadata` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// The range of component `i` is `[0, 1000]`. - fn set_storage(i: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 2_327_000 picoseconds. - Weight::from_parts(2_511_000, 0) - .saturating_add(Weight::from_parts(0, 0)) - // Standard Error: 2_186 - .saturating_add(Weight::from_parts(755_085, 0).saturating_mul(i.into())) - .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(i.into()))) - } - /// Storage: `Skipped::Metadata` (r:0 w:0) - /// Proof: `Skipped::Metadata` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// The range of component `i` is `[0, 1000]`. - fn kill_storage(i: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 2_114_000 picoseconds. 
- Weight::from_parts(2_177_000, 0) - .saturating_add(Weight::from_parts(0, 0)) - // Standard Error: 1_174 - .saturating_add(Weight::from_parts(584_644, 0).saturating_mul(i.into())) - .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(i.into()))) - } - /// Storage: `Skipped::Metadata` (r:0 w:0) - /// Proof: `Skipped::Metadata` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// The range of component `p` is `[0, 1000]`. - fn kill_prefix(p: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `84 + p * (69 ±0)` - // Estimated: `77 + p * (70 ±0)` - // Minimum execution time: 3_799_000 picoseconds. - Weight::from_parts(3_910_000, 0) - .saturating_add(Weight::from_parts(0, 77)) - // Standard Error: 1_968 - .saturating_add(Weight::from_parts(1_220_745, 0).saturating_mul(p.into())) - .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(p.into()))) - .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(p.into()))) - .saturating_add(Weight::from_parts(0, 70).saturating_mul(p.into())) - } -} diff --git a/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/weights/mod.rs b/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/weights/mod.rs deleted file mode 100644 index 0823dcad88e..00000000000 --- a/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/weights/mod.rs +++ /dev/null @@ -1,42 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -pub mod block_weights; -pub mod cumulus_pallet_dmp_queue; -pub mod cumulus_pallet_parachain_system; -pub mod cumulus_pallet_xcmp_queue; -pub mod extrinsic_weights; -pub mod frame_system; -pub mod pallet_assets_foreign; -pub mod pallet_assets_local; -pub mod pallet_balances; -pub mod pallet_collator_selection; -pub mod pallet_message_queue; -pub mod pallet_multisig; -pub mod pallet_nfts; -pub mod pallet_proxy; -pub mod pallet_session; -pub mod pallet_timestamp; -pub mod pallet_uniques; -pub mod pallet_utility; -pub mod pallet_xcm; -pub mod paritydb_weights; -pub mod rocksdb_weights; -pub mod xcm; - -pub use block_weights::constants::BlockExecutionWeight; -pub use extrinsic_weights::constants::ExtrinsicBaseWeight; -pub use paritydb_weights::constants::ParityDbWeight; -pub use rocksdb_weights::constants::RocksDbWeight; diff --git a/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/weights/pallet_assets_foreign.rs b/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/weights/pallet_assets_foreign.rs deleted file mode 100644 index adb686c0afc..00000000000 --- a/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/weights/pallet_assets_foreign.rs +++ /dev/null @@ -1,532 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! Autogenerated weights for `pallet_assets` -//! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-07-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-ynta1nyy-project-238-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! EXECUTION: ``, WASM-EXECUTION: `Compiled`, CHAIN: `Some("asset-hub-polkadot-dev")`, DB CACHE: 1024 - -// Executed Command: -// ./target/production/polkadot-parachain -// benchmark -// pallet -// --chain=asset-hub-polkadot-dev -// --wasm-execution=compiled -// --pallet=pallet_assets -// --no-storage-info -// --no-median-slopes -// --no-min-squares -// --extrinsic=* -// --steps=50 -// --repeat=20 -// --json -// --header=./file_header.txt -// --output=./parachains/runtimes/assets/asset-hub-polkadot/src/weights/ - -#![cfg_attr(rustfmt, rustfmt_skip)] -#![allow(unused_parens)] -#![allow(unused_imports)] -#![allow(missing_docs)] - -use frame_support::{traits::Get, weights::Weight}; -use core::marker::PhantomData; - -/// Weight functions for `pallet_assets`. -pub struct WeightInfo(PhantomData); -impl pallet_assets::WeightInfo for WeightInfo { - /// Storage: `ParachainInfo::ParachainId` (r:1 w:0) - /// Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - /// Storage: `ForeignAssets::Asset` (r:1 w:1) - /// Proof: `ForeignAssets::Asset` (`max_values`: None, `max_size`: Some(808), added: 3283, mode: `MaxEncodedLen`) - /// Storage: `System::Account` (r:1 w:1) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - fn create() -> Weight { - // Proof Size summary in bytes: - // Measured: `107` - // Estimated: `4273` - // Minimum execution time: 29_979_000 picoseconds. - Weight::from_parts(30_763_000, 0) - .saturating_add(Weight::from_parts(0, 4273)) - .saturating_add(T::DbWeight::get().reads(3)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `ForeignAssets::Asset` (r:1 w:1) - /// Proof: `ForeignAssets::Asset` (`max_values`: None, `max_size`: Some(808), added: 3283, mode: `MaxEncodedLen`) - fn force_create() -> Weight { - // Proof Size summary in bytes: - // Measured: `4` - // Estimated: `4273` - // Minimum execution time: 12_255_000 picoseconds. - Weight::from_parts(12_614_000, 0) - .saturating_add(Weight::from_parts(0, 4273)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `ForeignAssets::Asset` (r:1 w:1) - /// Proof: `ForeignAssets::Asset` (`max_values`: None, `max_size`: Some(808), added: 3283, mode: `MaxEncodedLen`) - fn start_destroy() -> Weight { - // Proof Size summary in bytes: - // Measured: `276` - // Estimated: `4273` - // Minimum execution time: 15_240_000 picoseconds. 
- Weight::from_parts(15_627_000, 0) - .saturating_add(Weight::from_parts(0, 4273)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `ForeignAssets::Asset` (r:1 w:1) - /// Proof: `ForeignAssets::Asset` (`max_values`: None, `max_size`: Some(808), added: 3283, mode: `MaxEncodedLen`) - /// Storage: `ForeignAssets::Account` (r:1001 w:1000) - /// Proof: `ForeignAssets::Account` (`max_values`: None, `max_size`: Some(732), added: 3207, mode: `MaxEncodedLen`) - /// Storage: `System::Account` (r:1000 w:1000) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - /// The range of component `c` is `[0, 1000]`. - /// The range of component `c` is `[0, 1000]`. - fn destroy_accounts(c: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `0 + c * (208 ±0)` - // Estimated: `4273 + c * (3207 ±0)` - // Minimum execution time: 17_814_000 picoseconds. - Weight::from_parts(18_006_000, 0) - .saturating_add(Weight::from_parts(0, 4273)) - // Standard Error: 10_358 - .saturating_add(Weight::from_parts(15_409_972, 0).saturating_mul(c.into())) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().reads((2_u64).saturating_mul(c.into()))) - .saturating_add(T::DbWeight::get().writes(1)) - .saturating_add(T::DbWeight::get().writes((2_u64).saturating_mul(c.into()))) - .saturating_add(Weight::from_parts(0, 3207).saturating_mul(c.into())) - } - /// Storage: `ForeignAssets::Asset` (r:1 w:1) - /// Proof: `ForeignAssets::Asset` (`max_values`: None, `max_size`: Some(808), added: 3283, mode: `MaxEncodedLen`) - /// Storage: `ForeignAssets::Approvals` (r:1001 w:1000) - /// Proof: `ForeignAssets::Approvals` (`max_values`: None, `max_size`: Some(746), added: 3221, mode: `MaxEncodedLen`) - /// The range of component `a` is `[0, 1000]`. - /// The range of component `a` is `[0, 1000]`. - fn destroy_approvals(a: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `413 + a * (86 ±0)` - // Estimated: `4273 + a * (3221 ±0)` - // Minimum execution time: 18_957_000 picoseconds. - Weight::from_parts(19_347_000, 0) - .saturating_add(Weight::from_parts(0, 4273)) - // Standard Error: 5_051 - .saturating_add(Weight::from_parts(15_416_931, 0).saturating_mul(a.into())) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(a.into()))) - .saturating_add(T::DbWeight::get().writes(1)) - .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(a.into()))) - .saturating_add(Weight::from_parts(0, 3221).saturating_mul(a.into())) - } - /// Storage: `ForeignAssets::Asset` (r:1 w:1) - /// Proof: `ForeignAssets::Asset` (`max_values`: None, `max_size`: Some(808), added: 3283, mode: `MaxEncodedLen`) - /// Storage: `ForeignAssets::Metadata` (r:1 w:0) - /// Proof: `ForeignAssets::Metadata` (`max_values`: None, `max_size`: Some(738), added: 3213, mode: `MaxEncodedLen`) - fn finish_destroy() -> Weight { - // Proof Size summary in bytes: - // Measured: `242` - // Estimated: `4273` - // Minimum execution time: 15_409_000 picoseconds. 
- Weight::from_parts(15_835_000, 0) - .saturating_add(Weight::from_parts(0, 4273)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `ForeignAssets::Asset` (r:1 w:1) - /// Proof: `ForeignAssets::Asset` (`max_values`: None, `max_size`: Some(808), added: 3283, mode: `MaxEncodedLen`) - /// Storage: `ForeignAssets::Account` (r:1 w:1) - /// Proof: `ForeignAssets::Account` (`max_values`: None, `max_size`: Some(732), added: 3207, mode: `MaxEncodedLen`) - fn mint() -> Weight { - // Proof Size summary in bytes: - // Measured: `242` - // Estimated: `4273` - // Minimum execution time: 26_753_000 picoseconds. - Weight::from_parts(27_349_000, 0) - .saturating_add(Weight::from_parts(0, 4273)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `ForeignAssets::Asset` (r:1 w:1) - /// Proof: `ForeignAssets::Asset` (`max_values`: None, `max_size`: Some(808), added: 3283, mode: `MaxEncodedLen`) - /// Storage: `ForeignAssets::Account` (r:1 w:1) - /// Proof: `ForeignAssets::Account` (`max_values`: None, `max_size`: Some(732), added: 3207, mode: `MaxEncodedLen`) - fn burn() -> Weight { - // Proof Size summary in bytes: - // Measured: `350` - // Estimated: `4273` - // Minimum execution time: 33_918_000 picoseconds. - Weight::from_parts(34_624_000, 0) - .saturating_add(Weight::from_parts(0, 4273)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `ForeignAssets::Asset` (r:1 w:1) - /// Proof: `ForeignAssets::Asset` (`max_values`: None, `max_size`: Some(808), added: 3283, mode: `MaxEncodedLen`) - /// Storage: `ForeignAssets::Account` (r:2 w:2) - /// Proof: `ForeignAssets::Account` (`max_values`: None, `max_size`: Some(732), added: 3207, mode: `MaxEncodedLen`) - /// Storage: `System::Account` (r:1 w:1) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - fn transfer() -> Weight { - // Proof Size summary in bytes: - // Measured: `350` - // Estimated: `7404` - // Minimum execution time: 45_863_000 picoseconds. - Weight::from_parts(46_674_000, 0) - .saturating_add(Weight::from_parts(0, 7404)) - .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(4)) - } - /// Storage: `ForeignAssets::Asset` (r:1 w:1) - /// Proof: `ForeignAssets::Asset` (`max_values`: None, `max_size`: Some(808), added: 3283, mode: `MaxEncodedLen`) - /// Storage: `ForeignAssets::Account` (r:2 w:2) - /// Proof: `ForeignAssets::Account` (`max_values`: None, `max_size`: Some(732), added: 3207, mode: `MaxEncodedLen`) - /// Storage: `System::Account` (r:1 w:1) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - fn transfer_keep_alive() -> Weight { - // Proof Size summary in bytes: - // Measured: `350` - // Estimated: `7404` - // Minimum execution time: 40_592_000 picoseconds. 
- Weight::from_parts(41_582_000, 0) - .saturating_add(Weight::from_parts(0, 7404)) - .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(4)) - } - /// Storage: `ForeignAssets::Asset` (r:1 w:1) - /// Proof: `ForeignAssets::Asset` (`max_values`: None, `max_size`: Some(808), added: 3283, mode: `MaxEncodedLen`) - /// Storage: `ForeignAssets::Account` (r:2 w:2) - /// Proof: `ForeignAssets::Account` (`max_values`: None, `max_size`: Some(732), added: 3207, mode: `MaxEncodedLen`) - /// Storage: `System::Account` (r:1 w:1) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - fn force_transfer() -> Weight { - // Proof Size summary in bytes: - // Measured: `350` - // Estimated: `7404` - // Minimum execution time: 46_170_000 picoseconds. - Weight::from_parts(46_880_000, 0) - .saturating_add(Weight::from_parts(0, 7404)) - .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(4)) - } - /// Storage: `ForeignAssets::Asset` (r:1 w:0) - /// Proof: `ForeignAssets::Asset` (`max_values`: None, `max_size`: Some(808), added: 3283, mode: `MaxEncodedLen`) - /// Storage: `ForeignAssets::Account` (r:1 w:1) - /// Proof: `ForeignAssets::Account` (`max_values`: None, `max_size`: Some(732), added: 3207, mode: `MaxEncodedLen`) - fn freeze() -> Weight { - // Proof Size summary in bytes: - // Measured: `350` - // Estimated: `4273` - // Minimum execution time: 18_421_000 picoseconds. - Weight::from_parts(19_003_000, 0) - .saturating_add(Weight::from_parts(0, 4273)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `ForeignAssets::Asset` (r:1 w:0) - /// Proof: `ForeignAssets::Asset` (`max_values`: None, `max_size`: Some(808), added: 3283, mode: `MaxEncodedLen`) - /// Storage: `ForeignAssets::Account` (r:1 w:1) - /// Proof: `ForeignAssets::Account` (`max_values`: None, `max_size`: Some(732), added: 3207, mode: `MaxEncodedLen`) - fn thaw() -> Weight { - // Proof Size summary in bytes: - // Measured: `350` - // Estimated: `4273` - // Minimum execution time: 18_009_000 picoseconds. - Weight::from_parts(18_683_000, 0) - .saturating_add(Weight::from_parts(0, 4273)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `ForeignAssets::Asset` (r:1 w:1) - /// Proof: `ForeignAssets::Asset` (`max_values`: None, `max_size`: Some(808), added: 3283, mode: `MaxEncodedLen`) - fn freeze_asset() -> Weight { - // Proof Size summary in bytes: - // Measured: `276` - // Estimated: `4273` - // Minimum execution time: 14_702_000 picoseconds. - Weight::from_parts(15_118_000, 0) - .saturating_add(Weight::from_parts(0, 4273)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `ForeignAssets::Asset` (r:1 w:1) - /// Proof: `ForeignAssets::Asset` (`max_values`: None, `max_size`: Some(808), added: 3283, mode: `MaxEncodedLen`) - fn thaw_asset() -> Weight { - // Proof Size summary in bytes: - // Measured: `276` - // Estimated: `4273` - // Minimum execution time: 14_329_000 picoseconds. 
- Weight::from_parts(14_857_000, 0) - .saturating_add(Weight::from_parts(0, 4273)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `ForeignAssets::Asset` (r:1 w:1) - /// Proof: `ForeignAssets::Asset` (`max_values`: None, `max_size`: Some(808), added: 3283, mode: `MaxEncodedLen`) - /// Storage: `ForeignAssets::Metadata` (r:1 w:0) - /// Proof: `ForeignAssets::Metadata` (`max_values`: None, `max_size`: Some(738), added: 3213, mode: `MaxEncodedLen`) - fn transfer_ownership() -> Weight { - // Proof Size summary in bytes: - // Measured: `242` - // Estimated: `4273` - // Minimum execution time: 15_776_000 picoseconds. - Weight::from_parts(16_337_000, 0) - .saturating_add(Weight::from_parts(0, 4273)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `ForeignAssets::Asset` (r:1 w:1) - /// Proof: `ForeignAssets::Asset` (`max_values`: None, `max_size`: Some(808), added: 3283, mode: `MaxEncodedLen`) - fn set_team() -> Weight { - // Proof Size summary in bytes: - // Measured: `242` - // Estimated: `4273` - // Minimum execution time: 14_290_000 picoseconds. - Weight::from_parts(14_655_000, 0) - .saturating_add(Weight::from_parts(0, 4273)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `ForeignAssets::Asset` (r:1 w:0) - /// Proof: `ForeignAssets::Asset` (`max_values`: None, `max_size`: Some(808), added: 3283, mode: `MaxEncodedLen`) - /// Storage: `ForeignAssets::Metadata` (r:1 w:1) - /// Proof: `ForeignAssets::Metadata` (`max_values`: None, `max_size`: Some(738), added: 3213, mode: `MaxEncodedLen`) - /// The range of component `n` is `[0, 50]`. - /// The range of component `s` is `[0, 50]`. - /// The range of component `n` is `[0, 50]`. - /// The range of component `s` is `[0, 50]`. - fn set_metadata(_n: u32, s: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `242` - // Estimated: `4273` - // Minimum execution time: 29_296_000 picoseconds. - Weight::from_parts(30_512_261, 0) - .saturating_add(Weight::from_parts(0, 4273)) - // Standard Error: 474 - .saturating_add(Weight::from_parts(530, 0).saturating_mul(s.into())) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `ForeignAssets::Asset` (r:1 w:0) - /// Proof: `ForeignAssets::Asset` (`max_values`: None, `max_size`: Some(808), added: 3283, mode: `MaxEncodedLen`) - /// Storage: `ForeignAssets::Metadata` (r:1 w:1) - /// Proof: `ForeignAssets::Metadata` (`max_values`: None, `max_size`: Some(738), added: 3213, mode: `MaxEncodedLen`) - fn clear_metadata() -> Weight { - // Proof Size summary in bytes: - // Measured: `406` - // Estimated: `4273` - // Minimum execution time: 30_342_000 picoseconds. - Weight::from_parts(31_030_000, 0) - .saturating_add(Weight::from_parts(0, 4273)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `ForeignAssets::Asset` (r:1 w:0) - /// Proof: `ForeignAssets::Asset` (`max_values`: None, `max_size`: Some(808), added: 3283, mode: `MaxEncodedLen`) - /// Storage: `ForeignAssets::Metadata` (r:1 w:1) - /// Proof: `ForeignAssets::Metadata` (`max_values`: None, `max_size`: Some(738), added: 3213, mode: `MaxEncodedLen`) - /// The range of component `n` is `[0, 50]`. - /// The range of component `s` is `[0, 50]`. - /// The range of component `n` is `[0, 50]`. - /// The range of component `s` is `[0, 50]`. 
- fn force_set_metadata(n: u32, s: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `81` - // Estimated: `4273` - // Minimum execution time: 13_574_000 picoseconds. - Weight::from_parts(14_181_016, 0) - .saturating_add(Weight::from_parts(0, 4273)) - // Standard Error: 262 - .saturating_add(Weight::from_parts(420, 0).saturating_mul(n.into())) - // Standard Error: 262 - .saturating_add(Weight::from_parts(1_118, 0).saturating_mul(s.into())) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `ForeignAssets::Asset` (r:1 w:0) - /// Proof: `ForeignAssets::Asset` (`max_values`: None, `max_size`: Some(808), added: 3283, mode: `MaxEncodedLen`) - /// Storage: `ForeignAssets::Metadata` (r:1 w:1) - /// Proof: `ForeignAssets::Metadata` (`max_values`: None, `max_size`: Some(738), added: 3213, mode: `MaxEncodedLen`) - fn force_clear_metadata() -> Weight { - // Proof Size summary in bytes: - // Measured: `406` - // Estimated: `4273` - // Minimum execution time: 29_679_000 picoseconds. - Weight::from_parts(30_346_000, 0) - .saturating_add(Weight::from_parts(0, 4273)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `ForeignAssets::Asset` (r:1 w:1) - /// Proof: `ForeignAssets::Asset` (`max_values`: None, `max_size`: Some(808), added: 3283, mode: `MaxEncodedLen`) - fn force_asset_status() -> Weight { - // Proof Size summary in bytes: - // Measured: `242` - // Estimated: `4273` - // Minimum execution time: 13_334_000 picoseconds. - Weight::from_parts(13_827_000, 0) - .saturating_add(Weight::from_parts(0, 4273)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `ForeignAssets::Asset` (r:1 w:1) - /// Proof: `ForeignAssets::Asset` (`max_values`: None, `max_size`: Some(808), added: 3283, mode: `MaxEncodedLen`) - /// Storage: `ForeignAssets::Approvals` (r:1 w:1) - /// Proof: `ForeignAssets::Approvals` (`max_values`: None, `max_size`: Some(746), added: 3221, mode: `MaxEncodedLen`) - fn approve_transfer() -> Weight { - // Proof Size summary in bytes: - // Measured: `276` - // Estimated: `4273` - // Minimum execution time: 32_648_000 picoseconds. - Weight::from_parts(33_555_000, 0) - .saturating_add(Weight::from_parts(0, 4273)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `ForeignAssets::Asset` (r:1 w:1) - /// Proof: `ForeignAssets::Asset` (`max_values`: None, `max_size`: Some(808), added: 3283, mode: `MaxEncodedLen`) - /// Storage: `ForeignAssets::Approvals` (r:1 w:1) - /// Proof: `ForeignAssets::Approvals` (`max_values`: None, `max_size`: Some(746), added: 3221, mode: `MaxEncodedLen`) - /// Storage: `ForeignAssets::Account` (r:2 w:2) - /// Proof: `ForeignAssets::Account` (`max_values`: None, `max_size`: Some(732), added: 3207, mode: `MaxEncodedLen`) - /// Storage: `System::Account` (r:1 w:1) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - fn transfer_approved() -> Weight { - // Proof Size summary in bytes: - // Measured: `520` - // Estimated: `7404` - // Minimum execution time: 65_431_000 picoseconds. 
- Weight::from_parts(66_502_000, 0) - .saturating_add(Weight::from_parts(0, 7404)) - .saturating_add(T::DbWeight::get().reads(5)) - .saturating_add(T::DbWeight::get().writes(5)) - } - /// Storage: `ForeignAssets::Asset` (r:1 w:1) - /// Proof: `ForeignAssets::Asset` (`max_values`: None, `max_size`: Some(808), added: 3283, mode: `MaxEncodedLen`) - /// Storage: `ForeignAssets::Approvals` (r:1 w:1) - /// Proof: `ForeignAssets::Approvals` (`max_values`: None, `max_size`: Some(746), added: 3221, mode: `MaxEncodedLen`) - fn cancel_approval() -> Weight { - // Proof Size summary in bytes: - // Measured: `446` - // Estimated: `4273` - // Minimum execution time: 35_207_000 picoseconds. - Weight::from_parts(35_915_000, 0) - .saturating_add(Weight::from_parts(0, 4273)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `ForeignAssets::Asset` (r:1 w:1) - /// Proof: `ForeignAssets::Asset` (`max_values`: None, `max_size`: Some(808), added: 3283, mode: `MaxEncodedLen`) - /// Storage: `ForeignAssets::Approvals` (r:1 w:1) - /// Proof: `ForeignAssets::Approvals` (`max_values`: None, `max_size`: Some(746), added: 3221, mode: `MaxEncodedLen`) - fn force_cancel_approval() -> Weight { - // Proof Size summary in bytes: - // Measured: `446` - // Estimated: `4273` - // Minimum execution time: 35_768_000 picoseconds. - Weight::from_parts(36_553_000, 0) - .saturating_add(Weight::from_parts(0, 4273)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `ForeignAssets::Asset` (r:1 w:1) - /// Proof: `ForeignAssets::Asset` (`max_values`: None, `max_size`: Some(808), added: 3283, mode: `MaxEncodedLen`) - fn set_min_balance() -> Weight { - // Proof Size summary in bytes: - // Measured: `242` - // Estimated: `4273` - // Minimum execution time: 15_108_000 picoseconds. - Weight::from_parts(15_556_000, 0) - .saturating_add(Weight::from_parts(0, 4273)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `ForeignAssets::Account` (r:1 w:1) - /// Proof: `ForeignAssets::Account` (`max_values`: None, `max_size`: Some(732), added: 3207, mode: `MaxEncodedLen`) - /// Storage: `ForeignAssets::Asset` (r:1 w:1) - /// Proof: `ForeignAssets::Asset` (`max_values`: None, `max_size`: Some(808), added: 3283, mode: `MaxEncodedLen`) - /// Storage: `System::Account` (r:1 w:1) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - fn touch() -> Weight { - // Proof Size summary in bytes: - // Measured: `345` - // Estimated: `4273` - // Minimum execution time: 34_373_000 picoseconds. - Weight::from_parts(35_200_000, 0) - .saturating_add(Weight::from_parts(0, 4273)) - .saturating_add(T::DbWeight::get().reads(3)) - .saturating_add(T::DbWeight::get().writes(3)) - } - /// Storage: `ForeignAssets::Account` (r:1 w:1) - /// Proof: `ForeignAssets::Account` (`max_values`: None, `max_size`: Some(732), added: 3207, mode: `MaxEncodedLen`) - /// Storage: `ForeignAssets::Asset` (r:1 w:1) - /// Proof: `ForeignAssets::Asset` (`max_values`: None, `max_size`: Some(808), added: 3283, mode: `MaxEncodedLen`) - fn touch_other() -> Weight { - // Proof Size summary in bytes: - // Measured: `242` - // Estimated: `4273` - // Minimum execution time: 32_201_000 picoseconds. 
- Weight::from_parts(33_591_000, 0) - .saturating_add(Weight::from_parts(0, 4273)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `ForeignAssets::Account` (r:1 w:1) - /// Proof: `ForeignAssets::Account` (`max_values`: None, `max_size`: Some(732), added: 3207, mode: `MaxEncodedLen`) - /// Storage: `ForeignAssets::Asset` (r:1 w:1) - /// Proof: `ForeignAssets::Asset` (`max_values`: None, `max_size`: Some(808), added: 3283, mode: `MaxEncodedLen`) - /// Storage: `System::Account` (r:1 w:1) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - fn refund() -> Weight { - // Proof Size summary in bytes: - // Measured: `471` - // Estimated: `4273` - // Minimum execution time: 31_148_000 picoseconds. - Weight::from_parts(31_751_000, 0) - .saturating_add(Weight::from_parts(0, 4273)) - .saturating_add(T::DbWeight::get().reads(3)) - .saturating_add(T::DbWeight::get().writes(3)) - } - /// Storage: `ForeignAssets::Account` (r:1 w:1) - /// Proof: `ForeignAssets::Account` (`max_values`: None, `max_size`: Some(732), added: 3207, mode: `MaxEncodedLen`) - /// Storage: `ForeignAssets::Asset` (r:1 w:1) - /// Proof: `ForeignAssets::Asset` (`max_values`: None, `max_size`: Some(808), added: 3283, mode: `MaxEncodedLen`) - fn refund_other() -> Weight { - // Proof Size summary in bytes: - // Measured: `401` - // Estimated: `4273` - // Minimum execution time: 29_127_000 picoseconds. - Weight::from_parts(29_922_000, 0) - .saturating_add(Weight::from_parts(0, 4273)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `ForeignAssets::Asset` (r:1 w:0) - /// Proof: `ForeignAssets::Asset` (`max_values`: None, `max_size`: Some(808), added: 3283, mode: `MaxEncodedLen`) - /// Storage: `ForeignAssets::Account` (r:1 w:1) - /// Proof: `ForeignAssets::Account` (`max_values`: None, `max_size`: Some(732), added: 3207, mode: `MaxEncodedLen`) - fn block() -> Weight { - // Proof Size summary in bytes: - // Measured: `350` - // Estimated: `4273` - // Minimum execution time: 18_386_000 picoseconds. - Weight::from_parts(18_762_000, 0) - .saturating_add(Weight::from_parts(0, 4273)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(1)) - } -} diff --git a/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/weights/pallet_assets_local.rs b/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/weights/pallet_assets_local.rs deleted file mode 100644 index 810f5b57c45..00000000000 --- a/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/weights/pallet_assets_local.rs +++ /dev/null @@ -1,528 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! Autogenerated weights for `pallet_assets` -//! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! 
DATE: 2023-07-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-ynta1nyy-project-238-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! EXECUTION: ``, WASM-EXECUTION: `Compiled`, CHAIN: `Some("asset-hub-polkadot-dev")`, DB CACHE: 1024 - -// Executed Command: -// ./target/production/polkadot-parachain -// benchmark -// pallet -// --chain=asset-hub-polkadot-dev -// --wasm-execution=compiled -// --pallet=pallet_assets -// --no-storage-info -// --no-median-slopes -// --no-min-squares -// --extrinsic=* -// --steps=50 -// --repeat=20 -// --json -// --header=./file_header.txt -// --output=./parachains/runtimes/assets/asset-hub-polkadot/src/weights/ - -#![cfg_attr(rustfmt, rustfmt_skip)] -#![allow(unused_parens)] -#![allow(unused_imports)] -#![allow(missing_docs)] - -use frame_support::{traits::Get, weights::Weight}; -use core::marker::PhantomData; - -/// Weight functions for `pallet_assets`. -pub struct WeightInfo(PhantomData); -impl pallet_assets::WeightInfo for WeightInfo { - /// Storage: `Assets::Asset` (r:1 w:1) - /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) - /// Storage: `System::Account` (r:1 w:1) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - fn create() -> Weight { - // Proof Size summary in bytes: - // Measured: `109` - // Estimated: `3675` - // Minimum execution time: 26_698_000 picoseconds. - Weight::from_parts(27_507_000, 0) - .saturating_add(Weight::from_parts(0, 3675)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `Assets::Asset` (r:1 w:1) - /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) - fn force_create() -> Weight { - // Proof Size summary in bytes: - // Measured: `6` - // Estimated: `3675` - // Minimum execution time: 10_833_000 picoseconds. - Weight::from_parts(11_314_000, 0) - .saturating_add(Weight::from_parts(0, 3675)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `Assets::Asset` (r:1 w:1) - /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) - fn start_destroy() -> Weight { - // Proof Size summary in bytes: - // Measured: `277` - // Estimated: `3675` - // Minimum execution time: 13_389_000 picoseconds. - Weight::from_parts(14_231_000, 0) - .saturating_add(Weight::from_parts(0, 3675)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `Assets::Asset` (r:1 w:1) - /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) - /// Storage: `Assets::Account` (r:1001 w:1000) - /// Proof: `Assets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) - /// Storage: `System::Account` (r:1000 w:1000) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - /// The range of component `c` is `[0, 1000]`. - /// The range of component `c` is `[0, 1000]`. - fn destroy_accounts(c: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `0 + c * (208 ±0)` - // Estimated: `3675 + c * (2609 ±0)` - // Minimum execution time: 16_027_000 picoseconds. 
- Weight::from_parts(16_455_000, 0) - .saturating_add(Weight::from_parts(0, 3675)) - // Standard Error: 10_266 - .saturating_add(Weight::from_parts(15_263_742, 0).saturating_mul(c.into())) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().reads((2_u64).saturating_mul(c.into()))) - .saturating_add(T::DbWeight::get().writes(1)) - .saturating_add(T::DbWeight::get().writes((2_u64).saturating_mul(c.into()))) - .saturating_add(Weight::from_parts(0, 2609).saturating_mul(c.into())) - } - /// Storage: `Assets::Asset` (r:1 w:1) - /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) - /// Storage: `Assets::Approvals` (r:1001 w:1000) - /// Proof: `Assets::Approvals` (`max_values`: None, `max_size`: Some(148), added: 2623, mode: `MaxEncodedLen`) - /// The range of component `a` is `[0, 1000]`. - /// The range of component `a` is `[0, 1000]`. - fn destroy_approvals(a: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `414 + a * (86 ±0)` - // Estimated: `3675 + a * (2623 ±0)` - // Minimum execution time: 17_167_000 picoseconds. - Weight::from_parts(17_397_000, 0) - .saturating_add(Weight::from_parts(0, 3675)) - // Standard Error: 5_072 - .saturating_add(Weight::from_parts(15_429_203, 0).saturating_mul(a.into())) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(a.into()))) - .saturating_add(T::DbWeight::get().writes(1)) - .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(a.into()))) - .saturating_add(Weight::from_parts(0, 2623).saturating_mul(a.into())) - } - /// Storage: `Assets::Asset` (r:1 w:1) - /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) - /// Storage: `Assets::Metadata` (r:1 w:0) - /// Proof: `Assets::Metadata` (`max_values`: None, `max_size`: Some(140), added: 2615, mode: `MaxEncodedLen`) - fn finish_destroy() -> Weight { - // Proof Size summary in bytes: - // Measured: `243` - // Estimated: `3675` - // Minimum execution time: 13_694_000 picoseconds. - Weight::from_parts(14_239_000, 0) - .saturating_add(Weight::from_parts(0, 3675)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `Assets::Asset` (r:1 w:1) - /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) - /// Storage: `Assets::Account` (r:1 w:1) - /// Proof: `Assets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) - fn mint() -> Weight { - // Proof Size summary in bytes: - // Measured: `243` - // Estimated: `3675` - // Minimum execution time: 24_406_000 picoseconds. - Weight::from_parts(24_981_000, 0) - .saturating_add(Weight::from_parts(0, 3675)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `Assets::Asset` (r:1 w:1) - /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) - /// Storage: `Assets::Account` (r:1 w:1) - /// Proof: `Assets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) - fn burn() -> Weight { - // Proof Size summary in bytes: - // Measured: `351` - // Estimated: `3675` - // Minimum execution time: 31_372_000 picoseconds. 
- Weight::from_parts(32_021_000, 0) - .saturating_add(Weight::from_parts(0, 3675)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `Assets::Asset` (r:1 w:1) - /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) - /// Storage: `Assets::Account` (r:2 w:2) - /// Proof: `Assets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) - /// Storage: `System::Account` (r:1 w:1) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - fn transfer() -> Weight { - // Proof Size summary in bytes: - // Measured: `351` - // Estimated: `6208` - // Minimum execution time: 42_982_000 picoseconds. - Weight::from_parts(43_918_000, 0) - .saturating_add(Weight::from_parts(0, 6208)) - .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(4)) - } - /// Storage: `Assets::Asset` (r:1 w:1) - /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) - /// Storage: `Assets::Account` (r:2 w:2) - /// Proof: `Assets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) - /// Storage: `System::Account` (r:1 w:1) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - fn transfer_keep_alive() -> Weight { - // Proof Size summary in bytes: - // Measured: `351` - // Estimated: `6208` - // Minimum execution time: 37_161_000 picoseconds. - Weight::from_parts(38_756_000, 0) - .saturating_add(Weight::from_parts(0, 6208)) - .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(4)) - } - /// Storage: `Assets::Asset` (r:1 w:1) - /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) - /// Storage: `Assets::Account` (r:2 w:2) - /// Proof: `Assets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) - /// Storage: `System::Account` (r:1 w:1) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - fn force_transfer() -> Weight { - // Proof Size summary in bytes: - // Measured: `351` - // Estimated: `6208` - // Minimum execution time: 43_141_000 picoseconds. - Weight::from_parts(44_187_000, 0) - .saturating_add(Weight::from_parts(0, 6208)) - .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(4)) - } - /// Storage: `Assets::Asset` (r:1 w:0) - /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) - /// Storage: `Assets::Account` (r:1 w:1) - /// Proof: `Assets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) - fn freeze() -> Weight { - // Proof Size summary in bytes: - // Measured: `351` - // Estimated: `3675` - // Minimum execution time: 16_721_000 picoseconds. 
- Weight::from_parts(17_433_000, 0) - .saturating_add(Weight::from_parts(0, 3675)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `Assets::Asset` (r:1 w:0) - /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) - /// Storage: `Assets::Account` (r:1 w:1) - /// Proof: `Assets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) - fn thaw() -> Weight { - // Proof Size summary in bytes: - // Measured: `351` - // Estimated: `3675` - // Minimum execution time: 16_623_000 picoseconds. - Weight::from_parts(17_110_000, 0) - .saturating_add(Weight::from_parts(0, 3675)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `Assets::Asset` (r:1 w:1) - /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) - fn freeze_asset() -> Weight { - // Proof Size summary in bytes: - // Measured: `277` - // Estimated: `3675` - // Minimum execution time: 13_079_000 picoseconds. - Weight::from_parts(13_700_000, 0) - .saturating_add(Weight::from_parts(0, 3675)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `Assets::Asset` (r:1 w:1) - /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) - fn thaw_asset() -> Weight { - // Proof Size summary in bytes: - // Measured: `277` - // Estimated: `3675` - // Minimum execution time: 13_026_000 picoseconds. - Weight::from_parts(13_444_000, 0) - .saturating_add(Weight::from_parts(0, 3675)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `Assets::Asset` (r:1 w:1) - /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) - /// Storage: `Assets::Metadata` (r:1 w:0) - /// Proof: `Assets::Metadata` (`max_values`: None, `max_size`: Some(140), added: 2615, mode: `MaxEncodedLen`) - fn transfer_ownership() -> Weight { - // Proof Size summary in bytes: - // Measured: `243` - // Estimated: `3675` - // Minimum execution time: 13_945_000 picoseconds. - Weight::from_parts(14_792_000, 0) - .saturating_add(Weight::from_parts(0, 3675)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `Assets::Asset` (r:1 w:1) - /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) - fn set_team() -> Weight { - // Proof Size summary in bytes: - // Measured: `243` - // Estimated: `3675` - // Minimum execution time: 12_800_000 picoseconds. - Weight::from_parts(13_183_000, 0) - .saturating_add(Weight::from_parts(0, 3675)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `Assets::Asset` (r:1 w:0) - /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) - /// Storage: `Assets::Metadata` (r:1 w:1) - /// Proof: `Assets::Metadata` (`max_values`: None, `max_size`: Some(140), added: 2615, mode: `MaxEncodedLen`) - /// The range of component `n` is `[0, 50]`. - /// The range of component `s` is `[0, 50]`. - /// The range of component `n` is `[0, 50]`. - /// The range of component `s` is `[0, 50]`. 
- fn set_metadata(_n: u32, s: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `243` - // Estimated: `3675` - // Minimum execution time: 27_637_000 picoseconds. - Weight::from_parts(28_967_060, 0) - .saturating_add(Weight::from_parts(0, 3675)) - // Standard Error: 464 - .saturating_add(Weight::from_parts(572, 0).saturating_mul(s.into())) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `Assets::Asset` (r:1 w:0) - /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) - /// Storage: `Assets::Metadata` (r:1 w:1) - /// Proof: `Assets::Metadata` (`max_values`: None, `max_size`: Some(140), added: 2615, mode: `MaxEncodedLen`) - fn clear_metadata() -> Weight { - // Proof Size summary in bytes: - // Measured: `407` - // Estimated: `3675` - // Minimum execution time: 28_427_000 picoseconds. - Weight::from_parts(28_961_000, 0) - .saturating_add(Weight::from_parts(0, 3675)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `Assets::Asset` (r:1 w:0) - /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) - /// Storage: `Assets::Metadata` (r:1 w:1) - /// Proof: `Assets::Metadata` (`max_values`: None, `max_size`: Some(140), added: 2615, mode: `MaxEncodedLen`) - /// The range of component `n` is `[0, 50]`. - /// The range of component `s` is `[0, 50]`. - /// The range of component `n` is `[0, 50]`. - /// The range of component `s` is `[0, 50]`. - fn force_set_metadata(_n: u32, s: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `82` - // Estimated: `3675` - // Minimum execution time: 12_251_000 picoseconds. - Weight::from_parts(12_928_907, 0) - .saturating_add(Weight::from_parts(0, 3675)) - // Standard Error: 244 - .saturating_add(Weight::from_parts(1_800, 0).saturating_mul(s.into())) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `Assets::Asset` (r:1 w:0) - /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) - /// Storage: `Assets::Metadata` (r:1 w:1) - /// Proof: `Assets::Metadata` (`max_values`: None, `max_size`: Some(140), added: 2615, mode: `MaxEncodedLen`) - fn force_clear_metadata() -> Weight { - // Proof Size summary in bytes: - // Measured: `407` - // Estimated: `3675` - // Minimum execution time: 28_263_000 picoseconds. - Weight::from_parts(29_165_000, 0) - .saturating_add(Weight::from_parts(0, 3675)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `Assets::Asset` (r:1 w:1) - /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) - fn force_asset_status() -> Weight { - // Proof Size summary in bytes: - // Measured: `243` - // Estimated: `3675` - // Minimum execution time: 12_343_000 picoseconds. 
- Weight::from_parts(12_659_000, 0) - .saturating_add(Weight::from_parts(0, 3675)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `Assets::Asset` (r:1 w:1) - /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) - /// Storage: `Assets::Approvals` (r:1 w:1) - /// Proof: `Assets::Approvals` (`max_values`: None, `max_size`: Some(148), added: 2623, mode: `MaxEncodedLen`) - fn approve_transfer() -> Weight { - // Proof Size summary in bytes: - // Measured: `277` - // Estimated: `3675` - // Minimum execution time: 31_113_000 picoseconds. - Weight::from_parts(31_798_000, 0) - .saturating_add(Weight::from_parts(0, 3675)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `Assets::Asset` (r:1 w:1) - /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) - /// Storage: `Assets::Approvals` (r:1 w:1) - /// Proof: `Assets::Approvals` (`max_values`: None, `max_size`: Some(148), added: 2623, mode: `MaxEncodedLen`) - /// Storage: `Assets::Account` (r:2 w:2) - /// Proof: `Assets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) - /// Storage: `System::Account` (r:1 w:1) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - fn transfer_approved() -> Weight { - // Proof Size summary in bytes: - // Measured: `521` - // Estimated: `6208` - // Minimum execution time: 61_428_000 picoseconds. - Weight::from_parts(62_707_000, 0) - .saturating_add(Weight::from_parts(0, 6208)) - .saturating_add(T::DbWeight::get().reads(5)) - .saturating_add(T::DbWeight::get().writes(5)) - } - /// Storage: `Assets::Asset` (r:1 w:1) - /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) - /// Storage: `Assets::Approvals` (r:1 w:1) - /// Proof: `Assets::Approvals` (`max_values`: None, `max_size`: Some(148), added: 2623, mode: `MaxEncodedLen`) - fn cancel_approval() -> Weight { - // Proof Size summary in bytes: - // Measured: `447` - // Estimated: `3675` - // Minimum execution time: 33_538_000 picoseconds. - Weight::from_parts(34_216_000, 0) - .saturating_add(Weight::from_parts(0, 3675)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `Assets::Asset` (r:1 w:1) - /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) - /// Storage: `Assets::Approvals` (r:1 w:1) - /// Proof: `Assets::Approvals` (`max_values`: None, `max_size`: Some(148), added: 2623, mode: `MaxEncodedLen`) - fn force_cancel_approval() -> Weight { - // Proof Size summary in bytes: - // Measured: `447` - // Estimated: `3675` - // Minimum execution time: 33_870_000 picoseconds. - Weight::from_parts(34_709_000, 0) - .saturating_add(Weight::from_parts(0, 3675)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `Assets::Asset` (r:1 w:1) - /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) - fn set_min_balance() -> Weight { - // Proof Size summary in bytes: - // Measured: `243` - // Estimated: `3675` - // Minimum execution time: 13_358_000 picoseconds. 
- Weight::from_parts(13_735_000, 0) - .saturating_add(Weight::from_parts(0, 3675)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `Assets::Account` (r:1 w:1) - /// Proof: `Assets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) - /// Storage: `Assets::Asset` (r:1 w:1) - /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) - /// Storage: `System::Account` (r:1 w:1) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - fn touch() -> Weight { - // Proof Size summary in bytes: - // Measured: `346` - // Estimated: `3675` - // Minimum execution time: 32_159_000 picoseconds. - Weight::from_parts(32_998_000, 0) - .saturating_add(Weight::from_parts(0, 3675)) - .saturating_add(T::DbWeight::get().reads(3)) - .saturating_add(T::DbWeight::get().writes(3)) - } - /// Storage: `Assets::Account` (r:1 w:1) - /// Proof: `Assets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) - /// Storage: `Assets::Asset` (r:1 w:1) - /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) - fn touch_other() -> Weight { - // Proof Size summary in bytes: - // Measured: `243` - // Estimated: `3675` - // Minimum execution time: 30_709_000 picoseconds. - Weight::from_parts(31_486_000, 0) - .saturating_add(Weight::from_parts(0, 3675)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `Assets::Account` (r:1 w:1) - /// Proof: `Assets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) - /// Storage: `Assets::Asset` (r:1 w:1) - /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) - /// Storage: `System::Account` (r:1 w:1) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - fn refund() -> Weight { - // Proof Size summary in bytes: - // Measured: `472` - // Estimated: `3675` - // Minimum execution time: 29_557_000 picoseconds. - Weight::from_parts(30_510_000, 0) - .saturating_add(Weight::from_parts(0, 3675)) - .saturating_add(T::DbWeight::get().reads(3)) - .saturating_add(T::DbWeight::get().writes(3)) - } - /// Storage: `Assets::Account` (r:1 w:1) - /// Proof: `Assets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) - /// Storage: `Assets::Asset` (r:1 w:1) - /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) - fn refund_other() -> Weight { - // Proof Size summary in bytes: - // Measured: `402` - // Estimated: `3675` - // Minimum execution time: 28_027_000 picoseconds. - Weight::from_parts(28_865_000, 0) - .saturating_add(Weight::from_parts(0, 3675)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `Assets::Asset` (r:1 w:0) - /// Proof: `Assets::Asset` (`max_values`: None, `max_size`: Some(210), added: 2685, mode: `MaxEncodedLen`) - /// Storage: `Assets::Account` (r:1 w:1) - /// Proof: `Assets::Account` (`max_values`: None, `max_size`: Some(134), added: 2609, mode: `MaxEncodedLen`) - fn block() -> Weight { - // Proof Size summary in bytes: - // Measured: `351` - // Estimated: `3675` - // Minimum execution time: 16_758_000 picoseconds. 
- Weight::from_parts(17_280_000, 0) - .saturating_add(Weight::from_parts(0, 3675)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(1)) - } -} diff --git a/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/weights/pallet_balances.rs b/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/weights/pallet_balances.rs deleted file mode 100644 index 7c4501e6d88..00000000000 --- a/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/weights/pallet_balances.rs +++ /dev/null @@ -1,152 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! Autogenerated weights for `pallet_balances` -//! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-07-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-ynta1nyy-project-238-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! EXECUTION: ``, WASM-EXECUTION: `Compiled`, CHAIN: `Some("asset-hub-polkadot-dev")`, DB CACHE: 1024 - -// Executed Command: -// ./target/production/polkadot-parachain -// benchmark -// pallet -// --chain=asset-hub-polkadot-dev -// --wasm-execution=compiled -// --pallet=pallet_balances -// --no-storage-info -// --no-median-slopes -// --no-min-squares -// --extrinsic=* -// --steps=50 -// --repeat=20 -// --json -// --header=./file_header.txt -// --output=./parachains/runtimes/assets/asset-hub-polkadot/src/weights/ - -#![cfg_attr(rustfmt, rustfmt_skip)] -#![allow(unused_parens)] -#![allow(unused_imports)] -#![allow(missing_docs)] - -use frame_support::{traits::Get, weights::Weight}; -use core::marker::PhantomData; - -/// Weight functions for `pallet_balances`. -pub struct WeightInfo(PhantomData); -impl pallet_balances::WeightInfo for WeightInfo { - /// Storage: `System::Account` (r:1 w:1) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - fn transfer_allow_death() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `3593` - // Minimum execution time: 56_173_000 picoseconds. - Weight::from_parts(57_097_000, 0) - .saturating_add(Weight::from_parts(0, 3593)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `System::Account` (r:1 w:1) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - fn transfer_keep_alive() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `3593` - // Minimum execution time: 41_470_000 picoseconds. 
- Weight::from_parts(42_051_000, 0) - .saturating_add(Weight::from_parts(0, 3593)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `System::Account` (r:1 w:1) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - fn force_set_balance_creating() -> Weight { - // Proof Size summary in bytes: - // Measured: `103` - // Estimated: `3593` - // Minimum execution time: 14_771_000 picoseconds. - Weight::from_parts(15_125_000, 0) - .saturating_add(Weight::from_parts(0, 3593)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `System::Account` (r:1 w:1) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - fn force_set_balance_killing() -> Weight { - // Proof Size summary in bytes: - // Measured: `103` - // Estimated: `3593` - // Minimum execution time: 22_210_000 picoseconds. - Weight::from_parts(22_712_000, 0) - .saturating_add(Weight::from_parts(0, 3593)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `System::Account` (r:2 w:2) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - fn force_transfer() -> Weight { - // Proof Size summary in bytes: - // Measured: `103` - // Estimated: `6196` - // Minimum execution time: 57_475_000 picoseconds. - Weight::from_parts(58_343_000, 0) - .saturating_add(Weight::from_parts(0, 6196)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `System::Account` (r:1 w:1) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - fn transfer_all() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `3593` - // Minimum execution time: 52_139_000 picoseconds. - Weight::from_parts(52_601_000, 0) - .saturating_add(Weight::from_parts(0, 3593)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `System::Account` (r:1 w:1) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - fn force_unreserve() -> Weight { - // Proof Size summary in bytes: - // Measured: `103` - // Estimated: `3593` - // Minimum execution time: 17_372_000 picoseconds. - Weight::from_parts(17_978_000, 0) - .saturating_add(Weight::from_parts(0, 3593)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `System::Account` (r:999 w:999) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - /// The range of component `u` is `[1, 1000]`. - fn upgrade_accounts(u: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `0 + u * (136 ±0)` - // Estimated: `990 + u * (2603 ±0)` - // Minimum execution time: 17_143_000 picoseconds. 
- Weight::from_parts(17_475_000, 0) - .saturating_add(Weight::from_parts(0, 990)) - // Standard Error: 16_909 - .saturating_add(Weight::from_parts(15_474_628, 0).saturating_mul(u.into())) - .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(u.into()))) - .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(u.into()))) - .saturating_add(Weight::from_parts(0, 2603).saturating_mul(u.into())) - } -} diff --git a/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/weights/pallet_collator_selection.rs b/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/weights/pallet_collator_selection.rs deleted file mode 100644 index b3062984baf..00000000000 --- a/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/weights/pallet_collator_selection.rs +++ /dev/null @@ -1,246 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! Autogenerated weights for `pallet_collator_selection` -//! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-07-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-ynta1nyy-project-238-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! EXECUTION: ``, WASM-EXECUTION: `Compiled`, CHAIN: `Some("asset-hub-polkadot-dev")`, DB CACHE: 1024 - -// Executed Command: -// ./target/production/polkadot-parachain -// benchmark -// pallet -// --chain=asset-hub-polkadot-dev -// --wasm-execution=compiled -// --pallet=pallet_collator_selection -// --no-storage-info -// --no-median-slopes -// --no-min-squares -// --extrinsic=* -// --steps=50 -// --repeat=20 -// --json -// --header=./file_header.txt -// --output=./parachains/runtimes/assets/asset-hub-polkadot/src/weights/ - -#![cfg_attr(rustfmt, rustfmt_skip)] -#![allow(unused_parens)] -#![allow(unused_imports)] -#![allow(missing_docs)] - -use frame_support::{traits::Get, weights::Weight}; -use core::marker::PhantomData; - -/// Weight functions for `pallet_collator_selection`. -pub struct WeightInfo(PhantomData); -impl pallet_collator_selection::WeightInfo for WeightInfo { - /// Storage: `Session::NextKeys` (r:20 w:0) - /// Proof: `Session::NextKeys` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// Storage: `CollatorSelection::Invulnerables` (r:0 w:1) - /// Proof: `CollatorSelection::Invulnerables` (`max_values`: Some(1), `max_size`: Some(641), added: 1136, mode: `MaxEncodedLen`) - /// The range of component `b` is `[1, 20]`. - fn set_invulnerables(b: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `163 + b * (79 ±0)` - // Estimated: `1154 + b * (2555 ±0)` - // Minimum execution time: 14_882_000 picoseconds. 
- Weight::from_parts(12_290_529, 0) - .saturating_add(Weight::from_parts(0, 1154)) - // Standard Error: 6_842 - .saturating_add(Weight::from_parts(3_189_571, 0).saturating_mul(b.into())) - .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(b.into()))) - .saturating_add(T::DbWeight::get().writes(1)) - .saturating_add(Weight::from_parts(0, 2555).saturating_mul(b.into())) - } - /// Storage: `Session::NextKeys` (r:1 w:0) - /// Proof: `Session::NextKeys` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// Storage: `CollatorSelection::Invulnerables` (r:1 w:1) - /// Proof: `CollatorSelection::Invulnerables` (`max_values`: Some(1), `max_size`: Some(641), added: 1136, mode: `MaxEncodedLen`) - /// Storage: `CollatorSelection::Candidates` (r:1 w:1) - /// Proof: `CollatorSelection::Candidates` (`max_values`: Some(1), `max_size`: Some(4802), added: 5297, mode: `MaxEncodedLen`) - /// Storage: `System::Account` (r:1 w:1) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - /// The range of component `b` is `[1, 19]`. - /// The range of component `c` is `[1, 99]`. - fn add_invulnerable(b: u32, c: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `756 + b * (32 ±0) + c * (53 ±0)` - // Estimated: `6287 + b * (37 ±0) + c * (53 ±0)` - // Minimum execution time: 48_113_000 picoseconds. - Weight::from_parts(49_767_909, 0) - .saturating_add(Weight::from_parts(0, 6287)) - // Standard Error: 2_725 - .saturating_add(Weight::from_parts(232_655, 0).saturating_mul(c.into())) - .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(3)) - .saturating_add(Weight::from_parts(0, 37).saturating_mul(b.into())) - .saturating_add(Weight::from_parts(0, 53).saturating_mul(c.into())) - } - /// Storage: `CollatorSelection::Candidates` (r:1 w:0) - /// Proof: `CollatorSelection::Candidates` (`max_values`: Some(1), `max_size`: Some(4802), added: 5297, mode: `MaxEncodedLen`) - /// Storage: `CollatorSelection::Invulnerables` (r:1 w:1) - /// Proof: `CollatorSelection::Invulnerables` (`max_values`: Some(1), `max_size`: Some(641), added: 1136, mode: `MaxEncodedLen`) - /// The range of component `b` is `[5, 20]`. - fn remove_invulnerable(b: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `119 + b * (32 ±0)` - // Estimated: `6287` - // Minimum execution time: 16_228_000 picoseconds. - Weight::from_parts(16_351_387, 0) - .saturating_add(Weight::from_parts(0, 6287)) - // Standard Error: 2_953 - .saturating_add(Weight::from_parts(140_754, 0).saturating_mul(b.into())) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `CollatorSelection::DesiredCandidates` (r:0 w:1) - /// Proof: `CollatorSelection::DesiredCandidates` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - fn set_desired_candidates() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 7_541_000 picoseconds. 
- Weight::from_parts(7_720_000, 0) - .saturating_add(Weight::from_parts(0, 0)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `CollatorSelection::CandidacyBond` (r:0 w:1) - /// Proof: `CollatorSelection::CandidacyBond` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) - fn set_candidacy_bond(_c: u32, _k: u32) -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 7_402_000 picoseconds. - Weight::from_parts(7_729_000, 0) - .saturating_add(Weight::from_parts(0, 0)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `CollatorSelection::Candidates` (r:1 w:1) - /// Proof: `CollatorSelection::Candidates` (`max_values`: Some(1), `max_size`: Some(4802), added: 5297, mode: `MaxEncodedLen`) - /// Storage: `CollatorSelection::DesiredCandidates` (r:1 w:0) - /// Proof: `CollatorSelection::DesiredCandidates` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - /// Storage: `CollatorSelection::Invulnerables` (r:1 w:0) - /// Proof: `CollatorSelection::Invulnerables` (`max_values`: Some(1), `max_size`: Some(641), added: 1136, mode: `MaxEncodedLen`) - /// Storage: `Session::NextKeys` (r:1 w:0) - /// Proof: `Session::NextKeys` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// Storage: `CollatorSelection::CandidacyBond` (r:1 w:0) - /// Proof: `CollatorSelection::CandidacyBond` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) - /// Storage: `CollatorSelection::LastAuthoredBlock` (r:0 w:1) - /// Proof: `CollatorSelection::LastAuthoredBlock` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) - /// The range of component `c` is `[1, 99]`. - fn register_as_candidate(c: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `736 + c * (52 ±0)` - // Estimated: `6287 + c * (54 ±0)` - // Minimum execution time: 41_874_000 picoseconds. - Weight::from_parts(45_654_015, 0) - .saturating_add(Weight::from_parts(0, 6287)) - // Standard Error: 2_317 - .saturating_add(Weight::from_parts(221_237, 0).saturating_mul(c.into())) - .saturating_add(T::DbWeight::get().reads(5)) - .saturating_add(T::DbWeight::get().writes(2)) - .saturating_add(Weight::from_parts(0, 54).saturating_mul(c.into())) - } - /// Storage: `CollatorSelection::Candidates` (r:1 w:1) - /// Proof: `CollatorSelection::Candidates` (`max_values`: Some(1), `max_size`: Some(4802), added: 5297, mode: `MaxEncodedLen`) - /// Storage: `CollatorSelection::Invulnerables` (r:1 w:0) - /// Proof: `CollatorSelection::Invulnerables` (`max_values`: Some(1), `max_size`: Some(641), added: 1136, mode: `MaxEncodedLen`) - /// Storage: `CollatorSelection::LastAuthoredBlock` (r:0 w:1) - /// Proof: `CollatorSelection::LastAuthoredBlock` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) - /// The range of component `c` is `[4, 100]`. - fn leave_intent(c: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `306 + c * (50 ±0)` - // Estimated: `6287` - // Minimum execution time: 33_693_000 picoseconds. 
- Weight::from_parts(37_321_527, 0) - .saturating_add(Weight::from_parts(0, 6287)) - // Standard Error: 3_499 - .saturating_add(Weight::from_parts(182_068, 0).saturating_mul(c.into())) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(2)) - } - fn update_bond(c: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `306 + c * (50 ±0)` - // Estimated: `6287` - // Minimum execution time: 34_814_000 picoseconds. - Weight::from_parts(36_371_520, 0) - .saturating_add(Weight::from_parts(0, 6287)) - // Standard Error: 2_391 - .saturating_add(Weight::from_parts(201_700, 0).saturating_mul(c.into())) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(2)) - } - fn take_candidate_slot(c: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `306 + c * (50 ±0)` - // Estimated: `6287` - // Minimum execution time: 34_814_000 picoseconds. - Weight::from_parts(36_371_520, 0) - .saturating_add(Weight::from_parts(0, 6287)) - // Standard Error: 2_391 - .saturating_add(Weight::from_parts(201_700, 0).saturating_mul(c.into())) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `System::Account` (r:2 w:2) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - /// Storage: `System::BlockWeight` (r:1 w:1) - /// Proof: `System::BlockWeight` (`max_values`: Some(1), `max_size`: Some(48), added: 543, mode: `MaxEncodedLen`) - /// Storage: `CollatorSelection::LastAuthoredBlock` (r:0 w:1) - /// Proof: `CollatorSelection::LastAuthoredBlock` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) - fn note_author() -> Weight { - // Proof Size summary in bytes: - // Measured: `103` - // Estimated: `6196` - // Minimum execution time: 44_412_000 picoseconds. - Weight::from_parts(45_196_000, 0) - .saturating_add(Weight::from_parts(0, 6196)) - .saturating_add(T::DbWeight::get().reads(3)) - .saturating_add(T::DbWeight::get().writes(4)) - } - /// Storage: `CollatorSelection::Candidates` (r:1 w:0) - /// Proof: `CollatorSelection::Candidates` (`max_values`: Some(1), `max_size`: Some(4802), added: 5297, mode: `MaxEncodedLen`) - /// Storage: `CollatorSelection::LastAuthoredBlock` (r:100 w:0) - /// Proof: `CollatorSelection::LastAuthoredBlock` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) - /// Storage: `CollatorSelection::Invulnerables` (r:1 w:0) - /// Proof: `CollatorSelection::Invulnerables` (`max_values`: Some(1), `max_size`: Some(641), added: 1136, mode: `MaxEncodedLen`) - /// Storage: `System::BlockWeight` (r:1 w:1) - /// Proof: `System::BlockWeight` (`max_values`: Some(1), `max_size`: Some(48), added: 543, mode: `MaxEncodedLen`) - /// Storage: `System::Account` (r:97 w:97) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - /// The range of component `r` is `[1, 100]`. - /// The range of component `c` is `[1, 100]`. - fn new_session(r: u32, c: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `2243 + c * (97 ±0) + r * (112 ±0)` - // Estimated: `6287 + c * (2519 ±0) + r * (2603 ±0)` - // Minimum execution time: 17_360_000 picoseconds. 
- Weight::from_parts(17_599_000, 0) - .saturating_add(Weight::from_parts(0, 6287)) - // Standard Error: 350_829 - .saturating_add(Weight::from_parts(15_375_949, 0).saturating_mul(c.into())) - .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(c.into()))) - .saturating_add(T::DbWeight::get().writes(1)) - .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(c.into()))) - .saturating_add(Weight::from_parts(0, 2519).saturating_mul(c.into())) - .saturating_add(Weight::from_parts(0, 2603).saturating_mul(r.into())) - } -} diff --git a/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/weights/pallet_message_queue.rs b/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/weights/pallet_message_queue.rs deleted file mode 100644 index a9f0cb07cfe..00000000000 --- a/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/weights/pallet_message_queue.rs +++ /dev/null @@ -1,179 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! Autogenerated weights for `pallet_message_queue` -//! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-03-24, STEPS: `2`, REPEAT: `1`, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `i9`, CPU: `13th Gen Intel(R) Core(TM) i9-13900K` -//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("westmint-dev"), DB CACHE: 1024 - -// Executed Command: -// ./target/release/polkadot-parachain -// benchmark -// pallet -// --chain -// westmint-dev -// --pallet -// pallet_message_queue -// --extrinsic -// * -// --execution -// wasm -// --wasm-execution -// compiled -// --output -// parachains/runtimes/assets/westmint/src/weights - -#![cfg_attr(rustfmt, rustfmt_skip)] -#![allow(unused_parens)] -#![allow(unused_imports)] - -use frame_support::{traits::Get, weights::Weight}; -use sp_std::marker::PhantomData; - -/// Weight functions for `pallet_message_queue`. -pub struct WeightInfo(PhantomData); -impl pallet_message_queue::WeightInfo for WeightInfo { - /// Storage: MessageQueue ServiceHead (r:1 w:0) - /// Proof: MessageQueue ServiceHead (max_values: Some(1), max_size: Some(5), added: 500, mode: MaxEncodedLen) - /// Storage: MessageQueue BookStateFor (r:2 w:2) - /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) - fn ready_ring_knit() -> Weight { - // Proof Size summary in bytes: - // Measured: `189` - // Estimated: `7534` - // Minimum execution time: 12_192_000 picoseconds. 
- Weight::from_parts(12_192_000, 0) - .saturating_add(Weight::from_parts(0, 7534)) - .saturating_add(T::DbWeight::get().reads(3)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: MessageQueue BookStateFor (r:2 w:2) - /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) - /// Storage: MessageQueue ServiceHead (r:1 w:1) - /// Proof: MessageQueue ServiceHead (max_values: Some(1), max_size: Some(5), added: 500, mode: MaxEncodedLen) - fn ready_ring_unknit() -> Weight { - // Proof Size summary in bytes: - // Measured: `184` - // Estimated: `7534` - // Minimum execution time: 10_447_000 picoseconds. - Weight::from_parts(10_447_000, 0) - .saturating_add(Weight::from_parts(0, 7534)) - .saturating_add(T::DbWeight::get().reads(3)) - .saturating_add(T::DbWeight::get().writes(3)) - } - /// Storage: MessageQueue BookStateFor (r:1 w:1) - /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) - fn service_queue_base() -> Weight { - // Proof Size summary in bytes: - // Measured: `6` - // Estimated: `3517` - // Minimum execution time: 4_851_000 picoseconds. - Weight::from_parts(4_851_000, 0) - .saturating_add(Weight::from_parts(0, 3517)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: MessageQueue Pages (r:1 w:1) - /// Proof: MessageQueue Pages (max_values: None, max_size: Some(65585), added: 68060, mode: MaxEncodedLen) - fn service_page_base_completion() -> Weight { - // Proof Size summary in bytes: - // Measured: `72` - // Estimated: `69050` - // Minimum execution time: 6_342_000 picoseconds. - Weight::from_parts(6_342_000, 0) - .saturating_add(Weight::from_parts(0, 69050)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: MessageQueue Pages (r:1 w:1) - /// Proof: MessageQueue Pages (max_values: None, max_size: Some(65585), added: 68060, mode: MaxEncodedLen) - fn service_page_base_no_completion() -> Weight { - // Proof Size summary in bytes: - // Measured: `72` - // Estimated: `69050` - // Minimum execution time: 6_199_000 picoseconds. - Weight::from_parts(6_199_000, 0) - .saturating_add(Weight::from_parts(0, 69050)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - fn service_page_item() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 58_612_000 picoseconds. - Weight::from_parts(58_612_000, 0) - .saturating_add(Weight::from_parts(0, 0)) - } - /// Storage: MessageQueue ServiceHead (r:1 w:1) - /// Proof: MessageQueue ServiceHead (max_values: Some(1), max_size: Some(5), added: 500, mode: MaxEncodedLen) - /// Storage: MessageQueue BookStateFor (r:1 w:0) - /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) - fn bump_service_head() -> Weight { - // Proof Size summary in bytes: - // Measured: `99` - // Estimated: `5007` - // Minimum execution time: 7_296_000 picoseconds. 
- Weight::from_parts(7_296_000, 0) - .saturating_add(Weight::from_parts(0, 5007)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: MessageQueue BookStateFor (r:1 w:1) - /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) - /// Storage: MessageQueue Pages (r:1 w:1) - /// Proof: MessageQueue Pages (max_values: None, max_size: Some(65585), added: 68060, mode: MaxEncodedLen) - fn reap_page() -> Weight { - // Proof Size summary in bytes: - // Measured: `65667` - // Estimated: `72567` - // Minimum execution time: 48_345_000 picoseconds. - Weight::from_parts(48_345_000, 0) - .saturating_add(Weight::from_parts(0, 72567)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: MessageQueue BookStateFor (r:1 w:1) - /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) - /// Storage: MessageQueue Pages (r:1 w:1) - /// Proof: MessageQueue Pages (max_values: None, max_size: Some(65585), added: 68060, mode: MaxEncodedLen) - fn execute_overweight_page_removed() -> Weight { - // Proof Size summary in bytes: - // Measured: `65667` - // Estimated: `72567` - // Minimum execution time: 56_441_000 picoseconds. - Weight::from_parts(56_441_000, 0) - .saturating_add(Weight::from_parts(0, 72567)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: MessageQueue BookStateFor (r:1 w:1) - /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) - /// Storage: MessageQueue Pages (r:1 w:1) - /// Proof: MessageQueue Pages (max_values: None, max_size: Some(65585), added: 68060, mode: MaxEncodedLen) - fn execute_overweight_page_updated() -> Weight { - // Proof Size summary in bytes: - // Measured: `65667` - // Estimated: `72567` - // Minimum execution time: 70_858_000 picoseconds. - Weight::from_parts(70_858_000, 0) - .saturating_add(Weight::from_parts(0, 72567)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(2)) - } -} diff --git a/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/weights/pallet_multisig.rs b/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/weights/pallet_multisig.rs deleted file mode 100644 index 0bb05511d7a..00000000000 --- a/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/weights/pallet_multisig.rs +++ /dev/null @@ -1,164 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! Autogenerated weights for `pallet_multisig` -//! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-07-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! WORST CASE MAP SIZE: `1000000` -//! 
HOSTNAME: `runner-ynta1nyy-project-238-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! EXECUTION: ``, WASM-EXECUTION: `Compiled`, CHAIN: `Some("asset-hub-polkadot-dev")`, DB CACHE: 1024 - -// Executed Command: -// ./target/production/polkadot-parachain -// benchmark -// pallet -// --chain=asset-hub-polkadot-dev -// --wasm-execution=compiled -// --pallet=pallet_multisig -// --no-storage-info -// --no-median-slopes -// --no-min-squares -// --extrinsic=* -// --steps=50 -// --repeat=20 -// --json -// --header=./file_header.txt -// --output=./parachains/runtimes/assets/asset-hub-polkadot/src/weights/ - -#![cfg_attr(rustfmt, rustfmt_skip)] -#![allow(unused_parens)] -#![allow(unused_imports)] -#![allow(missing_docs)] - -use frame_support::{traits::Get, weights::Weight}; -use core::marker::PhantomData; - -/// Weight functions for `pallet_multisig`. -pub struct WeightInfo(PhantomData); -impl pallet_multisig::WeightInfo for WeightInfo { - /// The range of component `z` is `[0, 10000]`. - fn as_multi_threshold_1(z: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 13_710_000 picoseconds. - Weight::from_parts(14_702_959, 0) - .saturating_add(Weight::from_parts(0, 0)) - // Standard Error: 7 - .saturating_add(Weight::from_parts(568, 0).saturating_mul(z.into())) - } - /// Storage: `Multisig::Multisigs` (r:1 w:1) - /// Proof: `Multisig::Multisigs` (`max_values`: None, `max_size`: Some(3346), added: 5821, mode: `MaxEncodedLen`) - /// The range of component `s` is `[2, 100]`. - /// The range of component `z` is `[0, 10000]`. - fn as_multi_create(s: u32, z: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `262 + s * (2 ±0)` - // Estimated: `6811` - // Minimum execution time: 45_518_000 picoseconds. - Weight::from_parts(35_243_068, 0) - .saturating_add(Weight::from_parts(0, 6811)) - // Standard Error: 1_634 - .saturating_add(Weight::from_parts(116_658, 0).saturating_mul(s.into())) - // Standard Error: 16 - .saturating_add(Weight::from_parts(1_444, 0).saturating_mul(z.into())) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `Multisig::Multisigs` (r:1 w:1) - /// Proof: `Multisig::Multisigs` (`max_values`: None, `max_size`: Some(3346), added: 5821, mode: `MaxEncodedLen`) - /// The range of component `s` is `[3, 100]`. - /// The range of component `z` is `[0, 10000]`. - fn as_multi_approve(s: u32, z: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `282` - // Estimated: `6811` - // Minimum execution time: 29_590_000 picoseconds. - Weight::from_parts(21_574_604, 0) - .saturating_add(Weight::from_parts(0, 6811)) - // Standard Error: 1_622 - .saturating_add(Weight::from_parts(95_669, 0).saturating_mul(s.into())) - // Standard Error: 15 - .saturating_add(Weight::from_parts(1_459, 0).saturating_mul(z.into())) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `Multisig::Multisigs` (r:1 w:1) - /// Proof: `Multisig::Multisigs` (`max_values`: None, `max_size`: Some(3346), added: 5821, mode: `MaxEncodedLen`) - /// Storage: `System::Account` (r:1 w:1) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - /// The range of component `s` is `[2, 100]`. - /// The range of component `z` is `[0, 10000]`. 
- fn as_multi_complete(s: u32, z: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `385 + s * (33 ±0)` - // Estimated: `6811` - // Minimum execution time: 51_056_000 picoseconds. - Weight::from_parts(35_799_301, 0) - .saturating_add(Weight::from_parts(0, 6811)) - // Standard Error: 1_629 - .saturating_add(Weight::from_parts(183_343, 0).saturating_mul(s.into())) - // Standard Error: 15 - .saturating_add(Weight::from_parts(1_686, 0).saturating_mul(z.into())) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `Multisig::Multisigs` (r:1 w:1) - /// Proof: `Multisig::Multisigs` (`max_values`: None, `max_size`: Some(3346), added: 5821, mode: `MaxEncodedLen`) - /// The range of component `s` is `[2, 100]`. - fn approve_as_multi_create(s: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `263 + s * (2 ±0)` - // Estimated: `6811` - // Minimum execution time: 30_910_000 picoseconds. - Weight::from_parts(32_413_023, 0) - .saturating_add(Weight::from_parts(0, 6811)) - // Standard Error: 1_346 - .saturating_add(Weight::from_parts(128_779, 0).saturating_mul(s.into())) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `Multisig::Multisigs` (r:1 w:1) - /// Proof: `Multisig::Multisigs` (`max_values`: None, `max_size`: Some(3346), added: 5821, mode: `MaxEncodedLen`) - /// The range of component `s` is `[2, 100]`. - fn approve_as_multi_approve(s: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `282` - // Estimated: `6811` - // Minimum execution time: 17_926_000 picoseconds. - Weight::from_parts(18_477_305, 0) - .saturating_add(Weight::from_parts(0, 6811)) - // Standard Error: 1_367 - .saturating_add(Weight::from_parts(113_018, 0).saturating_mul(s.into())) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `Multisig::Multisigs` (r:1 w:1) - /// Proof: `Multisig::Multisigs` (`max_values`: None, `max_size`: Some(3346), added: 5821, mode: `MaxEncodedLen`) - /// The range of component `s` is `[2, 100]`. - fn cancel_as_multi(s: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `454 + s * (1 ±0)` - // Estimated: `6811` - // Minimum execution time: 32_232_000 picoseconds. - Weight::from_parts(33_724_753, 0) - .saturating_add(Weight::from_parts(0, 6811)) - // Standard Error: 1_192 - .saturating_add(Weight::from_parts(121_574, 0).saturating_mul(s.into())) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } -} diff --git a/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/weights/pallet_nfts.rs b/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/weights/pallet_nfts.rs deleted file mode 100644 index 842daf49f59..00000000000 --- a/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/weights/pallet_nfts.rs +++ /dev/null @@ -1,772 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -//! Autogenerated weights for `pallet_nfts` -//! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-07-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-ynta1nyy-project-238-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! EXECUTION: ``, WASM-EXECUTION: `Compiled`, CHAIN: `Some("asset-hub-polkadot-dev")`, DB CACHE: 1024 - -// Executed Command: -// ./target/production/polkadot-parachain -// benchmark -// pallet -// --chain=asset-hub-polkadot-dev -// --wasm-execution=compiled -// --pallet=pallet_nfts -// --no-storage-info -// --no-median-slopes -// --no-min-squares -// --extrinsic=* -// --steps=50 -// --repeat=20 -// --json -// --header=./file_header.txt -// --output=./parachains/runtimes/assets/asset-hub-polkadot/src/weights/ - -#![cfg_attr(rustfmt, rustfmt_skip)] -#![allow(unused_parens)] -#![allow(unused_imports)] -#![allow(missing_docs)] - -use frame_support::{traits::Get, weights::Weight}; -use core::marker::PhantomData; - -/// Weight functions for `pallet_nfts`. -pub struct WeightInfo(PhantomData); -impl pallet_nfts::WeightInfo for WeightInfo { - /// Storage: `Nfts::NextCollectionId` (r:1 w:1) - /// Proof: `Nfts::NextCollectionId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - /// Storage: `Nfts::Collection` (r:1 w:1) - /// Proof: `Nfts::Collection` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`) - /// Storage: `Nfts::CollectionRoleOf` (r:0 w:1) - /// Proof: `Nfts::CollectionRoleOf` (`max_values`: None, `max_size`: Some(69), added: 2544, mode: `MaxEncodedLen`) - /// Storage: `Nfts::CollectionConfigOf` (r:0 w:1) - /// Proof: `Nfts::CollectionConfigOf` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) - /// Storage: `Nfts::CollectionAccount` (r:0 w:1) - /// Proof: `Nfts::CollectionAccount` (`max_values`: None, `max_size`: Some(68), added: 2543, mode: `MaxEncodedLen`) - fn create() -> Weight { - // Proof Size summary in bytes: - // Measured: `145` - // Estimated: `3549` - // Minimum execution time: 37_915_000 picoseconds. - Weight::from_parts(39_275_000, 0) - .saturating_add(Weight::from_parts(0, 3549)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(5)) - } - /// Storage: `Nfts::NextCollectionId` (r:1 w:1) - /// Proof: `Nfts::NextCollectionId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - /// Storage: `Nfts::Collection` (r:1 w:1) - /// Proof: `Nfts::Collection` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`) - /// Storage: `Nfts::CollectionRoleOf` (r:0 w:1) - /// Proof: `Nfts::CollectionRoleOf` (`max_values`: None, `max_size`: Some(69), added: 2544, mode: `MaxEncodedLen`) - /// Storage: `Nfts::CollectionConfigOf` (r:0 w:1) - /// Proof: `Nfts::CollectionConfigOf` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) - /// Storage: `Nfts::CollectionAccount` (r:0 w:1) - /// Proof: `Nfts::CollectionAccount` (`max_values`: None, `max_size`: Some(68), added: 2543, mode: `MaxEncodedLen`) - fn force_create() -> Weight { - // Proof Size summary in bytes: - // Measured: `42` - // Estimated: `3549` - // Minimum execution time: 22_722_000 picoseconds. 
- Weight::from_parts(23_500_000, 0) - .saturating_add(Weight::from_parts(0, 3549)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(5)) - } - /// Storage: `Nfts::Collection` (r:1 w:1) - /// Proof: `Nfts::Collection` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`) - /// Storage: `Nfts::ItemMetadataOf` (r:1 w:0) - /// Proof: `Nfts::ItemMetadataOf` (`max_values`: None, `max_size`: Some(347), added: 2822, mode: `MaxEncodedLen`) - /// Storage: `Nfts::CollectionRoleOf` (r:1 w:1) - /// Proof: `Nfts::CollectionRoleOf` (`max_values`: None, `max_size`: Some(69), added: 2544, mode: `MaxEncodedLen`) - /// Storage: `Nfts::Attribute` (r:1001 w:1000) - /// Proof: `Nfts::Attribute` (`max_values`: None, `max_size`: Some(479), added: 2954, mode: `MaxEncodedLen`) - /// Storage: `Nfts::ItemConfigOf` (r:1000 w:1000) - /// Proof: `Nfts::ItemConfigOf` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`) - /// Storage: `Nfts::CollectionMetadataOf` (r:0 w:1) - /// Proof: `Nfts::CollectionMetadataOf` (`max_values`: None, `max_size`: Some(294), added: 2769, mode: `MaxEncodedLen`) - /// Storage: `Nfts::CollectionConfigOf` (r:0 w:1) - /// Proof: `Nfts::CollectionConfigOf` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) - /// Storage: `Nfts::CollectionAccount` (r:0 w:1) - /// Proof: `Nfts::CollectionAccount` (`max_values`: None, `max_size`: Some(68), added: 2543, mode: `MaxEncodedLen`) - /// The range of component `m` is `[0, 1000]`. - /// The range of component `c` is `[0, 1000]`. - /// The range of component `a` is `[0, 1000]`. - fn destroy(_m: u32, _c: u32, a: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `32170 + a * (366 ±0)` - // Estimated: `2523990 + a * (2954 ±0)` - // Minimum execution time: 1_231_520_000 picoseconds. - Weight::from_parts(1_228_960_098, 0) - .saturating_add(Weight::from_parts(0, 2523990)) - // Standard Error: 8_836 - .saturating_add(Weight::from_parts(6_818_975, 0).saturating_mul(a.into())) - .saturating_add(T::DbWeight::get().reads(1004)) - .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(a.into()))) - .saturating_add(T::DbWeight::get().writes(1005)) - .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(a.into()))) - .saturating_add(Weight::from_parts(0, 2954).saturating_mul(a.into())) - } - /// Storage: `Nfts::CollectionConfigOf` (r:1 w:0) - /// Proof: `Nfts::CollectionConfigOf` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) - /// Storage: `Nfts::Item` (r:1 w:1) - /// Proof: `Nfts::Item` (`max_values`: None, `max_size`: Some(861), added: 3336, mode: `MaxEncodedLen`) - /// Storage: `Nfts::Collection` (r:1 w:1) - /// Proof: `Nfts::Collection` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`) - /// Storage: `Nfts::CollectionRoleOf` (r:1 w:0) - /// Proof: `Nfts::CollectionRoleOf` (`max_values`: None, `max_size`: Some(69), added: 2544, mode: `MaxEncodedLen`) - /// Storage: `Nfts::ItemConfigOf` (r:1 w:1) - /// Proof: `Nfts::ItemConfigOf` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`) - /// Storage: `Nfts::Account` (r:0 w:1) - /// Proof: `Nfts::Account` (`max_values`: None, `max_size`: Some(88), added: 2563, mode: `MaxEncodedLen`) - fn mint() -> Weight { - // Proof Size summary in bytes: - // Measured: `421` - // Estimated: `4326` - // Minimum execution time: 48_581_000 picoseconds. 
- Weight::from_parts(50_020_000, 0) - .saturating_add(Weight::from_parts(0, 4326)) - .saturating_add(T::DbWeight::get().reads(5)) - .saturating_add(T::DbWeight::get().writes(4)) - } - /// Storage: `Nfts::CollectionRoleOf` (r:1 w:0) - /// Proof: `Nfts::CollectionRoleOf` (`max_values`: None, `max_size`: Some(69), added: 2544, mode: `MaxEncodedLen`) - /// Storage: `Nfts::Item` (r:1 w:1) - /// Proof: `Nfts::Item` (`max_values`: None, `max_size`: Some(861), added: 3336, mode: `MaxEncodedLen`) - /// Storage: `Nfts::Collection` (r:1 w:1) - /// Proof: `Nfts::Collection` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`) - /// Storage: `Nfts::CollectionConfigOf` (r:1 w:0) - /// Proof: `Nfts::CollectionConfigOf` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) - /// Storage: `Nfts::ItemConfigOf` (r:1 w:1) - /// Proof: `Nfts::ItemConfigOf` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`) - /// Storage: `Nfts::Account` (r:0 w:1) - /// Proof: `Nfts::Account` (`max_values`: None, `max_size`: Some(88), added: 2563, mode: `MaxEncodedLen`) - fn force_mint() -> Weight { - // Proof Size summary in bytes: - // Measured: `421` - // Estimated: `4326` - // Minimum execution time: 47_171_000 picoseconds. - Weight::from_parts(48_084_000, 0) - .saturating_add(Weight::from_parts(0, 4326)) - .saturating_add(T::DbWeight::get().reads(5)) - .saturating_add(T::DbWeight::get().writes(4)) - } - /// Storage: `Nfts::Attribute` (r:1 w:0) - /// Proof: `Nfts::Attribute` (`max_values`: None, `max_size`: Some(479), added: 2954, mode: `MaxEncodedLen`) - /// Storage: `Nfts::ItemConfigOf` (r:1 w:1) - /// Proof: `Nfts::ItemConfigOf` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`) - /// Storage: `Nfts::Collection` (r:1 w:1) - /// Proof: `Nfts::Collection` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`) - /// Storage: `Nfts::Item` (r:1 w:1) - /// Proof: `Nfts::Item` (`max_values`: None, `max_size`: Some(861), added: 3336, mode: `MaxEncodedLen`) - /// Storage: `Nfts::ItemMetadataOf` (r:1 w:0) - /// Proof: `Nfts::ItemMetadataOf` (`max_values`: None, `max_size`: Some(347), added: 2822, mode: `MaxEncodedLen`) - /// Storage: `Nfts::Account` (r:0 w:1) - /// Proof: `Nfts::Account` (`max_values`: None, `max_size`: Some(88), added: 2563, mode: `MaxEncodedLen`) - /// Storage: `Nfts::ItemPriceOf` (r:0 w:1) - /// Proof: `Nfts::ItemPriceOf` (`max_values`: None, `max_size`: Some(89), added: 2564, mode: `MaxEncodedLen`) - /// Storage: `Nfts::ItemAttributesApprovalsOf` (r:0 w:1) - /// Proof: `Nfts::ItemAttributesApprovalsOf` (`max_values`: None, `max_size`: Some(1001), added: 3476, mode: `MaxEncodedLen`) - /// Storage: `Nfts::PendingSwapOf` (r:0 w:1) - /// Proof: `Nfts::PendingSwapOf` (`max_values`: None, `max_size`: Some(71), added: 2546, mode: `MaxEncodedLen`) - fn burn() -> Weight { - // Proof Size summary in bytes: - // Measured: `530` - // Estimated: `4326` - // Minimum execution time: 53_591_000 picoseconds. 
- Weight::from_parts(55_074_000, 0) - .saturating_add(Weight::from_parts(0, 4326)) - .saturating_add(T::DbWeight::get().reads(5)) - .saturating_add(T::DbWeight::get().writes(7)) - } - /// Storage: `Nfts::Collection` (r:1 w:0) - /// Proof: `Nfts::Collection` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`) - /// Storage: `Nfts::Attribute` (r:1 w:0) - /// Proof: `Nfts::Attribute` (`max_values`: None, `max_size`: Some(479), added: 2954, mode: `MaxEncodedLen`) - /// Storage: `Nfts::CollectionConfigOf` (r:1 w:0) - /// Proof: `Nfts::CollectionConfigOf` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) - /// Storage: `Nfts::ItemConfigOf` (r:1 w:0) - /// Proof: `Nfts::ItemConfigOf` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`) - /// Storage: `Nfts::Item` (r:1 w:1) - /// Proof: `Nfts::Item` (`max_values`: None, `max_size`: Some(861), added: 3336, mode: `MaxEncodedLen`) - /// Storage: `Nfts::Account` (r:0 w:2) - /// Proof: `Nfts::Account` (`max_values`: None, `max_size`: Some(88), added: 2563, mode: `MaxEncodedLen`) - /// Storage: `Nfts::ItemPriceOf` (r:0 w:1) - /// Proof: `Nfts::ItemPriceOf` (`max_values`: None, `max_size`: Some(89), added: 2564, mode: `MaxEncodedLen`) - /// Storage: `Nfts::PendingSwapOf` (r:0 w:1) - /// Proof: `Nfts::PendingSwapOf` (`max_values`: None, `max_size`: Some(71), added: 2546, mode: `MaxEncodedLen`) - fn transfer() -> Weight { - // Proof Size summary in bytes: - // Measured: `559` - // Estimated: `4326` - // Minimum execution time: 40_935_000 picoseconds. - Weight::from_parts(41_835_000, 0) - .saturating_add(Weight::from_parts(0, 4326)) - .saturating_add(T::DbWeight::get().reads(5)) - .saturating_add(T::DbWeight::get().writes(5)) - } - /// Storage: `Nfts::Collection` (r:1 w:0) - /// Proof: `Nfts::Collection` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`) - /// Storage: `Nfts::CollectionConfigOf` (r:1 w:0) - /// Proof: `Nfts::CollectionConfigOf` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) - /// Storage: `Nfts::Item` (r:5000 w:5000) - /// Proof: `Nfts::Item` (`max_values`: None, `max_size`: Some(861), added: 3336, mode: `MaxEncodedLen`) - /// The range of component `i` is `[0, 5000]`. - fn redeposit(i: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `729 + i * (108 ±0)` - // Estimated: `3549 + i * (3336 ±0)` - // Minimum execution time: 16_543_000 picoseconds. - Weight::from_parts(16_769_000, 0) - .saturating_add(Weight::from_parts(0, 3549)) - // Standard Error: 23_638 - .saturating_add(Weight::from_parts(17_762_895, 0).saturating_mul(i.into())) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(i.into()))) - .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(i.into()))) - .saturating_add(Weight::from_parts(0, 3336).saturating_mul(i.into())) - } - /// Storage: `Nfts::CollectionRoleOf` (r:1 w:0) - /// Proof: `Nfts::CollectionRoleOf` (`max_values`: None, `max_size`: Some(69), added: 2544, mode: `MaxEncodedLen`) - /// Storage: `Nfts::ItemConfigOf` (r:1 w:1) - /// Proof: `Nfts::ItemConfigOf` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`) - fn lock_item_transfer() -> Weight { - // Proof Size summary in bytes: - // Measured: `401` - // Estimated: `3534` - // Minimum execution time: 20_446_000 picoseconds. 
- Weight::from_parts(20_740_000, 0) - .saturating_add(Weight::from_parts(0, 3534)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `Nfts::CollectionRoleOf` (r:1 w:0) - /// Proof: `Nfts::CollectionRoleOf` (`max_values`: None, `max_size`: Some(69), added: 2544, mode: `MaxEncodedLen`) - /// Storage: `Nfts::ItemConfigOf` (r:1 w:1) - /// Proof: `Nfts::ItemConfigOf` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`) - fn unlock_item_transfer() -> Weight { - // Proof Size summary in bytes: - // Measured: `401` - // Estimated: `3534` - // Minimum execution time: 20_088_000 picoseconds. - Weight::from_parts(20_627_000, 0) - .saturating_add(Weight::from_parts(0, 3534)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `Nfts::Collection` (r:1 w:0) - /// Proof: `Nfts::Collection` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`) - /// Storage: `Nfts::CollectionConfigOf` (r:1 w:1) - /// Proof: `Nfts::CollectionConfigOf` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) - fn lock_collection() -> Weight { - // Proof Size summary in bytes: - // Measured: `306` - // Estimated: `3549` - // Minimum execution time: 17_036_000 picoseconds. - Weight::from_parts(17_435_000, 0) - .saturating_add(Weight::from_parts(0, 3549)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `Nfts::OwnershipAcceptance` (r:1 w:1) - /// Proof: `Nfts::OwnershipAcceptance` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) - /// Storage: `Nfts::Collection` (r:1 w:1) - /// Proof: `Nfts::Collection` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`) - /// Storage: `Nfts::CollectionAccount` (r:0 w:2) - /// Proof: `Nfts::CollectionAccount` (`max_values`: None, `max_size`: Some(68), added: 2543, mode: `MaxEncodedLen`) - fn transfer_ownership() -> Weight { - // Proof Size summary in bytes: - // Measured: `354` - // Estimated: `3549` - // Minimum execution time: 22_528_000 picoseconds. - Weight::from_parts(23_047_000, 0) - .saturating_add(Weight::from_parts(0, 3549)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(4)) - } - /// Storage: `Nfts::Collection` (r:1 w:1) - /// Proof: `Nfts::Collection` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`) - /// Storage: `Nfts::CollectionRoleOf` (r:2 w:4) - /// Proof: `Nfts::CollectionRoleOf` (`max_values`: None, `max_size`: Some(69), added: 2544, mode: `MaxEncodedLen`) - fn set_team() -> Weight { - // Proof Size summary in bytes: - // Measured: `335` - // Estimated: `6078` - // Minimum execution time: 38_473_000 picoseconds. - Weight::from_parts(39_353_000, 0) - .saturating_add(Weight::from_parts(0, 6078)) - .saturating_add(T::DbWeight::get().reads(3)) - .saturating_add(T::DbWeight::get().writes(5)) - } - /// Storage: `Nfts::Collection` (r:1 w:1) - /// Proof: `Nfts::Collection` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`) - /// Storage: `Nfts::CollectionAccount` (r:0 w:2) - /// Proof: `Nfts::CollectionAccount` (`max_values`: None, `max_size`: Some(68), added: 2543, mode: `MaxEncodedLen`) - fn force_collection_owner() -> Weight { - // Proof Size summary in bytes: - // Measured: `277` - // Estimated: `3549` - // Minimum execution time: 17_708_000 picoseconds. 
- Weight::from_parts(18_022_000, 0) - .saturating_add(Weight::from_parts(0, 3549)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(3)) - } - /// Storage: `Nfts::Collection` (r:1 w:0) - /// Proof: `Nfts::Collection` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`) - /// Storage: `Nfts::CollectionConfigOf` (r:0 w:1) - /// Proof: `Nfts::CollectionConfigOf` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) - fn force_collection_config() -> Weight { - // Proof Size summary in bytes: - // Measured: `242` - // Estimated: `3549` - // Minimum execution time: 14_606_000 picoseconds. - Weight::from_parts(14_891_000, 0) - .saturating_add(Weight::from_parts(0, 3549)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `Nfts::CollectionRoleOf` (r:1 w:0) - /// Proof: `Nfts::CollectionRoleOf` (`max_values`: None, `max_size`: Some(69), added: 2544, mode: `MaxEncodedLen`) - /// Storage: `Nfts::ItemConfigOf` (r:1 w:1) - /// Proof: `Nfts::ItemConfigOf` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`) - fn lock_item_properties() -> Weight { - // Proof Size summary in bytes: - // Measured: `401` - // Estimated: `3534` - // Minimum execution time: 19_492_000 picoseconds. - Weight::from_parts(19_919_000, 0) - .saturating_add(Weight::from_parts(0, 3534)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `Nfts::Collection` (r:1 w:1) - /// Proof: `Nfts::Collection` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`) - /// Storage: `Nfts::CollectionRoleOf` (r:1 w:0) - /// Proof: `Nfts::CollectionRoleOf` (`max_values`: None, `max_size`: Some(69), added: 2544, mode: `MaxEncodedLen`) - /// Storage: `Nfts::CollectionConfigOf` (r:1 w:0) - /// Proof: `Nfts::CollectionConfigOf` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) - /// Storage: `Nfts::ItemConfigOf` (r:1 w:0) - /// Proof: `Nfts::ItemConfigOf` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`) - /// Storage: `Nfts::Attribute` (r:1 w:1) - /// Proof: `Nfts::Attribute` (`max_values`: None, `max_size`: Some(479), added: 2954, mode: `MaxEncodedLen`) - fn set_attribute() -> Weight { - // Proof Size summary in bytes: - // Measured: `505` - // Estimated: `3944` - // Minimum execution time: 50_583_000 picoseconds. - Weight::from_parts(53_846_000, 0) - .saturating_add(Weight::from_parts(0, 3944)) - .saturating_add(T::DbWeight::get().reads(5)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `Nfts::Collection` (r:1 w:1) - /// Proof: `Nfts::Collection` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`) - /// Storage: `Nfts::Attribute` (r:1 w:1) - /// Proof: `Nfts::Attribute` (`max_values`: None, `max_size`: Some(479), added: 2954, mode: `MaxEncodedLen`) - fn force_set_attribute() -> Weight { - // Proof Size summary in bytes: - // Measured: `310` - // Estimated: `3944` - // Minimum execution time: 25_937_000 picoseconds. 
- Weight::from_parts(26_540_000, 0) - .saturating_add(Weight::from_parts(0, 3944)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `Nfts::Attribute` (r:1 w:1) - /// Proof: `Nfts::Attribute` (`max_values`: None, `max_size`: Some(479), added: 2954, mode: `MaxEncodedLen`) - /// Storage: `Nfts::CollectionRoleOf` (r:1 w:0) - /// Proof: `Nfts::CollectionRoleOf` (`max_values`: None, `max_size`: Some(69), added: 2544, mode: `MaxEncodedLen`) - /// Storage: `Nfts::ItemConfigOf` (r:1 w:0) - /// Proof: `Nfts::ItemConfigOf` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`) - /// Storage: `Nfts::Collection` (r:1 w:1) - /// Proof: `Nfts::Collection` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`) - fn clear_attribute() -> Weight { - // Proof Size summary in bytes: - // Measured: `949` - // Estimated: `3944` - // Minimum execution time: 45_738_000 picoseconds. - Weight::from_parts(46_468_000, 0) - .saturating_add(Weight::from_parts(0, 3944)) - .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `Nfts::Item` (r:1 w:0) - /// Proof: `Nfts::Item` (`max_values`: None, `max_size`: Some(861), added: 3336, mode: `MaxEncodedLen`) - /// Storage: `Nfts::ItemAttributesApprovalsOf` (r:1 w:1) - /// Proof: `Nfts::ItemAttributesApprovalsOf` (`max_values`: None, `max_size`: Some(1001), added: 3476, mode: `MaxEncodedLen`) - fn approve_item_attributes() -> Weight { - // Proof Size summary in bytes: - // Measured: `347` - // Estimated: `4466` - // Minimum execution time: 17_361_000 picoseconds. - Weight::from_parts(18_191_000, 0) - .saturating_add(Weight::from_parts(0, 4466)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `Nfts::Item` (r:1 w:0) - /// Proof: `Nfts::Item` (`max_values`: None, `max_size`: Some(861), added: 3336, mode: `MaxEncodedLen`) - /// Storage: `Nfts::ItemAttributesApprovalsOf` (r:1 w:1) - /// Proof: `Nfts::ItemAttributesApprovalsOf` (`max_values`: None, `max_size`: Some(1001), added: 3476, mode: `MaxEncodedLen`) - /// Storage: `Nfts::Attribute` (r:1001 w:1000) - /// Proof: `Nfts::Attribute` (`max_values`: None, `max_size`: Some(479), added: 2954, mode: `MaxEncodedLen`) - /// Storage: `System::Account` (r:1 w:1) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - /// The range of component `n` is `[0, 1000]`. - fn cancel_item_attributes_approval(n: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `726 + n * (398 ±0)` - // Estimated: `4466 + n * (2954 ±0)` - // Minimum execution time: 25_884_000 picoseconds. 
- Weight::from_parts(26_265_000, 0) - .saturating_add(Weight::from_parts(0, 4466)) - // Standard Error: 6_423 - .saturating_add(Weight::from_parts(6_507_369, 0).saturating_mul(n.into())) - .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(n.into()))) - .saturating_add(T::DbWeight::get().writes(2)) - .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(n.into()))) - .saturating_add(Weight::from_parts(0, 2954).saturating_mul(n.into())) - } - /// Storage: `Nfts::CollectionRoleOf` (r:1 w:0) - /// Proof: `Nfts::CollectionRoleOf` (`max_values`: None, `max_size`: Some(69), added: 2544, mode: `MaxEncodedLen`) - /// Storage: `Nfts::Collection` (r:1 w:1) - /// Proof: `Nfts::Collection` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`) - /// Storage: `Nfts::ItemConfigOf` (r:1 w:0) - /// Proof: `Nfts::ItemConfigOf` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`) - /// Storage: `Nfts::CollectionConfigOf` (r:1 w:0) - /// Proof: `Nfts::CollectionConfigOf` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) - /// Storage: `Nfts::ItemMetadataOf` (r:1 w:1) - /// Proof: `Nfts::ItemMetadataOf` (`max_values`: None, `max_size`: Some(347), added: 2822, mode: `MaxEncodedLen`) - fn set_metadata() -> Weight { - // Proof Size summary in bytes: - // Measured: `505` - // Estimated: `3812` - // Minimum execution time: 40_802_000 picoseconds. - Weight::from_parts(41_742_000, 0) - .saturating_add(Weight::from_parts(0, 3812)) - .saturating_add(T::DbWeight::get().reads(5)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `Nfts::CollectionRoleOf` (r:1 w:0) - /// Proof: `Nfts::CollectionRoleOf` (`max_values`: None, `max_size`: Some(69), added: 2544, mode: `MaxEncodedLen`) - /// Storage: `Nfts::ItemMetadataOf` (r:1 w:1) - /// Proof: `Nfts::ItemMetadataOf` (`max_values`: None, `max_size`: Some(347), added: 2822, mode: `MaxEncodedLen`) - /// Storage: `Nfts::Collection` (r:1 w:1) - /// Proof: `Nfts::Collection` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`) - /// Storage: `Nfts::ItemConfigOf` (r:1 w:0) - /// Proof: `Nfts::ItemConfigOf` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`) - fn clear_metadata() -> Weight { - // Proof Size summary in bytes: - // Measured: `815` - // Estimated: `3812` - // Minimum execution time: 38_904_000 picoseconds. - Weight::from_parts(39_919_000, 0) - .saturating_add(Weight::from_parts(0, 3812)) - .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `Nfts::CollectionRoleOf` (r:1 w:0) - /// Proof: `Nfts::CollectionRoleOf` (`max_values`: None, `max_size`: Some(69), added: 2544, mode: `MaxEncodedLen`) - /// Storage: `Nfts::CollectionConfigOf` (r:1 w:0) - /// Proof: `Nfts::CollectionConfigOf` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) - /// Storage: `Nfts::Collection` (r:1 w:1) - /// Proof: `Nfts::Collection` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`) - /// Storage: `Nfts::CollectionMetadataOf` (r:1 w:1) - /// Proof: `Nfts::CollectionMetadataOf` (`max_values`: None, `max_size`: Some(294), added: 2769, mode: `MaxEncodedLen`) - fn set_collection_metadata() -> Weight { - // Proof Size summary in bytes: - // Measured: `364` - // Estimated: `3759` - // Minimum execution time: 37_012_000 picoseconds. 
- Weight::from_parts(37_632_000, 0) - .saturating_add(Weight::from_parts(0, 3759)) - .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `Nfts::CollectionRoleOf` (r:1 w:0) - /// Proof: `Nfts::CollectionRoleOf` (`max_values`: None, `max_size`: Some(69), added: 2544, mode: `MaxEncodedLen`) - /// Storage: `Nfts::Collection` (r:1 w:0) - /// Proof: `Nfts::Collection` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`) - /// Storage: `Nfts::CollectionConfigOf` (r:1 w:0) - /// Proof: `Nfts::CollectionConfigOf` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) - /// Storage: `Nfts::CollectionMetadataOf` (r:1 w:1) - /// Proof: `Nfts::CollectionMetadataOf` (`max_values`: None, `max_size`: Some(294), added: 2769, mode: `MaxEncodedLen`) - fn clear_collection_metadata() -> Weight { - // Proof Size summary in bytes: - // Measured: `682` - // Estimated: `3759` - // Minimum execution time: 36_243_000 picoseconds. - Weight::from_parts(37_313_000, 0) - .saturating_add(Weight::from_parts(0, 3759)) - .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `Nfts::Item` (r:1 w:1) - /// Proof: `Nfts::Item` (`max_values`: None, `max_size`: Some(861), added: 3336, mode: `MaxEncodedLen`) - /// Storage: `Nfts::CollectionConfigOf` (r:1 w:0) - /// Proof: `Nfts::CollectionConfigOf` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) - fn approve_transfer() -> Weight { - // Proof Size summary in bytes: - // Measured: `376` - // Estimated: `4326` - // Minimum execution time: 20_919_000 picoseconds. - Weight::from_parts(21_505_000, 0) - .saturating_add(Weight::from_parts(0, 4326)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `Nfts::Item` (r:1 w:1) - /// Proof: `Nfts::Item` (`max_values`: None, `max_size`: Some(861), added: 3336, mode: `MaxEncodedLen`) - fn cancel_approval() -> Weight { - // Proof Size summary in bytes: - // Measured: `384` - // Estimated: `4326` - // Minimum execution time: 18_943_000 picoseconds. - Weight::from_parts(19_969_000, 0) - .saturating_add(Weight::from_parts(0, 4326)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `Nfts::Item` (r:1 w:1) - /// Proof: `Nfts::Item` (`max_values`: None, `max_size`: Some(861), added: 3336, mode: `MaxEncodedLen`) - fn clear_all_transfer_approvals() -> Weight { - // Proof Size summary in bytes: - // Measured: `384` - // Estimated: `4326` - // Minimum execution time: 17_320_000 picoseconds. - Weight::from_parts(18_071_000, 0) - .saturating_add(Weight::from_parts(0, 4326)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `Nfts::OwnershipAcceptance` (r:1 w:1) - /// Proof: `Nfts::OwnershipAcceptance` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) - fn set_accept_ownership() -> Weight { - // Proof Size summary in bytes: - // Measured: `42` - // Estimated: `3517` - // Minimum execution time: 14_934_000 picoseconds. 
- Weight::from_parts(15_422_000, 0) - .saturating_add(Weight::from_parts(0, 3517)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `Nfts::CollectionConfigOf` (r:1 w:1) - /// Proof: `Nfts::CollectionConfigOf` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) - /// Storage: `Nfts::Collection` (r:1 w:0) - /// Proof: `Nfts::Collection` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`) - fn set_collection_max_supply() -> Weight { - // Proof Size summary in bytes: - // Measured: `306` - // Estimated: `3549` - // Minimum execution time: 18_715_000 picoseconds. - Weight::from_parts(19_025_000, 0) - .saturating_add(Weight::from_parts(0, 3549)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `Nfts::CollectionRoleOf` (r:1 w:0) - /// Proof: `Nfts::CollectionRoleOf` (`max_values`: None, `max_size`: Some(69), added: 2544, mode: `MaxEncodedLen`) - /// Storage: `Nfts::CollectionConfigOf` (r:1 w:1) - /// Proof: `Nfts::CollectionConfigOf` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) - fn update_mint_settings() -> Weight { - // Proof Size summary in bytes: - // Measured: `289` - // Estimated: `3538` - // Minimum execution time: 18_249_000 picoseconds. - Weight::from_parts(18_826_000, 0) - .saturating_add(Weight::from_parts(0, 3538)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `Nfts::Item` (r:1 w:0) - /// Proof: `Nfts::Item` (`max_values`: None, `max_size`: Some(861), added: 3336, mode: `MaxEncodedLen`) - /// Storage: `Nfts::CollectionConfigOf` (r:1 w:0) - /// Proof: `Nfts::CollectionConfigOf` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) - /// Storage: `Nfts::ItemConfigOf` (r:1 w:0) - /// Proof: `Nfts::ItemConfigOf` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`) - /// Storage: `Nfts::ItemPriceOf` (r:0 w:1) - /// Proof: `Nfts::ItemPriceOf` (`max_values`: None, `max_size`: Some(89), added: 2564, mode: `MaxEncodedLen`) - fn set_price() -> Weight { - // Proof Size summary in bytes: - // Measured: `484` - // Estimated: `4326` - // Minimum execution time: 23_529_000 picoseconds. 
- Weight::from_parts(23_958_000, 0) - .saturating_add(Weight::from_parts(0, 4326)) - .saturating_add(T::DbWeight::get().reads(3)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `Nfts::Item` (r:1 w:1) - /// Proof: `Nfts::Item` (`max_values`: None, `max_size`: Some(861), added: 3336, mode: `MaxEncodedLen`) - /// Storage: `Nfts::ItemPriceOf` (r:1 w:1) - /// Proof: `Nfts::ItemPriceOf` (`max_values`: None, `max_size`: Some(89), added: 2564, mode: `MaxEncodedLen`) - /// Storage: `Nfts::Collection` (r:1 w:0) - /// Proof: `Nfts::Collection` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`) - /// Storage: `Nfts::Attribute` (r:1 w:0) - /// Proof: `Nfts::Attribute` (`max_values`: None, `max_size`: Some(479), added: 2954, mode: `MaxEncodedLen`) - /// Storage: `Nfts::CollectionConfigOf` (r:1 w:0) - /// Proof: `Nfts::CollectionConfigOf` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) - /// Storage: `Nfts::ItemConfigOf` (r:1 w:0) - /// Proof: `Nfts::ItemConfigOf` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`) - /// Storage: `Nfts::Account` (r:0 w:2) - /// Proof: `Nfts::Account` (`max_values`: None, `max_size`: Some(88), added: 2563, mode: `MaxEncodedLen`) - /// Storage: `Nfts::PendingSwapOf` (r:0 w:1) - /// Proof: `Nfts::PendingSwapOf` (`max_values`: None, `max_size`: Some(71), added: 2546, mode: `MaxEncodedLen`) - fn buy_item() -> Weight { - // Proof Size summary in bytes: - // Measured: `671` - // Estimated: `4326` - // Minimum execution time: 50_885_000 picoseconds. - Weight::from_parts(52_157_000, 0) - .saturating_add(Weight::from_parts(0, 4326)) - .saturating_add(T::DbWeight::get().reads(6)) - .saturating_add(T::DbWeight::get().writes(5)) - } - /// The range of component `n` is `[0, 10]`. - fn pay_tips(n: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 2_258_000 picoseconds. - Weight::from_parts(3_342_691, 0) - .saturating_add(Weight::from_parts(0, 0)) - // Standard Error: 6_268 - .saturating_add(Weight::from_parts(3_761_373, 0).saturating_mul(n.into())) - } - /// Storage: `Nfts::Item` (r:2 w:0) - /// Proof: `Nfts::Item` (`max_values`: None, `max_size`: Some(861), added: 3336, mode: `MaxEncodedLen`) - /// Storage: `Nfts::PendingSwapOf` (r:0 w:1) - /// Proof: `Nfts::PendingSwapOf` (`max_values`: None, `max_size`: Some(71), added: 2546, mode: `MaxEncodedLen`) - fn create_swap() -> Weight { - // Proof Size summary in bytes: - // Measured: `460` - // Estimated: `7662` - // Minimum execution time: 21_220_000 picoseconds. - Weight::from_parts(21_654_000, 0) - .saturating_add(Weight::from_parts(0, 7662)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `Nfts::PendingSwapOf` (r:1 w:1) - /// Proof: `Nfts::PendingSwapOf` (`max_values`: None, `max_size`: Some(71), added: 2546, mode: `MaxEncodedLen`) - /// Storage: `Nfts::Item` (r:1 w:0) - /// Proof: `Nfts::Item` (`max_values`: None, `max_size`: Some(861), added: 3336, mode: `MaxEncodedLen`) - fn cancel_swap() -> Weight { - // Proof Size summary in bytes: - // Measured: `479` - // Estimated: `4326` - // Minimum execution time: 20_430_000 picoseconds. 
- Weight::from_parts(21_038_000, 0) - .saturating_add(Weight::from_parts(0, 4326)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `Nfts::Item` (r:2 w:2) - /// Proof: `Nfts::Item` (`max_values`: None, `max_size`: Some(861), added: 3336, mode: `MaxEncodedLen`) - /// Storage: `Nfts::PendingSwapOf` (r:1 w:2) - /// Proof: `Nfts::PendingSwapOf` (`max_values`: None, `max_size`: Some(71), added: 2546, mode: `MaxEncodedLen`) - /// Storage: `Nfts::Collection` (r:1 w:0) - /// Proof: `Nfts::Collection` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`) - /// Storage: `Nfts::Attribute` (r:2 w:0) - /// Proof: `Nfts::Attribute` (`max_values`: None, `max_size`: Some(479), added: 2954, mode: `MaxEncodedLen`) - /// Storage: `Nfts::CollectionConfigOf` (r:1 w:0) - /// Proof: `Nfts::CollectionConfigOf` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) - /// Storage: `Nfts::ItemConfigOf` (r:2 w:0) - /// Proof: `Nfts::ItemConfigOf` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`) - /// Storage: `Nfts::Account` (r:0 w:4) - /// Proof: `Nfts::Account` (`max_values`: None, `max_size`: Some(88), added: 2563, mode: `MaxEncodedLen`) - /// Storage: `Nfts::ItemPriceOf` (r:0 w:2) - /// Proof: `Nfts::ItemPriceOf` (`max_values`: None, `max_size`: Some(89), added: 2564, mode: `MaxEncodedLen`) - fn claim_swap() -> Weight { - // Proof Size summary in bytes: - // Measured: `800` - // Estimated: `7662` - // Minimum execution time: 83_344_000 picoseconds. - Weight::from_parts(84_898_000, 0) - .saturating_add(Weight::from_parts(0, 7662)) - .saturating_add(T::DbWeight::get().reads(9)) - .saturating_add(T::DbWeight::get().writes(10)) - } - /// Storage: `Nfts::CollectionRoleOf` (r:2 w:0) - /// Proof: `Nfts::CollectionRoleOf` (`max_values`: None, `max_size`: Some(69), added: 2544, mode: `MaxEncodedLen`) - /// Storage: `Nfts::CollectionConfigOf` (r:1 w:0) - /// Proof: `Nfts::CollectionConfigOf` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) - /// Storage: `Nfts::Item` (r:1 w:1) - /// Proof: `Nfts::Item` (`max_values`: None, `max_size`: Some(861), added: 3336, mode: `MaxEncodedLen`) - /// Storage: `Nfts::Collection` (r:1 w:1) - /// Proof: `Nfts::Collection` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`) - /// Storage: `Nfts::ItemConfigOf` (r:1 w:1) - /// Proof: `Nfts::ItemConfigOf` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`) - /// Storage: `System::Account` (r:1 w:1) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - /// Storage: `Nfts::Attribute` (r:10 w:10) - /// Proof: `Nfts::Attribute` (`max_values`: None, `max_size`: Some(479), added: 2954, mode: `MaxEncodedLen`) - /// Storage: `Nfts::ItemMetadataOf` (r:1 w:1) - /// Proof: `Nfts::ItemMetadataOf` (`max_values`: None, `max_size`: Some(347), added: 2822, mode: `MaxEncodedLen`) - /// Storage: `Nfts::Account` (r:0 w:1) - /// Proof: `Nfts::Account` (`max_values`: None, `max_size`: Some(88), added: 2563, mode: `MaxEncodedLen`) - /// The range of component `n` is `[0, 10]`. - fn mint_pre_signed(n: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `524` - // Estimated: `6078 + n * (2954 ±0)` - // Minimum execution time: 143_435_000 picoseconds. 
- Weight::from_parts(151_744_537, 0) - .saturating_add(Weight::from_parts(0, 6078)) - // Standard Error: 44_459 - .saturating_add(Weight::from_parts(31_293_503, 0).saturating_mul(n.into())) - .saturating_add(T::DbWeight::get().reads(8)) - .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(n.into()))) - .saturating_add(T::DbWeight::get().writes(6)) - .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(n.into()))) - .saturating_add(Weight::from_parts(0, 2954).saturating_mul(n.into())) - } - /// Storage: `Nfts::Item` (r:1 w:0) - /// Proof: `Nfts::Item` (`max_values`: None, `max_size`: Some(861), added: 3336, mode: `MaxEncodedLen`) - /// Storage: `Nfts::ItemAttributesApprovalsOf` (r:1 w:1) - /// Proof: `Nfts::ItemAttributesApprovalsOf` (`max_values`: None, `max_size`: Some(1001), added: 3476, mode: `MaxEncodedLen`) - /// Storage: `Nfts::CollectionConfigOf` (r:1 w:0) - /// Proof: `Nfts::CollectionConfigOf` (`max_values`: None, `max_size`: Some(73), added: 2548, mode: `MaxEncodedLen`) - /// Storage: `Nfts::Collection` (r:1 w:1) - /// Proof: `Nfts::Collection` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`) - /// Storage: `Nfts::Attribute` (r:10 w:10) - /// Proof: `Nfts::Attribute` (`max_values`: None, `max_size`: Some(479), added: 2954, mode: `MaxEncodedLen`) - /// Storage: `System::Account` (r:1 w:1) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - /// The range of component `n` is `[0, 10]`. - fn set_attributes_pre_signed(n: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `554` - // Estimated: `4466 + n * (2954 ±0)` - // Minimum execution time: 84_627_000 picoseconds. - Weight::from_parts(96_076_065, 0) - .saturating_add(Weight::from_parts(0, 4466)) - // Standard Error: 62_058 - .saturating_add(Weight::from_parts(30_461_383, 0).saturating_mul(n.into())) - .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(n.into()))) - .saturating_add(T::DbWeight::get().writes(2)) - .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(n.into()))) - .saturating_add(Weight::from_parts(0, 2954).saturating_mul(n.into())) - } -} diff --git a/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/weights/pallet_proxy.rs b/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/weights/pallet_proxy.rs deleted file mode 100644 index b6121f2fca2..00000000000 --- a/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/weights/pallet_proxy.rs +++ /dev/null @@ -1,225 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! Autogenerated weights for `pallet_proxy` -//! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-07-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! WORST CASE MAP SIZE: `1000000` -//! 
HOSTNAME: `runner-ynta1nyy-project-238-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! EXECUTION: ``, WASM-EXECUTION: `Compiled`, CHAIN: `Some("asset-hub-polkadot-dev")`, DB CACHE: 1024 - -// Executed Command: -// ./target/production/polkadot-parachain -// benchmark -// pallet -// --chain=asset-hub-polkadot-dev -// --wasm-execution=compiled -// --pallet=pallet_proxy -// --no-storage-info -// --no-median-slopes -// --no-min-squares -// --extrinsic=* -// --steps=50 -// --repeat=20 -// --json -// --header=./file_header.txt -// --output=./parachains/runtimes/assets/asset-hub-polkadot/src/weights/ - -#![cfg_attr(rustfmt, rustfmt_skip)] -#![allow(unused_parens)] -#![allow(unused_imports)] -#![allow(missing_docs)] - -use frame_support::{traits::Get, weights::Weight}; -use core::marker::PhantomData; - -/// Weight functions for `pallet_proxy`. -pub struct WeightInfo<T>(PhantomData<T>); -impl<T: frame_system::Config> pallet_proxy::WeightInfo for WeightInfo<T> { - /// Storage: `Proxy::Proxies` (r:1 w:0) - /// Proof: `Proxy::Proxies` (`max_values`: None, `max_size`: Some(1241), added: 3716, mode: `MaxEncodedLen`) - /// The range of component `p` is `[1, 31]`. - fn proxy(p: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `127 + p * (37 ±0)` - // Estimated: `4706` - // Minimum execution time: 16_130_000 picoseconds. - Weight::from_parts(16_649_312, 0) - .saturating_add(Weight::from_parts(0, 4706)) - // Standard Error: 761 - .saturating_add(Weight::from_parts(42_507, 0).saturating_mul(p.into())) - .saturating_add(T::DbWeight::get().reads(1)) - } - /// Storage: `Proxy::Proxies` (r:1 w:0) - /// Proof: `Proxy::Proxies` (`max_values`: None, `max_size`: Some(1241), added: 3716, mode: `MaxEncodedLen`) - /// Storage: `Proxy::Announcements` (r:1 w:1) - /// Proof: `Proxy::Announcements` (`max_values`: None, `max_size`: Some(2233), added: 4708, mode: `MaxEncodedLen`) - /// Storage: `System::Account` (r:1 w:1) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - /// The range of component `a` is `[0, 31]`. - /// The range of component `p` is `[1, 31]`. - fn proxy_announced(a: u32, p: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `454 + a * (68 ±0) + p * (37 ±0)` - // Estimated: `5698` - // Minimum execution time: 37_732_000 picoseconds. - Weight::from_parts(36_993_926, 0) - .saturating_add(Weight::from_parts(0, 5698)) - // Standard Error: 3_278 - .saturating_add(Weight::from_parts(144_955, 0).saturating_mul(a.into())) - // Standard Error: 3_387 - .saturating_add(Weight::from_parts(64_624, 0).saturating_mul(p.into())) - .saturating_add(T::DbWeight::get().reads(3)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `Proxy::Announcements` (r:1 w:1) - /// Proof: `Proxy::Announcements` (`max_values`: None, `max_size`: Some(2233), added: 4708, mode: `MaxEncodedLen`) - /// Storage: `System::Account` (r:1 w:1) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - /// The range of component `a` is `[0, 31]`. - /// The range of component `p` is `[1, 31]`. - fn remove_announcement(a: u32, p: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `369 + a * (68 ±0)` - // Estimated: `5698` - // Minimum execution time: 24_229_000 picoseconds.
- Weight::from_parts(24_199_507, 0) - .saturating_add(Weight::from_parts(0, 5698)) - // Standard Error: 1_672 - .saturating_add(Weight::from_parts(124_324, 0).saturating_mul(a.into())) - // Standard Error: 1_727 - .saturating_add(Weight::from_parts(28_481, 0).saturating_mul(p.into())) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `Proxy::Announcements` (r:1 w:1) - /// Proof: `Proxy::Announcements` (`max_values`: None, `max_size`: Some(2233), added: 4708, mode: `MaxEncodedLen`) - /// Storage: `System::Account` (r:1 w:1) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - /// The range of component `a` is `[0, 31]`. - /// The range of component `p` is `[1, 31]`. - fn reject_announcement(a: u32, p: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `369 + a * (68 ±0)` - // Estimated: `5698` - // Minimum execution time: 23_868_000 picoseconds. - Weight::from_parts(25_293_069, 0) - .saturating_add(Weight::from_parts(0, 5698)) - // Standard Error: 1_728 - .saturating_add(Weight::from_parts(114_080, 0).saturating_mul(a.into())) - // Standard Error: 1_786 - .saturating_add(Weight::from_parts(3_690, 0).saturating_mul(p.into())) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `Proxy::Proxies` (r:1 w:0) - /// Proof: `Proxy::Proxies` (`max_values`: None, `max_size`: Some(1241), added: 3716, mode: `MaxEncodedLen`) - /// Storage: `Proxy::Announcements` (r:1 w:1) - /// Proof: `Proxy::Announcements` (`max_values`: None, `max_size`: Some(2233), added: 4708, mode: `MaxEncodedLen`) - /// Storage: `System::Account` (r:1 w:1) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - /// The range of component `a` is `[0, 31]`. - /// The range of component `p` is `[1, 31]`. - fn announce(a: u32, p: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `386 + a * (68 ±0) + p * (37 ±0)` - // Estimated: `5698` - // Minimum execution time: 34_343_000 picoseconds. - Weight::from_parts(34_539_112, 0) - .saturating_add(Weight::from_parts(0, 5698)) - // Standard Error: 1_917 - .saturating_add(Weight::from_parts(117_360, 0).saturating_mul(a.into())) - // Standard Error: 1_981 - .saturating_add(Weight::from_parts(40_908, 0).saturating_mul(p.into())) - .saturating_add(T::DbWeight::get().reads(3)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `Proxy::Proxies` (r:1 w:1) - /// Proof: `Proxy::Proxies` (`max_values`: None, `max_size`: Some(1241), added: 3716, mode: `MaxEncodedLen`) - /// The range of component `p` is `[1, 31]`. - fn add_proxy(p: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `127 + p * (37 ±0)` - // Estimated: `4706` - // Minimum execution time: 25_506_000 picoseconds. - Weight::from_parts(26_350_920, 0) - .saturating_add(Weight::from_parts(0, 4706)) - // Standard Error: 2_950 - .saturating_add(Weight::from_parts(48_972, 0).saturating_mul(p.into())) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `Proxy::Proxies` (r:1 w:1) - /// Proof: `Proxy::Proxies` (`max_values`: None, `max_size`: Some(1241), added: 3716, mode: `MaxEncodedLen`) - /// The range of component `p` is `[1, 31]`. 
- fn remove_proxy(p: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `127 + p * (37 ±0)` - // Estimated: `4706` - // Minimum execution time: 25_234_000 picoseconds. - Weight::from_parts(26_232_489, 0) - .saturating_add(Weight::from_parts(0, 4706)) - // Standard Error: 2_468 - .saturating_add(Weight::from_parts(48_955, 0).saturating_mul(p.into())) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `Proxy::Proxies` (r:1 w:1) - /// Proof: `Proxy::Proxies` (`max_values`: None, `max_size`: Some(1241), added: 3716, mode: `MaxEncodedLen`) - /// The range of component `p` is `[1, 31]`. - fn remove_proxies(p: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `127 + p * (37 ±0)` - // Estimated: `4706` - // Minimum execution time: 22_184_000 picoseconds. - Weight::from_parts(22_974_929, 0) - .saturating_add(Weight::from_parts(0, 4706)) - // Standard Error: 2_200 - .saturating_add(Weight::from_parts(45_741, 0).saturating_mul(p.into())) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `Proxy::Proxies` (r:1 w:1) - /// Proof: `Proxy::Proxies` (`max_values`: None, `max_size`: Some(1241), added: 3716, mode: `MaxEncodedLen`) - /// The range of component `p` is `[1, 31]`. - fn create_pure(p: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `139` - // Estimated: `4706` - // Minimum execution time: 27_044_000 picoseconds. - Weight::from_parts(27_978_605, 0) - .saturating_add(Weight::from_parts(0, 4706)) - // Standard Error: 2_206 - .saturating_add(Weight::from_parts(13_736, 0).saturating_mul(p.into())) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `Proxy::Proxies` (r:1 w:1) - /// Proof: `Proxy::Proxies` (`max_values`: None, `max_size`: Some(1241), added: 3716, mode: `MaxEncodedLen`) - /// The range of component `p` is `[0, 30]`. - fn kill_pure(p: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `164 + p * (37 ±0)` - // Estimated: `4706` - // Minimum execution time: 22_770_000 picoseconds. - Weight::from_parts(23_441_470, 0) - .saturating_add(Weight::from_parts(0, 4706)) - // Standard Error: 1_959 - .saturating_add(Weight::from_parts(47_317, 0).saturating_mul(p.into())) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } -} diff --git a/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/weights/pallet_session.rs b/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/weights/pallet_session.rs deleted file mode 100644 index 560322abeb3..00000000000 --- a/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/weights/pallet_session.rs +++ /dev/null @@ -1,80 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! Autogenerated weights for `pallet_session` -//! -//! 
THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-07-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-ynta1nyy-project-238-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! EXECUTION: ``, WASM-EXECUTION: `Compiled`, CHAIN: `Some("asset-hub-polkadot-dev")`, DB CACHE: 1024 - -// Executed Command: -// ./target/production/polkadot-parachain -// benchmark -// pallet -// --chain=asset-hub-polkadot-dev -// --wasm-execution=compiled -// --pallet=pallet_session -// --no-storage-info -// --no-median-slopes -// --no-min-squares -// --extrinsic=* -// --steps=50 -// --repeat=20 -// --json -// --header=./file_header.txt -// --output=./parachains/runtimes/assets/asset-hub-polkadot/src/weights/ - -#![cfg_attr(rustfmt, rustfmt_skip)] -#![allow(unused_parens)] -#![allow(unused_imports)] -#![allow(missing_docs)] - -use frame_support::{traits::Get, weights::Weight}; -use core::marker::PhantomData; - -/// Weight functions for `pallet_session`. -pub struct WeightInfo<T>(PhantomData<T>); -impl<T: frame_system::Config> pallet_session::WeightInfo for WeightInfo<T> { - /// Storage: `Session::NextKeys` (r:1 w:1) - /// Proof: `Session::NextKeys` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// Storage: `Session::KeyOwner` (r:1 w:1) - /// Proof: `Session::KeyOwner` (`max_values`: None, `max_size`: None, mode: `Measured`) - fn set_keys() -> Weight { - // Proof Size summary in bytes: - // Measured: `270` - // Estimated: `3735` - // Minimum execution time: 16_684_000 picoseconds. - Weight::from_parts(17_167_000, 0) - .saturating_add(Weight::from_parts(0, 3735)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `Session::NextKeys` (r:1 w:1) - /// Proof: `Session::NextKeys` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// Storage: `Session::KeyOwner` (r:0 w:1) - /// Proof: `Session::KeyOwner` (`max_values`: None, `max_size`: None, mode: `Measured`) - fn purge_keys() -> Weight { - // Proof Size summary in bytes: - // Measured: `242` - // Estimated: `3707` - // Minimum execution time: 11_692_000 picoseconds. - Weight::from_parts(12_248_000, 0) - .saturating_add(Weight::from_parts(0, 3707)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(2)) - } -} diff --git a/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/weights/pallet_timestamp.rs b/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/weights/pallet_timestamp.rs deleted file mode 100644 index 17b050c3e90..00000000000 --- a/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/weights/pallet_timestamp.rs +++ /dev/null @@ -1,74 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! Autogenerated weights for `pallet_timestamp` -//! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//!
DATE: 2023-07-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-ynta1nyy-project-238-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! EXECUTION: ``, WASM-EXECUTION: `Compiled`, CHAIN: `Some("asset-hub-polkadot-dev")`, DB CACHE: 1024 - -// Executed Command: -// ./target/production/polkadot-parachain -// benchmark -// pallet -// --chain=asset-hub-polkadot-dev -// --wasm-execution=compiled -// --pallet=pallet_timestamp -// --no-storage-info -// --no-median-slopes -// --no-min-squares -// --extrinsic=* -// --steps=50 -// --repeat=20 -// --json -// --header=./file_header.txt -// --output=./parachains/runtimes/assets/asset-hub-polkadot/src/weights/ - -#![cfg_attr(rustfmt, rustfmt_skip)] -#![allow(unused_parens)] -#![allow(unused_imports)] -#![allow(missing_docs)] - -use frame_support::{traits::Get, weights::Weight}; -use core::marker::PhantomData; - -/// Weight functions for `pallet_timestamp`. -pub struct WeightInfo<T>(PhantomData<T>); -impl<T: frame_system::Config> pallet_timestamp::WeightInfo for WeightInfo<T> { - /// Storage: `Timestamp::Now` (r:1 w:1) - /// Proof: `Timestamp::Now` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) - /// Storage: `Aura::CurrentSlot` (r:1 w:0) - /// Proof: `Aura::CurrentSlot` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) - fn set() -> Weight { - // Proof Size summary in bytes: - // Measured: `86` - // Estimated: `1493` - // Minimum execution time: 9_214_000 picoseconds. - Weight::from_parts(9_535_000, 0) - .saturating_add(Weight::from_parts(0, 1493)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(1)) - } - fn on_finalize() -> Weight { - // Proof Size summary in bytes: - // Measured: `57` - // Estimated: `0` - // Minimum execution time: 3_269_000 picoseconds. - Weight::from_parts(3_458_000, 0) - .saturating_add(Weight::from_parts(0, 0)) - } -} diff --git a/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/weights/pallet_uniques.rs b/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/weights/pallet_uniques.rs deleted file mode 100644 index 5b13d56f5bb..00000000000 --- a/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/weights/pallet_uniques.rs +++ /dev/null @@ -1,466 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! Autogenerated weights for `pallet_uniques` -//! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-07-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-ynta1nyy-project-238-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//!
EXECUTION: ``, WASM-EXECUTION: `Compiled`, CHAIN: `Some("asset-hub-polkadot-dev")`, DB CACHE: 1024 - -// Executed Command: -// ./target/production/polkadot-parachain -// benchmark -// pallet -// --chain=asset-hub-polkadot-dev -// --wasm-execution=compiled -// --pallet=pallet_uniques -// --no-storage-info -// --no-median-slopes -// --no-min-squares -// --extrinsic=* -// --steps=50 -// --repeat=20 -// --json -// --header=./file_header.txt -// --output=./parachains/runtimes/assets/asset-hub-polkadot/src/weights/ - -#![cfg_attr(rustfmt, rustfmt_skip)] -#![allow(unused_parens)] -#![allow(unused_imports)] -#![allow(missing_docs)] - -use frame_support::{traits::Get, weights::Weight}; -use core::marker::PhantomData; - -/// Weight functions for `pallet_uniques`. -pub struct WeightInfo<T>(PhantomData<T>); -impl<T: frame_system::Config> pallet_uniques::WeightInfo for WeightInfo<T> { - /// Storage: `Uniques::Class` (r:1 w:1) - /// Proof: `Uniques::Class` (`max_values`: None, `max_size`: Some(178), added: 2653, mode: `MaxEncodedLen`) - /// Storage: `Uniques::ClassAccount` (r:0 w:1) - /// Proof: `Uniques::ClassAccount` (`max_values`: None, `max_size`: Some(68), added: 2543, mode: `MaxEncodedLen`) - fn create() -> Weight { - // Proof Size summary in bytes: - // Measured: `145` - // Estimated: `3643` - // Minimum execution time: 29_513_000 picoseconds. - Weight::from_parts(30_346_000, 0) - .saturating_add(Weight::from_parts(0, 3643)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `Uniques::Class` (r:1 w:1) - /// Proof: `Uniques::Class` (`max_values`: None, `max_size`: Some(178), added: 2653, mode: `MaxEncodedLen`) - /// Storage: `Uniques::ClassAccount` (r:0 w:1) - /// Proof: `Uniques::ClassAccount` (`max_values`: None, `max_size`: Some(68), added: 2543, mode: `MaxEncodedLen`) - fn force_create() -> Weight { - // Proof Size summary in bytes: - // Measured: `42` - // Estimated: `3643` - // Minimum execution time: 13_600_000 picoseconds. - Weight::from_parts(14_110_000, 0) - .saturating_add(Weight::from_parts(0, 3643)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `Uniques::Class` (r:1 w:1) - /// Proof: `Uniques::Class` (`max_values`: None, `max_size`: Some(178), added: 2653, mode: `MaxEncodedLen`) - /// Storage: `Uniques::Asset` (r:1001 w:1000) - /// Proof: `Uniques::Asset` (`max_values`: None, `max_size`: Some(122), added: 2597, mode: `MaxEncodedLen`) - /// Storage: `Uniques::InstanceMetadataOf` (r:1000 w:1000) - /// Proof: `Uniques::InstanceMetadataOf` (`max_values`: None, `max_size`: Some(187), added: 2662, mode: `MaxEncodedLen`) - /// Storage: `Uniques::Attribute` (r:1000 w:1000) - /// Proof: `Uniques::Attribute` (`max_values`: None, `max_size`: Some(172), added: 2647, mode: `MaxEncodedLen`) - /// Storage: `Uniques::ClassAccount` (r:0 w:1) - /// Proof: `Uniques::ClassAccount` (`max_values`: None, `max_size`: Some(68), added: 2543, mode: `MaxEncodedLen`) - /// Storage: `Uniques::ClassMetadataOf` (r:0 w:1) - /// Proof: `Uniques::ClassMetadataOf` (`max_values`: None, `max_size`: Some(167), added: 2642, mode: `MaxEncodedLen`) - /// Storage: `Uniques::Account` (r:0 w:1000) - /// Proof: `Uniques::Account` (`max_values`: None, `max_size`: Some(88), added: 2563, mode: `MaxEncodedLen`) - /// Storage: `Uniques::CollectionMaxSupply` (r:0 w:1) - /// Proof: `Uniques::CollectionMaxSupply` (`max_values`: None, `max_size`: Some(24), added: 2499, mode: `MaxEncodedLen`) - /// The range of component `n` is `[0, 1000]`.
- /// The range of component `m` is `[0, 1000]`. - /// The range of component `a` is `[0, 1000]`. - fn destroy(n: u32, m: u32, a: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `257 + a * (107 ±0) + m * (56 ±0) + n * (76 ±0)` - // Estimated: `3643 + a * (2647 ±0) + m * (2662 ±0) + n * (2597 ±0)` - // Minimum execution time: 2_945_869_000 picoseconds. - Weight::from_parts(3_037_917_000, 0) - .saturating_add(Weight::from_parts(0, 3643)) - // Standard Error: 35_850 - .saturating_add(Weight::from_parts(7_558_563, 0).saturating_mul(n.into())) - // Standard Error: 35_850 - .saturating_add(Weight::from_parts(501_089, 0).saturating_mul(m.into())) - // Standard Error: 35_850 - .saturating_add(Weight::from_parts(538_921, 0).saturating_mul(a.into())) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(n.into()))) - .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(m.into()))) - .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(a.into()))) - .saturating_add(T::DbWeight::get().writes(4)) - .saturating_add(T::DbWeight::get().writes((2_u64).saturating_mul(n.into()))) - .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(m.into()))) - .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(a.into()))) - .saturating_add(Weight::from_parts(0, 2647).saturating_mul(a.into())) - .saturating_add(Weight::from_parts(0, 2662).saturating_mul(m.into())) - .saturating_add(Weight::from_parts(0, 2597).saturating_mul(n.into())) - } - /// Storage: `Uniques::Asset` (r:1 w:1) - /// Proof: `Uniques::Asset` (`max_values`: None, `max_size`: Some(122), added: 2597, mode: `MaxEncodedLen`) - /// Storage: `Uniques::Class` (r:1 w:1) - /// Proof: `Uniques::Class` (`max_values`: None, `max_size`: Some(178), added: 2653, mode: `MaxEncodedLen`) - /// Storage: `Uniques::CollectionMaxSupply` (r:1 w:0) - /// Proof: `Uniques::CollectionMaxSupply` (`max_values`: None, `max_size`: Some(24), added: 2499, mode: `MaxEncodedLen`) - /// Storage: `Uniques::Account` (r:0 w:1) - /// Proof: `Uniques::Account` (`max_values`: None, `max_size`: Some(88), added: 2563, mode: `MaxEncodedLen`) - fn mint() -> Weight { - // Proof Size summary in bytes: - // Measured: `282` - // Estimated: `3643` - // Minimum execution time: 36_225_000 picoseconds. - Weight::from_parts(36_858_000, 0) - .saturating_add(Weight::from_parts(0, 3643)) - .saturating_add(T::DbWeight::get().reads(3)) - .saturating_add(T::DbWeight::get().writes(3)) - } - /// Storage: `Uniques::Class` (r:1 w:1) - /// Proof: `Uniques::Class` (`max_values`: None, `max_size`: Some(178), added: 2653, mode: `MaxEncodedLen`) - /// Storage: `Uniques::Asset` (r:1 w:1) - /// Proof: `Uniques::Asset` (`max_values`: None, `max_size`: Some(122), added: 2597, mode: `MaxEncodedLen`) - /// Storage: `Uniques::Account` (r:0 w:1) - /// Proof: `Uniques::Account` (`max_values`: None, `max_size`: Some(88), added: 2563, mode: `MaxEncodedLen`) - /// Storage: `Uniques::ItemPriceOf` (r:0 w:1) - /// Proof: `Uniques::ItemPriceOf` (`max_values`: None, `max_size`: Some(89), added: 2564, mode: `MaxEncodedLen`) - fn burn() -> Weight { - // Proof Size summary in bytes: - // Measured: `428` - // Estimated: `3643` - // Minimum execution time: 37_021_000 picoseconds. 
- Weight::from_parts(37_749_000, 0) - .saturating_add(Weight::from_parts(0, 3643)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(4)) - } - /// Storage: `Uniques::Class` (r:1 w:0) - /// Proof: `Uniques::Class` (`max_values`: None, `max_size`: Some(178), added: 2653, mode: `MaxEncodedLen`) - /// Storage: `Uniques::Asset` (r:1 w:1) - /// Proof: `Uniques::Asset` (`max_values`: None, `max_size`: Some(122), added: 2597, mode: `MaxEncodedLen`) - /// Storage: `Uniques::Account` (r:0 w:2) - /// Proof: `Uniques::Account` (`max_values`: None, `max_size`: Some(88), added: 2563, mode: `MaxEncodedLen`) - /// Storage: `Uniques::ItemPriceOf` (r:0 w:1) - /// Proof: `Uniques::ItemPriceOf` (`max_values`: None, `max_size`: Some(89), added: 2564, mode: `MaxEncodedLen`) - fn transfer() -> Weight { - // Proof Size summary in bytes: - // Measured: `428` - // Estimated: `3643` - // Minimum execution time: 26_884_000 picoseconds. - Weight::from_parts(27_414_000, 0) - .saturating_add(Weight::from_parts(0, 3643)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(4)) - } - /// Storage: `Uniques::Class` (r:1 w:1) - /// Proof: `Uniques::Class` (`max_values`: None, `max_size`: Some(178), added: 2653, mode: `MaxEncodedLen`) - /// Storage: `Uniques::Asset` (r:5000 w:5000) - /// Proof: `Uniques::Asset` (`max_values`: None, `max_size`: Some(122), added: 2597, mode: `MaxEncodedLen`) - /// The range of component `i` is `[0, 5000]`. - fn redeposit(i: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `738 + i * (76 ±0)` - // Estimated: `3643 + i * (2597 ±0)` - // Minimum execution time: 14_797_000 picoseconds. - Weight::from_parts(14_943_000, 0) - .saturating_add(Weight::from_parts(0, 3643)) - // Standard Error: 25_250 - .saturating_add(Weight::from_parts(18_014_600, 0).saturating_mul(i.into())) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(i.into()))) - .saturating_add(T::DbWeight::get().writes(1)) - .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(i.into()))) - .saturating_add(Weight::from_parts(0, 2597).saturating_mul(i.into())) - } - /// Storage: `Uniques::Asset` (r:1 w:1) - /// Proof: `Uniques::Asset` (`max_values`: None, `max_size`: Some(122), added: 2597, mode: `MaxEncodedLen`) - /// Storage: `Uniques::Class` (r:1 w:0) - /// Proof: `Uniques::Class` (`max_values`: None, `max_size`: Some(178), added: 2653, mode: `MaxEncodedLen`) - fn freeze() -> Weight { - // Proof Size summary in bytes: - // Measured: `428` - // Estimated: `3643` - // Minimum execution time: 18_864_000 picoseconds. - Weight::from_parts(19_299_000, 0) - .saturating_add(Weight::from_parts(0, 3643)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `Uniques::Asset` (r:1 w:1) - /// Proof: `Uniques::Asset` (`max_values`: None, `max_size`: Some(122), added: 2597, mode: `MaxEncodedLen`) - /// Storage: `Uniques::Class` (r:1 w:0) - /// Proof: `Uniques::Class` (`max_values`: None, `max_size`: Some(178), added: 2653, mode: `MaxEncodedLen`) - fn thaw() -> Weight { - // Proof Size summary in bytes: - // Measured: `428` - // Estimated: `3643` - // Minimum execution time: 18_530_000 picoseconds. 
- Weight::from_parts(19_230_000, 0) - .saturating_add(Weight::from_parts(0, 3643)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `Uniques::Class` (r:1 w:1) - /// Proof: `Uniques::Class` (`max_values`: None, `max_size`: Some(178), added: 2653, mode: `MaxEncodedLen`) - fn freeze_collection() -> Weight { - // Proof Size summary in bytes: - // Measured: `282` - // Estimated: `3643` - // Minimum execution time: 13_807_000 picoseconds. - Weight::from_parts(14_270_000, 0) - .saturating_add(Weight::from_parts(0, 3643)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `Uniques::Class` (r:1 w:1) - /// Proof: `Uniques::Class` (`max_values`: None, `max_size`: Some(178), added: 2653, mode: `MaxEncodedLen`) - fn thaw_collection() -> Weight { - // Proof Size summary in bytes: - // Measured: `282` - // Estimated: `3643` - // Minimum execution time: 13_657_000 picoseconds. - Weight::from_parts(14_059_000, 0) - .saturating_add(Weight::from_parts(0, 3643)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `Uniques::OwnershipAcceptance` (r:1 w:1) - /// Proof: `Uniques::OwnershipAcceptance` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) - /// Storage: `Uniques::Class` (r:1 w:1) - /// Proof: `Uniques::Class` (`max_values`: None, `max_size`: Some(178), added: 2653, mode: `MaxEncodedLen`) - /// Storage: `Uniques::ClassAccount` (r:0 w:2) - /// Proof: `Uniques::ClassAccount` (`max_values`: None, `max_size`: Some(68), added: 2543, mode: `MaxEncodedLen`) - fn transfer_ownership() -> Weight { - // Proof Size summary in bytes: - // Measured: `356` - // Estimated: `3643` - // Minimum execution time: 22_108_000 picoseconds. - Weight::from_parts(22_520_000, 0) - .saturating_add(Weight::from_parts(0, 3643)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(4)) - } - /// Storage: `Uniques::Class` (r:1 w:1) - /// Proof: `Uniques::Class` (`max_values`: None, `max_size`: Some(178), added: 2653, mode: `MaxEncodedLen`) - fn set_team() -> Weight { - // Proof Size summary in bytes: - // Measured: `282` - // Estimated: `3643` - // Minimum execution time: 14_128_000 picoseconds. - Weight::from_parts(14_481_000, 0) - .saturating_add(Weight::from_parts(0, 3643)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `Uniques::Class` (r:1 w:1) - /// Proof: `Uniques::Class` (`max_values`: None, `max_size`: Some(178), added: 2653, mode: `MaxEncodedLen`) - /// Storage: `Uniques::ClassAccount` (r:0 w:1) - /// Proof: `Uniques::ClassAccount` (`max_values`: None, `max_size`: Some(68), added: 2543, mode: `MaxEncodedLen`) - fn force_item_status() -> Weight { - // Proof Size summary in bytes: - // Measured: `282` - // Estimated: `3643` - // Minimum execution time: 17_114_000 picoseconds. 
- Weight::from_parts(17_570_000, 0) - .saturating_add(Weight::from_parts(0, 3643)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `Uniques::Class` (r:1 w:1) - /// Proof: `Uniques::Class` (`max_values`: None, `max_size`: Some(178), added: 2653, mode: `MaxEncodedLen`) - /// Storage: `Uniques::InstanceMetadataOf` (r:1 w:0) - /// Proof: `Uniques::InstanceMetadataOf` (`max_values`: None, `max_size`: Some(187), added: 2662, mode: `MaxEncodedLen`) - /// Storage: `Uniques::Attribute` (r:1 w:1) - /// Proof: `Uniques::Attribute` (`max_values`: None, `max_size`: Some(172), added: 2647, mode: `MaxEncodedLen`) - fn set_attribute() -> Weight { - // Proof Size summary in bytes: - // Measured: `559` - // Estimated: `3652` - // Minimum execution time: 40_412_000 picoseconds. - Weight::from_parts(43_009_000, 0) - .saturating_add(Weight::from_parts(0, 3652)) - .saturating_add(T::DbWeight::get().reads(3)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `Uniques::Class` (r:1 w:1) - /// Proof: `Uniques::Class` (`max_values`: None, `max_size`: Some(178), added: 2653, mode: `MaxEncodedLen`) - /// Storage: `Uniques::InstanceMetadataOf` (r:1 w:0) - /// Proof: `Uniques::InstanceMetadataOf` (`max_values`: None, `max_size`: Some(187), added: 2662, mode: `MaxEncodedLen`) - /// Storage: `Uniques::Attribute` (r:1 w:1) - /// Proof: `Uniques::Attribute` (`max_values`: None, `max_size`: Some(172), added: 2647, mode: `MaxEncodedLen`) - fn clear_attribute() -> Weight { - // Proof Size summary in bytes: - // Measured: `756` - // Estimated: `3652` - // Minimum execution time: 38_044_000 picoseconds. - Weight::from_parts(38_871_000, 0) - .saturating_add(Weight::from_parts(0, 3652)) - .saturating_add(T::DbWeight::get().reads(3)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `Uniques::Class` (r:1 w:1) - /// Proof: `Uniques::Class` (`max_values`: None, `max_size`: Some(178), added: 2653, mode: `MaxEncodedLen`) - /// Storage: `Uniques::InstanceMetadataOf` (r:1 w:1) - /// Proof: `Uniques::InstanceMetadataOf` (`max_values`: None, `max_size`: Some(187), added: 2662, mode: `MaxEncodedLen`) - fn set_metadata() -> Weight { - // Proof Size summary in bytes: - // Measured: `348` - // Estimated: `3652` - // Minimum execution time: 30_016_000 picoseconds. - Weight::from_parts(30_723_000, 0) - .saturating_add(Weight::from_parts(0, 3652)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `Uniques::Class` (r:1 w:1) - /// Proof: `Uniques::Class` (`max_values`: None, `max_size`: Some(178), added: 2653, mode: `MaxEncodedLen`) - /// Storage: `Uniques::InstanceMetadataOf` (r:1 w:1) - /// Proof: `Uniques::InstanceMetadataOf` (`max_values`: None, `max_size`: Some(187), added: 2662, mode: `MaxEncodedLen`) - fn clear_metadata() -> Weight { - // Proof Size summary in bytes: - // Measured: `559` - // Estimated: `3652` - // Minimum execution time: 30_942_000 picoseconds. 
- Weight::from_parts(31_527_000, 0) - .saturating_add(Weight::from_parts(0, 3652)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `Uniques::Class` (r:1 w:1) - /// Proof: `Uniques::Class` (`max_values`: None, `max_size`: Some(178), added: 2653, mode: `MaxEncodedLen`) - /// Storage: `Uniques::ClassMetadataOf` (r:1 w:1) - /// Proof: `Uniques::ClassMetadataOf` (`max_values`: None, `max_size`: Some(167), added: 2642, mode: `MaxEncodedLen`) - fn set_collection_metadata() -> Weight { - // Proof Size summary in bytes: - // Measured: `282` - // Estimated: `3643` - // Minimum execution time: 30_727_000 picoseconds. - Weight::from_parts(31_688_000, 0) - .saturating_add(Weight::from_parts(0, 3643)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `Uniques::Class` (r:1 w:0) - /// Proof: `Uniques::Class` (`max_values`: None, `max_size`: Some(178), added: 2653, mode: `MaxEncodedLen`) - /// Storage: `Uniques::ClassMetadataOf` (r:1 w:1) - /// Proof: `Uniques::ClassMetadataOf` (`max_values`: None, `max_size`: Some(167), added: 2642, mode: `MaxEncodedLen`) - fn clear_collection_metadata() -> Weight { - // Proof Size summary in bytes: - // Measured: `473` - // Estimated: `3643` - // Minimum execution time: 29_844_000 picoseconds. - Weight::from_parts(30_403_000, 0) - .saturating_add(Weight::from_parts(0, 3643)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `Uniques::Class` (r:1 w:0) - /// Proof: `Uniques::Class` (`max_values`: None, `max_size`: Some(178), added: 2653, mode: `MaxEncodedLen`) - /// Storage: `Uniques::Asset` (r:1 w:1) - /// Proof: `Uniques::Asset` (`max_values`: None, `max_size`: Some(122), added: 2597, mode: `MaxEncodedLen`) - fn approve_transfer() -> Weight { - // Proof Size summary in bytes: - // Measured: `428` - // Estimated: `3643` - // Minimum execution time: 19_155_000 picoseconds. - Weight::from_parts(19_909_000, 0) - .saturating_add(Weight::from_parts(0, 3643)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `Uniques::Class` (r:1 w:0) - /// Proof: `Uniques::Class` (`max_values`: None, `max_size`: Some(178), added: 2653, mode: `MaxEncodedLen`) - /// Storage: `Uniques::Asset` (r:1 w:1) - /// Proof: `Uniques::Asset` (`max_values`: None, `max_size`: Some(122), added: 2597, mode: `MaxEncodedLen`) - fn cancel_approval() -> Weight { - // Proof Size summary in bytes: - // Measured: `461` - // Estimated: `3643` - // Minimum execution time: 19_163_000 picoseconds. - Weight::from_parts(19_804_000, 0) - .saturating_add(Weight::from_parts(0, 3643)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `Uniques::OwnershipAcceptance` (r:1 w:1) - /// Proof: `Uniques::OwnershipAcceptance` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) - fn set_accept_ownership() -> Weight { - // Proof Size summary in bytes: - // Measured: `42` - // Estimated: `3517` - // Minimum execution time: 15_413_000 picoseconds. 
- Weight::from_parts(15_762_000, 0) - .saturating_add(Weight::from_parts(0, 3517)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `Uniques::CollectionMaxSupply` (r:1 w:1) - /// Proof: `Uniques::CollectionMaxSupply` (`max_values`: None, `max_size`: Some(24), added: 2499, mode: `MaxEncodedLen`) - /// Storage: `Uniques::Class` (r:1 w:0) - /// Proof: `Uniques::Class` (`max_values`: None, `max_size`: Some(178), added: 2653, mode: `MaxEncodedLen`) - fn set_collection_max_supply() -> Weight { - // Proof Size summary in bytes: - // Measured: `282` - // Estimated: `3643` - // Minimum execution time: 16_477_000 picoseconds. - Weight::from_parts(16_811_000, 0) - .saturating_add(Weight::from_parts(0, 3643)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `Uniques::Asset` (r:1 w:0) - /// Proof: `Uniques::Asset` (`max_values`: None, `max_size`: Some(122), added: 2597, mode: `MaxEncodedLen`) - /// Storage: `Uniques::ItemPriceOf` (r:0 w:1) - /// Proof: `Uniques::ItemPriceOf` (`max_values`: None, `max_size`: Some(89), added: 2564, mode: `MaxEncodedLen`) - fn set_price() -> Weight { - // Proof Size summary in bytes: - // Measured: `259` - // Estimated: `3587` - // Minimum execution time: 16_415_000 picoseconds. - Weight::from_parts(16_906_000, 0) - .saturating_add(Weight::from_parts(0, 3587)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `Uniques::Asset` (r:1 w:1) - /// Proof: `Uniques::Asset` (`max_values`: None, `max_size`: Some(122), added: 2597, mode: `MaxEncodedLen`) - /// Storage: `Uniques::ItemPriceOf` (r:1 w:1) - /// Proof: `Uniques::ItemPriceOf` (`max_values`: None, `max_size`: Some(89), added: 2564, mode: `MaxEncodedLen`) - /// Storage: `Uniques::Class` (r:1 w:0) - /// Proof: `Uniques::Class` (`max_values`: None, `max_size`: Some(178), added: 2653, mode: `MaxEncodedLen`) - /// Storage: `Uniques::Account` (r:0 w:2) - /// Proof: `Uniques::Account` (`max_values`: None, `max_size`: Some(88), added: 2563, mode: `MaxEncodedLen`) - fn buy_item() -> Weight { - // Proof Size summary in bytes: - // Measured: `540` - // Estimated: `3643` - // Minimum execution time: 35_814_000 picoseconds. - Weight::from_parts(36_569_000, 0) - .saturating_add(Weight::from_parts(0, 3643)) - .saturating_add(T::DbWeight::get().reads(3)) - .saturating_add(T::DbWeight::get().writes(4)) - } -} diff --git a/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/weights/pallet_utility.rs b/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/weights/pallet_utility.rs deleted file mode 100644 index d028fb898a4..00000000000 --- a/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/weights/pallet_utility.rs +++ /dev/null @@ -1,101 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! 
Autogenerated weights for `pallet_utility` -//! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-07-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-ynta1nyy-project-238-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! EXECUTION: ``, WASM-EXECUTION: `Compiled`, CHAIN: `Some("asset-hub-polkadot-dev")`, DB CACHE: 1024 - -// Executed Command: -// ./target/production/polkadot-parachain -// benchmark -// pallet -// --chain=asset-hub-polkadot-dev -// --wasm-execution=compiled -// --pallet=pallet_utility -// --no-storage-info -// --no-median-slopes -// --no-min-squares -// --extrinsic=* -// --steps=50 -// --repeat=20 -// --json -// --header=./file_header.txt -// --output=./parachains/runtimes/assets/asset-hub-polkadot/src/weights/ - -#![cfg_attr(rustfmt, rustfmt_skip)] -#![allow(unused_parens)] -#![allow(unused_imports)] -#![allow(missing_docs)] - -use frame_support::{traits::Get, weights::Weight}; -use core::marker::PhantomData; - -/// Weight functions for `pallet_utility`. -pub struct WeightInfo(PhantomData); -impl pallet_utility::WeightInfo for WeightInfo { - /// The range of component `c` is `[0, 1000]`. - fn batch(c: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 6_918_000 picoseconds. - Weight::from_parts(2_421_521, 0) - .saturating_add(Weight::from_parts(0, 0)) - // Standard Error: 3_252 - .saturating_add(Weight::from_parts(6_625_635, 0).saturating_mul(c.into())) - } - fn as_derivative() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 5_304_000 picoseconds. - Weight::from_parts(5_546_000, 0) - .saturating_add(Weight::from_parts(0, 0)) - } - /// The range of component `c` is `[0, 1000]`. - fn batch_all(c: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 6_847_000 picoseconds. - Weight::from_parts(1_224_975, 0) - .saturating_add(Weight::from_parts(0, 0)) - // Standard Error: 3_818 - .saturating_add(Weight::from_parts(6_891_149, 0).saturating_mul(c.into())) - } - fn dispatch_as() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 9_269_000 picoseconds. - Weight::from_parts(9_604_000, 0) - .saturating_add(Weight::from_parts(0, 0)) - } - /// The range of component `c` is `[0, 1000]`. - fn force_batch(c: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 6_855_000 picoseconds. - Weight::from_parts(6_965_000, 0) - .saturating_add(Weight::from_parts(0, 0)) - // Standard Error: 1_631 - .saturating_add(Weight::from_parts(6_545_496, 0).saturating_mul(c.into())) - } -} diff --git a/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/weights/pallet_xcm.rs b/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/weights/pallet_xcm.rs deleted file mode 100644 index 27867e278ed..00000000000 --- a/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/weights/pallet_xcm.rs +++ /dev/null @@ -1,324 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Cumulus. 
- -// Cumulus is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Cumulus is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Cumulus. If not, see . - -//! Autogenerated weights for `pallet_xcm` -//! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-11-10, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-yprdrvc7-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("asset-hub-polkadot-dev")`, DB CACHE: 1024 - -// Executed Command: -// target/production/polkadot-parachain -// benchmark -// pallet -// --steps=50 -// --repeat=20 -// --extrinsic=* -// --wasm-execution=compiled -// --heap-pages=4096 -// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json -// --pallet=pallet_xcm -// --chain=asset-hub-polkadot-dev -// --header=./cumulus/file_header.txt -// --output=./cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/weights/ - -#![cfg_attr(rustfmt, rustfmt_skip)] -#![allow(unused_parens)] -#![allow(unused_imports)] -#![allow(missing_docs)] - -use frame_support::{traits::Get, weights::Weight}; -use core::marker::PhantomData; - -/// Weight functions for `pallet_xcm`. -pub struct WeightInfo(PhantomData); -impl pallet_xcm::WeightInfo for WeightInfo { - /// Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) - /// Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) - /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) - /// Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) - /// Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) - /// Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) - /// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - fn send() -> Weight { - // Proof Size summary in bytes: - // Measured: `75` - // Estimated: `3540` - // Minimum execution time: 25_203_000 picoseconds. - Weight::from_parts(25_927_000, 0) - .saturating_add(Weight::from_parts(0, 3540)) - .saturating_add(T::DbWeight::get().reads(6)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `ParachainInfo::ParachainId` (r:1 w:0) - /// Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - fn teleport_assets() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `1489` - // Minimum execution time: 20_113_000 picoseconds. 
- Weight::from_parts(20_439_000, 0) - .saturating_add(Weight::from_parts(0, 1489)) - .saturating_add(T::DbWeight::get().reads(1)) - } - /// Storage: `ParachainInfo::ParachainId` (r:1 w:0) - /// Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - fn reserve_transfer_assets() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `1489` - // Minimum execution time: 14_959_000 picoseconds. - Weight::from_parts(15_264_000, 0) - .saturating_add(Weight::from_parts(0, 1489)) - .saturating_add(T::DbWeight::get().reads(1)) - } - /// Storage: `Benchmark::Override` (r:0 w:0) - /// Proof: `Benchmark::Override` (`max_values`: None, `max_size`: None, mode: `Measured`) - fn execute() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 18_446_744_073_709_551_000 picoseconds. - Weight::from_parts(18_446_744_073_709_551_000, 0) - .saturating_add(Weight::from_parts(0, 0)) - } - /// Storage: `PolkadotXcm::SupportedVersion` (r:0 w:1) - /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) - fn force_xcm_version() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 7_399_000 picoseconds. - Weight::from_parts(7_674_000, 0) - .saturating_add(Weight::from_parts(0, 0)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `PolkadotXcm::SafeXcmVersion` (r:0 w:1) - /// Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - fn force_default_xcm_version() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 2_388_000 picoseconds. 
- Weight::from_parts(2_522_000, 0) - .saturating_add(Weight::from_parts(0, 0)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `PolkadotXcm::VersionNotifiers` (r:1 w:1) - /// Proof: `PolkadotXcm::VersionNotifiers` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// Storage: `PolkadotXcm::QueryCounter` (r:1 w:1) - /// Proof: `PolkadotXcm::QueryCounter` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) - /// Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) - /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) - /// Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) - /// Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) - /// Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) - /// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `PolkadotXcm::Queries` (r:0 w:1) - /// Proof: `PolkadotXcm::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`) - fn force_subscribe_version_notify() -> Weight { - // Proof Size summary in bytes: - // Measured: `75` - // Estimated: `3540` - // Minimum execution time: 28_791_000 picoseconds. - Weight::from_parts(29_443_000, 0) - .saturating_add(Weight::from_parts(0, 3540)) - .saturating_add(T::DbWeight::get().reads(8)) - .saturating_add(T::DbWeight::get().writes(5)) - } - /// Storage: `PolkadotXcm::VersionNotifiers` (r:1 w:1) - /// Proof: `PolkadotXcm::VersionNotifiers` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) - /// Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) - /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) - /// Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) - /// Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) - /// Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) - /// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `PolkadotXcm::Queries` (r:0 w:1) - /// Proof: `PolkadotXcm::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`) - fn force_unsubscribe_version_notify() -> Weight { - // Proof Size summary in bytes: - // Measured: `292` - // Estimated: `3757` - // Minimum execution time: 30_880_000 picoseconds. 
- Weight::from_parts(31_675_000, 0) - .saturating_add(Weight::from_parts(0, 3757)) - .saturating_add(T::DbWeight::get().reads(7)) - .saturating_add(T::DbWeight::get().writes(4)) - } - /// Storage: `PolkadotXcm::XcmExecutionSuspended` (r:0 w:1) - /// Proof: `PolkadotXcm::XcmExecutionSuspended` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - fn force_suspension() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 2_365_000 picoseconds. - Weight::from_parts(2_550_000, 0) - .saturating_add(Weight::from_parts(0, 0)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `PolkadotXcm::SupportedVersion` (r:4 w:2) - /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) - fn migrate_supported_version() -> Weight { - // Proof Size summary in bytes: - // Measured: `129` - // Estimated: `11019` - // Minimum execution time: 17_185_000 picoseconds. - Weight::from_parts(17_680_000, 0) - .saturating_add(Weight::from_parts(0, 11019)) - .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `PolkadotXcm::VersionNotifiers` (r:4 w:2) - /// Proof: `PolkadotXcm::VersionNotifiers` (`max_values`: None, `max_size`: None, mode: `Measured`) - fn migrate_version_notifiers() -> Weight { - // Proof Size summary in bytes: - // Measured: `133` - // Estimated: `11023` - // Minimum execution time: 16_974_000 picoseconds. - Weight::from_parts(17_660_000, 0) - .saturating_add(Weight::from_parts(0, 11023)) - .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:5 w:0) - /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) - fn already_notified_target() -> Weight { - // Proof Size summary in bytes: - // Measured: `140` - // Estimated: `13505` - // Minimum execution time: 18_536_000 picoseconds. - Weight::from_parts(19_292_000, 0) - .saturating_add(Weight::from_parts(0, 13505)) - .saturating_add(T::DbWeight::get().reads(5)) - } - /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:2 w:1) - /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) - /// Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) - /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) - /// Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) - /// Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) - /// Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) - /// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - fn notify_current_targets() -> Weight { - // Proof Size summary in bytes: - // Measured: `142` - // Estimated: `6082` - // Minimum execution time: 27_368_000 picoseconds. 
- Weight::from_parts(28_161_000, 0) - .saturating_add(Weight::from_parts(0, 6082)) - .saturating_add(T::DbWeight::get().reads(8)) - .saturating_add(T::DbWeight::get().writes(3)) - } - /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:3 w:0) - /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) - fn notify_target_migration_fail() -> Weight { - // Proof Size summary in bytes: - // Measured: `172` - // Estimated: `8587` - // Minimum execution time: 9_553_000 picoseconds. - Weight::from_parts(9_899_000, 0) - .saturating_add(Weight::from_parts(0, 8587)) - .saturating_add(T::DbWeight::get().reads(3)) - } - /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:4 w:2) - /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) - fn migrate_version_notify_targets() -> Weight { - // Proof Size summary in bytes: - // Measured: `140` - // Estimated: `11030` - // Minimum execution time: 17_445_000 picoseconds. - Weight::from_parts(18_206_000, 0) - .saturating_add(Weight::from_parts(0, 11030)) - .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:4 w:2) - /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) - /// Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) - /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) - /// Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) - /// Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) - /// Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) - /// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - fn migrate_and_notify_old_targets() -> Weight { - // Proof Size summary in bytes: - // Measured: `146` - // Estimated: `11036` - // Minimum execution time: 34_200_000 picoseconds. - Weight::from_parts(35_198_000, 0) - .saturating_add(Weight::from_parts(0, 11036)) - .saturating_add(T::DbWeight::get().reads(10)) - .saturating_add(T::DbWeight::get().writes(4)) - } - /// Storage: `PolkadotXcm::QueryCounter` (r:1 w:1) - /// Proof: `PolkadotXcm::QueryCounter` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `PolkadotXcm::Queries` (r:0 w:1) - /// Proof: `PolkadotXcm::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`) - fn new_query() -> Weight { - // Proof Size summary in bytes: - // Measured: `69` - // Estimated: `1554` - // Minimum execution time: 4_679_000 picoseconds. 
- Weight::from_parts(4_841_000, 0) - .saturating_add(Weight::from_parts(0, 1554)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `PolkadotXcm::Queries` (r:1 w:1) - /// Proof: `PolkadotXcm::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`) - fn take_response() -> Weight { - // Proof Size summary in bytes: - // Measured: `7706` - // Estimated: `11171` - // Minimum execution time: 27_281_000 picoseconds. - Weight::from_parts(27_694_000, 0) - .saturating_add(Weight::from_parts(0, 11171)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } -} diff --git a/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/weights/paritydb_weights.rs b/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/weights/paritydb_weights.rs deleted file mode 100644 index 25679703831..00000000000 --- a/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/weights/paritydb_weights.rs +++ /dev/null @@ -1,63 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -pub mod constants { - use frame_support::{ - parameter_types, - weights::{constants, RuntimeDbWeight}, - }; - - parameter_types! { - /// `ParityDB` can be enabled with a feature flag, but is still experimental. These weights - /// are available for brave runtime engineers who may want to try this out as default. - pub const ParityDbWeight: RuntimeDbWeight = RuntimeDbWeight { - read: 8_000 * constants::WEIGHT_REF_TIME_PER_NANOS, - write: 50_000 * constants::WEIGHT_REF_TIME_PER_NANOS, - }; - } - - #[cfg(test)] - mod test_db_weights { - use super::constants::ParityDbWeight as W; - use frame_support::weights::constants; - - /// Checks that all weights exist and have sane values. - // NOTE: If this test fails but you are sure that the generated values are fine, - // you can delete it. - #[test] - fn sane() { - // At least 1 µs. - assert!( - W::get().reads(1).ref_time() >= constants::WEIGHT_REF_TIME_PER_MICROS, - "Read weight should be at least 1 µs." - ); - assert!( - W::get().writes(1).ref_time() >= constants::WEIGHT_REF_TIME_PER_MICROS, - "Write weight should be at least 1 µs." - ); - // At most 1 ms. - assert!( - W::get().reads(1).ref_time() <= constants::WEIGHT_REF_TIME_PER_MILLIS, - "Read weight should be at most 1 ms." - ); - assert!( - W::get().writes(1).ref_time() <= constants::WEIGHT_REF_TIME_PER_MILLIS, - "Write weight should be at most 1 ms." - ); - } - } -} diff --git a/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/weights/rocksdb_weights.rs b/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/weights/rocksdb_weights.rs deleted file mode 100644 index 3dd817aa6f1..00000000000 --- a/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/weights/rocksdb_weights.rs +++ /dev/null @@ -1,63 +0,0 @@ -// This file is part of Substrate. 
- -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -pub mod constants { - use frame_support::{ - parameter_types, - weights::{constants, RuntimeDbWeight}, - }; - - parameter_types! { - /// By default, Substrate uses `RocksDB`, so this will be the weight used throughout - /// the runtime. - pub const RocksDbWeight: RuntimeDbWeight = RuntimeDbWeight { - read: 25_000 * constants::WEIGHT_REF_TIME_PER_NANOS, - write: 100_000 * constants::WEIGHT_REF_TIME_PER_NANOS, - }; - } - - #[cfg(test)] - mod test_db_weights { - use super::constants::RocksDbWeight as W; - use frame_support::weights::constants; - - /// Checks that all weights exist and have sane values. - // NOTE: If this test fails but you are sure that the generated values are fine, - // you can delete it. - #[test] - fn sane() { - // At least 1 µs. - assert!( - W::get().reads(1).ref_time() >= constants::WEIGHT_REF_TIME_PER_MICROS, - "Read weight should be at least 1 µs." - ); - assert!( - W::get().writes(1).ref_time() >= constants::WEIGHT_REF_TIME_PER_MICROS, - "Write weight should be at least 1 µs." - ); - // At most 1 ms. - assert!( - W::get().reads(1).ref_time() <= constants::WEIGHT_REF_TIME_PER_MILLIS, - "Read weight should be at most 1 ms." - ); - assert!( - W::get().writes(1).ref_time() <= constants::WEIGHT_REF_TIME_PER_MILLIS, - "Write weight should be at most 1 ms." - ); - } - } -} diff --git a/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/weights/xcm/mod.rs b/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/weights/xcm/mod.rs deleted file mode 100644 index 44fcb91d688..00000000000 --- a/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/weights/xcm/mod.rs +++ /dev/null @@ -1,244 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -mod pallet_xcm_benchmarks_fungible; -mod pallet_xcm_benchmarks_generic; - -use crate::{xcm_config::MaxAssetsIntoHolding, Runtime}; -use frame_support::weights::Weight; -use pallet_xcm_benchmarks_fungible::WeightInfo as XcmFungibleWeight; -use pallet_xcm_benchmarks_generic::WeightInfo as XcmGeneric; -use sp_std::prelude::*; -use xcm::{latest::prelude::*, DoubleEncoded}; - -trait WeighMultiAssets { - fn weigh_multi_assets(&self, weight: Weight) -> Weight; -} - -const MAX_ASSETS: u64 = 100; - -impl WeighMultiAssets for MultiAssetFilter { - fn weigh_multi_assets(&self, weight: Weight) -> Weight { - match self { - Self::Definite(assets) => weight.saturating_mul(assets.inner().iter().count() as u64), - Self::Wild(asset) => match asset { - All => weight.saturating_mul(MAX_ASSETS), - AllOf { fun, .. } => match fun { - WildFungibility::Fungible => weight, - // Magic number 2 has to do with the fact that we could have up to 2 times - // MaxAssetsIntoHolding in the worst-case scenario. - WildFungibility::NonFungible => - weight.saturating_mul((MaxAssetsIntoHolding::get() * 2) as u64), - }, - AllCounted(count) => weight.saturating_mul(MAX_ASSETS.min(*count as u64)), - AllOfCounted { count, .. } => weight.saturating_mul(MAX_ASSETS.min(*count as u64)), - }, - } - } -} - -impl WeighMultiAssets for MultiAssets { - fn weigh_multi_assets(&self, weight: Weight) -> Weight { - weight.saturating_mul(self.inner().iter().count() as u64) - } -} - -pub struct AssetHubPolkadotXcmWeight<Call>(core::marker::PhantomData<Call>); -impl<Call> XcmWeightInfo<Call> for AssetHubPolkadotXcmWeight<Call> { - fn withdraw_asset(assets: &MultiAssets) -> Weight { - assets.weigh_multi_assets(XcmFungibleWeight::<Runtime>::withdraw_asset()) - } - fn reserve_asset_deposited(assets: &MultiAssets) -> Weight { - assets.weigh_multi_assets(XcmFungibleWeight::<Runtime>::reserve_asset_deposited()) - } - fn receive_teleported_asset(assets: &MultiAssets) -> Weight { - assets.weigh_multi_assets(XcmFungibleWeight::<Runtime>::receive_teleported_asset()) - } - fn query_response( - _query_id: &u64, - _response: &Response, - _max_weight: &Weight, - _querier: &Option<MultiLocation>, - ) -> Weight { - XcmGeneric::<Runtime>::query_response() - } - fn transfer_asset(assets: &MultiAssets, _dest: &MultiLocation) -> Weight { - assets.weigh_multi_assets(XcmFungibleWeight::<Runtime>::transfer_asset()) - } - fn transfer_reserve_asset( - assets: &MultiAssets, - _dest: &MultiLocation, - _xcm: &Xcm<()>, - ) -> Weight { - assets.weigh_multi_assets(XcmFungibleWeight::<Runtime>::transfer_reserve_asset()) - } - fn transact( - _origin_type: &OriginKind, - _require_weight_at_most: &Weight, - _call: &DoubleEncoded<Call>, - ) -> Weight { - XcmGeneric::<Runtime>::transact() - } - fn hrmp_new_channel_open_request( - _sender: &u32, - _max_message_size: &u32, - _max_capacity: &u32, - ) -> Weight { - // XCM Executor does not currently support HRMP channel operations - Weight::MAX - } - fn hrmp_channel_accepted(_recipient: &u32) -> Weight { - // XCM Executor does not currently support HRMP channel operations - Weight::MAX - } - fn hrmp_channel_closing(_initiator: &u32, _sender: &u32, _recipient: &u32) -> Weight { - // XCM Executor does not currently support HRMP channel operations - Weight::MAX - } - fn clear_origin() -> Weight { - XcmGeneric::<Runtime>::clear_origin() - } - fn descend_origin(_who: &InteriorMultiLocation) -> Weight { - XcmGeneric::<Runtime>::descend_origin() - } - fn report_error(_query_response_info: &QueryResponseInfo) -> Weight { - XcmGeneric::<Runtime>::report_error() - } - - fn deposit_asset(assets: &MultiAssetFilter, _dest: &MultiLocation) -> Weight { -
assets.weigh_multi_assets(XcmFungibleWeight::<Runtime>::deposit_asset()) - } - fn deposit_reserve_asset( - assets: &MultiAssetFilter, - _dest: &MultiLocation, - _xcm: &Xcm<()>, - ) -> Weight { - assets.weigh_multi_assets(XcmFungibleWeight::<Runtime>::deposit_reserve_asset()) - } - fn exchange_asset(_give: &MultiAssetFilter, _receive: &MultiAssets, _maximal: &bool) -> Weight { - Weight::MAX - } - fn initiate_reserve_withdraw( - assets: &MultiAssetFilter, - _reserve: &MultiLocation, - _xcm: &Xcm<()>, - ) -> Weight { - assets.weigh_multi_assets(XcmFungibleWeight::<Runtime>::initiate_reserve_withdraw()) - } - fn initiate_teleport( - assets: &MultiAssetFilter, - _dest: &MultiLocation, - _xcm: &Xcm<()>, - ) -> Weight { - assets.weigh_multi_assets(XcmFungibleWeight::<Runtime>::initiate_teleport()) - } - fn report_holding(_response_info: &QueryResponseInfo, _assets: &MultiAssetFilter) -> Weight { - XcmGeneric::<Runtime>::report_holding() - } - fn buy_execution(_fees: &MultiAsset, _weight_limit: &WeightLimit) -> Weight { - XcmGeneric::<Runtime>::buy_execution() - } - fn refund_surplus() -> Weight { - XcmGeneric::<Runtime>::refund_surplus() - } - fn set_error_handler(_xcm: &Xcm<Call>) -> Weight { - XcmGeneric::<Runtime>::set_error_handler() - } - fn set_appendix(_xcm: &Xcm<Call>) -> Weight { - XcmGeneric::<Runtime>::set_appendix() - } - fn clear_error() -> Weight { - XcmGeneric::<Runtime>::clear_error() - } - fn claim_asset(_assets: &MultiAssets, _ticket: &MultiLocation) -> Weight { - XcmGeneric::<Runtime>::claim_asset() - } - fn trap(_code: &u64) -> Weight { - XcmGeneric::<Runtime>::trap() - } - fn subscribe_version(_query_id: &QueryId, _max_response_weight: &Weight) -> Weight { - XcmGeneric::<Runtime>::subscribe_version() - } - fn unsubscribe_version() -> Weight { - XcmGeneric::<Runtime>::unsubscribe_version() - } - fn burn_asset(assets: &MultiAssets) -> Weight { - assets.weigh_multi_assets(XcmGeneric::<Runtime>::burn_asset()) - } - fn expect_asset(assets: &MultiAssets) -> Weight { - assets.weigh_multi_assets(XcmGeneric::<Runtime>::expect_asset()) - } - fn expect_origin(_origin: &Option<MultiLocation>) -> Weight { - XcmGeneric::<Runtime>::expect_origin() - } - fn expect_error(_error: &Option<(u32, XcmError)>) -> Weight { - XcmGeneric::<Runtime>::expect_error() - } - fn expect_transact_status(_transact_status: &MaybeErrorCode) -> Weight { - XcmGeneric::<Runtime>::expect_transact_status() - } - fn query_pallet(_module_name: &Vec<u8>, _response_info: &QueryResponseInfo) -> Weight { - XcmGeneric::<Runtime>::query_pallet() - } - fn expect_pallet( - _index: &u32, - _name: &Vec<u8>, - _module_name: &Vec<u8>, - _crate_major: &u32, - _min_crate_minor: &u32, - ) -> Weight { - XcmGeneric::<Runtime>::expect_pallet() - } - fn report_transact_status(_response_info: &QueryResponseInfo) -> Weight { - XcmGeneric::<Runtime>::report_transact_status() - } - fn clear_transact_status() -> Weight { - XcmGeneric::<Runtime>::clear_transact_status() - } - fn universal_origin(_: &Junction) -> Weight { - Weight::MAX - } - fn export_message(_: &NetworkId, _: &Junctions, _: &Xcm<()>) -> Weight { - Weight::MAX - } - fn lock_asset(_: &MultiAsset, _: &MultiLocation) -> Weight { - Weight::MAX - } - fn unlock_asset(_: &MultiAsset, _: &MultiLocation) -> Weight { - Weight::MAX - } - fn note_unlockable(_: &MultiAsset, _: &MultiLocation) -> Weight { - Weight::MAX - } - fn request_unlock(_: &MultiAsset, _: &MultiLocation) -> Weight { - Weight::MAX - } - fn set_fees_mode(_: &bool) -> Weight { - XcmGeneric::<Runtime>::set_fees_mode() - } - fn set_topic(_topic: &[u8; 32]) -> Weight { - XcmGeneric::<Runtime>::set_topic() - } - fn clear_topic() -> Weight { - XcmGeneric::<Runtime>::clear_topic() - } - fn alias_origin(_: &MultiLocation) -> Weight { - // XCM Executor does not currently support alias origin
operations - Weight::MAX - } - fn unpaid_execution(_: &WeightLimit, _: &Option) -> Weight { - XcmGeneric::::unpaid_execution() - } -} diff --git a/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/weights/xcm/pallet_xcm_benchmarks_fungible.rs b/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/weights/xcm/pallet_xcm_benchmarks_fungible.rs deleted file mode 100644 index 1036d4cbf00..00000000000 --- a/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/weights/xcm/pallet_xcm_benchmarks_fungible.rs +++ /dev/null @@ -1,187 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! Autogenerated weights for `pallet_xcm_benchmarks::fungible` -//! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-09-27, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-nbnwcyh-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! WASM-EXECUTION: Compiled, CHAIN: Some("asset-hub-polkadot-dev"), DB CACHE: 1024 - -// Executed Command: -// target/production/polkadot-parachain -// benchmark -// pallet -// --steps=50 -// --repeat=20 -// --extrinsic=* -// --wasm-execution=compiled -// --heap-pages=4096 -// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json -// --pallet=pallet_xcm_benchmarks::fungible -// --chain=asset-hub-polkadot-dev -// --header=./cumulus/file_header.txt -// --template=./cumulus/templates/xcm-bench-template.hbs -// --output=./cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/weights/xcm/ - -#![cfg_attr(rustfmt, rustfmt_skip)] -#![allow(unused_parens)] -#![allow(unused_imports)] - -use frame_support::{traits::Get, weights::Weight}; -use sp_std::marker::PhantomData; - -/// Weights for `pallet_xcm_benchmarks::fungible`. -pub struct WeightInfo(PhantomData); -impl WeightInfo { - // Storage: `System::Account` (r:1 w:1) - // Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - pub fn withdraw_asset() -> Weight { - // Proof Size summary in bytes: - // Measured: `101` - // Estimated: `3593` - // Minimum execution time: 25_903_000 picoseconds. - Weight::from_parts(26_768_000, 3593) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - // Storage: `System::Account` (r:2 w:2) - // Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - pub fn transfer_asset() -> Weight { - // Proof Size summary in bytes: - // Measured: `101` - // Estimated: `6196` - // Minimum execution time: 51_042_000 picoseconds. 
- Weight::from_parts(51_939_000, 6196) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(2)) - } - // Storage: `System::Account` (r:2 w:2) - // Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - // Storage: `ParachainInfo::ParachainId` (r:1 w:0) - // Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - // Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) - // Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) - // Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) - // Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - // Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) - // Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - // Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) - // Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - // Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) - // Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - pub fn transfer_reserve_asset() -> Weight { - // Proof Size summary in bytes: - // Measured: `176` - // Estimated: `6196` - // Minimum execution time: 74_626_000 picoseconds. - Weight::from_parts(75_963_000, 6196) - .saturating_add(T::DbWeight::get().reads(8)) - .saturating_add(T::DbWeight::get().writes(4)) - } - // Storage: `Benchmark::Override` (r:0 w:0) - // Proof: `Benchmark::Override` (`max_values`: None, `max_size`: None, mode: `Measured`) - pub fn reserve_asset_deposited() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 18_446_744_073_709_551_000 picoseconds. - Weight::from_parts(18_446_744_073_709_551_000, 0) - } - // Storage: `ParachainInfo::ParachainId` (r:1 w:0) - // Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - // Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) - // Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) - // Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) - // Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - // Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) - // Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - // Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) - // Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - // Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) - // Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - pub fn initiate_reserve_withdraw() -> Weight { - // Proof Size summary in bytes: - // Measured: `75` - // Estimated: `3540` - // Minimum execution time: 480_030_000 picoseconds. - Weight::from_parts(486_039_000, 3540) - .saturating_add(T::DbWeight::get().reads(6)) - .saturating_add(T::DbWeight::get().writes(2)) - } - pub fn receive_teleported_asset() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 3_936_000 picoseconds. 
- Weight::from_parts(4_033_000, 0) - } - // Storage: `System::Account` (r:1 w:1) - // Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - pub fn deposit_asset() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `3593` - // Minimum execution time: 26_274_000 picoseconds. - Weight::from_parts(26_609_000, 3593) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - // Storage: `System::Account` (r:1 w:1) - // Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - // Storage: `ParachainInfo::ParachainId` (r:1 w:0) - // Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - // Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) - // Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) - // Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) - // Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - // Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) - // Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - // Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) - // Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - // Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) - // Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - pub fn deposit_reserve_asset() -> Weight { - // Proof Size summary in bytes: - // Measured: `75` - // Estimated: `3593` - // Minimum execution time: 52_888_000 picoseconds. - Weight::from_parts(53_835_000, 3593) - .saturating_add(T::DbWeight::get().reads(7)) - .saturating_add(T::DbWeight::get().writes(3)) - } - // Storage: `ParachainInfo::ParachainId` (r:1 w:0) - // Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - // Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) - // Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) - // Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) - // Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - // Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) - // Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - // Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) - // Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - // Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) - // Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - pub fn initiate_teleport() -> Weight { - // Proof Size summary in bytes: - // Measured: `75` - // Estimated: `3540` - // Minimum execution time: 33_395_000 picoseconds. 
- Weight::from_parts(33_827_000, 3540) - .saturating_add(T::DbWeight::get().reads(6)) - .saturating_add(T::DbWeight::get().writes(2)) - } -} diff --git a/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/weights/xcm/pallet_xcm_benchmarks_generic.rs b/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/weights/xcm/pallet_xcm_benchmarks_generic.rs deleted file mode 100644 index 12ef504727e..00000000000 --- a/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/weights/xcm/pallet_xcm_benchmarks_generic.rs +++ /dev/null @@ -1,328 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! Autogenerated weights for `pallet_xcm_benchmarks::generic` -//! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-07-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-ynta1nyy-project-238-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! EXECUTION: , WASM-EXECUTION: Compiled, CHAIN: Some("asset-hub-polkadot-dev"), DB CACHE: 1024 - -// Executed Command: -// ./target/production/polkadot-parachain -// benchmark -// pallet -// --template=./templates/xcm-bench-template.hbs -// --chain=asset-hub-polkadot-dev -// --wasm-execution=compiled -// --pallet=pallet_xcm_benchmarks::generic -// --no-storage-info -// --no-median-slopes -// --no-min-squares -// --extrinsic=* -// --steps=50 -// --repeat=20 -// --json -// --header=./file_header.txt -// --output=./parachains/runtimes/assets/asset-hub-polkadot/src/weights/xcm/pallet_xcm_benchmarks_generic.rs - -#![cfg_attr(rustfmt, rustfmt_skip)] -#![allow(unused_parens)] -#![allow(unused_imports)] - -use frame_support::{traits::Get, weights::Weight}; -use sp_std::marker::PhantomData; - -/// Weights for `pallet_xcm_benchmarks::generic`. 
-pub struct WeightInfo(PhantomData); -impl WeightInfo { - // Storage: `ParachainInfo::ParachainId` (r:1 w:0) - // Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - // Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) - // Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) - // Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) - // Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - // Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) - // Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - // Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) - // Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - // Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) - // Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - pub fn report_holding() -> Weight { - // Proof Size summary in bytes: - // Measured: `75` - // Estimated: `3540` - // Minimum execution time: 425_235_000 picoseconds. - Weight::from_parts(432_935_000, 3540) - .saturating_add(T::DbWeight::get().reads(6)) - .saturating_add(T::DbWeight::get().writes(2)) - } - pub fn buy_execution() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 4_070_000 picoseconds. - Weight::from_parts(4_329_000, 0) - } - // Storage: `PolkadotXcm::Queries` (r:1 w:0) - // Proof: `PolkadotXcm::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`) - pub fn query_response() -> Weight { - // Proof Size summary in bytes: - // Measured: `69` - // Estimated: `3534` - // Minimum execution time: 11_464_000 picoseconds. - Weight::from_parts(11_829_000, 3534) - .saturating_add(T::DbWeight::get().reads(1)) - } - pub fn transact() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 13_574_000 picoseconds. - Weight::from_parts(14_021_000, 0) - } - pub fn refund_surplus() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 4_276_000 picoseconds. - Weight::from_parts(4_479_000, 0) - } - pub fn set_error_handler() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 2_833_000 picoseconds. - Weight::from_parts(2_939_000, 0) - } - pub fn set_appendix() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 2_797_000 picoseconds. - Weight::from_parts(2_901_000, 0) - } - pub fn clear_error() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 2_855_000 picoseconds. - Weight::from_parts(2_961_000, 0) - } - pub fn descend_origin() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 3_589_000 picoseconds. - Weight::from_parts(3_720_000, 0) - } - pub fn clear_origin() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 2_786_000 picoseconds. 
- Weight::from_parts(2_889_000, 0) - } - // Storage: `ParachainInfo::ParachainId` (r:1 w:0) - // Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - // Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) - // Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) - // Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) - // Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - // Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) - // Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - // Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) - // Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - // Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) - // Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - pub fn report_error() -> Weight { - // Proof Size summary in bytes: - // Measured: `75` - // Estimated: `3540` - // Minimum execution time: 25_740_000 picoseconds. - Weight::from_parts(26_355_000, 3540) - .saturating_add(T::DbWeight::get().reads(6)) - .saturating_add(T::DbWeight::get().writes(2)) - } - // Storage: `PolkadotXcm::AssetTraps` (r:1 w:1) - // Proof: `PolkadotXcm::AssetTraps` (`max_values`: None, `max_size`: None, mode: `Measured`) - pub fn claim_asset() -> Weight { - // Proof Size summary in bytes: - // Measured: `126` - // Estimated: `3591` - // Minimum execution time: 16_206_000 picoseconds. - Weight::from_parts(16_651_000, 3591) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - pub fn trap() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 2_819_000 picoseconds. - Weight::from_parts(2_944_000, 0) - } - // Storage: `PolkadotXcm::VersionNotifyTargets` (r:1 w:1) - // Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) - // Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) - // Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) - // Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) - // Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - // Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) - // Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - // Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) - // Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - // Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) - // Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - pub fn subscribe_version() -> Weight { - // Proof Size summary in bytes: - // Measured: `75` - // Estimated: `3540` - // Minimum execution time: 28_216_000 picoseconds. 
- Weight::from_parts(28_878_000, 3540) - .saturating_add(T::DbWeight::get().reads(6)) - .saturating_add(T::DbWeight::get().writes(3)) - } - // Storage: `PolkadotXcm::VersionNotifyTargets` (r:0 w:1) - // Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) - pub fn unsubscribe_version() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 4_795_000 picoseconds. - Weight::from_parts(5_008_000, 0) - .saturating_add(T::DbWeight::get().writes(1)) - } - pub fn burn_asset() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 135_205_000 picoseconds. - Weight::from_parts(140_623_000, 0) - } - pub fn expect_asset() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 12_791_000 picoseconds. - Weight::from_parts(13_114_000, 0) - } - pub fn expect_origin() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 3_000_000 picoseconds. - Weight::from_parts(3_091_000, 0) - } - pub fn expect_error() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 2_828_000 picoseconds. - Weight::from_parts(2_947_000, 0) - } - pub fn expect_transact_status() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 2_980_000 picoseconds. - Weight::from_parts(3_123_000, 0) - } - // Storage: `ParachainInfo::ParachainId` (r:1 w:0) - // Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - // Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) - // Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) - // Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) - // Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - // Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) - // Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - // Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) - // Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - // Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) - // Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - pub fn query_pallet() -> Weight { - // Proof Size summary in bytes: - // Measured: `75` - // Estimated: `3540` - // Minimum execution time: 29_672_000 picoseconds. - Weight::from_parts(30_318_000, 3540) - .saturating_add(T::DbWeight::get().reads(6)) - .saturating_add(T::DbWeight::get().writes(2)) - } - pub fn expect_pallet() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 5_421_000 picoseconds. 
- Weight::from_parts(5_614_000, 0) - } - // Storage: `ParachainInfo::ParachainId` (r:1 w:0) - // Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - // Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) - // Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) - // Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) - // Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - // Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) - // Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - // Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) - // Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - // Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) - // Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - pub fn report_transact_status() -> Weight { - // Proof Size summary in bytes: - // Measured: `75` - // Estimated: `3540` - // Minimum execution time: 25_621_000 picoseconds. - Weight::from_parts(26_486_000, 3540) - .saturating_add(T::DbWeight::get().reads(6)) - .saturating_add(T::DbWeight::get().writes(2)) - } - pub fn clear_transact_status() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 2_873_000 picoseconds. - Weight::from_parts(2_973_000, 0) - } - pub fn set_topic() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 2_861_000 picoseconds. - Weight::from_parts(2_923_000, 0) - } - pub fn clear_topic() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 2_845_000 picoseconds. - Weight::from_parts(2_970_000, 0) - } - pub fn set_fees_mode() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 2_773_000 picoseconds. - Weight::from_parts(2_922_000, 0) - } - pub fn unpaid_execution() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 2_980_000 picoseconds. - Weight::from_parts(3_095_000, 0) - } -} diff --git a/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/xcm_config.rs b/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/xcm_config.rs deleted file mode 100644 index b3c2ce4da76..00000000000 --- a/cumulus/parachains/runtimes/assets/asset-hub-polkadot/src/xcm_config.rs +++ /dev/null @@ -1,546 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -use super::{ - AccountId, AllPalletsWithSystem, Assets, Authorship, Balance, Balances, ForeignAssets, - ParachainInfo, ParachainSystem, PolkadotXcm, Runtime, RuntimeCall, RuntimeEvent, RuntimeOrigin, - TransactionByteFee, TrustBackedAssetsInstance, WeightToFee, XcmpQueue, CENTS, -}; -use assets_common::matching::{FromSiblingParachain, IsForeignConcreteAsset}; -use frame_support::{ - match_types, parameter_types, - traits::{ConstU32, Contains, Everything, Nothing, PalletInfoAccess}, -}; -use frame_system::EnsureRoot; -use pallet_xcm::XcmPassthrough; -use parachains_common::{ - impls::ToStakingPot, - xcm_config::{AssetFeeAsExistentialDepositMultiplier, ConcreteAssetFromSystem}, -}; -use polkadot_parachain_primitives::primitives::Sibling; -use polkadot_runtime_common::xcm_sender::ExponentialPrice; -use sp_runtime::traits::ConvertInto; -use xcm::latest::prelude::*; -use xcm_builder::{ - AccountId32Aliases, AllowExplicitUnpaidExecutionFrom, AllowKnownQueryResponses, - AllowSubscriptionsFrom, AllowTopLevelPaidExecutionFrom, CurrencyAdapter, - DenyReserveTransferToRelayChain, DenyThenTry, DescribeFamily, DescribePalletTerminal, - EnsureXcmOrigin, FungiblesAdapter, HashedDescription, IsConcrete, LocalMint, NoChecking, - ParentAsSuperuser, ParentIsPreset, RelayChainAsNative, SiblingParachainAsNative, - SiblingParachainConvertsVia, SignedAccountId32AsNative, SignedToAccountId32, - SovereignSignedViaLocation, StartsWith, StartsWithExplicitGlobalConsensus, TakeWeightCredit, - TrailingSetTopicAsId, UsingComponents, WeightInfoBounds, WithComputedOrigin, WithUniqueTopic, -}; -use xcm_executor::{traits::WithOriginFilter, XcmExecutor}; - -parameter_types! { - pub const DotLocation: MultiLocation = MultiLocation::parent(); - pub const RelayNetwork: Option = Some(NetworkId::Polkadot); - pub RelayChainOrigin: RuntimeOrigin = cumulus_pallet_xcm::Origin::Relay.into(); - pub UniversalLocation: InteriorMultiLocation = - X2(GlobalConsensus(RelayNetwork::get().unwrap()), Parachain(ParachainInfo::parachain_id().into())); - pub UniversalLocationNetworkId: NetworkId = UniversalLocation::get().global_consensus().unwrap(); - pub TrustBackedAssetsPalletLocation: MultiLocation = - PalletInstance(::index() as u8).into(); - pub CheckingAccount: AccountId = PolkadotXcm::check_account(); - pub FellowshipLocation: MultiLocation = MultiLocation::new(1, Parachain(1001)); - pub const GovernanceLocation: MultiLocation = MultiLocation::parent(); -} - -/// Type for specifying how a `MultiLocation` can be converted into an `AccountId`. This is used -/// when determining ownership of accounts for asset transacting and when attempting to use XCM -/// `Transact` in order to determine the dispatch Origin. -pub type LocationToAccountId = ( - // The parent (Relay-chain) origin converts to the parent `AccountId`. - ParentIsPreset, - // Sibling parachain origins convert to AccountId via the `ParaId::into`. - SiblingParachainConvertsVia, - // Straight up local `AccountId32` origins just alias directly to `AccountId`. - AccountId32Aliases, - // Foreign chain account alias into local accounts according to a hash of their standard - // description. - HashedDescription>, -); - -/// Means for transacting the native currency on this chain. 
-pub type CurrencyTransactor = CurrencyAdapter< - // Use this currency: - Balances, - // Use this currency when it is a fungible asset matching the given location or name: - IsConcrete, - // Convert an XCM MultiLocation into a local account id: - LocationToAccountId, - // Our chain's account ID type (we can't get away without mentioning it explicitly): - AccountId, - // We don't track any teleports of `Balances`. - (), ->; - -/// `AssetId`/`Balance` converter for `TrustBackedAssets`. -pub type TrustBackedAssetsConvertedConcreteId = - assets_common::TrustBackedAssetsConvertedConcreteId; - -/// Means for transacting assets besides the native currency on this chain. -pub type FungiblesTransactor = FungiblesAdapter< - // Use this fungibles implementation: - Assets, - // Use this currency when it is a fungible asset matching the given location or name: - TrustBackedAssetsConvertedConcreteId, - // Convert an XCM MultiLocation into a local account id: - LocationToAccountId, - // Our chain's account ID type (we can't get away without mentioning it explicitly): - AccountId, - // We only want to allow teleports of known assets. We use non-zero issuance as an indication - // that this asset is known. - LocalMint>, - // The account to use for tracking teleports. - CheckingAccount, ->; - -/// `AssetId/Balance` converter for `TrustBackedAssets` -pub type ForeignAssetsConvertedConcreteId = assets_common::ForeignAssetsConvertedConcreteId< - ( - // Ignore `TrustBackedAssets` explicitly - StartsWith, - // Ignore assets that start explicitly with our `GlobalConsensus(NetworkId)`, means: - // - foreign assets from our consensus should be: `MultiLocation {parents: 1, - // X*(Parachain(xyz), ..)}` - // - foreign assets outside our consensus with the same `GlobalConsensus(NetworkId)` won't - // be accepted here - StartsWithExplicitGlobalConsensus, - ), - Balance, ->; - -/// Means for transacting foreign assets from different global consensus. -pub type ForeignFungiblesTransactor = FungiblesAdapter< - // Use this fungibles implementation: - ForeignAssets, - // Use this currency when it is a fungible asset matching the given location or name: - ForeignAssetsConvertedConcreteId, - // Convert an XCM MultiLocation into a local account id: - LocationToAccountId, - // Our chain's account ID type (we can't get away without mentioning it explicitly): - AccountId, - // We dont need to check teleports here. - NoChecking, - // The account to use for tracking teleports. - CheckingAccount, ->; - -/// Means for transacting assets on this chain. -pub type AssetTransactors = (CurrencyTransactor, FungiblesTransactor, ForeignFungiblesTransactor); - -/// This is the type we use to convert an (incoming) XCM origin into a local `Origin` instance, -/// ready for dispatching a transaction with Xcm's `Transact`. There is an `OriginKind` which can -/// biases the kind of local `Origin` it will become. -pub type XcmOriginToTransactDispatchOrigin = ( - // Sovereign account converter; this attempts to derive an `AccountId` from the origin location - // using `LocationToAccountId` and then turn that into the usual `Signed` origin. Useful for - // foreign chains who want to have a local sovereign account on this chain which they control. - SovereignSignedViaLocation, - // Native converter for Relay-chain (Parent) location; will convert to a `Relay` origin when - // recognised. - RelayChainAsNative, - // Native converter for sibling Parachains; will convert to a `SiblingPara` origin when - // recognised. 
- SiblingParachainAsNative, - // Superuser converter for the Relay-chain (Parent) location. This will allow it to issue a - // transaction from the Root origin. - ParentAsSuperuser, - // Native signed account converter; this just converts an `AccountId32` origin into a normal - // `RuntimeOrigin::Signed` origin of the same 32-byte value. - SignedAccountId32AsNative, - // Xcm origins can be represented natively under the Xcm pallet's Xcm origin. - XcmPassthrough, -); - -parameter_types! { - pub const MaxInstructions: u32 = 100; - pub const MaxAssetsIntoHolding: u32 = 64; - pub XcmAssetFeesReceiver: Option = Authorship::author(); -} - -match_types! { - pub type ParentOrParentsPlurality: impl Contains = { - MultiLocation { parents: 1, interior: Here } | - MultiLocation { parents: 1, interior: X1(Plurality { .. }) } - }; - pub type ParentOrSiblings: impl Contains = { - MultiLocation { parents: 1, interior: Here } | - MultiLocation { parents: 1, interior: X1(_) } - }; - pub type FellowsPlurality: impl Contains = { - MultiLocation { parents: 1, interior: X2(Parachain(1001), Plurality { id: BodyId::Technical, ..}) } - }; - pub type FellowshipSalaryPallet: impl Contains = { - MultiLocation { parents: 1, interior: X2(Parachain(1001), PalletInstance(64)) } - }; - pub type AmbassadorSalaryPallet: impl Contains = { - MultiLocation { parents: 1, interior: X2(Parachain(1001), PalletInstance(74)) } - }; -} - -/// A call filter for the XCM Transact instruction. This is a temporary measure until we properly -/// account for proof size weights. -/// -/// Calls that are allowed through this filter must: -/// 1. Have a fixed weight; -/// 2. Cannot lead to another call being made; -/// 3. Have a defined proof size weight, e.g. no unbounded vecs in call parameters. -pub struct SafeCallFilter; -impl Contains for SafeCallFilter { - fn contains(call: &RuntimeCall) -> bool { - #[cfg(feature = "runtime-benchmarks")] - { - if matches!(call, RuntimeCall::System(frame_system::Call::remark_with_event { .. })) { - return true - } - } - - matches!( - call, - RuntimeCall::PolkadotXcm(pallet_xcm::Call::force_xcm_version { .. }) | - RuntimeCall::System( - frame_system::Call::set_heap_pages { .. } | - frame_system::Call::set_code { .. } | - frame_system::Call::set_code_without_checks { .. } | - frame_system::Call::kill_prefix { .. }, - ) | RuntimeCall::ParachainSystem(..) | - RuntimeCall::Timestamp(..) | - RuntimeCall::Balances(..) | - RuntimeCall::CollatorSelection( - pallet_collator_selection::Call::set_desired_candidates { .. } | - pallet_collator_selection::Call::set_candidacy_bond { .. } | - pallet_collator_selection::Call::register_as_candidate { .. } | - pallet_collator_selection::Call::leave_intent { .. } | - pallet_collator_selection::Call::set_invulnerables { .. } | - pallet_collator_selection::Call::add_invulnerable { .. } | - pallet_collator_selection::Call::remove_invulnerable { .. }, - ) | RuntimeCall::Session(pallet_session::Call::purge_keys { .. }) | - RuntimeCall::XcmpQueue(..) | - RuntimeCall::MessageQueue(..) | - RuntimeCall::Assets( - pallet_assets::Call::create { .. } | - pallet_assets::Call::force_create { .. } | - pallet_assets::Call::start_destroy { .. } | - pallet_assets::Call::destroy_accounts { .. } | - pallet_assets::Call::destroy_approvals { .. } | - pallet_assets::Call::finish_destroy { .. } | - pallet_assets::Call::mint { .. } | - pallet_assets::Call::burn { .. } | - pallet_assets::Call::transfer { .. } | - pallet_assets::Call::transfer_keep_alive { .. 
} | - pallet_assets::Call::force_transfer { .. } | - pallet_assets::Call::freeze { .. } | - pallet_assets::Call::thaw { .. } | - pallet_assets::Call::freeze_asset { .. } | - pallet_assets::Call::thaw_asset { .. } | - pallet_assets::Call::transfer_ownership { .. } | - pallet_assets::Call::set_team { .. } | - pallet_assets::Call::set_metadata { .. } | - pallet_assets::Call::clear_metadata { .. } | - pallet_assets::Call::force_set_metadata { .. } | - pallet_assets::Call::force_clear_metadata { .. } | - pallet_assets::Call::force_asset_status { .. } | - pallet_assets::Call::approve_transfer { .. } | - pallet_assets::Call::cancel_approval { .. } | - pallet_assets::Call::force_cancel_approval { .. } | - pallet_assets::Call::transfer_approved { .. } | - pallet_assets::Call::touch { .. } | - pallet_assets::Call::refund { .. }, - ) | RuntimeCall::ForeignAssets( - pallet_assets::Call::create { .. } | - pallet_assets::Call::force_create { .. } | - pallet_assets::Call::start_destroy { .. } | - pallet_assets::Call::destroy_accounts { .. } | - pallet_assets::Call::destroy_approvals { .. } | - pallet_assets::Call::finish_destroy { .. } | - pallet_assets::Call::mint { .. } | - pallet_assets::Call::burn { .. } | - pallet_assets::Call::transfer { .. } | - pallet_assets::Call::transfer_keep_alive { .. } | - pallet_assets::Call::force_transfer { .. } | - pallet_assets::Call::freeze { .. } | - pallet_assets::Call::thaw { .. } | - pallet_assets::Call::freeze_asset { .. } | - pallet_assets::Call::thaw_asset { .. } | - pallet_assets::Call::transfer_ownership { .. } | - pallet_assets::Call::set_team { .. } | - pallet_assets::Call::set_metadata { .. } | - pallet_assets::Call::clear_metadata { .. } | - pallet_assets::Call::force_set_metadata { .. } | - pallet_assets::Call::force_clear_metadata { .. } | - pallet_assets::Call::force_asset_status { .. } | - pallet_assets::Call::approve_transfer { .. } | - pallet_assets::Call::cancel_approval { .. } | - pallet_assets::Call::force_cancel_approval { .. } | - pallet_assets::Call::transfer_approved { .. } | - pallet_assets::Call::touch { .. } | - pallet_assets::Call::refund { .. }, - ) | RuntimeCall::Nfts( - pallet_nfts::Call::create { .. } | - pallet_nfts::Call::force_create { .. } | - pallet_nfts::Call::destroy { .. } | - pallet_nfts::Call::mint { .. } | - pallet_nfts::Call::force_mint { .. } | - pallet_nfts::Call::burn { .. } | - pallet_nfts::Call::transfer { .. } | - pallet_nfts::Call::lock_item_transfer { .. } | - pallet_nfts::Call::unlock_item_transfer { .. } | - pallet_nfts::Call::lock_collection { .. } | - pallet_nfts::Call::transfer_ownership { .. } | - pallet_nfts::Call::set_team { .. } | - pallet_nfts::Call::force_collection_owner { .. } | - pallet_nfts::Call::force_collection_config { .. } | - pallet_nfts::Call::approve_transfer { .. } | - pallet_nfts::Call::cancel_approval { .. } | - pallet_nfts::Call::clear_all_transfer_approvals { .. } | - pallet_nfts::Call::lock_item_properties { .. } | - pallet_nfts::Call::set_attribute { .. } | - pallet_nfts::Call::force_set_attribute { .. } | - pallet_nfts::Call::clear_attribute { .. } | - pallet_nfts::Call::approve_item_attributes { .. } | - pallet_nfts::Call::cancel_item_attributes_approval { .. } | - pallet_nfts::Call::set_metadata { .. } | - pallet_nfts::Call::clear_metadata { .. } | - pallet_nfts::Call::set_collection_metadata { .. } | - pallet_nfts::Call::clear_collection_metadata { .. } | - pallet_nfts::Call::set_accept_ownership { .. } | - pallet_nfts::Call::set_collection_max_supply { .. 
} | - pallet_nfts::Call::update_mint_settings { .. } | - pallet_nfts::Call::set_price { .. } | - pallet_nfts::Call::buy_item { .. } | - pallet_nfts::Call::pay_tips { .. } | - pallet_nfts::Call::create_swap { .. } | - pallet_nfts::Call::cancel_swap { .. } | - pallet_nfts::Call::claim_swap { .. }, - ) | RuntimeCall::Uniques( - pallet_uniques::Call::create { .. } | - pallet_uniques::Call::force_create { .. } | - pallet_uniques::Call::destroy { .. } | - pallet_uniques::Call::mint { .. } | - pallet_uniques::Call::burn { .. } | - pallet_uniques::Call::transfer { .. } | - pallet_uniques::Call::freeze { .. } | - pallet_uniques::Call::thaw { .. } | - pallet_uniques::Call::freeze_collection { .. } | - pallet_uniques::Call::thaw_collection { .. } | - pallet_uniques::Call::transfer_ownership { .. } | - pallet_uniques::Call::set_team { .. } | - pallet_uniques::Call::approve_transfer { .. } | - pallet_uniques::Call::cancel_approval { .. } | - pallet_uniques::Call::force_item_status { .. } | - pallet_uniques::Call::set_attribute { .. } | - pallet_uniques::Call::clear_attribute { .. } | - pallet_uniques::Call::set_metadata { .. } | - pallet_uniques::Call::clear_metadata { .. } | - pallet_uniques::Call::set_collection_metadata { .. } | - pallet_uniques::Call::clear_collection_metadata { .. } | - pallet_uniques::Call::set_accept_ownership { .. } | - pallet_uniques::Call::set_collection_max_supply { .. } | - pallet_uniques::Call::set_price { .. } | - pallet_uniques::Call::buy_item { .. } - ) - ) - } -} - -pub type Barrier = TrailingSetTopicAsId< - DenyThenTry< - DenyReserveTransferToRelayChain, - ( - TakeWeightCredit, - // Expected responses are OK. - AllowKnownQueryResponses, - // Allow XCMs with some computed origins to pass through. - WithComputedOrigin< - ( - // If the message is one that immediately attempts to pay for execution, then - // allow it. - AllowTopLevelPaidExecutionFrom, - // Parent, its pluralities (i.e. governance bodies), and the Fellows plurality - // get free execution. - AllowExplicitUnpaidExecutionFrom<( - ParentOrParentsPlurality, - FellowsPlurality, - FellowshipSalaryPallet, - AmbassadorSalaryPallet, - )>, - // Subscriptions for version tracking are OK. - AllowSubscriptionsFrom, - ), - UniversalLocation, - ConstU32<8>, - >, - ), - >, ->; - -pub type AssetFeeAsExistentialDepositMultiplierFeeCharger = AssetFeeAsExistentialDepositMultiplier< - Runtime, - WeightToFee, - pallet_assets::BalanceToAssetBalance, - TrustBackedAssetsInstance, ->; - -/// Cases where a remote origin is accepted as trusted Teleporter for a given asset: -/// -/// - DOT with the parent Relay Chain and sibling system parachains; and -/// - Sibling parachains' assets from where they originate (as `ForeignCreators`). -pub type TrustedTeleporters = ( - ConcreteAssetFromSystem, - IsForeignConcreteAsset>>, -); - -pub struct XcmConfig; -impl xcm_executor::Config for XcmConfig { - type RuntimeCall = RuntimeCall; - type XcmSender = XcmRouter; - type AssetTransactor = AssetTransactors; - type OriginConverter = XcmOriginToTransactDispatchOrigin; - // Asset Hub Polkadot does not recognize a reserve location for any asset. This does not prevent - // Asset Hub acting _as_ a reserve location for DOT and assets created under `pallet-assets`. - // For DOT, users must use teleport where allowed (e.g. with the Relay Chain). 
- type IsReserve = (); - type IsTeleporter = TrustedTeleporters; - type UniversalLocation = UniversalLocation; - type Barrier = Barrier; - type Weigher = WeightInfoBounds< - crate::weights::xcm::AssetHubPolkadotXcmWeight, - RuntimeCall, - MaxInstructions, - >; - type Trader = ( - UsingComponents>, - cumulus_primitives_utility::TakeFirstAssetTrader< - AccountId, - AssetFeeAsExistentialDepositMultiplierFeeCharger, - TrustBackedAssetsConvertedConcreteId, - Assets, - cumulus_primitives_utility::XcmFeesTo32ByteAccount< - FungiblesTransactor, - AccountId, - XcmAssetFeesReceiver, - >, - >, - ); - type ResponseHandler = PolkadotXcm; - type AssetTrap = PolkadotXcm; - type AssetClaims = PolkadotXcm; - type SubscriptionService = PolkadotXcm; - type PalletInstancesInfo = AllPalletsWithSystem; - type MaxAssetsIntoHolding = MaxAssetsIntoHolding; - type AssetLocker = (); - type AssetExchanger = (); - type FeeManager = (); - type MessageExporter = (); - type UniversalAliases = Nothing; - type CallDispatcher = WithOriginFilter; - type SafeCallFilter = SafeCallFilter; - type Aliasers = Nothing; -} - -/// Converts a local signed origin into an XCM multilocation. -/// Forms the basis for local origins sending/executing XCMs. -pub type LocalOriginToLocation = SignedToAccountId32; - -parameter_types! { - /// The asset ID for the asset that we use to pay for message delivery fees. - pub FeeAssetId: AssetId = Concrete(DotLocation::get()); - /// The base fee for the message delivery fees. - pub const BaseDeliveryFee: u128 = CENTS.saturating_mul(3); -} - -pub type PriceForParentDelivery = - ExponentialPrice; - -/// The means for routing XCM messages which are not for local execution into the right message -/// queues. -pub type XcmRouter = WithUniqueTopic<( - // Two routers - use UMP to communicate with the relay chain: - cumulus_primitives_utility::ParentAsUmp, - // ..and XCMP to communicate with the sibling chains. - XcmpQueue, -)>; - -impl pallet_xcm::Config for Runtime { - type RuntimeEvent = RuntimeEvent; - // We want to disallow users sending (arbitrary) XCMs from this chain. - type SendXcmOrigin = EnsureXcmOrigin; - type XcmRouter = XcmRouter; - // We support local origins dispatching XCM executions in principle... - type ExecuteXcmOrigin = EnsureXcmOrigin; - // ... but disallow generic XCM execution. As a result only teleports and reserve transfers are - // allowed. 
- type XcmExecuteFilter = Nothing; - type XcmExecutor = XcmExecutor; - type XcmTeleportFilter = Everything; - type XcmReserveTransferFilter = Everything; - type Weigher = WeightInfoBounds< - crate::weights::xcm::AssetHubPolkadotXcmWeight, - RuntimeCall, - MaxInstructions, - >; - type UniversalLocation = UniversalLocation; - type RuntimeOrigin = RuntimeOrigin; - type RuntimeCall = RuntimeCall; - const VERSION_DISCOVERY_QUEUE_SIZE: u32 = 100; - type AdvertisedXcmVersion = pallet_xcm::CurrentXcmVersion; - type Currency = Balances; - type CurrencyMatcher = (); - type TrustedLockers = (); - type SovereignAccountOf = LocationToAccountId; - type MaxLockers = ConstU32<8>; - type WeightInfo = crate::weights::pallet_xcm::WeightInfo; - type AdminOrigin = EnsureRoot; - type MaxRemoteLockConsumers = ConstU32<0>; - type RemoteLockConsumerIdentifier = (); -} - -impl cumulus_pallet_xcm::Config for Runtime { - type RuntimeEvent = RuntimeEvent; - type XcmExecutor = XcmExecutor; -} - -pub type ForeignCreatorsSovereignAccountOf = ( - SiblingParachainConvertsVia, - AccountId32Aliases, - ParentIsPreset, -); - -/// Simple conversion of `u32` into an `AssetId` for use in benchmarking. -pub struct XcmBenchmarkHelper; -#[cfg(feature = "runtime-benchmarks")] -impl pallet_assets::BenchmarkHelper for XcmBenchmarkHelper { - fn create_asset_id_parameter(id: u32) -> MultiLocation { - MultiLocation { parents: 1, interior: X1(Parachain(id)) } - } -} - -#[test] -fn foreign_pallet_has_correct_local_account() { - use sp_core::crypto::{Ss58AddressFormat, Ss58Codec}; - use xcm_executor::traits::ConvertLocation; - - const COLLECTIVES_PARAID: u32 = 1001; - const FELLOWSHIP_SALARY_PALLET_ID: u8 = 64; - let fellowship_salary = - (Parent, Parachain(COLLECTIVES_PARAID), PalletInstance(FELLOWSHIP_SALARY_PALLET_ID)); - let account = LocationToAccountId::convert_location(&fellowship_salary.into()).unwrap(); - let polkadot = Ss58AddressFormat::try_from("polkadot").unwrap(); - let address = Ss58Codec::to_ss58check_with_version(&account, polkadot); - assert_eq!(address, "13w7NdvSR1Af8xsQTArDtZmVvjE8XhWNdL4yed3iFHrUNCnS"); -} diff --git a/cumulus/parachains/runtimes/assets/asset-hub-polkadot/tests/tests.rs b/cumulus/parachains/runtimes/assets/asset-hub-polkadot/tests/tests.rs deleted file mode 100644 index b7e44646ece..00000000000 --- a/cumulus/parachains/runtimes/assets/asset-hub-polkadot/tests/tests.rs +++ /dev/null @@ -1,683 +0,0 @@ -// This file is part of Cumulus. - -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! Tests for the Statemint (Polkadot Assets Hub) chain. 
- -use asset_hub_polkadot_runtime::xcm_config::{ - AssetFeeAsExistentialDepositMultiplierFeeCharger, CheckingAccount, DotLocation, - ForeignCreatorsSovereignAccountOf, LocationToAccountId, TrustBackedAssetsPalletLocation, - XcmConfig, -}; -pub use asset_hub_polkadot_runtime::{ - AllPalletsWithoutSystem, AssetDeposit, Assets, Balances, ExistentialDeposit, ForeignAssets, - ForeignAssetsInstance, MetadataDepositBase, MetadataDepositPerByte, ParachainSystem, Runtime, - RuntimeCall, RuntimeEvent, SessionKeys, System, TrustBackedAssetsInstance, XcmpQueue, -}; -use asset_test_utils::{CollatorSessionKeys, ExtBuilder}; -use codec::{Decode, Encode}; -use cumulus_primitives_utility::ChargeWeightInFungibles; -use frame_support::{ - assert_noop, assert_ok, - traits::fungibles::InspectEnumerable, - weights::{Weight, WeightToFee as WeightToFeeT}, -}; -use parachains_common::{ - polkadot::fee::WeightToFee, AccountId, AssetHubPolkadotAuraId as AuraId, - AssetIdForTrustBackedAssets, Balance, -}; -use sp_runtime::traits::MaybeEquivalence; -use xcm::latest::prelude::*; -use xcm_executor::traits::{Identity, JustTry, WeightTrader}; - -const ALICE: [u8; 32] = [1u8; 32]; -const SOME_ASSET_ADMIN: [u8; 32] = [5u8; 32]; - -type AssetIdForTrustBackedAssetsConvert = - assets_common::AssetIdForTrustBackedAssetsConvert; - -type RuntimeHelper = asset_test_utils::RuntimeHelper; - -fn collator_session_keys() -> CollatorSessionKeys { - CollatorSessionKeys::new( - AccountId::from(ALICE), - AccountId::from(ALICE), - SessionKeys { aura: AuraId::from(sp_core::ed25519::Public::from_raw(ALICE)) }, - ) -} - -#[test] -fn test_asset_xcm_trader() { - ExtBuilder::::default() - .with_collators(vec![AccountId::from(ALICE)]) - .with_session_keys(vec![( - AccountId::from(ALICE), - AccountId::from(ALICE), - SessionKeys { aura: AuraId::from(sp_core::ed25519::Public::from_raw(ALICE)) }, - )]) - .build() - .execute_with(|| { - // We need root origin to create a sufficient asset - let minimum_asset_balance = 333333333_u128; - let local_asset_id = 1; - assert_ok!(Assets::force_create( - RuntimeHelper::root_origin(), - local_asset_id.into(), - AccountId::from(ALICE).into(), - true, - minimum_asset_balance - )); - - // We first mint enough asset for the account to exist for assets - assert_ok!(Assets::mint( - RuntimeHelper::origin_of(AccountId::from(ALICE)), - local_asset_id.into(), - AccountId::from(ALICE).into(), - minimum_asset_balance - )); - - // get asset id as multilocation - let asset_multilocation = - AssetIdForTrustBackedAssetsConvert::convert_back(&local_asset_id).unwrap(); - - // Set Alice as block author, who will receive fees - RuntimeHelper::run_to_block(2, AccountId::from(ALICE)); - - // We are going to buy 400e9 weight - // Because of the ED being higher in kusama's asset hub - // and not to complicate things, we use a little - // bit more of weight - let bought = Weight::from_parts(400_000_000_000u64, 0); - - // Lets calculate amount needed - let asset_amount_needed = - AssetFeeAsExistentialDepositMultiplierFeeCharger::charge_weight_in_fungibles( - local_asset_id, - bought, - ) - .expect("failed to compute"); - - // Lets pay with: asset_amount_needed + asset_amount_extra - let asset_amount_extra = 100_u128; - let asset: MultiAsset = - (asset_multilocation, asset_amount_needed + asset_amount_extra).into(); - - let mut trader = ::Trader::new(); - let ctx = XcmContext { origin: None, message_id: XcmHash::default(), topic: None }; - - // Lets buy_weight and make sure buy_weight does not return an error - let unused_assets = 
trader.buy_weight(bought, asset.into(), &ctx).expect("Expected Ok"); - // Check whether a correct amount of unused assets is returned - assert_ok!( - unused_assets.ensure_contains(&(asset_multilocation, asset_amount_extra).into()) - ); - - // Drop trader - drop(trader); - - // Make sure author(Alice) has received the amount - assert_eq!( - Assets::balance(local_asset_id, AccountId::from(ALICE)), - minimum_asset_balance + asset_amount_needed - ); - - // We also need to ensure the total supply increased - assert_eq!( - Assets::total_supply(local_asset_id), - minimum_asset_balance + asset_amount_needed - ); - }); -} - -#[test] -fn test_asset_xcm_trader_with_refund() { - ExtBuilder::::default() - .with_collators(vec![AccountId::from(ALICE)]) - .with_session_keys(vec![( - AccountId::from(ALICE), - AccountId::from(ALICE), - SessionKeys { aura: AuraId::from(sp_core::ed25519::Public::from_raw(ALICE)) }, - )]) - .build() - .execute_with(|| { - // We need root origin to create a sufficient asset - // We set existential deposit to be identical to the one for Balances first - assert_ok!(Assets::force_create( - RuntimeHelper::root_origin(), - 1.into(), - AccountId::from(ALICE).into(), - true, - ExistentialDeposit::get() - )); - - // We first mint enough asset for the account to exist for assets - assert_ok!(Assets::mint( - RuntimeHelper::origin_of(AccountId::from(ALICE)), - 1.into(), - AccountId::from(ALICE).into(), - ExistentialDeposit::get() - )); - - let mut trader = ::Trader::new(); - let ctx = XcmContext { origin: None, message_id: XcmHash::default(), topic: None }; - - // Set Alice as block author, who will receive fees - RuntimeHelper::run_to_block(2, AccountId::from(ALICE)); - - // We are going to buy 400e9 weight - // Because of the ED being higher in kusama's asset hub - // and not to complicate things, we use a little - // bit more of weight - let bought = Weight::from_parts(400_000_000_000u64, 0); - - let asset_multilocation = AssetIdForTrustBackedAssetsConvert::convert_back(&1).unwrap(); - - // lets calculate amount needed - let amount_bought = WeightToFee::weight_to_fee(&bought); - - let asset: MultiAsset = (asset_multilocation, amount_bought).into(); - - // Make sure buy_weight does not return an error - assert_ok!(trader.buy_weight(bought, asset.clone().into(), &ctx)); - - // Make sure again buy_weight does return an error - // This assert relies on the fact, that we use `TakeFirstAssetTrader` in `WeightTrader` - // tuple chain, which cannot be called twice - assert_noop!(trader.buy_weight(bought, asset.into(), &ctx), XcmError::TooExpensive); - - // We actually use half of the weight - let weight_used = bought / 2; - - // Make sure refurnd works. 
- let amount_refunded = WeightToFee::weight_to_fee(&(bought - weight_used)); - - assert_eq!( - trader.refund_weight(bought - weight_used, &ctx), - Some((asset_multilocation, amount_refunded).into()) - ); - - // Drop trader - drop(trader); - - // We only should have paid for half of the bought weight - let fees_paid = WeightToFee::weight_to_fee(&weight_used); - - assert_eq!( - Assets::balance(1, AccountId::from(ALICE)), - ExistentialDeposit::get() + fees_paid - ); - - // We also need to ensure the total supply increased - assert_eq!(Assets::total_supply(1), ExistentialDeposit::get() + fees_paid); - }); -} - -#[test] -fn test_asset_xcm_trader_refund_not_possible_since_amount_less_than_ed() { - ExtBuilder::::default() - .with_collators(vec![AccountId::from(ALICE)]) - .with_session_keys(vec![( - AccountId::from(ALICE), - AccountId::from(ALICE), - SessionKeys { aura: AuraId::from(sp_core::ed25519::Public::from_raw(ALICE)) }, - )]) - .build() - .execute_with(|| { - // We need root origin to create a sufficient asset - // We set existential deposit to be identical to the one for Balances first - assert_ok!(Assets::force_create( - RuntimeHelper::root_origin(), - 1.into(), - AccountId::from(ALICE).into(), - true, - ExistentialDeposit::get() - )); - - let mut trader = ::Trader::new(); - let ctx = XcmContext { origin: None, message_id: XcmHash::default(), topic: None }; - - // Set Alice as block author, who will receive fees - RuntimeHelper::run_to_block(2, AccountId::from(ALICE)); - - // We are going to buy 50e9 weight - // Because of the ED being higher in kusama's asset hub - // and not to complicate things, we use a little - // bit more of weight - let bought = Weight::from_parts(50_000_000_000u64, 0); - - let asset_multilocation = AssetIdForTrustBackedAssetsConvert::convert_back(&1).unwrap(); - - let amount_bought = WeightToFee::weight_to_fee(&bought); - - assert!( - amount_bought < ExistentialDeposit::get(), - "we are testing what happens when the amount does not exceed ED" - ); - - let asset: MultiAsset = (asset_multilocation, amount_bought).into(); - - // Buy weight should return an error - assert_noop!(trader.buy_weight(bought, asset.into(), &ctx), XcmError::TooExpensive); - - // not credited since the ED is higher than this value - assert_eq!(Assets::balance(1, AccountId::from(ALICE)), 0); - - // We also need to ensure the total supply did not increase - assert_eq!(Assets::total_supply(1), 0); - }); -} - -#[test] -fn test_that_buying_ed_refund_does_not_refund() { - ExtBuilder::::default() - .with_collators(vec![AccountId::from(ALICE)]) - .with_session_keys(vec![( - AccountId::from(ALICE), - AccountId::from(ALICE), - SessionKeys { aura: AuraId::from(sp_core::ed25519::Public::from_raw(ALICE)) }, - )]) - .build() - .execute_with(|| { - // We need root origin to create a sufficient asset - // We set existential deposit to be identical to the one for Balances first - assert_ok!(Assets::force_create( - RuntimeHelper::root_origin(), - 1.into(), - AccountId::from(ALICE).into(), - true, - ExistentialDeposit::get() - )); - - let mut trader = ::Trader::new(); - let ctx = XcmContext { origin: None, message_id: XcmHash::default(), topic: None }; - - // Set Alice as block author, who will receive fees - RuntimeHelper::run_to_block(2, AccountId::from(ALICE)); - - // We are gonna buy ED - let bought = Weight::from_parts(ExistentialDeposit::get().try_into().unwrap(), 0); - - let asset_multilocation = AssetIdForTrustBackedAssetsConvert::convert_back(&1).unwrap(); - - let amount_bought = 
WeightToFee::weight_to_fee(&bought); - - assert!( - amount_bought < ExistentialDeposit::get(), - "we are testing what happens when the amount does not exceed ED" - ); - - // We know we will have to buy at least ED, so lets make sure first it will - // fail with a payment of less than ED - let asset: MultiAsset = (asset_multilocation, amount_bought).into(); - assert_noop!(trader.buy_weight(bought, asset.into(), &ctx), XcmError::TooExpensive); - - // Now lets buy ED at least - let asset: MultiAsset = (asset_multilocation, ExistentialDeposit::get()).into(); - - // Buy weight should work - assert_ok!(trader.buy_weight(bought, asset.into(), &ctx)); - - // Should return None. We have a specific check making sure we dont go below ED for - // drop payment - assert_eq!(trader.refund_weight(bought, &ctx), None); - - // Drop trader - drop(trader); - - // Make sure author(Alice) has received the amount - assert_eq!(Assets::balance(1, AccountId::from(ALICE)), ExistentialDeposit::get()); - - // We also need to ensure the total supply increased - assert_eq!(Assets::total_supply(1), ExistentialDeposit::get()); - }); -} - -#[test] -fn test_asset_xcm_trader_not_possible_for_non_sufficient_assets() { - ExtBuilder::::default() - .with_collators(vec![AccountId::from(ALICE)]) - .with_session_keys(vec![( - AccountId::from(ALICE), - AccountId::from(ALICE), - SessionKeys { aura: AuraId::from(sp_core::ed25519::Public::from_raw(ALICE)) }, - )]) - .build() - .execute_with(|| { - // Create a non-sufficient asset - let minimum_asset_balance = 1_000_000_u128; - assert_ok!(Assets::force_create( - RuntimeHelper::root_origin(), - 1.into(), - AccountId::from(ALICE).into(), - false, - minimum_asset_balance - )); - - // We first mint enough asset for the account to exist for assets - assert_ok!(Assets::mint( - RuntimeHelper::origin_of(AccountId::from(ALICE)), - 1.into(), - AccountId::from(ALICE).into(), - minimum_asset_balance - )); - - let mut trader = ::Trader::new(); - let ctx = XcmContext { origin: None, message_id: XcmHash::default(), topic: None }; - - // Set Alice as block author, who will receive fees - RuntimeHelper::run_to_block(2, AccountId::from(ALICE)); - - // We are going to buy 400e9 weight - // Because of the ED being higher in kusama's asset hub - // and not to complicate things, we use a little - // bit more of weight - let bought = Weight::from_parts(400_000_000_000u64, 0); - - // lets calculate amount needed - let asset_amount_needed = WeightToFee::weight_to_fee(&bought); - - let asset_multilocation = AssetIdForTrustBackedAssetsConvert::convert_back(&1).unwrap(); - - let asset: MultiAsset = (asset_multilocation, asset_amount_needed).into(); - - // Make sure again buy_weight does return an error - assert_noop!(trader.buy_weight(bought, asset.into(), &ctx), XcmError::TooExpensive); - - // Drop trader - drop(trader); - - // Make sure author(Alice) has NOT received the amount - assert_eq!(Assets::balance(1, AccountId::from(ALICE)), minimum_asset_balance); - - // We also need to ensure the total supply NOT increased - assert_eq!(Assets::total_supply(1), minimum_asset_balance); - }); -} - -#[test] -fn test_assets_balances_api_works() { - use assets_common::runtime_api::runtime_decl_for_fungibles_api::FungiblesApi; - - ExtBuilder::::default() - .with_collators(vec![AccountId::from(ALICE)]) - .with_session_keys(vec![( - AccountId::from(ALICE), - AccountId::from(ALICE), - SessionKeys { aura: AuraId::from(sp_core::ed25519::Public::from_raw(ALICE)) }, - )]) - .build() - .execute_with(|| { - let local_asset_id = 1; - 
let foreign_asset_id_multilocation = - MultiLocation { parents: 1, interior: X2(Parachain(1234), GeneralIndex(12345)) }; - - // check before - assert_eq!(Assets::balance(local_asset_id, AccountId::from(ALICE)), 0); - assert_eq!( - ForeignAssets::balance(foreign_asset_id_multilocation, AccountId::from(ALICE)), - 0 - ); - assert_eq!(Balances::free_balance(AccountId::from(ALICE)), 0); - assert!(Runtime::query_account_balances(AccountId::from(ALICE)) - .unwrap() - .try_as::() - .unwrap() - .is_none()); - - // Drip some balance - use frame_support::traits::fungible::Mutate; - let some_currency = ExistentialDeposit::get(); - Balances::mint_into(&AccountId::from(ALICE), some_currency).unwrap(); - - // We need root origin to create a sufficient asset - let minimum_asset_balance = 3333333_u128; - assert_ok!(Assets::force_create( - RuntimeHelper::root_origin(), - local_asset_id.into(), - AccountId::from(ALICE).into(), - true, - minimum_asset_balance - )); - - // We first mint enough asset for the account to exist for assets - assert_ok!(Assets::mint( - RuntimeHelper::origin_of(AccountId::from(ALICE)), - local_asset_id.into(), - AccountId::from(ALICE).into(), - minimum_asset_balance - )); - - // create foreign asset - let foreign_asset_minimum_asset_balance = 3333333_u128; - assert_ok!(ForeignAssets::force_create( - RuntimeHelper::root_origin(), - foreign_asset_id_multilocation, - AccountId::from(SOME_ASSET_ADMIN).into(), - false, - foreign_asset_minimum_asset_balance - )); - - // We first mint enough asset for the account to exist for assets - assert_ok!(ForeignAssets::mint( - RuntimeHelper::origin_of(AccountId::from(SOME_ASSET_ADMIN)), - foreign_asset_id_multilocation, - AccountId::from(ALICE).into(), - 6 * foreign_asset_minimum_asset_balance - )); - - // check after - assert_eq!( - Assets::balance(local_asset_id, AccountId::from(ALICE)), - minimum_asset_balance - ); - assert_eq!( - ForeignAssets::balance(foreign_asset_id_multilocation, AccountId::from(ALICE)), - 6 * minimum_asset_balance - ); - assert_eq!(Balances::free_balance(AccountId::from(ALICE)), some_currency); - - let result: MultiAssets = Runtime::query_account_balances(AccountId::from(ALICE)) - .unwrap() - .try_into() - .unwrap(); - assert_eq!(result.len(), 3); - - // check currency - assert!(result.inner().iter().any(|asset| asset.eq( - &assets_common::fungible_conversion::convert_balance::( - some_currency - ) - .unwrap() - ))); - // check trusted asset - assert!(result.inner().iter().any(|asset| asset.eq(&( - AssetIdForTrustBackedAssetsConvert::convert_back(&local_asset_id).unwrap(), - minimum_asset_balance - ) - .into()))); - // check foreign asset - assert!(result.inner().iter().any(|asset| asset.eq(&( - Identity::convert_back(&foreign_asset_id_multilocation).unwrap(), - 6 * foreign_asset_minimum_asset_balance - ) - .into()))); - }); -} - -asset_test_utils::include_teleports_for_native_asset_works!( - Runtime, - AllPalletsWithoutSystem, - XcmConfig, - CheckingAccount, - WeightToFee, - ParachainSystem, - collator_session_keys(), - ExistentialDeposit::get(), - Box::new(|runtime_event_encoded: Vec| { - match RuntimeEvent::decode(&mut &runtime_event_encoded[..]) { - Ok(RuntimeEvent::PolkadotXcm(event)) => Some(event), - _ => None, - } - }), - 1000 -); - -asset_test_utils::include_teleports_for_foreign_assets_works!( - Runtime, - AllPalletsWithoutSystem, - XcmConfig, - CheckingAccount, - WeightToFee, - ParachainSystem, - ForeignCreatorsSovereignAccountOf, - ForeignAssetsInstance, - asset_test_utils::CollatorSessionKeys::new( - 
AccountId::from(ALICE), - AccountId::from(ALICE), - SessionKeys { aura: AuraId::from(sp_core::ed25519::Public::from_raw(ALICE)) } - ), - ExistentialDeposit::get(), - Box::new(|runtime_event_encoded: Vec| { - match RuntimeEvent::decode(&mut &runtime_event_encoded[..]) { - Ok(RuntimeEvent::PolkadotXcm(event)) => Some(event), - _ => None, - } - }), - Box::new(|runtime_event_encoded: Vec| { - match RuntimeEvent::decode(&mut &runtime_event_encoded[..]) { - Ok(RuntimeEvent::XcmpQueue(event)) => Some(event), - _ => None, - } - }) -); - -asset_test_utils::include_asset_transactor_transfer_with_local_consensus_currency_works!( - Runtime, - XcmConfig, - collator_session_keys(), - ExistentialDeposit::get(), - Box::new(|| { - assert!(Assets::asset_ids().collect::>().is_empty()); - assert!(ForeignAssets::asset_ids().collect::>().is_empty()); - }), - Box::new(|| { - assert!(Assets::asset_ids().collect::>().is_empty()); - assert!(ForeignAssets::asset_ids().collect::>().is_empty()); - }) -); - -asset_test_utils::include_asset_transactor_transfer_with_pallet_assets_instance_works!( - asset_transactor_transfer_with_trust_backed_assets_works, - Runtime, - XcmConfig, - TrustBackedAssetsInstance, - AssetIdForTrustBackedAssets, - AssetIdForTrustBackedAssetsConvert, - collator_session_keys(), - ExistentialDeposit::get(), - 12345, - Box::new(|| { - assert!(ForeignAssets::asset_ids().collect::>().is_empty()); - }), - Box::new(|| { - assert!(ForeignAssets::asset_ids().collect::>().is_empty()); - }) -); - -asset_test_utils::include_asset_transactor_transfer_with_pallet_assets_instance_works!( - asset_transactor_transfer_with_foreign_assets_works, - Runtime, - XcmConfig, - ForeignAssetsInstance, - MultiLocation, - JustTry, - asset_test_utils::CollatorSessionKeys::new( - AccountId::from(ALICE), - AccountId::from(ALICE), - SessionKeys { aura: AuraId::from(sp_core::ed25519::Public::from_raw(ALICE)) } - ), - ExistentialDeposit::get(), - MultiLocation { parents: 1, interior: X2(Parachain(1313), GeneralIndex(12345)) }, - Box::new(|| { - assert!(Assets::asset_ids().collect::>().is_empty()); - }), - Box::new(|| { - assert!(Assets::asset_ids().collect::>().is_empty()); - }) -); - -asset_test_utils::include_create_and_manage_foreign_assets_for_local_consensus_parachain_assets_works!( - Runtime, - XcmConfig, - WeightToFee, - ForeignCreatorsSovereignAccountOf, - ForeignAssetsInstance, - MultiLocation, - JustTry, - asset_test_utils::CollatorSessionKeys::new( - AccountId::from(ALICE), - AccountId::from(ALICE), - SessionKeys { aura: AuraId::from(sp_core::ed25519::Public::from_raw(ALICE)) } - ), - ExistentialDeposit::get(), - AssetDeposit::get(), - MetadataDepositBase::get(), - MetadataDepositPerByte::get(), - Box::new(|pallet_asset_call| RuntimeCall::ForeignAssets(pallet_asset_call).encode()), - Box::new(|runtime_event_encoded: Vec| { - match RuntimeEvent::decode(&mut &runtime_event_encoded[..]) { - Ok(RuntimeEvent::ForeignAssets(pallet_asset_event)) => Some(pallet_asset_event), - _ => None, - } - }), - Box::new(|| { - assert!(Assets::asset_ids().collect::>().is_empty()); - assert!(ForeignAssets::asset_ids().collect::>().is_empty()); - }), - Box::new(|| { - assert!(Assets::asset_ids().collect::>().is_empty()); - assert_eq!(ForeignAssets::asset_ids().collect::>().len(), 1); - }) -); - -#[test] -fn reserve_transfer_native_asset_to_non_teleport_para_works() { - asset_test_utils::test_cases::reserve_transfer_native_asset_to_non_teleport_para_works::< - Runtime, - AllPalletsWithoutSystem, - XcmConfig, - ParachainSystem, - XcmpQueue, - 
LocationToAccountId, - >( - collator_session_keys(), - ExistentialDeposit::get(), - AccountId::from(ALICE), - Box::new(|runtime_event_encoded: Vec| { - match RuntimeEvent::decode(&mut &runtime_event_encoded[..]) { - Ok(RuntimeEvent::PolkadotXcm(event)) => Some(event), - _ => None, - } - }), - Box::new(|runtime_event_encoded: Vec| { - match RuntimeEvent::decode(&mut &runtime_event_encoded[..]) { - Ok(RuntimeEvent::XcmpQueue(event)) => Some(event), - _ => None, - } - }), - WeightLimit::Unlimited, - ); -} diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/Cargo.toml b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/Cargo.toml deleted file mode 100644 index 2cd002b1c60..00000000000 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/Cargo.toml +++ /dev/null @@ -1,192 +0,0 @@ -[package] -name = "bridge-hub-kusama-runtime" -version = "0.1.0" -authors.workspace = true -edition.workspace = true -description = "Kusama's BridgeHub parachain runtime" -license = "Apache-2.0" - -[build-dependencies] -substrate-wasm-builder = { path = "../../../../../substrate/utils/wasm-builder", optional = true } - -[dependencies] -codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] } -hex-literal = { version = "0.4.1" } -log = { version = "0.4.20", default-features = false } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -serde = { version = "1.0.188", optional = true, features = ["derive"] } -smallvec = "1.11.0" - -# Substrate -frame-benchmarking = { path = "../../../../../substrate/frame/benchmarking", default-features = false, optional = true} -frame-executive = { path = "../../../../../substrate/frame/executive", default-features = false} -frame-support = { path = "../../../../../substrate/frame/support", default-features = false} -frame-system = { path = "../../../../../substrate/frame/system", default-features = false} -frame-system-benchmarking = { path = "../../../../../substrate/frame/system/benchmarking", default-features = false, optional = true} -frame-system-rpc-runtime-api = { path = "../../../../../substrate/frame/system/rpc/runtime-api", default-features = false} -frame-try-runtime = { path = "../../../../../substrate/frame/try-runtime", default-features = false, optional = true} -pallet-aura = { path = "../../../../../substrate/frame/aura", default-features = false} -pallet-authorship = { path = "../../../../../substrate/frame/authorship", default-features = false} -pallet-balances = { path = "../../../../../substrate/frame/balances", default-features = false} -pallet-multisig = { path = "../../../../../substrate/frame/multisig", default-features = false} -pallet-session = { path = "../../../../../substrate/frame/session", default-features = false} -pallet-timestamp = { path = "../../../../../substrate/frame/timestamp", default-features = false} -pallet-transaction-payment = { path = "../../../../../substrate/frame/transaction-payment", default-features = false} -pallet-transaction-payment-rpc-runtime-api = { path = "../../../../../substrate/frame/transaction-payment/rpc/runtime-api", default-features = false} -pallet-utility = { path = "../../../../../substrate/frame/utility", default-features = false} -sp-api = { path = "../../../../../substrate/primitives/api", default-features = false} -sp-block-builder = { path = "../../../../../substrate/primitives/block-builder", default-features = false} -sp-consensus-aura = { path = 
"../../../../../substrate/primitives/consensus/aura", default-features = false} -sp-core = { path = "../../../../../substrate/primitives/core", default-features = false} -sp-genesis-builder = { path = "../../../../../substrate/primitives/genesis-builder", default-features = false } -sp-inherents = { path = "../../../../../substrate/primitives/inherents", default-features = false} -sp-io = { path = "../../../../../substrate/primitives/io", default-features = false} -sp-offchain = { path = "../../../../../substrate/primitives/offchain", default-features = false} -sp-runtime = { path = "../../../../../substrate/primitives/runtime", default-features = false} -sp-session = { path = "../../../../../substrate/primitives/session", default-features = false} -sp-std = { path = "../../../../../substrate/primitives/std", default-features = false} -sp-storage = { path = "../../../../../substrate/primitives/storage", default-features = false} -sp-transaction-pool = { path = "../../../../../substrate/primitives/transaction-pool", default-features = false} -sp-version = { path = "../../../../../substrate/primitives/version", default-features = false} - -# Polkadot -pallet-xcm = { path = "../../../../../polkadot/xcm/pallet-xcm", default-features = false} -pallet-xcm-benchmarks = { path = "../../../../../polkadot/xcm/pallet-xcm-benchmarks", default-features = false, optional = true } -polkadot-core-primitives = { path = "../../../../../polkadot/core-primitives", default-features = false} -polkadot-parachain-primitives = { path = "../../../../../polkadot/parachain", default-features = false} -polkadot-runtime-common = { path = "../../../../../polkadot/runtime/common", default-features = false} -xcm = { package = "staging-xcm", path = "../../../../../polkadot/xcm", default-features = false} -xcm-builder = { package = "staging-xcm-builder", path = "../../../../../polkadot/xcm/xcm-builder", default-features = false} -xcm-executor = { package = "staging-xcm-executor", path = "../../../../../polkadot/xcm/xcm-executor", default-features = false} - -# Cumulus -cumulus-pallet-aura-ext = { path = "../../../../pallets/aura-ext", default-features = false } -pallet-message-queue = { path = "../../../../../substrate/frame/message-queue", default-features = false } -cumulus-pallet-dmp-queue = { path = "../../../../pallets/dmp-queue", default-features = false } -cumulus-pallet-parachain-system = { path = "../../../../pallets/parachain-system", default-features = false, features = ["parameterized-consensus-hook",] } -cumulus-pallet-session-benchmarking = { path = "../../../../pallets/session-benchmarking", default-features = false} -cumulus-pallet-xcm = { path = "../../../../pallets/xcm", default-features = false } -cumulus-pallet-xcmp-queue = { path = "../../../../pallets/xcmp-queue", default-features = false } -cumulus-primitives-core = { path = "../../../../primitives/core", default-features = false } -cumulus-primitives-utility = { path = "../../../../primitives/utility", default-features = false } -pallet-collator-selection = { path = "../../../../pallets/collator-selection", default-features = false } -parachain-info = { package = "staging-parachain-info", path = "../../../pallets/parachain-info", default-features = false } -parachains-common = { path = "../../../common", default-features = false } - -[dev-dependencies] -bridge-hub-test-utils = { path = "../test-utils" } - -[features] -default = [ "std" ] -std = [ - "codec/std", - "cumulus-pallet-aura-ext/std", - "cumulus-pallet-dmp-queue/std", - 
"cumulus-pallet-parachain-system/std", - "cumulus-pallet-session-benchmarking/std", - "cumulus-pallet-xcm/std", - "cumulus-pallet-xcmp-queue/std", - "cumulus-primitives-core/std", - "cumulus-primitives-utility/std", - "frame-benchmarking?/std", - "frame-executive/std", - "frame-support/std", - "frame-system-benchmarking?/std", - "frame-system-rpc-runtime-api/std", - "frame-system/std", - "frame-try-runtime?/std", - "log/std", - "pallet-aura/std", - "pallet-authorship/std", - "pallet-balances/std", - "pallet-collator-selection/std", - "pallet-message-queue/std", - "pallet-multisig/std", - "pallet-session/std", - "pallet-timestamp/std", - "pallet-transaction-payment-rpc-runtime-api/std", - "pallet-transaction-payment/std", - "pallet-utility/std", - "pallet-xcm-benchmarks?/std", - "pallet-xcm/std", - "parachain-info/std", - "parachains-common/std", - "polkadot-core-primitives/std", - "polkadot-parachain-primitives/std", - "polkadot-runtime-common/std", - "scale-info/std", - "serde", - "sp-api/std", - "sp-block-builder/std", - "sp-consensus-aura/std", - "sp-core/std", - "sp-genesis-builder/std", - "sp-inherents/std", - "sp-io/std", - "sp-offchain/std", - "sp-runtime/std", - "sp-session/std", - "sp-std/std", - "sp-storage/std", - "sp-transaction-pool/std", - "sp-version/std", - "substrate-wasm-builder", - "xcm-builder/std", - "xcm-executor/std", - "xcm/std", -] - -runtime-benchmarks = [ - "cumulus-pallet-dmp-queue/runtime-benchmarks", - "cumulus-pallet-parachain-system/runtime-benchmarks", - "cumulus-pallet-session-benchmarking/runtime-benchmarks", - "cumulus-pallet-xcmp-queue/runtime-benchmarks", - "cumulus-primitives-core/runtime-benchmarks", - "cumulus-primitives-utility/runtime-benchmarks", - "frame-benchmarking/runtime-benchmarks", - "frame-support/runtime-benchmarks", - "frame-system-benchmarking/runtime-benchmarks", - "frame-system/runtime-benchmarks", - "pallet-balances/runtime-benchmarks", - "pallet-collator-selection/runtime-benchmarks", - "pallet-message-queue/runtime-benchmarks", - "pallet-multisig/runtime-benchmarks", - "pallet-timestamp/runtime-benchmarks", - "pallet-utility/runtime-benchmarks", - "pallet-xcm-benchmarks/runtime-benchmarks", - "pallet-xcm/runtime-benchmarks", - "parachains-common/runtime-benchmarks", - "polkadot-parachain-primitives/runtime-benchmarks", - "polkadot-runtime-common/runtime-benchmarks", - "sp-runtime/runtime-benchmarks", - "xcm-builder/runtime-benchmarks", - "xcm-executor/runtime-benchmarks", -] - -try-runtime = [ - "cumulus-pallet-aura-ext/try-runtime", - "cumulus-pallet-dmp-queue/try-runtime", - "cumulus-pallet-parachain-system/try-runtime", - "cumulus-pallet-xcm/try-runtime", - "cumulus-pallet-xcmp-queue/try-runtime", - "frame-executive/try-runtime", - "frame-support/try-runtime", - "frame-system/try-runtime", - "frame-try-runtime/try-runtime", - "pallet-aura/try-runtime", - "pallet-authorship/try-runtime", - "pallet-balances/try-runtime", - "pallet-collator-selection/try-runtime", - "pallet-message-queue/try-runtime", - "pallet-multisig/try-runtime", - "pallet-session/try-runtime", - "pallet-timestamp/try-runtime", - "pallet-transaction-payment/try-runtime", - "pallet-utility/try-runtime", - "pallet-xcm/try-runtime", - "parachain-info/try-runtime", - "polkadot-runtime-common/try-runtime", - "sp-runtime/try-runtime", -] - -experimental = [ "pallet-aura/experimental" ] diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/build.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/build.rs deleted file mode 100644 index 
60f8a125129..00000000000 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/build.rs +++ /dev/null @@ -1,26 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -#[cfg(feature = "std")] -fn main() { - substrate_wasm_builder::WasmBuilder::new() - .with_current_project() - .export_heap_base() - .import_memory() - .build() -} - -#[cfg(not(feature = "std"))] -fn main() {} diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/lib.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/lib.rs deleted file mode 100644 index a511791d347..00000000000 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/lib.rs +++ /dev/null @@ -1,872 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Cumulus. - -// Cumulus is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Cumulus is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Cumulus. If not, see . - -#![cfg_attr(not(feature = "std"), no_std)] -// `construct_runtime!` does a lot of recursion and requires us to increase the limit to 256. -#![recursion_limit = "256"] - -// Make the WASM binary available. 
-#[cfg(feature = "std")] -include!(concat!(env!("OUT_DIR"), "/wasm_binary.rs")); - -mod weights; -pub mod xcm_config; - -use cumulus_pallet_parachain_system::RelayNumberStrictlyIncreases; -use polkadot_runtime_common::xcm_sender::NoPriceForMessageDelivery; -use sp_api::impl_runtime_apis; -use sp_core::{crypto::KeyTypeId, OpaqueMetadata}; -use sp_runtime::{ - create_runtime_str, generic, impl_opaque_keys, - traits::{AccountIdLookup, BlakeTwo256, Block as BlockT}, - transaction_validity::{TransactionSource, TransactionValidity}, - ApplyExtrinsicResult, -}; - -use sp_std::prelude::*; -#[cfg(feature = "std")] -use sp_version::NativeVersion; -use sp_version::RuntimeVersion; - -use cumulus_primitives_core::{AggregateMessageOrigin, ParaId}; -use frame_support::{ - construct_runtime, - dispatch::DispatchClass, - genesis_builder_helper::{build_config, create_default_config}, - parameter_types, - traits::{ - ConstBool, ConstU32, ConstU64, ConstU8, EitherOfDiverse, Everything, TransformOrigin, - }, - weights::{ConstantMultiplier, Weight}, - PalletId, -}; -use frame_system::{ - limits::{BlockLength, BlockWeights}, - EnsureRoot, -}; -use pallet_xcm::{EnsureXcm, IsVoiceOfBody}; -pub use sp_consensus_aura::sr25519::AuthorityId as AuraId; -pub use sp_runtime::{MultiAddress, Perbill, Permill}; -use xcm_config::{FellowshipLocation, GovernanceLocation, XcmOriginToTransactDispatchOrigin}; - -#[cfg(any(feature = "std", test))] -pub use sp_runtime::BuildStorage; - -use polkadot_runtime_common::{BlockHashCount, SlowAdjustingFeeUpdate}; - -use weights::{BlockExecutionWeight, ExtrinsicBaseWeight, RocksDbWeight}; - -use parachains_common::{ - impls::DealWithFees, - kusama::{consensus::*, currency::*, fee::WeightToFee}, - message_queue::{NarrowOriginToSibling, ParaIdToSibling}, - AccountId, Balance, BlockNumber, Hash, Header, Nonce, Signature, AVERAGE_ON_INITIALIZE_RATIO, - HOURS, MAXIMUM_BLOCK_WEIGHT, NORMAL_DISPATCH_RATIO, SLOT_DURATION, -}; - -// XCM Imports -use xcm::latest::prelude::BodyId; - -/// The address format for describing accounts. -pub type Address = MultiAddress; - -/// Block type as expected by this runtime. -pub type Block = generic::Block; - -/// A Block signed with a Justification -pub type SignedBlock = generic::SignedBlock; - -/// BlockId type as expected by this runtime. -pub type BlockId = generic::BlockId; - -/// The SignedExtension to the basic transaction logic. -pub type SignedExtra = ( - frame_system::CheckNonZeroSender, - frame_system::CheckSpecVersion, - frame_system::CheckTxVersion, - frame_system::CheckGenesis, - frame_system::CheckEra, - frame_system::CheckNonce, - frame_system::CheckWeight, - pallet_transaction_payment::ChargeTransactionPayment, -); - -/// Unchecked extrinsic type as expected by this runtime. -pub type UncheckedExtrinsic = - generic::UncheckedExtrinsic; - -/// Migrations to apply on runtime upgrade. -pub type Migrations = ( - // unreleased - pallet_collator_selection::migration::v1::MigrateToV1, -); - -/// Executive: handles dispatch to the various modules. -pub type Executive = frame_executive::Executive< - Runtime, - Block, - frame_system::ChainContext, - Runtime, - AllPalletsWithSystem, - Migrations, ->; - -impl_opaque_keys! 
{ - pub struct SessionKeys { - pub aura: Aura, - } -} - -#[sp_version::runtime_version] -pub const VERSION: RuntimeVersion = RuntimeVersion { - spec_name: create_runtime_str!("bridge-hub-kusama"), - impl_name: create_runtime_str!("bridge-hub-kusama"), - authoring_version: 1, - spec_version: 1_004_000, - impl_version: 0, - apis: RUNTIME_API_VERSIONS, - transaction_version: 3, - state_version: 1, -}; - -/// The version information used to identify this runtime when compiled natively. -#[cfg(feature = "std")] -pub fn native_version() -> NativeVersion { - NativeVersion { runtime_version: VERSION, can_author_with: Default::default() } -} - -parameter_types! { - pub const Version: RuntimeVersion = VERSION; - pub RuntimeBlockLength: BlockLength = - BlockLength::max_with_normal_ratio(5 * 1024 * 1024, NORMAL_DISPATCH_RATIO); - pub RuntimeBlockWeights: BlockWeights = BlockWeights::builder() - .base_block(BlockExecutionWeight::get()) - .for_class(DispatchClass::all(), |weights| { - weights.base_extrinsic = ExtrinsicBaseWeight::get(); - }) - .for_class(DispatchClass::Normal, |weights| { - weights.max_total = Some(NORMAL_DISPATCH_RATIO * MAXIMUM_BLOCK_WEIGHT); - }) - .for_class(DispatchClass::Operational, |weights| { - weights.max_total = Some(MAXIMUM_BLOCK_WEIGHT); - // Operational transactions have some extra reserved space, so that they - // are included even if block reached `MAXIMUM_BLOCK_WEIGHT`. - weights.reserved = Some( - MAXIMUM_BLOCK_WEIGHT - NORMAL_DISPATCH_RATIO * MAXIMUM_BLOCK_WEIGHT - ); - }) - .avg_block_initialization(AVERAGE_ON_INITIALIZE_RATIO) - .build_or_panic(); - pub const SS58Prefix: u8 = 2; -} - -// Configure FRAME pallets to include in runtime. - -impl frame_system::Config for Runtime { - /// The identifier used to distinguish between accounts. - type AccountId = AccountId; - /// The aggregated dispatch type that is available for extrinsics. - type RuntimeCall = RuntimeCall; - /// The lookup mechanism to get account ID from whatever is passed in dispatchers. - type Lookup = AccountIdLookup; - /// The index type for storing how many extrinsics an account has signed. - type Nonce = Nonce; - /// The type for hashing blocks and tries. - type Hash = Hash; - /// The hashing algorithm used. - type Hashing = BlakeTwo256; - /// The block type. - type Block = Block; - /// The ubiquitous event type. - type RuntimeEvent = RuntimeEvent; - /// The ubiquitous origin type. - type RuntimeOrigin = RuntimeOrigin; - /// Maximum number of block number to block hash mappings to keep (oldest pruned first). - type BlockHashCount = BlockHashCount; - /// Runtime version. - type Version = Version; - /// Converts a module to an index of this module in the runtime. - type PalletInfo = PalletInfo; - /// The data to be stored in an account. - type AccountData = pallet_balances::AccountData; - /// What to do if a new account is created. - type OnNewAccount = (); - /// What to do if an account is fully reaped from the system. - type OnKilledAccount = (); - /// The weight of database operations that the runtime can invoke. - type DbWeight = RocksDbWeight; - /// The basic call filter to use in dispatchable. - type BaseCallFilter = Everything; - /// Weight information for the extrinsics of this pallet. - type SystemWeightInfo = weights::frame_system::WeightInfo; - /// Block & extrinsics weights: base values and limits. - type BlockWeights = RuntimeBlockWeights; - /// The maximum length of a block (in bytes). 
- type BlockLength = RuntimeBlockLength; - type SS58Prefix = SS58Prefix; - /// The action to take on a Runtime Upgrade - type OnSetCode = cumulus_pallet_parachain_system::ParachainSetCode; - type MaxConsumers = ConstU32<16>; -} - -impl pallet_timestamp::Config for Runtime { - /// A timestamp: milliseconds since the unix epoch. - type Moment = u64; - type OnTimestampSet = Aura; - type MinimumPeriod = ConstU64<{ SLOT_DURATION / 2 }>; - type WeightInfo = weights::pallet_timestamp::WeightInfo; -} - -impl pallet_authorship::Config for Runtime { - type FindAuthor = pallet_session::FindAccountFromAuthorIndex; - type EventHandler = (CollatorSelection,); -} - -parameter_types! { - pub const ExistentialDeposit: Balance = EXISTENTIAL_DEPOSIT; -} - -impl pallet_balances::Config for Runtime { - /// The type for recording an account's balance. - type Balance = Balance; - type DustRemoval = (); - /// The ubiquitous event type. - type RuntimeEvent = RuntimeEvent; - type ExistentialDeposit = ExistentialDeposit; - type AccountStore = System; - type WeightInfo = weights::pallet_balances::WeightInfo; - type MaxLocks = ConstU32<50>; - type MaxReserves = ConstU32<50>; - type ReserveIdentifier = [u8; 8]; - type RuntimeHoldReason = RuntimeHoldReason; - type RuntimeFreezeReason = RuntimeFreezeReason; - type FreezeIdentifier = (); - type MaxHolds = ConstU32<0>; - type MaxFreezes = ConstU32<0>; -} - -parameter_types! { - /// Relay Chain `TransactionByteFee` / 10 - pub const TransactionByteFee: Balance = MILLICENTS; -} - -impl pallet_transaction_payment::Config for Runtime { - type RuntimeEvent = RuntimeEvent; - type OnChargeTransaction = - pallet_transaction_payment::CurrencyAdapter>; - type OperationalFeeMultiplier = ConstU8<5>; - type WeightToFee = WeightToFee; - type LengthToFee = ConstantMultiplier; - type FeeMultiplierUpdate = SlowAdjustingFeeUpdate; -} - -parameter_types! { - pub const ReservedXcmpWeight: Weight = MAXIMUM_BLOCK_WEIGHT.saturating_div(4); - pub const ReservedDmpWeight: Weight = MAXIMUM_BLOCK_WEIGHT.saturating_div(4); -} - -impl cumulus_pallet_parachain_system::Config for Runtime { - type WeightInfo = weights::cumulus_pallet_parachain_system::WeightInfo; - type RuntimeEvent = RuntimeEvent; - type OnSystemEvent = (); - type SelfParaId = parachain_info::Pallet; - type OutboundXcmpMessageSource = XcmpQueue; - type DmpQueue = frame_support::traits::EnqueueWithOrigin; - type ReservedDmpWeight = ReservedDmpWeight; - type XcmpMessageHandler = XcmpQueue; - type ReservedXcmpWeight = ReservedXcmpWeight; - type CheckAssociatedRelayNumber = RelayNumberStrictlyIncreases; - type ConsensusHook = cumulus_pallet_aura_ext::FixedVelocityConsensusHook< - Runtime, - RELAY_CHAIN_SLOT_DURATION_MILLIS, - BLOCK_PROCESSING_VELOCITY, - UNINCLUDED_SEGMENT_CAPACITY, - >; -} - -impl parachain_info::Config for Runtime {} - -parameter_types! 
{ - pub MessageQueueServiceWeight: Weight = Perbill::from_percent(35) * RuntimeBlockWeights::get().max_block; -} - -impl pallet_message_queue::Config for Runtime { - type RuntimeEvent = RuntimeEvent; - type WeightInfo = weights::pallet_message_queue::WeightInfo; - #[cfg(feature = "runtime-benchmarks")] - type MessageProcessor = pallet_message_queue::mock_helpers::NoopMessageProcessor< - cumulus_primitives_core::AggregateMessageOrigin, - >; - #[cfg(not(feature = "runtime-benchmarks"))] - type MessageProcessor = xcm_builder::ProcessXcmMessage< - AggregateMessageOrigin, - xcm_executor::XcmExecutor, - RuntimeCall, - >; - type Size = u32; - // The XCMP queue pallet is only ever able to handle the `Sibling(ParaId)` origin: - type QueueChangeHandler = NarrowOriginToSibling; - type QueuePausedQuery = NarrowOriginToSibling; - type HeapSize = sp_core::ConstU32<{ 64 * 1024 }>; - type MaxStale = sp_core::ConstU32<8>; - type ServiceWeight = MessageQueueServiceWeight; -} - -impl cumulus_pallet_aura_ext::Config for Runtime {} - -parameter_types! { - // Fellows pluralistic body. - pub const FellowsBodyId: BodyId = BodyId::Technical; -} - -/// Privileged origin that represents Root or Fellows pluralistic body. -pub type RootOrFellows = EitherOfDiverse< - EnsureRoot, - EnsureXcm>, ->; - -impl cumulus_pallet_xcmp_queue::Config for Runtime { - type RuntimeEvent = RuntimeEvent; - type ChannelInfo = ParachainSystem; - type VersionWrapper = PolkadotXcm; - // Enqueue XCMP messages from siblings for later processing. - type XcmpQueue = TransformOrigin; - type MaxInboundSuspended = sp_core::ConstU32<1_000>; - type ControllerOrigin = RootOrFellows; - type ControllerOriginConverter = XcmOriginToTransactDispatchOrigin; - type WeightInfo = weights::cumulus_pallet_xcmp_queue::WeightInfo; - type PriceForSiblingDelivery = NoPriceForMessageDelivery; -} - -parameter_types! { - pub const RelayOrigin: AggregateMessageOrigin = AggregateMessageOrigin::Parent; -} - -impl cumulus_pallet_dmp_queue::Config for Runtime { - type WeightInfo = weights::cumulus_pallet_dmp_queue::WeightInfo; - type RuntimeEvent = RuntimeEvent; - type DmpSink = frame_support::traits::EnqueueWithOrigin; -} - -pub const PERIOD: u32 = 6 * HOURS; -pub const OFFSET: u32 = 0; - -impl pallet_session::Config for Runtime { - type RuntimeEvent = RuntimeEvent; - type ValidatorId = ::AccountId; - // we don't have stash and controller, thus we don't need the convert as well. - type ValidatorIdOf = pallet_collator_selection::IdentityCollator; - type ShouldEndSession = pallet_session::PeriodicSessions, ConstU32>; - type NextSessionRotation = pallet_session::PeriodicSessions, ConstU32>; - type SessionManager = CollatorSelection; - // Essentially just Aura, but let's be pedantic. - type SessionHandler = ::KeyTypeIdProviders; - type Keys = SessionKeys; - type WeightInfo = weights::pallet_session::WeightInfo; -} - -impl pallet_aura::Config for Runtime { - type AuthorityId = AuraId; - type DisabledValidators = (); - type MaxAuthorities = ConstU32<100_000>; - type AllowMultipleBlocksPerSlot = ConstBool; - #[cfg(feature = "experimental")] - type SlotDuration = pallet_aura::MinimumPeriodTimesTwo; -} - -parameter_types! { - pub const PotId: PalletId = PalletId(*b"PotStake"); - pub const SessionLength: BlockNumber = 6 * HOURS; - // StakingAdmin pluralistic body. - pub const StakingAdminBodyId: BodyId = BodyId::Defense; -} - -/// We allow Root and the `StakingAdmin` to execute privileged collator selection operations. 
-pub type CollatorSelectionUpdateOrigin = EitherOfDiverse< - EnsureRoot, - EnsureXcm>, ->; - -impl pallet_collator_selection::Config for Runtime { - type RuntimeEvent = RuntimeEvent; - type Currency = Balances; - type UpdateOrigin = CollatorSelectionUpdateOrigin; - type PotId = PotId; - type MaxCandidates = ConstU32<100>; - type MinEligibleCollators = ConstU32<4>; - type MaxInvulnerables = ConstU32<20>; - // should be a multiple of session or things will get inconsistent - type KickThreshold = ConstU32; - type ValidatorId = ::AccountId; - type ValidatorIdOf = pallet_collator_selection::IdentityCollator; - type ValidatorRegistration = Session; - type WeightInfo = weights::pallet_collator_selection::WeightInfo; -} - -parameter_types! { - // One storage item; key size is 32; value is size 4+4+16+32 bytes = 56 bytes. - pub const DepositBase: Balance = deposit(1, 88); - // Additional storage item size of 32 bytes. - pub const DepositFactor: Balance = deposit(0, 32); -} - -impl pallet_multisig::Config for Runtime { - type RuntimeEvent = RuntimeEvent; - type RuntimeCall = RuntimeCall; - type Currency = Balances; - type DepositBase = DepositBase; - type DepositFactor = DepositFactor; - type MaxSignatories = ConstU32<100>; - type WeightInfo = weights::pallet_multisig::WeightInfo; -} - -impl pallet_utility::Config for Runtime { - type RuntimeEvent = RuntimeEvent; - type RuntimeCall = RuntimeCall; - type PalletsOrigin = OriginCaller; - type WeightInfo = weights::pallet_utility::WeightInfo; -} - -// Create the runtime by composing the FRAME pallets that were previously configured. -construct_runtime!( - pub enum Runtime - { - // System support stuff. - System: frame_system::{Pallet, Call, Config, Storage, Event} = 0, - ParachainSystem: cumulus_pallet_parachain_system::{ - Pallet, Call, Config, Storage, Inherent, Event, ValidateUnsigned, - } = 1, - Timestamp: pallet_timestamp::{Pallet, Call, Storage, Inherent} = 2, - ParachainInfo: parachain_info::{Pallet, Storage, Config} = 3, - - // Monetary stuff. - Balances: pallet_balances::{Pallet, Call, Storage, Config, Event} = 10, - TransactionPayment: pallet_transaction_payment::{Pallet, Storage, Event} = 11, - - // Collator support. The order of these 4 are important and shall not change. - Authorship: pallet_authorship::{Pallet, Storage} = 20, - CollatorSelection: pallet_collator_selection::{Pallet, Call, Storage, Event, Config} = 21, - Session: pallet_session::{Pallet, Call, Storage, Event, Config} = 22, - Aura: pallet_aura::{Pallet, Storage, Config} = 23, - AuraExt: cumulus_pallet_aura_ext::{Pallet, Storage, Config} = 24, - - // XCM helpers. - XcmpQueue: cumulus_pallet_xcmp_queue::{Pallet, Call, Storage, Event} = 30, - PolkadotXcm: pallet_xcm::{Pallet, Call, Event, Origin, Config} = 31, - CumulusXcm: cumulus_pallet_xcm::{Pallet, Event, Origin} = 32, - DmpQueue: cumulus_pallet_dmp_queue::{Pallet, Call, Storage, Event} = 33, - MessageQueue: pallet_message_queue::{Pallet, Call, Storage, Event} = 34, - - // Handy utilities. 
- Utility: pallet_utility::{Pallet, Call, Event} = 40, - Multisig: pallet_multisig::{Pallet, Call, Storage, Event} = 41, - } -); - -#[cfg(feature = "runtime-benchmarks")] -mod benches { - frame_benchmarking::define_benchmarks!( - [frame_system, SystemBench::] - [pallet_balances, Balances] - [pallet_message_queue, MessageQueue] - [pallet_multisig, Multisig] - [pallet_session, SessionBench::] - [pallet_utility, Utility] - [pallet_timestamp, Timestamp] - [pallet_collator_selection, CollatorSelection] - [cumulus_pallet_xcmp_queue, XcmpQueue] - [cumulus_pallet_dmp_queue, DmpQueue] - // XCM - [pallet_xcm, PalletXcmExtrinsicsBenchmark::] - // NOTE: Make sure you point to the individual modules below. - [pallet_xcm_benchmarks::fungible, XcmBalances] - [pallet_xcm_benchmarks::generic, XcmGeneric] - ); -} - -impl_runtime_apis! { - impl sp_consensus_aura::AuraApi for Runtime { - fn slot_duration() -> sp_consensus_aura::SlotDuration { - sp_consensus_aura::SlotDuration::from_millis(Aura::slot_duration()) - } - - fn authorities() -> Vec { - Aura::authorities().into_inner() - } - } - - impl sp_api::Core for Runtime { - fn version() -> RuntimeVersion { - VERSION - } - - fn execute_block(block: Block) { - Executive::execute_block(block) - } - - fn initialize_block(header: &::Header) { - Executive::initialize_block(header) - } - } - - impl sp_api::Metadata for Runtime { - fn metadata() -> OpaqueMetadata { - OpaqueMetadata::new(Runtime::metadata().into()) - } - - fn metadata_at_version(version: u32) -> Option { - Runtime::metadata_at_version(version) - } - - fn metadata_versions() -> sp_std::vec::Vec { - Runtime::metadata_versions() - } - } - - impl sp_block_builder::BlockBuilder for Runtime { - fn apply_extrinsic(extrinsic: ::Extrinsic) -> ApplyExtrinsicResult { - Executive::apply_extrinsic(extrinsic) - } - - fn finalize_block() -> ::Header { - Executive::finalize_block() - } - - fn inherent_extrinsics(data: sp_inherents::InherentData) -> Vec<::Extrinsic> { - data.create_extrinsics() - } - - fn check_inherents( - block: Block, - data: sp_inherents::InherentData, - ) -> sp_inherents::CheckInherentsResult { - data.check_extrinsics(&block) - } - } - - impl sp_transaction_pool::runtime_api::TaggedTransactionQueue for Runtime { - fn validate_transaction( - source: TransactionSource, - tx: ::Extrinsic, - block_hash: ::Hash, - ) -> TransactionValidity { - Executive::validate_transaction(source, tx, block_hash) - } - } - - impl sp_offchain::OffchainWorkerApi for Runtime { - fn offchain_worker(header: &::Header) { - Executive::offchain_worker(header) - } - } - - impl sp_session::SessionKeys for Runtime { - fn generate_session_keys(seed: Option>) -> Vec { - SessionKeys::generate(seed) - } - - fn decode_session_keys( - encoded: Vec, - ) -> Option, KeyTypeId)>> { - SessionKeys::decode_into_raw_public_keys(&encoded) - } - } - - impl frame_system_rpc_runtime_api::AccountNonceApi for Runtime { - fn account_nonce(account: AccountId) -> Nonce { - System::account_nonce(account) - } - } - - impl pallet_transaction_payment_rpc_runtime_api::TransactionPaymentApi for Runtime { - fn query_info( - uxt: ::Extrinsic, - len: u32, - ) -> pallet_transaction_payment_rpc_runtime_api::RuntimeDispatchInfo { - TransactionPayment::query_info(uxt, len) - } - fn query_fee_details( - uxt: ::Extrinsic, - len: u32, - ) -> pallet_transaction_payment::FeeDetails { - TransactionPayment::query_fee_details(uxt, len) - } - fn query_weight_to_fee(weight: Weight) -> Balance { - TransactionPayment::weight_to_fee(weight) - } - fn 
query_length_to_fee(length: u32) -> Balance { - TransactionPayment::length_to_fee(length) - } - } - - impl pallet_transaction_payment_rpc_runtime_api::TransactionPaymentCallApi - for Runtime - { - fn query_call_info( - call: RuntimeCall, - len: u32, - ) -> pallet_transaction_payment::RuntimeDispatchInfo { - TransactionPayment::query_call_info(call, len) - } - fn query_call_fee_details( - call: RuntimeCall, - len: u32, - ) -> pallet_transaction_payment::FeeDetails { - TransactionPayment::query_call_fee_details(call, len) - } - fn query_weight_to_fee(weight: Weight) -> Balance { - TransactionPayment::weight_to_fee(weight) - } - fn query_length_to_fee(length: u32) -> Balance { - TransactionPayment::length_to_fee(length) - } - } - - impl cumulus_primitives_core::CollectCollationInfo for Runtime { - fn collect_collation_info(header: &::Header) -> cumulus_primitives_core::CollationInfo { - ParachainSystem::collect_collation_info(header) - } - } - - #[cfg(feature = "try-runtime")] - impl frame_try_runtime::TryRuntime for Runtime { - fn on_runtime_upgrade(checks: frame_try_runtime::UpgradeCheckSelect) -> (Weight, Weight) { - let weight = Executive::try_runtime_upgrade(checks).unwrap(); - (weight, RuntimeBlockWeights::get().max_block) - } - - fn execute_block( - block: Block, - state_root_check: bool, - signature_check: bool, - select: frame_try_runtime::TryStateSelect, - ) -> Weight { - // NOTE: intentional unwrap: we don't want to propagate the error backwards, and want to - // have a backtrace here. - Executive::try_execute_block(block, state_root_check, signature_check, select).unwrap() - } - } - - #[cfg(feature = "runtime-benchmarks")] - impl frame_benchmarking::Benchmark for Runtime { - fn benchmark_metadata(extra: bool) -> ( - Vec, - Vec, - ) { - use frame_benchmarking::{Benchmarking, BenchmarkList}; - use frame_support::traits::StorageInfoTrait; - use frame_system_benchmarking::Pallet as SystemBench; - use cumulus_pallet_session_benchmarking::Pallet as SessionBench; - use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsicsBenchmark; - - // This is defined once again in dispatch_benchmark, because list_benchmarks! - // and add_benchmarks! are macros exported by define_benchmarks! macros and those types - // are referenced in that call. 
- type XcmBalances = pallet_xcm_benchmarks::fungible::Pallet::; - type XcmGeneric = pallet_xcm_benchmarks::generic::Pallet::; - - let mut list = Vec::::new(); - list_benchmarks!(list, extra); - - let storage_info = AllPalletsWithSystem::storage_info(); - (list, storage_info) - } - - fn dispatch_benchmark( - config: frame_benchmarking::BenchmarkConfig - ) -> Result, sp_runtime::RuntimeString> { - use frame_benchmarking::{Benchmarking, BenchmarkBatch, BenchmarkError}; - use sp_storage::TrackedStorageKey; - - use frame_system_benchmarking::Pallet as SystemBench; - impl frame_system_benchmarking::Config for Runtime { - fn setup_set_code_requirements(code: &sp_std::vec::Vec) -> Result<(), BenchmarkError> { - ParachainSystem::initialize_for_set_code_benchmark(code.len() as u32); - Ok(()) - } - - fn verify_set_code() { - System::assert_last_event(cumulus_pallet_parachain_system::Event::::ValidationFunctionStored.into()); - } - } - - use cumulus_pallet_session_benchmarking::Pallet as SessionBench; - impl cumulus_pallet_session_benchmarking::Config for Runtime {} - - use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsicsBenchmark; - impl pallet_xcm::benchmarking::Config for Runtime { - fn reachable_dest() -> Option { - Some(Parent.into()) - } - - fn teleportable_asset_and_dest() -> Option<(MultiAsset, MultiLocation)> { - // Relay/native token can be teleported between BH and Relay. - Some(( - MultiAsset { - fun: Fungible(EXISTENTIAL_DEPOSIT), - id: Concrete(Parent.into()) - }, - Parent.into(), - )) - } - - fn reserve_transferable_asset_and_dest() -> Option<(MultiAsset, MultiLocation)> { - // Reserve transfers are disabled on BH. - None - } - } - - use xcm::latest::prelude::*; - use xcm_config::KsmRelayLocation; - - parameter_types! { - pub ExistentialDepositMultiAsset: Option = Some(( - xcm_config::KsmRelayLocation::get(), - ExistentialDeposit::get() - ).into()); - } - - impl pallet_xcm_benchmarks::Config for Runtime { - type XcmConfig = xcm_config::XcmConfig; - type AccountIdConverter = xcm_config::LocationToAccountId; - type DeliveryHelper = cumulus_primitives_utility::ToParentDeliveryHelper< - xcm_config::XcmConfig, - ExistentialDepositMultiAsset, - xcm_config::PriceForParentDelivery, - >; - fn valid_destination() -> Result { - Ok(KsmRelayLocation::get()) - } - fn worst_case_holding(_depositable_count: u32) -> MultiAssets { - // just concrete assets according to relay chain. - let assets: Vec = vec![ - MultiAsset { - id: Concrete(KsmRelayLocation::get()), - fun: Fungible(1_000_000 * UNITS), - } - ]; - assets.into() - } - } - - parameter_types! 
{ - pub const TrustedTeleporter: Option<(MultiLocation, MultiAsset)> = Some(( - KsmRelayLocation::get(), - MultiAsset { fun: Fungible(UNITS), id: Concrete(KsmRelayLocation::get()) }, - )); - pub const CheckedAccount: Option<(AccountId, xcm_builder::MintLocation)> = None; - pub const TrustedReserve: Option<(MultiLocation, MultiAsset)> = None; - } - - impl pallet_xcm_benchmarks::fungible::Config for Runtime { - type TransactAsset = Balances; - - type CheckedAccount = CheckedAccount; - type TrustedTeleporter = TrustedTeleporter; - type TrustedReserve = TrustedReserve; - - fn get_multi_asset() -> MultiAsset { - MultiAsset { - id: Concrete(KsmRelayLocation::get()), - fun: Fungible(UNITS), - } - } - } - - impl pallet_xcm_benchmarks::generic::Config for Runtime { - type TransactAsset = Balances; - type RuntimeCall = RuntimeCall; - - fn worst_case_response() -> (u64, Response) { - (0u64, Response::Version(Default::default())) - } - - fn worst_case_asset_exchange() -> Result<(MultiAssets, MultiAssets), BenchmarkError> { - Err(BenchmarkError::Skip) - } - - fn universal_alias() -> Result<(MultiLocation, Junction), BenchmarkError> { - Err(BenchmarkError::Skip) - } - - fn transact_origin_and_runtime_call() -> Result<(MultiLocation, RuntimeCall), BenchmarkError> { - Ok((KsmRelayLocation::get(), frame_system::Call::remark_with_event { remark: vec![] }.into())) - } - - fn subscribe_origin() -> Result { - Ok(KsmRelayLocation::get()) - } - - fn claimable_asset() -> Result<(MultiLocation, MultiLocation, MultiAssets), BenchmarkError> { - let origin = KsmRelayLocation::get(); - let assets: MultiAssets = (Concrete(KsmRelayLocation::get()), 1_000 * UNITS).into(); - let ticket = MultiLocation { parents: 0, interior: Here }; - Ok((origin, ticket, assets)) - } - - fn unlockable_asset() -> Result<(MultiLocation, MultiLocation, MultiAsset), BenchmarkError> { - Err(BenchmarkError::Skip) - } - - fn export_message_origin_and_destination( - ) -> Result<(MultiLocation, NetworkId, InteriorMultiLocation), BenchmarkError> { - Err(BenchmarkError::Skip) - } - - fn alias_origin() -> Result<(MultiLocation, MultiLocation), BenchmarkError> { - Err(BenchmarkError::Skip) - } - } - - type XcmBalances = pallet_xcm_benchmarks::fungible::Pallet::; - type XcmGeneric = pallet_xcm_benchmarks::generic::Pallet::; - - let whitelist: Vec = vec![ - // Block Number - hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef702a5c1b19ab7a04f536c519aca4983ac").to_vec().into(), - // Total Issuance - hex_literal::hex!("c2261276cc9d1f8598ea4b6a74b15c2f57c875e4cff74148e4628f264b974c80").to_vec().into(), - // Execution Phase - hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef7ff553b5a9862a516939d82b3d3d8661a").to_vec().into(), - // Event Count - hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef70a98fdbe9ce6c55837576c60c7af3850").to_vec().into(), - // System Events - hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef780d41e5e16056765bc8461851072c9d7").to_vec().into(), - ]; - - let mut batches = Vec::::new(); - let params = (&config, &whitelist); - add_benchmarks!(params, batches); - - Ok(batches) - } - } - - impl sp_genesis_builder::GenesisBuilder for Runtime { - fn create_default_config() -> Vec { - create_default_config::() - } - - fn build_config(config: Vec) -> sp_genesis_builder::Result { - build_config::(config) - } - } -} - -cumulus_pallet_parachain_system::register_validate_block! 
{ - Runtime = Runtime, - BlockExecutor = cumulus_pallet_aura_ext::BlockExecutor::, -} diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/weights/block_weights.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/weights/block_weights.rs deleted file mode 100644 index e7fdb2aae2a..00000000000 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/weights/block_weights.rs +++ /dev/null @@ -1,53 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -pub mod constants { - use frame_support::{ - parameter_types, - weights::{constants, Weight}, - }; - - parameter_types! { - /// Importing a block with 0 Extrinsics. - pub const BlockExecutionWeight: Weight = - Weight::from_parts(constants::WEIGHT_REF_TIME_PER_NANOS.saturating_mul(5_000_000), 0); - } - - #[cfg(test)] - mod test_weights { - use frame_support::weights::constants; - - /// Checks that the weight exists and is sane. - // NOTE: If this test fails but you are sure that the generated values are fine, - // you can delete it. - #[test] - fn sane() { - let w = super::constants::BlockExecutionWeight::get(); - - // At least 100 µs. - assert!( - w.ref_time() >= 100u64 * constants::WEIGHT_REF_TIME_PER_MICROS, - "Weight should be at least 100 µs." - ); - // At most 50 ms. - assert!( - w.ref_time() <= 50u64 * constants::WEIGHT_REF_TIME_PER_MILLIS, - "Weight should be at most 50 ms." - ); - } - } -} diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/weights/cumulus_pallet_dmp_queue.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/weights/cumulus_pallet_dmp_queue.rs deleted file mode 100644 index cc41dcd6cbb..00000000000 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/weights/cumulus_pallet_dmp_queue.rs +++ /dev/null @@ -1,131 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Cumulus. - -// Cumulus is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Cumulus is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Cumulus. If not, see . - -//! Autogenerated weights for `cumulus_pallet_dmp_queue` -//! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-10-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-yprdrvc7-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! 
WASM-EXECUTION: `Compiled`, CHAIN: `Some("asset-hub-kusama-dev")`, DB CACHE: 1024 - -// Executed Command: -// target/production/polkadot-parachain -// benchmark -// pallet -// --steps=50 -// --repeat=20 -// --extrinsic=* -// --wasm-execution=compiled -// --heap-pages=4096 -// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json -// --pallet=cumulus_pallet_dmp_queue -// --chain=asset-hub-kusama-dev -// --header=./cumulus/file_header.txt -// --output=./cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/ - -#![cfg_attr(rustfmt, rustfmt_skip)] -#![allow(unused_parens)] -#![allow(unused_imports)] -#![allow(missing_docs)] - -use frame_support::{traits::Get, weights::Weight}; -use core::marker::PhantomData; - -/// Weight functions for `cumulus_pallet_dmp_queue`. -pub struct WeightInfo(PhantomData); -impl cumulus_pallet_dmp_queue::WeightInfo for WeightInfo { - /// Storage: `DmpQueue::MigrationStatus` (r:1 w:1) - /// Proof: `DmpQueue::MigrationStatus` (`max_values`: Some(1), `max_size`: Some(1028), added: 1523, mode: `MaxEncodedLen`) - /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca754904d6d8c6fe06c4e5965f9b8397421` (r:1 w:0) - /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca754904d6d8c6fe06c4e5965f9b8397421` (r:1 w:0) - /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca7d95d3e948effbeccff2de2c182672836` (r:1 w:1) - /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca7d95d3e948effbeccff2de2c182672836` (r:1 w:1) - /// Storage: `MessageQueue::BookStateFor` (r:1 w:1) - /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) - /// Storage: `MessageQueue::ServiceHead` (r:1 w:1) - /// Proof: `MessageQueue::ServiceHead` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `MaxEncodedLen`) - /// Storage: `MessageQueue::Pages` (r:0 w:1) - /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(65585), added: 68060, mode: `MaxEncodedLen`) - fn on_idle_good_msg() -> Weight { - // Proof Size summary in bytes: - // Measured: `65696` - // Estimated: `69161` - // Minimum execution time: 124_651_000 picoseconds. - Weight::from_parts(127_857_000, 0) - .saturating_add(Weight::from_parts(0, 69161)) - .saturating_add(T::DbWeight::get().reads(5)) - .saturating_add(T::DbWeight::get().writes(5)) - } - /// Storage: `DmpQueue::MigrationStatus` (r:1 w:1) - /// Proof: `DmpQueue::MigrationStatus` (`max_values`: Some(1), `max_size`: Some(1028), added: 1523, mode: `MaxEncodedLen`) - /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca754904d6d8c6fe06c4e5965f9b8397421` (r:1 w:0) - /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca754904d6d8c6fe06c4e5965f9b8397421` (r:1 w:0) - /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca7d95d3e948effbeccff2de2c182672836` (r:1 w:1) - /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca7d95d3e948effbeccff2de2c182672836` (r:1 w:1) - fn on_idle_large_msg() -> Weight { - // Proof Size summary in bytes: - // Measured: `65659` - // Estimated: `69124` - // Minimum execution time: 65_684_000 picoseconds. 
- Weight::from_parts(68_039_000, 0) - .saturating_add(Weight::from_parts(0, 69124)) - .saturating_add(T::DbWeight::get().reads(3)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `DmpQueue::MigrationStatus` (r:1 w:1) - /// Proof: `DmpQueue::MigrationStatus` (`max_values`: Some(1), `max_size`: Some(1028), added: 1523, mode: `MaxEncodedLen`) - /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca754904d6d8c6fe06c4e5965f9b8397421` (r:1 w:0) - /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca754904d6d8c6fe06c4e5965f9b8397421` (r:1 w:0) - /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca70f923ef3252d0166429d36d20ed665a8` (r:1 w:1) - /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca70f923ef3252d0166429d36d20ed665a8` (r:1 w:1) - /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca772275f64c354954352b71eea39cfaca2` (r:1 w:1) - /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca772275f64c354954352b71eea39cfaca2` (r:1 w:1) - /// Storage: `MessageQueue::BookStateFor` (r:1 w:1) - /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) - /// Storage: `MessageQueue::ServiceHead` (r:1 w:1) - /// Proof: `MessageQueue::ServiceHead` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `MaxEncodedLen`) - /// Storage: `MessageQueue::Pages` (r:0 w:1) - /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(65585), added: 68060, mode: `MaxEncodedLen`) - fn on_idle_overweight_good_msg() -> Weight { - // Proof Size summary in bytes: - // Measured: `65726` - // Estimated: `69191` - // Minimum execution time: 117_657_000 picoseconds. - Weight::from_parts(122_035_000, 0) - .saturating_add(Weight::from_parts(0, 69191)) - .saturating_add(T::DbWeight::get().reads(6)) - .saturating_add(T::DbWeight::get().writes(6)) - } - /// Storage: `DmpQueue::MigrationStatus` (r:1 w:1) - /// Proof: `DmpQueue::MigrationStatus` (`max_values`: Some(1), `max_size`: Some(1028), added: 1523, mode: `MaxEncodedLen`) - /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca754904d6d8c6fe06c4e5965f9b8397421` (r:1 w:0) - /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca754904d6d8c6fe06c4e5965f9b8397421` (r:1 w:0) - /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca70f923ef3252d0166429d36d20ed665a8` (r:1 w:1) - /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca70f923ef3252d0166429d36d20ed665a8` (r:1 w:1) - /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca772275f64c354954352b71eea39cfaca2` (r:1 w:1) - /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca772275f64c354954352b71eea39cfaca2` (r:1 w:1) - fn on_idle_overweight_large_msg() -> Weight { - // Proof Size summary in bytes: - // Measured: `65689` - // Estimated: `69154` - // Minimum execution time: 59_799_000 picoseconds. - Weight::from_parts(61_354_000, 0) - .saturating_add(Weight::from_parts(0, 69154)) - .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(3)) - } -} diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/weights/cumulus_pallet_parachain_system.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/weights/cumulus_pallet_parachain_system.rs deleted file mode 100644 index a30a2ae8d4d..00000000000 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/weights/cumulus_pallet_parachain_system.rs +++ /dev/null @@ -1,80 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. 
-// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! Autogenerated weights for `cumulus_pallet_parachain_system` -//! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-03-28, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `i9`, CPU: `13th Gen Intel(R) Core(TM) i9-13900K` -//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("bridge-hub-kusama-dev"), DB CACHE: 1024 - -// Executed Command: -// ./target/release/polkadot-parachain -// benchmark -// pallet -// --chain -// bridge-hub-kusama-dev -// --pallet -// cumulus_pallet_parachain_system -// --extrinsic -// * -// --execution -// wasm -// --wasm-execution -// compiled -// --output -// parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/weights -// --steps -// 50 -// --repeat -// 20 - -#![cfg_attr(rustfmt, rustfmt_skip)] -#![allow(unused_parens)] -#![allow(unused_imports)] - -use frame_support::{traits::Get, weights::Weight}; -use sp_std::marker::PhantomData; - -/// Weight functions for `cumulus_pallet_parachain_system`. -pub struct WeightInfo(PhantomData); -impl cumulus_pallet_parachain_system::WeightInfo for WeightInfo { - /// Storage: ParachainSystem LastDmqMqcHead (r:1 w:1) - /// Proof Skipped: ParachainSystem LastDmqMqcHead (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ParachainSystem ReservedDmpWeightOverride (r:1 w:0) - /// Proof Skipped: ParachainSystem ReservedDmpWeightOverride (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: MessageQueue BookStateFor (r:1 w:1) - /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) - /// Storage: MessageQueue ServiceHead (r:1 w:1) - /// Proof: MessageQueue ServiceHead (max_values: Some(1), max_size: Some(5), added: 500, mode: MaxEncodedLen) - /// Storage: ParachainSystem ProcessedDownwardMessages (r:0 w:1) - /// Proof Skipped: ParachainSystem ProcessedDownwardMessages (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: MessageQueue Pages (r:0 w:16) - /// Proof: MessageQueue Pages (max_values: None, max_size: Some(65585), added: 68060, mode: MaxEncodedLen) - /// The range of component `n` is `[0, 1000]`. - fn enqueue_inbound_downward_messages(n: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `12` - // Estimated: `8013` - // Minimum execution time: 1_686_000 picoseconds. 
- Weight::from_parts(1_761_000, 0) - .saturating_add(Weight::from_parts(0, 8013)) - // Standard Error: 28_250 - .saturating_add(Weight::from_parts(24_261_433, 0).saturating_mul(n.into())) - .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(4)) - } -} diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/weights/cumulus_pallet_xcmp_queue.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/weights/cumulus_pallet_xcmp_queue.rs deleted file mode 100644 index ffd311ceecd..00000000000 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/weights/cumulus_pallet_xcmp_queue.rs +++ /dev/null @@ -1,148 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! Autogenerated weights for `cumulus_pallet_xcmp_queue` -//! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-09-15, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `Olivers-MacBook-Pro.local`, CPU: `` -//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("bridge-hub-kusama-dev")`, DB CACHE: 1024 - -// Executed Command: -// ./target/release/polkadot-parachain -// benchmark -// pallet -// --pallet -// cumulus-pallet-xcmp-queue -// --chain -// bridge-hub-kusama-dev -// --output -// cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/weights/cumulus_pallet_xcmp_queue.rs -// --extrinsic -// - -#![cfg_attr(rustfmt, rustfmt_skip)] -#![allow(unused_parens)] -#![allow(unused_imports)] -#![allow(missing_docs)] - -use frame_support::{traits::Get, weights::Weight}; -use core::marker::PhantomData; - -/// Weight functions for `cumulus_pallet_xcmp_queue`. -pub struct WeightInfo(PhantomData); -impl cumulus_pallet_xcmp_queue::WeightInfo for WeightInfo { - /// Storage: `XcmpQueue::QueueConfig` (r:1 w:1) - /// Proof: `XcmpQueue::QueueConfig` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - fn set_config_with_u32() -> Weight { - // Proof Size summary in bytes: - // Measured: `76` - // Estimated: `1561` - // Minimum execution time: 5_000_000 picoseconds. 
- Weight::from_parts(6_000_000, 0) - .saturating_add(Weight::from_parts(0, 1561)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `XcmpQueue::QueueConfig` (r:1 w:0) - /// Proof: `XcmpQueue::QueueConfig` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `MessageQueue::BookStateFor` (r:1 w:1) - /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) - /// Storage: `MessageQueue::ServiceHead` (r:1 w:1) - /// Proof: `MessageQueue::ServiceHead` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `MaxEncodedLen`) - /// Storage: `XcmpQueue::InboundXcmpSuspended` (r:1 w:0) - /// Proof: `XcmpQueue::InboundXcmpSuspended` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `MessageQueue::Pages` (r:0 w:1) - /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(65585), added: 68060, mode: `MaxEncodedLen`) - fn enqueue_xcmp_message() -> Weight { - // Proof Size summary in bytes: - // Measured: `82` - // Estimated: `3517` - // Minimum execution time: 14_000_000 picoseconds. - Weight::from_parts(14_000_000, 0) - .saturating_add(Weight::from_parts(0, 3517)) - .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(3)) - } - /// Storage: `XcmpQueue::OutboundXcmpStatus` (r:1 w:1) - /// Proof: `XcmpQueue::OutboundXcmpStatus` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - fn suspend_channel() -> Weight { - // Proof Size summary in bytes: - // Measured: `76` - // Estimated: `1561` - // Minimum execution time: 3_000_000 picoseconds. - Weight::from_parts(4_000_000, 0) - .saturating_add(Weight::from_parts(0, 1561)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `XcmpQueue::OutboundXcmpStatus` (r:1 w:1) - /// Proof: `XcmpQueue::OutboundXcmpStatus` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - fn resume_channel() -> Weight { - // Proof Size summary in bytes: - // Measured: `111` - // Estimated: `1596` - // Minimum execution time: 4_000_000 picoseconds. - Weight::from_parts(5_000_000, 0) - .saturating_add(Weight::from_parts(0, 1596)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - fn take_first_concatenated_xcm() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 44_000_000 picoseconds. 
- Weight::from_parts(45_000_000, 0) - .saturating_add(Weight::from_parts(0, 0)) - } - /// Storage: UNKNOWN KEY `0x7b3237373ffdfeb1cab4222e3b520d6b345d8e88afa015075c945637c07e8f20` (r:1 w:1) - /// Proof: UNKNOWN KEY `0x7b3237373ffdfeb1cab4222e3b520d6b345d8e88afa015075c945637c07e8f20` (r:1 w:1) - /// Storage: `XcmpQueue::InboundXcmpMessages` (r:1 w:1) - /// Proof: `XcmpQueue::InboundXcmpMessages` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// Storage: `MessageQueue::BookStateFor` (r:1 w:1) - /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) - /// Storage: `MessageQueue::ServiceHead` (r:1 w:1) - /// Proof: `MessageQueue::ServiceHead` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `MaxEncodedLen`) - /// Storage: `XcmpQueue::QueueConfig` (r:1 w:0) - /// Proof: `XcmpQueue::QueueConfig` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `XcmpQueue::InboundXcmpSuspended` (r:1 w:0) - /// Proof: `XcmpQueue::InboundXcmpSuspended` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `MessageQueue::Pages` (r:0 w:1) - /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(65585), added: 68060, mode: `MaxEncodedLen`) - fn on_idle_good_msg() -> Weight { - // Proof Size summary in bytes: - // Measured: `65711` - // Estimated: `69176` - // Minimum execution time: 60_000_000 picoseconds. - Weight::from_parts(63_000_000, 0) - .saturating_add(Weight::from_parts(0, 69176)) - .saturating_add(T::DbWeight::get().reads(6)) - .saturating_add(T::DbWeight::get().writes(5)) - } - /// Storage: UNKNOWN KEY `0x7b3237373ffdfeb1cab4222e3b520d6b345d8e88afa015075c945637c07e8f20` (r:1 w:1) - /// Proof: UNKNOWN KEY `0x7b3237373ffdfeb1cab4222e3b520d6b345d8e88afa015075c945637c07e8f20` (r:1 w:1) - fn on_idle_large_msg() -> Weight { - // Proof Size summary in bytes: - // Measured: `65710` - // Estimated: `69175` - // Minimum execution time: 42_000_000 picoseconds. - Weight::from_parts(43_000_000, 0) - .saturating_add(Weight::from_parts(0, 69175)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(2)) - } -} diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/weights/extrinsic_weights.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/weights/extrinsic_weights.rs deleted file mode 100644 index 1a4adb968bb..00000000000 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/weights/extrinsic_weights.rs +++ /dev/null @@ -1,53 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -pub mod constants { - use frame_support::{ - parameter_types, - weights::{constants, Weight}, - }; - - parameter_types! { - /// Executing a NO-OP `System::remarks` Extrinsic. 
- pub const ExtrinsicBaseWeight: Weight = - Weight::from_parts(constants::WEIGHT_REF_TIME_PER_NANOS.saturating_mul(125_000), 0); - } - - #[cfg(test)] - mod test_weights { - use frame_support::weights::constants; - - /// Checks that the weight exists and is sane. - // NOTE: If this test fails but you are sure that the generated values are fine, - // you can delete it. - #[test] - fn sane() { - let w = super::constants::ExtrinsicBaseWeight::get(); - - // At least 10 µs. - assert!( - w.ref_time() >= 10u64 * constants::WEIGHT_REF_TIME_PER_MICROS, - "Weight should be at least 10 µs." - ); - // At most 1 ms. - assert!( - w.ref_time() <= constants::WEIGHT_REF_TIME_PER_MILLIS, - "Weight should be at most 1 ms." - ); - } - } -} diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/weights/frame_system.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/weights/frame_system.rs deleted file mode 100644 index 6b9313cdaba..00000000000 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/weights/frame_system.rs +++ /dev/null @@ -1,154 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! Autogenerated weights for `frame_system` -//! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-07-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-ynta1nyy-project-238-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! EXECUTION: ``, WASM-EXECUTION: `Compiled`, CHAIN: `Some("bridge-hub-kusama-dev")`, DB CACHE: 1024 - -// Executed Command: -// ./target/production/polkadot-parachain -// benchmark -// pallet -// --chain=bridge-hub-kusama-dev -// --wasm-execution=compiled -// --pallet=frame_system -// --no-storage-info -// --no-median-slopes -// --no-min-squares -// --extrinsic=* -// --steps=50 -// --repeat=20 -// --json -// --header=./file_header.txt -// --output=./parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/weights/ - -#![cfg_attr(rustfmt, rustfmt_skip)] -#![allow(unused_parens)] -#![allow(unused_imports)] -#![allow(missing_docs)] - -use frame_support::{traits::Get, weights::Weight}; -use core::marker::PhantomData; - -/// Weight functions for `frame_system`. -pub struct WeightInfo(PhantomData); -impl frame_system::WeightInfo for WeightInfo { - /// The range of component `b` is `[0, 3932160]`. - fn remark(b: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 1_985_000 picoseconds. - Weight::from_parts(2_177_341, 0) - .saturating_add(Weight::from_parts(0, 0)) - // Standard Error: 0 - .saturating_add(Weight::from_parts(386, 0).saturating_mul(b.into())) - } - /// The range of component `b` is `[0, 3932160]`. 
- fn remark_with_event(b: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 7_288_000 picoseconds. - Weight::from_parts(23_888_468, 0) - .saturating_add(Weight::from_parts(0, 0)) - // Standard Error: 8 - .saturating_add(Weight::from_parts(1_718, 0).saturating_mul(b.into())) - } - /// Storage: `System::Digest` (r:1 w:1) - /// Proof: `System::Digest` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: UNKNOWN KEY `0x3a686561707061676573` (r:0 w:1) - /// Proof: UNKNOWN KEY `0x3a686561707061676573` (r:0 w:1) - fn set_heap_pages() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `1485` - // Minimum execution time: 3_700_000 picoseconds. - Weight::from_parts(3_867_000, 0) - .saturating_add(Weight::from_parts(0, 1485)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `ParachainSystem::ValidationData` (r:1 w:0) - /// Proof: `ParachainSystem::ValidationData` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `ParachainSystem::UpgradeRestrictionSignal` (r:1 w:0) - /// Proof: `ParachainSystem::UpgradeRestrictionSignal` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `ParachainSystem::PendingValidationCode` (r:1 w:1) - /// Proof: `ParachainSystem::PendingValidationCode` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) - /// Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `ParachainSystem::NewValidationCode` (r:0 w:1) - /// Proof: `ParachainSystem::NewValidationCode` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `ParachainSystem::DidSetValidationCode` (r:0 w:1) - /// Proof: `ParachainSystem::DidSetValidationCode` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - fn set_code() -> Weight { - // Proof Size summary in bytes: - // Measured: `119` - // Estimated: `1604` - // Minimum execution time: 100_298_586_000 picoseconds. - Weight::from_parts(101_869_369_000, 0) - .saturating_add(Weight::from_parts(0, 1604)) - .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(3)) - } - /// Storage: `Skipped::Metadata` (r:0 w:0) - /// Proof: `Skipped::Metadata` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// The range of component `i` is `[0, 1000]`. - fn set_storage(i: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 2_052_000 picoseconds. - Weight::from_parts(2_115_000, 0) - .saturating_add(Weight::from_parts(0, 0)) - // Standard Error: 2_048 - .saturating_add(Weight::from_parts(755_436, 0).saturating_mul(i.into())) - .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(i.into()))) - } - /// Storage: `Skipped::Metadata` (r:0 w:0) - /// Proof: `Skipped::Metadata` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// The range of component `i` is `[0, 1000]`. - fn kill_storage(i: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 2_044_000 picoseconds. 
- Weight::from_parts(2_110_000, 0) - .saturating_add(Weight::from_parts(0, 0)) - // Standard Error: 1_011 - .saturating_add(Weight::from_parts(569_993, 0).saturating_mul(i.into())) - .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(i.into()))) - } - /// Storage: `Skipped::Metadata` (r:0 w:0) - /// Proof: `Skipped::Metadata` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// The range of component `p` is `[0, 1000]`. - fn kill_prefix(p: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `68 + p * (69 ±0)` - // Estimated: `66 + p * (70 ±0)` - // Minimum execution time: 3_741_000 picoseconds. - Weight::from_parts(3_838_000, 0) - .saturating_add(Weight::from_parts(0, 66)) - // Standard Error: 2_455 - .saturating_add(Weight::from_parts(1_216_154, 0).saturating_mul(p.into())) - .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(p.into()))) - .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(p.into()))) - .saturating_add(Weight::from_parts(0, 70).saturating_mul(p.into())) - } -} diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/weights/mod.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/weights/mod.rs deleted file mode 100644 index 36733d6d4a6..00000000000 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/weights/mod.rs +++ /dev/null @@ -1,41 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! Expose the auto generated weight files. - -pub mod block_weights; -pub mod cumulus_pallet_dmp_queue; -pub mod cumulus_pallet_parachain_system; -pub mod cumulus_pallet_xcmp_queue; -pub mod extrinsic_weights; -pub mod frame_system; -pub mod pallet_balances; -pub mod pallet_collator_selection; -pub mod pallet_message_queue; -pub mod pallet_multisig; -pub mod pallet_session; -pub mod pallet_timestamp; -pub mod pallet_utility; -pub mod pallet_xcm; -pub mod paritydb_weights; -pub mod rocksdb_weights; -pub mod xcm; - -pub use block_weights::constants::BlockExecutionWeight; -pub use extrinsic_weights::constants::ExtrinsicBaseWeight; -pub use paritydb_weights::constants::ParityDbWeight; -pub use rocksdb_weights::constants::RocksDbWeight; diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/weights/pallet_balances.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/weights/pallet_balances.rs deleted file mode 100644 index 04ceb5bed75..00000000000 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/weights/pallet_balances.rs +++ /dev/null @@ -1,152 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! Autogenerated weights for `pallet_balances` -//! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-07-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-ynta1nyy-project-238-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! EXECUTION: ``, WASM-EXECUTION: `Compiled`, CHAIN: `Some("bridge-hub-kusama-dev")`, DB CACHE: 1024 - -// Executed Command: -// ./target/production/polkadot-parachain -// benchmark -// pallet -// --chain=bridge-hub-kusama-dev -// --wasm-execution=compiled -// --pallet=pallet_balances -// --no-storage-info -// --no-median-slopes -// --no-min-squares -// --extrinsic=* -// --steps=50 -// --repeat=20 -// --json -// --header=./file_header.txt -// --output=./parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/weights/ - -#![cfg_attr(rustfmt, rustfmt_skip)] -#![allow(unused_parens)] -#![allow(unused_imports)] -#![allow(missing_docs)] - -use frame_support::{traits::Get, weights::Weight}; -use core::marker::PhantomData; - -/// Weight functions for `pallet_balances`. -pub struct WeightInfo(PhantomData); -impl pallet_balances::WeightInfo for WeightInfo { - /// Storage: `System::Account` (r:1 w:1) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - fn transfer_allow_death() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `3593` - // Minimum execution time: 55_163_000 picoseconds. - Weight::from_parts(56_056_000, 0) - .saturating_add(Weight::from_parts(0, 3593)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `System::Account` (r:1 w:1) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - fn transfer_keep_alive() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `3593` - // Minimum execution time: 40_829_000 picoseconds. - Weight::from_parts(42_182_000, 0) - .saturating_add(Weight::from_parts(0, 3593)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `System::Account` (r:1 w:1) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - fn force_set_balance_creating() -> Weight { - // Proof Size summary in bytes: - // Measured: `174` - // Estimated: `3593` - // Minimum execution time: 15_212_000 picoseconds. - Weight::from_parts(15_782_000, 0) - .saturating_add(Weight::from_parts(0, 3593)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `System::Account` (r:1 w:1) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - fn force_set_balance_killing() -> Weight { - // Proof Size summary in bytes: - // Measured: `174` - // Estimated: `3593` - // Minimum execution time: 22_866_000 picoseconds. 
- Weight::from_parts(23_452_000, 0) - .saturating_add(Weight::from_parts(0, 3593)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `System::Account` (r:2 w:2) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - fn force_transfer() -> Weight { - // Proof Size summary in bytes: - // Measured: `103` - // Estimated: `6196` - // Minimum execution time: 57_047_000 picoseconds. - Weight::from_parts(58_536_000, 0) - .saturating_add(Weight::from_parts(0, 6196)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `System::Account` (r:1 w:1) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - fn transfer_all() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `3593` - // Minimum execution time: 51_622_000 picoseconds. - Weight::from_parts(52_912_000, 0) - .saturating_add(Weight::from_parts(0, 3593)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `System::Account` (r:1 w:1) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - fn force_unreserve() -> Weight { - // Proof Size summary in bytes: - // Measured: `174` - // Estimated: `3593` - // Minimum execution time: 17_723_000 picoseconds. - Weight::from_parts(18_383_000, 0) - .saturating_add(Weight::from_parts(0, 3593)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `System::Account` (r:999 w:999) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - /// The range of component `u` is `[1, 1000]`. - fn upgrade_accounts(u: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `0 + u * (136 ±0)` - // Estimated: `990 + u * (2603 ±0)` - // Minimum execution time: 17_089_000 picoseconds. - Weight::from_parts(17_379_000, 0) - .saturating_add(Weight::from_parts(0, 990)) - // Standard Error: 17_071 - .saturating_add(Weight::from_parts(15_647_341, 0).saturating_mul(u.into())) - .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(u.into()))) - .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(u.into()))) - .saturating_add(Weight::from_parts(0, 2603).saturating_mul(u.into())) - } -} diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/weights/pallet_collator_selection.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/weights/pallet_collator_selection.rs deleted file mode 100644 index 72d8ba4045a..00000000000 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/weights/pallet_collator_selection.rs +++ /dev/null @@ -1,248 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -//! Autogenerated weights for `pallet_collator_selection` -//! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-07-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-ynta1nyy-project-238-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! EXECUTION: ``, WASM-EXECUTION: `Compiled`, CHAIN: `Some("bridge-hub-kusama-dev")`, DB CACHE: 1024 - -// Executed Command: -// ./target/production/polkadot-parachain -// benchmark -// pallet -// --chain=bridge-hub-kusama-dev -// --wasm-execution=compiled -// --pallet=pallet_collator_selection -// --no-storage-info -// --no-median-slopes -// --no-min-squares -// --extrinsic=* -// --steps=50 -// --repeat=20 -// --json -// --header=./file_header.txt -// --output=./parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/weights/ - -#![cfg_attr(rustfmt, rustfmt_skip)] -#![allow(unused_parens)] -#![allow(unused_imports)] -#![allow(missing_docs)] - -use frame_support::{traits::Get, weights::Weight}; -use core::marker::PhantomData; - -/// Weight functions for `pallet_collator_selection`. -pub struct WeightInfo(PhantomData); -impl pallet_collator_selection::WeightInfo for WeightInfo { - /// Storage: `Session::NextKeys` (r:20 w:0) - /// Proof: `Session::NextKeys` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// Storage: `CollatorSelection::Invulnerables` (r:0 w:1) - /// Proof: `CollatorSelection::Invulnerables` (`max_values`: Some(1), `max_size`: Some(641), added: 1136, mode: `MaxEncodedLen`) - /// The range of component `b` is `[1, 20]`. - fn set_invulnerables(b: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `196 + b * (79 ±0)` - // Estimated: `1187 + b * (2555 ±0)` - // Minimum execution time: 14_329_000 picoseconds. - Weight::from_parts(11_605_842, 0) - .saturating_add(Weight::from_parts(0, 1187)) - // Standard Error: 4_784 - .saturating_add(Weight::from_parts(3_297_183, 0).saturating_mul(b.into())) - .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(b.into()))) - .saturating_add(T::DbWeight::get().writes(1)) - .saturating_add(Weight::from_parts(0, 2555).saturating_mul(b.into())) - } - /// Storage: `Session::NextKeys` (r:1 w:0) - /// Proof: `Session::NextKeys` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// Storage: `CollatorSelection::Invulnerables` (r:1 w:1) - /// Proof: `CollatorSelection::Invulnerables` (`max_values`: Some(1), `max_size`: Some(641), added: 1136, mode: `MaxEncodedLen`) - /// Storage: `CollatorSelection::Candidates` (r:1 w:1) - /// Proof: `CollatorSelection::Candidates` (`max_values`: Some(1), `max_size`: Some(4802), added: 5297, mode: `MaxEncodedLen`) - /// Storage: `System::Account` (r:1 w:1) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - /// The range of component `b` is `[1, 19]`. - /// The range of component `c` is `[1, 99]`. - fn add_invulnerable(b: u32, c: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `757 + b * (32 ±0) + c * (53 ±0)` - // Estimated: `6287 + b * (37 ±0) + c * (53 ±0)` - // Minimum execution time: 47_110_000 picoseconds. 
- Weight::from_parts(45_234_418, 0) - .saturating_add(Weight::from_parts(0, 6287)) - // Standard Error: 14_452 - .saturating_add(Weight::from_parts(156_031, 0).saturating_mul(b.into())) - // Standard Error: 2_739 - .saturating_add(Weight::from_parts(216_162, 0).saturating_mul(c.into())) - .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(3)) - .saturating_add(Weight::from_parts(0, 37).saturating_mul(b.into())) - .saturating_add(Weight::from_parts(0, 53).saturating_mul(c.into())) - } - /// Storage: `CollatorSelection::Candidates` (r:1 w:0) - /// Proof: `CollatorSelection::Candidates` (`max_values`: Some(1), `max_size`: Some(4802), added: 5297, mode: `MaxEncodedLen`) - /// Storage: `CollatorSelection::Invulnerables` (r:1 w:1) - /// Proof: `CollatorSelection::Invulnerables` (`max_values`: Some(1), `max_size`: Some(641), added: 1136, mode: `MaxEncodedLen`) - /// The range of component `b` is `[5, 20]`. - fn remove_invulnerable(b: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `82 + b * (32 ±0)` - // Estimated: `6287` - // Minimum execution time: 15_326_000 picoseconds. - Weight::from_parts(14_914_611, 0) - .saturating_add(Weight::from_parts(0, 6287)) - // Standard Error: 2_274 - .saturating_add(Weight::from_parts(201_234, 0).saturating_mul(b.into())) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `CollatorSelection::DesiredCandidates` (r:0 w:1) - /// Proof: `CollatorSelection::DesiredCandidates` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - fn set_desired_candidates() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 7_288_000 picoseconds. - Weight::from_parts(7_472_000, 0) - .saturating_add(Weight::from_parts(0, 0)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `CollatorSelection::CandidacyBond` (r:0 w:1) - /// Proof: `CollatorSelection::CandidacyBond` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) - fn set_candidacy_bond(_c: u32, _k: u32) -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 7_137_000 picoseconds. 
- Weight::from_parts(7_374_000, 0) - .saturating_add(Weight::from_parts(0, 0)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `CollatorSelection::Candidates` (r:1 w:1) - /// Proof: `CollatorSelection::Candidates` (`max_values`: Some(1), `max_size`: Some(4802), added: 5297, mode: `MaxEncodedLen`) - /// Storage: `CollatorSelection::DesiredCandidates` (r:1 w:0) - /// Proof: `CollatorSelection::DesiredCandidates` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - /// Storage: `CollatorSelection::Invulnerables` (r:1 w:0) - /// Proof: `CollatorSelection::Invulnerables` (`max_values`: Some(1), `max_size`: Some(641), added: 1136, mode: `MaxEncodedLen`) - /// Storage: `Session::NextKeys` (r:1 w:0) - /// Proof: `Session::NextKeys` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// Storage: `CollatorSelection::CandidacyBond` (r:1 w:0) - /// Proof: `CollatorSelection::CandidacyBond` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) - /// Storage: `CollatorSelection::LastAuthoredBlock` (r:0 w:1) - /// Proof: `CollatorSelection::LastAuthoredBlock` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) - /// The range of component `c` is `[1, 99]`. - fn register_as_candidate(c: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `740 + c * (52 ±0)` - // Estimated: `6287 + c * (54 ±0)` - // Minimum execution time: 40_718_000 picoseconds. - Weight::from_parts(43_911_837, 0) - .saturating_add(Weight::from_parts(0, 6287)) - // Standard Error: 3_053 - .saturating_add(Weight::from_parts(229_337, 0).saturating_mul(c.into())) - .saturating_add(T::DbWeight::get().reads(5)) - .saturating_add(T::DbWeight::get().writes(2)) - .saturating_add(Weight::from_parts(0, 54).saturating_mul(c.into())) - } - /// Storage: `CollatorSelection::Candidates` (r:1 w:1) - /// Proof: `CollatorSelection::Candidates` (`max_values`: Some(1), `max_size`: Some(4802), added: 5297, mode: `MaxEncodedLen`) - /// Storage: `CollatorSelection::Invulnerables` (r:1 w:0) - /// Proof: `CollatorSelection::Invulnerables` (`max_values`: Some(1), `max_size`: Some(641), added: 1136, mode: `MaxEncodedLen`) - /// Storage: `CollatorSelection::LastAuthoredBlock` (r:0 w:1) - /// Proof: `CollatorSelection::LastAuthoredBlock` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) - /// The range of component `c` is `[3, 100]`. - fn leave_intent(c: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `334 + c * (49 ±0)` - // Estimated: `6287` - // Minimum execution time: 32_953_000 picoseconds. - Weight::from_parts(34_817_275, 0) - .saturating_add(Weight::from_parts(0, 6287)) - // Standard Error: 2_476 - .saturating_add(Weight::from_parts(198_023, 0).saturating_mul(c.into())) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(2)) - } - fn update_bond(c: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `306 + c * (50 ±0)` - // Estimated: `6287` - // Minimum execution time: 34_814_000 picoseconds. 
- Weight::from_parts(36_371_520, 0) - .saturating_add(Weight::from_parts(0, 6287)) - // Standard Error: 2_391 - .saturating_add(Weight::from_parts(201_700, 0).saturating_mul(c.into())) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(2)) - } - fn take_candidate_slot(c: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `306 + c * (50 ±0)` - // Estimated: `6287` - // Minimum execution time: 34_814_000 picoseconds. - Weight::from_parts(36_371_520, 0) - .saturating_add(Weight::from_parts(0, 6287)) - // Standard Error: 2_391 - .saturating_add(Weight::from_parts(201_700, 0).saturating_mul(c.into())) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `System::Account` (r:2 w:2) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - /// Storage: `System::BlockWeight` (r:1 w:1) - /// Proof: `System::BlockWeight` (`max_values`: Some(1), `max_size`: Some(48), added: 543, mode: `MaxEncodedLen`) - /// Storage: `CollatorSelection::LastAuthoredBlock` (r:0 w:1) - /// Proof: `CollatorSelection::LastAuthoredBlock` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) - fn note_author() -> Weight { - // Proof Size summary in bytes: - // Measured: `155` - // Estimated: `6196` - // Minimum execution time: 45_130_000 picoseconds. - Weight::from_parts(46_733_000, 0) - .saturating_add(Weight::from_parts(0, 6196)) - .saturating_add(T::DbWeight::get().reads(3)) - .saturating_add(T::DbWeight::get().writes(4)) - } - /// Storage: `CollatorSelection::Candidates` (r:1 w:0) - /// Proof: `CollatorSelection::Candidates` (`max_values`: Some(1), `max_size`: Some(4802), added: 5297, mode: `MaxEncodedLen`) - /// Storage: `CollatorSelection::LastAuthoredBlock` (r:100 w:0) - /// Proof: `CollatorSelection::LastAuthoredBlock` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) - /// Storage: `CollatorSelection::Invulnerables` (r:1 w:0) - /// Proof: `CollatorSelection::Invulnerables` (`max_values`: Some(1), `max_size`: Some(641), added: 1136, mode: `MaxEncodedLen`) - /// Storage: `System::BlockWeight` (r:1 w:1) - /// Proof: `System::BlockWeight` (`max_values`: Some(1), `max_size`: Some(48), added: 543, mode: `MaxEncodedLen`) - /// Storage: `System::Account` (r:97 w:97) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - /// The range of component `r` is `[1, 100]`. - /// The range of component `c` is `[1, 100]`. - fn new_session(r: u32, c: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `2263 + c * (97 ±0) + r * (115 ±0)` - // Estimated: `6287 + c * (2519 ±0) + r * (2603 ±0)` - // Minimum execution time: 16_690_000 picoseconds. 
- Weight::from_parts(17_188_000, 0) - .saturating_add(Weight::from_parts(0, 6287)) - // Standard Error: 345_320 - .saturating_add(Weight::from_parts(15_166_422, 0).saturating_mul(c.into())) - .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(c.into()))) - .saturating_add(T::DbWeight::get().writes(1)) - .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(c.into()))) - .saturating_add(Weight::from_parts(0, 2519).saturating_mul(c.into())) - .saturating_add(Weight::from_parts(0, 2603).saturating_mul(r.into())) - } -} diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/weights/pallet_message_queue.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/weights/pallet_message_queue.rs deleted file mode 100644 index c5a4235055d..00000000000 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/weights/pallet_message_queue.rs +++ /dev/null @@ -1,179 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! Autogenerated weights for `pallet_message_queue` -//! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-03-24, STEPS: `2`, REPEAT: `1`, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `i9`, CPU: `13th Gen Intel(R) Core(TM) i9-13900K` -//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("bridge-hub-kusama-dev"), DB CACHE: 1024 - -// Executed Command: -// ./target/release/polkadot-parachain -// benchmark -// pallet -// --chain -// bridge-hub-kusama-dev -// --pallet -// pallet_message_queue -// --extrinsic -// * -// --execution -// wasm -// --wasm-execution -// compiled -// --output -// parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/weights - -#![cfg_attr(rustfmt, rustfmt_skip)] -#![allow(unused_parens)] -#![allow(unused_imports)] - -use frame_support::{traits::Get, weights::Weight}; -use sp_std::marker::PhantomData; - -/// Weight functions for `pallet_message_queue`. -pub struct WeightInfo(PhantomData); -impl pallet_message_queue::WeightInfo for WeightInfo { - /// Storage: MessageQueue ServiceHead (r:1 w:0) - /// Proof: MessageQueue ServiceHead (max_values: Some(1), max_size: Some(5), added: 500, mode: MaxEncodedLen) - /// Storage: MessageQueue BookStateFor (r:2 w:2) - /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) - fn ready_ring_knit() -> Weight { - // Proof Size summary in bytes: - // Measured: `189` - // Estimated: `7534` - // Minimum execution time: 11_692_000 picoseconds. 
- Weight::from_parts(11_692_000, 0) - .saturating_add(Weight::from_parts(0, 7534)) - .saturating_add(T::DbWeight::get().reads(3)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: MessageQueue BookStateFor (r:2 w:2) - /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) - /// Storage: MessageQueue ServiceHead (r:1 w:1) - /// Proof: MessageQueue ServiceHead (max_values: Some(1), max_size: Some(5), added: 500, mode: MaxEncodedLen) - fn ready_ring_unknit() -> Weight { - // Proof Size summary in bytes: - // Measured: `184` - // Estimated: `7534` - // Minimum execution time: 10_614_000 picoseconds. - Weight::from_parts(10_614_000, 0) - .saturating_add(Weight::from_parts(0, 7534)) - .saturating_add(T::DbWeight::get().reads(3)) - .saturating_add(T::DbWeight::get().writes(3)) - } - /// Storage: MessageQueue BookStateFor (r:1 w:1) - /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) - fn service_queue_base() -> Weight { - // Proof Size summary in bytes: - // Measured: `6` - // Estimated: `3517` - // Minimum execution time: 7_085_000 picoseconds. - Weight::from_parts(7_085_000, 0) - .saturating_add(Weight::from_parts(0, 3517)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: MessageQueue Pages (r:1 w:1) - /// Proof: MessageQueue Pages (max_values: None, max_size: Some(65585), added: 68060, mode: MaxEncodedLen) - fn service_page_base_completion() -> Weight { - // Proof Size summary in bytes: - // Measured: `72` - // Estimated: `69050` - // Minimum execution time: 5_813_000 picoseconds. - Weight::from_parts(5_813_000, 0) - .saturating_add(Weight::from_parts(0, 69050)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: MessageQueue Pages (r:1 w:1) - /// Proof: MessageQueue Pages (max_values: None, max_size: Some(65585), added: 68060, mode: MaxEncodedLen) - fn service_page_base_no_completion() -> Weight { - // Proof Size summary in bytes: - // Measured: `72` - // Estimated: `69050` - // Minimum execution time: 6_090_000 picoseconds. - Weight::from_parts(6_090_000, 0) - .saturating_add(Weight::from_parts(0, 69050)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - fn service_page_item() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 58_905_000 picoseconds. - Weight::from_parts(58_905_000, 0) - .saturating_add(Weight::from_parts(0, 0)) - } - /// Storage: MessageQueue ServiceHead (r:1 w:1) - /// Proof: MessageQueue ServiceHead (max_values: Some(1), max_size: Some(5), added: 500, mode: MaxEncodedLen) - /// Storage: MessageQueue BookStateFor (r:1 w:0) - /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) - fn bump_service_head() -> Weight { - // Proof Size summary in bytes: - // Measured: `99` - // Estimated: `5007` - // Minimum execution time: 6_501_000 picoseconds. 
- Weight::from_parts(6_501_000, 0) - .saturating_add(Weight::from_parts(0, 5007)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: MessageQueue BookStateFor (r:1 w:1) - /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) - /// Storage: MessageQueue Pages (r:1 w:1) - /// Proof: MessageQueue Pages (max_values: None, max_size: Some(65585), added: 68060, mode: MaxEncodedLen) - fn reap_page() -> Weight { - // Proof Size summary in bytes: - // Measured: `65667` - // Estimated: `72567` - // Minimum execution time: 39_695_000 picoseconds. - Weight::from_parts(39_695_000, 0) - .saturating_add(Weight::from_parts(0, 72567)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: MessageQueue BookStateFor (r:1 w:1) - /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) - /// Storage: MessageQueue Pages (r:1 w:1) - /// Proof: MessageQueue Pages (max_values: None, max_size: Some(65585), added: 68060, mode: MaxEncodedLen) - fn execute_overweight_page_removed() -> Weight { - // Proof Size summary in bytes: - // Measured: `65667` - // Estimated: `72567` - // Minimum execution time: 50_543_000 picoseconds. - Weight::from_parts(50_543_000, 0) - .saturating_add(Weight::from_parts(0, 72567)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: MessageQueue BookStateFor (r:1 w:1) - /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) - /// Storage: MessageQueue Pages (r:1 w:1) - /// Proof: MessageQueue Pages (max_values: None, max_size: Some(65585), added: 68060, mode: MaxEncodedLen) - fn execute_overweight_page_updated() -> Weight { - // Proof Size summary in bytes: - // Measured: `65667` - // Estimated: `72567` - // Minimum execution time: 69_294_000 picoseconds. - Weight::from_parts(69_294_000, 0) - .saturating_add(Weight::from_parts(0, 72567)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(2)) - } -} diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/weights/pallet_multisig.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/weights/pallet_multisig.rs deleted file mode 100644 index f4135e975fb..00000000000 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/weights/pallet_multisig.rs +++ /dev/null @@ -1,164 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! Autogenerated weights for `pallet_multisig` -//! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-07-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! WORST CASE MAP SIZE: `1000000` -//! 
HOSTNAME: `runner-ynta1nyy-project-238-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! EXECUTION: ``, WASM-EXECUTION: `Compiled`, CHAIN: `Some("bridge-hub-kusama-dev")`, DB CACHE: 1024 - -// Executed Command: -// ./target/production/polkadot-parachain -// benchmark -// pallet -// --chain=bridge-hub-kusama-dev -// --wasm-execution=compiled -// --pallet=pallet_multisig -// --no-storage-info -// --no-median-slopes -// --no-min-squares -// --extrinsic=* -// --steps=50 -// --repeat=20 -// --json -// --header=./file_header.txt -// --output=./parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/weights/ - -#![cfg_attr(rustfmt, rustfmt_skip)] -#![allow(unused_parens)] -#![allow(unused_imports)] -#![allow(missing_docs)] - -use frame_support::{traits::Get, weights::Weight}; -use core::marker::PhantomData; - -/// Weight functions for `pallet_multisig`. -pub struct WeightInfo(PhantomData); -impl pallet_multisig::WeightInfo for WeightInfo { - /// The range of component `z` is `[0, 10000]`. - fn as_multi_threshold_1(z: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 13_602_000 picoseconds. - Weight::from_parts(14_565_036, 0) - .saturating_add(Weight::from_parts(0, 0)) - // Standard Error: 8 - .saturating_add(Weight::from_parts(518, 0).saturating_mul(z.into())) - } - /// Storage: `Multisig::Multisigs` (r:1 w:1) - /// Proof: `Multisig::Multisigs` (`max_values`: None, `max_size`: Some(3346), added: 5821, mode: `MaxEncodedLen`) - /// The range of component `s` is `[2, 100]`. - /// The range of component `z` is `[0, 10000]`. - fn as_multi_create(s: u32, z: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `263 + s * (2 ±0)` - // Estimated: `6811` - // Minimum execution time: 46_075_000 picoseconds. - Weight::from_parts(33_730_493, 0) - .saturating_add(Weight::from_parts(0, 6811)) - // Standard Error: 1_049 - .saturating_add(Weight::from_parts(134_211, 0).saturating_mul(s.into())) - // Standard Error: 10 - .saturating_add(Weight::from_parts(1_448, 0).saturating_mul(z.into())) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `Multisig::Multisigs` (r:1 w:1) - /// Proof: `Multisig::Multisigs` (`max_values`: None, `max_size`: Some(3346), added: 5821, mode: `MaxEncodedLen`) - /// The range of component `s` is `[3, 100]`. - /// The range of component `z` is `[0, 10000]`. - fn as_multi_approve(s: u32, z: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `282` - // Estimated: `6811` - // Minimum execution time: 29_389_000 picoseconds. - Weight::from_parts(19_639_583, 0) - .saturating_add(Weight::from_parts(0, 6811)) - // Standard Error: 976 - .saturating_add(Weight::from_parts(106_598, 0).saturating_mul(s.into())) - // Standard Error: 9 - .saturating_add(Weight::from_parts(1_457, 0).saturating_mul(z.into())) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `Multisig::Multisigs` (r:1 w:1) - /// Proof: `Multisig::Multisigs` (`max_values`: None, `max_size`: Some(3346), added: 5821, mode: `MaxEncodedLen`) - /// Storage: `System::Account` (r:1 w:1) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - /// The range of component `s` is `[2, 100]`. - /// The range of component `z` is `[0, 10000]`. 
- fn as_multi_complete(s: u32, z: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `388 + s * (33 ±0)` - // Estimated: `6811` - // Minimum execution time: 50_438_000 picoseconds. - Weight::from_parts(36_195_308, 0) - .saturating_add(Weight::from_parts(0, 6811)) - // Standard Error: 1_689 - .saturating_add(Weight::from_parts(176_067, 0).saturating_mul(s.into())) - // Standard Error: 16 - .saturating_add(Weight::from_parts(1_545, 0).saturating_mul(z.into())) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `Multisig::Multisigs` (r:1 w:1) - /// Proof: `Multisig::Multisigs` (`max_values`: None, `max_size`: Some(3346), added: 5821, mode: `MaxEncodedLen`) - /// The range of component `s` is `[2, 100]`. - fn approve_as_multi_create(s: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `263 + s * (2 ±0)` - // Estimated: `6811` - // Minimum execution time: 32_134_000 picoseconds. - Weight::from_parts(32_149_785, 0) - .saturating_add(Weight::from_parts(0, 6811)) - // Standard Error: 1_082 - .saturating_add(Weight::from_parts(145_390, 0).saturating_mul(s.into())) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `Multisig::Multisigs` (r:1 w:1) - /// Proof: `Multisig::Multisigs` (`max_values`: None, `max_size`: Some(3346), added: 5821, mode: `MaxEncodedLen`) - /// The range of component `s` is `[2, 100]`. - fn approve_as_multi_approve(s: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `282` - // Estimated: `6811` - // Minimum execution time: 17_560_000 picoseconds. - Weight::from_parts(18_144_079, 0) - .saturating_add(Weight::from_parts(0, 6811)) - // Standard Error: 763 - .saturating_add(Weight::from_parts(114_298, 0).saturating_mul(s.into())) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `Multisig::Multisigs` (r:1 w:1) - /// Proof: `Multisig::Multisigs` (`max_values`: None, `max_size`: Some(3346), added: 5821, mode: `MaxEncodedLen`) - /// The range of component `s` is `[2, 100]`. - fn cancel_as_multi(s: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `454 + s * (1 ±0)` - // Estimated: `6811` - // Minimum execution time: 32_360_000 picoseconds. - Weight::from_parts(33_566_579, 0) - .saturating_add(Weight::from_parts(0, 6811)) - // Standard Error: 1_314 - .saturating_add(Weight::from_parts(126_583, 0).saturating_mul(s.into())) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } -} diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/weights/pallet_session.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/weights/pallet_session.rs deleted file mode 100644 index f508e1daaef..00000000000 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/weights/pallet_session.rs +++ /dev/null @@ -1,80 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -//! Autogenerated weights for `pallet_session` -//! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-07-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-ynta1nyy-project-238-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! EXECUTION: ``, WASM-EXECUTION: `Compiled`, CHAIN: `Some("bridge-hub-kusama-dev")`, DB CACHE: 1024 - -// Executed Command: -// ./target/production/polkadot-parachain -// benchmark -// pallet -// --chain=bridge-hub-kusama-dev -// --wasm-execution=compiled -// --pallet=pallet_session -// --no-storage-info -// --no-median-slopes -// --no-min-squares -// --extrinsic=* -// --steps=50 -// --repeat=20 -// --json -// --header=./file_header.txt -// --output=./parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/weights/ - -#![cfg_attr(rustfmt, rustfmt_skip)] -#![allow(unused_parens)] -#![allow(unused_imports)] -#![allow(missing_docs)] - -use frame_support::{traits::Get, weights::Weight}; -use core::marker::PhantomData; - -/// Weight functions for `pallet_session`. -pub struct WeightInfo(PhantomData); -impl pallet_session::WeightInfo for WeightInfo { - /// Storage: `Session::NextKeys` (r:1 w:1) - /// Proof: `Session::NextKeys` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// Storage: `Session::KeyOwner` (r:1 w:1) - /// Proof: `Session::KeyOwner` (`max_values`: None, `max_size`: None, mode: `Measured`) - fn set_keys() -> Weight { - // Proof Size summary in bytes: - // Measured: `297` - // Estimated: `3762` - // Minimum execution time: 17_170_000 picoseconds. - Weight::from_parts(17_523_000, 0) - .saturating_add(Weight::from_parts(0, 3762)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `Session::NextKeys` (r:1 w:1) - /// Proof: `Session::NextKeys` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// Storage: `Session::KeyOwner` (r:0 w:1) - /// Proof: `Session::KeyOwner` (`max_values`: None, `max_size`: None, mode: `Measured`) - fn purge_keys() -> Weight { - // Proof Size summary in bytes: - // Measured: `279` - // Estimated: `3744` - // Minimum execution time: 13_273_000 picoseconds. - Weight::from_parts(14_200_000, 0) - .saturating_add(Weight::from_parts(0, 3744)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(2)) - } -} diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/weights/pallet_timestamp.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/weights/pallet_timestamp.rs deleted file mode 100644 index 6162b1d48c5..00000000000 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/weights/pallet_timestamp.rs +++ /dev/null @@ -1,74 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -//! Autogenerated weights for `pallet_timestamp` -//! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-07-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-ynta1nyy-project-238-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! EXECUTION: ``, WASM-EXECUTION: `Compiled`, CHAIN: `Some("bridge-hub-kusama-dev")`, DB CACHE: 1024 - -// Executed Command: -// ./target/production/polkadot-parachain -// benchmark -// pallet -// --chain=bridge-hub-kusama-dev -// --wasm-execution=compiled -// --pallet=pallet_timestamp -// --no-storage-info -// --no-median-slopes -// --no-min-squares -// --extrinsic=* -// --steps=50 -// --repeat=20 -// --json -// --header=./file_header.txt -// --output=./parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/weights/ - -#![cfg_attr(rustfmt, rustfmt_skip)] -#![allow(unused_parens)] -#![allow(unused_imports)] -#![allow(missing_docs)] - -use frame_support::{traits::Get, weights::Weight}; -use core::marker::PhantomData; - -/// Weight functions for `pallet_timestamp`. -pub struct WeightInfo(PhantomData); -impl pallet_timestamp::WeightInfo for WeightInfo { - /// Storage: `Timestamp::Now` (r:1 w:1) - /// Proof: `Timestamp::Now` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) - /// Storage: `Aura::CurrentSlot` (r:1 w:0) - /// Proof: `Aura::CurrentSlot` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) - fn set() -> Weight { - // Proof Size summary in bytes: - // Measured: `49` - // Estimated: `1493` - // Minimum execution time: 7_794_000 picoseconds. - Weight::from_parts(8_075_000, 0) - .saturating_add(Weight::from_parts(0, 1493)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(1)) - } - fn on_finalize() -> Weight { - // Proof Size summary in bytes: - // Measured: `57` - // Estimated: `0` - // Minimum execution time: 3_338_000 picoseconds. - Weight::from_parts(3_471_000, 0) - .saturating_add(Weight::from_parts(0, 0)) - } -} diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/weights/pallet_utility.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/weights/pallet_utility.rs deleted file mode 100644 index 93d0ea596e7..00000000000 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/weights/pallet_utility.rs +++ /dev/null @@ -1,101 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! Autogenerated weights for `pallet_utility` -//! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-07-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! WORST CASE MAP SIZE: `1000000` -//! 
HOSTNAME: `runner-ynta1nyy-project-238-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! EXECUTION: ``, WASM-EXECUTION: `Compiled`, CHAIN: `Some("bridge-hub-kusama-dev")`, DB CACHE: 1024 - -// Executed Command: -// ./target/production/polkadot-parachain -// benchmark -// pallet -// --chain=bridge-hub-kusama-dev -// --wasm-execution=compiled -// --pallet=pallet_utility -// --no-storage-info -// --no-median-slopes -// --no-min-squares -// --extrinsic=* -// --steps=50 -// --repeat=20 -// --json -// --header=./file_header.txt -// --output=./parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/weights/ - -#![cfg_attr(rustfmt, rustfmt_skip)] -#![allow(unused_parens)] -#![allow(unused_imports)] -#![allow(missing_docs)] - -use frame_support::{traits::Get, weights::Weight}; -use core::marker::PhantomData; - -/// Weight functions for `pallet_utility`. -pub struct WeightInfo(PhantomData); -impl pallet_utility::WeightInfo for WeightInfo { - /// The range of component `c` is `[0, 1000]`. - fn batch(c: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 6_641_000 picoseconds. - Weight::from_parts(7_103_558, 0) - .saturating_add(Weight::from_parts(0, 0)) - // Standard Error: 1_172 - .saturating_add(Weight::from_parts(4_907_384, 0).saturating_mul(c.into())) - } - fn as_derivative() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 4_741_000 picoseconds. - Weight::from_parts(4_870_000, 0) - .saturating_add(Weight::from_parts(0, 0)) - } - /// The range of component `c` is `[0, 1000]`. - fn batch_all(c: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 6_561_000 picoseconds. - Weight::from_parts(12_252_064, 0) - .saturating_add(Weight::from_parts(0, 0)) - // Standard Error: 1_750 - .saturating_add(Weight::from_parts(5_193_404, 0).saturating_mul(c.into())) - } - fn dispatch_as() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 8_646_000 picoseconds. - Weight::from_parts(8_927_000, 0) - .saturating_add(Weight::from_parts(0, 0)) - } - /// The range of component `c` is `[0, 1000]`. - fn force_batch(c: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 6_726_000 picoseconds. - Weight::from_parts(8_025_954, 0) - .saturating_add(Weight::from_parts(0, 0)) - // Standard Error: 1_746 - .saturating_add(Weight::from_parts(4_936_537, 0).saturating_mul(c.into())) - } -} diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/weights/pallet_xcm.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/weights/pallet_xcm.rs deleted file mode 100644 index 7f4c2026f2b..00000000000 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/weights/pallet_xcm.rs +++ /dev/null @@ -1,323 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Cumulus. - -// Cumulus is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Cumulus is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Cumulus. If not, see . - -//! Autogenerated weights for `pallet_xcm` -//! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-11-09, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-yprdrvc7-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("bridge-hub-kusama-dev")`, DB CACHE: 1024 - -// Executed Command: -// target/production/polkadot-parachain -// benchmark -// pallet -// --steps=50 -// --repeat=20 -// --extrinsic=* -// --wasm-execution=compiled -// --heap-pages=4096 -// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json -// --pallet=pallet_xcm -// --chain=bridge-hub-kusama-dev -// --header=./cumulus/file_header.txt -// --output=./cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/weights/ - -#![cfg_attr(rustfmt, rustfmt_skip)] -#![allow(unused_parens)] -#![allow(unused_imports)] -#![allow(missing_docs)] - -use frame_support::{traits::Get, weights::Weight}; -use core::marker::PhantomData; - -/// Weight functions for `pallet_xcm`. -pub struct WeightInfo(PhantomData); -impl pallet_xcm::WeightInfo for WeightInfo { - /// Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) - /// Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) - /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) - /// Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) - /// Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) - /// Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) - /// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - fn send() -> Weight { - // Proof Size summary in bytes: - // Measured: `38` - // Estimated: `3503` - // Minimum execution time: 22_520_000 picoseconds. - Weight::from_parts(23_167_000, 0) - .saturating_add(Weight::from_parts(0, 3503)) - .saturating_add(T::DbWeight::get().reads(6)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `ParachainInfo::ParachainId` (r:1 w:0) - /// Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - fn teleport_assets() -> Weight { - // Proof Size summary in bytes: - // Measured: `32` - // Estimated: `1489` - // Minimum execution time: 19_639_000 picoseconds. - Weight::from_parts(20_230_000, 0) - .saturating_add(Weight::from_parts(0, 1489)) - .saturating_add(T::DbWeight::get().reads(1)) - } - /// Storage: `Benchmark::Override` (r:0 w:0) - /// Proof: `Benchmark::Override` (`max_values`: None, `max_size`: None, mode: `Measured`) - fn reserve_transfer_assets() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 18_446_744_073_709_551_000 picoseconds. 
- Weight::from_parts(18_446_744_073_709_551_000, 0) - .saturating_add(Weight::from_parts(0, 0)) - } - /// Storage: `Benchmark::Override` (r:0 w:0) - /// Proof: `Benchmark::Override` (`max_values`: None, `max_size`: None, mode: `Measured`) - fn execute() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 18_446_744_073_709_551_000 picoseconds. - Weight::from_parts(18_446_744_073_709_551_000, 0) - .saturating_add(Weight::from_parts(0, 0)) - } - /// Storage: `PolkadotXcm::SupportedVersion` (r:0 w:1) - /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) - fn force_xcm_version() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 7_175_000 picoseconds. - Weight::from_parts(7_496_000, 0) - .saturating_add(Weight::from_parts(0, 0)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `PolkadotXcm::SafeXcmVersion` (r:0 w:1) - /// Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - fn force_default_xcm_version() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 2_126_000 picoseconds. - Weight::from_parts(2_359_000, 0) - .saturating_add(Weight::from_parts(0, 0)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `PolkadotXcm::VersionNotifiers` (r:1 w:1) - /// Proof: `PolkadotXcm::VersionNotifiers` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// Storage: `PolkadotXcm::QueryCounter` (r:1 w:1) - /// Proof: `PolkadotXcm::QueryCounter` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) - /// Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) - /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) - /// Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) - /// Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) - /// Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) - /// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `PolkadotXcm::Queries` (r:0 w:1) - /// Proof: `PolkadotXcm::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`) - fn force_subscribe_version_notify() -> Weight { - // Proof Size summary in bytes: - // Measured: `38` - // Estimated: `3503` - // Minimum execution time: 27_229_000 picoseconds. 
- Weight::from_parts(27_673_000, 0) - .saturating_add(Weight::from_parts(0, 3503)) - .saturating_add(T::DbWeight::get().reads(8)) - .saturating_add(T::DbWeight::get().writes(5)) - } - /// Storage: `PolkadotXcm::VersionNotifiers` (r:1 w:1) - /// Proof: `PolkadotXcm::VersionNotifiers` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) - /// Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) - /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) - /// Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) - /// Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) - /// Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) - /// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `PolkadotXcm::Queries` (r:0 w:1) - /// Proof: `PolkadotXcm::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`) - fn force_unsubscribe_version_notify() -> Weight { - // Proof Size summary in bytes: - // Measured: `255` - // Estimated: `3720` - // Minimum execution time: 29_812_000 picoseconds. - Weight::from_parts(30_649_000, 0) - .saturating_add(Weight::from_parts(0, 3720)) - .saturating_add(T::DbWeight::get().reads(7)) - .saturating_add(T::DbWeight::get().writes(4)) - } - /// Storage: `PolkadotXcm::XcmExecutionSuspended` (r:0 w:1) - /// Proof: `PolkadotXcm::XcmExecutionSuspended` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - fn force_suspension() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 2_212_000 picoseconds. - Weight::from_parts(2_367_000, 0) - .saturating_add(Weight::from_parts(0, 0)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `PolkadotXcm::SupportedVersion` (r:4 w:2) - /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) - fn migrate_supported_version() -> Weight { - // Proof Size summary in bytes: - // Measured: `95` - // Estimated: `10985` - // Minimum execution time: 14_768_000 picoseconds. - Weight::from_parts(15_036_000, 0) - .saturating_add(Weight::from_parts(0, 10985)) - .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `PolkadotXcm::VersionNotifiers` (r:4 w:2) - /// Proof: `PolkadotXcm::VersionNotifiers` (`max_values`: None, `max_size`: None, mode: `Measured`) - fn migrate_version_notifiers() -> Weight { - // Proof Size summary in bytes: - // Measured: `99` - // Estimated: `10989` - // Minimum execution time: 14_662_000 picoseconds. 
- Weight::from_parts(15_155_000, 0) - .saturating_add(Weight::from_parts(0, 10989)) - .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:5 w:0) - /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) - fn already_notified_target() -> Weight { - // Proof Size summary in bytes: - // Measured: `106` - // Estimated: `13471` - // Minimum execution time: 16_198_000 picoseconds. - Weight::from_parts(16_456_000, 0) - .saturating_add(Weight::from_parts(0, 13471)) - .saturating_add(T::DbWeight::get().reads(5)) - } - /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:2 w:1) - /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) - /// Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) - /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) - /// Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) - /// Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) - /// Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) - /// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - fn notify_current_targets() -> Weight { - // Proof Size summary in bytes: - // Measured: `106` - // Estimated: `6046` - // Minimum execution time: 25_825_000 picoseconds. - Weight::from_parts(26_744_000, 0) - .saturating_add(Weight::from_parts(0, 6046)) - .saturating_add(T::DbWeight::get().reads(8)) - .saturating_add(T::DbWeight::get().writes(3)) - } - /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:3 w:0) - /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) - fn notify_target_migration_fail() -> Weight { - // Proof Size summary in bytes: - // Measured: `136` - // Estimated: `8551` - // Minimum execution time: 8_622_000 picoseconds. - Weight::from_parts(8_931_000, 0) - .saturating_add(Weight::from_parts(0, 8551)) - .saturating_add(T::DbWeight::get().reads(3)) - } - /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:4 w:2) - /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) - fn migrate_version_notify_targets() -> Weight { - // Proof Size summary in bytes: - // Measured: `106` - // Estimated: `10996` - // Minimum execution time: 15_397_000 picoseconds. 
- Weight::from_parts(15_650_000, 0) - .saturating_add(Weight::from_parts(0, 10996)) - .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:4 w:2) - /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) - /// Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) - /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) - /// Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) - /// Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) - /// Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) - /// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - fn migrate_and_notify_old_targets() -> Weight { - // Proof Size summary in bytes: - // Measured: `112` - // Estimated: `11002` - // Minimum execution time: 32_330_000 picoseconds. - Weight::from_parts(33_255_000, 0) - .saturating_add(Weight::from_parts(0, 11002)) - .saturating_add(T::DbWeight::get().reads(10)) - .saturating_add(T::DbWeight::get().writes(4)) - } - /// Storage: `PolkadotXcm::QueryCounter` (r:1 w:1) - /// Proof: `PolkadotXcm::QueryCounter` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `PolkadotXcm::Queries` (r:0 w:1) - /// Proof: `PolkadotXcm::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`) - fn new_query() -> Weight { - // Proof Size summary in bytes: - // Measured: `32` - // Estimated: `1517` - // Minimum execution time: 4_142_000 picoseconds. - Weight::from_parts(4_308_000, 0) - .saturating_add(Weight::from_parts(0, 1517)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `PolkadotXcm::Queries` (r:1 w:1) - /// Proof: `PolkadotXcm::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`) - fn take_response() -> Weight { - // Proof Size summary in bytes: - // Measured: `7669` - // Estimated: `11134` - // Minimum execution time: 25_814_000 picoseconds. - Weight::from_parts(26_213_000, 0) - .saturating_add(Weight::from_parts(0, 11134)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } -} diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/weights/paritydb_weights.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/weights/paritydb_weights.rs deleted file mode 100644 index 25679703831..00000000000 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/weights/paritydb_weights.rs +++ /dev/null @@ -1,63 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
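One detail worth calling out in the pallet_xcm weight functions above: `Weight` is two-dimensional, and the generated code always adds the reference time and the proof-size (PoV) bound as two separate `from_parts` terms. A small sketch of that pattern, with placeholder numbers:

```rust
use frame_support::weights::Weight;

/// Illustrative only: the `from_parts(x, 0).saturating_add(from_parts(0, y))` idiom used
/// by the generated functions is a verbose way of building `Weight::from_parts(x, y)`,
/// since the two components are added independently.
fn two_dimensional_weight() -> Weight {
    let ref_time_ps: u64 = 22_520_000; // computation time, picoseconds (placeholder)
    let proof_size_bytes: u64 = 3_503; // PoV size bound, bytes (placeholder)

    Weight::from_parts(ref_time_ps, 0)
        .saturating_add(Weight::from_parts(0, proof_size_bytes))
}
```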
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -pub mod constants { - use frame_support::{ - parameter_types, - weights::{constants, RuntimeDbWeight}, - }; - - parameter_types! { - /// `ParityDB` can be enabled with a feature flag, but is still experimental. These weights - /// are available for brave runtime engineers who may want to try this out as default. - pub const ParityDbWeight: RuntimeDbWeight = RuntimeDbWeight { - read: 8_000 * constants::WEIGHT_REF_TIME_PER_NANOS, - write: 50_000 * constants::WEIGHT_REF_TIME_PER_NANOS, - }; - } - - #[cfg(test)] - mod test_db_weights { - use super::constants::ParityDbWeight as W; - use frame_support::weights::constants; - - /// Checks that all weights exist and have sane values. - // NOTE: If this test fails but you are sure that the generated values are fine, - // you can delete it. - #[test] - fn sane() { - // At least 1 µs. - assert!( - W::get().reads(1).ref_time() >= constants::WEIGHT_REF_TIME_PER_MICROS, - "Read weight should be at least 1 µs." - ); - assert!( - W::get().writes(1).ref_time() >= constants::WEIGHT_REF_TIME_PER_MICROS, - "Write weight should be at least 1 µs." - ); - // At most 1 ms. - assert!( - W::get().reads(1).ref_time() <= constants::WEIGHT_REF_TIME_PER_MILLIS, - "Read weight should be at most 1 ms." - ); - assert!( - W::get().writes(1).ref_time() <= constants::WEIGHT_REF_TIME_PER_MILLIS, - "Write weight should be at most 1 ms." - ); - } - } -} diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/weights/rocksdb_weights.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/weights/rocksdb_weights.rs deleted file mode 100644 index 3dd817aa6f1..00000000000 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/weights/rocksdb_weights.rs +++ /dev/null @@ -1,63 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -pub mod constants { - use frame_support::{ - parameter_types, - weights::{constants, RuntimeDbWeight}, - }; - - parameter_types! { - /// By default, Substrate uses `RocksDB`, so this will be the weight used throughout - /// the runtime. - pub const RocksDbWeight: RuntimeDbWeight = RuntimeDbWeight { - read: 25_000 * constants::WEIGHT_REF_TIME_PER_NANOS, - write: 100_000 * constants::WEIGHT_REF_TIME_PER_NANOS, - }; - } - - #[cfg(test)] - mod test_db_weights { - use super::constants::RocksDbWeight as W; - use frame_support::weights::constants; - - /// Checks that all weights exist and have sane values. 
- // NOTE: If this test fails but you are sure that the generated values are fine, - // you can delete it. - #[test] - fn sane() { - // At least 1 µs. - assert!( - W::get().reads(1).ref_time() >= constants::WEIGHT_REF_TIME_PER_MICROS, - "Read weight should be at least 1 µs." - ); - assert!( - W::get().writes(1).ref_time() >= constants::WEIGHT_REF_TIME_PER_MICROS, - "Write weight should be at least 1 µs." - ); - // At most 1 ms. - assert!( - W::get().reads(1).ref_time() <= constants::WEIGHT_REF_TIME_PER_MILLIS, - "Read weight should be at most 1 ms." - ); - assert!( - W::get().writes(1).ref_time() <= constants::WEIGHT_REF_TIME_PER_MILLIS, - "Write weight should be at most 1 ms." - ); - } - } -} diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/weights/xcm/mod.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/weights/xcm/mod.rs deleted file mode 100644 index 71732961d3d..00000000000 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/weights/xcm/mod.rs +++ /dev/null @@ -1,244 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -mod pallet_xcm_benchmarks_fungible; -mod pallet_xcm_benchmarks_generic; - -use crate::{xcm_config::MaxAssetsIntoHolding, Runtime}; -use frame_support::weights::Weight; -use pallet_xcm_benchmarks_fungible::WeightInfo as XcmFungibleWeight; -use pallet_xcm_benchmarks_generic::WeightInfo as XcmGeneric; -use sp_std::prelude::*; -use xcm::{latest::prelude::*, DoubleEncoded}; - -trait WeighMultiAssets { - fn weigh_multi_assets(&self, weight: Weight) -> Weight; -} - -const MAX_ASSETS: u64 = 100; - -impl WeighMultiAssets for MultiAssetFilter { - fn weigh_multi_assets(&self, weight: Weight) -> Weight { - match self { - Self::Definite(assets) => weight.saturating_mul(assets.inner().iter().count() as u64), - Self::Wild(asset) => match asset { - All => weight.saturating_mul(MAX_ASSETS), - AllOf { fun, .. } => match fun { - WildFungibility::Fungible => weight, - // Magic number 2 has to do with the fact that we could have up to 2 times - // MaxAssetsIntoHolding in the worst-case scenario. - WildFungibility::NonFungible => - weight.saturating_mul((MaxAssetsIntoHolding::get() * 2) as u64), - }, - AllCounted(count) => weight.saturating_mul(MAX_ASSETS.min(*count as u64)), - AllOfCounted { count, .. 
} => weight.saturating_mul(MAX_ASSETS.min(*count as u64)), - }, - } - } -} - -impl WeighMultiAssets for MultiAssets { - fn weigh_multi_assets(&self, weight: Weight) -> Weight { - weight.saturating_mul(self.inner().iter().count() as u64) - } -} - -pub struct BridgeHubKusamaXcmWeight(core::marker::PhantomData); -impl XcmWeightInfo for BridgeHubKusamaXcmWeight { - fn withdraw_asset(assets: &MultiAssets) -> Weight { - assets.weigh_multi_assets(XcmFungibleWeight::::withdraw_asset()) - } - fn reserve_asset_deposited(assets: &MultiAssets) -> Weight { - assets.weigh_multi_assets(XcmFungibleWeight::::reserve_asset_deposited()) - } - fn receive_teleported_asset(assets: &MultiAssets) -> Weight { - assets.weigh_multi_assets(XcmFungibleWeight::::receive_teleported_asset()) - } - fn query_response( - _query_id: &u64, - _response: &Response, - _max_weight: &Weight, - _querier: &Option, - ) -> Weight { - XcmGeneric::::query_response() - } - fn transfer_asset(assets: &MultiAssets, _dest: &MultiLocation) -> Weight { - assets.weigh_multi_assets(XcmFungibleWeight::::transfer_asset()) - } - fn transfer_reserve_asset( - assets: &MultiAssets, - _dest: &MultiLocation, - _xcm: &Xcm<()>, - ) -> Weight { - assets.weigh_multi_assets(XcmFungibleWeight::::transfer_reserve_asset()) - } - fn transact( - _origin_type: &OriginKind, - _require_weight_at_most: &Weight, - _call: &DoubleEncoded, - ) -> Weight { - XcmGeneric::::transact() - } - fn hrmp_new_channel_open_request( - _sender: &u32, - _max_message_size: &u32, - _max_capacity: &u32, - ) -> Weight { - // XCM Executor does not currently support HRMP channel operations - Weight::MAX - } - fn hrmp_channel_accepted(_recipient: &u32) -> Weight { - // XCM Executor does not currently support HRMP channel operations - Weight::MAX - } - fn hrmp_channel_closing(_initiator: &u32, _sender: &u32, _recipient: &u32) -> Weight { - // XCM Executor does not currently support HRMP channel operations - Weight::MAX - } - fn clear_origin() -> Weight { - XcmGeneric::::clear_origin() - } - fn descend_origin(_who: &InteriorMultiLocation) -> Weight { - XcmGeneric::::descend_origin() - } - fn report_error(_query_response_info: &QueryResponseInfo) -> Weight { - XcmGeneric::::report_error() - } - - fn deposit_asset(assets: &MultiAssetFilter, _dest: &MultiLocation) -> Weight { - assets.weigh_multi_assets(XcmFungibleWeight::::deposit_asset()) - } - fn deposit_reserve_asset( - assets: &MultiAssetFilter, - _dest: &MultiLocation, - _xcm: &Xcm<()>, - ) -> Weight { - assets.weigh_multi_assets(XcmFungibleWeight::::deposit_reserve_asset()) - } - fn exchange_asset(_give: &MultiAssetFilter, _receive: &MultiAssets, _maximal: &bool) -> Weight { - Weight::MAX - } - fn initiate_reserve_withdraw( - assets: &MultiAssetFilter, - _reserve: &MultiLocation, - _xcm: &Xcm<()>, - ) -> Weight { - assets.weigh_multi_assets(XcmFungibleWeight::::initiate_reserve_withdraw()) - } - fn initiate_teleport( - assets: &MultiAssetFilter, - _dest: &MultiLocation, - _xcm: &Xcm<()>, - ) -> Weight { - assets.weigh_multi_assets(XcmFungibleWeight::::initiate_teleport()) - } - fn report_holding(_response_info: &QueryResponseInfo, _assets: &MultiAssetFilter) -> Weight { - XcmGeneric::::report_holding() - } - fn buy_execution(_fees: &MultiAsset, _weight_limit: &WeightLimit) -> Weight { - XcmGeneric::::buy_execution() - } - fn refund_surplus() -> Weight { - XcmGeneric::::refund_surplus() - } - fn set_error_handler(_xcm: &Xcm) -> Weight { - XcmGeneric::::set_error_handler() - } - fn set_appendix(_xcm: &Xcm) -> Weight { - 
XcmGeneric::::set_appendix() - } - fn clear_error() -> Weight { - XcmGeneric::::clear_error() - } - fn claim_asset(_assets: &MultiAssets, _ticket: &MultiLocation) -> Weight { - XcmGeneric::::claim_asset() - } - fn trap(_code: &u64) -> Weight { - XcmGeneric::::trap() - } - fn subscribe_version(_query_id: &QueryId, _max_response_weight: &Weight) -> Weight { - XcmGeneric::::subscribe_version() - } - fn unsubscribe_version() -> Weight { - XcmGeneric::::unsubscribe_version() - } - fn burn_asset(assets: &MultiAssets) -> Weight { - assets.weigh_multi_assets(XcmGeneric::::burn_asset()) - } - fn expect_asset(assets: &MultiAssets) -> Weight { - assets.weigh_multi_assets(XcmGeneric::::expect_asset()) - } - fn expect_origin(_origin: &Option) -> Weight { - XcmGeneric::::expect_origin() - } - fn expect_error(_error: &Option<(u32, XcmError)>) -> Weight { - XcmGeneric::::expect_error() - } - fn expect_transact_status(_transact_status: &MaybeErrorCode) -> Weight { - XcmGeneric::::expect_transact_status() - } - fn query_pallet(_module_name: &Vec, _response_info: &QueryResponseInfo) -> Weight { - XcmGeneric::::query_pallet() - } - fn expect_pallet( - _index: &u32, - _name: &Vec, - _module_name: &Vec, - _crate_major: &u32, - _min_crate_minor: &u32, - ) -> Weight { - XcmGeneric::::expect_pallet() - } - fn report_transact_status(_response_info: &QueryResponseInfo) -> Weight { - XcmGeneric::::report_transact_status() - } - fn clear_transact_status() -> Weight { - XcmGeneric::::clear_transact_status() - } - fn universal_origin(_: &Junction) -> Weight { - Weight::MAX - } - fn export_message(_: &NetworkId, _: &Junctions, _: &Xcm<()>) -> Weight { - Weight::MAX - } - fn lock_asset(_: &MultiAsset, _: &MultiLocation) -> Weight { - Weight::MAX - } - fn unlock_asset(_: &MultiAsset, _: &MultiLocation) -> Weight { - Weight::MAX - } - fn note_unlockable(_: &MultiAsset, _: &MultiLocation) -> Weight { - Weight::MAX - } - fn request_unlock(_: &MultiAsset, _: &MultiLocation) -> Weight { - Weight::MAX - } - fn set_fees_mode(_: &bool) -> Weight { - XcmGeneric::::set_fees_mode() - } - fn set_topic(_topic: &[u8; 32]) -> Weight { - XcmGeneric::::set_topic() - } - fn clear_topic() -> Weight { - XcmGeneric::::clear_topic() - } - fn alias_origin(_: &MultiLocation) -> Weight { - // XCM Executor does not currently support alias origin operations - Weight::MAX - } - fn unpaid_execution(_: &WeightLimit, _: &Option) -> Weight { - XcmGeneric::::unpaid_execution() - } -} diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/weights/xcm/pallet_xcm_benchmarks_fungible.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/weights/xcm/pallet_xcm_benchmarks_fungible.rs deleted file mode 100644 index ff3cb452a8a..00000000000 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/weights/xcm/pallet_xcm_benchmarks_fungible.rs +++ /dev/null @@ -1,187 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! 
Autogenerated weights for `pallet_xcm_benchmarks::fungible` -//! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-09-28, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-nbnwcyh-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! WASM-EXECUTION: Compiled, CHAIN: Some("bridge-hub-kusama-dev"), DB CACHE: 1024 - -// Executed Command: -// target/production/polkadot-parachain -// benchmark -// pallet -// --steps=50 -// --repeat=20 -// --extrinsic=* -// --wasm-execution=compiled -// --heap-pages=4096 -// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json -// --pallet=pallet_xcm_benchmarks::fungible -// --chain=bridge-hub-kusama-dev -// --header=./cumulus/file_header.txt -// --template=./cumulus/templates/xcm-bench-template.hbs -// --output=./cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/weights/xcm/ - -#![cfg_attr(rustfmt, rustfmt_skip)] -#![allow(unused_parens)] -#![allow(unused_imports)] - -use frame_support::{traits::Get, weights::Weight}; -use sp_std::marker::PhantomData; - -/// Weights for `pallet_xcm_benchmarks::fungible`. -pub struct WeightInfo(PhantomData); -impl WeightInfo { - // Storage: `System::Account` (r:1 w:1) - // Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - pub fn withdraw_asset() -> Weight { - // Proof Size summary in bytes: - // Measured: `101` - // Estimated: `3593` - // Minimum execution time: 25_447_000 picoseconds. - Weight::from_parts(25_810_000, 3593) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - // Storage: `System::Account` (r:2 w:2) - // Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - pub fn transfer_asset() -> Weight { - // Proof Size summary in bytes: - // Measured: `153` - // Estimated: `6196` - // Minimum execution time: 53_908_000 picoseconds. - Weight::from_parts(54_568_000, 6196) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(2)) - } - // Storage: `System::Account` (r:2 w:2) - // Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - // Storage: `ParachainInfo::ParachainId` (r:1 w:0) - // Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - // Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) - // Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) - // Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) - // Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - // Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) - // Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - // Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) - // Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - // Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) - // Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - pub fn transfer_reserve_asset() -> Weight { - // Proof Size summary in bytes: - // Measured: `223` - // Estimated: `6196` - // Minimum execution time: 79_923_000 picoseconds. 
- Weight::from_parts(80_790_000, 6196) - .saturating_add(T::DbWeight::get().reads(8)) - .saturating_add(T::DbWeight::get().writes(4)) - } - // Storage: `Benchmark::Override` (r:0 w:0) - // Proof: `Benchmark::Override` (`max_values`: None, `max_size`: None, mode: `Measured`) - pub fn reserve_asset_deposited() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 18_446_744_073_709_551_000 picoseconds. - Weight::from_parts(18_446_744_073_709_551_000, 0) - } - // Storage: `ParachainInfo::ParachainId` (r:1 w:0) - // Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - // Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) - // Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) - // Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) - // Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - // Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) - // Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - // Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) - // Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - // Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) - // Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - pub fn initiate_reserve_withdraw() -> Weight { - // Proof Size summary in bytes: - // Measured: `70` - // Estimated: `3535` - // Minimum execution time: 31_923_000 picoseconds. - Weight::from_parts(32_499_000, 3535) - .saturating_add(T::DbWeight::get().reads(6)) - .saturating_add(T::DbWeight::get().writes(2)) - } - pub fn receive_teleported_asset() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 3_903_000 picoseconds. - Weight::from_parts(4_065_000, 0) - } - // Storage: `System::Account` (r:1 w:1) - // Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - pub fn deposit_asset() -> Weight { - // Proof Size summary in bytes: - // Measured: `52` - // Estimated: `3593` - // Minimum execution time: 26_987_000 picoseconds. 
- Weight::from_parts(27_486_000, 3593) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - // Storage: `System::Account` (r:1 w:1) - // Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - // Storage: `ParachainInfo::ParachainId` (r:1 w:0) - // Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - // Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) - // Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) - // Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) - // Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - // Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) - // Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - // Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) - // Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - // Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) - // Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - pub fn deposit_reserve_asset() -> Weight { - // Proof Size summary in bytes: - // Measured: `122` - // Estimated: `3593` - // Minimum execution time: 56_012_000 picoseconds. - Weight::from_parts(58_067_000, 3593) - .saturating_add(T::DbWeight::get().reads(7)) - .saturating_add(T::DbWeight::get().writes(3)) - } - // Storage: `ParachainInfo::ParachainId` (r:1 w:0) - // Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - // Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) - // Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) - // Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) - // Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - // Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) - // Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - // Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) - // Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - // Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) - // Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - pub fn initiate_teleport() -> Weight { - // Proof Size summary in bytes: - // Measured: `70` - // Estimated: `3535` - // Minimum execution time: 32_350_000 picoseconds. - Weight::from_parts(33_403_000, 3535) - .saturating_add(T::DbWeight::get().reads(6)) - .saturating_add(T::DbWeight::get().writes(2)) - } -} diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/weights/xcm/pallet_xcm_benchmarks_generic.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/weights/xcm/pallet_xcm_benchmarks_generic.rs deleted file mode 100644 index c5c14e6917e..00000000000 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/weights/xcm/pallet_xcm_benchmarks_generic.rs +++ /dev/null @@ -1,328 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. 
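The per-instruction figures above (from `pallet_xcm_benchmarks::fungible`) are per-asset costs; the `WeighMultiAssets` helper in the deleted `xcm/mod.rs` scales them by the number of assets in the message, capping wildcard filters at `MAX_ASSETS` (100). A minimal sketch of that scaling, with illustrative function names:

```rust
use frame_support::weights::Weight;

const MAX_ASSETS: u64 = 100;

/// Illustrative only: a definite asset list is weighed as `per_asset * number_of_assets`.
fn weigh_definite(per_asset: Weight, asset_count: u64) -> Weight {
    per_asset.saturating_mul(asset_count)
}

/// Illustrative only: a counted wildcard is capped at `MAX_ASSETS`, matching the
/// `AllCounted` / `AllOfCounted` arms of the deleted implementation.
fn weigh_counted_wildcard(per_asset: Weight, count: u64) -> Weight {
    per_asset.saturating_mul(MAX_ASSETS.min(count))
}
```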
-// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! Autogenerated weights for `pallet_xcm_benchmarks::generic` -//! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-07-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-ynta1nyy-project-238-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! EXECUTION: , WASM-EXECUTION: Compiled, CHAIN: Some("bridge-hub-kusama-dev"), DB CACHE: 1024 - -// Executed Command: -// ./target/production/polkadot-parachain -// benchmark -// pallet -// --template=./templates/xcm-bench-template.hbs -// --chain=bridge-hub-kusama-dev -// --wasm-execution=compiled -// --pallet=pallet_xcm_benchmarks::generic -// --no-storage-info -// --no-median-slopes -// --no-min-squares -// --extrinsic=* -// --steps=50 -// --repeat=20 -// --json -// --header=./file_header.txt -// --output=./parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/weights/xcm/pallet_xcm_benchmarks_generic.rs - -#![cfg_attr(rustfmt, rustfmt_skip)] -#![allow(unused_parens)] -#![allow(unused_imports)] - -use frame_support::{traits::Get, weights::Weight}; -use sp_std::marker::PhantomData; - -/// Weights for `pallet_xcm_benchmarks::generic`. -pub struct WeightInfo(PhantomData); -impl WeightInfo { - // Storage: `ParachainInfo::ParachainId` (r:1 w:0) - // Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - // Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) - // Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) - // Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) - // Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - // Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) - // Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - // Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) - // Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - // Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) - // Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - pub fn report_holding() -> Weight { - // Proof Size summary in bytes: - // Measured: `70` - // Estimated: `3535` - // Minimum execution time: 33_141_000 picoseconds. - Weight::from_parts(34_380_000, 3535) - .saturating_add(T::DbWeight::get().reads(6)) - .saturating_add(T::DbWeight::get().writes(2)) - } - pub fn buy_execution() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 2_803_000 picoseconds. 
- Weight::from_parts(2_904_000, 0) - } - // Storage: `PolkadotXcm::Queries` (r:1 w:0) - // Proof: `PolkadotXcm::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`) - pub fn query_response() -> Weight { - // Proof Size summary in bytes: - // Measured: `32` - // Estimated: `3497` - // Minimum execution time: 10_308_000 picoseconds. - Weight::from_parts(10_753_000, 3497) - .saturating_add(T::DbWeight::get().reads(1)) - } - pub fn transact() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 11_499_000 picoseconds. - Weight::from_parts(11_786_000, 0) - } - pub fn refund_surplus() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 3_102_000 picoseconds. - Weight::from_parts(3_161_000, 0) - } - pub fn set_error_handler() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 2_678_000 picoseconds. - Weight::from_parts(2_795_000, 0) - } - pub fn set_appendix() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 2_685_000 picoseconds. - Weight::from_parts(2_758_000, 0) - } - pub fn clear_error() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 2_590_000 picoseconds. - Weight::from_parts(2_754_000, 0) - } - pub fn descend_origin() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 3_297_000 picoseconds. - Weight::from_parts(3_419_000, 0) - } - pub fn clear_origin() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 2_606_000 picoseconds. - Weight::from_parts(2_717_000, 0) - } - // Storage: `ParachainInfo::ParachainId` (r:1 w:0) - // Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - // Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) - // Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) - // Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) - // Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - // Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) - // Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - // Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) - // Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - // Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) - // Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - pub fn report_error() -> Weight { - // Proof Size summary in bytes: - // Measured: `70` - // Estimated: `3535` - // Minimum execution time: 26_242_000 picoseconds. - Weight::from_parts(29_220_000, 3535) - .saturating_add(T::DbWeight::get().reads(6)) - .saturating_add(T::DbWeight::get().writes(2)) - } - // Storage: `PolkadotXcm::AssetTraps` (r:1 w:1) - // Proof: `PolkadotXcm::AssetTraps` (`max_values`: None, `max_size`: None, mode: `Measured`) - pub fn claim_asset() -> Weight { - // Proof Size summary in bytes: - // Measured: `90` - // Estimated: `3555` - // Minimum execution time: 14_106_000 picoseconds. 
- Weight::from_parts(14_535_000, 3555) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - pub fn trap() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 2_634_000 picoseconds. - Weight::from_parts(2_763_000, 0) - } - // Storage: `PolkadotXcm::VersionNotifyTargets` (r:1 w:1) - // Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) - // Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) - // Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) - // Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) - // Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - // Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) - // Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - // Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) - // Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - // Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) - // Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - pub fn subscribe_version() -> Weight { - // Proof Size summary in bytes: - // Measured: `38` - // Estimated: `3503` - // Minimum execution time: 27_802_000 picoseconds. - Weight::from_parts(28_495_000, 3503) - .saturating_add(T::DbWeight::get().reads(6)) - .saturating_add(T::DbWeight::get().writes(3)) - } - // Storage: `PolkadotXcm::VersionNotifyTargets` (r:0 w:1) - // Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) - pub fn unsubscribe_version() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 4_683_000 picoseconds. - Weight::from_parts(4_907_000, 0) - .saturating_add(T::DbWeight::get().writes(1)) - } - pub fn burn_asset() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 3_941_000 picoseconds. - Weight::from_parts(4_080_000, 0) - } - pub fn expect_asset() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 2_775_000 picoseconds. - Weight::from_parts(2_908_000, 0) - } - pub fn expect_origin() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 2_743_000 picoseconds. - Weight::from_parts(2_863_000, 0) - } - pub fn expect_error() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 2_641_000 picoseconds. - Weight::from_parts(2_771_000, 0) - } - pub fn expect_transact_status() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 2_838_000 picoseconds. 
- Weight::from_parts(2_950_000, 0) - } - // Storage: `ParachainInfo::ParachainId` (r:1 w:0) - // Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - // Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) - // Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) - // Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) - // Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - // Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) - // Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - // Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) - // Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - // Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) - // Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - pub fn query_pallet() -> Weight { - // Proof Size summary in bytes: - // Measured: `70` - // Estimated: `3535` - // Minimum execution time: 29_284_000 picoseconds. - Weight::from_parts(29_867_000, 3535) - .saturating_add(T::DbWeight::get().reads(6)) - .saturating_add(T::DbWeight::get().writes(2)) - } - pub fn expect_pallet() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 4_734_000 picoseconds. - Weight::from_parts(4_876_000, 0) - } - // Storage: `ParachainInfo::ParachainId` (r:1 w:0) - // Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - // Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) - // Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) - // Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) - // Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - // Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) - // Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - // Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) - // Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - // Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) - // Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - pub fn report_transact_status() -> Weight { - // Proof Size summary in bytes: - // Measured: `70` - // Estimated: `3535` - // Minimum execution time: 26_154_000 picoseconds. - Weight::from_parts(26_851_000, 3535) - .saturating_add(T::DbWeight::get().reads(6)) - .saturating_add(T::DbWeight::get().writes(2)) - } - pub fn clear_transact_status() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 2_678_000 picoseconds. - Weight::from_parts(2_748_000, 0) - } - pub fn set_topic() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 2_585_000 picoseconds. - Weight::from_parts(2_697_000, 0) - } - pub fn clear_topic() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 2_576_000 picoseconds. 
- Weight::from_parts(2_701_000, 0) - } - pub fn set_fees_mode() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 2_597_000 picoseconds. - Weight::from_parts(2_735_000, 0) - } - pub fn unpaid_execution() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 2_744_000 picoseconds. - Weight::from_parts(2_809_000, 0) - } -} diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/xcm_config.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/xcm_config.rs deleted file mode 100644 index b3703eee830..00000000000 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/src/xcm_config.rs +++ /dev/null @@ -1,280 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Cumulus. - -// Cumulus is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Cumulus is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Cumulus. If not, see . - -use super::{ - AccountId, AllPalletsWithSystem, Balances, ParachainInfo, ParachainSystem, PolkadotXcm, - Runtime, RuntimeCall, RuntimeEvent, RuntimeOrigin, TransactionByteFee, WeightToFee, XcmpQueue, - CENTS, -}; -use frame_support::{ - match_types, parameter_types, - traits::{ConstU32, Contains, Everything, Nothing}, -}; -use frame_system::EnsureRoot; -use pallet_xcm::XcmPassthrough; -use parachains_common::{impls::ToStakingPot, xcm_config::ConcreteAssetFromSystem}; -use polkadot_parachain_primitives::primitives::Sibling; -use polkadot_runtime_common::xcm_sender::ExponentialPrice; -use xcm::latest::prelude::*; -use xcm_builder::{ - AccountId32Aliases, AllowExplicitUnpaidExecutionFrom, AllowKnownQueryResponses, - AllowSubscriptionsFrom, AllowTopLevelPaidExecutionFrom, CurrencyAdapter, - DenyReserveTransferToRelayChain, DenyThenTry, EnsureXcmOrigin, IsConcrete, ParentAsSuperuser, - ParentIsPreset, RelayChainAsNative, SiblingParachainAsNative, SiblingParachainConvertsVia, - SignedAccountId32AsNative, SignedToAccountId32, SovereignSignedViaLocation, TakeWeightCredit, - TrailingSetTopicAsId, UsingComponents, WeightInfoBounds, WithComputedOrigin, WithUniqueTopic, -}; -use xcm_executor::{traits::WithOriginFilter, XcmExecutor}; - -parameter_types! { - pub const KsmRelayLocation: MultiLocation = MultiLocation::parent(); - pub const RelayNetwork: Option = Some(NetworkId::Kusama); - pub RelayChainOrigin: RuntimeOrigin = cumulus_pallet_xcm::Origin::Relay.into(); - pub UniversalLocation: InteriorMultiLocation = - X2(GlobalConsensus(RelayNetwork::get().unwrap()), Parachain(ParachainInfo::parachain_id().into())); - pub const MaxInstructions: u32 = 100; - pub const MaxAssetsIntoHolding: u32 = 64; - pub const GovernanceLocation: MultiLocation = MultiLocation::parent(); - pub const FellowshipLocation: MultiLocation = MultiLocation::parent(); -} - -/// Type for specifying how a `MultiLocation` can be converted into an `AccountId`. 
This is used -/// when determining ownership of accounts for asset transacting and when attempting to use XCM -/// `Transact` in order to determine the dispatch Origin. -pub type LocationToAccountId = ( - // The parent (Relay-chain) origin converts to the parent `AccountId`. - ParentIsPreset, - // Sibling parachain origins convert to AccountId via the `ParaId::into`. - SiblingParachainConvertsVia, - // Straight up local `AccountId32` origins just alias directly to `AccountId`. - AccountId32Aliases, -); - -/// Means for transacting the native currency on this chain. -pub type CurrencyTransactor = CurrencyAdapter< - // Use this currency: - Balances, - // Use this currency when it is a fungible asset matching the given location or name: - IsConcrete, - // Do a simple pun to convert an AccountId32 MultiLocation into a native chain account ID: - LocationToAccountId, - // Our chain's account ID type (we can't get away without mentioning it explicitly): - AccountId, - // We don't track any teleports of `Balances`. - (), ->; - -/// This is the type we use to convert an (incoming) XCM origin into a local `Origin` instance, -/// ready for dispatching a transaction with Xcm's `Transact`. There is an `OriginKind` which can -/// bias the kind of local `Origin` it will become. -pub type XcmOriginToTransactDispatchOrigin = ( - // Sovereign account converter; this attempts to derive an `AccountId` from the origin location - // using `LocationToAccountId` and then turn that into the usual `Signed` origin. Useful for - // foreign chains that want to have a local sovereign account on this chain which they control. - SovereignSignedViaLocation, - // Native converter for Relay-chain (Parent) location; will convert to a `Relay` origin when - // recognized. - RelayChainAsNative, - // Native converter for sibling Parachains; will convert to a `SiblingPara` origin when - // recognized. - SiblingParachainAsNative, - // Superuser converter for the Relay-chain (Parent) location. This will allow it to issue a - // transaction from the Root origin. - ParentAsSuperuser, - // Native signed account converter; this just converts an `AccountId32` origin into a normal - // `RuntimeOrigin::Signed` origin of the same 32-byte value. - SignedAccountId32AsNative, - // Xcm origins can be represented natively under the Xcm pallet's Xcm origin. - XcmPassthrough, -); - -match_types! { - pub type ParentOrParentsPlurality: impl Contains = { - MultiLocation { parents: 1, interior: Here } | - MultiLocation { parents: 1, interior: X1(Plurality { .. }) } - }; - pub type ParentOrSiblings: impl Contains = { - MultiLocation { parents: 1, interior: Here } | - MultiLocation { parents: 1, interior: X1(_) } - }; -} -/// A call filter for the XCM Transact instruction. This is a temporary measure until we properly -/// account for proof size weights. -/// -/// Calls that are allowed through this filter must: -/// 1. Have a fixed weight; -/// 2. Not lead to another call being made; -/// 3. Have a defined proof size weight, e.g. no unbounded vecs in call parameters. -pub struct SafeCallFilter; -impl Contains for SafeCallFilter { - fn contains(call: &RuntimeCall) -> bool { - #[cfg(feature = "runtime-benchmarks")] - { - if matches!(call, RuntimeCall::System(frame_system::Call::remark_with_event { .. })) { - return true - } - } - - matches!( - call, - RuntimeCall::PolkadotXcm(pallet_xcm::Call::force_xcm_version { .. }) | - RuntimeCall::System( - frame_system::Call::set_heap_pages { .. } | - frame_system::Call::set_code { ..
} | - frame_system::Call::set_code_without_checks { .. } | - frame_system::Call::kill_prefix { .. }, - ) | RuntimeCall::ParachainSystem(..) | - RuntimeCall::Timestamp(..) | - RuntimeCall::Balances(..) | - RuntimeCall::CollatorSelection( - pallet_collator_selection::Call::set_desired_candidates { .. } | - pallet_collator_selection::Call::set_candidacy_bond { .. } | - pallet_collator_selection::Call::register_as_candidate { .. } | - pallet_collator_selection::Call::leave_intent { .. } | - pallet_collator_selection::Call::set_invulnerables { .. } | - pallet_collator_selection::Call::add_invulnerable { .. } | - pallet_collator_selection::Call::remove_invulnerable { .. }, - ) | RuntimeCall::Session(pallet_session::Call::purge_keys { .. }) | - RuntimeCall::XcmpQueue(..) | - RuntimeCall::MessageQueue(..) - ) - } -} - -pub type Barrier = TrailingSetTopicAsId< - DenyThenTry< - DenyReserveTransferToRelayChain, - ( - // Allow local users to buy weight credit. - TakeWeightCredit, - // Expected responses are OK. - AllowKnownQueryResponses, - WithComputedOrigin< - ( - // If the message is one that immediately attempts to pay for execution, then - // allow it. - AllowTopLevelPaidExecutionFrom, - // Parent and its pluralities (i.e. governance bodies) get free execution. - AllowExplicitUnpaidExecutionFrom, - // Subscriptions for version tracking are OK. - AllowSubscriptionsFrom, - ), - UniversalLocation, - ConstU32<8>, - >, - ), - >, ->; - -/// Cases where a remote origin is accepted as trusted Teleporter for a given asset: -/// - KSM with the parent Relay Chain and sibling parachains. -pub type TrustedTeleporters = ConcreteAssetFromSystem; - -pub struct XcmConfig; -impl xcm_executor::Config for XcmConfig { - type RuntimeCall = RuntimeCall; - type XcmSender = XcmRouter; - type AssetTransactor = CurrencyTransactor; - type OriginConverter = XcmOriginToTransactDispatchOrigin; - // BridgeHub does not recognize a reserve location for any asset. Users must teleport KSM - // where allowed (e.g. with the Relay Chain). - type IsReserve = (); - type IsTeleporter = TrustedTeleporters; - type UniversalLocation = UniversalLocation; - type Barrier = Barrier; - type Weigher = WeightInfoBounds< - crate::weights::xcm::BridgeHubKusamaXcmWeight, - RuntimeCall, - MaxInstructions, - >; - type Trader = - UsingComponents>; - type ResponseHandler = PolkadotXcm; - type AssetTrap = PolkadotXcm; - type AssetClaims = PolkadotXcm; - type SubscriptionService = PolkadotXcm; - type PalletInstancesInfo = AllPalletsWithSystem; - type MaxAssetsIntoHolding = MaxAssetsIntoHolding; - type AssetLocker = (); - type AssetExchanger = (); - type FeeManager = (); - type MessageExporter = (); - type UniversalAliases = Nothing; - type CallDispatcher = WithOriginFilter; - type SafeCallFilter = SafeCallFilter; - type Aliasers = Nothing; -} - -/// Converts a local signed origin into an XCM multilocation. -/// Forms the basis for local origins sending/executing XCMs. -pub type LocalOriginToLocation = SignedToAccountId32; - -parameter_types! { - /// The asset ID for the asset that we use to pay for message delivery fees. - pub FeeAssetId: AssetId = Concrete(KsmRelayLocation::get()); - /// The base fee for the message delivery fees. - pub const BaseDeliveryFee: u128 = CENTS.saturating_mul(3); -} - -pub type PriceForParentDelivery = - ExponentialPrice; - -/// The means for routing XCM messages which are not for local execution into the right message -/// queues. 
-pub type XcmRouter = WithUniqueTopic<( - // Two routers - use UMP to communicate with the relay chain: - cumulus_primitives_utility::ParentAsUmp, - // ..and XCMP to communicate with the sibling chains. - XcmpQueue, -)>; - -impl pallet_xcm::Config for Runtime { - type RuntimeEvent = RuntimeEvent; - // We want to disallow users sending (arbitrary) XCMs from this chain. - type SendXcmOrigin = EnsureXcmOrigin; - type XcmRouter = XcmRouter; - // We support local origins dispatching XCM executions in principle... - type ExecuteXcmOrigin = EnsureXcmOrigin; - // ... but disallow generic XCM execution. As a result only teleports are allowed. - type XcmExecuteFilter = Nothing; - type XcmExecutor = XcmExecutor; - type XcmTeleportFilter = Everything; - type XcmReserveTransferFilter = Nothing; // This parachain is not meant as a reserve location. - type Weigher = WeightInfoBounds< - crate::weights::xcm::BridgeHubKusamaXcmWeight, - RuntimeCall, - MaxInstructions, - >; - type UniversalLocation = UniversalLocation; - type RuntimeOrigin = RuntimeOrigin; - type RuntimeCall = RuntimeCall; - const VERSION_DISCOVERY_QUEUE_SIZE: u32 = 100; - type AdvertisedXcmVersion = pallet_xcm::CurrentXcmVersion; - type Currency = Balances; - type CurrencyMatcher = (); - type TrustedLockers = (); - type SovereignAccountOf = LocationToAccountId; - type MaxLockers = ConstU32<8>; - type WeightInfo = crate::weights::pallet_xcm::WeightInfo; - type AdminOrigin = EnsureRoot; - type MaxRemoteLockConsumers = ConstU32<0>; - type RemoteLockConsumerIdentifier = (); -} - -impl cumulus_pallet_xcm::Config for Runtime { - type RuntimeEvent = RuntimeEvent; - type XcmExecutor = XcmExecutor; -} diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/tests/tests.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/tests/tests.rs deleted file mode 100644 index 36d8f0846af..00000000000 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-kusama/tests/tests.rs +++ /dev/null @@ -1,51 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Cumulus. - -// Cumulus is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Cumulus is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Cumulus. If not, see . - -pub use bridge_hub_kusama_runtime::{ - xcm_config::XcmConfig, AllPalletsWithoutSystem, Balances, ExistentialDeposit, ParachainSystem, - PolkadotXcm, Runtime, RuntimeEvent, SessionKeys, -}; -use codec::Decode; -use frame_support::parameter_types; -use parachains_common::{kusama::fee::WeightToFee, AccountId, AuraId}; - -const ALICE: [u8; 32] = [1u8; 32]; - -parameter_types! 
{ - pub CheckingAccount: AccountId = PolkadotXcm::check_account(); -} - -bridge_hub_test_utils::test_cases::include_teleports_for_native_asset_works!( - Runtime, - AllPalletsWithoutSystem, - XcmConfig, - CheckingAccount, - WeightToFee, - ParachainSystem, - bridge_hub_test_utils::CollatorSessionKeys::new( - AccountId::from(ALICE), - AccountId::from(ALICE), - SessionKeys { aura: AuraId::from(sp_core::sr25519::Public::from_raw(ALICE)) } - ), - ExistentialDeposit::get(), - Box::new(|runtime_event_encoded: Vec| { - match RuntimeEvent::decode(&mut &runtime_event_encoded[..]) { - Ok(RuntimeEvent::PolkadotXcm(event)) => Some(event), - _ => None, - } - }), - 1002 -); diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/Cargo.toml b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/Cargo.toml deleted file mode 100644 index 3847a352e07..00000000000 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/Cargo.toml +++ /dev/null @@ -1,192 +0,0 @@ -[package] -name = "bridge-hub-polkadot-runtime" -version = "0.1.0" -authors.workspace = true -edition.workspace = true -description = "Polkadot's BridgeHub parachain runtime" -license = "Apache-2.0" - -[build-dependencies] -substrate-wasm-builder = { path = "../../../../../substrate/utils/wasm-builder", optional = true } - -[dependencies] -codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] } -hex-literal = { version = "0.4.1" } -log = { version = "0.4.20", default-features = false } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -serde = { version = "1.0.188", optional = true, features = ["derive"] } -smallvec = "1.11.0" - -# Substrate -frame-benchmarking = { path = "../../../../../substrate/frame/benchmarking", default-features = false, optional = true} -frame-executive = { path = "../../../../../substrate/frame/executive", default-features = false} -frame-support = { path = "../../../../../substrate/frame/support", default-features = false} -frame-system = { path = "../../../../../substrate/frame/system", default-features = false} -frame-system-benchmarking = { path = "../../../../../substrate/frame/system/benchmarking", default-features = false, optional = true} -frame-system-rpc-runtime-api = { path = "../../../../../substrate/frame/system/rpc/runtime-api", default-features = false} -frame-try-runtime = { path = "../../../../../substrate/frame/try-runtime", default-features = false, optional = true} -pallet-aura = { path = "../../../../../substrate/frame/aura", default-features = false} -pallet-authorship = { path = "../../../../../substrate/frame/authorship", default-features = false} -pallet-balances = { path = "../../../../../substrate/frame/balances", default-features = false} -pallet-message-queue = { path = "../../../../../substrate/frame/message-queue", default-features = false } -pallet-multisig = { path = "../../../../../substrate/frame/multisig", default-features = false} -pallet-session = { path = "../../../../../substrate/frame/session", default-features = false} -pallet-timestamp = { path = "../../../../../substrate/frame/timestamp", default-features = false} -pallet-transaction-payment = { path = "../../../../../substrate/frame/transaction-payment", default-features = false} -pallet-transaction-payment-rpc-runtime-api = { path = "../../../../../substrate/frame/transaction-payment/rpc/runtime-api", default-features = false} -pallet-utility = { path = "../../../../../substrate/frame/utility", 
default-features = false} -sp-api = { path = "../../../../../substrate/primitives/api", default-features = false} -sp-block-builder = { path = "../../../../../substrate/primitives/block-builder", default-features = false} -sp-consensus-aura = { path = "../../../../../substrate/primitives/consensus/aura", default-features = false} -sp-core = { path = "../../../../../substrate/primitives/core", default-features = false} -sp-genesis-builder = { path = "../../../../../substrate/primitives/genesis-builder", default-features = false } -sp-inherents = { path = "../../../../../substrate/primitives/inherents", default-features = false} -sp-io = { path = "../../../../../substrate/primitives/io", default-features = false} -sp-offchain = { path = "../../../../../substrate/primitives/offchain", default-features = false} -sp-runtime = { path = "../../../../../substrate/primitives/runtime", default-features = false} -sp-session = { path = "../../../../../substrate/primitives/session", default-features = false} -sp-std = { path = "../../../../../substrate/primitives/std", default-features = false} -sp-storage = { path = "../../../../../substrate/primitives/storage", default-features = false} -sp-transaction-pool = { path = "../../../../../substrate/primitives/transaction-pool", default-features = false} -sp-version = { path = "../../../../../substrate/primitives/version", default-features = false} - -# Polkadot -pallet-xcm = { path = "../../../../../polkadot/xcm/pallet-xcm", default-features = false} -pallet-xcm-benchmarks = { path = "../../../../../polkadot/xcm/pallet-xcm-benchmarks", default-features = false, optional = true } -polkadot-core-primitives = { path = "../../../../../polkadot/core-primitives", default-features = false} -polkadot-parachain-primitives = { path = "../../../../../polkadot/parachain", default-features = false} -polkadot-runtime-common = { path = "../../../../../polkadot/runtime/common", default-features = false} -xcm = { package = "staging-xcm", path = "../../../../../polkadot/xcm", default-features = false} -xcm-builder = { package = "staging-xcm-builder", path = "../../../../../polkadot/xcm/xcm-builder", default-features = false} -xcm-executor = { package = "staging-xcm-executor", path = "../../../../../polkadot/xcm/xcm-executor", default-features = false} - -# Cumulus -cumulus-pallet-aura-ext = { path = "../../../../pallets/aura-ext", default-features = false } -cumulus-pallet-dmp-queue = { path = "../../../../pallets/dmp-queue", default-features = false } -cumulus-pallet-parachain-system = { path = "../../../../pallets/parachain-system", default-features = false, features = ["parameterized-consensus-hook",] } -cumulus-pallet-session-benchmarking = { path = "../../../../pallets/session-benchmarking", default-features = false} -cumulus-pallet-xcm = { path = "../../../../pallets/xcm", default-features = false } -cumulus-pallet-xcmp-queue = { path = "../../../../pallets/xcmp-queue", default-features = false } -cumulus-primitives-core = { path = "../../../../primitives/core", default-features = false } -cumulus-primitives-utility = { path = "../../../../primitives/utility", default-features = false } -pallet-collator-selection = { path = "../../../../pallets/collator-selection", default-features = false } -parachain-info = { package = "staging-parachain-info", path = "../../../pallets/parachain-info", default-features = false } -parachains-common = { path = "../../../common", default-features = false } - -[dev-dependencies] -bridge-hub-test-utils = { path = "../test-utils" } - 
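The `[features]` table that follows is the standard runtime wiring: every dependency's `std`, `runtime-benchmarks`, and `try-runtime` feature is forwarded to a single crate-level switch, and the runtime source then selects code with `cfg` attributes on that switch. Below is a minimal sketch of the consuming side, assuming exactly those three feature names; the module and function names are placeholders, not items from this runtime:

```rust
// Crate root of a hypothetical runtime: build as `no_std` whenever `std` is off (Wasm builds).
#![cfg_attr(not(feature = "std"), no_std)]

// Only compiled when the crate-level `runtime-benchmarks` feature is enabled, which in turn
// enables the same feature in every benchmarked pallet via the [features] table.
#[cfg(feature = "runtime-benchmarks")]
mod benches {
    // benchmark definitions would live here
}

// Only compiled for `try-runtime` builds (state-migration and upgrade testing).
#[cfg(feature = "try-runtime")]
pub fn try_runtime_helper() {
    // placeholder body for try-runtime-only logic
}
```

The same pattern appears in the runtime's `lib.rs` later in this patch, where `mod benches` and the `frame_try_runtime::TryRuntime` implementation are gated behind the corresponding features.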
-[features] -default = [ "std" ] -std = [ - "codec/std", - "cumulus-pallet-aura-ext/std", - "cumulus-pallet-dmp-queue/std", - "cumulus-pallet-parachain-system/std", - "cumulus-pallet-session-benchmarking/std", - "cumulus-pallet-xcm/std", - "cumulus-pallet-xcmp-queue/std", - "cumulus-primitives-core/std", - "cumulus-primitives-utility/std", - "frame-benchmarking?/std", - "frame-executive/std", - "frame-support/std", - "frame-system-benchmarking?/std", - "frame-system-rpc-runtime-api/std", - "frame-system/std", - "frame-try-runtime?/std", - "log/std", - "pallet-aura/std", - "pallet-authorship/std", - "pallet-balances/std", - "pallet-collator-selection/std", - "pallet-message-queue/std", - "pallet-multisig/std", - "pallet-session/std", - "pallet-timestamp/std", - "pallet-transaction-payment-rpc-runtime-api/std", - "pallet-transaction-payment/std", - "pallet-utility/std", - "pallet-xcm-benchmarks?/std", - "pallet-xcm/std", - "parachain-info/std", - "parachains-common/std", - "polkadot-core-primitives/std", - "polkadot-parachain-primitives/std", - "polkadot-runtime-common/std", - "scale-info/std", - "serde", - "sp-api/std", - "sp-block-builder/std", - "sp-consensus-aura/std", - "sp-core/std", - "sp-genesis-builder/std", - "sp-inherents/std", - "sp-io/std", - "sp-offchain/std", - "sp-runtime/std", - "sp-session/std", - "sp-std/std", - "sp-storage/std", - "sp-transaction-pool/std", - "sp-version/std", - "substrate-wasm-builder", - "xcm-builder/std", - "xcm-executor/std", - "xcm/std", -] - -runtime-benchmarks = [ - "cumulus-pallet-dmp-queue/runtime-benchmarks", - "cumulus-pallet-parachain-system/runtime-benchmarks", - "cumulus-pallet-session-benchmarking/runtime-benchmarks", - "cumulus-pallet-xcmp-queue/runtime-benchmarks", - "cumulus-primitives-core/runtime-benchmarks", - "cumulus-primitives-utility/runtime-benchmarks", - "frame-benchmarking/runtime-benchmarks", - "frame-support/runtime-benchmarks", - "frame-system-benchmarking/runtime-benchmarks", - "frame-system/runtime-benchmarks", - "pallet-balances/runtime-benchmarks", - "pallet-collator-selection/runtime-benchmarks", - "pallet-message-queue/runtime-benchmarks", - "pallet-multisig/runtime-benchmarks", - "pallet-timestamp/runtime-benchmarks", - "pallet-utility/runtime-benchmarks", - "pallet-xcm-benchmarks/runtime-benchmarks", - "pallet-xcm/runtime-benchmarks", - "parachains-common/runtime-benchmarks", - "polkadot-parachain-primitives/runtime-benchmarks", - "polkadot-runtime-common/runtime-benchmarks", - "sp-runtime/runtime-benchmarks", - "xcm-builder/runtime-benchmarks", - "xcm-executor/runtime-benchmarks", -] - -try-runtime = [ - "cumulus-pallet-aura-ext/try-runtime", - "cumulus-pallet-dmp-queue/try-runtime", - "cumulus-pallet-parachain-system/try-runtime", - "cumulus-pallet-xcm/try-runtime", - "cumulus-pallet-xcmp-queue/try-runtime", - "frame-executive/try-runtime", - "frame-support/try-runtime", - "frame-system/try-runtime", - "frame-try-runtime/try-runtime", - "pallet-aura/try-runtime", - "pallet-authorship/try-runtime", - "pallet-balances/try-runtime", - "pallet-collator-selection/try-runtime", - "pallet-message-queue/try-runtime", - "pallet-multisig/try-runtime", - "pallet-session/try-runtime", - "pallet-timestamp/try-runtime", - "pallet-transaction-payment/try-runtime", - "pallet-utility/try-runtime", - "pallet-xcm/try-runtime", - "parachain-info/try-runtime", - "polkadot-runtime-common/try-runtime", - "sp-runtime/try-runtime", -] - -experimental = [ "pallet-aura/experimental" ] diff --git 
a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/build.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/build.rs deleted file mode 100644 index 60f8a125129..00000000000 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/build.rs +++ /dev/null @@ -1,26 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -#[cfg(feature = "std")] -fn main() { - substrate_wasm_builder::WasmBuilder::new() - .with_current_project() - .export_heap_base() - .import_memory() - .build() -} - -#[cfg(not(feature = "std"))] -fn main() {} diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/lib.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/lib.rs deleted file mode 100644 index e5c4f00f28e..00000000000 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/lib.rs +++ /dev/null @@ -1,873 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Cumulus. - -// Cumulus is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Cumulus is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Cumulus. If not, see . - -#![cfg_attr(not(feature = "std"), no_std)] -// `construct_runtime!` does a lot of recursion and requires us to increase the limit to 256. -#![recursion_limit = "256"] - -// Make the WASM binary available. 
-#[cfg(feature = "std")] -include!(concat!(env!("OUT_DIR"), "/wasm_binary.rs")); - -mod weights; -pub mod xcm_config; - -use cumulus_pallet_parachain_system::RelayNumberStrictlyIncreases; -use polkadot_runtime_common::xcm_sender::NoPriceForMessageDelivery; -use sp_api::impl_runtime_apis; -use sp_core::{crypto::KeyTypeId, OpaqueMetadata}; -use sp_runtime::{ - create_runtime_str, generic, impl_opaque_keys, - traits::{AccountIdLookup, BlakeTwo256, Block as BlockT}, - transaction_validity::{TransactionSource, TransactionValidity}, - ApplyExtrinsicResult, -}; - -use sp_std::prelude::*; -#[cfg(feature = "std")] -use sp_version::NativeVersion; -use sp_version::RuntimeVersion; - -use cumulus_primitives_core::{AggregateMessageOrigin, ParaId}; -use frame_support::{ - construct_runtime, - dispatch::DispatchClass, - genesis_builder_helper::{build_config, create_default_config}, - parameter_types, - traits::{ - ConstBool, ConstU32, ConstU64, ConstU8, EitherOfDiverse, Everything, TransformOrigin, - }, - weights::{ConstantMultiplier, Weight}, - PalletId, -}; -use frame_system::{ - limits::{BlockLength, BlockWeights}, - EnsureRoot, -}; -use pallet_xcm::{EnsureXcm, IsVoiceOfBody}; -pub use sp_consensus_aura::sr25519::AuthorityId as AuraId; -pub use sp_runtime::{MultiAddress, Perbill, Permill}; -use xcm_config::{FellowshipLocation, GovernanceLocation, XcmOriginToTransactDispatchOrigin}; - -#[cfg(any(feature = "std", test))] -pub use sp_runtime::BuildStorage; - -// Polkadot imports -use polkadot_runtime_common::{BlockHashCount, SlowAdjustingFeeUpdate}; - -use weights::{BlockExecutionWeight, ExtrinsicBaseWeight, RocksDbWeight}; - -use parachains_common::{ - impls::DealWithFees, - message_queue::{NarrowOriginToSibling, ParaIdToSibling}, - polkadot::{consensus::*, currency::*, fee::WeightToFee}, - AccountId, Balance, BlockNumber, Hash, Header, Nonce, Signature, AVERAGE_ON_INITIALIZE_RATIO, - HOURS, MAXIMUM_BLOCK_WEIGHT, NORMAL_DISPATCH_RATIO, SLOT_DURATION, -}; -// XCM Imports -use xcm::latest::prelude::BodyId; - -/// The address format for describing accounts. -pub type Address = MultiAddress; - -/// Block type as expected by this runtime. -pub type Block = generic::Block; - -/// A Block signed with a Justification -pub type SignedBlock = generic::SignedBlock; - -/// BlockId type as expected by this runtime. -pub type BlockId = generic::BlockId; - -/// The SignedExtension to the basic transaction logic. -pub type SignedExtra = ( - frame_system::CheckNonZeroSender, - frame_system::CheckSpecVersion, - frame_system::CheckTxVersion, - frame_system::CheckGenesis, - frame_system::CheckEra, - frame_system::CheckNonce, - frame_system::CheckWeight, - pallet_transaction_payment::ChargeTransactionPayment, -); - -/// Unchecked extrinsic type as expected by this runtime. -pub type UncheckedExtrinsic = - generic::UncheckedExtrinsic; - -/// Migrations to apply on runtime upgrade. -pub type Migrations = ( - // unreleased - pallet_collator_selection::migration::v1::MigrateToV1, -); - -/// Executive: handles dispatch to the various modules. -pub type Executive = frame_executive::Executive< - Runtime, - Block, - frame_system::ChainContext, - Runtime, - AllPalletsWithSystem, - Migrations, ->; - -impl_opaque_keys! 
{ - pub struct SessionKeys { - pub aura: Aura, - } -} - -#[sp_version::runtime_version] -pub const VERSION: RuntimeVersion = RuntimeVersion { - spec_name: create_runtime_str!("bridge-hub-polkadot"), - impl_name: create_runtime_str!("bridge-hub-polkadot"), - authoring_version: 1, - spec_version: 1_004_000, - impl_version: 0, - apis: RUNTIME_API_VERSIONS, - transaction_version: 2, - state_version: 1, -}; - -/// The version information used to identify this runtime when compiled natively. -#[cfg(feature = "std")] -pub fn native_version() -> NativeVersion { - NativeVersion { runtime_version: VERSION, can_author_with: Default::default() } -} - -parameter_types! { - pub const Version: RuntimeVersion = VERSION; - pub RuntimeBlockLength: BlockLength = - BlockLength::max_with_normal_ratio(5 * 1024 * 1024, NORMAL_DISPATCH_RATIO); - pub RuntimeBlockWeights: BlockWeights = BlockWeights::builder() - .base_block(BlockExecutionWeight::get()) - .for_class(DispatchClass::all(), |weights| { - weights.base_extrinsic = ExtrinsicBaseWeight::get(); - }) - .for_class(DispatchClass::Normal, |weights| { - weights.max_total = Some(NORMAL_DISPATCH_RATIO * MAXIMUM_BLOCK_WEIGHT); - }) - .for_class(DispatchClass::Operational, |weights| { - weights.max_total = Some(MAXIMUM_BLOCK_WEIGHT); - // Operational transactions have some extra reserved space, so that they - // are included even if block reached `MAXIMUM_BLOCK_WEIGHT`. - weights.reserved = Some( - MAXIMUM_BLOCK_WEIGHT - NORMAL_DISPATCH_RATIO * MAXIMUM_BLOCK_WEIGHT - ); - }) - .avg_block_initialization(AVERAGE_ON_INITIALIZE_RATIO) - .build_or_panic(); - pub const SS58Prefix: u8 = 0; -} - -// Configure FRAME pallets to include in runtime. - -impl frame_system::Config for Runtime { - /// The identifier used to distinguish between accounts. - type AccountId = AccountId; - /// The aggregated dispatch type that is available for extrinsics. - type RuntimeCall = RuntimeCall; - /// The lookup mechanism to get account ID from whatever is passed in dispatchers. - type Lookup = AccountIdLookup; - /// The index type for storing how many extrinsics an account has signed. - type Nonce = Nonce; - /// The type for hashing blocks and tries. - type Hash = Hash; - /// The hashing algorithm used. - type Hashing = BlakeTwo256; - /// The block type. - type Block = Block; - /// The ubiquitous event type. - type RuntimeEvent = RuntimeEvent; - /// The ubiquitous origin type. - type RuntimeOrigin = RuntimeOrigin; - /// Maximum number of block number to block hash mappings to keep (oldest pruned first). - type BlockHashCount = BlockHashCount; - /// Runtime version. - type Version = Version; - /// Converts a module to an index of this module in the runtime. - type PalletInfo = PalletInfo; - /// The data to be stored in an account. - type AccountData = pallet_balances::AccountData; - /// What to do if a new account is created. - type OnNewAccount = (); - /// What to do if an account is fully reaped from the system. - type OnKilledAccount = (); - /// The weight of database operations that the runtime can invoke. - type DbWeight = RocksDbWeight; - /// The basic call filter to use in dispatchable. - type BaseCallFilter = Everything; - /// Weight information for the extrinsics of this pallet. - type SystemWeightInfo = weights::frame_system::WeightInfo; - /// Block & extrinsics weights: base values and limits. - type BlockWeights = RuntimeBlockWeights; - /// The maximum length of a block (in bytes). 
- type BlockLength = RuntimeBlockLength; - type SS58Prefix = SS58Prefix; - /// The action to take on a Runtime Upgrade - type OnSetCode = cumulus_pallet_parachain_system::ParachainSetCode; - type MaxConsumers = ConstU32<16>; -} - -impl pallet_timestamp::Config for Runtime { - /// A timestamp: milliseconds since the unix epoch. - type Moment = u64; - type OnTimestampSet = Aura; - type MinimumPeriod = ConstU64<{ SLOT_DURATION / 2 }>; - type WeightInfo = weights::pallet_timestamp::WeightInfo; -} - -impl pallet_authorship::Config for Runtime { - type FindAuthor = pallet_session::FindAccountFromAuthorIndex; - type EventHandler = (CollatorSelection,); -} - -parameter_types! { - pub const ExistentialDeposit: Balance = EXISTENTIAL_DEPOSIT; -} - -impl pallet_balances::Config for Runtime { - /// The type for recording an account's balance. - type Balance = Balance; - type DustRemoval = (); - /// The ubiquitous event type. - type RuntimeEvent = RuntimeEvent; - type ExistentialDeposit = ExistentialDeposit; - type AccountStore = System; - type WeightInfo = weights::pallet_balances::WeightInfo; - type MaxLocks = ConstU32<50>; - type MaxReserves = ConstU32<50>; - type ReserveIdentifier = [u8; 8]; - type RuntimeHoldReason = RuntimeHoldReason; - type RuntimeFreezeReason = RuntimeFreezeReason; - type FreezeIdentifier = (); - type MaxHolds = ConstU32<0>; - type MaxFreezes = ConstU32<0>; -} - -parameter_types! { - /// Relay Chain `TransactionByteFee` / 10 - pub const TransactionByteFee: Balance = MILLICENTS; -} - -impl pallet_transaction_payment::Config for Runtime { - type RuntimeEvent = RuntimeEvent; - type OnChargeTransaction = - pallet_transaction_payment::CurrencyAdapter>; - type OperationalFeeMultiplier = ConstU8<5>; - type WeightToFee = WeightToFee; - type LengthToFee = ConstantMultiplier; - type FeeMultiplierUpdate = SlowAdjustingFeeUpdate; -} - -parameter_types! { - pub const ReservedXcmpWeight: Weight = MAXIMUM_BLOCK_WEIGHT.saturating_div(4); - pub const ReservedDmpWeight: Weight = MAXIMUM_BLOCK_WEIGHT.saturating_div(4); -} - -impl cumulus_pallet_parachain_system::Config for Runtime { - type WeightInfo = weights::cumulus_pallet_parachain_system::WeightInfo; - type RuntimeEvent = RuntimeEvent; - type OnSystemEvent = (); - type SelfParaId = parachain_info::Pallet; - type OutboundXcmpMessageSource = XcmpQueue; - type DmpQueue = frame_support::traits::EnqueueWithOrigin; - type ReservedDmpWeight = ReservedDmpWeight; - type XcmpMessageHandler = XcmpQueue; - type ReservedXcmpWeight = ReservedXcmpWeight; - type CheckAssociatedRelayNumber = RelayNumberStrictlyIncreases; - type ConsensusHook = cumulus_pallet_aura_ext::FixedVelocityConsensusHook< - Runtime, - RELAY_CHAIN_SLOT_DURATION_MILLIS, - BLOCK_PROCESSING_VELOCITY, - UNINCLUDED_SEGMENT_CAPACITY, - >; -} - -parameter_types! 
{ - pub MessageQueueServiceWeight: Weight = Perbill::from_percent(35) * RuntimeBlockWeights::get().max_block; -} - -impl pallet_message_queue::Config for Runtime { - type RuntimeEvent = RuntimeEvent; - type WeightInfo = weights::pallet_message_queue::WeightInfo; - #[cfg(feature = "runtime-benchmarks")] - type MessageProcessor = pallet_message_queue::mock_helpers::NoopMessageProcessor< - cumulus_primitives_core::AggregateMessageOrigin, - >; - #[cfg(not(feature = "runtime-benchmarks"))] - type MessageProcessor = xcm_builder::ProcessXcmMessage< - AggregateMessageOrigin, - xcm_executor::XcmExecutor, - RuntimeCall, - >; - type Size = u32; - // The XCMP queue pallet is only ever able to handle the `Sibling(ParaId)` origin: - type QueueChangeHandler = NarrowOriginToSibling; - type QueuePausedQuery = NarrowOriginToSibling; - type HeapSize = sp_core::ConstU32<{ 64 * 1024 }>; - type MaxStale = sp_core::ConstU32<8>; - type ServiceWeight = MessageQueueServiceWeight; -} - -impl parachain_info::Config for Runtime {} - -impl cumulus_pallet_aura_ext::Config for Runtime {} - -parameter_types! { - // Fellows pluralistic body. - pub const FellowsBodyId: BodyId = BodyId::Technical; -} - -/// Privileged origin that represents Root or Fellows. -pub type RootOrFellows = EitherOfDiverse< - EnsureRoot, - EnsureXcm>, ->; - -impl cumulus_pallet_xcmp_queue::Config for Runtime { - type RuntimeEvent = RuntimeEvent; - type ChannelInfo = ParachainSystem; - type VersionWrapper = PolkadotXcm; - // Enqueue XCMP messages from siblings for later processing. - type XcmpQueue = TransformOrigin; - type MaxInboundSuspended = sp_core::ConstU32<1_000>; - type ControllerOrigin = RootOrFellows; - type ControllerOriginConverter = XcmOriginToTransactDispatchOrigin; - type WeightInfo = weights::cumulus_pallet_xcmp_queue::WeightInfo; - type PriceForSiblingDelivery = NoPriceForMessageDelivery; -} - -parameter_types! { - pub const RelayOrigin: AggregateMessageOrigin = AggregateMessageOrigin::Parent; -} - -impl cumulus_pallet_dmp_queue::Config for Runtime { - type WeightInfo = weights::cumulus_pallet_dmp_queue::WeightInfo; - type RuntimeEvent = RuntimeEvent; - type DmpSink = frame_support::traits::EnqueueWithOrigin; -} - -pub const PERIOD: u32 = 6 * HOURS; -pub const OFFSET: u32 = 0; - -impl pallet_session::Config for Runtime { - type RuntimeEvent = RuntimeEvent; - type ValidatorId = ::AccountId; - // we don't have stash and controller, thus we don't need the convert as well. - type ValidatorIdOf = pallet_collator_selection::IdentityCollator; - type ShouldEndSession = pallet_session::PeriodicSessions, ConstU32>; - type NextSessionRotation = pallet_session::PeriodicSessions, ConstU32>; - type SessionManager = CollatorSelection; - // Essentially just Aura, but let's be pedantic. - type SessionHandler = ::KeyTypeIdProviders; - type Keys = SessionKeys; - type WeightInfo = weights::pallet_session::WeightInfo; -} - -impl pallet_aura::Config for Runtime { - type AuthorityId = AuraId; - type DisabledValidators = (); - type MaxAuthorities = ConstU32<100_000>; - type AllowMultipleBlocksPerSlot = ConstBool; - #[cfg(feature = "experimental")] - type SlotDuration = pallet_aura::MinimumPeriodTimesTwo; -} - -parameter_types! { - pub const PotId: PalletId = PalletId(*b"PotStake"); - pub const SessionLength: BlockNumber = 6 * HOURS; - // StakingAdmin pluralistic body. - pub const StakingAdminBodyId: BodyId = BodyId::Defense; -} - -/// We allow root, the StakingAdmin to execute privileged collator selection operations. 
-pub type CollatorSelectionUpdateOrigin = EitherOfDiverse< - EnsureRoot, - EnsureXcm>, ->; - -impl pallet_collator_selection::Config for Runtime { - type RuntimeEvent = RuntimeEvent; - type Currency = Balances; - type UpdateOrigin = CollatorSelectionUpdateOrigin; - type PotId = PotId; - type MaxCandidates = ConstU32<100>; - type MinEligibleCollators = ConstU32<4>; - type MaxInvulnerables = ConstU32<20>; - // should be a multiple of session or things will get inconsistent - type KickThreshold = ConstU32; - type ValidatorId = ::AccountId; - type ValidatorIdOf = pallet_collator_selection::IdentityCollator; - type ValidatorRegistration = Session; - type WeightInfo = weights::pallet_collator_selection::WeightInfo; -} - -parameter_types! { - // One storage item; key size is 32; value is size 4+4+16+32 bytes = 56 bytes. - pub const DepositBase: Balance = deposit(1, 88); - // Additional storage item size of 32 bytes. - pub const DepositFactor: Balance = deposit(0, 32); -} - -impl pallet_multisig::Config for Runtime { - type RuntimeEvent = RuntimeEvent; - type RuntimeCall = RuntimeCall; - type Currency = Balances; - type DepositBase = DepositBase; - type DepositFactor = DepositFactor; - type MaxSignatories = ConstU32<100>; - type WeightInfo = weights::pallet_multisig::WeightInfo; -} - -impl pallet_utility::Config for Runtime { - type RuntimeEvent = RuntimeEvent; - type RuntimeCall = RuntimeCall; - type PalletsOrigin = OriginCaller; - type WeightInfo = weights::pallet_utility::WeightInfo; -} - -// Create the runtime by composing the FRAME pallets that were previously configured. -construct_runtime!( - pub enum Runtime - { - // System support stuff. - System: frame_system::{Pallet, Call, Config, Storage, Event} = 0, - ParachainSystem: cumulus_pallet_parachain_system::{ - Pallet, Call, Config, Storage, Inherent, Event, ValidateUnsigned, - } = 1, - Timestamp: pallet_timestamp::{Pallet, Call, Storage, Inherent} = 2, - ParachainInfo: parachain_info::{Pallet, Storage, Config} = 3, - - // Monetary stuff. - Balances: pallet_balances::{Pallet, Call, Storage, Config, Event} = 10, - TransactionPayment: pallet_transaction_payment::{Pallet, Storage, Event} = 11, - - // Collator support. The order of these 4 are important and shall not change. - Authorship: pallet_authorship::{Pallet, Storage} = 20, - CollatorSelection: pallet_collator_selection::{Pallet, Call, Storage, Event, Config} = 21, - Session: pallet_session::{Pallet, Call, Storage, Event, Config} = 22, - Aura: pallet_aura::{Pallet, Storage, Config} = 23, - AuraExt: cumulus_pallet_aura_ext::{Pallet, Storage, Config} = 24, - - // XCM helpers. - XcmpQueue: cumulus_pallet_xcmp_queue::{Pallet, Call, Storage, Event} = 30, - PolkadotXcm: pallet_xcm::{Pallet, Call, Event, Origin, Config} = 31, - CumulusXcm: cumulus_pallet_xcm::{Pallet, Event, Origin} = 32, - DmpQueue: cumulus_pallet_dmp_queue::{Pallet, Call, Storage, Event} = 33, - MessageQueue: pallet_message_queue::{Pallet, Call, Storage, Event} = 34, - - // Handy utilities. 
- Utility: pallet_utility::{Pallet, Call, Event} = 40, - Multisig: pallet_multisig::{Pallet, Call, Storage, Event} = 41, - } -); - -#[cfg(feature = "runtime-benchmarks")] -mod benches { - frame_benchmarking::define_benchmarks!( - [frame_system, SystemBench::] - [pallet_balances, Balances] - [pallet_message_queue, MessageQueue] - [pallet_multisig, Multisig] - [pallet_session, SessionBench::] - [pallet_utility, Utility] - [pallet_timestamp, Timestamp] - [pallet_collator_selection, CollatorSelection] - [cumulus_pallet_parachain_system, ParachainSystem] - [cumulus_pallet_xcmp_queue, XcmpQueue] - [cumulus_pallet_dmp_queue, DmpQueue] - // XCM - [pallet_xcm, PalletXcmExtrinsicsBenchmark::] - // NOTE: Make sure you point to the individual modules below. - [pallet_xcm_benchmarks::fungible, XcmBalances] - [pallet_xcm_benchmarks::generic, XcmGeneric] - ); -} - -impl_runtime_apis! { - impl sp_consensus_aura::AuraApi for Runtime { - fn slot_duration() -> sp_consensus_aura::SlotDuration { - sp_consensus_aura::SlotDuration::from_millis(Aura::slot_duration()) - } - - fn authorities() -> Vec { - Aura::authorities().into_inner() - } - } - - impl sp_api::Core for Runtime { - fn version() -> RuntimeVersion { - VERSION - } - - fn execute_block(block: Block) { - Executive::execute_block(block) - } - - fn initialize_block(header: &::Header) { - Executive::initialize_block(header) - } - } - - impl sp_api::Metadata for Runtime { - fn metadata() -> OpaqueMetadata { - OpaqueMetadata::new(Runtime::metadata().into()) - } - - fn metadata_at_version(version: u32) -> Option { - Runtime::metadata_at_version(version) - } - - fn metadata_versions() -> sp_std::vec::Vec { - Runtime::metadata_versions() - } - } - - impl sp_block_builder::BlockBuilder for Runtime { - fn apply_extrinsic(extrinsic: ::Extrinsic) -> ApplyExtrinsicResult { - Executive::apply_extrinsic(extrinsic) - } - - fn finalize_block() -> ::Header { - Executive::finalize_block() - } - - fn inherent_extrinsics(data: sp_inherents::InherentData) -> Vec<::Extrinsic> { - data.create_extrinsics() - } - - fn check_inherents( - block: Block, - data: sp_inherents::InherentData, - ) -> sp_inherents::CheckInherentsResult { - data.check_extrinsics(&block) - } - } - - impl sp_transaction_pool::runtime_api::TaggedTransactionQueue for Runtime { - fn validate_transaction( - source: TransactionSource, - tx: ::Extrinsic, - block_hash: ::Hash, - ) -> TransactionValidity { - Executive::validate_transaction(source, tx, block_hash) - } - } - - impl sp_offchain::OffchainWorkerApi for Runtime { - fn offchain_worker(header: &::Header) { - Executive::offchain_worker(header) - } - } - - impl sp_session::SessionKeys for Runtime { - fn generate_session_keys(seed: Option>) -> Vec { - SessionKeys::generate(seed) - } - - fn decode_session_keys( - encoded: Vec, - ) -> Option, KeyTypeId)>> { - SessionKeys::decode_into_raw_public_keys(&encoded) - } - } - - impl frame_system_rpc_runtime_api::AccountNonceApi for Runtime { - fn account_nonce(account: AccountId) -> Nonce { - System::account_nonce(account) - } - } - - impl pallet_transaction_payment_rpc_runtime_api::TransactionPaymentApi for Runtime { - fn query_info( - uxt: ::Extrinsic, - len: u32, - ) -> pallet_transaction_payment_rpc_runtime_api::RuntimeDispatchInfo { - TransactionPayment::query_info(uxt, len) - } - fn query_fee_details( - uxt: ::Extrinsic, - len: u32, - ) -> pallet_transaction_payment::FeeDetails { - TransactionPayment::query_fee_details(uxt, len) - } - fn query_weight_to_fee(weight: Weight) -> Balance { - 
TransactionPayment::weight_to_fee(weight) - } - fn query_length_to_fee(length: u32) -> Balance { - TransactionPayment::length_to_fee(length) - } - } - - impl pallet_transaction_payment_rpc_runtime_api::TransactionPaymentCallApi - for Runtime - { - fn query_call_info( - call: RuntimeCall, - len: u32, - ) -> pallet_transaction_payment::RuntimeDispatchInfo { - TransactionPayment::query_call_info(call, len) - } - fn query_call_fee_details( - call: RuntimeCall, - len: u32, - ) -> pallet_transaction_payment::FeeDetails { - TransactionPayment::query_call_fee_details(call, len) - } - fn query_weight_to_fee(weight: Weight) -> Balance { - TransactionPayment::weight_to_fee(weight) - } - fn query_length_to_fee(length: u32) -> Balance { - TransactionPayment::length_to_fee(length) - } - } - - impl cumulus_primitives_core::CollectCollationInfo for Runtime { - fn collect_collation_info(header: &::Header) -> cumulus_primitives_core::CollationInfo { - ParachainSystem::collect_collation_info(header) - } - } - - #[cfg(feature = "try-runtime")] - impl frame_try_runtime::TryRuntime for Runtime { - fn on_runtime_upgrade(checks: frame_try_runtime::UpgradeCheckSelect) -> (Weight, Weight) { - let weight = Executive::try_runtime_upgrade(checks).unwrap(); - (weight, RuntimeBlockWeights::get().max_block) - } - - fn execute_block( - block: Block, - state_root_check: bool, - signature_check: bool, - select: frame_try_runtime::TryStateSelect, - ) -> Weight { - // NOTE: intentional unwrap: we don't want to propagate the error backwards, and want to - // have a backtrace here. - Executive::try_execute_block(block, state_root_check, signature_check, select).unwrap() - } - } - - #[cfg(feature = "runtime-benchmarks")] - impl frame_benchmarking::Benchmark for Runtime { - fn benchmark_metadata(extra: bool) -> ( - Vec, - Vec, - ) { - use frame_benchmarking::{Benchmarking, BenchmarkList}; - use frame_support::traits::StorageInfoTrait; - use frame_system_benchmarking::Pallet as SystemBench; - use cumulus_pallet_session_benchmarking::Pallet as SessionBench; - use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsicsBenchmark; - - // This is defined once again in dispatch_benchmark, because list_benchmarks! - // and add_benchmarks! are macros exported by define_benchmarks! macros and those types - // are referenced in that call. 
- type XcmBalances = pallet_xcm_benchmarks::fungible::Pallet::; - type XcmGeneric = pallet_xcm_benchmarks::generic::Pallet::; - - let mut list = Vec::::new(); - list_benchmarks!(list, extra); - - let storage_info = AllPalletsWithSystem::storage_info(); - (list, storage_info) - } - - fn dispatch_benchmark( - config: frame_benchmarking::BenchmarkConfig - ) -> Result, sp_runtime::RuntimeString> { - use frame_benchmarking::{Benchmarking, BenchmarkBatch, BenchmarkError}; - use sp_storage::TrackedStorageKey; - - use frame_system_benchmarking::Pallet as SystemBench; - impl frame_system_benchmarking::Config for Runtime { - fn setup_set_code_requirements(code: &sp_std::vec::Vec) -> Result<(), BenchmarkError> { - ParachainSystem::initialize_for_set_code_benchmark(code.len() as u32); - Ok(()) - } - - fn verify_set_code() { - System::assert_last_event(cumulus_pallet_parachain_system::Event::::ValidationFunctionStored.into()); - } - } - - use cumulus_pallet_session_benchmarking::Pallet as SessionBench; - impl cumulus_pallet_session_benchmarking::Config for Runtime {} - - use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsicsBenchmark; - impl pallet_xcm::benchmarking::Config for Runtime { - fn reachable_dest() -> Option { - Some(Parent.into()) - } - - fn teleportable_asset_and_dest() -> Option<(MultiAsset, MultiLocation)> { - // Relay/native token can be teleported between BH and Relay. - Some(( - MultiAsset { - fun: Fungible(EXISTENTIAL_DEPOSIT), - id: Concrete(Parent.into()) - }, - Parent.into(), - )) - } - - fn reserve_transferable_asset_and_dest() -> Option<(MultiAsset, MultiLocation)> { - // Reserve transfers are disabled on BH. - None - } - } - - use xcm::latest::prelude::*; - use xcm_config::DotRelayLocation; - - parameter_types! { - pub ExistentialDepositMultiAsset: Option = Some(( - xcm_config::DotRelayLocation::get(), - ExistentialDeposit::get() - ).into()); - } - - impl pallet_xcm_benchmarks::Config for Runtime { - type XcmConfig = xcm_config::XcmConfig; - type AccountIdConverter = xcm_config::LocationToAccountId; - type DeliveryHelper = cumulus_primitives_utility::ToParentDeliveryHelper< - xcm_config::XcmConfig, - ExistentialDepositMultiAsset, - xcm_config::PriceForParentDelivery, - >; - fn valid_destination() -> Result { - Ok(DotRelayLocation::get()) - } - fn worst_case_holding(_depositable_count: u32) -> MultiAssets { - // just concrete assets according to relay chain. - let assets: Vec = vec![ - MultiAsset { - id: Concrete(DotRelayLocation::get()), - fun: Fungible(1_000_000 * UNITS), - } - ]; - assets.into() - } - } - - parameter_types! 
{ - pub const TrustedTeleporter: Option<(MultiLocation, MultiAsset)> = Some(( - DotRelayLocation::get(), - MultiAsset { fun: Fungible(UNITS), id: Concrete(DotRelayLocation::get()) }, - )); - pub const CheckedAccount: Option<(AccountId, xcm_builder::MintLocation)> = None; - pub const TrustedReserve: Option<(MultiLocation, MultiAsset)> = None; - } - - impl pallet_xcm_benchmarks::fungible::Config for Runtime { - type TransactAsset = Balances; - - type CheckedAccount = CheckedAccount; - type TrustedTeleporter = TrustedTeleporter; - type TrustedReserve = TrustedReserve; - - fn get_multi_asset() -> MultiAsset { - MultiAsset { - id: Concrete(DotRelayLocation::get()), - fun: Fungible(UNITS), - } - } - } - - impl pallet_xcm_benchmarks::generic::Config for Runtime { - type TransactAsset = Balances; - type RuntimeCall = RuntimeCall; - - fn worst_case_response() -> (u64, Response) { - (0u64, Response::Version(Default::default())) - } - - fn worst_case_asset_exchange() -> Result<(MultiAssets, MultiAssets), BenchmarkError> { - Err(BenchmarkError::Skip) - } - - fn universal_alias() -> Result<(MultiLocation, Junction), BenchmarkError> { - Err(BenchmarkError::Skip) - } - - fn transact_origin_and_runtime_call() -> Result<(MultiLocation, RuntimeCall), BenchmarkError> { - Ok((DotRelayLocation::get(), frame_system::Call::remark_with_event { remark: vec![] }.into())) - } - - fn subscribe_origin() -> Result { - Ok(DotRelayLocation::get()) - } - - fn claimable_asset() -> Result<(MultiLocation, MultiLocation, MultiAssets), BenchmarkError> { - let origin = DotRelayLocation::get(); - let assets: MultiAssets = (Concrete(DotRelayLocation::get()), 1_000 * UNITS).into(); - let ticket = MultiLocation { parents: 0, interior: Here }; - Ok((origin, ticket, assets)) - } - - fn unlockable_asset() -> Result<(MultiLocation, MultiLocation, MultiAsset), BenchmarkError> { - Err(BenchmarkError::Skip) - } - - fn export_message_origin_and_destination( - ) -> Result<(MultiLocation, NetworkId, InteriorMultiLocation), BenchmarkError> { - Err(BenchmarkError::Skip) - } - - fn alias_origin() -> Result<(MultiLocation, MultiLocation), BenchmarkError> { - Err(BenchmarkError::Skip) - } - } - - type XcmBalances = pallet_xcm_benchmarks::fungible::Pallet::; - type XcmGeneric = pallet_xcm_benchmarks::generic::Pallet::; - - let whitelist: Vec = vec![ - // Block Number - hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef702a5c1b19ab7a04f536c519aca4983ac").to_vec().into(), - // Total Issuance - hex_literal::hex!("c2261276cc9d1f8598ea4b6a74b15c2f57c875e4cff74148e4628f264b974c80").to_vec().into(), - // Execution Phase - hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef7ff553b5a9862a516939d82b3d3d8661a").to_vec().into(), - // Event Count - hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef70a98fdbe9ce6c55837576c60c7af3850").to_vec().into(), - // System Events - hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef780d41e5e16056765bc8461851072c9d7").to_vec().into(), - ]; - - let mut batches = Vec::::new(); - let params = (&config, &whitelist); - add_benchmarks!(params, batches); - - Ok(batches) - } - } - - impl sp_genesis_builder::GenesisBuilder for Runtime { - fn create_default_config() -> Vec { - create_default_config::() - } - - fn build_config(config: Vec) -> sp_genesis_builder::Result { - build_config::(config) - } - } -} - -cumulus_pallet_parachain_system::register_validate_block! 
{ - Runtime = Runtime, - BlockExecutor = cumulus_pallet_aura_ext::BlockExecutor::, -} diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/weights/block_weights.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/weights/block_weights.rs deleted file mode 100644 index e7fdb2aae2a..00000000000 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/weights/block_weights.rs +++ /dev/null @@ -1,53 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -pub mod constants { - use frame_support::{ - parameter_types, - weights::{constants, Weight}, - }; - - parameter_types! { - /// Importing a block with 0 Extrinsics. - pub const BlockExecutionWeight: Weight = - Weight::from_parts(constants::WEIGHT_REF_TIME_PER_NANOS.saturating_mul(5_000_000), 0); - } - - #[cfg(test)] - mod test_weights { - use frame_support::weights::constants; - - /// Checks that the weight exists and is sane. - // NOTE: If this test fails but you are sure that the generated values are fine, - // you can delete it. - #[test] - fn sane() { - let w = super::constants::BlockExecutionWeight::get(); - - // At least 100 µs. - assert!( - w.ref_time() >= 100u64 * constants::WEIGHT_REF_TIME_PER_MICROS, - "Weight should be at least 100 µs." - ); - // At most 50 ms. - assert!( - w.ref_time() <= 50u64 * constants::WEIGHT_REF_TIME_PER_MILLIS, - "Weight should be at most 50 ms." - ); - } - } -} diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/weights/cumulus_pallet_dmp_queue.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/weights/cumulus_pallet_dmp_queue.rs deleted file mode 100644 index cc41dcd6cbb..00000000000 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/weights/cumulus_pallet_dmp_queue.rs +++ /dev/null @@ -1,131 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Cumulus. - -// Cumulus is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Cumulus is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Cumulus. If not, see . - -//! Autogenerated weights for `cumulus_pallet_dmp_queue` -//! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-10-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-yprdrvc7-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! 
WASM-EXECUTION: `Compiled`, CHAIN: `Some("asset-hub-kusama-dev")`, DB CACHE: 1024 - -// Executed Command: -// target/production/polkadot-parachain -// benchmark -// pallet -// --steps=50 -// --repeat=20 -// --extrinsic=* -// --wasm-execution=compiled -// --heap-pages=4096 -// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json -// --pallet=cumulus_pallet_dmp_queue -// --chain=asset-hub-kusama-dev -// --header=./cumulus/file_header.txt -// --output=./cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/ - -#![cfg_attr(rustfmt, rustfmt_skip)] -#![allow(unused_parens)] -#![allow(unused_imports)] -#![allow(missing_docs)] - -use frame_support::{traits::Get, weights::Weight}; -use core::marker::PhantomData; - -/// Weight functions for `cumulus_pallet_dmp_queue`. -pub struct WeightInfo(PhantomData); -impl cumulus_pallet_dmp_queue::WeightInfo for WeightInfo { - /// Storage: `DmpQueue::MigrationStatus` (r:1 w:1) - /// Proof: `DmpQueue::MigrationStatus` (`max_values`: Some(1), `max_size`: Some(1028), added: 1523, mode: `MaxEncodedLen`) - /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca754904d6d8c6fe06c4e5965f9b8397421` (r:1 w:0) - /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca754904d6d8c6fe06c4e5965f9b8397421` (r:1 w:0) - /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca7d95d3e948effbeccff2de2c182672836` (r:1 w:1) - /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca7d95d3e948effbeccff2de2c182672836` (r:1 w:1) - /// Storage: `MessageQueue::BookStateFor` (r:1 w:1) - /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) - /// Storage: `MessageQueue::ServiceHead` (r:1 w:1) - /// Proof: `MessageQueue::ServiceHead` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `MaxEncodedLen`) - /// Storage: `MessageQueue::Pages` (r:0 w:1) - /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(65585), added: 68060, mode: `MaxEncodedLen`) - fn on_idle_good_msg() -> Weight { - // Proof Size summary in bytes: - // Measured: `65696` - // Estimated: `69161` - // Minimum execution time: 124_651_000 picoseconds. - Weight::from_parts(127_857_000, 0) - .saturating_add(Weight::from_parts(0, 69161)) - .saturating_add(T::DbWeight::get().reads(5)) - .saturating_add(T::DbWeight::get().writes(5)) - } - /// Storage: `DmpQueue::MigrationStatus` (r:1 w:1) - /// Proof: `DmpQueue::MigrationStatus` (`max_values`: Some(1), `max_size`: Some(1028), added: 1523, mode: `MaxEncodedLen`) - /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca754904d6d8c6fe06c4e5965f9b8397421` (r:1 w:0) - /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca754904d6d8c6fe06c4e5965f9b8397421` (r:1 w:0) - /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca7d95d3e948effbeccff2de2c182672836` (r:1 w:1) - /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca7d95d3e948effbeccff2de2c182672836` (r:1 w:1) - fn on_idle_large_msg() -> Weight { - // Proof Size summary in bytes: - // Measured: `65659` - // Estimated: `69124` - // Minimum execution time: 65_684_000 picoseconds. 
- Weight::from_parts(68_039_000, 0) - .saturating_add(Weight::from_parts(0, 69124)) - .saturating_add(T::DbWeight::get().reads(3)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `DmpQueue::MigrationStatus` (r:1 w:1) - /// Proof: `DmpQueue::MigrationStatus` (`max_values`: Some(1), `max_size`: Some(1028), added: 1523, mode: `MaxEncodedLen`) - /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca754904d6d8c6fe06c4e5965f9b8397421` (r:1 w:0) - /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca754904d6d8c6fe06c4e5965f9b8397421` (r:1 w:0) - /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca70f923ef3252d0166429d36d20ed665a8` (r:1 w:1) - /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca70f923ef3252d0166429d36d20ed665a8` (r:1 w:1) - /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca772275f64c354954352b71eea39cfaca2` (r:1 w:1) - /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca772275f64c354954352b71eea39cfaca2` (r:1 w:1) - /// Storage: `MessageQueue::BookStateFor` (r:1 w:1) - /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) - /// Storage: `MessageQueue::ServiceHead` (r:1 w:1) - /// Proof: `MessageQueue::ServiceHead` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `MaxEncodedLen`) - /// Storage: `MessageQueue::Pages` (r:0 w:1) - /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(65585), added: 68060, mode: `MaxEncodedLen`) - fn on_idle_overweight_good_msg() -> Weight { - // Proof Size summary in bytes: - // Measured: `65726` - // Estimated: `69191` - // Minimum execution time: 117_657_000 picoseconds. - Weight::from_parts(122_035_000, 0) - .saturating_add(Weight::from_parts(0, 69191)) - .saturating_add(T::DbWeight::get().reads(6)) - .saturating_add(T::DbWeight::get().writes(6)) - } - /// Storage: `DmpQueue::MigrationStatus` (r:1 w:1) - /// Proof: `DmpQueue::MigrationStatus` (`max_values`: Some(1), `max_size`: Some(1028), added: 1523, mode: `MaxEncodedLen`) - /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca754904d6d8c6fe06c4e5965f9b8397421` (r:1 w:0) - /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca754904d6d8c6fe06c4e5965f9b8397421` (r:1 w:0) - /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca70f923ef3252d0166429d36d20ed665a8` (r:1 w:1) - /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca70f923ef3252d0166429d36d20ed665a8` (r:1 w:1) - /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca772275f64c354954352b71eea39cfaca2` (r:1 w:1) - /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca772275f64c354954352b71eea39cfaca2` (r:1 w:1) - fn on_idle_overweight_large_msg() -> Weight { - // Proof Size summary in bytes: - // Measured: `65689` - // Estimated: `69154` - // Minimum execution time: 59_799_000 picoseconds. - Weight::from_parts(61_354_000, 0) - .saturating_add(Weight::from_parts(0, 69154)) - .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(3)) - } -} diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/weights/cumulus_pallet_parachain_system.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/weights/cumulus_pallet_parachain_system.rs deleted file mode 100644 index 4b0cface146..00000000000 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/weights/cumulus_pallet_parachain_system.rs +++ /dev/null @@ -1,80 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. 
-// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! Autogenerated weights for `cumulus_pallet_parachain_system` -//! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-03-28, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `i9`, CPU: `13th Gen Intel(R) Core(TM) i9-13900K` -//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("bridge-hub-polkadot-dev"), DB CACHE: 1024 - -// Executed Command: -// ./target/release/polkadot-parachain -// benchmark -// pallet -// --chain -// bridge-hub-polkadot-dev -// --pallet -// cumulus_pallet_parachain_system -// --extrinsic -// * -// --execution -// wasm -// --wasm-execution -// compiled -// --output -// parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/weights -// --steps -// 50 -// --repeat -// 20 - -#![cfg_attr(rustfmt, rustfmt_skip)] -#![allow(unused_parens)] -#![allow(unused_imports)] - -use frame_support::{traits::Get, weights::Weight}; -use sp_std::marker::PhantomData; - -/// Weight functions for `cumulus_pallet_parachain_system`. -pub struct WeightInfo(PhantomData); -impl cumulus_pallet_parachain_system::WeightInfo for WeightInfo { - /// Storage: ParachainSystem LastDmqMqcHead (r:1 w:1) - /// Proof Skipped: ParachainSystem LastDmqMqcHead (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ParachainSystem ReservedDmpWeightOverride (r:1 w:0) - /// Proof Skipped: ParachainSystem ReservedDmpWeightOverride (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: MessageQueue BookStateFor (r:1 w:1) - /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) - /// Storage: MessageQueue ServiceHead (r:1 w:1) - /// Proof: MessageQueue ServiceHead (max_values: Some(1), max_size: Some(5), added: 500, mode: MaxEncodedLen) - /// Storage: ParachainSystem ProcessedDownwardMessages (r:0 w:1) - /// Proof Skipped: ParachainSystem ProcessedDownwardMessages (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: MessageQueue Pages (r:0 w:16) - /// Proof: MessageQueue Pages (max_values: None, max_size: Some(65585), added: 68060, mode: MaxEncodedLen) - /// The range of component `n` is `[0, 1000]`. - fn enqueue_inbound_downward_messages(n: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `12` - // Estimated: `8013` - // Minimum execution time: 1_686_000 picoseconds. 
- Weight::from_parts(1_729_000, 0) - .saturating_add(Weight::from_parts(0, 8013)) - // Standard Error: 19_565 - .saturating_add(Weight::from_parts(24_482_828, 0).saturating_mul(n.into())) - .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(4)) - } -} diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/weights/cumulus_pallet_xcmp_queue.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/weights/cumulus_pallet_xcmp_queue.rs deleted file mode 100644 index ac6ad093faf..00000000000 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/weights/cumulus_pallet_xcmp_queue.rs +++ /dev/null @@ -1,148 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! Autogenerated weights for `cumulus_pallet_xcmp_queue` -//! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-09-15, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `Olivers-MacBook-Pro.local`, CPU: `` -//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("bridge-hub-polkadot-dev")`, DB CACHE: 1024 - -// Executed Command: -// ./target/release/polkadot-parachain -// benchmark -// pallet -// --pallet -// cumulus-pallet-xcmp-queue -// --chain -// bridge-hub-polkadot-dev -// --output -// cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/weights/cumulus_pallet_xcmp_queue.rs -// --extrinsic -// - -#![cfg_attr(rustfmt, rustfmt_skip)] -#![allow(unused_parens)] -#![allow(unused_imports)] -#![allow(missing_docs)] - -use frame_support::{traits::Get, weights::Weight}; -use core::marker::PhantomData; - -/// Weight functions for `cumulus_pallet_xcmp_queue`. -pub struct WeightInfo(PhantomData); -impl cumulus_pallet_xcmp_queue::WeightInfo for WeightInfo { - /// Storage: `XcmpQueue::QueueConfig` (r:1 w:1) - /// Proof: `XcmpQueue::QueueConfig` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - fn set_config_with_u32() -> Weight { - // Proof Size summary in bytes: - // Measured: `76` - // Estimated: `1561` - // Minimum execution time: 6_000_000 picoseconds. 
- Weight::from_parts(6_000_000, 0) - .saturating_add(Weight::from_parts(0, 1561)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `XcmpQueue::QueueConfig` (r:1 w:0) - /// Proof: `XcmpQueue::QueueConfig` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `MessageQueue::BookStateFor` (r:1 w:1) - /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) - /// Storage: `MessageQueue::ServiceHead` (r:1 w:1) - /// Proof: `MessageQueue::ServiceHead` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `MaxEncodedLen`) - /// Storage: `XcmpQueue::InboundXcmpSuspended` (r:1 w:0) - /// Proof: `XcmpQueue::InboundXcmpSuspended` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `MessageQueue::Pages` (r:0 w:1) - /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(65585), added: 68060, mode: `MaxEncodedLen`) - fn enqueue_xcmp_message() -> Weight { - // Proof Size summary in bytes: - // Measured: `82` - // Estimated: `3517` - // Minimum execution time: 15_000_000 picoseconds. - Weight::from_parts(15_000_000, 0) - .saturating_add(Weight::from_parts(0, 3517)) - .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(3)) - } - /// Storage: `XcmpQueue::OutboundXcmpStatus` (r:1 w:1) - /// Proof: `XcmpQueue::OutboundXcmpStatus` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - fn suspend_channel() -> Weight { - // Proof Size summary in bytes: - // Measured: `76` - // Estimated: `1561` - // Minimum execution time: 3_000_000 picoseconds. - Weight::from_parts(4_000_000, 0) - .saturating_add(Weight::from_parts(0, 1561)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `XcmpQueue::OutboundXcmpStatus` (r:1 w:1) - /// Proof: `XcmpQueue::OutboundXcmpStatus` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - fn resume_channel() -> Weight { - // Proof Size summary in bytes: - // Measured: `111` - // Estimated: `1596` - // Minimum execution time: 4_000_000 picoseconds. - Weight::from_parts(5_000_000, 0) - .saturating_add(Weight::from_parts(0, 1596)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - fn take_first_concatenated_xcm() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 44_000_000 picoseconds. 
- Weight::from_parts(45_000_000, 0) - .saturating_add(Weight::from_parts(0, 0)) - } - /// Storage: UNKNOWN KEY `0x7b3237373ffdfeb1cab4222e3b520d6b345d8e88afa015075c945637c07e8f20` (r:1 w:1) - /// Proof: UNKNOWN KEY `0x7b3237373ffdfeb1cab4222e3b520d6b345d8e88afa015075c945637c07e8f20` (r:1 w:1) - /// Storage: `XcmpQueue::InboundXcmpMessages` (r:1 w:1) - /// Proof: `XcmpQueue::InboundXcmpMessages` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// Storage: `MessageQueue::BookStateFor` (r:1 w:1) - /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) - /// Storage: `MessageQueue::ServiceHead` (r:1 w:1) - /// Proof: `MessageQueue::ServiceHead` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `MaxEncodedLen`) - /// Storage: `XcmpQueue::QueueConfig` (r:1 w:0) - /// Proof: `XcmpQueue::QueueConfig` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `XcmpQueue::InboundXcmpSuspended` (r:1 w:0) - /// Proof: `XcmpQueue::InboundXcmpSuspended` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `MessageQueue::Pages` (r:0 w:1) - /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(65585), added: 68060, mode: `MaxEncodedLen`) - fn on_idle_good_msg() -> Weight { - // Proof Size summary in bytes: - // Measured: `65711` - // Estimated: `69176` - // Minimum execution time: 61_000_000 picoseconds. - Weight::from_parts(64_000_000, 0) - .saturating_add(Weight::from_parts(0, 69176)) - .saturating_add(T::DbWeight::get().reads(6)) - .saturating_add(T::DbWeight::get().writes(5)) - } - /// Storage: UNKNOWN KEY `0x7b3237373ffdfeb1cab4222e3b520d6b345d8e88afa015075c945637c07e8f20` (r:1 w:1) - /// Proof: UNKNOWN KEY `0x7b3237373ffdfeb1cab4222e3b520d6b345d8e88afa015075c945637c07e8f20` (r:1 w:1) - fn on_idle_large_msg() -> Weight { - // Proof Size summary in bytes: - // Measured: `65710` - // Estimated: `69175` - // Minimum execution time: 42_000_000 picoseconds. - Weight::from_parts(44_000_000, 0) - .saturating_add(Weight::from_parts(0, 69175)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(2)) - } -} diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/weights/extrinsic_weights.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/weights/extrinsic_weights.rs deleted file mode 100644 index 1a4adb968bb..00000000000 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/weights/extrinsic_weights.rs +++ /dev/null @@ -1,53 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -pub mod constants { - use frame_support::{ - parameter_types, - weights::{constants, Weight}, - }; - - parameter_types! { - /// Executing a NO-OP `System::remarks` Extrinsic. 
- pub const ExtrinsicBaseWeight: Weight = - Weight::from_parts(constants::WEIGHT_REF_TIME_PER_NANOS.saturating_mul(125_000), 0); - } - - #[cfg(test)] - mod test_weights { - use frame_support::weights::constants; - - /// Checks that the weight exists and is sane. - // NOTE: If this test fails but you are sure that the generated values are fine, - // you can delete it. - #[test] - fn sane() { - let w = super::constants::ExtrinsicBaseWeight::get(); - - // At least 10 µs. - assert!( - w.ref_time() >= 10u64 * constants::WEIGHT_REF_TIME_PER_MICROS, - "Weight should be at least 10 µs." - ); - // At most 1 ms. - assert!( - w.ref_time() <= constants::WEIGHT_REF_TIME_PER_MILLIS, - "Weight should be at most 1 ms." - ); - } - } -} diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/weights/frame_system.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/weights/frame_system.rs deleted file mode 100644 index 8676be67b2f..00000000000 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/weights/frame_system.rs +++ /dev/null @@ -1,154 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! Autogenerated weights for `frame_system` -//! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-07-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-ynta1nyy-project-238-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! EXECUTION: ``, WASM-EXECUTION: `Compiled`, CHAIN: `Some("bridge-hub-polkadot-dev")`, DB CACHE: 1024 - -// Executed Command: -// ./target/production/polkadot-parachain -// benchmark -// pallet -// --chain=bridge-hub-polkadot-dev -// --wasm-execution=compiled -// --pallet=frame_system -// --no-storage-info -// --no-median-slopes -// --no-min-squares -// --extrinsic=* -// --steps=50 -// --repeat=20 -// --json -// --header=./file_header.txt -// --output=./parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/weights/ - -#![cfg_attr(rustfmt, rustfmt_skip)] -#![allow(unused_parens)] -#![allow(unused_imports)] -#![allow(missing_docs)] - -use frame_support::{traits::Get, weights::Weight}; -use core::marker::PhantomData; - -/// Weight functions for `frame_system`. -pub struct WeightInfo(PhantomData); -impl frame_system::WeightInfo for WeightInfo { - /// The range of component `b` is `[0, 3932160]`. - fn remark(b: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 2_047_000 picoseconds. - Weight::from_parts(2_087_000, 0) - .saturating_add(Weight::from_parts(0, 0)) - // Standard Error: 0 - .saturating_add(Weight::from_parts(390, 0).saturating_mul(b.into())) - } - /// The range of component `b` is `[0, 3932160]`. 
- fn remark_with_event(b: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 7_335_000 picoseconds. - Weight::from_parts(7_507_000, 0) - .saturating_add(Weight::from_parts(0, 0)) - // Standard Error: 2 - .saturating_add(Weight::from_parts(1_751, 0).saturating_mul(b.into())) - } - /// Storage: `System::Digest` (r:1 w:1) - /// Proof: `System::Digest` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: UNKNOWN KEY `0x3a686561707061676573` (r:0 w:1) - /// Proof: UNKNOWN KEY `0x3a686561707061676573` (r:0 w:1) - fn set_heap_pages() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `1485` - // Minimum execution time: 3_673_000 picoseconds. - Weight::from_parts(3_953_000, 0) - .saturating_add(Weight::from_parts(0, 1485)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `ParachainSystem::ValidationData` (r:1 w:0) - /// Proof: `ParachainSystem::ValidationData` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `ParachainSystem::UpgradeRestrictionSignal` (r:1 w:0) - /// Proof: `ParachainSystem::UpgradeRestrictionSignal` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `ParachainSystem::PendingValidationCode` (r:1 w:1) - /// Proof: `ParachainSystem::PendingValidationCode` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) - /// Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `ParachainSystem::NewValidationCode` (r:0 w:1) - /// Proof: `ParachainSystem::NewValidationCode` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `ParachainSystem::DidSetValidationCode` (r:0 w:1) - /// Proof: `ParachainSystem::DidSetValidationCode` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - fn set_code() -> Weight { - // Proof Size summary in bytes: - // Measured: `119` - // Estimated: `1604` - // Minimum execution time: 98_791_992_000 picoseconds. - Weight::from_parts(101_799_041_000, 0) - .saturating_add(Weight::from_parts(0, 1604)) - .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(3)) - } - /// Storage: `Skipped::Metadata` (r:0 w:0) - /// Proof: `Skipped::Metadata` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// The range of component `i` is `[0, 1000]`. - fn set_storage(i: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 2_144_000 picoseconds. - Weight::from_parts(2_206_000, 0) - .saturating_add(Weight::from_parts(0, 0)) - // Standard Error: 2_254 - .saturating_add(Weight::from_parts(740_881, 0).saturating_mul(i.into())) - .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(i.into()))) - } - /// Storage: `Skipped::Metadata` (r:0 w:0) - /// Proof: `Skipped::Metadata` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// The range of component `i` is `[0, 1000]`. - fn kill_storage(i: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 2_117_000 picoseconds. 
- Weight::from_parts(2_192_000, 0) - .saturating_add(Weight::from_parts(0, 0)) - // Standard Error: 1_024 - .saturating_add(Weight::from_parts(558_397, 0).saturating_mul(i.into())) - .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(i.into()))) - } - /// Storage: `Skipped::Metadata` (r:0 w:0) - /// Proof: `Skipped::Metadata` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// The range of component `p` is `[0, 1000]`. - fn kill_prefix(p: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `68 + p * (69 ±0)` - // Estimated: `66 + p * (70 ±0)` - // Minimum execution time: 3_907_000 picoseconds. - Weight::from_parts(4_050_000, 0) - .saturating_add(Weight::from_parts(0, 66)) - // Standard Error: 2_228 - .saturating_add(Weight::from_parts(1_212_760, 0).saturating_mul(p.into())) - .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(p.into()))) - .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(p.into()))) - .saturating_add(Weight::from_parts(0, 70).saturating_mul(p.into())) - } -} diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/weights/mod.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/weights/mod.rs deleted file mode 100644 index 36733d6d4a6..00000000000 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/weights/mod.rs +++ /dev/null @@ -1,41 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! Expose the auto generated weight files. - -pub mod block_weights; -pub mod cumulus_pallet_dmp_queue; -pub mod cumulus_pallet_parachain_system; -pub mod cumulus_pallet_xcmp_queue; -pub mod extrinsic_weights; -pub mod frame_system; -pub mod pallet_balances; -pub mod pallet_collator_selection; -pub mod pallet_message_queue; -pub mod pallet_multisig; -pub mod pallet_session; -pub mod pallet_timestamp; -pub mod pallet_utility; -pub mod pallet_xcm; -pub mod paritydb_weights; -pub mod rocksdb_weights; -pub mod xcm; - -pub use block_weights::constants::BlockExecutionWeight; -pub use extrinsic_weights::constants::ExtrinsicBaseWeight; -pub use paritydb_weights::constants::ParityDbWeight; -pub use rocksdb_weights::constants::RocksDbWeight; diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/weights/pallet_balances.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/weights/pallet_balances.rs deleted file mode 100644 index b95ea83585f..00000000000 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/weights/pallet_balances.rs +++ /dev/null @@ -1,152 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! Autogenerated weights for `pallet_balances` -//! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-07-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-ynta1nyy-project-238-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! EXECUTION: ``, WASM-EXECUTION: `Compiled`, CHAIN: `Some("bridge-hub-polkadot-dev")`, DB CACHE: 1024 - -// Executed Command: -// ./target/production/polkadot-parachain -// benchmark -// pallet -// --chain=bridge-hub-polkadot-dev -// --wasm-execution=compiled -// --pallet=pallet_balances -// --no-storage-info -// --no-median-slopes -// --no-min-squares -// --extrinsic=* -// --steps=50 -// --repeat=20 -// --json -// --header=./file_header.txt -// --output=./parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/weights/ - -#![cfg_attr(rustfmt, rustfmt_skip)] -#![allow(unused_parens)] -#![allow(unused_imports)] -#![allow(missing_docs)] - -use frame_support::{traits::Get, weights::Weight}; -use core::marker::PhantomData; - -/// Weight functions for `pallet_balances`. -pub struct WeightInfo(PhantomData); -impl pallet_balances::WeightInfo for WeightInfo { - /// Storage: `System::Account` (r:1 w:1) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - fn transfer_allow_death() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `3593` - // Minimum execution time: 54_518_000 picoseconds. - Weight::from_parts(55_244_000, 0) - .saturating_add(Weight::from_parts(0, 3593)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `System::Account` (r:1 w:1) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - fn transfer_keep_alive() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `3593` - // Minimum execution time: 40_152_000 picoseconds. - Weight::from_parts(41_084_000, 0) - .saturating_add(Weight::from_parts(0, 3593)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `System::Account` (r:1 w:1) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - fn force_set_balance_creating() -> Weight { - // Proof Size summary in bytes: - // Measured: `174` - // Estimated: `3593` - // Minimum execution time: 15_234_000 picoseconds. - Weight::from_parts(15_576_000, 0) - .saturating_add(Weight::from_parts(0, 3593)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `System::Account` (r:1 w:1) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - fn force_set_balance_killing() -> Weight { - // Proof Size summary in bytes: - // Measured: `174` - // Estimated: `3593` - // Minimum execution time: 22_173_000 picoseconds. 
- Weight::from_parts(22_964_000, 0) - .saturating_add(Weight::from_parts(0, 3593)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `System::Account` (r:2 w:2) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - fn force_transfer() -> Weight { - // Proof Size summary in bytes: - // Measured: `103` - // Estimated: `6196` - // Minimum execution time: 56_636_000 picoseconds. - Weight::from_parts(57_316_000, 0) - .saturating_add(Weight::from_parts(0, 6196)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `System::Account` (r:1 w:1) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - fn transfer_all() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `3593` - // Minimum execution time: 50_829_000 picoseconds. - Weight::from_parts(51_264_000, 0) - .saturating_add(Weight::from_parts(0, 3593)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `System::Account` (r:1 w:1) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - fn force_unreserve() -> Weight { - // Proof Size summary in bytes: - // Measured: `174` - // Estimated: `3593` - // Minimum execution time: 17_887_000 picoseconds. - Weight::from_parts(18_365_000, 0) - .saturating_add(Weight::from_parts(0, 3593)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `System::Account` (r:999 w:999) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - /// The range of component `u` is `[1, 1000]`. - fn upgrade_accounts(u: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `0 + u * (136 ±0)` - // Estimated: `990 + u * (2603 ±0)` - // Minimum execution time: 16_754_000 picoseconds. - Weight::from_parts(17_237_000, 0) - .saturating_add(Weight::from_parts(0, 990)) - // Standard Error: 15_088 - .saturating_add(Weight::from_parts(15_392_959, 0).saturating_mul(u.into())) - .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(u.into()))) - .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(u.into()))) - .saturating_add(Weight::from_parts(0, 2603).saturating_mul(u.into())) - } -} diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/weights/pallet_collator_selection.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/weights/pallet_collator_selection.rs deleted file mode 100644 index f7c78f7db82..00000000000 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/weights/pallet_collator_selection.rs +++ /dev/null @@ -1,248 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -//! Autogenerated weights for `pallet_collator_selection` -//! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-07-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-ynta1nyy-project-238-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! EXECUTION: ``, WASM-EXECUTION: `Compiled`, CHAIN: `Some("bridge-hub-polkadot-dev")`, DB CACHE: 1024 - -// Executed Command: -// ./target/production/polkadot-parachain -// benchmark -// pallet -// --chain=bridge-hub-polkadot-dev -// --wasm-execution=compiled -// --pallet=pallet_collator_selection -// --no-storage-info -// --no-median-slopes -// --no-min-squares -// --extrinsic=* -// --steps=50 -// --repeat=20 -// --json -// --header=./file_header.txt -// --output=./parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/weights/ - -#![cfg_attr(rustfmt, rustfmt_skip)] -#![allow(unused_parens)] -#![allow(unused_imports)] -#![allow(missing_docs)] - -use frame_support::{traits::Get, weights::Weight}; -use core::marker::PhantomData; - -/// Weight functions for `pallet_collator_selection`. -pub struct WeightInfo(PhantomData); -impl pallet_collator_selection::WeightInfo for WeightInfo { - /// Storage: `Session::NextKeys` (r:20 w:0) - /// Proof: `Session::NextKeys` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// Storage: `CollatorSelection::Invulnerables` (r:0 w:1) - /// Proof: `CollatorSelection::Invulnerables` (`max_values`: Some(1), `max_size`: Some(641), added: 1136, mode: `MaxEncodedLen`) - /// The range of component `b` is `[1, 20]`. - fn set_invulnerables(b: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `196 + b * (79 ±0)` - // Estimated: `1187 + b * (2555 ±0)` - // Minimum execution time: 14_735_000 picoseconds. - Weight::from_parts(11_846_916, 0) - .saturating_add(Weight::from_parts(0, 1187)) - // Standard Error: 8_592 - .saturating_add(Weight::from_parts(3_270_517, 0).saturating_mul(b.into())) - .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(b.into()))) - .saturating_add(T::DbWeight::get().writes(1)) - .saturating_add(Weight::from_parts(0, 2555).saturating_mul(b.into())) - } - /// Storage: `Session::NextKeys` (r:1 w:0) - /// Proof: `Session::NextKeys` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// Storage: `CollatorSelection::Invulnerables` (r:1 w:1) - /// Proof: `CollatorSelection::Invulnerables` (`max_values`: Some(1), `max_size`: Some(641), added: 1136, mode: `MaxEncodedLen`) - /// Storage: `CollatorSelection::Candidates` (r:1 w:1) - /// Proof: `CollatorSelection::Candidates` (`max_values`: Some(1), `max_size`: Some(4802), added: 5297, mode: `MaxEncodedLen`) - /// Storage: `System::Account` (r:1 w:1) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - /// The range of component `b` is `[1, 19]`. - /// The range of component `c` is `[1, 99]`. - fn add_invulnerable(b: u32, c: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `757 + b * (32 ±0) + c * (53 ±0)` - // Estimated: `6287 + b * (37 ±0) + c * (53 ±0)` - // Minimum execution time: 48_332_000 picoseconds. 
- Weight::from_parts(46_158_586, 0) - .saturating_add(Weight::from_parts(0, 6287)) - // Standard Error: 13_938 - .saturating_add(Weight::from_parts(174_493, 0).saturating_mul(b.into())) - // Standard Error: 2_642 - .saturating_add(Weight::from_parts(196_691, 0).saturating_mul(c.into())) - .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(3)) - .saturating_add(Weight::from_parts(0, 37).saturating_mul(b.into())) - .saturating_add(Weight::from_parts(0, 53).saturating_mul(c.into())) - } - /// Storage: `CollatorSelection::Candidates` (r:1 w:0) - /// Proof: `CollatorSelection::Candidates` (`max_values`: Some(1), `max_size`: Some(4802), added: 5297, mode: `MaxEncodedLen`) - /// Storage: `CollatorSelection::Invulnerables` (r:1 w:1) - /// Proof: `CollatorSelection::Invulnerables` (`max_values`: Some(1), `max_size`: Some(641), added: 1136, mode: `MaxEncodedLen`) - /// The range of component `b` is `[5, 20]`. - fn remove_invulnerable(b: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `82 + b * (32 ±0)` - // Estimated: `6287` - // Minimum execution time: 15_323_000 picoseconds. - Weight::from_parts(15_016_873, 0) - .saturating_add(Weight::from_parts(0, 6287)) - // Standard Error: 2_970 - .saturating_add(Weight::from_parts(199_160, 0).saturating_mul(b.into())) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `CollatorSelection::DesiredCandidates` (r:0 w:1) - /// Proof: `CollatorSelection::DesiredCandidates` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - fn set_desired_candidates() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 7_393_000 picoseconds. - Weight::from_parts(7_723_000, 0) - .saturating_add(Weight::from_parts(0, 0)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `CollatorSelection::CandidacyBond` (r:0 w:1) - /// Proof: `CollatorSelection::CandidacyBond` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) - fn set_candidacy_bond(_c: u32, _k: u32) -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 7_426_000 picoseconds. 
- Weight::from_parts(7_783_000, 0) - .saturating_add(Weight::from_parts(0, 0)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `CollatorSelection::Candidates` (r:1 w:1) - /// Proof: `CollatorSelection::Candidates` (`max_values`: Some(1), `max_size`: Some(4802), added: 5297, mode: `MaxEncodedLen`) - /// Storage: `CollatorSelection::DesiredCandidates` (r:1 w:0) - /// Proof: `CollatorSelection::DesiredCandidates` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - /// Storage: `CollatorSelection::Invulnerables` (r:1 w:0) - /// Proof: `CollatorSelection::Invulnerables` (`max_values`: Some(1), `max_size`: Some(641), added: 1136, mode: `MaxEncodedLen`) - /// Storage: `Session::NextKeys` (r:1 w:0) - /// Proof: `Session::NextKeys` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// Storage: `CollatorSelection::CandidacyBond` (r:1 w:0) - /// Proof: `CollatorSelection::CandidacyBond` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) - /// Storage: `CollatorSelection::LastAuthoredBlock` (r:0 w:1) - /// Proof: `CollatorSelection::LastAuthoredBlock` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) - /// The range of component `c` is `[1, 99]`. - fn register_as_candidate(c: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `740 + c * (52 ±0)` - // Estimated: `6287 + c * (54 ±0)` - // Minimum execution time: 41_040_000 picoseconds. - Weight::from_parts(43_902_200, 0) - .saturating_add(Weight::from_parts(0, 6287)) - // Standard Error: 2_360 - .saturating_add(Weight::from_parts(211_897, 0).saturating_mul(c.into())) - .saturating_add(T::DbWeight::get().reads(5)) - .saturating_add(T::DbWeight::get().writes(2)) - .saturating_add(Weight::from_parts(0, 54).saturating_mul(c.into())) - } - /// Storage: `CollatorSelection::Candidates` (r:1 w:1) - /// Proof: `CollatorSelection::Candidates` (`max_values`: Some(1), `max_size`: Some(4802), added: 5297, mode: `MaxEncodedLen`) - /// Storage: `CollatorSelection::Invulnerables` (r:1 w:0) - /// Proof: `CollatorSelection::Invulnerables` (`max_values`: Some(1), `max_size`: Some(641), added: 1136, mode: `MaxEncodedLen`) - /// Storage: `CollatorSelection::LastAuthoredBlock` (r:0 w:1) - /// Proof: `CollatorSelection::LastAuthoredBlock` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) - /// The range of component `c` is `[3, 100]`. - fn leave_intent(c: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `334 + c * (49 ±0)` - // Estimated: `6287` - // Minimum execution time: 33_429_000 picoseconds. - Weight::from_parts(36_413_045, 0) - .saturating_add(Weight::from_parts(0, 6287)) - // Standard Error: 2_947 - .saturating_add(Weight::from_parts(177_461, 0).saturating_mul(c.into())) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(2)) - } - fn update_bond(c: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `306 + c * (50 ±0)` - // Estimated: `6287` - // Minimum execution time: 34_814_000 picoseconds. 
- Weight::from_parts(36_371_520, 0) - .saturating_add(Weight::from_parts(0, 6287)) - // Standard Error: 2_391 - .saturating_add(Weight::from_parts(201_700, 0).saturating_mul(c.into())) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(2)) - } - fn take_candidate_slot(c: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `306 + c * (50 ±0)` - // Estimated: `6287` - // Minimum execution time: 34_814_000 picoseconds. - Weight::from_parts(36_371_520, 0) - .saturating_add(Weight::from_parts(0, 6287)) - // Standard Error: 2_391 - .saturating_add(Weight::from_parts(201_700, 0).saturating_mul(c.into())) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `System::Account` (r:2 w:2) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - /// Storage: `System::BlockWeight` (r:1 w:1) - /// Proof: `System::BlockWeight` (`max_values`: Some(1), `max_size`: Some(48), added: 543, mode: `MaxEncodedLen`) - /// Storage: `CollatorSelection::LastAuthoredBlock` (r:0 w:1) - /// Proof: `CollatorSelection::LastAuthoredBlock` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) - fn note_author() -> Weight { - // Proof Size summary in bytes: - // Measured: `155` - // Estimated: `6196` - // Minimum execution time: 45_300_000 picoseconds. - Weight::from_parts(46_280_000, 0) - .saturating_add(Weight::from_parts(0, 6196)) - .saturating_add(T::DbWeight::get().reads(3)) - .saturating_add(T::DbWeight::get().writes(4)) - } - /// Storage: `CollatorSelection::Candidates` (r:1 w:0) - /// Proof: `CollatorSelection::Candidates` (`max_values`: Some(1), `max_size`: Some(4802), added: 5297, mode: `MaxEncodedLen`) - /// Storage: `CollatorSelection::LastAuthoredBlock` (r:100 w:0) - /// Proof: `CollatorSelection::LastAuthoredBlock` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) - /// Storage: `CollatorSelection::Invulnerables` (r:1 w:0) - /// Proof: `CollatorSelection::Invulnerables` (`max_values`: Some(1), `max_size`: Some(641), added: 1136, mode: `MaxEncodedLen`) - /// Storage: `System::BlockWeight` (r:1 w:1) - /// Proof: `System::BlockWeight` (`max_values`: Some(1), `max_size`: Some(48), added: 543, mode: `MaxEncodedLen`) - /// Storage: `System::Account` (r:97 w:97) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - /// The range of component `r` is `[1, 100]`. - /// The range of component `c` is `[1, 100]`. - fn new_session(r: u32, c: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `2263 + c * (97 ±0) + r * (115 ±0)` - // Estimated: `6287 + c * (2519 ±0) + r * (2603 ±0)` - // Minimum execution time: 17_524_000 picoseconds. 
- Weight::from_parts(17_590_000, 0) - .saturating_add(Weight::from_parts(0, 6287)) - // Standard Error: 354_091 - .saturating_add(Weight::from_parts(15_829_767, 0).saturating_mul(c.into())) - .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(c.into()))) - .saturating_add(T::DbWeight::get().writes(1)) - .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(c.into()))) - .saturating_add(Weight::from_parts(0, 2519).saturating_mul(c.into())) - .saturating_add(Weight::from_parts(0, 2603).saturating_mul(r.into())) - } -} diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/weights/pallet_message_queue.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/weights/pallet_message_queue.rs deleted file mode 100644 index 38cc21cfad9..00000000000 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/weights/pallet_message_queue.rs +++ /dev/null @@ -1,179 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! Autogenerated weights for `pallet_message_queue` -//! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-03-24, STEPS: `2`, REPEAT: `1`, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `i9`, CPU: `13th Gen Intel(R) Core(TM) i9-13900K` -//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("bridge-hub-polkadot-dev"), DB CACHE: 1024 - -// Executed Command: -// ./target/release/polkadot-parachain -// benchmark -// pallet -// --chain -// bridge-hub-polkadot-dev -// --pallet -// pallet_message_queue -// --extrinsic -// * -// --execution -// wasm -// --wasm-execution -// compiled -// --output -// parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/weights - -#![cfg_attr(rustfmt, rustfmt_skip)] -#![allow(unused_parens)] -#![allow(unused_imports)] - -use frame_support::{traits::Get, weights::Weight}; -use sp_std::marker::PhantomData; - -/// Weight functions for `pallet_message_queue`. -pub struct WeightInfo(PhantomData); -impl pallet_message_queue::WeightInfo for WeightInfo { - /// Storage: MessageQueue ServiceHead (r:1 w:0) - /// Proof: MessageQueue ServiceHead (max_values: Some(1), max_size: Some(5), added: 500, mode: MaxEncodedLen) - /// Storage: MessageQueue BookStateFor (r:2 w:2) - /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) - fn ready_ring_knit() -> Weight { - // Proof Size summary in bytes: - // Measured: `189` - // Estimated: `7534` - // Minimum execution time: 38_974_000 picoseconds. 
- Weight::from_parts(38_974_000, 0) - .saturating_add(Weight::from_parts(0, 7534)) - .saturating_add(T::DbWeight::get().reads(3)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: MessageQueue BookStateFor (r:2 w:2) - /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) - /// Storage: MessageQueue ServiceHead (r:1 w:1) - /// Proof: MessageQueue ServiceHead (max_values: Some(1), max_size: Some(5), added: 500, mode: MaxEncodedLen) - fn ready_ring_unknit() -> Weight { - // Proof Size summary in bytes: - // Measured: `184` - // Estimated: `7534` - // Minimum execution time: 11_194_000 picoseconds. - Weight::from_parts(11_194_000, 0) - .saturating_add(Weight::from_parts(0, 7534)) - .saturating_add(T::DbWeight::get().reads(3)) - .saturating_add(T::DbWeight::get().writes(3)) - } - /// Storage: MessageQueue BookStateFor (r:1 w:1) - /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) - fn service_queue_base() -> Weight { - // Proof Size summary in bytes: - // Measured: `6` - // Estimated: `3517` - // Minimum execution time: 5_196_000 picoseconds. - Weight::from_parts(5_196_000, 0) - .saturating_add(Weight::from_parts(0, 3517)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: MessageQueue Pages (r:1 w:1) - /// Proof: MessageQueue Pages (max_values: None, max_size: Some(65585), added: 68060, mode: MaxEncodedLen) - fn service_page_base_completion() -> Weight { - // Proof Size summary in bytes: - // Measured: `72` - // Estimated: `69050` - // Minimum execution time: 6_408_000 picoseconds. - Weight::from_parts(6_408_000, 0) - .saturating_add(Weight::from_parts(0, 69050)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: MessageQueue Pages (r:1 w:1) - /// Proof: MessageQueue Pages (max_values: None, max_size: Some(65585), added: 68060, mode: MaxEncodedLen) - fn service_page_base_no_completion() -> Weight { - // Proof Size summary in bytes: - // Measured: `72` - // Estimated: `69050` - // Minimum execution time: 6_354_000 picoseconds. - Weight::from_parts(6_354_000, 0) - .saturating_add(Weight::from_parts(0, 69050)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - fn service_page_item() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 63_855_000 picoseconds. - Weight::from_parts(63_855_000, 0) - .saturating_add(Weight::from_parts(0, 0)) - } - /// Storage: MessageQueue ServiceHead (r:1 w:1) - /// Proof: MessageQueue ServiceHead (max_values: Some(1), max_size: Some(5), added: 500, mode: MaxEncodedLen) - /// Storage: MessageQueue BookStateFor (r:1 w:0) - /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) - fn bump_service_head() -> Weight { - // Proof Size summary in bytes: - // Measured: `99` - // Estimated: `5007` - // Minimum execution time: 6_764_000 picoseconds. 
- Weight::from_parts(6_764_000, 0) - .saturating_add(Weight::from_parts(0, 5007)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: MessageQueue BookStateFor (r:1 w:1) - /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) - /// Storage: MessageQueue Pages (r:1 w:1) - /// Proof: MessageQueue Pages (max_values: None, max_size: Some(65585), added: 68060, mode: MaxEncodedLen) - fn reap_page() -> Weight { - // Proof Size summary in bytes: - // Measured: `65667` - // Estimated: `72567` - // Minimum execution time: 40_293_000 picoseconds. - Weight::from_parts(40_293_000, 0) - .saturating_add(Weight::from_parts(0, 72567)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: MessageQueue BookStateFor (r:1 w:1) - /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) - /// Storage: MessageQueue Pages (r:1 w:1) - /// Proof: MessageQueue Pages (max_values: None, max_size: Some(65585), added: 68060, mode: MaxEncodedLen) - fn execute_overweight_page_removed() -> Weight { - // Proof Size summary in bytes: - // Measured: `65667` - // Estimated: `72567` - // Minimum execution time: 50_903_000 picoseconds. - Weight::from_parts(50_903_000, 0) - .saturating_add(Weight::from_parts(0, 72567)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: MessageQueue BookStateFor (r:1 w:1) - /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) - /// Storage: MessageQueue Pages (r:1 w:1) - /// Proof: MessageQueue Pages (max_values: None, max_size: Some(65585), added: 68060, mode: MaxEncodedLen) - fn execute_overweight_page_updated() -> Weight { - // Proof Size summary in bytes: - // Measured: `65667` - // Estimated: `72567` - // Minimum execution time: 96_657_000 picoseconds. - Weight::from_parts(96_657_000, 0) - .saturating_add(Weight::from_parts(0, 72567)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(2)) - } -} diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/weights/pallet_multisig.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/weights/pallet_multisig.rs deleted file mode 100644 index 44f3da351f6..00000000000 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/weights/pallet_multisig.rs +++ /dev/null @@ -1,164 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! Autogenerated weights for `pallet_multisig` -//! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-07-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! WORST CASE MAP SIZE: `1000000` -//! 
HOSTNAME: `runner-ynta1nyy-project-238-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! EXECUTION: ``, WASM-EXECUTION: `Compiled`, CHAIN: `Some("bridge-hub-polkadot-dev")`, DB CACHE: 1024 - -// Executed Command: -// ./target/production/polkadot-parachain -// benchmark -// pallet -// --chain=bridge-hub-polkadot-dev -// --wasm-execution=compiled -// --pallet=pallet_multisig -// --no-storage-info -// --no-median-slopes -// --no-min-squares -// --extrinsic=* -// --steps=50 -// --repeat=20 -// --json -// --header=./file_header.txt -// --output=./parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/weights/ - -#![cfg_attr(rustfmt, rustfmt_skip)] -#![allow(unused_parens)] -#![allow(unused_imports)] -#![allow(missing_docs)] - -use frame_support::{traits::Get, weights::Weight}; -use core::marker::PhantomData; - -/// Weight functions for `pallet_multisig`. -pub struct WeightInfo(PhantomData); -impl pallet_multisig::WeightInfo for WeightInfo { - /// The range of component `z` is `[0, 10000]`. - fn as_multi_threshold_1(z: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 13_284_000 picoseconds. - Weight::from_parts(14_761_699, 0) - .saturating_add(Weight::from_parts(0, 0)) - // Standard Error: 7 - .saturating_add(Weight::from_parts(491, 0).saturating_mul(z.into())) - } - /// Storage: `Multisig::Multisigs` (r:1 w:1) - /// Proof: `Multisig::Multisigs` (`max_values`: None, `max_size`: Some(3346), added: 5821, mode: `MaxEncodedLen`) - /// The range of component `s` is `[2, 100]`. - /// The range of component `z` is `[0, 10000]`. - fn as_multi_create(s: u32, z: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `263 + s * (2 ±0)` - // Estimated: `6811` - // Minimum execution time: 44_043_000 picoseconds. - Weight::from_parts(32_303_705, 0) - .saturating_add(Weight::from_parts(0, 6811)) - // Standard Error: 1_280 - .saturating_add(Weight::from_parts(133_233, 0).saturating_mul(s.into())) - // Standard Error: 12 - .saturating_add(Weight::from_parts(1_467, 0).saturating_mul(z.into())) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `Multisig::Multisigs` (r:1 w:1) - /// Proof: `Multisig::Multisigs` (`max_values`: None, `max_size`: Some(3346), added: 5821, mode: `MaxEncodedLen`) - /// The range of component `s` is `[3, 100]`. - /// The range of component `z` is `[0, 10000]`. - fn as_multi_approve(s: u32, z: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `282` - // Estimated: `6811` - // Minimum execution time: 28_494_000 picoseconds. - Weight::from_parts(19_053_318, 0) - .saturating_add(Weight::from_parts(0, 6811)) - // Standard Error: 791 - .saturating_add(Weight::from_parts(112_935, 0).saturating_mul(s.into())) - // Standard Error: 7 - .saturating_add(Weight::from_parts(1_427, 0).saturating_mul(z.into())) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `Multisig::Multisigs` (r:1 w:1) - /// Proof: `Multisig::Multisigs` (`max_values`: None, `max_size`: Some(3346), added: 5821, mode: `MaxEncodedLen`) - /// Storage: `System::Account` (r:1 w:1) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - /// The range of component `s` is `[2, 100]`. - /// The range of component `z` is `[0, 10000]`. 
- fn as_multi_complete(s: u32, z: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `388 + s * (33 ±0)` - // Estimated: `6811` - // Minimum execution time: 49_505_000 picoseconds. - Weight::from_parts(36_407_515, 0) - .saturating_add(Weight::from_parts(0, 6811)) - // Standard Error: 1_595 - .saturating_add(Weight::from_parts(166_201, 0).saturating_mul(s.into())) - // Standard Error: 15 - .saturating_add(Weight::from_parts(1_481, 0).saturating_mul(z.into())) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `Multisig::Multisigs` (r:1 w:1) - /// Proof: `Multisig::Multisigs` (`max_values`: None, `max_size`: Some(3346), added: 5821, mode: `MaxEncodedLen`) - /// The range of component `s` is `[2, 100]`. - fn approve_as_multi_create(s: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `263 + s * (2 ±0)` - // Estimated: `6811` - // Minimum execution time: 30_977_000 picoseconds. - Weight::from_parts(32_222_158, 0) - .saturating_add(Weight::from_parts(0, 6811)) - // Standard Error: 1_872 - .saturating_add(Weight::from_parts(125_197, 0).saturating_mul(s.into())) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `Multisig::Multisigs` (r:1 w:1) - /// Proof: `Multisig::Multisigs` (`max_values`: None, `max_size`: Some(3346), added: 5821, mode: `MaxEncodedLen`) - /// The range of component `s` is `[2, 100]`. - fn approve_as_multi_approve(s: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `282` - // Estimated: `6811` - // Minimum execution time: 17_351_000 picoseconds. - Weight::from_parts(18_130_793, 0) - .saturating_add(Weight::from_parts(0, 6811)) - // Standard Error: 902 - .saturating_add(Weight::from_parts(109_485, 0).saturating_mul(s.into())) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `Multisig::Multisigs` (r:1 w:1) - /// Proof: `Multisig::Multisigs` (`max_values`: None, `max_size`: Some(3346), added: 5821, mode: `MaxEncodedLen`) - /// The range of component `s` is `[2, 100]`. - fn cancel_as_multi(s: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `454 + s * (1 ±0)` - // Estimated: `6811` - // Minimum execution time: 31_554_000 picoseconds. - Weight::from_parts(33_116_785, 0) - .saturating_add(Weight::from_parts(0, 6811)) - // Standard Error: 882 - .saturating_add(Weight::from_parts(119_357, 0).saturating_mul(s.into())) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } -} diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/weights/pallet_session.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/weights/pallet_session.rs deleted file mode 100644 index 86ecc787e97..00000000000 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/weights/pallet_session.rs +++ /dev/null @@ -1,80 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -//! Autogenerated weights for `pallet_session` -//! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-07-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-ynta1nyy-project-238-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! EXECUTION: ``, WASM-EXECUTION: `Compiled`, CHAIN: `Some("bridge-hub-polkadot-dev")`, DB CACHE: 1024 - -// Executed Command: -// ./target/production/polkadot-parachain -// benchmark -// pallet -// --chain=bridge-hub-polkadot-dev -// --wasm-execution=compiled -// --pallet=pallet_session -// --no-storage-info -// --no-median-slopes -// --no-min-squares -// --extrinsic=* -// --steps=50 -// --repeat=20 -// --json -// --header=./file_header.txt -// --output=./parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/weights/ - -#![cfg_attr(rustfmt, rustfmt_skip)] -#![allow(unused_parens)] -#![allow(unused_imports)] -#![allow(missing_docs)] - -use frame_support::{traits::Get, weights::Weight}; -use core::marker::PhantomData; - -/// Weight functions for `pallet_session`. -pub struct WeightInfo(PhantomData); -impl pallet_session::WeightInfo for WeightInfo { - /// Storage: `Session::NextKeys` (r:1 w:1) - /// Proof: `Session::NextKeys` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// Storage: `Session::KeyOwner` (r:1 w:1) - /// Proof: `Session::KeyOwner` (`max_values`: None, `max_size`: None, mode: `Measured`) - fn set_keys() -> Weight { - // Proof Size summary in bytes: - // Measured: `297` - // Estimated: `3762` - // Minimum execution time: 16_905_000 picoseconds. - Weight::from_parts(17_310_000, 0) - .saturating_add(Weight::from_parts(0, 3762)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `Session::NextKeys` (r:1 w:1) - /// Proof: `Session::NextKeys` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// Storage: `Session::KeyOwner` (r:0 w:1) - /// Proof: `Session::KeyOwner` (`max_values`: None, `max_size`: None, mode: `Measured`) - fn purge_keys() -> Weight { - // Proof Size summary in bytes: - // Measured: `279` - // Estimated: `3744` - // Minimum execution time: 12_511_000 picoseconds. - Weight::from_parts(13_055_000, 0) - .saturating_add(Weight::from_parts(0, 3744)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(2)) - } -} diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/weights/pallet_timestamp.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/weights/pallet_timestamp.rs deleted file mode 100644 index a0984d72aac..00000000000 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/weights/pallet_timestamp.rs +++ /dev/null @@ -1,74 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -//! Autogenerated weights for `pallet_timestamp` -//! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-07-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-ynta1nyy-project-238-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! EXECUTION: ``, WASM-EXECUTION: `Compiled`, CHAIN: `Some("bridge-hub-polkadot-dev")`, DB CACHE: 1024 - -// Executed Command: -// ./target/production/polkadot-parachain -// benchmark -// pallet -// --chain=bridge-hub-polkadot-dev -// --wasm-execution=compiled -// --pallet=pallet_timestamp -// --no-storage-info -// --no-median-slopes -// --no-min-squares -// --extrinsic=* -// --steps=50 -// --repeat=20 -// --json -// --header=./file_header.txt -// --output=./parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/weights/ - -#![cfg_attr(rustfmt, rustfmt_skip)] -#![allow(unused_parens)] -#![allow(unused_imports)] -#![allow(missing_docs)] - -use frame_support::{traits::Get, weights::Weight}; -use core::marker::PhantomData; - -/// Weight functions for `pallet_timestamp`. -pub struct WeightInfo(PhantomData); -impl pallet_timestamp::WeightInfo for WeightInfo { - /// Storage: `Timestamp::Now` (r:1 w:1) - /// Proof: `Timestamp::Now` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) - /// Storage: `Aura::CurrentSlot` (r:1 w:0) - /// Proof: `Aura::CurrentSlot` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) - fn set() -> Weight { - // Proof Size summary in bytes: - // Measured: `49` - // Estimated: `1493` - // Minimum execution time: 7_675_000 picoseconds. - Weight::from_parts(7_947_000, 0) - .saturating_add(Weight::from_parts(0, 1493)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(1)) - } - fn on_finalize() -> Weight { - // Proof Size summary in bytes: - // Measured: `57` - // Estimated: `0` - // Minimum execution time: 3_342_000 picoseconds. - Weight::from_parts(3_443_000, 0) - .saturating_add(Weight::from_parts(0, 0)) - } -} diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/weights/pallet_utility.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/weights/pallet_utility.rs deleted file mode 100644 index 2f04094b347..00000000000 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/weights/pallet_utility.rs +++ /dev/null @@ -1,101 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! Autogenerated weights for `pallet_utility` -//! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-07-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! WORST CASE MAP SIZE: `1000000` -//! 
HOSTNAME: `runner-ynta1nyy-project-238-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! EXECUTION: ``, WASM-EXECUTION: `Compiled`, CHAIN: `Some("bridge-hub-polkadot-dev")`, DB CACHE: 1024 - -// Executed Command: -// ./target/production/polkadot-parachain -// benchmark -// pallet -// --chain=bridge-hub-polkadot-dev -// --wasm-execution=compiled -// --pallet=pallet_utility -// --no-storage-info -// --no-median-slopes -// --no-min-squares -// --extrinsic=* -// --steps=50 -// --repeat=20 -// --json -// --header=./file_header.txt -// --output=./parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/weights/ - -#![cfg_attr(rustfmt, rustfmt_skip)] -#![allow(unused_parens)] -#![allow(unused_imports)] -#![allow(missing_docs)] - -use frame_support::{traits::Get, weights::Weight}; -use core::marker::PhantomData; - -/// Weight functions for `pallet_utility`. -pub struct WeightInfo(PhantomData); -impl pallet_utility::WeightInfo for WeightInfo { - /// The range of component `c` is `[0, 1000]`. - fn batch(c: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 6_810_000 picoseconds. - Weight::from_parts(6_290_871, 0) - .saturating_add(Weight::from_parts(0, 0)) - // Standard Error: 1_678 - .saturating_add(Weight::from_parts(5_193_419, 0).saturating_mul(c.into())) - } - fn as_derivative() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 4_753_000 picoseconds. - Weight::from_parts(4_890_000, 0) - .saturating_add(Weight::from_parts(0, 0)) - } - /// The range of component `c` is `[0, 1000]`. - fn batch_all(c: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 6_873_000 picoseconds. - Weight::from_parts(9_780_422, 0) - .saturating_add(Weight::from_parts(0, 0)) - // Standard Error: 2_035 - .saturating_add(Weight::from_parts(5_473_943, 0).saturating_mul(c.into())) - } - fn dispatch_as() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 8_443_000 picoseconds. - Weight::from_parts(8_904_000, 0) - .saturating_add(Weight::from_parts(0, 0)) - } - /// The range of component `c` is `[0, 1000]`. - fn force_batch(c: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 6_820_000 picoseconds. - Weight::from_parts(8_206_355, 0) - .saturating_add(Weight::from_parts(0, 0)) - // Standard Error: 1_327 - .saturating_add(Weight::from_parts(5_187_839, 0).saturating_mul(c.into())) - } -} diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/weights/pallet_xcm.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/weights/pallet_xcm.rs deleted file mode 100644 index b73c009cbda..00000000000 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/weights/pallet_xcm.rs +++ /dev/null @@ -1,323 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Cumulus. - -// Cumulus is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Cumulus is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Cumulus. If not, see . - -//! Autogenerated weights for `pallet_xcm` -//! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-11-10, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-yprdrvc7-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("bridge-hub-polkadot-dev")`, DB CACHE: 1024 - -// Executed Command: -// target/production/polkadot-parachain -// benchmark -// pallet -// --steps=50 -// --repeat=20 -// --extrinsic=* -// --wasm-execution=compiled -// --heap-pages=4096 -// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json -// --pallet=pallet_xcm -// --chain=bridge-hub-polkadot-dev -// --header=./cumulus/file_header.txt -// --output=./cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/weights/ - -#![cfg_attr(rustfmt, rustfmt_skip)] -#![allow(unused_parens)] -#![allow(unused_imports)] -#![allow(missing_docs)] - -use frame_support::{traits::Get, weights::Weight}; -use core::marker::PhantomData; - -/// Weight functions for `pallet_xcm`. -pub struct WeightInfo(PhantomData); -impl pallet_xcm::WeightInfo for WeightInfo { - /// Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) - /// Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) - /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) - /// Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) - /// Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) - /// Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) - /// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - fn send() -> Weight { - // Proof Size summary in bytes: - // Measured: `38` - // Estimated: `3503` - // Minimum execution time: 22_442_000 picoseconds. - Weight::from_parts(23_346_000, 0) - .saturating_add(Weight::from_parts(0, 3503)) - .saturating_add(T::DbWeight::get().reads(6)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `ParachainInfo::ParachainId` (r:1 w:0) - /// Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - fn teleport_assets() -> Weight { - // Proof Size summary in bytes: - // Measured: `32` - // Estimated: `1489` - // Minimum execution time: 19_655_000 picoseconds. 
- Weight::from_parts(20_086_000, 0) - .saturating_add(Weight::from_parts(0, 1489)) - .saturating_add(T::DbWeight::get().reads(1)) - } - /// Storage: `Benchmark::Override` (r:0 w:0) - /// Proof: `Benchmark::Override` (`max_values`: None, `max_size`: None, mode: `Measured`) - fn reserve_transfer_assets() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 18_446_744_073_709_551_000 picoseconds. - Weight::from_parts(18_446_744_073_709_551_000, 0) - .saturating_add(Weight::from_parts(0, 0)) - } - /// Storage: `Benchmark::Override` (r:0 w:0) - /// Proof: `Benchmark::Override` (`max_values`: None, `max_size`: None, mode: `Measured`) - fn execute() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 18_446_744_073_709_551_000 picoseconds. - Weight::from_parts(18_446_744_073_709_551_000, 0) - .saturating_add(Weight::from_parts(0, 0)) - } - /// Storage: `PolkadotXcm::SupportedVersion` (r:0 w:1) - /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) - fn force_xcm_version() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 6_858_000 picoseconds. - Weight::from_parts(7_225_000, 0) - .saturating_add(Weight::from_parts(0, 0)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `PolkadotXcm::SafeXcmVersion` (r:0 w:1) - /// Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - fn force_default_xcm_version() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 2_099_000 picoseconds. - Weight::from_parts(2_190_000, 0) - .saturating_add(Weight::from_parts(0, 0)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `PolkadotXcm::VersionNotifiers` (r:1 w:1) - /// Proof: `PolkadotXcm::VersionNotifiers` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// Storage: `PolkadotXcm::QueryCounter` (r:1 w:1) - /// Proof: `PolkadotXcm::QueryCounter` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) - /// Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) - /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) - /// Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) - /// Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) - /// Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) - /// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `PolkadotXcm::Queries` (r:0 w:1) - /// Proof: `PolkadotXcm::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`) - fn force_subscribe_version_notify() -> Weight { - // Proof Size summary in bytes: - // Measured: `38` - // Estimated: `3503` - // Minimum execution time: 27_073_000 picoseconds. 
- Weight::from_parts(27_584_000, 0) - .saturating_add(Weight::from_parts(0, 3503)) - .saturating_add(T::DbWeight::get().reads(8)) - .saturating_add(T::DbWeight::get().writes(5)) - } - /// Storage: `PolkadotXcm::VersionNotifiers` (r:1 w:1) - /// Proof: `PolkadotXcm::VersionNotifiers` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) - /// Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) - /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) - /// Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) - /// Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) - /// Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) - /// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `PolkadotXcm::Queries` (r:0 w:1) - /// Proof: `PolkadotXcm::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`) - fn force_unsubscribe_version_notify() -> Weight { - // Proof Size summary in bytes: - // Measured: `255` - // Estimated: `3720` - // Minimum execution time: 29_949_000 picoseconds. - Weight::from_parts(30_760_000, 0) - .saturating_add(Weight::from_parts(0, 3720)) - .saturating_add(T::DbWeight::get().reads(7)) - .saturating_add(T::DbWeight::get().writes(4)) - } - /// Storage: `PolkadotXcm::XcmExecutionSuspended` (r:0 w:1) - /// Proof: `PolkadotXcm::XcmExecutionSuspended` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - fn force_suspension() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 2_192_000 picoseconds. - Weight::from_parts(2_276_000, 0) - .saturating_add(Weight::from_parts(0, 0)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `PolkadotXcm::SupportedVersion` (r:4 w:2) - /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) - fn migrate_supported_version() -> Weight { - // Proof Size summary in bytes: - // Measured: `95` - // Estimated: `10985` - // Minimum execution time: 14_681_000 picoseconds. - Weight::from_parts(15_131_000, 0) - .saturating_add(Weight::from_parts(0, 10985)) - .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `PolkadotXcm::VersionNotifiers` (r:4 w:2) - /// Proof: `PolkadotXcm::VersionNotifiers` (`max_values`: None, `max_size`: None, mode: `Measured`) - fn migrate_version_notifiers() -> Weight { - // Proof Size summary in bytes: - // Measured: `99` - // Estimated: `10989` - // Minimum execution time: 14_523_000 picoseconds. 
- Weight::from_parts(15_113_000, 0) - .saturating_add(Weight::from_parts(0, 10989)) - .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:5 w:0) - /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) - fn already_notified_target() -> Weight { - // Proof Size summary in bytes: - // Measured: `106` - // Estimated: `13471` - // Minimum execution time: 15_989_000 picoseconds. - Weight::from_parts(16_518_000, 0) - .saturating_add(Weight::from_parts(0, 13471)) - .saturating_add(T::DbWeight::get().reads(5)) - } - /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:2 w:1) - /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) - /// Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) - /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) - /// Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) - /// Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) - /// Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) - /// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - fn notify_current_targets() -> Weight { - // Proof Size summary in bytes: - // Measured: `106` - // Estimated: `6046` - // Minimum execution time: 25_127_000 picoseconds. - Weight::from_parts(25_773_000, 0) - .saturating_add(Weight::from_parts(0, 6046)) - .saturating_add(T::DbWeight::get().reads(8)) - .saturating_add(T::DbWeight::get().writes(3)) - } - /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:3 w:0) - /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) - fn notify_target_migration_fail() -> Weight { - // Proof Size summary in bytes: - // Measured: `136` - // Estimated: `8551` - // Minimum execution time: 8_352_000 picoseconds. - Weight::from_parts(8_592_000, 0) - .saturating_add(Weight::from_parts(0, 8551)) - .saturating_add(T::DbWeight::get().reads(3)) - } - /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:4 w:2) - /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) - fn migrate_version_notify_targets() -> Weight { - // Proof Size summary in bytes: - // Measured: `106` - // Estimated: `10996` - // Minimum execution time: 14_658_000 picoseconds. 
- Weight::from_parts(15_345_000, 0) - .saturating_add(Weight::from_parts(0, 10996)) - .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:4 w:2) - /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) - /// Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) - /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) - /// Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) - /// Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) - /// Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) - /// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - fn migrate_and_notify_old_targets() -> Weight { - // Proof Size summary in bytes: - // Measured: `112` - // Estimated: `11002` - // Minimum execution time: 31_478_000 picoseconds. - Weight::from_parts(32_669_000, 0) - .saturating_add(Weight::from_parts(0, 11002)) - .saturating_add(T::DbWeight::get().reads(10)) - .saturating_add(T::DbWeight::get().writes(4)) - } - /// Storage: `PolkadotXcm::QueryCounter` (r:1 w:1) - /// Proof: `PolkadotXcm::QueryCounter` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `PolkadotXcm::Queries` (r:0 w:1) - /// Proof: `PolkadotXcm::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`) - fn new_query() -> Weight { - // Proof Size summary in bytes: - // Measured: `32` - // Estimated: `1517` - // Minimum execution time: 4_066_000 picoseconds. - Weight::from_parts(4_267_000, 0) - .saturating_add(Weight::from_parts(0, 1517)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `PolkadotXcm::Queries` (r:1 w:1) - /// Proof: `PolkadotXcm::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`) - fn take_response() -> Weight { - // Proof Size summary in bytes: - // Measured: `7669` - // Estimated: `11134` - // Minimum execution time: 25_260_000 picoseconds. - Weight::from_parts(25_570_000, 0) - .saturating_add(Weight::from_parts(0, 11134)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } -} diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/weights/paritydb_weights.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/weights/paritydb_weights.rs deleted file mode 100644 index 25679703831..00000000000 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/weights/paritydb_weights.rs +++ /dev/null @@ -1,63 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -pub mod constants { - use frame_support::{ - parameter_types, - weights::{constants, RuntimeDbWeight}, - }; - - parameter_types! { - /// `ParityDB` can be enabled with a feature flag, but is still experimental. These weights - /// are available for brave runtime engineers who may want to try this out as default. - pub const ParityDbWeight: RuntimeDbWeight = RuntimeDbWeight { - read: 8_000 * constants::WEIGHT_REF_TIME_PER_NANOS, - write: 50_000 * constants::WEIGHT_REF_TIME_PER_NANOS, - }; - } - - #[cfg(test)] - mod test_db_weights { - use super::constants::ParityDbWeight as W; - use frame_support::weights::constants; - - /// Checks that all weights exist and have sane values. - // NOTE: If this test fails but you are sure that the generated values are fine, - // you can delete it. - #[test] - fn sane() { - // At least 1 µs. - assert!( - W::get().reads(1).ref_time() >= constants::WEIGHT_REF_TIME_PER_MICROS, - "Read weight should be at least 1 µs." - ); - assert!( - W::get().writes(1).ref_time() >= constants::WEIGHT_REF_TIME_PER_MICROS, - "Write weight should be at least 1 µs." - ); - // At most 1 ms. - assert!( - W::get().reads(1).ref_time() <= constants::WEIGHT_REF_TIME_PER_MILLIS, - "Read weight should be at most 1 ms." - ); - assert!( - W::get().writes(1).ref_time() <= constants::WEIGHT_REF_TIME_PER_MILLIS, - "Write weight should be at most 1 ms." - ); - } - } -} diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/weights/rocksdb_weights.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/weights/rocksdb_weights.rs deleted file mode 100644 index 3dd817aa6f1..00000000000 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/weights/rocksdb_weights.rs +++ /dev/null @@ -1,63 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -pub mod constants { - use frame_support::{ - parameter_types, - weights::{constants, RuntimeDbWeight}, - }; - - parameter_types! { - /// By default, Substrate uses `RocksDB`, so this will be the weight used throughout - /// the runtime. - pub const RocksDbWeight: RuntimeDbWeight = RuntimeDbWeight { - read: 25_000 * constants::WEIGHT_REF_TIME_PER_NANOS, - write: 100_000 * constants::WEIGHT_REF_TIME_PER_NANOS, - }; - } - - #[cfg(test)] - mod test_db_weights { - use super::constants::RocksDbWeight as W; - use frame_support::weights::constants; - - /// Checks that all weights exist and have sane values. 
- // NOTE: If this test fails but you are sure that the generated values are fine, - // you can delete it. - #[test] - fn sane() { - // At least 1 µs. - assert!( - W::get().reads(1).ref_time() >= constants::WEIGHT_REF_TIME_PER_MICROS, - "Read weight should be at least 1 µs." - ); - assert!( - W::get().writes(1).ref_time() >= constants::WEIGHT_REF_TIME_PER_MICROS, - "Write weight should be at least 1 µs." - ); - // At most 1 ms. - assert!( - W::get().reads(1).ref_time() <= constants::WEIGHT_REF_TIME_PER_MILLIS, - "Read weight should be at most 1 ms." - ); - assert!( - W::get().writes(1).ref_time() <= constants::WEIGHT_REF_TIME_PER_MILLIS, - "Write weight should be at most 1 ms." - ); - } - } -} diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/weights/xcm/mod.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/weights/xcm/mod.rs deleted file mode 100644 index 33a48f36812..00000000000 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/weights/xcm/mod.rs +++ /dev/null @@ -1,244 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -mod pallet_xcm_benchmarks_fungible; -mod pallet_xcm_benchmarks_generic; - -use crate::{xcm_config::MaxAssetsIntoHolding, Runtime}; -use frame_support::weights::Weight; -use pallet_xcm_benchmarks_fungible::WeightInfo as XcmFungibleWeight; -use pallet_xcm_benchmarks_generic::WeightInfo as XcmGeneric; -use sp_std::prelude::*; -use xcm::{latest::prelude::*, DoubleEncoded}; - -trait WeighMultiAssets { - fn weigh_multi_assets(&self, weight: Weight) -> Weight; -} - -const MAX_ASSETS: u64 = 100; - -impl WeighMultiAssets for MultiAssetFilter { - fn weigh_multi_assets(&self, weight: Weight) -> Weight { - match self { - Self::Definite(assets) => weight.saturating_mul(assets.inner().iter().count() as u64), - Self::Wild(asset) => match asset { - All => weight.saturating_mul(MAX_ASSETS), - AllOf { fun, .. } => match fun { - WildFungibility::Fungible => weight, - // Magic number 2 has to do with the fact that we could have up to 2 times - // MaxAssetsIntoHolding in the worst-case scenario. - WildFungibility::NonFungible => - weight.saturating_mul((MaxAssetsIntoHolding::get() * 2) as u64), - }, - AllCounted(count) => weight.saturating_mul(MAX_ASSETS.min(*count as u64)), - AllOfCounted { count, .. 
} => weight.saturating_mul(MAX_ASSETS.min(*count as u64)), - }, - } - } -} - -impl WeighMultiAssets for MultiAssets { - fn weigh_multi_assets(&self, weight: Weight) -> Weight { - weight.saturating_mul(self.inner().iter().count() as u64) - } -} - -pub struct BridgeHubPolkadotXcmWeight(core::marker::PhantomData); -impl XcmWeightInfo for BridgeHubPolkadotXcmWeight { - fn withdraw_asset(assets: &MultiAssets) -> Weight { - assets.weigh_multi_assets(XcmFungibleWeight::::withdraw_asset()) - } - fn reserve_asset_deposited(assets: &MultiAssets) -> Weight { - assets.weigh_multi_assets(XcmFungibleWeight::::reserve_asset_deposited()) - } - fn receive_teleported_asset(assets: &MultiAssets) -> Weight { - assets.weigh_multi_assets(XcmFungibleWeight::::receive_teleported_asset()) - } - fn query_response( - _query_id: &u64, - _response: &Response, - _max_weight: &Weight, - _querier: &Option, - ) -> Weight { - XcmGeneric::::query_response() - } - fn transfer_asset(assets: &MultiAssets, _dest: &MultiLocation) -> Weight { - assets.weigh_multi_assets(XcmFungibleWeight::::transfer_asset()) - } - fn transfer_reserve_asset( - assets: &MultiAssets, - _dest: &MultiLocation, - _xcm: &Xcm<()>, - ) -> Weight { - assets.weigh_multi_assets(XcmFungibleWeight::::transfer_reserve_asset()) - } - fn transact( - _origin_type: &OriginKind, - _require_weight_at_most: &Weight, - _call: &DoubleEncoded, - ) -> Weight { - XcmGeneric::::transact() - } - fn hrmp_new_channel_open_request( - _sender: &u32, - _max_message_size: &u32, - _max_capacity: &u32, - ) -> Weight { - // XCM Executor does not currently support HRMP channel operations - Weight::MAX - } - fn hrmp_channel_accepted(_recipient: &u32) -> Weight { - // XCM Executor does not currently support HRMP channel operations - Weight::MAX - } - fn hrmp_channel_closing(_initiator: &u32, _sender: &u32, _recipient: &u32) -> Weight { - // XCM Executor does not currently support HRMP channel operations - Weight::MAX - } - fn clear_origin() -> Weight { - XcmGeneric::::clear_origin() - } - fn descend_origin(_who: &InteriorMultiLocation) -> Weight { - XcmGeneric::::descend_origin() - } - fn report_error(_query_response_info: &QueryResponseInfo) -> Weight { - XcmGeneric::::report_error() - } - - fn deposit_asset(assets: &MultiAssetFilter, _dest: &MultiLocation) -> Weight { - assets.weigh_multi_assets(XcmFungibleWeight::::deposit_asset()) - } - fn deposit_reserve_asset( - assets: &MultiAssetFilter, - _dest: &MultiLocation, - _xcm: &Xcm<()>, - ) -> Weight { - assets.weigh_multi_assets(XcmFungibleWeight::::deposit_reserve_asset()) - } - fn exchange_asset(_give: &MultiAssetFilter, _receive: &MultiAssets, _maximal: &bool) -> Weight { - Weight::MAX - } - fn initiate_reserve_withdraw( - assets: &MultiAssetFilter, - _reserve: &MultiLocation, - _xcm: &Xcm<()>, - ) -> Weight { - assets.weigh_multi_assets(XcmFungibleWeight::::initiate_reserve_withdraw()) - } - fn initiate_teleport( - assets: &MultiAssetFilter, - _dest: &MultiLocation, - _xcm: &Xcm<()>, - ) -> Weight { - assets.weigh_multi_assets(XcmFungibleWeight::::initiate_teleport()) - } - fn report_holding(_response_info: &QueryResponseInfo, _assets: &MultiAssetFilter) -> Weight { - XcmGeneric::::report_holding() - } - fn buy_execution(_fees: &MultiAsset, _weight_limit: &WeightLimit) -> Weight { - XcmGeneric::::buy_execution() - } - fn refund_surplus() -> Weight { - XcmGeneric::::refund_surplus() - } - fn set_error_handler(_xcm: &Xcm) -> Weight { - XcmGeneric::::set_error_handler() - } - fn set_appendix(_xcm: &Xcm) -> Weight { - 
XcmGeneric::::set_appendix() - } - fn clear_error() -> Weight { - XcmGeneric::::clear_error() - } - fn claim_asset(_assets: &MultiAssets, _ticket: &MultiLocation) -> Weight { - XcmGeneric::::claim_asset() - } - fn trap(_code: &u64) -> Weight { - XcmGeneric::::trap() - } - fn subscribe_version(_query_id: &QueryId, _max_response_weight: &Weight) -> Weight { - XcmGeneric::::subscribe_version() - } - fn unsubscribe_version() -> Weight { - XcmGeneric::::unsubscribe_version() - } - fn burn_asset(assets: &MultiAssets) -> Weight { - assets.weigh_multi_assets(XcmGeneric::::burn_asset()) - } - fn expect_asset(assets: &MultiAssets) -> Weight { - assets.weigh_multi_assets(XcmGeneric::::expect_asset()) - } - fn expect_origin(_origin: &Option) -> Weight { - XcmGeneric::::expect_origin() - } - fn expect_error(_error: &Option<(u32, XcmError)>) -> Weight { - XcmGeneric::::expect_error() - } - fn expect_transact_status(_transact_status: &MaybeErrorCode) -> Weight { - XcmGeneric::::expect_transact_status() - } - fn query_pallet(_module_name: &Vec, _response_info: &QueryResponseInfo) -> Weight { - XcmGeneric::::query_pallet() - } - fn expect_pallet( - _index: &u32, - _name: &Vec, - _module_name: &Vec, - _crate_major: &u32, - _min_crate_minor: &u32, - ) -> Weight { - XcmGeneric::::expect_pallet() - } - fn report_transact_status(_response_info: &QueryResponseInfo) -> Weight { - XcmGeneric::::report_transact_status() - } - fn clear_transact_status() -> Weight { - XcmGeneric::::clear_transact_status() - } - fn universal_origin(_: &Junction) -> Weight { - Weight::MAX - } - fn export_message(_: &NetworkId, _: &Junctions, _: &Xcm<()>) -> Weight { - Weight::MAX - } - fn lock_asset(_: &MultiAsset, _: &MultiLocation) -> Weight { - Weight::MAX - } - fn unlock_asset(_: &MultiAsset, _: &MultiLocation) -> Weight { - Weight::MAX - } - fn note_unlockable(_: &MultiAsset, _: &MultiLocation) -> Weight { - Weight::MAX - } - fn request_unlock(_: &MultiAsset, _: &MultiLocation) -> Weight { - Weight::MAX - } - fn set_fees_mode(_: &bool) -> Weight { - XcmGeneric::::set_fees_mode() - } - fn set_topic(_topic: &[u8; 32]) -> Weight { - XcmGeneric::::set_topic() - } - fn clear_topic() -> Weight { - XcmGeneric::::clear_topic() - } - fn alias_origin(_: &MultiLocation) -> Weight { - // XCM Executor does not currently support alias origin operations - Weight::MAX - } - fn unpaid_execution(_: &WeightLimit, _: &Option) -> Weight { - XcmGeneric::::unpaid_execution() - } -} diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/weights/xcm/pallet_xcm_benchmarks_fungible.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/weights/xcm/pallet_xcm_benchmarks_fungible.rs deleted file mode 100644 index 814c416bd4c..00000000000 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/weights/xcm/pallet_xcm_benchmarks_fungible.rs +++ /dev/null @@ -1,187 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -//! Autogenerated weights for `pallet_xcm_benchmarks::fungible` -//! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-09-28, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-nbnwcyh-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! WASM-EXECUTION: Compiled, CHAIN: Some("bridge-hub-polkadot-dev"), DB CACHE: 1024 - -// Executed Command: -// target/production/polkadot-parachain -// benchmark -// pallet -// --steps=50 -// --repeat=20 -// --extrinsic=* -// --wasm-execution=compiled -// --heap-pages=4096 -// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json -// --pallet=pallet_xcm_benchmarks::fungible -// --chain=bridge-hub-polkadot-dev -// --header=./cumulus/file_header.txt -// --template=./cumulus/templates/xcm-bench-template.hbs -// --output=./cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/weights/xcm/ - -#![cfg_attr(rustfmt, rustfmt_skip)] -#![allow(unused_parens)] -#![allow(unused_imports)] - -use frame_support::{traits::Get, weights::Weight}; -use sp_std::marker::PhantomData; - -/// Weights for `pallet_xcm_benchmarks::fungible`. -pub struct WeightInfo(PhantomData); -impl WeightInfo { - // Storage: `System::Account` (r:1 w:1) - // Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - pub fn withdraw_asset() -> Weight { - // Proof Size summary in bytes: - // Measured: `101` - // Estimated: `3593` - // Minimum execution time: 24_237_000 picoseconds. - Weight::from_parts(24_697_000, 3593) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - // Storage: `System::Account` (r:2 w:2) - // Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - pub fn transfer_asset() -> Weight { - // Proof Size summary in bytes: - // Measured: `153` - // Estimated: `6196` - // Minimum execution time: 52_269_000 picoseconds. - Weight::from_parts(53_848_000, 6196) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(2)) - } - // Storage: `System::Account` (r:2 w:2) - // Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - // Storage: `ParachainInfo::ParachainId` (r:1 w:0) - // Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - // Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) - // Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) - // Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) - // Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - // Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) - // Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - // Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) - // Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - // Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) - // Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - pub fn transfer_reserve_asset() -> Weight { - // Proof Size summary in bytes: - // Measured: `223` - // Estimated: `6196` - // Minimum execution time: 77_611_000 picoseconds. 
- Weight::from_parts(82_634_000, 6196) - .saturating_add(T::DbWeight::get().reads(8)) - .saturating_add(T::DbWeight::get().writes(4)) - } - // Storage: `Benchmark::Override` (r:0 w:0) - // Proof: `Benchmark::Override` (`max_values`: None, `max_size`: None, mode: `Measured`) - pub fn reserve_asset_deposited() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 18_446_744_073_709_551_000 picoseconds. - Weight::from_parts(18_446_744_073_709_551_000, 0) - } - // Storage: `ParachainInfo::ParachainId` (r:1 w:0) - // Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - // Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) - // Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) - // Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) - // Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - // Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) - // Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - // Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) - // Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - // Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) - // Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - pub fn initiate_reserve_withdraw() -> Weight { - // Proof Size summary in bytes: - // Measured: `70` - // Estimated: `3535` - // Minimum execution time: 29_506_000 picoseconds. - Weight::from_parts(30_269_000, 3535) - .saturating_add(T::DbWeight::get().reads(6)) - .saturating_add(T::DbWeight::get().writes(2)) - } - pub fn receive_teleported_asset() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 3_541_000 picoseconds. - Weight::from_parts(3_629_000, 0) - } - // Storage: `System::Account` (r:1 w:1) - // Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - pub fn deposit_asset() -> Weight { - // Proof Size summary in bytes: - // Measured: `52` - // Estimated: `3593` - // Minimum execution time: 25_651_000 picoseconds. 
- Weight::from_parts(26_078_000, 3593) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - // Storage: `System::Account` (r:1 w:1) - // Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - // Storage: `ParachainInfo::ParachainId` (r:1 w:0) - // Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - // Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) - // Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) - // Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) - // Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - // Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) - // Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - // Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) - // Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - // Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) - // Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - pub fn deposit_reserve_asset() -> Weight { - // Proof Size summary in bytes: - // Measured: `122` - // Estimated: `3593` - // Minimum execution time: 52_050_000 picoseconds. - Weight::from_parts(53_293_000, 3593) - .saturating_add(T::DbWeight::get().reads(7)) - .saturating_add(T::DbWeight::get().writes(3)) - } - // Storage: `ParachainInfo::ParachainId` (r:1 w:0) - // Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - // Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) - // Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) - // Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) - // Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - // Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) - // Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - // Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) - // Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - // Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) - // Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - pub fn initiate_teleport() -> Weight { - // Proof Size summary in bytes: - // Measured: `70` - // Estimated: `3535` - // Minimum execution time: 30_009_000 picoseconds. - Weight::from_parts(30_540_000, 3535) - .saturating_add(T::DbWeight::get().reads(6)) - .saturating_add(T::DbWeight::get().writes(2)) - } -} diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/weights/xcm/pallet_xcm_benchmarks_generic.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/weights/xcm/pallet_xcm_benchmarks_generic.rs deleted file mode 100644 index 9a039a6d63b..00000000000 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/weights/xcm/pallet_xcm_benchmarks_generic.rs +++ /dev/null @@ -1,328 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. 
-// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! Autogenerated weights for `pallet_xcm_benchmarks::generic` -//! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-07-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-ynta1nyy-project-238-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! EXECUTION: , WASM-EXECUTION: Compiled, CHAIN: Some("bridge-hub-polkadot-dev"), DB CACHE: 1024 - -// Executed Command: -// ./target/production/polkadot-parachain -// benchmark -// pallet -// --template=./templates/xcm-bench-template.hbs -// --chain=bridge-hub-polkadot-dev -// --wasm-execution=compiled -// --pallet=pallet_xcm_benchmarks::generic -// --no-storage-info -// --no-median-slopes -// --no-min-squares -// --extrinsic=* -// --steps=50 -// --repeat=20 -// --json -// --header=./file_header.txt -// --output=./parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/weights/xcm/pallet_xcm_benchmarks_generic.rs - -#![cfg_attr(rustfmt, rustfmt_skip)] -#![allow(unused_parens)] -#![allow(unused_imports)] - -use frame_support::{traits::Get, weights::Weight}; -use sp_std::marker::PhantomData; - -/// Weights for `pallet_xcm_benchmarks::generic`. -pub struct WeightInfo(PhantomData); -impl WeightInfo { - // Storage: `ParachainInfo::ParachainId` (r:1 w:0) - // Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - // Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) - // Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) - // Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) - // Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - // Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) - // Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - // Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) - // Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - // Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) - // Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - pub fn report_holding() -> Weight { - // Proof Size summary in bytes: - // Measured: `70` - // Estimated: `3535` - // Minimum execution time: 30_923_000 picoseconds. - Weight::from_parts(31_653_000, 3535) - .saturating_add(T::DbWeight::get().reads(6)) - .saturating_add(T::DbWeight::get().writes(2)) - } - pub fn buy_execution() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 2_837_000 picoseconds. 
- Weight::from_parts(2_932_000, 0) - } - // Storage: `PolkadotXcm::Queries` (r:1 w:0) - // Proof: `PolkadotXcm::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`) - pub fn query_response() -> Weight { - // Proof Size summary in bytes: - // Measured: `32` - // Estimated: `3497` - // Minimum execution time: 10_319_000 picoseconds. - Weight::from_parts(10_614_000, 3497) - .saturating_add(T::DbWeight::get().reads(1)) - } - pub fn transact() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 11_466_000 picoseconds. - Weight::from_parts(12_005_000, 0) - } - pub fn refund_surplus() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 3_039_000 picoseconds. - Weight::from_parts(3_125_000, 0) - } - pub fn set_error_handler() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 2_655_000 picoseconds. - Weight::from_parts(2_717_000, 0) - } - pub fn set_appendix() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 2_655_000 picoseconds. - Weight::from_parts(2_695_000, 0) - } - pub fn clear_error() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 2_612_000 picoseconds. - Weight::from_parts(2_685_000, 0) - } - pub fn descend_origin() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 3_286_000 picoseconds. - Weight::from_parts(3_425_000, 0) - } - pub fn clear_origin() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 2_613_000 picoseconds. - Weight::from_parts(2_699_000, 0) - } - // Storage: `ParachainInfo::ParachainId` (r:1 w:0) - // Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - // Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) - // Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) - // Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) - // Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - // Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) - // Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - // Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) - // Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - // Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) - // Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - pub fn report_error() -> Weight { - // Proof Size summary in bytes: - // Measured: `70` - // Estimated: `3535` - // Minimum execution time: 24_616_000 picoseconds. - Weight::from_parts(25_147_000, 3535) - .saturating_add(T::DbWeight::get().reads(6)) - .saturating_add(T::DbWeight::get().writes(2)) - } - // Storage: `PolkadotXcm::AssetTraps` (r:1 w:1) - // Proof: `PolkadotXcm::AssetTraps` (`max_values`: None, `max_size`: None, mode: `Measured`) - pub fn claim_asset() -> Weight { - // Proof Size summary in bytes: - // Measured: `90` - // Estimated: `3555` - // Minimum execution time: 14_511_000 picoseconds. 
- Weight::from_parts(14_831_000, 3555) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - pub fn trap() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 2_640_000 picoseconds. - Weight::from_parts(2_702_000, 0) - } - // Storage: `PolkadotXcm::VersionNotifyTargets` (r:1 w:1) - // Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) - // Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) - // Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) - // Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) - // Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - // Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) - // Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - // Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) - // Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - // Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) - // Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - pub fn subscribe_version() -> Weight { - // Proof Size summary in bytes: - // Measured: `38` - // Estimated: `3503` - // Minimum execution time: 26_044_000 picoseconds. - Weight::from_parts(26_561_000, 3503) - .saturating_add(T::DbWeight::get().reads(6)) - .saturating_add(T::DbWeight::get().writes(3)) - } - // Storage: `PolkadotXcm::VersionNotifyTargets` (r:0 w:1) - // Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) - pub fn unsubscribe_version() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 4_568_000 picoseconds. - Weight::from_parts(4_764_000, 0) - .saturating_add(T::DbWeight::get().writes(1)) - } - pub fn burn_asset() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 3_953_000 picoseconds. - Weight::from_parts(4_079_000, 0) - } - pub fn expect_asset() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 2_793_000 picoseconds. - Weight::from_parts(2_914_000, 0) - } - pub fn expect_origin() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 2_719_000 picoseconds. - Weight::from_parts(2_829_000, 0) - } - pub fn expect_error() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 2_710_000 picoseconds. - Weight::from_parts(2_824_000, 0) - } - pub fn expect_transact_status() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 2_941_000 picoseconds. 
- Weight::from_parts(3_201_000, 0) - } - // Storage: `ParachainInfo::ParachainId` (r:1 w:0) - // Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - // Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) - // Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) - // Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) - // Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - // Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) - // Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - // Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) - // Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - // Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) - // Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - pub fn query_pallet() -> Weight { - // Proof Size summary in bytes: - // Measured: `70` - // Estimated: `3535` - // Minimum execution time: 28_080_000 picoseconds. - Weight::from_parts(28_920_000, 3535) - .saturating_add(T::DbWeight::get().reads(6)) - .saturating_add(T::DbWeight::get().writes(2)) - } - pub fn expect_pallet() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 4_752_000 picoseconds. - Weight::from_parts(4_982_000, 0) - } - // Storage: `ParachainInfo::ParachainId` (r:1 w:0) - // Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - // Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) - // Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) - // Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) - // Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - // Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) - // Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - // Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) - // Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - // Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) - // Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - pub fn report_transact_status() -> Weight { - // Proof Size summary in bytes: - // Measured: `70` - // Estimated: `3535` - // Minimum execution time: 24_810_000 picoseconds. - Weight::from_parts(25_270_000, 3535) - .saturating_add(T::DbWeight::get().reads(6)) - .saturating_add(T::DbWeight::get().writes(2)) - } - pub fn clear_transact_status() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 2_676_000 picoseconds. - Weight::from_parts(2_780_000, 0) - } - pub fn set_topic() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 2_624_000 picoseconds. - Weight::from_parts(2_710_000, 0) - } - pub fn clear_topic() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 2_611_000 picoseconds. 
- Weight::from_parts(2_707_000, 0) - } - pub fn set_fees_mode() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 2_653_000 picoseconds. - Weight::from_parts(2_740_000, 0) - } - pub fn unpaid_execution() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 2_821_000 picoseconds. - Weight::from_parts(2_874_000, 0) - } -} diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/xcm_config.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/xcm_config.rs deleted file mode 100644 index 61eee1c4c5a..00000000000 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/src/xcm_config.rs +++ /dev/null @@ -1,284 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Cumulus. - -// Cumulus is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Cumulus is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Cumulus. If not, see . - -use super::{ - AccountId, AllPalletsWithSystem, Balances, ParachainInfo, ParachainSystem, PolkadotXcm, - Runtime, RuntimeCall, RuntimeEvent, RuntimeOrigin, TransactionByteFee, WeightToFee, XcmpQueue, - CENTS, -}; -use frame_support::{ - match_types, parameter_types, - traits::{ConstU32, Contains, Everything, Nothing}, -}; -use frame_system::EnsureRoot; -use pallet_xcm::XcmPassthrough; -use parachains_common::{impls::ToStakingPot, xcm_config::ConcreteAssetFromSystem}; -use polkadot_parachain_primitives::primitives::Sibling; -use polkadot_runtime_common::xcm_sender::ExponentialPrice; -use xcm::latest::prelude::*; -use xcm_builder::{ - AccountId32Aliases, AllowExplicitUnpaidExecutionFrom, AllowKnownQueryResponses, - AllowSubscriptionsFrom, AllowTopLevelPaidExecutionFrom, CurrencyAdapter, - DenyReserveTransferToRelayChain, DenyThenTry, EnsureXcmOrigin, IsConcrete, ParentAsSuperuser, - ParentIsPreset, RelayChainAsNative, SiblingParachainAsNative, SiblingParachainConvertsVia, - SignedAccountId32AsNative, SignedToAccountId32, SovereignSignedViaLocation, TakeWeightCredit, - TrailingSetTopicAsId, UsingComponents, WeightInfoBounds, WithComputedOrigin, WithUniqueTopic, -}; -use xcm_executor::{traits::WithOriginFilter, XcmExecutor}; - -parameter_types! { - pub const DotRelayLocation: MultiLocation = MultiLocation::parent(); - pub const RelayNetwork: Option = Some(NetworkId::Polkadot); - pub RelayChainOrigin: RuntimeOrigin = cumulus_pallet_xcm::Origin::Relay.into(); - pub UniversalLocation: InteriorMultiLocation = - X2(GlobalConsensus(RelayNetwork::get().unwrap()), Parachain(ParachainInfo::parachain_id().into())); - pub const MaxInstructions: u32 = 100; - pub const MaxAssetsIntoHolding: u32 = 64; - pub FellowshipLocation: MultiLocation = MultiLocation::new(1, Parachain(1001)); - pub const GovernanceLocation: MultiLocation = MultiLocation::parent(); -} - -/// Type for specifying how a `MultiLocation` can be converted into an `AccountId`. 
This is used -/// when determining ownership of accounts for asset transacting and when attempting to use XCM -/// `Transact` in order to determine the dispatch Origin. -pub type LocationToAccountId = ( - // The parent (Relay-chain) origin converts to the parent `AccountId`. - ParentIsPreset<AccountId>, - // Sibling parachain origins convert to AccountId via the `ParaId::into`. - SiblingParachainConvertsVia<Sibling, AccountId>, - // Straight up local `AccountId32` origins just alias directly to `AccountId`. - AccountId32Aliases<RelayNetwork, AccountId>, -); - -/// Means for transacting the native currency on this chain. -pub type CurrencyTransactor = CurrencyAdapter< - // Use this currency: - Balances, - // Use this currency when it is a fungible asset matching the given location or name: - IsConcrete<DotRelayLocation>, - // Do a simple pun to convert an AccountId32 MultiLocation into a native chain account ID: - LocationToAccountId, - // Our chain's account ID type (we can't get away without mentioning it explicitly): - AccountId, - // We don't track any teleports of `Balances`. - (), ->; - -/// This is the type we use to convert an (incoming) XCM origin into a local `Origin` instance, -/// ready for dispatching a transaction with Xcm's `Transact`. There is an `OriginKind` which can -/// bias the kind of local `Origin` it will become. -pub type XcmOriginToTransactDispatchOrigin = ( - // Sovereign account converter; this attempts to derive an `AccountId` from the origin location - // using `LocationToAccountId` and then turn that into the usual `Signed` origin. Useful for - // foreign chains who want to have a local sovereign account on this chain which they control. - SovereignSignedViaLocation<LocationToAccountId, RuntimeOrigin>, - // Native converter for Relay-chain (Parent) location; will convert to a `Relay` origin when - // recognized. - RelayChainAsNative<RelayChainOrigin, RuntimeOrigin>, - // Native converter for sibling Parachains; will convert to a `SiblingPara` origin when - // recognized. - SiblingParachainAsNative<cumulus_pallet_xcm::Origin, RuntimeOrigin>, - // Superuser converter for the Relay-chain (Parent) location. This will allow it to issue a - // transaction from the Root origin. - ParentAsSuperuser<RuntimeOrigin>, - // Native signed account converter; this just converts an `AccountId32` origin into a normal - // `RuntimeOrigin::Signed` origin of the same 32-byte value. - SignedAccountId32AsNative<RelayNetwork, RuntimeOrigin>, - // Xcm origins can be represented natively under the Xcm pallet's Xcm origin. - XcmPassthrough<RuntimeOrigin>, -); - -match_types! { - pub type ParentOrParentsPlurality: impl Contains<MultiLocation> = { - MultiLocation { parents: 1, interior: Here } | - MultiLocation { parents: 1, interior: X1(Plurality { .. }) } - }; - pub type ParentOrSiblings: impl Contains<MultiLocation> = { - MultiLocation { parents: 1, interior: Here } | - MultiLocation { parents: 1, interior: X1(_) } - }; - pub type FellowsPlurality: impl Contains<MultiLocation> = { - MultiLocation { parents: 1, interior: X2(Parachain(1001), Plurality { id: BodyId::Technical, ..}) } - }; -} -/// A call filter for the XCM Transact instruction. This is a temporary measure until we properly -/// account for proof size weights. -/// -/// Calls that are allowed through this filter must: -/// 1. Have a fixed weight; -/// 2. Cannot lead to another call being made; -/// 3. Have a defined proof size weight, e.g. no unbounded vecs in call parameters. -pub struct SafeCallFilter; -impl Contains<RuntimeCall> for SafeCallFilter { - fn contains(call: &RuntimeCall) -> bool { - #[cfg(feature = "runtime-benchmarks")] - { - if matches!(call, RuntimeCall::System(frame_system::Call::remark_with_event { ..
})) { - return true - } - } - - matches!( - call, - RuntimeCall::PolkadotXcm(pallet_xcm::Call::force_xcm_version { .. }) | - RuntimeCall::System( - frame_system::Call::set_heap_pages { .. } | - frame_system::Call::set_code { .. } | - frame_system::Call::set_code_without_checks { .. } | - frame_system::Call::kill_prefix { .. }, - ) | RuntimeCall::ParachainSystem(..) | - RuntimeCall::Timestamp(..) | - RuntimeCall::Balances(..) | - RuntimeCall::CollatorSelection( - pallet_collator_selection::Call::set_desired_candidates { .. } | - pallet_collator_selection::Call::set_candidacy_bond { .. } | - pallet_collator_selection::Call::register_as_candidate { .. } | - pallet_collator_selection::Call::leave_intent { .. } | - pallet_collator_selection::Call::set_invulnerables { .. } | - pallet_collator_selection::Call::add_invulnerable { .. } | - pallet_collator_selection::Call::remove_invulnerable { .. }, - ) | RuntimeCall::Session(pallet_session::Call::purge_keys { .. }) | - RuntimeCall::XcmpQueue(..) | - RuntimeCall::MessageQueue(..) - ) - } -} - -pub type Barrier = TrailingSetTopicAsId< - DenyThenTry< - DenyReserveTransferToRelayChain, - ( - // Allow local users to buy weight credit. - TakeWeightCredit, - // Expected responses are OK. - AllowKnownQueryResponses, - WithComputedOrigin< - ( - // If the message is one that immediately attempts to pay for execution, then - // allow it. - AllowTopLevelPaidExecutionFrom, - // Parent, its pluralities (i.e. governance bodies), and the Fellows plurality - // get free execution. - AllowExplicitUnpaidExecutionFrom<(ParentOrParentsPlurality, FellowsPlurality)>, - // Subscriptions for version tracking are OK. - AllowSubscriptionsFrom, - ), - UniversalLocation, - ConstU32<8>, - >, - ), - >, ->; - -/// Cases where a remote origin is accepted as trusted Teleporter for a given asset: -/// - DOT with the parent Relay Chain and sibling parachains. -pub type TrustedTeleporters = ConcreteAssetFromSystem; - -pub struct XcmConfig; -impl xcm_executor::Config for XcmConfig { - type RuntimeCall = RuntimeCall; - type XcmSender = XcmRouter; - type AssetTransactor = CurrencyTransactor; - type OriginConverter = XcmOriginToTransactDispatchOrigin; - // BridgeHub does not recognize a reserve location for any asset. Users must teleport DOT - // where allowed (e.g. with the Relay Chain). - type IsReserve = (); - type IsTeleporter = TrustedTeleporters; - type UniversalLocation = UniversalLocation; - type Barrier = Barrier; - type Weigher = WeightInfoBounds< - crate::weights::xcm::BridgeHubPolkadotXcmWeight, - RuntimeCall, - MaxInstructions, - >; - type Trader = - UsingComponents>; - type ResponseHandler = PolkadotXcm; - type AssetTrap = PolkadotXcm; - type AssetClaims = PolkadotXcm; - type SubscriptionService = PolkadotXcm; - type PalletInstancesInfo = AllPalletsWithSystem; - type MaxAssetsIntoHolding = MaxAssetsIntoHolding; - type AssetLocker = (); - type AssetExchanger = (); - type FeeManager = (); - type MessageExporter = (); - type UniversalAliases = Nothing; - type CallDispatcher = WithOriginFilter; - type SafeCallFilter = SafeCallFilter; - type Aliasers = Nothing; -} - -/// Converts a local signed origin into an XCM multilocation. -/// Forms the basis for local origins sending/executing XCMs. -pub type LocalOriginToLocation = SignedToAccountId32; - -parameter_types! { - /// The asset ID for the asset that we use to pay for message delivery fees. - pub FeeAssetId: AssetId = Concrete(DotRelayLocation::get()); - /// The base fee for the message delivery fees. 
- pub const BaseDeliveryFee: u128 = CENTS.saturating_mul(3); -} - -pub type PriceForParentDelivery = - ExponentialPrice; - -/// The means for routing XCM messages which are not for local execution into the right message -/// queues. -pub type XcmRouter = WithUniqueTopic<( - // Two routers - use UMP to communicate with the relay chain: - cumulus_primitives_utility::ParentAsUmp, - // ..and XCMP to communicate with the sibling chains. - XcmpQueue, -)>; - -impl pallet_xcm::Config for Runtime { - type RuntimeEvent = RuntimeEvent; - // We want to disallow users sending (arbitrary) XCMs from this chain. - type SendXcmOrigin = EnsureXcmOrigin; - type XcmRouter = XcmRouter; - // We support local origins dispatching XCM executions in principle... - type ExecuteXcmOrigin = EnsureXcmOrigin; - // ... but disallow generic XCM execution. As a result only teleports are allowed. - type XcmExecuteFilter = Nothing; - type XcmExecutor = XcmExecutor; - type XcmTeleportFilter = Everything; - type XcmReserveTransferFilter = Nothing; // This parachain is not meant as a reserve location. - type Weigher = WeightInfoBounds< - crate::weights::xcm::BridgeHubPolkadotXcmWeight, - RuntimeCall, - MaxInstructions, - >; - type UniversalLocation = UniversalLocation; - type RuntimeOrigin = RuntimeOrigin; - type RuntimeCall = RuntimeCall; - const VERSION_DISCOVERY_QUEUE_SIZE: u32 = 100; - type AdvertisedXcmVersion = pallet_xcm::CurrentXcmVersion; - type Currency = Balances; - type CurrencyMatcher = (); - type TrustedLockers = (); - type SovereignAccountOf = LocationToAccountId; - type MaxLockers = ConstU32<8>; - type WeightInfo = crate::weights::pallet_xcm::WeightInfo; - type AdminOrigin = EnsureRoot; - type MaxRemoteLockConsumers = ConstU32<0>; - type RemoteLockConsumerIdentifier = (); -} - -impl cumulus_pallet_xcm::Config for Runtime { - type RuntimeEvent = RuntimeEvent; - type XcmExecutor = XcmExecutor; -} diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/tests/tests.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/tests/tests.rs deleted file mode 100644 index 3156a5fe68e..00000000000 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-polkadot/tests/tests.rs +++ /dev/null @@ -1,51 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Cumulus. - -// Cumulus is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Cumulus is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Cumulus. If not, see . - -pub use bridge_hub_polkadot_runtime::{ - xcm_config::XcmConfig, AllPalletsWithoutSystem, Balances, ExistentialDeposit, ParachainSystem, - PolkadotXcm, Runtime, RuntimeEvent, SessionKeys, -}; -use codec::Decode; -use frame_support::parameter_types; -use parachains_common::{polkadot::fee::WeightToFee, AccountId, AuraId}; - -const ALICE: [u8; 32] = [1u8; 32]; - -parameter_types! 
{ - pub CheckingAccount: AccountId = PolkadotXcm::check_account(); -} - -bridge_hub_test_utils::test_cases::include_teleports_for_native_asset_works!( - Runtime, - AllPalletsWithoutSystem, - XcmConfig, - CheckingAccount, - WeightToFee, - ParachainSystem, - bridge_hub_test_utils::CollatorSessionKeys::new( - AccountId::from(ALICE), - AccountId::from(ALICE), - SessionKeys { aura: AuraId::from(sp_core::sr25519::Public::from_raw(ALICE)) } - ), - ExistentialDeposit::get(), - Box::new(|runtime_event_encoded: Vec| { - match RuntimeEvent::decode(&mut &runtime_event_encoded[..]) { - Ok(RuntimeEvent::PolkadotXcm(event)) => Some(event), - _ => None, - } - }), - 1002 -); diff --git a/cumulus/parachains/runtimes/collectives/collectives-polkadot/Cargo.toml b/cumulus/parachains/runtimes/collectives/collectives-polkadot/Cargo.toml deleted file mode 100644 index ca83b84cd8f..00000000000 --- a/cumulus/parachains/runtimes/collectives/collectives-polkadot/Cargo.toml +++ /dev/null @@ -1,226 +0,0 @@ -[package] -name = "collectives-polkadot-runtime" -version = "1.0.0" -authors.workspace = true -edition.workspace = true -description = "Polkadot Collectives Parachain Runtime" -license = "Apache-2.0" - -[dependencies] -codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive", "max-encoded-len"] } -hex-literal = { version = "0.4.1" } -log = { version = "0.4.20", default-features = false } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -smallvec = "1.11.0" - -# Substrate -frame-benchmarking = { path = "../../../../../substrate/frame/benchmarking", default-features = false, optional = true} -frame-executive = { path = "../../../../../substrate/frame/executive", default-features = false} -frame-support = { path = "../../../../../substrate/frame/support", default-features = false} -frame-system = { path = "../../../../../substrate/frame/system", default-features = false} -frame-system-benchmarking = { path = "../../../../../substrate/frame/system/benchmarking", default-features = false, optional = true} -frame-system-rpc-runtime-api = { path = "../../../../../substrate/frame/system/rpc/runtime-api", default-features = false} -frame-try-runtime = { path = "../../../../../substrate/frame/try-runtime", default-features = false, optional = true} -pallet-alliance = { path = "../../../../../substrate/frame/alliance", default-features = false} -pallet-aura = { path = "../../../../../substrate/frame/aura", default-features = false} -pallet-authorship = { path = "../../../../../substrate/frame/authorship", default-features = false} -pallet-balances = { path = "../../../../../substrate/frame/balances", default-features = false} -pallet-collective = { path = "../../../../../substrate/frame/collective", default-features = false} -pallet-multisig = { path = "../../../../../substrate/frame/multisig", default-features = false} -pallet-preimage = { path = "../../../../../substrate/frame/preimage", default-features = false } -pallet-proxy = { path = "../../../../../substrate/frame/proxy", default-features = false} -pallet-scheduler = { path = "../../../../../substrate/frame/scheduler", default-features = false } -pallet-session = { path = "../../../../../substrate/frame/session", default-features = false} -pallet-timestamp = { path = "../../../../../substrate/frame/timestamp", default-features = false} -pallet-transaction-payment = { path = "../../../../../substrate/frame/transaction-payment", default-features = false} 
-pallet-transaction-payment-rpc-runtime-api = { path = "../../../../../substrate/frame/transaction-payment/rpc/runtime-api", default-features = false} -pallet-utility = { path = "../../../../../substrate/frame/utility", default-features = false} -pallet-referenda = { path = "../../../../../substrate/frame/referenda", default-features = false} -pallet-ranked-collective = { path = "../../../../../substrate/frame/ranked-collective", default-features = false} -pallet-core-fellowship = { path = "../../../../../substrate/frame/core-fellowship", default-features = false} -pallet-salary = { path = "../../../../../substrate/frame/salary", default-features = false} -sp-api = { path = "../../../../../substrate/primitives/api", default-features = false} -sp-arithmetic = { path = "../../../../../substrate/primitives/arithmetic", default-features = false } -sp-block-builder = { path = "../../../../../substrate/primitives/block-builder", default-features = false} -sp-consensus-aura = { path = "../../../../../substrate/primitives/consensus/aura", default-features = false} -sp-core = { path = "../../../../../substrate/primitives/core", default-features = false} -sp-genesis-builder = { path = "../../../../../substrate/primitives/genesis-builder", default-features = false } -sp-inherents = { path = "../../../../../substrate/primitives/inherents", default-features = false} -sp-offchain = { path = "../../../../../substrate/primitives/offchain", default-features = false} -sp-runtime = { path = "../../../../../substrate/primitives/runtime", default-features = false} -sp-session = { path = "../../../../../substrate/primitives/session", default-features = false} -sp-std = { path = "../../../../../substrate/primitives/std", default-features = false} -sp-storage = { path = "../../../../../substrate/primitives/storage", default-features = false} -sp-transaction-pool = { path = "../../../../../substrate/primitives/transaction-pool", default-features = false} -sp-version = { path = "../../../../../substrate/primitives/version", default-features = false} - -# Polkadot -pallet-xcm = { path = "../../../../../polkadot/xcm/pallet-xcm", default-features = false} -polkadot-core-primitives = { path = "../../../../../polkadot/core-primitives", default-features = false} -polkadot-parachain-primitives = { path = "../../../../../polkadot/parachain", default-features = false} -polkadot-runtime-common = { path = "../../../../../polkadot/runtime/common", default-features = false} -xcm = { package = "staging-xcm", path = "../../../../../polkadot/xcm", default-features = false} -xcm-builder = { package = "staging-xcm-builder", path = "../../../../../polkadot/xcm/xcm-builder", default-features = false} -xcm-executor = { package = "staging-xcm-executor", path = "../../../../../polkadot/xcm/xcm-executor", default-features = false} - -# Cumulus -cumulus-pallet-aura-ext = { path = "../../../../pallets/aura-ext", default-features = false } -pallet-message-queue = { path = "../../../../../substrate/frame/message-queue", default-features = false } -cumulus-pallet-dmp-queue = { path = "../../../../pallets/dmp-queue", default-features = false } -cumulus-pallet-parachain-system = { path = "../../../../pallets/parachain-system", default-features = false, features = ["parameterized-consensus-hook",] } -cumulus-pallet-session-benchmarking = { path = "../../../../pallets/session-benchmarking", default-features = false} -cumulus-pallet-xcm = { path = "../../../../pallets/xcm", default-features = false } -cumulus-pallet-xcmp-queue = { path = 
"../../../../pallets/xcmp-queue", default-features = false } -cumulus-primitives-core = { path = "../../../../primitives/core", default-features = false } -cumulus-primitives-utility = { path = "../../../../primitives/utility", default-features = false } -pallet-collator-selection = { path = "../../../../pallets/collator-selection", default-features = false } -pallet-collective-content = { path = "../../../pallets/collective-content", default-features = false } -parachain-info = { package = "staging-parachain-info", path = "../../../pallets/parachain-info", default-features = false } -parachains-common = { path = "../../../common", default-features = false } - -[build-dependencies] -substrate-wasm-builder = { path = "../../../../../substrate/utils/wasm-builder", optional = true } - -[dev-dependencies] -sp-io = { path = "../../../../../substrate/primitives/io", default-features = false} - -[features] -default = [ "std" ] -runtime-benchmarks = [ - "cumulus-pallet-dmp-queue/runtime-benchmarks", - "cumulus-pallet-parachain-system/runtime-benchmarks", - "cumulus-pallet-session-benchmarking/runtime-benchmarks", - "cumulus-pallet-xcmp-queue/runtime-benchmarks", - "cumulus-primitives-core/runtime-benchmarks", - "cumulus-primitives-utility/runtime-benchmarks", - "frame-benchmarking/runtime-benchmarks", - "frame-support/runtime-benchmarks", - "frame-system-benchmarking/runtime-benchmarks", - "frame-system/runtime-benchmarks", - "pallet-alliance/runtime-benchmarks", - "pallet-balances/runtime-benchmarks", - "pallet-collator-selection/runtime-benchmarks", - "pallet-collective-content/runtime-benchmarks", - "pallet-collective/runtime-benchmarks", - "pallet-core-fellowship/runtime-benchmarks", - "pallet-message-queue/runtime-benchmarks", - "pallet-multisig/runtime-benchmarks", - "pallet-preimage/runtime-benchmarks", - "pallet-proxy/runtime-benchmarks", - "pallet-ranked-collective/runtime-benchmarks", - "pallet-referenda/runtime-benchmarks", - "pallet-salary/runtime-benchmarks", - "pallet-scheduler/runtime-benchmarks", - "pallet-timestamp/runtime-benchmarks", - "pallet-utility/runtime-benchmarks", - "pallet-xcm/runtime-benchmarks", - "parachains-common/runtime-benchmarks", - "polkadot-parachain-primitives/runtime-benchmarks", - "polkadot-runtime-common/runtime-benchmarks", - "sp-runtime/runtime-benchmarks", - "xcm-builder/runtime-benchmarks", - "xcm-executor/runtime-benchmarks", -] -try-runtime = [ - "cumulus-pallet-aura-ext/try-runtime", - "cumulus-pallet-dmp-queue/try-runtime", - "cumulus-pallet-parachain-system/try-runtime", - "cumulus-pallet-xcm/try-runtime", - "cumulus-pallet-xcmp-queue/try-runtime", - "frame-executive/try-runtime", - "frame-support/try-runtime", - "frame-system/try-runtime", - "frame-try-runtime/try-runtime", - "pallet-alliance/try-runtime", - "pallet-aura/try-runtime", - "pallet-authorship/try-runtime", - "pallet-balances/try-runtime", - "pallet-collator-selection/try-runtime", - "pallet-collective-content/try-runtime", - "pallet-collective/try-runtime", - "pallet-core-fellowship/try-runtime", - "pallet-message-queue/try-runtime", - "pallet-multisig/try-runtime", - "pallet-preimage/try-runtime", - "pallet-proxy/try-runtime", - "pallet-ranked-collective/try-runtime", - "pallet-referenda/try-runtime", - "pallet-salary/try-runtime", - "pallet-scheduler/try-runtime", - "pallet-session/try-runtime", - "pallet-timestamp/try-runtime", - "pallet-transaction-payment/try-runtime", - "pallet-utility/try-runtime", - "pallet-xcm/try-runtime", - "parachain-info/try-runtime", - 
"polkadot-runtime-common/try-runtime", - "sp-runtime/try-runtime", -] -std = [ - "codec/std", - "cumulus-pallet-aura-ext/std", - "cumulus-pallet-dmp-queue/std", - "cumulus-pallet-parachain-system/std", - "cumulus-pallet-session-benchmarking/std", - "cumulus-pallet-xcm/std", - "cumulus-pallet-xcmp-queue/std", - "cumulus-primitives-core/std", - "cumulus-primitives-utility/std", - "frame-benchmarking?/std", - "frame-executive/std", - "frame-support/std", - "frame-system-benchmarking?/std", - "frame-system-rpc-runtime-api/std", - "frame-system/std", - "frame-try-runtime?/std", - "log/std", - "pallet-alliance/std", - "pallet-aura/std", - "pallet-authorship/std", - "pallet-balances/std", - "pallet-collator-selection/std", - "pallet-collective-content/std", - "pallet-collective/std", - "pallet-core-fellowship/std", - "pallet-message-queue/std", - "pallet-multisig/std", - "pallet-preimage/std", - "pallet-proxy/std", - "pallet-ranked-collective/std", - "pallet-referenda/std", - "pallet-salary/std", - "pallet-scheduler/std", - "pallet-session/std", - "pallet-timestamp/std", - "pallet-transaction-payment-rpc-runtime-api/std", - "pallet-transaction-payment/std", - "pallet-utility/std", - "pallet-xcm/std", - "parachain-info/std", - "parachains-common/std", - "polkadot-core-primitives/std", - "polkadot-parachain-primitives/std", - "polkadot-runtime-common/std", - "scale-info/std", - "sp-api/std", - "sp-arithmetic/std", - "sp-block-builder/std", - "sp-consensus-aura/std", - "sp-core/std", - "sp-genesis-builder/std", - "sp-inherents/std", - "sp-io/std", - "sp-offchain/std", - "sp-runtime/std", - "sp-session/std", - "sp-std/std", - "sp-storage/std", - "sp-transaction-pool/std", - "sp-version/std", - "substrate-wasm-builder", - "xcm-builder/std", - "xcm-executor/std", - "xcm/std", -] - -experimental = [ "pallet-aura/experimental" ] diff --git a/cumulus/parachains/runtimes/collectives/collectives-polkadot/build.rs b/cumulus/parachains/runtimes/collectives/collectives-polkadot/build.rs deleted file mode 100644 index 60f8a125129..00000000000 --- a/cumulus/parachains/runtimes/collectives/collectives-polkadot/build.rs +++ /dev/null @@ -1,26 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -#[cfg(feature = "std")] -fn main() { - substrate_wasm_builder::WasmBuilder::new() - .with_current_project() - .export_heap_base() - .import_memory() - .build() -} - -#[cfg(not(feature = "std"))] -fn main() {} diff --git a/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/ambassador/mod.rs b/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/ambassador/mod.rs deleted file mode 100644 index b055ffc8abf..00000000000 --- a/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/ambassador/mod.rs +++ /dev/null @@ -1,255 +0,0 @@ -// Copyright (C) 2022 Parity Technologies (UK) Ltd. 
-// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! The Ambassador Program. -//! -//! The module defines the following on-chain functionality of the Ambassador Program: -//! -//! - Managed set of program members, where every member has a [rank](ranks) -//! (via [AmbassadorCollective](pallet_ranked_collective)). -//! - Referendum functionality for the program members to propose, vote on, and execute -//! proposals on behalf of the members of a certain [rank](Origin) -//! (via [AmbassadorReferenda](pallet_referenda)). -//! - Managed content (charter, announcements) (via [pallet_collective_content]). -//! - Promotion and demotion periods, register of members' activity, and rank based salaries -//! (via [AmbassadorCore](pallet_core_fellowship)). -//! - Members' salaries (via [AmbassadorSalary](pallet_salary), requiring a member to be -//! imported or inducted into [AmbassadorCore](pallet_core_fellowship)). - -pub mod origins; -mod tracks; - -use super::*; -use crate::xcm_config::{DotAssetHub, FellowshipAdminBodyId}; -use frame_support::traits::{EitherOf, MapSuccess, TryMapSuccess}; -pub use origins::pallet_origins as pallet_ambassador_origins; -use origins::pallet_origins::{ - EnsureAmbassadorsVoice, EnsureAmbassadorsVoiceFrom, EnsureHeadAmbassadorsVoice, Origin, -}; -use parachains_common::polkadot::account; -use sp_core::ConstU128; -use sp_runtime::traits::{CheckedReduceBy, ConstU16, ConvertToValue, Replace}; -use xcm::prelude::*; -use xcm_builder::{AliasesIntoAccountId32, PayOverXcm}; - -/// The Ambassador Program's member ranks. -pub mod ranks { - use pallet_ranked_collective::Rank; - - #[allow(dead_code)] - pub const CANDIDATE: Rank = 0; - pub const AMBASSADOR_TIER_1: Rank = 1; - pub const AMBASSADOR_TIER_2: Rank = 2; - pub const SENIOR_AMBASSADOR_TIER_3: Rank = 3; - pub const SENIOR_AMBASSADOR_TIER_4: Rank = 4; - pub const HEAD_AMBASSADOR_TIER_5: Rank = 5; - pub const HEAD_AMBASSADOR_TIER_6: Rank = 6; - pub const HEAD_AMBASSADOR_TIER_7: Rank = 7; - pub const MASTER_AMBASSADOR_TIER_8: Rank = 8; - pub const MASTER_AMBASSADOR_TIER_9: Rank = 9; -} - -impl pallet_ambassador_origins::Config for Runtime {} - -pub type AmbassadorCollectiveInstance = pallet_ranked_collective::Instance2; - -/// Demotion is by any of: -/// - Root can demote arbitrarily. -/// - the FellowshipAdmin origin (i.e. token holder referendum); -/// - a senior members vote by the rank two above the current rank. -pub type DemoteOrigin = EitherOf< - frame_system::EnsureRootWithSuccess>, - EitherOf< - MapSuccess< - EnsureXcm>, - Replace>, - >, - TryMapSuccess< - EnsureAmbassadorsVoiceFrom>, - CheckedReduceBy>, - >, - >, ->; - -/// Promotion and approval (rank-retention) is by any of: -/// - Root can promote arbitrarily. -/// - the FellowshipAdmin origin (i.e. token holder referendum); -/// - a senior members vote by the rank two above the new/current rank. -/// - a member of rank `5` or above can add a candidate (rank `0`).
-pub type PromoteOrigin = EitherOf< - DemoteOrigin, - TryMapSuccess< - pallet_ranked_collective::EnsureMember< - Runtime, - AmbassadorCollectiveInstance, - { ranks::HEAD_AMBASSADOR_TIER_5 }, - >, - Replace>, - >, ->; - -impl pallet_ranked_collective::Config for Runtime { - type WeightInfo = weights::pallet_ranked_collective_ambassador_collective::WeightInfo; - type RuntimeEvent = RuntimeEvent; - type PromoteOrigin = PromoteOrigin; - type DemoteOrigin = DemoteOrigin; - type Polls = AmbassadorReferenda; - type MinRankOfClass = sp_runtime::traits::Identity; - type VoteWeight = pallet_ranked_collective::Linear; -} - -parameter_types! { - pub const AlarmInterval: BlockNumber = 1; - pub const SubmissionDeposit: Balance = 0; - pub const UndecidingTimeout: BlockNumber = 7 * DAYS; - // The Ambassador Referenda pallet account, used as a temporarily place to deposit a slashed imbalance before teleport to the treasury. - pub AmbassadorPalletAccount: AccountId = account::AMBASSADOR_REFERENDA_PALLET_ID.into_account_truncating(); -} - -pub type AmbassadorReferendaInstance = pallet_referenda::Instance2; - -impl pallet_referenda::Config for Runtime { - type WeightInfo = weights::pallet_referenda_ambassador_referenda::WeightInfo; - type RuntimeCall = RuntimeCall; - type RuntimeEvent = RuntimeEvent; - type Scheduler = Scheduler; - type Currency = Balances; - // A proposal can be submitted by a member of the Ambassador Program of - // [ranks::SENIOR_AMBASSADOR_TIER_3] rank or higher. - type SubmitOrigin = pallet_ranked_collective::EnsureMember< - Runtime, - AmbassadorCollectiveInstance, - { ranks::SENIOR_AMBASSADOR_TIER_3 }, - >; - type CancelOrigin = EitherOf, EnsureHeadAmbassadorsVoice>; - type KillOrigin = EitherOf, EnsureHeadAmbassadorsVoice>; - type Slash = ToParentTreasury; - type Votes = pallet_ranked_collective::Votes; - type Tally = pallet_ranked_collective::TallyOf; - type SubmissionDeposit = SubmissionDeposit; - type MaxQueued = ConstU32<20>; - type UndecidingTimeout = UndecidingTimeout; - type AlarmInterval = AlarmInterval; - type Tracks = tracks::TracksInfo; - type Preimages = Preimage; -} - -parameter_types! { - pub const AnnouncementLifetime: BlockNumber = 180 * DAYS; - pub const MaxAnnouncements: u32 = 50; -} - -pub type AmbassadorContentInstance = pallet_collective_content::Instance1; - -impl pallet_collective_content::Config for Runtime { - type RuntimeEvent = RuntimeEvent; - type CharterOrigin = EitherOf, EnsureHeadAmbassadorsVoice>; - type AnnouncementLifetime = AnnouncementLifetime; - // An announcement can be submitted by a Senior Ambassador member or an ambassador plurality - // voice taken via referendum. - type AnnouncementOrigin = EitherOfDiverse< - pallet_ranked_collective::EnsureMember< - Runtime, - AmbassadorCollectiveInstance, - { ranks::SENIOR_AMBASSADOR_TIER_3 }, - >, - EnsureAmbassadorsVoice, - >; - type MaxAnnouncements = MaxAnnouncements; - type WeightInfo = weights::pallet_collective_content::WeightInfo; -} - -pub type AmbassadorCoreInstance = pallet_core_fellowship::Instance2; - -impl pallet_core_fellowship::Config for Runtime { - type WeightInfo = weights::pallet_core_fellowship_ambassador_core::WeightInfo; - type RuntimeEvent = RuntimeEvent; - type Members = pallet_ranked_collective::Pallet; - type Balance = Balance; - // Parameters are set by any of: - // - Root; - // - the FellowshipAdmin origin (i.e. token holder referendum); - // - a vote among all Head Ambassadors. 
- type ParamsOrigin = EitherOfDiverse< - EnsureXcm>, - EnsureHeadAmbassadorsVoice, - >; - // Induction (creating a candidate) is by any of: - // - Root; - // - the FellowshipAdmin origin (i.e. token holder referendum); - // - a single Head Ambassador; - // - a vote among all senior members. - type InductOrigin = EitherOfDiverse< - EnsureXcm>, - EitherOfDiverse< - pallet_ranked_collective::EnsureMember< - Runtime, - AmbassadorCollectiveInstance, - { ranks::HEAD_AMBASSADOR_TIER_5 }, - >, - EnsureAmbassadorsVoiceFrom>, - >, - >; - type ApproveOrigin = PromoteOrigin; - type PromoteOrigin = PromoteOrigin; - type EvidenceSize = ConstU32<65536>; -} - -pub type AmbassadorSalaryInstance = pallet_salary::Instance2; - -parameter_types! { - // The interior location on AssetHub for the paying account. This is the Ambassador Salary - // pallet instance (which sits at index 74). This sovereign account will need funding. - pub AmbassadorSalaryLocation: InteriorMultiLocation = PalletInstance(74).into(); -} - -/// [`PayOverXcm`] setup to pay the Ambassador salary on the AssetHub in DOT. -pub type AmbassadorSalaryPaymaster = PayOverXcm< - AmbassadorSalaryLocation, - crate::xcm_config::XcmRouter, - crate::PolkadotXcm, - ConstU32<{ 6 * HOURS }>, - AccountId, - (), - ConvertToValue, - AliasesIntoAccountId32<(), AccountId>, ->; - -impl pallet_salary::Config for Runtime { - type WeightInfo = weights::pallet_salary_ambassador_salary::WeightInfo; - type RuntimeEvent = RuntimeEvent; - - #[cfg(not(feature = "runtime-benchmarks"))] - type Paymaster = AmbassadorSalaryPaymaster; - #[cfg(feature = "runtime-benchmarks")] - type Paymaster = crate::impls::benchmarks::PayWithEnsure< - AmbassadorSalaryPaymaster, - crate::impls::benchmarks::OpenHrmpChannel>, - >; - type Members = pallet_ranked_collective::Pallet; - - #[cfg(not(feature = "runtime-benchmarks"))] - type Salary = pallet_core_fellowship::Pallet; - #[cfg(feature = "runtime-benchmarks")] - type Salary = frame_support::traits::tokens::ConvertRank< - crate::impls::benchmarks::RankToSalary, - >; - // 15 days to register for a salary payment. - type RegistrationPeriod = ConstU32<{ 15 * DAYS }>; - // 15 days to claim the salary payment. - type PayoutPeriod = ConstU32<{ 15 * DAYS }>; - // Total monthly salary budget. - type Budget = ConstU128<{ 10_000 * DOLLARS }>; -} diff --git a/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/ambassador/origins.rs b/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/ambassador/origins.rs deleted file mode 100644 index 3ce8a6b9e5d..00000000000 --- a/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/ambassador/origins.rs +++ /dev/null @@ -1,135 +0,0 @@ -// Copyright (C) 2022 Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! The Ambassador Program's origins. 
- -#[frame_support::pallet] -pub mod pallet_origins { - use crate::ambassador::ranks; - use frame_support::pallet_prelude::*; - use pallet_ranked_collective::Rank; - - #[pallet::pallet] - pub struct Pallet(PhantomData); - - /// The pallet configuration trait. - #[pallet::config] - pub trait Config: frame_system::Config {} - - #[derive(PartialEq, Eq, Clone, MaxEncodedLen, Encode, Decode, TypeInfo, RuntimeDebug)] - #[pallet::origin] - pub enum Origin { - /// Plurality voice of the [ranks::AMBASSADOR_TIER_1] members or above given via - /// referendum. - Ambassadors, - /// Plurality voice of the [ranks::AMBASSADOR_TIER_2] members or above given via - /// referendum. - AmbassadorsTier2, - /// Plurality voice of the [ranks::SENIOR_AMBASSADOR_TIER_3] members or above given via - /// referendum. - SeniorAmbassadors, - /// Plurality voice of the [ranks::SENIOR_AMBASSADOR_TIER_4] members or above given via - /// referendum. - SeniorAmbassadorsTier4, - /// Plurality voice of the [ranks::HEAD_AMBASSADOR_TIER_5] members or above given via - /// referendum. - HeadAmbassadors, - /// Plurality voice of the [ranks::HEAD_AMBASSADOR_TIER_6] members or above given via - /// referendum. - HeadAmbassadorsTier6, - /// Plurality voice of the [ranks::HEAD_AMBASSADOR_TIER_7] members or above given via - /// referendum. - HeadAmbassadorsTier7, - /// Plurality voice of the [ranks::MASTER_AMBASSADOR_TIER_8] members or above given via - /// referendum. - MasterAmbassadors, - /// Plurality voice of the [ranks::MASTER_AMBASSADOR_TIER_9] members or above given via - /// referendum. - MasterAmbassadorsTier9, - } - - impl Origin { - /// Returns the rank that the origin `self` speaks for, or `None` if it doesn't speak for - /// any. - pub fn as_voice(&self) -> Option { - Some(match &self { - Origin::Ambassadors => ranks::AMBASSADOR_TIER_1, - Origin::AmbassadorsTier2 => ranks::AMBASSADOR_TIER_2, - Origin::SeniorAmbassadors => ranks::SENIOR_AMBASSADOR_TIER_3, - Origin::SeniorAmbassadorsTier4 => ranks::SENIOR_AMBASSADOR_TIER_4, - Origin::HeadAmbassadors => ranks::HEAD_AMBASSADOR_TIER_5, - Origin::HeadAmbassadorsTier6 => ranks::HEAD_AMBASSADOR_TIER_6, - Origin::HeadAmbassadorsTier7 => ranks::HEAD_AMBASSADOR_TIER_7, - Origin::MasterAmbassadors => ranks::MASTER_AMBASSADOR_TIER_8, - Origin::MasterAmbassadorsTier9 => ranks::MASTER_AMBASSADOR_TIER_9, - }) - } - } - - /// Implementation of the [EnsureOrigin] trait for the [Origin::HeadAmbassadors] origin. - pub struct EnsureHeadAmbassadorsVoice; - impl> + From> EnsureOrigin for EnsureHeadAmbassadorsVoice { - type Success = (); - fn try_origin(o: O) -> Result { - o.into().and_then(|o| match o { - Origin::HeadAmbassadors => Ok(()), - r => Err(O::from(r)), - }) - } - - #[cfg(feature = "runtime-benchmarks")] - fn try_successful_origin() -> Result { - Ok(O::from(Origin::HeadAmbassadors)) - } - } - - /// Implementation of the [EnsureOrigin] trait for the plurality voice [Origin]s - /// from a given rank `R` with the success result of the corresponding [Rank]. 
- pub struct EnsureAmbassadorsVoiceFrom(PhantomData); - impl, O: Into> + From> EnsureOrigin - for EnsureAmbassadorsVoiceFrom - { - type Success = Rank; - fn try_origin(o: O) -> Result { - o.into().and_then(|o| match Origin::as_voice(&o) { - Some(r) if r >= R::get() => Ok(r), - _ => Err(O::from(o)), - }) - } - - #[cfg(feature = "runtime-benchmarks")] - fn try_successful_origin() -> Result { - ranks::MASTER_AMBASSADOR_TIER_9 - .ge(&R::get()) - .then(|| O::from(Origin::MasterAmbassadorsTier9)) - .ok_or(()) - } - } - - /// Implementation of the [EnsureOrigin] trait for the plurality voice [Origin]s with the - /// success result of the corresponding [Rank]. - pub struct EnsureAmbassadorsVoice; - impl> + From> EnsureOrigin for EnsureAmbassadorsVoice { - type Success = Rank; - fn try_origin(o: O) -> Result { - o.into().and_then(|o| Origin::as_voice(&o).ok_or(O::from(o))) - } - - #[cfg(feature = "runtime-benchmarks")] - fn try_successful_origin() -> Result { - Ok(O::from(Origin::MasterAmbassadorsTier9)) - } - } -} diff --git a/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/ambassador/tracks.rs b/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/ambassador/tracks.rs deleted file mode 100644 index 073d8e6ee36..00000000000 --- a/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/ambassador/tracks.rs +++ /dev/null @@ -1,282 +0,0 @@ -// Copyright (C) 2022 Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! The Ambassador Program's referenda voting tracks. - -use super::Origin; -use crate::{Balance, BlockNumber, RuntimeOrigin, DAYS, DOLLARS, HOURS}; -use sp_runtime::Perbill; - -/// Referendum `TrackId` type. -pub type TrackId = u16; - -/// Referendum track IDs. -pub mod constants { - use super::TrackId; - - pub const AMBASSADOR_TIER_1: TrackId = 1; - pub const AMBASSADOR_TIER_2: TrackId = 2; - pub const SENIOR_AMBASSADOR_TIER_3: TrackId = 3; - pub const SENIOR_AMBASSADOR_TIER_4: TrackId = 4; - pub const HEAD_AMBASSADOR_TIER_5: TrackId = 5; - pub const HEAD_AMBASSADOR_TIER_6: TrackId = 6; - pub const HEAD_AMBASSADOR_TIER_7: TrackId = 7; - pub const MASTER_AMBASSADOR_TIER_8: TrackId = 8; - pub const MASTER_AMBASSADOR_TIER_9: TrackId = 9; -} - -/// The type implementing the [`pallet_referenda::TracksInfo`] trait for referenda pallet. -pub struct TracksInfo; - -/// Information on the voting tracks. -impl pallet_referenda::TracksInfo for TracksInfo { - type Id = TrackId; - - type RuntimeOrigin = ::PalletsOrigin; - - /// Return the array of available tracks and their information. 
- fn tracks() -> &'static [(Self::Id, pallet_referenda::TrackInfo)] { - static DATA: [(TrackId, pallet_referenda::TrackInfo); 9] = [ - ( - constants::AMBASSADOR_TIER_1, - pallet_referenda::TrackInfo { - name: "ambassador tier 1", - max_deciding: 10, - decision_deposit: 5 * DOLLARS, - prepare_period: 24 * HOURS, - decision_period: 7 * DAYS, - confirm_period: 24 * HOURS, - min_enactment_period: 1 * HOURS, - min_approval: pallet_referenda::Curve::LinearDecreasing { - length: Perbill::from_percent(100), - floor: Perbill::from_percent(50), - ceil: Perbill::from_percent(100), - }, - min_support: pallet_referenda::Curve::LinearDecreasing { - length: Perbill::from_percent(100), - floor: Perbill::from_percent(10), - ceil: Perbill::from_percent(50), - }, - }, - ), - ( - constants::AMBASSADOR_TIER_2, - pallet_referenda::TrackInfo { - name: "ambassador tier 2", - max_deciding: 10, - decision_deposit: 5 * DOLLARS, - prepare_period: 24 * HOURS, - decision_period: 7 * DAYS, - confirm_period: 24 * HOURS, - min_enactment_period: 1 * HOURS, - min_approval: pallet_referenda::Curve::LinearDecreasing { - length: Perbill::from_percent(100), - floor: Perbill::from_percent(50), - ceil: Perbill::from_percent(100), - }, - min_support: pallet_referenda::Curve::LinearDecreasing { - length: Perbill::from_percent(100), - floor: Perbill::from_percent(10), - ceil: Perbill::from_percent(50), - }, - }, - ), - ( - constants::SENIOR_AMBASSADOR_TIER_3, - pallet_referenda::TrackInfo { - name: "senior ambassador tier 3", - max_deciding: 10, - decision_deposit: 5 * DOLLARS, - prepare_period: 24 * HOURS, - decision_period: 7 * DAYS, - confirm_period: 24 * HOURS, - min_enactment_period: 1 * HOURS, - min_approval: pallet_referenda::Curve::LinearDecreasing { - length: Perbill::from_percent(100), - floor: Perbill::from_percent(50), - ceil: Perbill::from_percent(100), - }, - min_support: pallet_referenda::Curve::LinearDecreasing { - length: Perbill::from_percent(100), - floor: Perbill::from_percent(10), - ceil: Perbill::from_percent(50), - }, - }, - ), - ( - constants::SENIOR_AMBASSADOR_TIER_4, - pallet_referenda::TrackInfo { - name: "senior ambassador tier 4", - max_deciding: 10, - decision_deposit: 5 * DOLLARS, - prepare_period: 24 * HOURS, - decision_period: 7 * DAYS, - confirm_period: 24 * HOURS, - min_enactment_period: 1 * HOURS, - min_approval: pallet_referenda::Curve::LinearDecreasing { - length: Perbill::from_percent(100), - floor: Perbill::from_percent(50), - ceil: Perbill::from_percent(100), - }, - min_support: pallet_referenda::Curve::LinearDecreasing { - length: Perbill::from_percent(100), - floor: Perbill::from_percent(10), - ceil: Perbill::from_percent(50), - }, - }, - ), - ( - constants::HEAD_AMBASSADOR_TIER_5, - pallet_referenda::TrackInfo { - name: "head ambassador tier 5", - max_deciding: 10, - decision_deposit: 5 * DOLLARS, - prepare_period: 24 * HOURS, - decision_period: 7 * DAYS, - confirm_period: 24 * HOURS, - min_enactment_period: 1 * HOURS, - min_approval: pallet_referenda::Curve::LinearDecreasing { - length: Perbill::from_percent(100), - floor: Perbill::from_percent(50), - ceil: Perbill::from_percent(100), - }, - min_support: pallet_referenda::Curve::LinearDecreasing { - length: Perbill::from_percent(100), - floor: Perbill::from_percent(10), - ceil: Perbill::from_percent(50), - }, - }, - ), - ( - constants::HEAD_AMBASSADOR_TIER_6, - pallet_referenda::TrackInfo { - name: "head ambassador tier 6", - max_deciding: 10, - decision_deposit: 5 * DOLLARS, - prepare_period: 24 * HOURS, - decision_period: 7 * DAYS, - 
confirm_period: 24 * HOURS, - min_enactment_period: 1 * HOURS, - min_approval: pallet_referenda::Curve::LinearDecreasing { - length: Perbill::from_percent(100), - floor: Perbill::from_percent(50), - ceil: Perbill::from_percent(100), - }, - min_support: pallet_referenda::Curve::LinearDecreasing { - length: Perbill::from_percent(100), - floor: Perbill::from_percent(10), - ceil: Perbill::from_percent(50), - }, - }, - ), - ( - constants::HEAD_AMBASSADOR_TIER_7, - pallet_referenda::TrackInfo { - name: "head ambassador tier 7", - max_deciding: 10, - decision_deposit: 5 * DOLLARS, - prepare_period: 24 * HOURS, - decision_period: 7 * DAYS, - confirm_period: 24 * HOURS, - min_enactment_period: 1 * HOURS, - min_approval: pallet_referenda::Curve::LinearDecreasing { - length: Perbill::from_percent(100), - floor: Perbill::from_percent(50), - ceil: Perbill::from_percent(100), - }, - min_support: pallet_referenda::Curve::LinearDecreasing { - length: Perbill::from_percent(100), - floor: Perbill::from_percent(10), - ceil: Perbill::from_percent(50), - }, - }, - ), - ( - constants::MASTER_AMBASSADOR_TIER_8, - pallet_referenda::TrackInfo { - name: "master ambassador tier 8", - max_deciding: 10, - decision_deposit: 5 * DOLLARS, - prepare_period: 24 * HOURS, - decision_period: 7 * DAYS, - confirm_period: 24 * HOURS, - min_enactment_period: 1 * HOURS, - min_approval: pallet_referenda::Curve::LinearDecreasing { - length: Perbill::from_percent(100), - floor: Perbill::from_percent(50), - ceil: Perbill::from_percent(100), - }, - min_support: pallet_referenda::Curve::LinearDecreasing { - length: Perbill::from_percent(100), - floor: Perbill::from_percent(10), - ceil: Perbill::from_percent(50), - }, - }, - ), - ( - constants::MASTER_AMBASSADOR_TIER_9, - pallet_referenda::TrackInfo { - name: "master ambassador tier 9", - max_deciding: 10, - decision_deposit: 5 * DOLLARS, - prepare_period: 24 * HOURS, - decision_period: 7 * DAYS, - confirm_period: 24 * HOURS, - min_enactment_period: 1 * HOURS, - min_approval: pallet_referenda::Curve::LinearDecreasing { - length: Perbill::from_percent(100), - floor: Perbill::from_percent(50), - ceil: Perbill::from_percent(100), - }, - min_support: pallet_referenda::Curve::LinearDecreasing { - length: Perbill::from_percent(100), - floor: Perbill::from_percent(10), - ceil: Perbill::from_percent(50), - }, - }, - ), - ]; - &DATA[..] - } - - /// Determine the voting track for the given `origin`. - fn track_for(id: &Self::RuntimeOrigin) -> Result { - #[cfg(feature = "runtime-benchmarks")] - { - // For benchmarks, we enable a root origin. - // It is important that this is not available in production! 
- let root: Self::RuntimeOrigin = frame_system::RawOrigin::Root.into(); - if &root == id { - return Ok(constants::MASTER_AMBASSADOR_TIER_9) - } - } - - match Origin::try_from(id.clone()) { - Ok(Origin::Ambassadors) => Ok(constants::AMBASSADOR_TIER_1), - Ok(Origin::AmbassadorsTier2) => Ok(constants::AMBASSADOR_TIER_2), - Ok(Origin::SeniorAmbassadors) => Ok(constants::SENIOR_AMBASSADOR_TIER_3), - Ok(Origin::SeniorAmbassadorsTier4) => Ok(constants::SENIOR_AMBASSADOR_TIER_4), - Ok(Origin::HeadAmbassadors) => Ok(constants::HEAD_AMBASSADOR_TIER_5), - Ok(Origin::HeadAmbassadorsTier6) => Ok(constants::HEAD_AMBASSADOR_TIER_6), - Ok(Origin::HeadAmbassadorsTier7) => Ok(constants::HEAD_AMBASSADOR_TIER_7), - Ok(Origin::MasterAmbassadors) => Ok(constants::MASTER_AMBASSADOR_TIER_8), - Ok(Origin::MasterAmbassadorsTier9) => Ok(constants::MASTER_AMBASSADOR_TIER_9), - _ => Err(()), - } - } -} - -// implements [`frame_support::traits::Get`] for [`TracksInfo`] -pallet_referenda::impl_tracksinfo_get!(TracksInfo, Balance, BlockNumber); diff --git a/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/fellowship/migration.rs b/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/fellowship/migration.rs deleted file mode 100644 index 9350d03a2c9..00000000000 --- a/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/fellowship/migration.rs +++ /dev/null @@ -1,261 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Cumulus. - -// Cumulus is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Cumulus is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Cumulus. If not, see . - -//! Migrations. - -use frame_support::{pallet_prelude::*, traits::OnRuntimeUpgrade, weights::Weight}; -use log; - -/// Initial import of the Kusama Technical Fellowship. -pub(crate) mod import_kusama_fellowship { - use super::*; - use frame_support::{parameter_types, traits::RankedMembers}; - use pallet_ranked_collective::{Config, MemberCount, Pallet as RankedCollective, Rank}; - #[cfg(feature = "try-runtime")] - use sp_std::vec::Vec; - - const TARGET: &str = "runtime::migration::import_fellowship"; - - parameter_types! { - // The Fellowship addresses from Kusama state. 
- pub const FellowshipAddresses: [(Rank, [u8; 32]); 47] = [ - (6, hex_literal::hex!("f0673d30606ee26672707e4fd2bc8b58d3becb7aba2d5f60add64abb5fea4710"),), - (6, hex_literal::hex!("3c235e80e35082b668682531b9b062fda39a46edb94f884d9122d86885fd5f1b"),), - (6, hex_literal::hex!("7628a5be63c4d3c8dbb96c2904b1a9682e02831a1af836c7efc808020b92fa63"),), - (5, hex_literal::hex!("9c84f75e0b1b92f6b003bde6212a8b2c9b776f3720f942b33fed8709f103a268"),), - (5, hex_literal::hex!("bc64065524532ed9e805fb0d39a5c0199216b52871168e5e4d0ab612f8797d61"),), - (5, hex_literal::hex!("2e1884c53071526483b14004e894415f02b55fc2e2aef8e1df8ccf7ce5bd5570"),), - (5, hex_literal::hex!("5c5062779d44ea2ab0469e155b8cf3e004fce71b3b3d38263cd9fa9478f12f28"),), - (4, hex_literal::hex!("4adf51a47b72795366d52285e329229c836ea7bbfe139dbe8fa0700c4f86fc56"),), - (4, hex_literal::hex!("1c90e3dabd3fd0f6bc648045018f78fcee8fe24122c22d8d2a14e9905073d10f"),), - (4, hex_literal::hex!("8e851ed992228f2268ee8c614fe6075d3800060ae14098e0309413a0a81c4470"),), - (3, hex_literal::hex!("720d807d46b941703ffe0278e8b173dc6738c5af8af812ceffc90c69390bbf1f"),), - (3, hex_literal::hex!("c4965f7fe7be8174717a24ffddf684986d122c7e293ddf875cdf9700a07b6812"),), - (3, hex_literal::hex!("beae5bcad1a8c156291b7ddf46b38b0c61a6aaacebd57b21c75627bfe7f9ab71"),), - (3, hex_literal::hex!("ccd87fa65729f7bdaa8305581a7a499aa24c118e83f5714152c0e22617c6fc63"),), - (3, hex_literal::hex!("e0f0f94962fc0a8c1a0f0527dc8e592c67939c46c903b6016cc0a8515da0044d"),), - (3, hex_literal::hex!("984e16482c99cfad1436111e321a86d87d0fac203bf64538f888e45d793b5413"),), - (3, hex_literal::hex!("44a3efb5bfa9023d4ef27b7d31d76f531b4d7772b1679b7fb32b6263ac39100e"),), - (2, hex_literal::hex!("2eba9a39dbfdd5f3cba964355d45e27319f0271023c0353d97dc6df2401b0e3d"),), - (2, hex_literal::hex!("ba3e9b87792bcfcc237fa8181185b8883c77f3e24f45e4a92ab31d07a4703520"),), - (2, hex_literal::hex!("9e6eb74b0a6b39de36fb58d1fab20bc2b3fea96023ce5a47941c20480d99f92e"),), - (2, hex_literal::hex!("ee3d9d8c48ee88dce78fd7bafe3ce2052900eb465085b9324d4f5da26b145f2b"),), - (2, hex_literal::hex!("d8290537d6e31fe1ff165eaa62b63f6f3556dcc720b0d3a6d7eab96275617304"),), - (2, hex_literal::hex!("5a090c88f0438b46b451026597cee760a7bac9d396c9c7b529b68fb78aec5f43"),), - (2, hex_literal::hex!("18d30040a8245c5ff17afc9a8169d7d0771fe7ab4135a64a022c254117340720"),), - (1, hex_literal::hex!("b4f7f03bebc56ebe96bc52ea5ed3159d45a0ce3a8d7f082983c33ef133274747"),), - (1, hex_literal::hex!("caafae0aaa6333fcf4dc193146945fe8e4da74aa6c16d481eef0ca35b8279d73"),), - (1, hex_literal::hex!("a66e0f4e1a121cc83fddf3096e8ec8c9e9c85989f276e39e951fb0e4a5398763"),), - (1, hex_literal::hex!("f65f3cade8f68e8f34c6266b0d37e58a754059ca96816e964f98e17c79505073"),), - (1, hex_literal::hex!("8c232c91ef2a9983ba65c4b75bb86fcbae4d909900ea8aa06c3644ca1161db48"),), - (1, hex_literal::hex!("78e4813814891bd48bc745b79254a978833d41fbe0f387df93cd87eae2468926"),), - (1, hex_literal::hex!("d44824ac8d1edecca67639ca74d208bd2044a10e67c9677e288080191e3fec13"),), - (1, hex_literal::hex!("585e982d74da4f4290d20a73800cfd705cf59e1f5880aaee5506b5eaaf544f49"),), - (1, hex_literal::hex!("d851f44a6f0d0d2f3439a51f2f75f66f4ea1a8e6c33c32f9af75fc188afb7546"),), - (1, hex_literal::hex!("dca89b135d1a6aee0a498610a70eeaed056727c8a4d220da245842e540a54a74"),), - (1, hex_literal::hex!("aa91fc0201f26b713a018669bcd269babf25368eee2493323b1ce0190a178a27"),), - (1, hex_literal::hex!("dc20836f2e4b88c1858d1e3f918e7358043b4a8abcd2874e74d91d26c52eca2a"),), - (1, 
hex_literal::hex!("145d6c503d0cf97f4c7725ca773741bd02e1760bfb52e021af5a9f2de283012c"),), - (1, hex_literal::hex!("307183930b2264c5165f4a210a99520c5f1672b0413d57769fabc19e6866fb25"),), - (1, hex_literal::hex!("6201961514cf5ad87f1c4dd0c392ee28231f805f77975147bf2c33bd671b9822"),), - (1, hex_literal::hex!("c6f57237cd4abfbeed99171495fc784e45a9d5d2814d435de40de00991a73c06"),), - (1, hex_literal::hex!("c1df5c7e8ca56037450c58734326ebe34aec8f7d1928322a12164856365fea73"),), - (1, hex_literal::hex!("12c039004da5e1e846aae808277098c719cef1f4985aed00161a42ac4f0e002f"),), - (1, hex_literal::hex!("7460ac178015d2a7c289bb68ef9fdaac071596ab4425c276a0040aaac7055566"),), - (1, hex_literal::hex!("eec4bd650a277342ebba0954ac786df2623bd6a9d6d3e69b484482336c549f79"),), - (1, hex_literal::hex!("e287c7494655d636a846f5c3347ad2cb3c462a8d46e0832be70fcc0ab54ee62d"),), - (1, hex_literal::hex!("82bf733f44a840f0a5c1935a002d4e541d81298fad6d1da8124073485983860e"),), - (1, hex_literal::hex!("d5b89078eed9b9dfec5c7d8413bac0b720bad3bd4078c4d8c894325713192502"),), - ]; - } - - /// Implements `OnRuntimeUpgrade` trait. - pub struct Migration(PhantomData<(T, I)>); - - impl, I: 'static> OnRuntimeUpgrade for Migration - where - ::AccountId: From<[u8; 32]>, - { - #[cfg(feature = "try-runtime")] - fn pre_upgrade() -> Result, sp_runtime::TryRuntimeError> { - let onchain_version = RankedCollective::::on_chain_storage_version(); - ensure!(onchain_version == 0, "the storage version must be 0."); - let member_count = MemberCount::::get(0); - ensure!(member_count == 0, "the collective must be uninitialized."); - - Ok(Vec::new()) - } - - fn on_runtime_upgrade() -> Weight { - let current_version = RankedCollective::::current_storage_version(); - let onchain_version = RankedCollective::::on_chain_storage_version(); - let mut weight = T::DbWeight::get().reads(1); - log::info!( - target: TARGET, - "running migration with current storage version {:?} / onchain {:?}.", - current_version, - onchain_version - ); - if onchain_version != 0 { - log::warn!( - target: TARGET, - "unsupported storage version, skipping import_fellowship migration." - ); - return weight - } - let member_count = MemberCount::::get(0); - weight.saturating_accrue(T::DbWeight::get().reads(1)); - if member_count != 0 { - log::warn!( - target: TARGET, - "the collective already initialized, skipping import_fellowship migration." - ); - return weight - } - - for (rank, account_id32) in FellowshipAddresses::get() { - let who: T::AccountId = account_id32.into(); - let _ = as RankedMembers>::induct(&who); - for _ in 0..rank { - let _ = as RankedMembers>::promote(&who); - // 1 write to `IdToIndex` and `IndexToId` per member on each rank. - weight.saturating_accrue(T::DbWeight::get().writes(2)); - } - // 1 write to `IdToIndex` and `IndexToId` per member on each rank. - weight.saturating_accrue(T::DbWeight::get().writes(2)); - // 1 read and 1 write to `Members` and `MemberCount` per member. 
- weight.saturating_accrue(T::DbWeight::get().reads_writes(2, 2)); - } - weight - } - - #[cfg(feature = "try-runtime")] - fn post_upgrade(_state: Vec) -> Result<(), sp_runtime::TryRuntimeError> { - ensure!(MemberCount::::get(0) == 47, "invalid members count at rank 0."); - ensure!(MemberCount::::get(1) == 47, "invalid members count at rank 1."); - ensure!(MemberCount::::get(2) == 24, "invalid members count at rank 2."); - ensure!(MemberCount::::get(3) == 17, "invalid members count at rank 3."); - ensure!(MemberCount::::get(4) == 10, "invalid members count at rank 4."); - ensure!(MemberCount::::get(5) == 7, "invalid members count at rank 5."); - ensure!(MemberCount::::get(6) == 3, "invalid members count at rank 6."); - ensure!(MemberCount::::get(7) == 0, "invalid members count at rank 7."); - Ok(()) - } - } -} - -#[cfg(test)] -pub mod tests { - use super::import_kusama_fellowship::FellowshipAddresses; - use crate::{FellowshipCollectiveInstance as Fellowship, Runtime, System}; - use frame_support::traits::OnRuntimeUpgrade; - use pallet_ranked_collective::Rank; - use parachains_common::AccountId; - use sp_core::crypto::Ss58Codec; - use sp_runtime::{AccountId32, BuildStorage}; - - #[test] - fn check_fellowship_addresses() { - let fellowship_addresses = FellowshipAddresses::get(); - let kusama_fellowship_ss58: [(Rank, _); 47] = [ - (6, "16SDAKg9N6kKAbhgDyxBXdHEwpwHUHs2CNEiLNGeZV55qHna"), /* proof https://kusama.subscan.io/extrinsic/16832707-4 */ - (6, "12MrP337azmkTdfCUKe5XLnSQrbgEKqqfZ4PQC7CZTJKAWR3"), /* proof https://kusama.subscan.io/extrinsic/16967809-2 */ - (6, "FFFF3gBSSDFSvK2HBq4qgLH75DHqXWPHeCnR1BSksAMacBs"), - (5, "G7YVCdxZb8JLpAm9WMnJdNuojNT84AzU62zmvx5P1FMNtg2"), - (5, "15G1iXDLgFyfnJ51FKq1ts44TduMyUtekvzQi9my4hgYt2hs"), /* proof https://kusama.subscan.io/extrinsic/16917610-2 */ - (5, "Dcm1BqR4N7nHuV43TXdET7pNibt1Nzm42FggPHpxKRven53"), - (5, "1363HWTPzDrzAQ6ChFiMU6mP4b6jmQid2ae55JQcKtZnpLGv"), /* proof https://kusama.subscan.io/extrinsic/16961180-2 */ - (4, "EGVQCe73TpFyAZx5uKfE1222XfkT3BSKozjgcqzLBnc5eYo"), - (4, "1eTPAR2TuqLyidmPT9rMmuycHVm9s9czu78sePqg2KHMDrE"), /* proof https://kusama.subscan.io/extrinsic/16921712-3 */ - (4, "14DsLzVyTUTDMm2eP3czwPbH53KgqnQRp3CJJZS9GR7yxGDP"), /* proof https://kusama.subscan.io/extrinsic/16917519-2 */ - (3, "13aYUFHB3umoPoxBEAHSv451iR3RpsNi3t5yBZjX2trCtTp6"), /* proof https://kusama.subscan.io/extrinsic/16917832-3 */ - (3, "H25aCspunTUqAt4D1gC776vKZ8FX3MvQJ3Jde6qDXPQaFxk"), - (3, "GtLQoW4ZqcjExMPq6qB22bYc6NaX1yMzRuGWpSRiHqnzRb9"), - (3, "15db5ksZgmhWE9U8MDq4wLKUdFivLVBybztWV8nmaJvv3NU1"), /* proof https://kusama.subscan.io/extrinsic/16876631-2 */ - (3, "HfFpz4QUxfbocHudf8UU7cMgHqkHpf855Me5X846PZAsAYE"), - (3, "14ShUZUYUR35RBZW6uVVt1zXDxmSQddkeDdXf1JkMA6P721N"), /* proof https://kusama.subscan.io/extrinsic/16918890-8 */ - (3, "12YzxR5TvGzfMVZNnhAJ5Hwi5zExpRWMKv2MuMwZTrddvgoi"), /* proof https://kusama.subscan.io/extrinsic/16924324-3 */ - (2, "Ddb9puChKMHq4gM6o47E551wAmaNeu6kHngX1jzNNqAw782"), - (2, "15DCWHQknBjc5YPFoVj8Pn2KoqrqYywJJ95BYNYJ4Fj3NLqz"), /* proof https://kusama.subscan.io/extrinsic/16834952-2 */ - (2, "14ajTQdrtCA8wZmC4PgD8Y1B2Gy8L4Z3oi2fodxq9FehcFrM"), /* proof https://kusama.subscan.io/extrinsic/16944257-2 */ - (2, "HxhDbS3grLurk1dhDgPiuDaRowHY1xHCU8Vu8on3fdg85tx"), - (2, "HTk3eccL7WBkiyxz1gBcqQRghsJigoDMD7mnQaz1UAbMpQV"), - (2, "EcNWrSPSDcVBRymwr26kk4JVFg92PdoU5Xwp87W2FgFSt9c"), - (2, "D8sM6vKjWaeKy2zCPYWGkLLbWdUtWQrXBTQqr4dSYnVQo21"), - (1, "GfbnnEgRU94n9ed4RFZ6Z9dBAWs5obykigJSwXKU9hsT2uU"), - (1, 
"HA5NtttvyZsxo4wGxGoJJSMaWtdEFZAuGUMFHVWD7fgenPv"), - (1, "14mDeKZ7qp9hqBjjDg51c8BFrf9o69om8piSSRwj2fT5Yb1i"), /* proof https://kusama.subscan.io/extrinsic/16919020-4 */ - (1, "16a357f5Sxab3V2ne4emGQvqJaCLeYpTMx3TCjnQhmJQ71DX"), /* proof https://kusama.subscan.io/extrinsic/16836396-5 */ - (1, "14Ak9rrF6RKHHoLLRUYMnzcvvi1t8E1yAMa7tcmiwUfaqzYK"), /* proof https://kusama.subscan.io/extrinsic/16921990-3 */ - (1, "FJq9JpA9P7EXbmfsN9YiewJaDbQyL6vQyksGtJvzfbn6zf8"), - (1, "15oLanodWWweiZJSoDTEBtrX7oGfq6e8ct5y5E6fVRDPhUgj"), /* proof https://kusama.subscan.io/extrinsic/16876423-7 */ - (1, "EaBqDJJNsZmYdQ4xn1vomPJVNh7fjA6UztZeEjn7ZzdeT7V"), - (1, "HTxCvXKVvUZ7PQq175kCRRLu7XkGfTfErrdNXr1ZuuwVZWv"), - (1, "HZe91A6a1xqbKaw6ofx3GFepJjhVXHrwHEwn6YUDDFphpX9"), - (1, "GRy2P3kBEzSHCbmDJfquku1cyUyhZaAqojRcNE4A4U3MnLd"), - (1, "HYwiBo7Mcv7uUDg4MUoKm2fxzv4dMLAtmmNfzHV8qcQJpAE"), - (1, "1ThiBx5DDxFhoD9GY6tz5Fp4Y7Xn1xfLmDddcoFQghDvvjg"), /* proof https://kusama.subscan.io/extrinsic/16918130-2 */ - (1, "DfqY6XQUSETTszBQ1juocTcG9iiDoXhvq1CoVadBSUqTGJS"), - (1, "EnpgVWGGQVrFdSB2qeXRVdtccV6U5ZscNELBoERbkFD8Wi6"), - (1, "H5BuqCmucJhUUuvjAzPazeVwVCtUSXVQdc5Dnx2q5zD7rVn"), - (1, "GxX7S1pTDdeaGUjpEPPF2we6tgHDhbatFG25pVmVFtGHLH6"), - (1, "CzuUtvKhZNZBjyAXeYviaRXwrLhVrsupJ9PrWmdq7BJTjGR"), - (1, "FCunn2Rx8JqfT5g6noUKKazph4jLDba5rUee7o3ZmJ362Ju"), - (1, "HyPMjWRHCpJS7x2SZ2R6M2XG5ZiCiZag4U4r7gBHRsE5mTc"), - (1, "1682A5hxfiS1Kn1jrUnMYv14T9EuEnsgnBbujGfYbeEbSK3w"), /* proof https://kusama.subscan.io/extrinsic/16919077-2 */ - (1, "13xS6fK6MHjApLnjdX7TJYw1niZmiXasSN91bNtiXQjgEtNx"), /* proof https://kusama.subscan.io/extrinsic/16918212-7 */ - (1, "15qE2YAQCs5Y962RHE7RzNjQxU6Pei21nhkkSM9Sojq1hHps"), /* https://kusama.subscan.io/extrinsic/17352973-2 */ - ]; - - for (index, val) in kusama_fellowship_ss58.iter().enumerate() { - let account: AccountId32 = ::from_string(val.1).unwrap(); - let account32: [u8; 32] = account.clone().into(); - assert_eq!( - fellowship_addresses[index].0, kusama_fellowship_ss58[index].0, - "ranks must be equal." - ); - assert_eq!(fellowship_addresses[index].1, account32, "accounts must be equal."); - } - } - - #[test] - fn test_fellowship_import() { - use super::import_kusama_fellowship::Migration; - use pallet_ranked_collective::{IdToIndex, IndexToId, MemberCount, MemberRecord, Members}; - - let t = frame_system::GenesisConfig::::default().build_storage().unwrap(); - let mut ext = sp_io::TestExternalities::new(t); - ext.execute_with(|| System::set_block_number(1)); - ext.execute_with(|| { - assert_eq!(MemberCount::::get(0), 0); - Migration::::on_runtime_upgrade(); - assert_eq!(MemberCount::::get(0), 47); - assert_eq!(MemberCount::::get(6), 3); - assert_eq!(MemberCount::::get(7), 0); - for (rank, account_id32) in FellowshipAddresses::get() { - let who = ::AccountId::from(account_id32); - assert!(IdToIndex::::get(0, &who).is_some()); - assert!(IdToIndex::::get(rank + 1, &who).is_none()); - let index = IdToIndex::::get(rank, &who).unwrap(); - assert_eq!(IndexToId::::get(rank, index).unwrap(), who); - assert_eq!( - Members::::get(&who).unwrap(), - MemberRecord::new(rank) - ); - } - }); - } -} diff --git a/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/fellowship/mod.rs b/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/fellowship/mod.rs deleted file mode 100644 index 2a2757ea5ce..00000000000 --- a/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/fellowship/mod.rs +++ /dev/null @@ -1,239 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. 
-// This file is part of Cumulus. - -// Cumulus is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Cumulus is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Cumulus. If not, see . - -//! The Polkadot Technical Fellowship. - -pub(crate) mod migration; -mod origins; -mod tracks; -use crate::{ - impls::ToParentTreasury, - weights, - xcm_config::{FellowshipAdminBodyId, UsdtAssetHub}, - AccountId, Balance, Balances, FellowshipReferenda, GovernanceLocation, PolkadotTreasuryAccount, - Preimage, Runtime, RuntimeCall, RuntimeEvent, RuntimeOrigin, Scheduler, DAYS, -}; -use frame_support::{ - parameter_types, - traits::{EitherOf, EitherOfDiverse, MapSuccess, OriginTrait, TryWithMorphedArg}, -}; -use frame_system::EnsureRootWithSuccess; -pub use origins::{ - pallet_origins as pallet_fellowship_origins, Architects, EnsureCanPromoteTo, EnsureCanRetainAt, - EnsureFellowship, Fellows, Masters, Members, ToVoice, -}; -use pallet_ranked_collective::EnsureOfRank; -use pallet_xcm::{EnsureXcm, IsVoiceOfBody}; -use parachains_common::{polkadot::account, HOURS}; -use sp_core::{ConstU128, ConstU32}; -use sp_runtime::traits::{AccountIdConversion, ConstU16, ConvertToValue, Replace, TakeFirst}; -use xcm_builder::{AliasesIntoAccountId32, PayOverXcm}; - -#[cfg(feature = "runtime-benchmarks")] -use crate::impls::benchmarks::{OpenHrmpChannel, PayWithEnsure}; - -/// The Fellowship members' ranks. -pub mod ranks { - use pallet_ranked_collective::Rank; - - pub const DAN_1: Rank = 1; // aka Members. - pub const DAN_2: Rank = 2; - pub const DAN_3: Rank = 3; // aka Fellows. - pub const DAN_4: Rank = 4; // aka Architects. - pub const DAN_5: Rank = 5; - pub const DAN_6: Rank = 6; - pub const DAN_7: Rank = 7; // aka Masters. - pub const DAN_8: Rank = 8; - pub const DAN_9: Rank = 9; -} - -parameter_types! { - // Referenda pallet account, used to temporarily deposit slashed imbalance before teleporting. - pub ReferendaPalletAccount: AccountId = account::REFERENDA_PALLET_ID.into_account_truncating(); -} - -impl pallet_fellowship_origins::Config for Runtime {} - -pub type FellowshipReferendaInstance = pallet_referenda::Instance1; - -impl pallet_referenda::Config for Runtime { - type WeightInfo = weights::pallet_referenda_fellowship_referenda::WeightInfo; - type RuntimeCall = RuntimeCall; - type RuntimeEvent = RuntimeEvent; - type Scheduler = Scheduler; - type Currency = Balances; - // Fellows can submit proposals. 
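Only four of the nine Dans carry a named alias in the `ranks` module above, and the origins and tracks below reuse exactly that scheme. A tiny sketch of the mapping for reference:

```rust
/// Named aliases for the Fellowship Dan grades used throughout this module.
fn rank_alias(dan: u16) -> Option<&'static str> {
    match dan {
        1 => Some("Members"),
        3 => Some("Fellows"),
        4 => Some("Architects"),
        7 => Some("Masters"),
        _ => None,
    }
}

fn main() {
    assert_eq!(rank_alias(3), Some("Fellows"));
    assert_eq!(rank_alias(5), None); // V Dan has no alias of its own
}
```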
- type SubmitOrigin = EitherOf< - pallet_ranked_collective::EnsureMember, - MapSuccess< - TryWithMorphedArg< - RuntimeOrigin, - ::PalletsOrigin, - ToVoice, - EnsureOfRank, - (AccountId, u16), - >, - TakeFirst, - >, - >; - type CancelOrigin = Architects; - type KillOrigin = Masters; - type Slash = ToParentTreasury; - type Votes = pallet_ranked_collective::Votes; - type Tally = pallet_ranked_collective::TallyOf; - type SubmissionDeposit = ConstU128<0>; - type MaxQueued = ConstU32<100>; - type UndecidingTimeout = ConstU32<{ 7 * DAYS }>; - type AlarmInterval = ConstU32<1>; - type Tracks = tracks::TracksInfo; - type Preimages = Preimage; -} - -pub type FellowshipCollectiveInstance = pallet_ranked_collective::Instance1; - -impl pallet_ranked_collective::Config for Runtime { - type WeightInfo = weights::pallet_ranked_collective_fellowship_collective::WeightInfo; - type RuntimeEvent = RuntimeEvent; - - #[cfg(not(feature = "runtime-benchmarks"))] - // Promotions and the induction of new members are serviced by `FellowshipCore` pallet instance. - type PromoteOrigin = frame_system::EnsureNever; - #[cfg(feature = "runtime-benchmarks")] - // The maximum value of `u16` set as a success value for the root to ensure the benchmarks will - // pass. - type PromoteOrigin = EnsureRootWithSuccess>; - - // Demotion is by any of: - // - Root can demote arbitrarily. - // - the FellowshipAdmin origin (i.e. token holder referendum); - // - // The maximum value of `u16` set as a success value for the root to ensure the benchmarks will - // pass. - type DemoteOrigin = EitherOf< - EnsureRootWithSuccess>, - MapSuccess< - EnsureXcm>, - Replace>, - >, - >; - type Polls = FellowshipReferenda; - type MinRankOfClass = tracks::MinRankOfClass; - type VoteWeight = pallet_ranked_collective::Geometric; -} - -pub type FellowshipCoreInstance = pallet_core_fellowship::Instance1; - -impl pallet_core_fellowship::Config for Runtime { - type WeightInfo = weights::pallet_core_fellowship_fellowship_core::WeightInfo; - type RuntimeEvent = RuntimeEvent; - type Members = pallet_ranked_collective::Pallet; - type Balance = Balance; - // Parameters are set by any of: - // - Root; - // - the FellowshipAdmin origin (i.e. token holder referendum); - // - a vote among all Fellows. - type ParamsOrigin = EitherOfDiverse< - EnsureXcm>, - Fellows, - >; - // Induction (creating a candidate) is by any of: - // - Root; - // - the FellowshipAdmin origin (i.e. token holder referendum); - // - a single Fellow; - // - a vote among all Members. - type InductOrigin = EitherOfDiverse< - EnsureXcm>, - EitherOfDiverse< - pallet_ranked_collective::EnsureMember< - Runtime, - FellowshipCollectiveInstance, - { ranks::DAN_3 }, - >, - Members, - >, - >; - // Approval (rank-retention) of a Member's current rank is by any of: - // - Root; - // - the FellowshipAdmin origin (i.e. token holder referendum); - // - a vote by the rank two above the current rank for all retention up to the Master rank. - type ApproveOrigin = EitherOf< - MapSuccess< - EnsureXcm>, - Replace>, - >, - EnsureCanRetainAt, - >; - // Promotion is by any of: - // - Root can promote arbitrarily. - // - the FellowshipAdmin origin (i.e. token holder referendum); - // - a vote by the rank two above the new rank for all promotions up to the Master rank. - type PromoteOrigin = EitherOf< - MapSuccess< - EnsureXcm>, - Replace>, - >, - EnsureCanPromoteTo, - >; - type EvidenceSize = ConstU32<65536>; -} - -pub type FellowshipSalaryInstance = pallet_salary::Instance1; - -use xcm::prelude::*; - -parameter_types! 
{ - // The interior location on AssetHub for the paying account. This is the Fellowship Salary - // pallet instance (which sits at index 64). This sovereign account will need funding. - pub Interior: InteriorMultiLocation = PalletInstance(64).into(); -} - -const USDT_UNITS: u128 = 1_000_000; - -/// [`PayOverXcm`] setup to pay the Fellowship salary on the AssetHub in USDT. -pub type FellowshipSalaryPaymaster = PayOverXcm< - Interior, - crate::xcm_config::XcmRouter, - crate::PolkadotXcm, - ConstU32<{ 6 * HOURS }>, - AccountId, - (), - ConvertToValue, - AliasesIntoAccountId32<(), AccountId>, ->; - -impl pallet_salary::Config for Runtime { - type WeightInfo = weights::pallet_salary_fellowship_salary::WeightInfo; - type RuntimeEvent = RuntimeEvent; - - #[cfg(not(feature = "runtime-benchmarks"))] - type Paymaster = FellowshipSalaryPaymaster; - #[cfg(feature = "runtime-benchmarks")] - type Paymaster = PayWithEnsure>>; - type Members = pallet_ranked_collective::Pallet; - - #[cfg(not(feature = "runtime-benchmarks"))] - type Salary = pallet_core_fellowship::Pallet; - #[cfg(feature = "runtime-benchmarks")] - type Salary = frame_support::traits::tokens::ConvertRank< - crate::impls::benchmarks::RankToSalary, - >; - // 15 days to register for a salary payment. - type RegistrationPeriod = ConstU32<{ 15 * DAYS }>; - // 15 days to claim the salary payment. - type PayoutPeriod = ConstU32<{ 15 * DAYS }>; - // Total monthly salary budget. - type Budget = ConstU128<{ 100_000 * USDT_UNITS }>; -} diff --git a/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/fellowship/origins.rs b/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/fellowship/origins.rs deleted file mode 100644 index 5ed2c19f79e..00000000000 --- a/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/fellowship/origins.rs +++ /dev/null @@ -1,247 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Cumulus. - -// Cumulus is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Cumulus is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Cumulus. If not, see . - -//! Fellowship custom origins. - -use super::ranks; -pub use pallet_origins::*; - -#[frame_support::pallet] -pub mod pallet_origins { - use super::ranks; - use frame_support::pallet_prelude::*; - use pallet_ranked_collective::Rank; - - #[pallet::config] - pub trait Config: frame_system::Config {} - - #[pallet::pallet] - pub struct Pallet(_); - - #[derive(PartialEq, Eq, Clone, MaxEncodedLen, Encode, Decode, TypeInfo, RuntimeDebug)] - #[pallet::origin] - pub enum Origin { - /// Origin aggregated through weighted votes of those with rank 1 or above; `Success` is 1. - /// Aka the "voice" of all Members. - Members, - /// Origin aggregated through weighted votes of those with rank 2 or above; `Success` is 2. - /// Aka the "voice" of members at least II Dan. - Fellowship2Dan, - /// Origin aggregated through weighted votes of those with rank 3 or above; `Success` is 3. - /// Aka the "voice" of all Fellows. 
- Fellows, - /// Origin aggregated through weighted votes of those with rank 4 or above; `Success` is 4. - /// Aka the "voice" of members at least IV Dan. - Architects, - /// Origin aggregated through weighted votes of those with rank 5 or above; `Success` is 5. - /// Aka the "voice" of members at least V Dan. - Fellowship5Dan, - /// Origin aggregated through weighted votes of those with rank 6 or above; `Success` is 6. - /// Aka the "voice" of members at least VI Dan. - Fellowship6Dan, - /// Origin aggregated through weighted votes of those with rank 7 or above; `Success` is 7. - /// Aka the "voice" of all Masters. - Masters, - /// Origin aggregated through weighted votes of those with rank 8 or above; `Success` is 8. - /// Aka the "voice" of members at least VIII Dan. - Fellowship8Dan, - /// Origin aggregated through weighted votes of those with rank 9 or above; `Success` is 9. - /// Aka the "voice" of members at least IX Dan. - Fellowship9Dan, - - /// Origin aggregated through weighted votes of those with rank 3 or above when voting on - /// a fortnight-long track; `Success` is 1. - RetainAt1Dan, - /// Origin aggregated through weighted votes of those with rank 4 or above when voting on - /// a fortnight-long track; `Success` is 2. - RetainAt2Dan, - /// Origin aggregated through weighted votes of those with rank 5 or above when voting on - /// a fortnight-long track; `Success` is 3. - RetainAt3Dan, - /// Origin aggregated through weighted votes of those with rank 6 or above when voting on - /// a fortnight-long track; `Success` is 4. - RetainAt4Dan, - /// Origin aggregated through weighted votes of those with rank 7 or above when voting on - /// a fortnight-long track; `Success` is 5. - RetainAt5Dan, - /// Origin aggregated through weighted votes of those with rank 8 or above when voting on - /// a fortnight-long track; `Success` is 6. - RetainAt6Dan, - - /// Origin aggregated through weighted votes of those with rank 3 or above when voting on - /// a month-long track; `Success` is 1. - PromoteTo1Dan, - /// Origin aggregated through weighted votes of those with rank 4 or above when voting on - /// a month-long track; `Success` is 2. - PromoteTo2Dan, - /// Origin aggregated through weighted votes of those with rank 5 or above when voting on - /// a month-long track; `Success` is 3. - PromoteTo3Dan, - /// Origin aggregated through weighted votes of those with rank 6 or above when voting on - /// a month-long track; `Success` is 4. - PromoteTo4Dan, - /// Origin aggregated through weighted votes of those with rank 7 or above when voting on - /// a month-long track; `Success` is 5. - PromoteTo5Dan, - /// Origin aggregated through weighted votes of those with rank 8 or above when voting on - /// a month-long track; `Success` is 6. - PromoteTo6Dan, - } - - impl Origin { - /// Returns the rank that the origin `self` speaks for, or `None` if it doesn't speak for - /// any. - /// - /// `Some` will be returned only for the first 9 elements of [Origin]. 
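The retention and promotion origins above follow the rule spelled out in their doc comments: a referendum concerning grade `g` is decided by the voice of members ranked two above it, while the origin's `Success` value is `g` itself. A sketch of that relationship, assuming only what those comments state:

```rust
/// Rank of the collective voice that decides a retention or promotion
/// referendum for the given grade (grade + 2), per the origin doc comments.
fn deciding_rank(grade: u16) -> u16 {
    grade + 2
}

fn main() {
    // `RetainAt1Dan` has Success = 1 and is voted on by rank 3 and above.
    assert_eq!(deciding_rank(1), 3);
    // `PromoteTo6Dan` has Success = 6 and is voted on by rank 8 and above.
    assert_eq!(deciding_rank(6), 8);
}
```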
- pub fn as_voice(&self) -> Option { - Some(match &self { - Origin::Members => ranks::DAN_1, - Origin::Fellowship2Dan => ranks::DAN_2, - Origin::Fellows => ranks::DAN_3, - Origin::Architects => ranks::DAN_4, - Origin::Fellowship5Dan => ranks::DAN_5, - Origin::Fellowship6Dan => ranks::DAN_6, - Origin::Masters => ranks::DAN_7, - Origin::Fellowship8Dan => ranks::DAN_8, - Origin::Fellowship9Dan => ranks::DAN_9, - _ => return None, - }) - } - } - - /// A `TryMorph` implementation which is designed to convert an aggregate `RuntimeOrigin` - /// value into the Fellowship voice it represents if it is a Fellowship pallet origin an - /// appropriate variant. See also [Origin::as_voice]. - pub struct ToVoice; - impl<'a, O: 'a + TryInto<&'a Origin>> sp_runtime::traits::TryMorph for ToVoice { - type Outcome = pallet_ranked_collective::Rank; - fn try_morph(o: O) -> Result { - o.try_into().ok().and_then(Origin::as_voice).ok_or(()) - } - } - - macro_rules! decl_unit_ensures { - ( $name:ident: $success_type:ty = $success:expr ) => { - pub struct $name; - impl> + From> - EnsureOrigin for $name - { - type Success = $success_type; - fn try_origin(o: O) -> Result { - o.into().and_then(|o| match o { - Origin::$name => Ok($success), - r => Err(O::from(r)), - }) - } - #[cfg(feature = "runtime-benchmarks")] - fn try_successful_origin() -> Result { - Ok(O::from(Origin::$name)) - } - } - }; - ( $name:ident ) => { decl_unit_ensures! { $name : () = () } }; - ( $name:ident: $success_type:ty = $success:expr, $( $rest:tt )* ) => { - decl_unit_ensures! { $name: $success_type = $success } - decl_unit_ensures! { $( $rest )* } - }; - ( $name:ident, $( $rest:tt )* ) => { - decl_unit_ensures! { $name } - decl_unit_ensures! { $( $rest )* } - }; - () => {} - } - decl_unit_ensures!( - Members: Rank = ranks::DAN_1, - Fellows: Rank = ranks::DAN_3, - Architects: Rank = ranks::DAN_4, - Masters: Rank = ranks::DAN_7, - ); - - macro_rules! decl_ensure { - ( - $vis:vis type $name:ident: EnsureOrigin { - $( $item:ident = $success:expr, )* - } - ) => { - $vis struct $name; - impl> + From> - EnsureOrigin for $name - { - type Success = $success_type; - fn try_origin(o: O) -> Result { - o.into().and_then(|o| match o { - $( - Origin::$item => Ok($success), - )* - r => Err(O::from(r)), - }) - } - #[cfg(feature = "runtime-benchmarks")] - fn try_successful_origin() -> Result { - // By convention the more privileged origins go later, so for greatest chance - // of success, we want the last one. - let _result: Result = Err(()); - $( - let _result: Result = Ok(O::from(Origin::$item)); - )* - _result - } - } - } - } - - // Fellowship origin indicating weighted voting from at least the rank of `Success` on a - // week-long track. - decl_ensure! { - pub type EnsureFellowship: EnsureOrigin { - Members = ranks::DAN_1, - Fellowship2Dan = ranks::DAN_2, - Fellows = ranks::DAN_3, - Architects = ranks::DAN_4, - Fellowship5Dan = ranks::DAN_5, - Fellowship6Dan = ranks::DAN_6, - Masters = ranks::DAN_7, - Fellowship8Dan = ranks::DAN_8, - Fellowship9Dan = ranks::DAN_9, - } - } - - // Fellowship origin indicating weighted voting from at least the rank of `Success + 2` on - // a fortnight-long track; needed for Fellowship retention voting. - decl_ensure! 
{ - pub type EnsureCanRetainAt: EnsureOrigin { - RetainAt1Dan = ranks::DAN_1, - RetainAt2Dan = ranks::DAN_2, - RetainAt3Dan = ranks::DAN_3, - RetainAt4Dan = ranks::DAN_4, - RetainAt5Dan = ranks::DAN_5, - RetainAt6Dan = ranks::DAN_6, - } - } - - // Fellowship origin indicating weighted voting from at least the rank of `Success + 2` on - // a month-long track; needed for Fellowship promotion voting. - decl_ensure! { - pub type EnsureCanPromoteTo: EnsureOrigin { - PromoteTo1Dan = ranks::DAN_1, - PromoteTo2Dan = ranks::DAN_2, - PromoteTo3Dan = ranks::DAN_3, - PromoteTo4Dan = ranks::DAN_4, - PromoteTo5Dan = ranks::DAN_5, - PromoteTo6Dan = ranks::DAN_6, - } - } -} diff --git a/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/fellowship/tracks.rs b/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/fellowship/tracks.rs deleted file mode 100644 index f4ba4e05ec1..00000000000 --- a/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/fellowship/tracks.rs +++ /dev/null @@ -1,532 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Cumulus. - -// Cumulus is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Cumulus is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Cumulus. If not, see . - -//! Track configurations for Fellowship. - -use crate::{Balance, BlockNumber, RuntimeOrigin, DAYS, DOLLARS, HOURS, MINUTES}; -use pallet_ranked_collective::Rank; -use sp_runtime::{traits::Convert, Perbill}; - -/// Referendum `TrackId` type. -pub type TrackId = u16; - -/// Referendum track IDs. -pub mod constants { - use super::TrackId; - - // Regular tracks (7 days) used for general operations. The required rank for voting is the - // same as that which is named (and also the track ID). - pub const MEMBERS: TrackId = 1; - pub const PROFICIENTS: TrackId = 2; - pub const FELLOWS: TrackId = 3; - pub const ARCHITECTS: TrackId = 4; - pub const ARCHITECTS_ADEPT: TrackId = 5; - pub const GRAND_ARCHITECTS: TrackId = 6; - pub const MASTERS: TrackId = 7; - pub const MASTERS_CONSTANT: TrackId = 8; - pub const GRAND_MASTERS: TrackId = 9; - - // Longer tracks (14 days) used for rank retention. These require a rank of two more than the - // grade at which they retain (as per the whitepaper). This works out as the track ID minus 8. - pub const RETAIN_AT_1DAN: TrackId = 11; - pub const RETAIN_AT_2DAN: TrackId = 12; - pub const RETAIN_AT_3DAN: TrackId = 13; - pub const RETAIN_AT_4DAN: TrackId = 14; - pub const RETAIN_AT_5DAN: TrackId = 15; - pub const RETAIN_AT_6DAN: TrackId = 16; - - // Longest tracks (30 days) used for promotions. These require a rank of two more than the - // grade to which they promote (as per the whitepaper). This works out as the track ID minus 18. - pub const PROMOTE_TO_1DAN: TrackId = 21; - pub const PROMOTE_TO_2DAN: TrackId = 22; - pub const PROMOTE_TO_3DAN: TrackId = 23; - pub const PROMOTE_TO_4DAN: TrackId = 24; - pub const PROMOTE_TO_5DAN: TrackId = 25; - pub const PROMOTE_TO_6DAN: TrackId = 26; -} - -/// Convert the track ID (defined above) into the minimum rank (i.e. 
fellowship Dan grade) required -/// to vote on the track. -pub struct MinRankOfClass; -impl Convert for MinRankOfClass { - fn convert(a: TrackId) -> Rank { - match a { - // Just a regular vote: the track ID is conveniently the same as the minimum rank. - regular @ 1..=9 => regular, - // A retention vote; the track ID turns out to be 8 more than the minimum required rank. - retention @ 11..=16 => retention - 8, - // A promotion vote; the track ID turns out to be 18 more than the minimum required - // rank. - promotion @ 21..=26 => promotion - 18, - _ => Rank::max_value(), - } - } -} - -const RETAIN_MAX_DECIDING: u32 = 25; -const RETAIN_DECISION_DEPOSIT: Balance = 5 * DOLLARS; -const RETAIN_PREPARE_PERIOD: BlockNumber = 0; -const RETAIN_DECISION_PERIOD: BlockNumber = 14 * DAYS; -const RETAIN_CONFIRM_PERIOD: BlockNumber = 1 * HOURS; -const RETAIN_MIN_ENACTMENT_PERIOD: BlockNumber = 0; -const RETAIN_MIN_APPROVAL: pallet_referenda::Curve = pallet_referenda::Curve::LinearDecreasing { - length: Perbill::from_percent(100), - floor: Perbill::from_percent(60), - ceil: Perbill::from_percent(100), -}; -const RETAIN_MIN_SUPPORT: pallet_referenda::Curve = pallet_referenda::Curve::LinearDecreasing { - length: Perbill::from_percent(100), - floor: Perbill::from_percent(10), - ceil: Perbill::from_percent(100), -}; - -const PROMOTE_MAX_DECIDING: u32 = 10; -const PROMOTE_DECISION_DEPOSIT: Balance = 5 * DOLLARS; -const PROMOTE_PREPARE_PERIOD: BlockNumber = 0; -const PROMOTE_DECISION_PERIOD: BlockNumber = 30 * DAYS; -const PROMOTE_CONFIRM_PERIOD: BlockNumber = 1 * HOURS; -const PROMOTE_MIN_ENACTMENT_PERIOD: BlockNumber = 0; -const PROMOTE_MIN_APPROVAL: pallet_referenda::Curve = pallet_referenda::Curve::LinearDecreasing { - length: Perbill::from_percent(100), - floor: Perbill::from_percent(60), - ceil: Perbill::from_percent(100), -}; -const PROMOTE_MIN_SUPPORT: pallet_referenda::Curve = pallet_referenda::Curve::LinearDecreasing { - length: Perbill::from_percent(100), - floor: Perbill::from_percent(10), - ceil: Perbill::from_percent(100), -}; - -pub struct TracksInfo; -impl pallet_referenda::TracksInfo for TracksInfo { - type Id = TrackId; - type RuntimeOrigin = ::PalletsOrigin; - fn tracks() -> &'static [(Self::Id, pallet_referenda::TrackInfo)] { - use constants as tracks; - static DATA: [(TrackId, pallet_referenda::TrackInfo); 21] = [ - ( - tracks::MEMBERS, - pallet_referenda::TrackInfo { - name: "members", - max_deciding: 10, - decision_deposit: 5 * DOLLARS, - prepare_period: 30 * MINUTES, - decision_period: 7 * DAYS, - confirm_period: 30 * MINUTES, - min_enactment_period: 5 * MINUTES, - min_approval: pallet_referenda::Curve::LinearDecreasing { - length: Perbill::from_percent(100), - floor: Perbill::from_percent(50), - ceil: Perbill::from_percent(100), - }, - min_support: pallet_referenda::Curve::LinearDecreasing { - length: Perbill::from_percent(100), - floor: Perbill::from_percent(0), - ceil: Perbill::from_percent(100), - }, - }, - ), - ( - tracks::PROFICIENTS, - pallet_referenda::TrackInfo { - name: "proficient members", - max_deciding: 10, - decision_deposit: 5 * DOLLARS, - prepare_period: 30 * MINUTES, - decision_period: 7 * DAYS, - confirm_period: 30 * MINUTES, - min_enactment_period: 5 * MINUTES, - min_approval: pallet_referenda::Curve::LinearDecreasing { - length: Perbill::from_percent(100), - floor: Perbill::from_percent(50), - ceil: Perbill::from_percent(100), - }, - min_support: pallet_referenda::Curve::LinearDecreasing { - length: Perbill::from_percent(100), - floor: Perbill::from_percent(0), - 
ceil: Perbill::from_percent(100), - }, - }, - ), - ( - tracks::FELLOWS, - pallet_referenda::TrackInfo { - name: "fellows", - max_deciding: 10, - decision_deposit: 5 * DOLLARS, - prepare_period: 30 * MINUTES, - decision_period: 7 * DAYS, - confirm_period: 30 * MINUTES, - min_enactment_period: 5 * MINUTES, - min_approval: pallet_referenda::Curve::LinearDecreasing { - length: Perbill::from_percent(100), - floor: Perbill::from_percent(50), - ceil: Perbill::from_percent(100), - }, - min_support: pallet_referenda::Curve::LinearDecreasing { - length: Perbill::from_percent(100), - floor: Perbill::from_percent(0), - ceil: Perbill::from_percent(100), - }, - }, - ), - ( - tracks::ARCHITECTS, - pallet_referenda::TrackInfo { - name: "architects", - max_deciding: 10, - decision_deposit: 5 * DOLLARS, - prepare_period: 30 * MINUTES, - decision_period: 7 * DAYS, - confirm_period: 30 * MINUTES, - min_enactment_period: 5 * MINUTES, - min_approval: pallet_referenda::Curve::LinearDecreasing { - length: Perbill::from_percent(100), - floor: Perbill::from_percent(50), - ceil: Perbill::from_percent(100), - }, - min_support: pallet_referenda::Curve::LinearDecreasing { - length: Perbill::from_percent(100), - floor: Perbill::from_percent(0), - ceil: Perbill::from_percent(100), - }, - }, - ), - ( - tracks::ARCHITECTS_ADEPT, - pallet_referenda::TrackInfo { - name: "architects adept", - max_deciding: 10, - decision_deposit: 5 * DOLLARS, - prepare_period: 30 * MINUTES, - decision_period: 7 * DAYS, - confirm_period: 30 * MINUTES, - min_enactment_period: 5 * MINUTES, - min_approval: pallet_referenda::Curve::LinearDecreasing { - length: Perbill::from_percent(100), - floor: Perbill::from_percent(50), - ceil: Perbill::from_percent(100), - }, - min_support: pallet_referenda::Curve::LinearDecreasing { - length: Perbill::from_percent(100), - floor: Perbill::from_percent(0), - ceil: Perbill::from_percent(100), - }, - }, - ), - ( - tracks::GRAND_ARCHITECTS, - pallet_referenda::TrackInfo { - name: "grand architects", - max_deciding: 10, - decision_deposit: 5 * DOLLARS, - prepare_period: 30 * MINUTES, - decision_period: 7 * DAYS, - confirm_period: 30 * MINUTES, - min_enactment_period: 5 * MINUTES, - min_approval: pallet_referenda::Curve::LinearDecreasing { - length: Perbill::from_percent(100), - floor: Perbill::from_percent(50), - ceil: Perbill::from_percent(100), - }, - min_support: pallet_referenda::Curve::LinearDecreasing { - length: Perbill::from_percent(100), - floor: Perbill::from_percent(0), - ceil: Perbill::from_percent(100), - }, - }, - ), - ( - tracks::MASTERS, - pallet_referenda::TrackInfo { - name: "masters", - max_deciding: 10, - decision_deposit: 5 * DOLLARS, - prepare_period: 30 * MINUTES, - decision_period: 7 * DAYS, - confirm_period: 30 * MINUTES, - min_enactment_period: 5 * MINUTES, - min_approval: pallet_referenda::Curve::LinearDecreasing { - length: Perbill::from_percent(100), - floor: Perbill::from_percent(50), - ceil: Perbill::from_percent(100), - }, - min_support: pallet_referenda::Curve::LinearDecreasing { - length: Perbill::from_percent(100), - floor: Perbill::from_percent(0), - ceil: Perbill::from_percent(100), - }, - }, - ), - ( - tracks::MASTERS_CONSTANT, - pallet_referenda::TrackInfo { - name: "masters constant", - max_deciding: 10, - decision_deposit: 5 * DOLLARS, - prepare_period: 30 * MINUTES, - decision_period: 7 * DAYS, - confirm_period: 30 * MINUTES, - min_enactment_period: 5 * MINUTES, - min_approval: pallet_referenda::Curve::LinearDecreasing { - length: Perbill::from_percent(100), - floor: 
Perbill::from_percent(50), - ceil: Perbill::from_percent(100), - }, - min_support: pallet_referenda::Curve::LinearDecreasing { - length: Perbill::from_percent(100), - floor: Perbill::from_percent(0), - ceil: Perbill::from_percent(100), - }, - }, - ), - ( - tracks::GRAND_MASTERS, - pallet_referenda::TrackInfo { - name: "grand masters", - max_deciding: 10, - decision_deposit: 5 * DOLLARS, - prepare_period: 30 * MINUTES, - decision_period: 7 * DAYS, - confirm_period: 30 * MINUTES, - min_enactment_period: 5 * MINUTES, - min_approval: pallet_referenda::Curve::LinearDecreasing { - length: Perbill::from_percent(100), - floor: Perbill::from_percent(50), - ceil: Perbill::from_percent(100), - }, - min_support: pallet_referenda::Curve::LinearDecreasing { - length: Perbill::from_percent(100), - floor: Perbill::from_percent(0), - ceil: Perbill::from_percent(100), - }, - }, - ), - ( - tracks::RETAIN_AT_1DAN, - pallet_referenda::TrackInfo { - name: "retain at I Dan", - max_deciding: RETAIN_MAX_DECIDING, - decision_deposit: RETAIN_DECISION_DEPOSIT, - prepare_period: RETAIN_PREPARE_PERIOD, - decision_period: RETAIN_DECISION_PERIOD, - confirm_period: RETAIN_CONFIRM_PERIOD, - min_enactment_period: RETAIN_MIN_ENACTMENT_PERIOD, - min_approval: RETAIN_MIN_APPROVAL, - min_support: RETAIN_MIN_SUPPORT, - }, - ), - ( - tracks::RETAIN_AT_2DAN, - pallet_referenda::TrackInfo { - name: "retain at II Dan", - max_deciding: RETAIN_MAX_DECIDING, - decision_deposit: RETAIN_DECISION_DEPOSIT, - prepare_period: RETAIN_PREPARE_PERIOD, - decision_period: RETAIN_DECISION_PERIOD, - confirm_period: RETAIN_CONFIRM_PERIOD, - min_enactment_period: RETAIN_MIN_ENACTMENT_PERIOD, - min_approval: RETAIN_MIN_APPROVAL, - min_support: RETAIN_MIN_SUPPORT, - }, - ), - ( - tracks::RETAIN_AT_3DAN, - pallet_referenda::TrackInfo { - name: "retain at III Dan", - max_deciding: RETAIN_MAX_DECIDING, - decision_deposit: RETAIN_DECISION_DEPOSIT, - prepare_period: RETAIN_PREPARE_PERIOD, - decision_period: RETAIN_DECISION_PERIOD, - confirm_period: RETAIN_CONFIRM_PERIOD, - min_enactment_period: RETAIN_MIN_ENACTMENT_PERIOD, - min_approval: RETAIN_MIN_APPROVAL, - min_support: RETAIN_MIN_SUPPORT, - }, - ), - ( - tracks::RETAIN_AT_4DAN, - pallet_referenda::TrackInfo { - name: "retain at IV Dan", - max_deciding: RETAIN_MAX_DECIDING, - decision_deposit: RETAIN_DECISION_DEPOSIT, - prepare_period: RETAIN_PREPARE_PERIOD, - decision_period: RETAIN_DECISION_PERIOD, - confirm_period: RETAIN_CONFIRM_PERIOD, - min_enactment_period: RETAIN_MIN_ENACTMENT_PERIOD, - min_approval: RETAIN_MIN_APPROVAL, - min_support: RETAIN_MIN_SUPPORT, - }, - ), - ( - tracks::RETAIN_AT_5DAN, - pallet_referenda::TrackInfo { - name: "retain at V Dan", - max_deciding: RETAIN_MAX_DECIDING, - decision_deposit: RETAIN_DECISION_DEPOSIT, - prepare_period: RETAIN_PREPARE_PERIOD, - decision_period: RETAIN_DECISION_PERIOD, - confirm_period: RETAIN_CONFIRM_PERIOD, - min_enactment_period: RETAIN_MIN_ENACTMENT_PERIOD, - min_approval: RETAIN_MIN_APPROVAL, - min_support: RETAIN_MIN_SUPPORT, - }, - ), - ( - tracks::RETAIN_AT_6DAN, - pallet_referenda::TrackInfo { - name: "retain at VI Dan", - max_deciding: RETAIN_MAX_DECIDING, - decision_deposit: RETAIN_DECISION_DEPOSIT, - prepare_period: RETAIN_PREPARE_PERIOD, - decision_period: RETAIN_DECISION_PERIOD, - confirm_period: RETAIN_CONFIRM_PERIOD, - min_enactment_period: RETAIN_MIN_ENACTMENT_PERIOD, - min_approval: RETAIN_MIN_APPROVAL, - min_support: RETAIN_MIN_SUPPORT, - }, - ), - ( - tracks::PROMOTE_TO_1DAN, - pallet_referenda::TrackInfo { - name: "promote to I 
Dan", - max_deciding: PROMOTE_MAX_DECIDING, - decision_deposit: PROMOTE_DECISION_DEPOSIT, - prepare_period: PROMOTE_PREPARE_PERIOD, - decision_period: PROMOTE_DECISION_PERIOD, - confirm_period: PROMOTE_CONFIRM_PERIOD, - min_enactment_period: PROMOTE_MIN_ENACTMENT_PERIOD, - min_approval: PROMOTE_MIN_APPROVAL, - min_support: PROMOTE_MIN_SUPPORT, - }, - ), - ( - tracks::PROMOTE_TO_2DAN, - pallet_referenda::TrackInfo { - name: "promote to II Dan", - max_deciding: PROMOTE_MAX_DECIDING, - decision_deposit: PROMOTE_DECISION_DEPOSIT, - prepare_period: PROMOTE_PREPARE_PERIOD, - decision_period: PROMOTE_DECISION_PERIOD, - confirm_period: PROMOTE_CONFIRM_PERIOD, - min_enactment_period: PROMOTE_MIN_ENACTMENT_PERIOD, - min_approval: PROMOTE_MIN_APPROVAL, - min_support: PROMOTE_MIN_SUPPORT, - }, - ), - ( - tracks::PROMOTE_TO_3DAN, - pallet_referenda::TrackInfo { - name: "promote to III Dan", - max_deciding: PROMOTE_MAX_DECIDING, - decision_deposit: PROMOTE_DECISION_DEPOSIT, - prepare_period: PROMOTE_PREPARE_PERIOD, - decision_period: PROMOTE_DECISION_PERIOD, - confirm_period: PROMOTE_CONFIRM_PERIOD, - min_enactment_period: PROMOTE_MIN_ENACTMENT_PERIOD, - min_approval: PROMOTE_MIN_APPROVAL, - min_support: PROMOTE_MIN_SUPPORT, - }, - ), - ( - tracks::PROMOTE_TO_4DAN, - pallet_referenda::TrackInfo { - name: "promote to IV Dan", - max_deciding: PROMOTE_MAX_DECIDING, - decision_deposit: PROMOTE_DECISION_DEPOSIT, - prepare_period: PROMOTE_PREPARE_PERIOD, - decision_period: PROMOTE_DECISION_PERIOD, - confirm_period: PROMOTE_CONFIRM_PERIOD, - min_enactment_period: PROMOTE_MIN_ENACTMENT_PERIOD, - min_approval: PROMOTE_MIN_APPROVAL, - min_support: PROMOTE_MIN_SUPPORT, - }, - ), - ( - tracks::PROMOTE_TO_5DAN, - pallet_referenda::TrackInfo { - name: "promote to V Dan", - max_deciding: PROMOTE_MAX_DECIDING, - decision_deposit: PROMOTE_DECISION_DEPOSIT, - prepare_period: PROMOTE_PREPARE_PERIOD, - decision_period: PROMOTE_DECISION_PERIOD, - confirm_period: PROMOTE_CONFIRM_PERIOD, - min_enactment_period: PROMOTE_MIN_ENACTMENT_PERIOD, - min_approval: PROMOTE_MIN_APPROVAL, - min_support: PROMOTE_MIN_SUPPORT, - }, - ), - ( - tracks::PROMOTE_TO_6DAN, - pallet_referenda::TrackInfo { - name: "promote to VI Dan", - max_deciding: PROMOTE_MAX_DECIDING, - decision_deposit: PROMOTE_DECISION_DEPOSIT, - prepare_period: PROMOTE_PREPARE_PERIOD, - decision_period: PROMOTE_DECISION_PERIOD, - confirm_period: PROMOTE_CONFIRM_PERIOD, - min_enactment_period: PROMOTE_MIN_ENACTMENT_PERIOD, - min_approval: PROMOTE_MIN_APPROVAL, - min_support: PROMOTE_MIN_SUPPORT, - }, - ), - ]; - &DATA[..] - } - fn track_for(id: &Self::RuntimeOrigin) -> Result { - use super::origins::Origin; - use constants as tracks; - - #[cfg(feature = "runtime-benchmarks")] - { - // For benchmarks, we enable a root origin. - // It is important that this is not available in production! 
- let root: Self::RuntimeOrigin = frame_system::RawOrigin::Root.into(); - if &root == id { - return Ok(tracks::GRAND_MASTERS) - } - } - - match Origin::try_from(id.clone()) { - Ok(Origin::Members) => Ok(tracks::MEMBERS), - Ok(Origin::Fellowship2Dan) => Ok(tracks::PROFICIENTS), - Ok(Origin::Fellows) => Ok(tracks::FELLOWS), - Ok(Origin::Architects) => Ok(tracks::ARCHITECTS), - Ok(Origin::Fellowship5Dan) => Ok(tracks::ARCHITECTS_ADEPT), - Ok(Origin::Fellowship6Dan) => Ok(tracks::GRAND_ARCHITECTS), - Ok(Origin::Masters) => Ok(tracks::MASTERS), - Ok(Origin::Fellowship8Dan) => Ok(tracks::MASTERS_CONSTANT), - Ok(Origin::Fellowship9Dan) => Ok(tracks::GRAND_MASTERS), - - Ok(Origin::RetainAt1Dan) => Ok(tracks::RETAIN_AT_1DAN), - Ok(Origin::RetainAt2Dan) => Ok(tracks::RETAIN_AT_2DAN), - Ok(Origin::RetainAt3Dan) => Ok(tracks::RETAIN_AT_3DAN), - Ok(Origin::RetainAt4Dan) => Ok(tracks::RETAIN_AT_4DAN), - Ok(Origin::RetainAt5Dan) => Ok(tracks::RETAIN_AT_5DAN), - Ok(Origin::RetainAt6Dan) => Ok(tracks::RETAIN_AT_6DAN), - - Ok(Origin::PromoteTo1Dan) => Ok(tracks::PROMOTE_TO_1DAN), - Ok(Origin::PromoteTo2Dan) => Ok(tracks::PROMOTE_TO_2DAN), - Ok(Origin::PromoteTo3Dan) => Ok(tracks::PROMOTE_TO_3DAN), - Ok(Origin::PromoteTo4Dan) => Ok(tracks::PROMOTE_TO_4DAN), - Ok(Origin::PromoteTo5Dan) => Ok(tracks::PROMOTE_TO_5DAN), - Ok(Origin::PromoteTo6Dan) => Ok(tracks::PROMOTE_TO_6DAN), - - _ => Err(()), - } - } -} -pallet_referenda::impl_tracksinfo_get!(TracksInfo, Balance, BlockNumber); diff --git a/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/impls.rs b/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/impls.rs deleted file mode 100644 index 9f4c2a6a4c9..00000000000 --- a/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/impls.rs +++ /dev/null @@ -1,229 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -use crate::OriginCaller; -use frame_support::{ - dispatch::DispatchResultWithPostInfo, - traits::{Currency, Get, Imbalance, OnUnbalanced, OriginTrait, PrivilegeCmp}, - weights::Weight, -}; -use log; -use pallet_alliance::{ProposalIndex, ProposalProvider}; -use parachains_common::impls::NegativeImbalance; -use sp_runtime::DispatchError; -use sp_std::{cmp::Ordering, marker::PhantomData, prelude::*}; -use xcm::latest::{Fungibility, Junction, Parent}; - -type AccountIdOf = ::AccountId; - -type ProposalOf = >::Proposal; - -type HashOf = ::Hash; - -/// Type alias to conveniently refer to the `Currency::Balance` associated type. -pub type BalanceOf = - as Currency<::AccountId>>::Balance; - -/// Implements `OnUnbalanced::on_unbalanced` to teleport slashed assets to relay chain treasury -/// account. 
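The fellowship track IDs above encode the minimum voting rank by a fixed offset: regular tracks 1-9 use their own ID, retention tracks 11-16 subtract 8, and promotion tracks 21-26 subtract 18. A sketch of the `MinRankOfClass` conversion under exactly those constants:

```rust
/// Maps a fellowship track ID to the minimum rank allowed to vote on it.
fn min_rank_of_class(track: u16) -> u16 {
    match track {
        regular @ 1..=9 => regular,
        retention @ 11..=16 => retention - 8,
        promotion @ 21..=26 => promotion - 18,
        _ => u16::MAX, // unknown tracks are effectively unvotable
    }
}

fn main() {
    assert_eq!(min_rank_of_class(3), 3);  // "fellows" track
    assert_eq!(min_rank_of_class(13), 5); // retain at III Dan, decided by rank 5
    assert_eq!(min_rank_of_class(23), 5); // promote to III Dan, likewise
}
```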
-pub struct ToParentTreasury( - PhantomData<(TreasuryAccount, PalletAccount, T)>, -); - -impl OnUnbalanced> - for ToParentTreasury -where - T: pallet_balances::Config + pallet_xcm::Config + frame_system::Config, - <::RuntimeOrigin as OriginTrait>::AccountId: From>, - [u8; 32]: From<::AccountId>, - TreasuryAccount: Get>, - PalletAccount: Get>, - BalanceOf: Into, -{ - fn on_unbalanced(amount: NegativeImbalance) { - let amount = match amount.drop_zero() { - Ok(..) => return, - Err(amount) => amount, - }; - let imbalance = amount.peek(); - let pallet_acc: AccountIdOf = PalletAccount::get(); - let treasury_acc: AccountIdOf = TreasuryAccount::get(); - - >::resolve_creating(&pallet_acc, amount); - - let result = >::teleport_assets( - <::RuntimeOrigin>::signed(pallet_acc.into()), - Box::new(Parent.into()), - Box::new( - Junction::AccountId32 { network: None, id: treasury_acc.into() } - .into_location() - .into(), - ), - Box::new((Parent, imbalance).into()), - 0, - ); - - if let Err(err) = result { - log::warn!("Failed to teleport slashed assets: {:?}", err); - } - } -} - -/// Proposal provider for alliance pallet. -/// Adapter from collective pallet to alliance proposal provider trait. -pub struct AllianceProposalProvider(PhantomData<(T, I)>); - -impl ProposalProvider, HashOf, ProposalOf> - for AllianceProposalProvider -where - T: pallet_collective::Config + frame_system::Config, - I: 'static, -{ - fn propose_proposal( - who: AccountIdOf, - threshold: u32, - proposal: Box>, - length_bound: u32, - ) -> Result<(u32, u32), DispatchError> { - pallet_collective::Pallet::::do_propose_proposed( - who, - threshold, - proposal, - length_bound, - ) - } - - fn vote_proposal( - who: AccountIdOf, - proposal: HashOf, - index: ProposalIndex, - approve: bool, - ) -> Result { - pallet_collective::Pallet::::do_vote(who, proposal, index, approve) - } - - fn close_proposal( - proposal_hash: HashOf, - proposal_index: ProposalIndex, - proposal_weight_bound: Weight, - length_bound: u32, - ) -> DispatchResultWithPostInfo { - pallet_collective::Pallet::::do_close( - proposal_hash, - proposal_index, - proposal_weight_bound, - length_bound, - ) - } - - fn proposal_of(proposal_hash: HashOf) -> Option> { - pallet_collective::Pallet::::proposal_of(proposal_hash) - } -} - -/// Used to compare the privilege of an origin inside the scheduler. -pub struct EqualOrGreatestRootCmp; - -impl PrivilegeCmp for EqualOrGreatestRootCmp { - fn cmp_privilege(left: &OriginCaller, right: &OriginCaller) -> Option { - if left == right { - return Some(Ordering::Equal) - } - match (left, right) { - // Root is greater than anything. - (OriginCaller::system(frame_system::RawOrigin::Root), _) => Some(Ordering::Greater), - _ => None, - } - } -} - -#[cfg(feature = "runtime-benchmarks")] -pub mod benchmarks { - use super::*; - use crate::ParachainSystem; - use cumulus_primitives_core::{ChannelStatus, GetChannelInfo}; - use frame_support::traits::{ - fungible, - tokens::{Pay, PaymentStatus}, - }; - use pallet_ranked_collective::Rank; - use parachains_common::{AccountId, Balance}; - use sp_runtime::traits::Convert; - - /// Rank to salary conversion helper type. - pub struct RankToSalary(PhantomData); - impl Convert for RankToSalary - where - Fungible: fungible::Inspect, - { - fn convert(r: Rank) -> Balance { - Balance::from(r).saturating_mul(Fungible::minimum_balance()) - } - } - - /// Trait for setting up any prerequisites for successful execution of benchmarks. 
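The benchmark-only `RankToSalary` helper above simply multiplies the rank by the currency's minimum balance, saturating on overflow. A plain sketch with a placeholder deposit value (the real runtime constant is not reproduced here):

```rust
/// Benchmark-style rank-to-salary conversion: rank times the minimum balance.
fn rank_to_salary(rank: u16, minimum_balance: u128) -> u128 {
    u128::from(rank).saturating_mul(minimum_balance)
}

fn main() {
    let placeholder_ed: u128 = 1_000_000_000; // hypothetical minimum balance
    assert_eq!(rank_to_salary(3, placeholder_ed), 3_000_000_000);
    assert_eq!(rank_to_salary(0, placeholder_ed), 0);
}
```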
- pub trait EnsureSuccessful { - fn ensure_successful(); - } - - /// Implementation of the [`EnsureSuccessful`] trait which opens an HRMP channel between - /// the Collectives and a parachain with a given ID. - pub struct OpenHrmpChannel(PhantomData); - impl> EnsureSuccessful for OpenHrmpChannel { - fn ensure_successful() { - if let ChannelStatus::Closed = ParachainSystem::get_channel_status(I::get().into()) { - ParachainSystem::open_outbound_hrmp_channel_for_benchmarks_or_tests(I::get().into()) - } - } - } - - /// Type that wraps a type implementing the [`Pay`] trait to decorate its - /// [`Pay::ensure_successful`] function with a provided implementation of the - /// [`EnsureSuccessful`] trait. - pub struct PayWithEnsure(PhantomData<(O, E)>); - impl Pay for PayWithEnsure - where - O: Pay, - E: EnsureSuccessful, - { - type AssetKind = O::AssetKind; - type Balance = O::Balance; - type Beneficiary = O::Beneficiary; - type Error = O::Error; - type Id = O::Id; - - fn pay( - who: &Self::Beneficiary, - asset_kind: Self::AssetKind, - amount: Self::Balance, - ) -> Result { - O::pay(who, asset_kind, amount) - } - fn check_payment(id: Self::Id) -> PaymentStatus { - O::check_payment(id) - } - fn ensure_successful( - who: &Self::Beneficiary, - asset_kind: Self::AssetKind, - amount: Self::Balance, - ) { - E::ensure_successful(); - O::ensure_successful(who, asset_kind, amount) - } - fn ensure_concluded(id: Self::Id) { - O::ensure_concluded(id) - } - } -} diff --git a/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/lib.rs b/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/lib.rs deleted file mode 100644 index 90c44f8bfc5..00000000000 --- a/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/lib.rs +++ /dev/null @@ -1,1031 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! # Collectives Parachain -//! -//! This parachain is for collectives that serve the Polkadot network. -//! Each collective is defined by a specialized (possibly instanced) pallet. -//! -//! ### Governance -//! -//! As a system parachain, Collectives defers its governance (namely, its `Root` origin), to -//! its Relay Chain parent, Polkadot. -//! -//! ### Collator Selection -//! -//! Collectives uses `pallet-collator-selection`, a simple first-come-first-served registration -//! system where collators can reserve a small bond to join the block producer set. There is no -//! slashing. Collective members are generally expected to run collators. - -#![cfg_attr(not(feature = "std"), no_std)] -#![recursion_limit = "256"] - -// Make the WASM binary available. -#[cfg(feature = "std")] -include!(concat!(env!("OUT_DIR"), "/wasm_binary.rs")); - -pub mod ambassador; -pub mod impls; -mod weights; -pub mod xcm_config; -// Fellowship configurations. 
-pub mod fellowship; -pub use ambassador::pallet_ambassador_origins; - -use cumulus_pallet_parachain_system::RelayNumberStrictlyIncreases; -use fellowship::{ - migration::import_kusama_fellowship, pallet_fellowship_origins, Fellows, - FellowshipCollectiveInstance, -}; -use impls::{AllianceProposalProvider, EqualOrGreatestRootCmp, ToParentTreasury}; -use sp_api::impl_runtime_apis; -use sp_core::{crypto::KeyTypeId, OpaqueMetadata}; -use sp_runtime::{ - create_runtime_str, generic, impl_opaque_keys, - traits::{AccountIdConversion, AccountIdLookup, BlakeTwo256, Block as BlockT}, - transaction_validity::{TransactionSource, TransactionValidity}, - ApplyExtrinsicResult, Perbill, -}; - -use sp_std::prelude::*; -#[cfg(feature = "std")] -use sp_version::NativeVersion; -use sp_version::RuntimeVersion; - -use codec::{Decode, Encode, MaxEncodedLen}; -use cumulus_primitives_core::{AggregateMessageOrigin, ParaId}; -use frame_support::{ - construct_runtime, - dispatch::DispatchClass, - genesis_builder_helper::{build_config, create_default_config}, - parameter_types, - traits::{ - fungible::HoldConsideration, ConstBool, ConstU16, ConstU32, ConstU64, ConstU8, - EitherOfDiverse, InstanceFilter, LinearStoragePrice, TransformOrigin, - }, - weights::{ConstantMultiplier, Weight}, - PalletId, -}; -use frame_system::{ - limits::{BlockLength, BlockWeights}, - EnsureRoot, -}; -pub use parachains_common as common; -use parachains_common::{ - impls::DealWithFees, - message_queue::*, - polkadot::{account::*, consensus::*, currency::*, fee::WeightToFee}, - AccountId, AuraId, Balance, BlockNumber, Hash, Header, Nonce, Signature, - AVERAGE_ON_INITIALIZE_RATIO, DAYS, HOURS, MAXIMUM_BLOCK_WEIGHT, MINUTES, NORMAL_DISPATCH_RATIO, - SLOT_DURATION, -}; -use sp_runtime::RuntimeDebug; -use xcm_config::{GovernanceLocation, XcmOriginToTransactDispatchOrigin}; - -#[cfg(any(feature = "std", test))] -pub use sp_runtime::BuildStorage; - -// Polkadot imports -use pallet_xcm::{EnsureXcm, IsVoiceOfBody}; -use polkadot_runtime_common::{BlockHashCount, SlowAdjustingFeeUpdate}; -use xcm::latest::{prelude::*, BodyId}; - -use weights::{BlockExecutionWeight, ExtrinsicBaseWeight, RocksDbWeight}; - -impl_opaque_keys! { - pub struct SessionKeys { - pub aura: Aura, - } -} - -#[sp_version::runtime_version] -pub const VERSION: RuntimeVersion = RuntimeVersion { - spec_name: create_runtime_str!("collectives"), - impl_name: create_runtime_str!("collectives"), - authoring_version: 1, - spec_version: 1_004_000, - impl_version: 0, - apis: RUNTIME_API_VERSIONS, - transaction_version: 5, - state_version: 0, -}; - -/// The version information used to identify this runtime when compiled natively. -#[cfg(feature = "std")] -pub fn native_version() -> NativeVersion { - NativeVersion { runtime_version: VERSION, can_author_with: Default::default() } -} - -/// Privileged origin that represents Root or more than two thirds of the Alliance. -pub type RootOrAllianceTwoThirdsMajority = EitherOfDiverse< - EnsureRoot, - pallet_collective::EnsureProportionMoreThan, ->; - -parameter_types! 
{ - pub const Version: RuntimeVersion = VERSION; - pub RuntimeBlockLength: BlockLength = - BlockLength::max_with_normal_ratio(5 * 1024 * 1024, NORMAL_DISPATCH_RATIO); - pub RuntimeBlockWeights: BlockWeights = BlockWeights::builder() - .base_block(BlockExecutionWeight::get()) - .for_class(DispatchClass::all(), |weights| { - weights.base_extrinsic = ExtrinsicBaseWeight::get(); - }) - .for_class(DispatchClass::Normal, |weights| { - weights.max_total = Some(NORMAL_DISPATCH_RATIO * MAXIMUM_BLOCK_WEIGHT); - }) - .for_class(DispatchClass::Operational, |weights| { - weights.max_total = Some(MAXIMUM_BLOCK_WEIGHT); - // Operational transactions have some extra reserved space, so that they - // are included even if block reached `MAXIMUM_BLOCK_WEIGHT`. - weights.reserved = Some( - MAXIMUM_BLOCK_WEIGHT - NORMAL_DISPATCH_RATIO * MAXIMUM_BLOCK_WEIGHT - ); - }) - .avg_block_initialization(AVERAGE_ON_INITIALIZE_RATIO) - .build_or_panic(); -} - -// Configure FRAME pallets to include in runtime. -impl frame_system::Config for Runtime { - type BaseCallFilter = frame_support::traits::Everything; - type BlockWeights = RuntimeBlockWeights; - type BlockLength = RuntimeBlockLength; - type AccountId = AccountId; - type RuntimeCall = RuntimeCall; - type Lookup = AccountIdLookup; - type Nonce = Nonce; - type Hash = Hash; - type Hashing = BlakeTwo256; - type Block = Block; - type RuntimeEvent = RuntimeEvent; - type RuntimeOrigin = RuntimeOrigin; - type BlockHashCount = BlockHashCount; - type DbWeight = RocksDbWeight; - type Version = Version; - type PalletInfo = PalletInfo; - type OnNewAccount = (); - type OnKilledAccount = (); - type AccountData = pallet_balances::AccountData; - type SystemWeightInfo = weights::frame_system::WeightInfo; - type SS58Prefix = ConstU16<0>; - type OnSetCode = cumulus_pallet_parachain_system::ParachainSetCode; - type MaxConsumers = frame_support::traits::ConstU32<16>; -} - -impl pallet_timestamp::Config for Runtime { - /// A timestamp: milliseconds since the unix epoch. - type Moment = u64; - type OnTimestampSet = Aura; - type MinimumPeriod = ConstU64<{ SLOT_DURATION / 2 }>; - type WeightInfo = weights::pallet_timestamp::WeightInfo; -} - -impl pallet_authorship::Config for Runtime { - type FindAuthor = pallet_session::FindAccountFromAuthorIndex; - type EventHandler = (CollatorSelection,); -} - -parameter_types! { - pub const ExistentialDeposit: Balance = EXISTENTIAL_DEPOSIT; -} - -impl pallet_balances::Config for Runtime { - type MaxLocks = ConstU32<50>; - /// The type for recording an account's balance. - type Balance = Balance; - /// The ubiquitous event type. - type RuntimeEvent = RuntimeEvent; - type DustRemoval = (); - type ExistentialDeposit = ExistentialDeposit; - type AccountStore = System; - type WeightInfo = weights::pallet_balances::WeightInfo; - type MaxReserves = ConstU32<50>; - type ReserveIdentifier = [u8; 8]; - type RuntimeHoldReason = RuntimeHoldReason; - type RuntimeFreezeReason = RuntimeFreezeReason; - type FreezeIdentifier = (); - type MaxHolds = ConstU32<1>; - type MaxFreezes = ConstU32<0>; -} - -parameter_types! 
{ - /// Relay Chain `TransactionByteFee` / 10 - pub const TransactionByteFee: Balance = MILLICENTS; -} - -impl pallet_transaction_payment::Config for Runtime { - type RuntimeEvent = RuntimeEvent; - type OnChargeTransaction = - pallet_transaction_payment::CurrencyAdapter>; - type WeightToFee = WeightToFee; - type LengthToFee = ConstantMultiplier; - type FeeMultiplierUpdate = SlowAdjustingFeeUpdate; - type OperationalFeeMultiplier = ConstU8<5>; -} - -parameter_types! { - // One storage item; key size is 32; value is size 4+4+16+32 bytes = 56 bytes. - pub const DepositBase: Balance = deposit(1, 88); - // Additional storage item size of 32 bytes. - pub const DepositFactor: Balance = deposit(0, 32); -} - -impl pallet_multisig::Config for Runtime { - type RuntimeEvent = RuntimeEvent; - type RuntimeCall = RuntimeCall; - type Currency = Balances; - type DepositBase = DepositBase; - type DepositFactor = DepositFactor; - type MaxSignatories = ConstU32<100>; - type WeightInfo = weights::pallet_multisig::WeightInfo; -} - -impl pallet_utility::Config for Runtime { - type RuntimeEvent = RuntimeEvent; - type RuntimeCall = RuntimeCall; - type PalletsOrigin = OriginCaller; - type WeightInfo = weights::pallet_utility::WeightInfo; -} - -parameter_types! { - // One storage item; key size 32, value size 8; . - pub const ProxyDepositBase: Balance = deposit(1, 40); - // Additional storage item size of 33 bytes. - pub const ProxyDepositFactor: Balance = deposit(0, 33); - // One storage item; key size 32, value size 16 - pub const AnnouncementDepositBase: Balance = deposit(1, 48); - pub const AnnouncementDepositFactor: Balance = deposit(0, 66); -} - -/// The type used to represent the kinds of proxying allowed. -#[derive( - Copy, - Clone, - Eq, - PartialEq, - Ord, - PartialOrd, - Encode, - Decode, - RuntimeDebug, - MaxEncodedLen, - scale_info::TypeInfo, -)] -pub enum ProxyType { - /// Fully permissioned proxy. Can execute any call on behalf of _proxied_. - Any, - /// Can execute any call that does not transfer funds. - NonTransfer, - /// Proxy with the ability to reject time-delay proxy announcements. - CancelProxy, - /// Collator selection proxy. Can execute calls related to collator selection mechanism. - Collator, - /// Alliance proxy. Allows calls related to the Alliance. - Alliance, - /// Fellowship proxy. Allows calls related to the Fellowship. - Fellowship, - /// Ambassador proxy. Allows calls related to the Ambassador Program. - Ambassador, -} -impl Default for ProxyType { - fn default() -> Self { - Self::Any - } -} -impl InstanceFilter for ProxyType { - fn filter(&self, c: &RuntimeCall) -> bool { - match self { - ProxyType::Any => true, - ProxyType::NonTransfer => !matches!(c, RuntimeCall::Balances { .. }), - ProxyType::CancelProxy => matches!( - c, - RuntimeCall::Proxy(pallet_proxy::Call::reject_announcement { .. }) | - RuntimeCall::Utility { .. } | - RuntimeCall::Multisig { .. } - ), - ProxyType::Collator => matches!( - c, - RuntimeCall::CollatorSelection { .. } | - RuntimeCall::Utility { .. } | - RuntimeCall::Multisig { .. } - ), - ProxyType::Alliance => matches!( - c, - RuntimeCall::AllianceMotion { .. } | - RuntimeCall::Alliance { .. } | - RuntimeCall::Utility { .. } | - RuntimeCall::Multisig { .. } - ), - ProxyType::Fellowship => matches!( - c, - RuntimeCall::FellowshipCollective { .. } | - RuntimeCall::FellowshipReferenda { .. } | - RuntimeCall::FellowshipCore { .. } | - RuntimeCall::FellowshipSalary { .. } | - RuntimeCall::Utility { .. } | - RuntimeCall::Multisig { .. 
} - ), - ProxyType::Ambassador => matches!( - c, - RuntimeCall::AmbassadorCollective { .. } | - RuntimeCall::AmbassadorReferenda { .. } | - RuntimeCall::AmbassadorContent { .. } | - RuntimeCall::AmbassadorCore { .. } | - RuntimeCall::AmbassadorSalary { .. } | - RuntimeCall::Utility { .. } | - RuntimeCall::Multisig { .. } - ), - } - } - fn is_superset(&self, o: &Self) -> bool { - match (self, o) { - (x, y) if x == y => true, - (ProxyType::Any, _) => true, - (_, ProxyType::Any) => false, - (ProxyType::NonTransfer, _) => true, - _ => false, - } - } -} - -impl pallet_proxy::Config for Runtime { - type RuntimeEvent = RuntimeEvent; - type RuntimeCall = RuntimeCall; - type Currency = Balances; - type ProxyType = ProxyType; - type ProxyDepositBase = ProxyDepositBase; - type ProxyDepositFactor = ProxyDepositFactor; - type MaxProxies = ConstU32<32>; - type WeightInfo = weights::pallet_proxy::WeightInfo; - type MaxPending = ConstU32<32>; - type CallHasher = BlakeTwo256; - type AnnouncementDepositBase = AnnouncementDepositBase; - type AnnouncementDepositFactor = AnnouncementDepositFactor; -} - -parameter_types! { - pub const ReservedXcmpWeight: Weight = MAXIMUM_BLOCK_WEIGHT.saturating_div(4); - pub const ReservedDmpWeight: Weight = MAXIMUM_BLOCK_WEIGHT.saturating_div(4); -} - -impl cumulus_pallet_parachain_system::Config for Runtime { - type WeightInfo = weights::cumulus_pallet_parachain_system::WeightInfo; - type RuntimeEvent = RuntimeEvent; - type OnSystemEvent = (); - type SelfParaId = parachain_info::Pallet; - type DmpQueue = frame_support::traits::EnqueueWithOrigin; - type ReservedDmpWeight = ReservedDmpWeight; - type OutboundXcmpMessageSource = XcmpQueue; - type XcmpMessageHandler = XcmpQueue; - type ReservedXcmpWeight = ReservedXcmpWeight; - type CheckAssociatedRelayNumber = RelayNumberStrictlyIncreases; - type ConsensusHook = cumulus_pallet_aura_ext::FixedVelocityConsensusHook< - Runtime, - RELAY_CHAIN_SLOT_DURATION_MILLIS, - BLOCK_PROCESSING_VELOCITY, - UNINCLUDED_SEGMENT_CAPACITY, - >; -} - -impl parachain_info::Config for Runtime {} - -parameter_types! { - pub MessageQueueServiceWeight: Weight = Perbill::from_percent(35) * RuntimeBlockWeights::get().max_block; -} - -impl pallet_message_queue::Config for Runtime { - type RuntimeEvent = RuntimeEvent; - type WeightInfo = weights::pallet_message_queue::WeightInfo; - #[cfg(feature = "runtime-benchmarks")] - type MessageProcessor = pallet_message_queue::mock_helpers::NoopMessageProcessor< - cumulus_primitives_core::AggregateMessageOrigin, - >; - #[cfg(not(feature = "runtime-benchmarks"))] - type MessageProcessor = xcm_builder::ProcessXcmMessage< - AggregateMessageOrigin, - xcm_executor::XcmExecutor, - RuntimeCall, - >; - type Size = u32; - // The XCMP queue pallet is only ever able to handle the `Sibling(ParaId)` origin: - type QueueChangeHandler = NarrowOriginToSibling; - type QueuePausedQuery = NarrowOriginToSibling; - type HeapSize = sp_core::ConstU32<{ 64 * 1024 }>; - type MaxStale = sp_core::ConstU32<8>; - type ServiceWeight = MessageQueueServiceWeight; -} - -impl cumulus_pallet_aura_ext::Config for Runtime {} - -parameter_types! { - /// The asset ID for the asset that we use to pay for message delivery fees. - pub FeeAssetId: AssetId = Concrete(xcm_config::DotLocation::get()); - /// The base fee for the message delivery fees. 
- pub const BaseDeliveryFee: u128 = CENTS.saturating_mul(3); -} - -pub type PriceForSiblingParachainDelivery = polkadot_runtime_common::xcm_sender::ExponentialPrice< - FeeAssetId, - BaseDeliveryFee, - TransactionByteFee, - XcmpQueue, ->; - -impl cumulus_pallet_xcmp_queue::Config for Runtime { - type RuntimeEvent = RuntimeEvent; - type ChannelInfo = ParachainSystem; - type VersionWrapper = PolkadotXcm; - // Enqueue XCMP messages from siblings for later processing. - type XcmpQueue = TransformOrigin; - type MaxInboundSuspended = sp_core::ConstU32<1_000>; - type ControllerOrigin = EitherOfDiverse, Fellows>; - type ControllerOriginConverter = XcmOriginToTransactDispatchOrigin; - type WeightInfo = weights::cumulus_pallet_xcmp_queue::WeightInfo; - type PriceForSiblingDelivery = - polkadot_runtime_common::xcm_sender::NoPriceForMessageDelivery; -} - -parameter_types! { - pub const RelayOrigin: AggregateMessageOrigin = AggregateMessageOrigin::Parent; -} - -impl cumulus_pallet_dmp_queue::Config for Runtime { - type WeightInfo = weights::cumulus_pallet_dmp_queue::WeightInfo; - type RuntimeEvent = RuntimeEvent; - type DmpSink = frame_support::traits::EnqueueWithOrigin; -} - -pub const PERIOD: u32 = 6 * HOURS; -pub const OFFSET: u32 = 0; - -impl pallet_session::Config for Runtime { - type RuntimeEvent = RuntimeEvent; - type ValidatorId = ::AccountId; - // we don't have stash and controller, thus we don't need the convert as well. - type ValidatorIdOf = pallet_collator_selection::IdentityCollator; - type ShouldEndSession = pallet_session::PeriodicSessions, ConstU32>; - type NextSessionRotation = pallet_session::PeriodicSessions, ConstU32>; - type SessionManager = CollatorSelection; - // Essentially just Aura, but let's be pedantic. - type SessionHandler = ::KeyTypeIdProviders; - type Keys = SessionKeys; - type WeightInfo = weights::pallet_session::WeightInfo; -} - -impl pallet_aura::Config for Runtime { - type AuthorityId = AuraId; - type DisabledValidators = (); - type MaxAuthorities = ConstU32<100_000>; - type AllowMultipleBlocksPerSlot = ConstBool; - #[cfg(feature = "experimental")] - type SlotDuration = pallet_aura::MinimumPeriodTimesTwo; -} - -parameter_types! { - pub const PotId: PalletId = PalletId(*b"PotStake"); - pub const SessionLength: BlockNumber = 6 * HOURS; - // `StakingAdmin` pluralistic body. - pub const StakingAdminBodyId: BodyId = BodyId::Defense; -} - -/// We allow root and the `StakingAdmin` to execute privileged collator selection operations. -pub type CollatorSelectionUpdateOrigin = EitherOfDiverse< - EnsureRoot, - EnsureXcm>, ->; - -impl pallet_collator_selection::Config for Runtime { - type RuntimeEvent = RuntimeEvent; - type Currency = Balances; - type UpdateOrigin = CollatorSelectionUpdateOrigin; - type PotId = PotId; - type MaxCandidates = ConstU32<100>; - type MinEligibleCollators = ConstU32<4>; - type MaxInvulnerables = ConstU32<20>; - // should be a multiple of session or things will get inconsistent - type KickThreshold = ConstU32; - type ValidatorId = ::AccountId; - type ValidatorIdOf = pallet_collator_selection::IdentityCollator; - type ValidatorRegistration = Session; - type WeightInfo = weights::pallet_collator_selection::WeightInfo; -} - -pub const ALLIANCE_MOTION_DURATION: BlockNumber = 5 * DAYS; - -parameter_types! 
{ - pub const AllianceMotionDuration: BlockNumber = ALLIANCE_MOTION_DURATION; - pub MaxProposalWeight: Weight = Perbill::from_percent(50) * RuntimeBlockWeights::get().max_block; -} -pub const ALLIANCE_MAX_PROPOSALS: u32 = 100; -pub const ALLIANCE_MAX_MEMBERS: u32 = 100; - -type AllianceCollective = pallet_collective::Instance1; -impl pallet_collective::Config for Runtime { - type RuntimeOrigin = RuntimeOrigin; - type Proposal = RuntimeCall; - type RuntimeEvent = RuntimeEvent; - type MotionDuration = AllianceMotionDuration; - type MaxProposals = ConstU32; - type MaxMembers = ConstU32; - type DefaultVote = pallet_collective::MoreThanMajorityThenPrimeDefaultVote; - type SetMembersOrigin = EnsureRoot; - type WeightInfo = weights::pallet_collective::WeightInfo; - type MaxProposalWeight = MaxProposalWeight; -} - -pub const MAX_FELLOWS: u32 = ALLIANCE_MAX_MEMBERS; -pub const MAX_ALLIES: u32 = 100; - -parameter_types! { - pub const AllyDeposit: Balance = 1_000 * UNITS; // 1,000 DOT bond to join as an Ally - // The Alliance pallet account, used as a temporary place to deposit a slashed imbalance - // before the teleport to the Treasury. - pub AlliancePalletAccount: AccountId = ALLIANCE_PALLET_ID.into_account_truncating(); - pub PolkadotTreasuryAccount: AccountId = POLKADOT_TREASURY_PALLET_ID.into_account_truncating(); - // The number of blocks a member must wait between giving a retirement notice and retiring. - // Supposed to be greater than time required to `kick_member` with alliance motion. - pub const AllianceRetirementPeriod: BlockNumber = (90 * DAYS) + ALLIANCE_MOTION_DURATION; -} - -impl pallet_alliance::Config for Runtime { - type RuntimeEvent = RuntimeEvent; - type Proposal = RuntimeCall; - type AdminOrigin = RootOrAllianceTwoThirdsMajority; - type MembershipManager = RootOrAllianceTwoThirdsMajority; - type AnnouncementOrigin = RootOrAllianceTwoThirdsMajority; - type Currency = Balances; - type Slashed = ToParentTreasury; - type InitializeMembers = AllianceMotion; - type MembershipChanged = AllianceMotion; - type RetirementPeriod = AllianceRetirementPeriod; - type IdentityVerifier = (); // Don't block accounts on identity criteria - type ProposalProvider = AllianceProposalProvider; - type MaxProposals = ConstU32; - type MaxFellows = ConstU32; - type MaxAllies = ConstU32; - type MaxUnscrupulousItems = ConstU32<100>; - type MaxWebsiteUrlLength = ConstU32<255>; - type MaxAnnouncementsCount = ConstU32<100>; - type MaxMembersCount = ConstU32; - type AllyDeposit = AllyDeposit; - type WeightInfo = weights::pallet_alliance::WeightInfo; -} - -parameter_types! { - pub MaximumSchedulerWeight: Weight = Perbill::from_percent(80) * RuntimeBlockWeights::get().max_block; -} - -#[cfg(not(feature = "runtime-benchmarks"))] -parameter_types! { - pub const MaxScheduledPerBlock: u32 = 50; -} - -#[cfg(feature = "runtime-benchmarks")] -parameter_types! { - pub const MaxScheduledPerBlock: u32 = 200; -} - -impl pallet_scheduler::Config for Runtime { - type RuntimeOrigin = RuntimeOrigin; - type RuntimeEvent = RuntimeEvent; - type PalletsOrigin = OriginCaller; - type RuntimeCall = RuntimeCall; - type MaximumWeight = MaximumSchedulerWeight; - type ScheduleOrigin = EnsureRoot; - type MaxScheduledPerBlock = MaxScheduledPerBlock; - type WeightInfo = weights::pallet_scheduler::WeightInfo; - type OriginPrivilegeCmp = EqualOrGreatestRootCmp; - type Preimages = Preimage; -} - -parameter_types! 
{ - pub const PreimageBaseDeposit: Balance = deposit(2, 64); - pub const PreimageByteDeposit: Balance = deposit(0, 1); - pub const PreimageHoldReason: RuntimeHoldReason = RuntimeHoldReason::Preimage(pallet_preimage::HoldReason::Preimage); -} - -impl pallet_preimage::Config for Runtime { - type WeightInfo = weights::pallet_preimage::WeightInfo; - type RuntimeEvent = RuntimeEvent; - type Currency = Balances; - type ManagerOrigin = EnsureRoot; - type Consideration = HoldConsideration< - AccountId, - Balances, - PreimageHoldReason, - LinearStoragePrice, - >; -} - -// Create the runtime by composing the FRAME pallets that were previously configured. -construct_runtime!( - pub enum Runtime - { - // System support stuff. - System: frame_system::{Pallet, Call, Config, Storage, Event} = 0, - ParachainSystem: cumulus_pallet_parachain_system::{ - Pallet, Call, Config, Storage, Inherent, Event, ValidateUnsigned, - } = 1, - Timestamp: pallet_timestamp::{Pallet, Call, Storage, Inherent} = 2, - ParachainInfo: parachain_info::{Pallet, Storage, Config} = 3, - - // Monetary stuff. - Balances: pallet_balances::{Pallet, Call, Storage, Config, Event} = 10, - TransactionPayment: pallet_transaction_payment::{Pallet, Storage, Event} = 11, - - // Collator support. the order of these 5 are important and shall not change. - Authorship: pallet_authorship::{Pallet, Storage} = 20, - CollatorSelection: pallet_collator_selection::{Pallet, Call, Storage, Event, Config} = 21, - Session: pallet_session::{Pallet, Call, Storage, Event, Config} = 22, - Aura: pallet_aura::{Pallet, Storage, Config} = 23, - AuraExt: cumulus_pallet_aura_ext::{Pallet, Storage, Config} = 24, - - // XCM helpers. - XcmpQueue: cumulus_pallet_xcmp_queue::{Pallet, Call, Storage, Event} = 30, - PolkadotXcm: pallet_xcm::{Pallet, Call, Storage, Event, Origin, Config} = 31, - CumulusXcm: cumulus_pallet_xcm::{Pallet, Event, Origin} = 32, - DmpQueue: cumulus_pallet_dmp_queue::{Pallet, Call, Storage, Event} = 33, - MessageQueue: pallet_message_queue::{Pallet, Call, Storage, Event} = 34, - - // Handy utilities. - Utility: pallet_utility::{Pallet, Call, Event} = 40, - Multisig: pallet_multisig::{Pallet, Call, Storage, Event} = 41, - Proxy: pallet_proxy::{Pallet, Call, Storage, Event} = 42, - Preimage: pallet_preimage::{Pallet, Call, Storage, Event, HoldReason} = 43, - Scheduler: pallet_scheduler::{Pallet, Call, Storage, Event} = 44, - - // The main stage. - - // The Alliance. - Alliance: pallet_alliance::{Pallet, Call, Storage, Event, Config} = 50, - AllianceMotion: pallet_collective::::{Pallet, Call, Storage, Origin, Event, Config} = 51, - - // The Fellowship. - // pub type FellowshipCollectiveInstance = pallet_ranked_collective::Instance1; - FellowshipCollective: pallet_ranked_collective::::{Pallet, Call, Storage, Event} = 60, - // pub type FellowshipReferendaInstance = pallet_referenda::Instance1; - FellowshipReferenda: pallet_referenda::::{Pallet, Call, Storage, Event} = 61, - FellowshipOrigins: pallet_fellowship_origins::{Origin} = 62, - // pub type FellowshipCoreInstance = pallet_core_fellowship::Instance1; - FellowshipCore: pallet_core_fellowship::::{Pallet, Call, Storage, Event} = 63, - // pub type FellowshipSalaryInstance = pallet_salary::Instance1; - FellowshipSalary: pallet_salary::::{Pallet, Call, Storage, Event} = 64, - - // Ambassador Program. 
- AmbassadorCollective: pallet_ranked_collective::::{Pallet, Call, Storage, Event} = 70, - AmbassadorReferenda: pallet_referenda::::{Pallet, Call, Storage, Event} = 71, - AmbassadorOrigins: pallet_ambassador_origins::{Origin} = 72, - AmbassadorCore: pallet_core_fellowship::::{Pallet, Call, Storage, Event} = 73, - AmbassadorSalary: pallet_salary::::{Pallet, Call, Storage, Event} = 74, - AmbassadorContent: pallet_collective_content::::{Pallet, Call, Storage, Event} = 75, - } -); - -/// The address format for describing accounts. -pub type Address = sp_runtime::MultiAddress; -/// Block type as expected by this runtime. -pub type Block = generic::Block; -/// A Block signed with a Justification -pub type SignedBlock = generic::SignedBlock; -/// BlockId type as expected by this runtime. -pub type BlockId = generic::BlockId; -/// The SignedExtension to the basic transaction logic. -pub type SignedExtra = ( - frame_system::CheckNonZeroSender, - frame_system::CheckSpecVersion, - frame_system::CheckTxVersion, - frame_system::CheckGenesis, - frame_system::CheckEra, - frame_system::CheckNonce, - frame_system::CheckWeight, -); -/// Unchecked extrinsic type as expected by this runtime. -pub type UncheckedExtrinsic = - generic::UncheckedExtrinsic; -/// All migrations executed on runtime upgrade as a nested tuple of types implementing -/// `OnRuntimeUpgrade`. Included migrations must be idempotent. -type Migrations = ( - // v9420 - import_kusama_fellowship::Migration, - // unreleased - pallet_collator_selection::migration::v1::MigrateToV1, -); - -/// Executive: handles dispatch to the various modules. -pub type Executive = frame_executive::Executive< - Runtime, - Block, - frame_system::ChainContext, - Runtime, - AllPalletsWithSystem, - Migrations, ->; - -#[cfg(feature = "runtime-benchmarks")] -mod benches { - frame_benchmarking::define_benchmarks!( - [frame_system, SystemBench::] - [pallet_balances, Balances] - [pallet_message_queue, MessageQueue] - [pallet_multisig, Multisig] - [pallet_proxy, Proxy] - [pallet_session, SessionBench::] - [pallet_utility, Utility] - [pallet_timestamp, Timestamp] - [pallet_collator_selection, CollatorSelection] - [cumulus_pallet_parachain_system, ParachainSystem] - [cumulus_pallet_xcmp_queue, XcmpQueue] - [cumulus_pallet_dmp_queue, DmpQueue] - [pallet_alliance, Alliance] - [pallet_collective, AllianceMotion] - [pallet_xcm, PalletXcmExtrinsicsBenchmark::] - [pallet_preimage, Preimage] - [pallet_scheduler, Scheduler] - [pallet_referenda, FellowshipReferenda] - [pallet_ranked_collective, FellowshipCollective] - [pallet_core_fellowship, FellowshipCore] - [pallet_salary, FellowshipSalary] - [pallet_referenda, AmbassadorReferenda] - [pallet_ranked_collective, AmbassadorCollective] - [pallet_collective_content, AmbassadorContent] - [pallet_core_fellowship, AmbassadorCore] - [pallet_salary, AmbassadorSalary] - ); -} - -impl_runtime_apis! 
{ - impl sp_consensus_aura::AuraApi for Runtime { - fn slot_duration() -> sp_consensus_aura::SlotDuration { - sp_consensus_aura::SlotDuration::from_millis(Aura::slot_duration()) - } - - fn authorities() -> Vec { - Aura::authorities().into_inner() - } - } - - impl sp_api::Core for Runtime { - fn version() -> RuntimeVersion { - VERSION - } - - fn execute_block(block: Block) { - Executive::execute_block(block) - } - - fn initialize_block(header: &::Header) { - Executive::initialize_block(header) - } - } - - impl sp_api::Metadata for Runtime { - fn metadata() -> OpaqueMetadata { - OpaqueMetadata::new(Runtime::metadata().into()) - } - - fn metadata_at_version(version: u32) -> Option { - Runtime::metadata_at_version(version) - } - - fn metadata_versions() -> sp_std::vec::Vec { - Runtime::metadata_versions() - } - } - - impl sp_block_builder::BlockBuilder for Runtime { - fn apply_extrinsic(extrinsic: ::Extrinsic) -> ApplyExtrinsicResult { - Executive::apply_extrinsic(extrinsic) - } - - fn finalize_block() -> ::Header { - Executive::finalize_block() - } - - fn inherent_extrinsics(data: sp_inherents::InherentData) -> Vec<::Extrinsic> { - data.create_extrinsics() - } - - fn check_inherents( - block: Block, - data: sp_inherents::InherentData, - ) -> sp_inherents::CheckInherentsResult { - data.check_extrinsics(&block) - } - } - - impl sp_transaction_pool::runtime_api::TaggedTransactionQueue for Runtime { - fn validate_transaction( - source: TransactionSource, - tx: ::Extrinsic, - block_hash: ::Hash, - ) -> TransactionValidity { - Executive::validate_transaction(source, tx, block_hash) - } - } - - impl sp_offchain::OffchainWorkerApi for Runtime { - fn offchain_worker(header: &::Header) { - Executive::offchain_worker(header) - } - } - - impl sp_session::SessionKeys for Runtime { - fn generate_session_keys(seed: Option>) -> Vec { - SessionKeys::generate(seed) - } - - fn decode_session_keys( - encoded: Vec, - ) -> Option, KeyTypeId)>> { - SessionKeys::decode_into_raw_public_keys(&encoded) - } - } - - impl frame_system_rpc_runtime_api::AccountNonceApi for Runtime { - fn account_nonce(account: AccountId) -> Nonce { - System::account_nonce(account) - } - } - - impl pallet_transaction_payment_rpc_runtime_api::TransactionPaymentApi for Runtime { - fn query_info( - uxt: ::Extrinsic, - len: u32, - ) -> pallet_transaction_payment_rpc_runtime_api::RuntimeDispatchInfo { - TransactionPayment::query_info(uxt, len) - } - fn query_fee_details( - uxt: ::Extrinsic, - len: u32, - ) -> pallet_transaction_payment::FeeDetails { - TransactionPayment::query_fee_details(uxt, len) - } - fn query_weight_to_fee(weight: Weight) -> Balance { - TransactionPayment::weight_to_fee(weight) - } - fn query_length_to_fee(length: u32) -> Balance { - TransactionPayment::length_to_fee(length) - } - } - - impl pallet_transaction_payment_rpc_runtime_api::TransactionPaymentCallApi - for Runtime - { - fn query_call_info( - call: RuntimeCall, - len: u32, - ) -> pallet_transaction_payment::RuntimeDispatchInfo { - TransactionPayment::query_call_info(call, len) - } - fn query_call_fee_details( - call: RuntimeCall, - len: u32, - ) -> pallet_transaction_payment::FeeDetails { - TransactionPayment::query_call_fee_details(call, len) - } - fn query_weight_to_fee(weight: Weight) -> Balance { - TransactionPayment::weight_to_fee(weight) - } - fn query_length_to_fee(length: u32) -> Balance { - TransactionPayment::length_to_fee(length) - } - } - - impl cumulus_primitives_core::CollectCollationInfo for Runtime { - fn collect_collation_info(header: &::Header) -> 
cumulus_primitives_core::CollationInfo { - ParachainSystem::collect_collation_info(header) - } - } - - #[cfg(feature = "try-runtime")] - impl frame_try_runtime::TryRuntime for Runtime { - fn on_runtime_upgrade(checks: frame_try_runtime::UpgradeCheckSelect) -> (Weight, Weight) { - let weight = Executive::try_runtime_upgrade(checks).unwrap(); - (weight, RuntimeBlockWeights::get().max_block) - } - - fn execute_block( - block: Block, - state_root_check: bool, - signature_check: bool, - select: frame_try_runtime::TryStateSelect, - ) -> Weight { - // NOTE: intentional unwrap: we don't want to propagate the error backwards, and want to - // have a backtrace here. - Executive::try_execute_block(block, state_root_check, signature_check, select).unwrap() - } - } - - #[cfg(feature = "runtime-benchmarks")] - impl frame_benchmarking::Benchmark for Runtime { - fn benchmark_metadata(extra: bool) -> ( - Vec, - Vec, - ) { - use frame_benchmarking::{Benchmarking, BenchmarkList}; - use frame_support::traits::StorageInfoTrait; - use frame_system_benchmarking::Pallet as SystemBench; - use cumulus_pallet_session_benchmarking::Pallet as SessionBench; - use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsicsBenchmark; - - let mut list = Vec::::new(); - list_benchmarks!(list, extra); - - let storage_info = AllPalletsWithSystem::storage_info(); - (list, storage_info) - } - - fn dispatch_benchmark( - config: frame_benchmarking::BenchmarkConfig - ) -> Result, sp_runtime::RuntimeString> { - use frame_benchmarking::{Benchmarking, BenchmarkBatch, BenchmarkError}; - use sp_storage::TrackedStorageKey; - - use frame_system_benchmarking::Pallet as SystemBench; - impl frame_system_benchmarking::Config for Runtime { - fn setup_set_code_requirements(code: &sp_std::vec::Vec) -> Result<(), BenchmarkError> { - ParachainSystem::initialize_for_set_code_benchmark(code.len() as u32); - Ok(()) - } - - fn verify_set_code() { - System::assert_last_event(cumulus_pallet_parachain_system::Event::::ValidationFunctionStored.into()); - } - } - - use cumulus_pallet_session_benchmarking::Pallet as SessionBench; - impl cumulus_pallet_session_benchmarking::Config for Runtime {} - - use pallet_xcm::benchmarking::Pallet as PalletXcmExtrinsicsBenchmark; - impl pallet_xcm::benchmarking::Config for Runtime { - fn reachable_dest() -> Option { - Some(Parent.into()) - } - - fn teleportable_asset_and_dest() -> Option<(MultiAsset, MultiLocation)> { - // Relay/native token can be teleported between Collectives and Relay. - Some(( - MultiAsset { - fun: Fungible(EXISTENTIAL_DEPOSIT), - id: Concrete(Parent.into()) - }.into(), - Parent.into(), - )) - } - - fn reserve_transferable_asset_and_dest() -> Option<(MultiAsset, MultiLocation)> { - // Reserve transfers are disabled on Collectives. 
- None - } - } - - let whitelist: Vec = vec![ - // Block Number - hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef702a5c1b19ab7a04f536c519aca4983ac").to_vec().into(), - // Total Issuance - hex_literal::hex!("c2261276cc9d1f8598ea4b6a74b15c2f57c875e4cff74148e4628f264b974c80").to_vec().into(), - // Execution Phase - hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef7ff553b5a9862a516939d82b3d3d8661a").to_vec().into(), - // Event Count - hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef70a98fdbe9ce6c55837576c60c7af3850").to_vec().into(), - // System Events - hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef780d41e5e16056765bc8461851072c9d7").to_vec().into(), - ]; - - let mut batches = Vec::::new(); - let params = (&config, &whitelist); - add_benchmarks!(params, batches); - - if batches.is_empty() { return Err("Benchmark not found for this pallet.".into()) } - Ok(batches) - } - } - - impl sp_genesis_builder::GenesisBuilder for Runtime { - fn create_default_config() -> Vec { - create_default_config::() - } - - fn build_config(config: Vec) -> sp_genesis_builder::Result { - build_config::(config) - } - } -} - -cumulus_pallet_parachain_system::register_validate_block! { - Runtime = Runtime, - BlockExecutor = cumulus_pallet_aura_ext::BlockExecutor::, -} diff --git a/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/block_weights.rs b/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/block_weights.rs deleted file mode 100644 index e7fdb2aae2a..00000000000 --- a/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/block_weights.rs +++ /dev/null @@ -1,53 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -pub mod constants { - use frame_support::{ - parameter_types, - weights::{constants, Weight}, - }; - - parameter_types! { - /// Importing a block with 0 Extrinsics. - pub const BlockExecutionWeight: Weight = - Weight::from_parts(constants::WEIGHT_REF_TIME_PER_NANOS.saturating_mul(5_000_000), 0); - } - - #[cfg(test)] - mod test_weights { - use frame_support::weights::constants; - - /// Checks that the weight exists and is sane. - // NOTE: If this test fails but you are sure that the generated values are fine, - // you can delete it. - #[test] - fn sane() { - let w = super::constants::BlockExecutionWeight::get(); - - // At least 100 µs. - assert!( - w.ref_time() >= 100u64 * constants::WEIGHT_REF_TIME_PER_MICROS, - "Weight should be at least 100 µs." - ); - // At most 50 ms. - assert!( - w.ref_time() <= 50u64 * constants::WEIGHT_REF_TIME_PER_MILLIS, - "Weight should be at most 50 ms." 
- ); - } - } -} diff --git a/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/cumulus_pallet_dmp_queue.rs b/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/cumulus_pallet_dmp_queue.rs deleted file mode 100644 index cc41dcd6cbb..00000000000 --- a/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/cumulus_pallet_dmp_queue.rs +++ /dev/null @@ -1,131 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Cumulus. - -// Cumulus is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Cumulus is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Cumulus. If not, see . - -//! Autogenerated weights for `cumulus_pallet_dmp_queue` -//! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-10-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-yprdrvc7-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("asset-hub-kusama-dev")`, DB CACHE: 1024 - -// Executed Command: -// target/production/polkadot-parachain -// benchmark -// pallet -// --steps=50 -// --repeat=20 -// --extrinsic=* -// --wasm-execution=compiled -// --heap-pages=4096 -// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json -// --pallet=cumulus_pallet_dmp_queue -// --chain=asset-hub-kusama-dev -// --header=./cumulus/file_header.txt -// --output=./cumulus/parachains/runtimes/assets/asset-hub-kusama/src/weights/ - -#![cfg_attr(rustfmt, rustfmt_skip)] -#![allow(unused_parens)] -#![allow(unused_imports)] -#![allow(missing_docs)] - -use frame_support::{traits::Get, weights::Weight}; -use core::marker::PhantomData; - -/// Weight functions for `cumulus_pallet_dmp_queue`. 
-pub struct WeightInfo(PhantomData); -impl cumulus_pallet_dmp_queue::WeightInfo for WeightInfo { - /// Storage: `DmpQueue::MigrationStatus` (r:1 w:1) - /// Proof: `DmpQueue::MigrationStatus` (`max_values`: Some(1), `max_size`: Some(1028), added: 1523, mode: `MaxEncodedLen`) - /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca754904d6d8c6fe06c4e5965f9b8397421` (r:1 w:0) - /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca754904d6d8c6fe06c4e5965f9b8397421` (r:1 w:0) - /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca7d95d3e948effbeccff2de2c182672836` (r:1 w:1) - /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca7d95d3e948effbeccff2de2c182672836` (r:1 w:1) - /// Storage: `MessageQueue::BookStateFor` (r:1 w:1) - /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) - /// Storage: `MessageQueue::ServiceHead` (r:1 w:1) - /// Proof: `MessageQueue::ServiceHead` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `MaxEncodedLen`) - /// Storage: `MessageQueue::Pages` (r:0 w:1) - /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(65585), added: 68060, mode: `MaxEncodedLen`) - fn on_idle_good_msg() -> Weight { - // Proof Size summary in bytes: - // Measured: `65696` - // Estimated: `69161` - // Minimum execution time: 124_651_000 picoseconds. - Weight::from_parts(127_857_000, 0) - .saturating_add(Weight::from_parts(0, 69161)) - .saturating_add(T::DbWeight::get().reads(5)) - .saturating_add(T::DbWeight::get().writes(5)) - } - /// Storage: `DmpQueue::MigrationStatus` (r:1 w:1) - /// Proof: `DmpQueue::MigrationStatus` (`max_values`: Some(1), `max_size`: Some(1028), added: 1523, mode: `MaxEncodedLen`) - /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca754904d6d8c6fe06c4e5965f9b8397421` (r:1 w:0) - /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca754904d6d8c6fe06c4e5965f9b8397421` (r:1 w:0) - /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca7d95d3e948effbeccff2de2c182672836` (r:1 w:1) - /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca7d95d3e948effbeccff2de2c182672836` (r:1 w:1) - fn on_idle_large_msg() -> Weight { - // Proof Size summary in bytes: - // Measured: `65659` - // Estimated: `69124` - // Minimum execution time: 65_684_000 picoseconds. 
- Weight::from_parts(68_039_000, 0) - .saturating_add(Weight::from_parts(0, 69124)) - .saturating_add(T::DbWeight::get().reads(3)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `DmpQueue::MigrationStatus` (r:1 w:1) - /// Proof: `DmpQueue::MigrationStatus` (`max_values`: Some(1), `max_size`: Some(1028), added: 1523, mode: `MaxEncodedLen`) - /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca754904d6d8c6fe06c4e5965f9b8397421` (r:1 w:0) - /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca754904d6d8c6fe06c4e5965f9b8397421` (r:1 w:0) - /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca70f923ef3252d0166429d36d20ed665a8` (r:1 w:1) - /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca70f923ef3252d0166429d36d20ed665a8` (r:1 w:1) - /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca772275f64c354954352b71eea39cfaca2` (r:1 w:1) - /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca772275f64c354954352b71eea39cfaca2` (r:1 w:1) - /// Storage: `MessageQueue::BookStateFor` (r:1 w:1) - /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) - /// Storage: `MessageQueue::ServiceHead` (r:1 w:1) - /// Proof: `MessageQueue::ServiceHead` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `MaxEncodedLen`) - /// Storage: `MessageQueue::Pages` (r:0 w:1) - /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(65585), added: 68060, mode: `MaxEncodedLen`) - fn on_idle_overweight_good_msg() -> Weight { - // Proof Size summary in bytes: - // Measured: `65726` - // Estimated: `69191` - // Minimum execution time: 117_657_000 picoseconds. - Weight::from_parts(122_035_000, 0) - .saturating_add(Weight::from_parts(0, 69191)) - .saturating_add(T::DbWeight::get().reads(6)) - .saturating_add(T::DbWeight::get().writes(6)) - } - /// Storage: `DmpQueue::MigrationStatus` (r:1 w:1) - /// Proof: `DmpQueue::MigrationStatus` (`max_values`: Some(1), `max_size`: Some(1028), added: 1523, mode: `MaxEncodedLen`) - /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca754904d6d8c6fe06c4e5965f9b8397421` (r:1 w:0) - /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca754904d6d8c6fe06c4e5965f9b8397421` (r:1 w:0) - /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca70f923ef3252d0166429d36d20ed665a8` (r:1 w:1) - /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca70f923ef3252d0166429d36d20ed665a8` (r:1 w:1) - /// Storage: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca772275f64c354954352b71eea39cfaca2` (r:1 w:1) - /// Proof: UNKNOWN KEY `0xcd5c1f6df63bc97f4a8ce37f14a50ca772275f64c354954352b71eea39cfaca2` (r:1 w:1) - fn on_idle_overweight_large_msg() -> Weight { - // Proof Size summary in bytes: - // Measured: `65689` - // Estimated: `69154` - // Minimum execution time: 59_799_000 picoseconds. - Weight::from_parts(61_354_000, 0) - .saturating_add(Weight::from_parts(0, 69154)) - .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(3)) - } -} diff --git a/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/cumulus_pallet_parachain_system.rs b/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/cumulus_pallet_parachain_system.rs deleted file mode 100644 index 0b7a2fc21cd..00000000000 --- a/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/cumulus_pallet_parachain_system.rs +++ /dev/null @@ -1,80 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. 
-// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! Autogenerated weights for `cumulus_pallet_parachain_system` -//! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-03-28, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `i9`, CPU: `13th Gen Intel(R) Core(TM) i9-13900K` -//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("collectives-polkadot-dev"), DB CACHE: 1024 - -// Executed Command: -// ./target/release/polkadot-parachain -// benchmark -// pallet -// --chain -// collectives-polkadot-dev -// --pallet -// cumulus_pallet_parachain_system -// --extrinsic -// * -// --execution -// wasm -// --wasm-execution -// compiled -// --output -// parachains/runtimes/collectives/collectives-polkadot/src/weights -// --steps -// 50 -// --repeat -// 20 - -#![cfg_attr(rustfmt, rustfmt_skip)] -#![allow(unused_parens)] -#![allow(unused_imports)] - -use frame_support::{traits::Get, weights::Weight}; -use sp_std::marker::PhantomData; - -/// Weight functions for `cumulus_pallet_parachain_system`. -pub struct WeightInfo(PhantomData); -impl cumulus_pallet_parachain_system::WeightInfo for WeightInfo { - /// Storage: ParachainSystem LastDmqMqcHead (r:1 w:1) - /// Proof Skipped: ParachainSystem LastDmqMqcHead (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ParachainSystem ReservedDmpWeightOverride (r:1 w:0) - /// Proof Skipped: ParachainSystem ReservedDmpWeightOverride (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: MessageQueue BookStateFor (r:1 w:1) - /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) - /// Storage: MessageQueue ServiceHead (r:1 w:1) - /// Proof: MessageQueue ServiceHead (max_values: Some(1), max_size: Some(5), added: 500, mode: MaxEncodedLen) - /// Storage: ParachainSystem ProcessedDownwardMessages (r:0 w:1) - /// Proof Skipped: ParachainSystem ProcessedDownwardMessages (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: MessageQueue Pages (r:0 w:16) - /// Proof: MessageQueue Pages (max_values: None, max_size: Some(65585), added: 68060, mode: MaxEncodedLen) - /// The range of component `n` is `[0, 1000]`. - fn enqueue_inbound_downward_messages(n: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `48` - // Estimated: `8121` - // Minimum execution time: 1_988_000 picoseconds. 
- Weight::from_parts(2_039_000, 0) - .saturating_add(Weight::from_parts(0, 8121)) - // Standard Error: 30_660 - .saturating_add(Weight::from_parts(24_419_204, 0).saturating_mul(n.into())) - .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(4)) - } -} diff --git a/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/cumulus_pallet_xcmp_queue.rs b/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/cumulus_pallet_xcmp_queue.rs deleted file mode 100644 index e68c075bffc..00000000000 --- a/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/cumulus_pallet_xcmp_queue.rs +++ /dev/null @@ -1,148 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! Autogenerated weights for `cumulus_pallet_xcmp_queue` -//! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-09-15, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `Olivers-MacBook-Pro.local`, CPU: `` -//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("collectives-polkadot-dev")`, DB CACHE: 1024 - -// Executed Command: -// ./target/release/polkadot-parachain -// benchmark -// pallet -// --pallet -// cumulus-pallet-xcmp-queue -// --chain -// collectives-polkadot-dev -// --output -// cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/cumulus_pallet_xcmp_queue.rs -// --extrinsic -// - -#![cfg_attr(rustfmt, rustfmt_skip)] -#![allow(unused_parens)] -#![allow(unused_imports)] -#![allow(missing_docs)] - -use frame_support::{traits::Get, weights::Weight}; -use core::marker::PhantomData; - -/// Weight functions for `cumulus_pallet_xcmp_queue`. -pub struct WeightInfo(PhantomData); -impl cumulus_pallet_xcmp_queue::WeightInfo for WeightInfo { - /// Storage: `XcmpQueue::QueueConfig` (r:1 w:1) - /// Proof: `XcmpQueue::QueueConfig` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - fn set_config_with_u32() -> Weight { - // Proof Size summary in bytes: - // Measured: `142` - // Estimated: `1627` - // Minimum execution time: 5_000_000 picoseconds. 
- Weight::from_parts(6_000_000, 0) - .saturating_add(Weight::from_parts(0, 1627)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `XcmpQueue::QueueConfig` (r:1 w:0) - /// Proof: `XcmpQueue::QueueConfig` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `MessageQueue::BookStateFor` (r:1 w:1) - /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) - /// Storage: `MessageQueue::ServiceHead` (r:1 w:1) - /// Proof: `MessageQueue::ServiceHead` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `MaxEncodedLen`) - /// Storage: `XcmpQueue::InboundXcmpSuspended` (r:1 w:0) - /// Proof: `XcmpQueue::InboundXcmpSuspended` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `MessageQueue::Pages` (r:0 w:1) - /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(65585), added: 68060, mode: `MaxEncodedLen`) - fn enqueue_xcmp_message() -> Weight { - // Proof Size summary in bytes: - // Measured: `148` - // Estimated: `3517` - // Minimum execution time: 14_000_000 picoseconds. - Weight::from_parts(14_000_000, 0) - .saturating_add(Weight::from_parts(0, 3517)) - .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(3)) - } - /// Storage: `XcmpQueue::OutboundXcmpStatus` (r:1 w:1) - /// Proof: `XcmpQueue::OutboundXcmpStatus` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - fn suspend_channel() -> Weight { - // Proof Size summary in bytes: - // Measured: `142` - // Estimated: `1627` - // Minimum execution time: 3_000_000 picoseconds. - Weight::from_parts(3_000_000, 0) - .saturating_add(Weight::from_parts(0, 1627)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `XcmpQueue::OutboundXcmpStatus` (r:1 w:1) - /// Proof: `XcmpQueue::OutboundXcmpStatus` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - fn resume_channel() -> Weight { - // Proof Size summary in bytes: - // Measured: `177` - // Estimated: `1662` - // Minimum execution time: 4_000_000 picoseconds. - Weight::from_parts(5_000_000, 0) - .saturating_add(Weight::from_parts(0, 1662)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - fn take_first_concatenated_xcm() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 44_000_000 picoseconds. 
- Weight::from_parts(45_000_000, 0) - .saturating_add(Weight::from_parts(0, 0)) - } - /// Storage: UNKNOWN KEY `0x7b3237373ffdfeb1cab4222e3b520d6b345d8e88afa015075c945637c07e8f20` (r:1 w:1) - /// Proof: UNKNOWN KEY `0x7b3237373ffdfeb1cab4222e3b520d6b345d8e88afa015075c945637c07e8f20` (r:1 w:1) - /// Storage: `XcmpQueue::InboundXcmpMessages` (r:1 w:1) - /// Proof: `XcmpQueue::InboundXcmpMessages` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// Storage: `MessageQueue::BookStateFor` (r:1 w:1) - /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) - /// Storage: `MessageQueue::ServiceHead` (r:1 w:1) - /// Proof: `MessageQueue::ServiceHead` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `MaxEncodedLen`) - /// Storage: `XcmpQueue::QueueConfig` (r:1 w:0) - /// Proof: `XcmpQueue::QueueConfig` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `XcmpQueue::InboundXcmpSuspended` (r:1 w:0) - /// Proof: `XcmpQueue::InboundXcmpSuspended` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `MessageQueue::Pages` (r:0 w:1) - /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(65585), added: 68060, mode: `MaxEncodedLen`) - fn on_idle_good_msg() -> Weight { - // Proof Size summary in bytes: - // Measured: `65777` - // Estimated: `69242` - // Minimum execution time: 60_000_000 picoseconds. - Weight::from_parts(63_000_000, 0) - .saturating_add(Weight::from_parts(0, 69242)) - .saturating_add(T::DbWeight::get().reads(6)) - .saturating_add(T::DbWeight::get().writes(5)) - } - /// Storage: UNKNOWN KEY `0x7b3237373ffdfeb1cab4222e3b520d6b345d8e88afa015075c945637c07e8f20` (r:1 w:1) - /// Proof: UNKNOWN KEY `0x7b3237373ffdfeb1cab4222e3b520d6b345d8e88afa015075c945637c07e8f20` (r:1 w:1) - fn on_idle_large_msg() -> Weight { - // Proof Size summary in bytes: - // Measured: `65776` - // Estimated: `69241` - // Minimum execution time: 41_000_000 picoseconds. - Weight::from_parts(43_000_000, 0) - .saturating_add(Weight::from_parts(0, 69241)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(2)) - } -} diff --git a/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/extrinsic_weights.rs b/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/extrinsic_weights.rs deleted file mode 100644 index 1a4adb968bb..00000000000 --- a/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/extrinsic_weights.rs +++ /dev/null @@ -1,53 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -pub mod constants { - use frame_support::{ - parameter_types, - weights::{constants, Weight}, - }; - - parameter_types! { - /// Executing a NO-OP `System::remarks` Extrinsic. 
- pub const ExtrinsicBaseWeight: Weight = - Weight::from_parts(constants::WEIGHT_REF_TIME_PER_NANOS.saturating_mul(125_000), 0); - } - - #[cfg(test)] - mod test_weights { - use frame_support::weights::constants; - - /// Checks that the weight exists and is sane. - // NOTE: If this test fails but you are sure that the generated values are fine, - // you can delete it. - #[test] - fn sane() { - let w = super::constants::ExtrinsicBaseWeight::get(); - - // At least 10 µs. - assert!( - w.ref_time() >= 10u64 * constants::WEIGHT_REF_TIME_PER_MICROS, - "Weight should be at least 10 µs." - ); - // At most 1 ms. - assert!( - w.ref_time() <= constants::WEIGHT_REF_TIME_PER_MILLIS, - "Weight should be at most 1 ms." - ); - } - } -} diff --git a/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/frame_system.rs b/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/frame_system.rs deleted file mode 100644 index b6f1dc8dc08..00000000000 --- a/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/frame_system.rs +++ /dev/null @@ -1,154 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! Autogenerated weights for `frame_system` -//! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-07-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-ynta1nyy-project-238-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! EXECUTION: ``, WASM-EXECUTION: `Compiled`, CHAIN: `Some("collectives-polkadot-dev")`, DB CACHE: 1024 - -// Executed Command: -// ./target/production/polkadot-parachain -// benchmark -// pallet -// --chain=collectives-polkadot-dev -// --wasm-execution=compiled -// --pallet=frame_system -// --no-storage-info -// --no-median-slopes -// --no-min-squares -// --extrinsic=* -// --steps=50 -// --repeat=20 -// --json -// --header=./file_header.txt -// --output=./parachains/runtimes/collectives/collectives-polkadot/src/weights/ - -#![cfg_attr(rustfmt, rustfmt_skip)] -#![allow(unused_parens)] -#![allow(unused_imports)] -#![allow(missing_docs)] - -use frame_support::{traits::Get, weights::Weight}; -use core::marker::PhantomData; - -/// Weight functions for `frame_system`. -pub struct WeightInfo(PhantomData); -impl frame_system::WeightInfo for WeightInfo { - /// The range of component `b` is `[0, 3932160]`. - fn remark(b: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 1_926_000 picoseconds. - Weight::from_parts(1_929_666, 0) - .saturating_add(Weight::from_parts(0, 0)) - // Standard Error: 0 - .saturating_add(Weight::from_parts(387, 0).saturating_mul(b.into())) - } - /// The range of component `b` is `[0, 3932160]`. 
- fn remark_with_event(b: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 7_221_000 picoseconds. - Weight::from_parts(34_449_539, 0) - .saturating_add(Weight::from_parts(0, 0)) - // Standard Error: 7 - .saturating_add(Weight::from_parts(1_706, 0).saturating_mul(b.into())) - } - /// Storage: `System::Digest` (r:1 w:1) - /// Proof: `System::Digest` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: UNKNOWN KEY `0x3a686561707061676573` (r:0 w:1) - /// Proof: UNKNOWN KEY `0x3a686561707061676573` (r:0 w:1) - fn set_heap_pages() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `1485` - // Minimum execution time: 3_681_000 picoseconds. - Weight::from_parts(3_857_000, 0) - .saturating_add(Weight::from_parts(0, 1485)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `ParachainSystem::ValidationData` (r:1 w:0) - /// Proof: `ParachainSystem::ValidationData` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `ParachainSystem::UpgradeRestrictionSignal` (r:1 w:0) - /// Proof: `ParachainSystem::UpgradeRestrictionSignal` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `ParachainSystem::PendingValidationCode` (r:1 w:1) - /// Proof: `ParachainSystem::PendingValidationCode` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) - /// Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `ParachainSystem::NewValidationCode` (r:0 w:1) - /// Proof: `ParachainSystem::NewValidationCode` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `ParachainSystem::DidSetValidationCode` (r:0 w:1) - /// Proof: `ParachainSystem::DidSetValidationCode` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - fn set_code() -> Weight { - // Proof Size summary in bytes: - // Measured: `156` - // Estimated: `1641` - // Minimum execution time: 101_899_621_000 picoseconds. - Weight::from_parts(106_377_672_000, 0) - .saturating_add(Weight::from_parts(0, 1641)) - .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(3)) - } - /// Storage: `Skipped::Metadata` (r:0 w:0) - /// Proof: `Skipped::Metadata` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// The range of component `i` is `[0, 1000]`. - fn set_storage(i: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 2_039_000 picoseconds. - Weight::from_parts(2_094_000, 0) - .saturating_add(Weight::from_parts(0, 0)) - // Standard Error: 2_124 - .saturating_add(Weight::from_parts(754_465, 0).saturating_mul(i.into())) - .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(i.into()))) - } - /// Storage: `Skipped::Metadata` (r:0 w:0) - /// Proof: `Skipped::Metadata` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// The range of component `i` is `[0, 1000]`. - fn kill_storage(i: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 2_103_000 picoseconds. 
- Weight::from_parts(2_182_000, 0) - .saturating_add(Weight::from_parts(0, 0)) - // Standard Error: 1_031 - .saturating_add(Weight::from_parts(570_563, 0).saturating_mul(i.into())) - .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(i.into()))) - } - /// Storage: `Skipped::Metadata` (r:0 w:0) - /// Proof: `Skipped::Metadata` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// The range of component `p` is `[0, 1000]`. - fn kill_prefix(p: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `82 + p * (69 ±0)` - // Estimated: `78 + p * (70 ±0)` - // Minimum execution time: 3_728_000 picoseconds. - Weight::from_parts(3_836_000, 0) - .saturating_add(Weight::from_parts(0, 78)) - // Standard Error: 1_802 - .saturating_add(Weight::from_parts(1_199_345, 0).saturating_mul(p.into())) - .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(p.into()))) - .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(p.into()))) - .saturating_add(Weight::from_parts(0, 70).saturating_mul(p.into())) - } -} diff --git a/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/mod.rs b/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/mod.rs deleted file mode 100644 index 1d877fdbd2b..00000000000 --- a/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/mod.rs +++ /dev/null @@ -1,50 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -pub mod block_weights; -pub mod cumulus_pallet_dmp_queue; -pub mod cumulus_pallet_parachain_system; -pub mod cumulus_pallet_xcmp_queue; -pub mod extrinsic_weights; -pub mod frame_system; -pub mod pallet_alliance; -pub mod pallet_balances; -pub mod pallet_collator_selection; -pub mod pallet_collective; -pub mod pallet_collective_content; -pub mod pallet_core_fellowship_ambassador_core; -pub mod pallet_core_fellowship_fellowship_core; -pub mod pallet_message_queue; -pub mod pallet_multisig; -pub mod pallet_preimage; -pub mod pallet_proxy; -pub mod pallet_ranked_collective_ambassador_collective; -pub mod pallet_ranked_collective_fellowship_collective; -pub mod pallet_referenda_ambassador_referenda; -pub mod pallet_referenda_fellowship_referenda; -pub mod pallet_salary_ambassador_salary; -pub mod pallet_salary_fellowship_salary; -pub mod pallet_scheduler; -pub mod pallet_session; -pub mod pallet_timestamp; -pub mod pallet_utility; -pub mod pallet_xcm; -pub mod paritydb_weights; -pub mod rocksdb_weights; - -pub use block_weights::constants::BlockExecutionWeight; -pub use extrinsic_weights::constants::ExtrinsicBaseWeight; -pub use paritydb_weights::constants::ParityDbWeight; -pub use rocksdb_weights::constants::RocksDbWeight; diff --git a/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/pallet_alliance.rs b/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/pallet_alliance.rs deleted file mode 100644 index d8ede609a67..00000000000 --- a/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/pallet_alliance.rs +++ /dev/null @@ -1,494 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! Autogenerated weights for `pallet_alliance` -//! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-07-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-ynta1nyy-project-238-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! EXECUTION: ``, WASM-EXECUTION: `Compiled`, CHAIN: `Some("collectives-polkadot-dev")`, DB CACHE: 1024 - -// Executed Command: -// ./target/production/polkadot-parachain -// benchmark -// pallet -// --chain=collectives-polkadot-dev -// --wasm-execution=compiled -// --pallet=pallet_alliance -// --no-storage-info -// --no-median-slopes -// --no-min-squares -// --extrinsic=* -// --steps=50 -// --repeat=20 -// --json -// --header=./file_header.txt -// --output=./parachains/runtimes/collectives/collectives-polkadot/src/weights/ - -#![cfg_attr(rustfmt, rustfmt_skip)] -#![allow(unused_parens)] -#![allow(unused_imports)] -#![allow(missing_docs)] - -use frame_support::{traits::Get, weights::Weight}; -use core::marker::PhantomData; - -/// Weight functions for `pallet_alliance`. 
-pub struct WeightInfo(PhantomData); -impl pallet_alliance::WeightInfo for WeightInfo { - /// Storage: `Alliance::Members` (r:1 w:0) - /// Proof: `Alliance::Members` (`max_values`: None, `max_size`: Some(3211), added: 5686, mode: `MaxEncodedLen`) - /// Storage: `AllianceMotion::ProposalOf` (r:1 w:1) - /// Proof: `AllianceMotion::ProposalOf` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// Storage: `AllianceMotion::Proposals` (r:1 w:1) - /// Proof: `AllianceMotion::Proposals` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `AllianceMotion::ProposalCount` (r:1 w:1) - /// Proof: `AllianceMotion::ProposalCount` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `AllianceMotion::Voting` (r:0 w:1) - /// Proof: `AllianceMotion::Voting` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// The range of component `b` is `[1, 1024]`. - /// The range of component `m` is `[2, 100]`. - /// The range of component `p` is `[1, 100]`. - fn propose_proposed(b: u32, m: u32, p: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `439 + m * (32 ±0) + p * (36 ±0)` - // Estimated: `6676 + m * (32 ±0) + p * (36 ±0)` - // Minimum execution time: 32_783_000 picoseconds. - Weight::from_parts(32_174_037, 0) - .saturating_add(Weight::from_parts(0, 6676)) - // Standard Error: 198 - .saturating_add(Weight::from_parts(1_220, 0).saturating_mul(b.into())) - // Standard Error: 2_074 - .saturating_add(Weight::from_parts(40_945, 0).saturating_mul(m.into())) - // Standard Error: 2_048 - .saturating_add(Weight::from_parts(181_087, 0).saturating_mul(p.into())) - .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(4)) - .saturating_add(Weight::from_parts(0, 32).saturating_mul(m.into())) - .saturating_add(Weight::from_parts(0, 36).saturating_mul(p.into())) - } - /// Storage: `Alliance::Members` (r:1 w:0) - /// Proof: `Alliance::Members` (`max_values`: None, `max_size`: Some(3211), added: 5686, mode: `MaxEncodedLen`) - /// Storage: `AllianceMotion::Voting` (r:1 w:1) - /// Proof: `AllianceMotion::Voting` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// The range of component `m` is `[5, 100]`. - fn vote(m: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `868 + m * (64 ±0)` - // Estimated: `6676 + m * (64 ±0)` - // Minimum execution time: 28_520_000 picoseconds. 
- Weight::from_parts(29_661_024, 0) - .saturating_add(Weight::from_parts(0, 6676)) - // Standard Error: 2_336 - .saturating_add(Weight::from_parts(89_873, 0).saturating_mul(m.into())) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(1)) - .saturating_add(Weight::from_parts(0, 64).saturating_mul(m.into())) - } - /// Storage: `Alliance::Members` (r:1 w:0) - /// Proof: `Alliance::Members` (`max_values`: None, `max_size`: Some(3211), added: 5686, mode: `MaxEncodedLen`) - /// Storage: `AllianceMotion::Voting` (r:1 w:1) - /// Proof: `AllianceMotion::Voting` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// Storage: `AllianceMotion::Members` (r:1 w:0) - /// Proof: `AllianceMotion::Members` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `AllianceMotion::Proposals` (r:1 w:1) - /// Proof: `AllianceMotion::Proposals` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `AllianceMotion::ProposalOf` (r:0 w:1) - /// Proof: `AllianceMotion::ProposalOf` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// The range of component `m` is `[4, 100]`. - /// The range of component `p` is `[1, 100]`. - fn close_early_disapproved(m: u32, p: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `312 + m * (96 ±0) + p * (36 ±0)` - // Estimated: `6676 + m * (97 ±0) + p * (36 ±0)` - // Minimum execution time: 39_353_000 picoseconds. - Weight::from_parts(33_028_008, 0) - .saturating_add(Weight::from_parts(0, 6676)) - // Standard Error: 2_137 - .saturating_add(Weight::from_parts(90_946, 0).saturating_mul(m.into())) - // Standard Error: 2_084 - .saturating_add(Weight::from_parts(175_827, 0).saturating_mul(p.into())) - .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(3)) - .saturating_add(Weight::from_parts(0, 97).saturating_mul(m.into())) - .saturating_add(Weight::from_parts(0, 36).saturating_mul(p.into())) - } - /// Storage: `Alliance::Members` (r:1 w:0) - /// Proof: `Alliance::Members` (`max_values`: None, `max_size`: Some(3211), added: 5686, mode: `MaxEncodedLen`) - /// Storage: `AllianceMotion::Voting` (r:1 w:1) - /// Proof: `AllianceMotion::Voting` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// Storage: `AllianceMotion::Members` (r:1 w:0) - /// Proof: `AllianceMotion::Members` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `AllianceMotion::ProposalOf` (r:1 w:1) - /// Proof: `AllianceMotion::ProposalOf` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// Storage: `AllianceMotion::Proposals` (r:1 w:1) - /// Proof: `AllianceMotion::Proposals` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// The range of component `b` is `[1, 1024]`. - /// The range of component `m` is `[4, 100]`. - /// The range of component `p` is `[1, 100]`. - fn close_early_approved(_b: u32, m: u32, p: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `762 + m * (96 ±0) + p * (41 ±0)` - // Estimated: `6676 + m * (97 ±0) + p * (40 ±0)` - // Minimum execution time: 52_835_000 picoseconds. 
- Weight::from_parts(45_963_292, 0) - .saturating_add(Weight::from_parts(0, 6676)) - // Standard Error: 3_189 - .saturating_add(Weight::from_parts(111_627, 0).saturating_mul(m.into())) - // Standard Error: 3_109 - .saturating_add(Weight::from_parts(207_923, 0).saturating_mul(p.into())) - .saturating_add(T::DbWeight::get().reads(5)) - .saturating_add(T::DbWeight::get().writes(3)) - .saturating_add(Weight::from_parts(0, 97).saturating_mul(m.into())) - .saturating_add(Weight::from_parts(0, 40).saturating_mul(p.into())) - } - /// Storage: `Alliance::Members` (r:1 w:0) - /// Proof: `Alliance::Members` (`max_values`: None, `max_size`: Some(3211), added: 5686, mode: `MaxEncodedLen`) - /// Storage: `AllianceMotion::Voting` (r:1 w:1) - /// Proof: `AllianceMotion::Voting` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// Storage: `AllianceMotion::Members` (r:1 w:0) - /// Proof: `AllianceMotion::Members` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `AllianceMotion::Prime` (r:1 w:0) - /// Proof: `AllianceMotion::Prime` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `AllianceMotion::ProposalOf` (r:1 w:1) - /// Proof: `AllianceMotion::ProposalOf` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// Storage: `AllianceMotion::Proposals` (r:1 w:1) - /// Proof: `AllianceMotion::Proposals` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `Alliance::Rule` (r:0 w:1) - /// Proof: `Alliance::Rule` (`max_values`: Some(1), `max_size`: Some(87), added: 582, mode: `MaxEncodedLen`) - /// The range of component `m` is `[2, 100]`. - /// The range of component `p` is `[1, 100]`. - fn close_disapproved(m: u32, p: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `518 + m * (96 ±0) + p * (41 ±0)` - // Estimated: `6676 + m * (109 ±0) + p * (43 ±0)` - // Minimum execution time: 49_980_000 picoseconds. - Weight::from_parts(48_110_301, 0) - .saturating_add(Weight::from_parts(0, 6676)) - // Standard Error: 5_057 - .saturating_add(Weight::from_parts(169_065, 0).saturating_mul(m.into())) - // Standard Error: 4_995 - .saturating_add(Weight::from_parts(201_349, 0).saturating_mul(p.into())) - .saturating_add(T::DbWeight::get().reads(6)) - .saturating_add(T::DbWeight::get().writes(4)) - .saturating_add(Weight::from_parts(0, 109).saturating_mul(m.into())) - .saturating_add(Weight::from_parts(0, 43).saturating_mul(p.into())) - } - /// Storage: `Alliance::Members` (r:1 w:0) - /// Proof: `Alliance::Members` (`max_values`: None, `max_size`: Some(3211), added: 5686, mode: `MaxEncodedLen`) - /// Storage: `AllianceMotion::Voting` (r:1 w:1) - /// Proof: `AllianceMotion::Voting` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// Storage: `AllianceMotion::Members` (r:1 w:0) - /// Proof: `AllianceMotion::Members` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `AllianceMotion::Prime` (r:1 w:0) - /// Proof: `AllianceMotion::Prime` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `AllianceMotion::Proposals` (r:1 w:1) - /// Proof: `AllianceMotion::Proposals` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `AllianceMotion::ProposalOf` (r:0 w:1) - /// Proof: `AllianceMotion::ProposalOf` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// The range of component `b` is `[1, 1024]`. - /// The range of component `m` is `[5, 100]`. - /// The range of component `p` is `[1, 100]`. 
- fn close_approved(_b: u32, m: u32, p: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `417 + m * (96 ±0) + p * (36 ±0)` - // Estimated: `6676 + m * (96 ±0) + p * (36 ±0)` - // Minimum execution time: 40_646_000 picoseconds. - Weight::from_parts(36_865_909, 0) - .saturating_add(Weight::from_parts(0, 6676)) - // Standard Error: 2_136 - .saturating_add(Weight::from_parts(74_341, 0).saturating_mul(m.into())) - // Standard Error: 2_059 - .saturating_add(Weight::from_parts(170_035, 0).saturating_mul(p.into())) - .saturating_add(T::DbWeight::get().reads(5)) - .saturating_add(T::DbWeight::get().writes(3)) - .saturating_add(Weight::from_parts(0, 96).saturating_mul(m.into())) - .saturating_add(Weight::from_parts(0, 36).saturating_mul(p.into())) - } - /// Storage: `Alliance::Members` (r:2 w:2) - /// Proof: `Alliance::Members` (`max_values`: None, `max_size`: Some(3211), added: 5686, mode: `MaxEncodedLen`) - /// Storage: `AllianceMotion::Members` (r:1 w:1) - /// Proof: `AllianceMotion::Members` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// The range of component `m` is `[1, 100]`. - /// The range of component `z` is `[0, 100]`. - fn init_members(m: u32, z: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `12` - // Estimated: `12362` - // Minimum execution time: 29_710_000 picoseconds. - Weight::from_parts(17_762_170, 0) - .saturating_add(Weight::from_parts(0, 12362)) - // Standard Error: 1_652 - .saturating_add(Weight::from_parts(156_967, 0).saturating_mul(m.into())) - // Standard Error: 1_632 - .saturating_add(Weight::from_parts(130_352, 0).saturating_mul(z.into())) - .saturating_add(T::DbWeight::get().reads(3)) - .saturating_add(T::DbWeight::get().writes(3)) - } - /// Storage: `Alliance::Members` (r:2 w:2) - /// Proof: `Alliance::Members` (`max_values`: None, `max_size`: Some(3211), added: 5686, mode: `MaxEncodedLen`) - /// Storage: `AllianceMotion::Proposals` (r:1 w:0) - /// Proof: `AllianceMotion::Proposals` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `Alliance::DepositOf` (r:200 w:50) - /// Proof: `Alliance::DepositOf` (`max_values`: None, `max_size`: Some(64), added: 2539, mode: `MaxEncodedLen`) - /// Storage: `System::Account` (r:50 w:50) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - /// Storage: `AllianceMotion::Members` (r:0 w:1) - /// Proof: `AllianceMotion::Members` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `AllianceMotion::Prime` (r:0 w:1) - /// Proof: `AllianceMotion::Prime` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// The range of component `x` is `[1, 100]`. - /// The range of component `y` is `[0, 100]`. - /// The range of component `z` is `[0, 50]`. - fn disband(x: u32, y: u32, z: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `0 + x * (52 ±0) + y * (53 ±0) + z * (250 ±0)` - // Estimated: `12362 + x * (2539 ±0) + y * (2539 ±0) + z * (2603 ±1)` - // Minimum execution time: 294_258_000 picoseconds. 
- Weight::from_parts(295_116_000, 0) - .saturating_add(Weight::from_parts(0, 12362)) - // Standard Error: 23_663 - .saturating_add(Weight::from_parts(553_978, 0).saturating_mul(x.into())) - // Standard Error: 23_549 - .saturating_add(Weight::from_parts(567_024, 0).saturating_mul(y.into())) - // Standard Error: 47_055 - .saturating_add(Weight::from_parts(15_439_056, 0).saturating_mul(z.into())) - .saturating_add(T::DbWeight::get().reads(3)) - .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(x.into()))) - .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(y.into()))) - .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(z.into()))) - .saturating_add(T::DbWeight::get().writes(4)) - .saturating_add(T::DbWeight::get().writes((2_u64).saturating_mul(z.into()))) - .saturating_add(Weight::from_parts(0, 2539).saturating_mul(x.into())) - .saturating_add(Weight::from_parts(0, 2539).saturating_mul(y.into())) - .saturating_add(Weight::from_parts(0, 2603).saturating_mul(z.into())) - } - /// Storage: `Alliance::Rule` (r:0 w:1) - /// Proof: `Alliance::Rule` (`max_values`: Some(1), `max_size`: Some(87), added: 582, mode: `MaxEncodedLen`) - fn set_rule() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 8_538_000 picoseconds. - Weight::from_parts(8_752_000, 0) - .saturating_add(Weight::from_parts(0, 0)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `Alliance::Announcements` (r:1 w:1) - /// Proof: `Alliance::Announcements` (`max_values`: Some(1), `max_size`: Some(8702), added: 9197, mode: `MaxEncodedLen`) - fn announce() -> Weight { - // Proof Size summary in bytes: - // Measured: `76` - // Estimated: `10187` - // Minimum execution time: 11_213_000 picoseconds. - Weight::from_parts(11_792_000, 0) - .saturating_add(Weight::from_parts(0, 10187)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `Alliance::Announcements` (r:1 w:1) - /// Proof: `Alliance::Announcements` (`max_values`: Some(1), `max_size`: Some(8702), added: 9197, mode: `MaxEncodedLen`) - fn remove_announcement() -> Weight { - // Proof Size summary in bytes: - // Measured: `149` - // Estimated: `10187` - // Minimum execution time: 12_477_000 picoseconds. - Weight::from_parts(12_942_000, 0) - .saturating_add(Weight::from_parts(0, 10187)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `Alliance::Members` (r:3 w:1) - /// Proof: `Alliance::Members` (`max_values`: None, `max_size`: Some(3211), added: 5686, mode: `MaxEncodedLen`) - /// Storage: `Alliance::UnscrupulousAccounts` (r:1 w:0) - /// Proof: `Alliance::UnscrupulousAccounts` (`max_values`: Some(1), `max_size`: Some(3202), added: 3697, mode: `MaxEncodedLen`) - /// Storage: `System::Account` (r:1 w:1) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - /// Storage: `Alliance::DepositOf` (r:0 w:1) - /// Proof: `Alliance::DepositOf` (`max_values`: None, `max_size`: Some(64), added: 2539, mode: `MaxEncodedLen`) - fn join_alliance() -> Weight { - // Proof Size summary in bytes: - // Measured: `294` - // Estimated: `18048` - // Minimum execution time: 41_517_000 picoseconds. 
- Weight::from_parts(42_433_000, 0) - .saturating_add(Weight::from_parts(0, 18048)) - .saturating_add(T::DbWeight::get().reads(5)) - .saturating_add(T::DbWeight::get().writes(3)) - } - /// Storage: `Alliance::Members` (r:3 w:1) - /// Proof: `Alliance::Members` (`max_values`: None, `max_size`: Some(3211), added: 5686, mode: `MaxEncodedLen`) - /// Storage: `Alliance::UnscrupulousAccounts` (r:1 w:0) - /// Proof: `Alliance::UnscrupulousAccounts` (`max_values`: Some(1), `max_size`: Some(3202), added: 3697, mode: `MaxEncodedLen`) - fn nominate_ally() -> Weight { - // Proof Size summary in bytes: - // Measured: `193` - // Estimated: `18048` - // Minimum execution time: 25_950_000 picoseconds. - Weight::from_parts(26_631_000, 0) - .saturating_add(Weight::from_parts(0, 18048)) - .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `Alliance::Members` (r:2 w:2) - /// Proof: `Alliance::Members` (`max_values`: None, `max_size`: Some(3211), added: 5686, mode: `MaxEncodedLen`) - /// Storage: `AllianceMotion::Proposals` (r:1 w:0) - /// Proof: `AllianceMotion::Proposals` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `AllianceMotion::Members` (r:0 w:1) - /// Proof: `AllianceMotion::Members` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `AllianceMotion::Prime` (r:0 w:1) - /// Proof: `AllianceMotion::Prime` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - fn elevate_ally() -> Weight { - // Proof Size summary in bytes: - // Measured: `236` - // Estimated: `12362` - // Minimum execution time: 24_470_000 picoseconds. - Weight::from_parts(25_222_000, 0) - .saturating_add(Weight::from_parts(0, 12362)) - .saturating_add(T::DbWeight::get().reads(3)) - .saturating_add(T::DbWeight::get().writes(4)) - } - /// Storage: `Alliance::Members` (r:4 w:2) - /// Proof: `Alliance::Members` (`max_values`: None, `max_size`: Some(3211), added: 5686, mode: `MaxEncodedLen`) - /// Storage: `AllianceMotion::Proposals` (r:1 w:0) - /// Proof: `AllianceMotion::Proposals` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `AllianceMotion::Members` (r:0 w:1) - /// Proof: `AllianceMotion::Members` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `AllianceMotion::Prime` (r:0 w:1) - /// Proof: `AllianceMotion::Prime` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `Alliance::RetiringMembers` (r:0 w:1) - /// Proof: `Alliance::RetiringMembers` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) - fn give_retirement_notice() -> Weight { - // Proof Size summary in bytes: - // Measured: `236` - // Estimated: `23734` - // Minimum execution time: 31_519_000 picoseconds. 
- Weight::from_parts(32_827_000, 0) - .saturating_add(Weight::from_parts(0, 23734)) - .saturating_add(T::DbWeight::get().reads(5)) - .saturating_add(T::DbWeight::get().writes(5)) - } - /// Storage: `Alliance::RetiringMembers` (r:1 w:1) - /// Proof: `Alliance::RetiringMembers` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) - /// Storage: `Alliance::Members` (r:1 w:1) - /// Proof: `Alliance::Members` (`max_values`: None, `max_size`: Some(3211), added: 5686, mode: `MaxEncodedLen`) - /// Storage: `Alliance::DepositOf` (r:1 w:1) - /// Proof: `Alliance::DepositOf` (`max_values`: None, `max_size`: Some(64), added: 2539, mode: `MaxEncodedLen`) - /// Storage: `System::Account` (r:1 w:1) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - fn retire() -> Weight { - // Proof Size summary in bytes: - // Measured: `517` - // Estimated: `6676` - // Minimum execution time: 38_799_000 picoseconds. - Weight::from_parts(39_634_000, 0) - .saturating_add(Weight::from_parts(0, 6676)) - .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(4)) - } - /// Storage: `Alliance::Members` (r:3 w:1) - /// Proof: `Alliance::Members` (`max_values`: None, `max_size`: Some(3211), added: 5686, mode: `MaxEncodedLen`) - /// Storage: `AllianceMotion::Proposals` (r:1 w:0) - /// Proof: `AllianceMotion::Proposals` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `Alliance::DepositOf` (r:1 w:1) - /// Proof: `Alliance::DepositOf` (`max_values`: None, `max_size`: Some(64), added: 2539, mode: `MaxEncodedLen`) - /// Storage: `System::Account` (r:2 w:2) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - /// Storage: `ParachainInfo::ParachainId` (r:1 w:0) - /// Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) - /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) - /// Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) - /// Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) - /// Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) - /// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `AllianceMotion::Members` (r:0 w:1) - /// Proof: `AllianceMotion::Members` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `AllianceMotion::Prime` (r:0 w:1) - /// Proof: `AllianceMotion::Prime` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - fn kick_member() -> Weight { - // Proof Size summary in bytes: - // Measured: `643` - // Estimated: `18048` - // Minimum execution time: 137_442_000 picoseconds. 
- Weight::from_parts(142_142_000, 0) - .saturating_add(Weight::from_parts(0, 18048)) - .saturating_add(T::DbWeight::get().reads(13)) - .saturating_add(T::DbWeight::get().writes(8)) - } - /// Storage: `Alliance::UnscrupulousAccounts` (r:1 w:1) - /// Proof: `Alliance::UnscrupulousAccounts` (`max_values`: Some(1), `max_size`: Some(3202), added: 3697, mode: `MaxEncodedLen`) - /// Storage: `Alliance::UnscrupulousWebsites` (r:1 w:1) - /// Proof: `Alliance::UnscrupulousWebsites` (`max_values`: Some(1), `max_size`: Some(25702), added: 26197, mode: `MaxEncodedLen`) - /// The range of component `n` is `[0, 100]`. - /// The range of component `l` is `[0, 255]`. - fn add_unscrupulous_items(n: u32, l: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `76` - // Estimated: `27187` - // Minimum execution time: 7_189_000 picoseconds. - Weight::from_parts(7_387_000, 0) - .saturating_add(Weight::from_parts(0, 27187)) - // Standard Error: 3_417 - .saturating_add(Weight::from_parts(1_581_413, 0).saturating_mul(n.into())) - // Standard Error: 1_338 - .saturating_add(Weight::from_parts(67_739, 0).saturating_mul(l.into())) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `Alliance::UnscrupulousAccounts` (r:1 w:1) - /// Proof: `Alliance::UnscrupulousAccounts` (`max_values`: Some(1), `max_size`: Some(3202), added: 3697, mode: `MaxEncodedLen`) - /// Storage: `Alliance::UnscrupulousWebsites` (r:1 w:1) - /// Proof: `Alliance::UnscrupulousWebsites` (`max_values`: Some(1), `max_size`: Some(25702), added: 26197, mode: `MaxEncodedLen`) - /// The range of component `n` is `[0, 100]`. - /// The range of component `l` is `[0, 255]`. - fn remove_unscrupulous_items(n: u32, l: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `0 + l * (100 ±0) + n * (289 ±0)` - // Estimated: `27187` - // Minimum execution time: 7_201_000 picoseconds. - Weight::from_parts(7_325_000, 0) - .saturating_add(Weight::from_parts(0, 27187)) - // Standard Error: 183_302 - .saturating_add(Weight::from_parts(16_886_382, 0).saturating_mul(n.into())) - // Standard Error: 71_789 - .saturating_add(Weight::from_parts(352_937, 0).saturating_mul(l.into())) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `Alliance::Members` (r:3 w:2) - /// Proof: `Alliance::Members` (`max_values`: None, `max_size`: Some(3211), added: 5686, mode: `MaxEncodedLen`) - /// Storage: `AllianceMotion::Proposals` (r:1 w:0) - /// Proof: `AllianceMotion::Proposals` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `AllianceMotion::Members` (r:0 w:1) - /// Proof: `AllianceMotion::Members` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `AllianceMotion::Prime` (r:0 w:1) - /// Proof: `AllianceMotion::Prime` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - fn abdicate_fellow_status() -> Weight { - // Proof Size summary in bytes: - // Measured: `236` - // Estimated: `18048` - // Minimum execution time: 29_653_000 picoseconds. 
- Weight::from_parts(30_365_000, 0) - .saturating_add(Weight::from_parts(0, 18048)) - .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(4)) - } -} diff --git a/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/pallet_balances.rs b/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/pallet_balances.rs deleted file mode 100644 index 6c1cf072257..00000000000 --- a/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/pallet_balances.rs +++ /dev/null @@ -1,152 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! Autogenerated weights for `pallet_balances` -//! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-07-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-ynta1nyy-project-238-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! EXECUTION: ``, WASM-EXECUTION: `Compiled`, CHAIN: `Some("collectives-polkadot-dev")`, DB CACHE: 1024 - -// Executed Command: -// ./target/production/polkadot-parachain -// benchmark -// pallet -// --chain=collectives-polkadot-dev -// --wasm-execution=compiled -// --pallet=pallet_balances -// --no-storage-info -// --no-median-slopes -// --no-min-squares -// --extrinsic=* -// --steps=50 -// --repeat=20 -// --json -// --header=./file_header.txt -// --output=./parachains/runtimes/collectives/collectives-polkadot/src/weights/ - -#![cfg_attr(rustfmt, rustfmt_skip)] -#![allow(unused_parens)] -#![allow(unused_imports)] -#![allow(missing_docs)] - -use frame_support::{traits::Get, weights::Weight}; -use core::marker::PhantomData; - -/// Weight functions for `pallet_balances`. -pub struct WeightInfo(PhantomData); -impl pallet_balances::WeightInfo for WeightInfo { - /// Storage: `System::Account` (r:1 w:1) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - fn transfer_allow_death() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `3593` - // Minimum execution time: 55_696_000 picoseconds. - Weight::from_parts(56_582_000, 0) - .saturating_add(Weight::from_parts(0, 3593)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `System::Account` (r:1 w:1) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - fn transfer_keep_alive() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `3593` - // Minimum execution time: 40_885_000 picoseconds. 
- Weight::from_parts(41_993_000, 0) - .saturating_add(Weight::from_parts(0, 3593)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `System::Account` (r:1 w:1) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - fn force_set_balance_creating() -> Weight { - // Proof Size summary in bytes: - // Measured: `103` - // Estimated: `3593` - // Minimum execution time: 14_565_000 picoseconds. - Weight::from_parts(15_080_000, 0) - .saturating_add(Weight::from_parts(0, 3593)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `System::Account` (r:1 w:1) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - fn force_set_balance_killing() -> Weight { - // Proof Size summary in bytes: - // Measured: `103` - // Estimated: `3593` - // Minimum execution time: 22_158_000 picoseconds. - Weight::from_parts(22_715_000, 0) - .saturating_add(Weight::from_parts(0, 3593)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `System::Account` (r:2 w:2) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - fn force_transfer() -> Weight { - // Proof Size summary in bytes: - // Measured: `103` - // Estimated: `6196` - // Minimum execution time: 57_957_000 picoseconds. - Weight::from_parts(58_618_000, 0) - .saturating_add(Weight::from_parts(0, 6196)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `System::Account` (r:1 w:1) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - fn transfer_all() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `3593` - // Minimum execution time: 52_018_000 picoseconds. - Weight::from_parts(52_795_000, 0) - .saturating_add(Weight::from_parts(0, 3593)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `System::Account` (r:1 w:1) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - fn force_unreserve() -> Weight { - // Proof Size summary in bytes: - // Measured: `103` - // Estimated: `3593` - // Minimum execution time: 17_469_000 picoseconds. - Weight::from_parts(18_030_000, 0) - .saturating_add(Weight::from_parts(0, 3593)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `System::Account` (r:999 w:999) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - /// The range of component `u` is `[1, 1000]`. - fn upgrade_accounts(u: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `0 + u * (136 ±0)` - // Estimated: `990 + u * (2603 ±0)` - // Minimum execution time: 17_223_000 picoseconds. 
- Weight::from_parts(17_587_000, 0) - .saturating_add(Weight::from_parts(0, 990)) - // Standard Error: 16_201 - .saturating_add(Weight::from_parts(15_360_967, 0).saturating_mul(u.into())) - .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(u.into()))) - .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(u.into()))) - .saturating_add(Weight::from_parts(0, 2603).saturating_mul(u.into())) - } -} diff --git a/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/pallet_collator_selection.rs b/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/pallet_collator_selection.rs deleted file mode 100644 index 03f3ff602a5..00000000000 --- a/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/pallet_collator_selection.rs +++ /dev/null @@ -1,246 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! Autogenerated weights for `pallet_collator_selection` -//! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-07-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-ynta1nyy-project-238-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! EXECUTION: ``, WASM-EXECUTION: `Compiled`, CHAIN: `Some("collectives-polkadot-dev")`, DB CACHE: 1024 - -// Executed Command: -// ./target/production/polkadot-parachain -// benchmark -// pallet -// --chain=collectives-polkadot-dev -// --wasm-execution=compiled -// --pallet=pallet_collator_selection -// --no-storage-info -// --no-median-slopes -// --no-min-squares -// --extrinsic=* -// --steps=50 -// --repeat=20 -// --json -// --header=./file_header.txt -// --output=./parachains/runtimes/collectives/collectives-polkadot/src/weights/ - -#![cfg_attr(rustfmt, rustfmt_skip)] -#![allow(unused_parens)] -#![allow(unused_imports)] -#![allow(missing_docs)] - -use frame_support::{traits::Get, weights::Weight}; -use core::marker::PhantomData; - -/// Weight functions for `pallet_collator_selection`. -pub struct WeightInfo(PhantomData); -impl pallet_collator_selection::WeightInfo for WeightInfo { - /// Storage: `Session::NextKeys` (r:20 w:0) - /// Proof: `Session::NextKeys` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// Storage: `CollatorSelection::Invulnerables` (r:0 w:1) - /// Proof: `CollatorSelection::Invulnerables` (`max_values`: Some(1), `max_size`: Some(641), added: 1136, mode: `MaxEncodedLen`) - /// The range of component `b` is `[1, 20]`. - fn set_invulnerables(b: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `163 + b * (79 ±0)` - // Estimated: `1154 + b * (2555 ±0)` - // Minimum execution time: 14_616_000 picoseconds. 
- Weight::from_parts(12_150_410, 0) - .saturating_add(Weight::from_parts(0, 1154)) - // Standard Error: 6_270 - .saturating_add(Weight::from_parts(3_256_932, 0).saturating_mul(b.into())) - .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(b.into()))) - .saturating_add(T::DbWeight::get().writes(1)) - .saturating_add(Weight::from_parts(0, 2555).saturating_mul(b.into())) - } - /// Storage: `Session::NextKeys` (r:1 w:0) - /// Proof: `Session::NextKeys` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// Storage: `CollatorSelection::Invulnerables` (r:1 w:1) - /// Proof: `CollatorSelection::Invulnerables` (`max_values`: Some(1), `max_size`: Some(641), added: 1136, mode: `MaxEncodedLen`) - /// Storage: `CollatorSelection::Candidates` (r:1 w:1) - /// Proof: `CollatorSelection::Candidates` (`max_values`: Some(1), `max_size`: Some(4802), added: 5297, mode: `MaxEncodedLen`) - /// Storage: `System::Account` (r:1 w:1) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - /// The range of component `b` is `[1, 19]`. - /// The range of component `c` is `[1, 99]`. - fn add_invulnerable(b: u32, c: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `756 + b * (32 ±0) + c * (53 ±0)` - // Estimated: `6287 + b * (37 ±0) + c * (53 ±0)` - // Minimum execution time: 48_450_000 picoseconds. - Weight::from_parts(51_166_679, 0) - .saturating_add(Weight::from_parts(0, 6287)) - // Standard Error: 2_588 - .saturating_add(Weight::from_parts(167_219, 0).saturating_mul(c.into())) - .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(3)) - .saturating_add(Weight::from_parts(0, 37).saturating_mul(b.into())) - .saturating_add(Weight::from_parts(0, 53).saturating_mul(c.into())) - } - /// Storage: `CollatorSelection::Candidates` (r:1 w:0) - /// Proof: `CollatorSelection::Candidates` (`max_values`: Some(1), `max_size`: Some(4802), added: 5297, mode: `MaxEncodedLen`) - /// Storage: `CollatorSelection::Invulnerables` (r:1 w:1) - /// Proof: `CollatorSelection::Invulnerables` (`max_values`: Some(1), `max_size`: Some(641), added: 1136, mode: `MaxEncodedLen`) - /// The range of component `b` is `[5, 20]`. - fn remove_invulnerable(b: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `119 + b * (32 ±0)` - // Estimated: `6287` - // Minimum execution time: 15_830_000 picoseconds. - Weight::from_parts(15_792_847, 0) - .saturating_add(Weight::from_parts(0, 6287)) - // Standard Error: 5_343 - .saturating_add(Weight::from_parts(167_955, 0).saturating_mul(b.into())) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `CollatorSelection::DesiredCandidates` (r:0 w:1) - /// Proof: `CollatorSelection::DesiredCandidates` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - fn set_desired_candidates() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 7_424_000 picoseconds. 
- Weight::from_parts(7_767_000, 0) - .saturating_add(Weight::from_parts(0, 0)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `CollatorSelection::CandidacyBond` (r:0 w:1) - /// Proof: `CollatorSelection::CandidacyBond` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) - fn set_candidacy_bond(_c: u32, _k: u32) -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 7_388_000 picoseconds. - Weight::from_parts(7_677_000, 0) - .saturating_add(Weight::from_parts(0, 0)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `CollatorSelection::Candidates` (r:1 w:1) - /// Proof: `CollatorSelection::Candidates` (`max_values`: Some(1), `max_size`: Some(4802), added: 5297, mode: `MaxEncodedLen`) - /// Storage: `CollatorSelection::DesiredCandidates` (r:1 w:0) - /// Proof: `CollatorSelection::DesiredCandidates` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - /// Storage: `CollatorSelection::Invulnerables` (r:1 w:0) - /// Proof: `CollatorSelection::Invulnerables` (`max_values`: Some(1), `max_size`: Some(641), added: 1136, mode: `MaxEncodedLen`) - /// Storage: `Session::NextKeys` (r:1 w:0) - /// Proof: `Session::NextKeys` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// Storage: `CollatorSelection::CandidacyBond` (r:1 w:0) - /// Proof: `CollatorSelection::CandidacyBond` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) - /// Storage: `CollatorSelection::LastAuthoredBlock` (r:0 w:1) - /// Proof: `CollatorSelection::LastAuthoredBlock` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) - /// The range of component `c` is `[1, 99]`. - fn register_as_candidate(c: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `736 + c * (52 ±0)` - // Estimated: `6287 + c * (54 ±0)` - // Minimum execution time: 41_241_000 picoseconds. - Weight::from_parts(46_090_319, 0) - .saturating_add(Weight::from_parts(0, 6287)) - // Standard Error: 2_918 - .saturating_add(Weight::from_parts(161_140, 0).saturating_mul(c.into())) - .saturating_add(T::DbWeight::get().reads(5)) - .saturating_add(T::DbWeight::get().writes(2)) - .saturating_add(Weight::from_parts(0, 54).saturating_mul(c.into())) - } - /// Storage: `CollatorSelection::Candidates` (r:1 w:1) - /// Proof: `CollatorSelection::Candidates` (`max_values`: Some(1), `max_size`: Some(4802), added: 5297, mode: `MaxEncodedLen`) - /// Storage: `CollatorSelection::Invulnerables` (r:1 w:0) - /// Proof: `CollatorSelection::Invulnerables` (`max_values`: Some(1), `max_size`: Some(641), added: 1136, mode: `MaxEncodedLen`) - /// Storage: `CollatorSelection::LastAuthoredBlock` (r:0 w:1) - /// Proof: `CollatorSelection::LastAuthoredBlock` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) - /// The range of component `c` is `[4, 100]`. - fn leave_intent(c: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `306 + c * (50 ±0)` - // Estimated: `6287` - // Minimum execution time: 34_221_000 picoseconds. 
- Weight::from_parts(36_183_872, 0) - .saturating_add(Weight::from_parts(0, 6287)) - // Standard Error: 2_766 - .saturating_add(Weight::from_parts(168_742, 0).saturating_mul(c.into())) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(2)) - } - fn update_bond(c: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `306 + c * (50 ±0)` - // Estimated: `6287` - // Minimum execution time: 34_814_000 picoseconds. - Weight::from_parts(36_371_520, 0) - .saturating_add(Weight::from_parts(0, 6287)) - // Standard Error: 2_391 - .saturating_add(Weight::from_parts(201_700, 0).saturating_mul(c.into())) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(2)) - } - fn take_candidate_slot(c: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `306 + c * (50 ±0)` - // Estimated: `6287` - // Minimum execution time: 34_814_000 picoseconds. - Weight::from_parts(36_371_520, 0) - .saturating_add(Weight::from_parts(0, 6287)) - // Standard Error: 2_391 - .saturating_add(Weight::from_parts(201_700, 0).saturating_mul(c.into())) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `System::Account` (r:2 w:2) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - /// Storage: `System::BlockWeight` (r:1 w:1) - /// Proof: `System::BlockWeight` (`max_values`: Some(1), `max_size`: Some(48), added: 543, mode: `MaxEncodedLen`) - /// Storage: `CollatorSelection::LastAuthoredBlock` (r:0 w:1) - /// Proof: `CollatorSelection::LastAuthoredBlock` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) - fn note_author() -> Weight { - // Proof Size summary in bytes: - // Measured: `103` - // Estimated: `6196` - // Minimum execution time: 43_910_000 picoseconds. - Weight::from_parts(44_796_000, 0) - .saturating_add(Weight::from_parts(0, 6196)) - .saturating_add(T::DbWeight::get().reads(3)) - .saturating_add(T::DbWeight::get().writes(4)) - } - /// Storage: `CollatorSelection::Candidates` (r:1 w:0) - /// Proof: `CollatorSelection::Candidates` (`max_values`: Some(1), `max_size`: Some(4802), added: 5297, mode: `MaxEncodedLen`) - /// Storage: `CollatorSelection::LastAuthoredBlock` (r:100 w:0) - /// Proof: `CollatorSelection::LastAuthoredBlock` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) - /// Storage: `CollatorSelection::Invulnerables` (r:1 w:0) - /// Proof: `CollatorSelection::Invulnerables` (`max_values`: Some(1), `max_size`: Some(641), added: 1136, mode: `MaxEncodedLen`) - /// Storage: `System::BlockWeight` (r:1 w:1) - /// Proof: `System::BlockWeight` (`max_values`: Some(1), `max_size`: Some(48), added: 543, mode: `MaxEncodedLen`) - /// Storage: `System::Account` (r:97 w:97) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - /// The range of component `r` is `[1, 100]`. - /// The range of component `c` is `[1, 100]`. - fn new_session(r: u32, c: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `2243 + c * (97 ±0) + r * (112 ±0)` - // Estimated: `6287 + c * (2519 ±0) + r * (2603 ±0)` - // Minimum execution time: 17_092_000 picoseconds. 
- Weight::from_parts(17_635_000, 0) - .saturating_add(Weight::from_parts(0, 6287)) - // Standard Error: 351_635 - .saturating_add(Weight::from_parts(15_162_192, 0).saturating_mul(c.into())) - .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(c.into()))) - .saturating_add(T::DbWeight::get().writes(1)) - .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(c.into()))) - .saturating_add(Weight::from_parts(0, 2519).saturating_mul(c.into())) - .saturating_add(Weight::from_parts(0, 2603).saturating_mul(r.into())) - } -} diff --git a/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/pallet_collective.rs b/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/pallet_collective.rs deleted file mode 100644 index 9133baa6120..00000000000 --- a/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/pallet_collective.rs +++ /dev/null @@ -1,304 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! Autogenerated weights for `pallet_collective` -//! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-07-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-ynta1nyy-project-238-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! EXECUTION: ``, WASM-EXECUTION: `Compiled`, CHAIN: `Some("collectives-polkadot-dev")`, DB CACHE: 1024 - -// Executed Command: -// ./target/production/polkadot-parachain -// benchmark -// pallet -// --chain=collectives-polkadot-dev -// --wasm-execution=compiled -// --pallet=pallet_collective -// --no-storage-info -// --no-median-slopes -// --no-min-squares -// --extrinsic=* -// --steps=50 -// --repeat=20 -// --json -// --header=./file_header.txt -// --output=./parachains/runtimes/collectives/collectives-polkadot/src/weights/ - -#![cfg_attr(rustfmt, rustfmt_skip)] -#![allow(unused_parens)] -#![allow(unused_imports)] -#![allow(missing_docs)] - -use frame_support::{traits::Get, weights::Weight}; -use core::marker::PhantomData; - -/// Weight functions for `pallet_collective`. -pub struct WeightInfo(PhantomData); -impl pallet_collective::WeightInfo for WeightInfo { - /// Storage: `AllianceMotion::Members` (r:1 w:1) - /// Proof: `AllianceMotion::Members` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `AllianceMotion::Proposals` (r:1 w:0) - /// Proof: `AllianceMotion::Proposals` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `AllianceMotion::Voting` (r:100 w:100) - /// Proof: `AllianceMotion::Voting` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// Storage: `AllianceMotion::Prime` (r:0 w:1) - /// Proof: `AllianceMotion::Prime` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// The range of component `m` is `[0, 100]`. 
- /// The range of component `n` is `[0, 100]`. - /// The range of component `p` is `[0, 100]`. - fn set_members(m: u32, _n: u32, p: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `0 + m * (3232 ±0) + p * (3190 ±0)` - // Estimated: `15691 + m * (1967 ±23) + p * (4332 ±23)` - // Minimum execution time: 16_410_000 picoseconds. - Weight::from_parts(16_816_000, 0) - .saturating_add(Weight::from_parts(0, 15691)) - // Standard Error: 59_812 - .saturating_add(Weight::from_parts(4_516_537, 0).saturating_mul(m.into())) - // Standard Error: 59_812 - .saturating_add(Weight::from_parts(7_992_168, 0).saturating_mul(p.into())) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(p.into()))) - .saturating_add(T::DbWeight::get().writes(2)) - .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(p.into()))) - .saturating_add(Weight::from_parts(0, 1967).saturating_mul(m.into())) - .saturating_add(Weight::from_parts(0, 4332).saturating_mul(p.into())) - } - /// Storage: `AllianceMotion::Members` (r:1 w:0) - /// Proof: `AllianceMotion::Members` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// The range of component `b` is `[2, 1024]`. - /// The range of component `m` is `[1, 100]`. - fn execute(b: u32, m: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `32 + m * (32 ±0)` - // Estimated: `1518 + m * (32 ±0)` - // Minimum execution time: 14_418_000 picoseconds. - Weight::from_parts(13_588_617, 0) - .saturating_add(Weight::from_parts(0, 1518)) - // Standard Error: 21 - .saturating_add(Weight::from_parts(1_711, 0).saturating_mul(b.into())) - // Standard Error: 223 - .saturating_add(Weight::from_parts(13_836, 0).saturating_mul(m.into())) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(Weight::from_parts(0, 32).saturating_mul(m.into())) - } - /// Storage: `AllianceMotion::Members` (r:1 w:0) - /// Proof: `AllianceMotion::Members` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `AllianceMotion::ProposalOf` (r:1 w:0) - /// Proof: `AllianceMotion::ProposalOf` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// The range of component `b` is `[2, 1024]`. - /// The range of component `m` is `[1, 100]`. - fn propose_execute(b: u32, m: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `32 + m * (32 ±0)` - // Estimated: `3498 + m * (32 ±0)` - // Minimum execution time: 17_174_000 picoseconds. 
- Weight::from_parts(16_192_764, 0) - .saturating_add(Weight::from_parts(0, 3498)) - // Standard Error: 27 - .saturating_add(Weight::from_parts(1_672, 0).saturating_mul(b.into())) - // Standard Error: 280 - .saturating_add(Weight::from_parts(24_343, 0).saturating_mul(m.into())) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(Weight::from_parts(0, 32).saturating_mul(m.into())) - } - /// Storage: `AllianceMotion::Members` (r:1 w:0) - /// Proof: `AllianceMotion::Members` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `AllianceMotion::ProposalOf` (r:1 w:1) - /// Proof: `AllianceMotion::ProposalOf` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// Storage: `AllianceMotion::Proposals` (r:1 w:1) - /// Proof: `AllianceMotion::Proposals` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `AllianceMotion::ProposalCount` (r:1 w:1) - /// Proof: `AllianceMotion::ProposalCount` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `AllianceMotion::Voting` (r:0 w:1) - /// Proof: `AllianceMotion::Voting` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// The range of component `b` is `[2, 1024]`. - /// The range of component `m` is `[2, 100]`. - /// The range of component `p` is `[1, 100]`. - fn propose_proposed(b: u32, m: u32, p: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `322 + m * (32 ±0) + p * (36 ±0)` - // Estimated: `3714 + m * (33 ±0) + p * (36 ±0)` - // Minimum execution time: 23_970_000 picoseconds. - Weight::from_parts(23_004_052, 0) - .saturating_add(Weight::from_parts(0, 3714)) - // Standard Error: 123 - .saturating_add(Weight::from_parts(2_728, 0).saturating_mul(b.into())) - // Standard Error: 1_291 - .saturating_add(Weight::from_parts(32_731, 0).saturating_mul(m.into())) - // Standard Error: 1_275 - .saturating_add(Weight::from_parts(199_537, 0).saturating_mul(p.into())) - .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(4)) - .saturating_add(Weight::from_parts(0, 33).saturating_mul(m.into())) - .saturating_add(Weight::from_parts(0, 36).saturating_mul(p.into())) - } - /// Storage: `AllianceMotion::Members` (r:1 w:0) - /// Proof: `AllianceMotion::Members` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `AllianceMotion::Voting` (r:1 w:1) - /// Proof: `AllianceMotion::Voting` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// The range of component `m` is `[5, 100]`. - fn vote(m: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `771 + m * (64 ±0)` - // Estimated: `4235 + m * (64 ±0)` - // Minimum execution time: 25_843_000 picoseconds. 
- Weight::from_parts(26_092_578, 0) - .saturating_add(Weight::from_parts(0, 4235)) - // Standard Error: 1_785 - .saturating_add(Weight::from_parts(67_298, 0).saturating_mul(m.into())) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(1)) - .saturating_add(Weight::from_parts(0, 64).saturating_mul(m.into())) - } - /// Storage: `AllianceMotion::Voting` (r:1 w:1) - /// Proof: `AllianceMotion::Voting` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// Storage: `AllianceMotion::Members` (r:1 w:0) - /// Proof: `AllianceMotion::Members` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `AllianceMotion::Proposals` (r:1 w:1) - /// Proof: `AllianceMotion::Proposals` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `AllianceMotion::ProposalOf` (r:0 w:1) - /// Proof: `AllianceMotion::ProposalOf` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// The range of component `m` is `[4, 100]`. - /// The range of component `p` is `[1, 100]`. - fn close_early_disapproved(m: u32, p: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `360 + m * (64 ±0) + p * (36 ±0)` - // Estimated: `3805 + m * (65 ±0) + p * (36 ±0)` - // Minimum execution time: 27_543_000 picoseconds. - Weight::from_parts(26_505_473, 0) - .saturating_add(Weight::from_parts(0, 3805)) - // Standard Error: 1_054 - .saturating_add(Weight::from_parts(35_295, 0).saturating_mul(m.into())) - // Standard Error: 1_028 - .saturating_add(Weight::from_parts(190_508, 0).saturating_mul(p.into())) - .saturating_add(T::DbWeight::get().reads(3)) - .saturating_add(T::DbWeight::get().writes(3)) - .saturating_add(Weight::from_parts(0, 65).saturating_mul(m.into())) - .saturating_add(Weight::from_parts(0, 36).saturating_mul(p.into())) - } - /// Storage: `AllianceMotion::Voting` (r:1 w:1) - /// Proof: `AllianceMotion::Voting` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// Storage: `AllianceMotion::Members` (r:1 w:0) - /// Proof: `AllianceMotion::Members` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `AllianceMotion::ProposalOf` (r:1 w:1) - /// Proof: `AllianceMotion::ProposalOf` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// Storage: `AllianceMotion::Proposals` (r:1 w:1) - /// Proof: `AllianceMotion::Proposals` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// The range of component `b` is `[2, 1024]`. - /// The range of component `m` is `[4, 100]`. - /// The range of component `p` is `[1, 100]`. - fn close_early_approved(b: u32, m: u32, p: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `662 + b * (1 ±0) + m * (64 ±0) + p * (40 ±0)` - // Estimated: `3979 + b * (1 ±0) + m * (66 ±0) + p * (40 ±0)` - // Minimum execution time: 40_375_000 picoseconds. 
- Weight::from_parts(34_081_294, 0) - .saturating_add(Weight::from_parts(0, 3979)) - // Standard Error: 196 - .saturating_add(Weight::from_parts(3_796, 0).saturating_mul(b.into())) - // Standard Error: 2_072 - .saturating_add(Weight::from_parts(50_954, 0).saturating_mul(m.into())) - // Standard Error: 2_020 - .saturating_add(Weight::from_parts(246_000, 0).saturating_mul(p.into())) - .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(3)) - .saturating_add(Weight::from_parts(0, 1).saturating_mul(b.into())) - .saturating_add(Weight::from_parts(0, 66).saturating_mul(m.into())) - .saturating_add(Weight::from_parts(0, 40).saturating_mul(p.into())) - } - /// Storage: `AllianceMotion::Voting` (r:1 w:1) - /// Proof: `AllianceMotion::Voting` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// Storage: `AllianceMotion::Members` (r:1 w:0) - /// Proof: `AllianceMotion::Members` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `AllianceMotion::Prime` (r:1 w:0) - /// Proof: `AllianceMotion::Prime` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `AllianceMotion::Proposals` (r:1 w:1) - /// Proof: `AllianceMotion::Proposals` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `AllianceMotion::ProposalOf` (r:0 w:1) - /// Proof: `AllianceMotion::ProposalOf` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// The range of component `m` is `[4, 100]`. - /// The range of component `p` is `[1, 100]`. - fn close_disapproved(m: u32, p: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `458 + m * (48 ±0) + p * (36 ±0)` - // Estimated: `3898 + m * (49 ±0) + p * (36 ±0)` - // Minimum execution time: 28_793_000 picoseconds. - Weight::from_parts(29_656_832, 0) - .saturating_add(Weight::from_parts(0, 3898)) - // Standard Error: 1_214 - .saturating_add(Weight::from_parts(22_148, 0).saturating_mul(m.into())) - // Standard Error: 1_184 - .saturating_add(Weight::from_parts(189_860, 0).saturating_mul(p.into())) - .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(3)) - .saturating_add(Weight::from_parts(0, 49).saturating_mul(m.into())) - .saturating_add(Weight::from_parts(0, 36).saturating_mul(p.into())) - } - /// Storage: `AllianceMotion::Voting` (r:1 w:1) - /// Proof: `AllianceMotion::Voting` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// Storage: `AllianceMotion::Members` (r:1 w:0) - /// Proof: `AllianceMotion::Members` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `AllianceMotion::Prime` (r:1 w:0) - /// Proof: `AllianceMotion::Prime` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `AllianceMotion::ProposalOf` (r:1 w:1) - /// Proof: `AllianceMotion::ProposalOf` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// Storage: `AllianceMotion::Proposals` (r:1 w:1) - /// Proof: `AllianceMotion::Proposals` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// The range of component `b` is `[2, 1024]`. - /// The range of component `m` is `[4, 100]`. - /// The range of component `p` is `[1, 100]`. - fn close_approved(b: u32, m: u32, p: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `682 + b * (1 ±0) + m * (64 ±0) + p * (40 ±0)` - // Estimated: `3999 + b * (1 ±0) + m * (66 ±0) + p * (40 ±0)` - // Minimum execution time: 40_887_000 picoseconds. 
- Weight::from_parts(39_529_567, 0) - .saturating_add(Weight::from_parts(0, 3999)) - // Standard Error: 191 - .saturating_add(Weight::from_parts(2_802, 0).saturating_mul(b.into())) - // Standard Error: 2_021 - .saturating_add(Weight::from_parts(35_956, 0).saturating_mul(m.into())) - // Standard Error: 1_970 - .saturating_add(Weight::from_parts(235_154, 0).saturating_mul(p.into())) - .saturating_add(T::DbWeight::get().reads(5)) - .saturating_add(T::DbWeight::get().writes(3)) - .saturating_add(Weight::from_parts(0, 1).saturating_mul(b.into())) - .saturating_add(Weight::from_parts(0, 66).saturating_mul(m.into())) - .saturating_add(Weight::from_parts(0, 40).saturating_mul(p.into())) - } - /// Storage: `AllianceMotion::Proposals` (r:1 w:1) - /// Proof: `AllianceMotion::Proposals` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `AllianceMotion::Voting` (r:0 w:1) - /// Proof: `AllianceMotion::Voting` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// Storage: `AllianceMotion::ProposalOf` (r:0 w:1) - /// Proof: `AllianceMotion::ProposalOf` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// The range of component `p` is `[1, 100]`. - fn disapprove_proposal(p: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `189 + p * (32 ±0)` - // Estimated: `1674 + p * (32 ±0)` - // Minimum execution time: 14_040_000 picoseconds. - Weight::from_parts(15_075_964, 0) - .saturating_add(Weight::from_parts(0, 1674)) - // Standard Error: 854 - .saturating_add(Weight::from_parts(159_597, 0).saturating_mul(p.into())) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(3)) - .saturating_add(Weight::from_parts(0, 32).saturating_mul(p.into())) - } -} diff --git a/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/pallet_collective_content.rs b/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/pallet_collective_content.rs deleted file mode 100644 index 6be94db22db..00000000000 --- a/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/pallet_collective_content.rs +++ /dev/null @@ -1,93 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! Autogenerated weights for `pallet_collective_content` -//! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-08-18, STEPS: `10`, REPEAT: `3`, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `cob`, CPU: `` -//! 
WASM-EXECUTION: `Compiled`, CHAIN: `Some("collectives-polkadot-dev")`, DB CACHE: 1024 - -// Executed Command: -// ./target/debug/polkadot-parachain -// benchmark -// pallet -// --chain=collectives-polkadot-dev -// --steps=10 -// --repeat=3 -// --pallet=pallet_collective_content -// --extrinsic=* -// --wasm-execution=compiled -// --heap-pages=4096 -// --output=./parachains/runtimes/collectives/collectives-polkadot/src/weights/ - -#![cfg_attr(rustfmt, rustfmt_skip)] -#![allow(unused_parens)] -#![allow(unused_imports)] -#![allow(missing_docs)] - -use frame_support::{traits::Get, weights::Weight}; -use core::marker::PhantomData; - -/// Weight functions for `pallet_collective_content`. -pub struct WeightInfo(PhantomData); -impl pallet_collective_content::WeightInfo for WeightInfo { - /// Storage: `AmbassadorContent::Charter` (r:0 w:1) - /// Proof: `AmbassadorContent::Charter` (`max_values`: Some(1), `max_size`: Some(70), added: 565, mode: `MaxEncodedLen`) - fn set_charter() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 99_000_000 picoseconds. - Weight::from_parts(99_000_000, 0) - .saturating_add(Weight::from_parts(0, 0)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `AmbassadorCollective::Members` (r:1 w:0) - /// Proof: `AmbassadorCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`) - /// Storage: `AmbassadorContent::AnnouncementsCount` (r:1 w:1) - /// Proof: `AmbassadorContent::AnnouncementsCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - /// Storage: `AmbassadorContent::NextAnnouncementExpireAt` (r:1 w:1) - /// Proof: `AmbassadorContent::NextAnnouncementExpireAt` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - /// Storage: `AmbassadorContent::Announcements` (r:0 w:1) - /// Proof: `AmbassadorContent::Announcements` (`max_values`: None, `max_size`: Some(90), added: 2565, mode: `MaxEncodedLen`) - fn announce() -> Weight { - // Proof Size summary in bytes: - // Measured: `285` - // Estimated: `3507` - // Minimum execution time: 273_000_000 picoseconds. - Weight::from_parts(278_000_000, 0) - .saturating_add(Weight::from_parts(0, 3507)) - .saturating_add(T::DbWeight::get().reads(3)) - .saturating_add(T::DbWeight::get().writes(3)) - } - /// Storage: `AmbassadorCollective::Members` (r:1 w:0) - /// Proof: `AmbassadorCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`) - /// Storage: `AmbassadorContent::Announcements` (r:1 w:1) - /// Proof: `AmbassadorContent::Announcements` (`max_values`: None, `max_size`: Some(90), added: 2565, mode: `MaxEncodedLen`) - /// Storage: `AmbassadorContent::AnnouncementsCount` (r:1 w:1) - /// Proof: `AmbassadorContent::AnnouncementsCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - fn remove_announcement() -> Weight { - // Proof Size summary in bytes: - // Measured: `450` - // Estimated: `3555` - // Minimum execution time: 326_000_000 picoseconds. 
- Weight::from_parts(338_000_000, 0) - .saturating_add(Weight::from_parts(0, 3555)) - .saturating_add(T::DbWeight::get().reads(3)) - .saturating_add(T::DbWeight::get().writes(2)) - } -} diff --git a/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/pallet_core_fellowship_ambassador_core.rs b/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/pallet_core_fellowship_ambassador_core.rs deleted file mode 100644 index f40940a8b25..00000000000 --- a/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/pallet_core_fellowship_ambassador_core.rs +++ /dev/null @@ -1,223 +0,0 @@ -// Copyright Parity Technologies (UK) Ltd. -// This file is part of Cumulus. - -// Cumulus is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Cumulus is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Cumulus. If not, see . - -//! Autogenerated weights for `pallet_core_fellowship` -//! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-08-11, STEPS: `2`, REPEAT: `2`, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `cob`, CPU: `` -//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("collectives-polkadot-dev")`, DB CACHE: 1024 - -// Executed Command: -// target/release/polkadot-parachain -// benchmark -// pallet -// --chain=collectives-polkadot-dev -// --wasm-execution=compiled -// --pallet=pallet_core_fellowship -// --extrinsic=* -// --steps=2 -// --repeat=2 -// --json -// --header=./file_header.txt -// --output=./parachains/runtimes/collectives/collectives-polkadot/src/weights/ - -#![cfg_attr(rustfmt, rustfmt_skip)] -#![allow(unused_parens)] -#![allow(unused_imports)] -#![allow(missing_docs)] - -use frame_support::{traits::Get, weights::Weight}; -use core::marker::PhantomData; - -/// Weight functions for `pallet_core_fellowship`. -pub struct WeightInfo(PhantomData); -impl pallet_core_fellowship::WeightInfo for WeightInfo { - /// Storage: `AmbassadorCore::Params` (r:0 w:1) - /// Proof: `AmbassadorCore::Params` (`max_values`: Some(1), `max_size`: Some(364), added: 859, mode: `MaxEncodedLen`) - fn set_params() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 11_000_000 picoseconds. 
- Weight::from_parts(11_000_000, 0) - .saturating_add(Weight::from_parts(0, 0)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `AmbassadorCore::Member` (r:1 w:1) - /// Proof: `AmbassadorCore::Member` (`max_values`: None, `max_size`: Some(49), added: 2524, mode: `MaxEncodedLen`) - /// Storage: `AmbassadorCollective::Members` (r:1 w:1) - /// Proof: `AmbassadorCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`) - /// Storage: `AmbassadorCore::Params` (r:1 w:0) - /// Proof: `AmbassadorCore::Params` (`max_values`: Some(1), `max_size`: Some(364), added: 859, mode: `MaxEncodedLen`) - /// Storage: `AmbassadorCollective::MemberCount` (r:1 w:1) - /// Proof: `AmbassadorCollective::MemberCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) - /// Storage: `AmbassadorCollective::IdToIndex` (r:1 w:0) - /// Proof: `AmbassadorCollective::IdToIndex` (`max_values`: None, `max_size`: Some(54), added: 2529, mode: `MaxEncodedLen`) - /// Storage: `AmbassadorCore::MemberEvidence` (r:1 w:1) - /// Proof: `AmbassadorCore::MemberEvidence` (`max_values`: None, `max_size`: Some(65581), added: 68056, mode: `MaxEncodedLen`) - fn bump_offboard() -> Weight { - // Proof Size summary in bytes: - // Measured: `66011` - // Estimated: `69046` - // Minimum execution time: 96_000_000 picoseconds. - Weight::from_parts(111_000_000, 0) - .saturating_add(Weight::from_parts(0, 69046)) - .saturating_add(T::DbWeight::get().reads(6)) - .saturating_add(T::DbWeight::get().writes(4)) - } - /// Storage: `AmbassadorCore::Member` (r:1 w:1) - /// Proof: `AmbassadorCore::Member` (`max_values`: None, `max_size`: Some(49), added: 2524, mode: `MaxEncodedLen`) - /// Storage: `AmbassadorCollective::Members` (r:1 w:1) - /// Proof: `AmbassadorCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`) - /// Storage: `AmbassadorCore::Params` (r:1 w:0) - /// Proof: `AmbassadorCore::Params` (`max_values`: Some(1), `max_size`: Some(364), added: 859, mode: `MaxEncodedLen`) - /// Storage: `AmbassadorCollective::MemberCount` (r:1 w:1) - /// Proof: `AmbassadorCollective::MemberCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) - /// Storage: `AmbassadorCollective::IdToIndex` (r:1 w:0) - /// Proof: `AmbassadorCollective::IdToIndex` (`max_values`: None, `max_size`: Some(54), added: 2529, mode: `MaxEncodedLen`) - /// Storage: `AmbassadorCore::MemberEvidence` (r:1 w:1) - /// Proof: `AmbassadorCore::MemberEvidence` (`max_values`: None, `max_size`: Some(65581), added: 68056, mode: `MaxEncodedLen`) - fn bump_demote() -> Weight { - // Proof Size summary in bytes: - // Measured: `66121` - // Estimated: `69046` - // Minimum execution time: 99_000_000 picoseconds. - Weight::from_parts(116_000_000, 0) - .saturating_add(Weight::from_parts(0, 69046)) - .saturating_add(T::DbWeight::get().reads(6)) - .saturating_add(T::DbWeight::get().writes(4)) - } - /// Storage: `AmbassadorCollective::Members` (r:1 w:0) - /// Proof: `AmbassadorCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`) - /// Storage: `AmbassadorCore::Member` (r:1 w:1) - /// Proof: `AmbassadorCore::Member` (`max_values`: None, `max_size`: Some(49), added: 2524, mode: `MaxEncodedLen`) - fn set_active() -> Weight { - // Proof Size summary in bytes: - // Measured: `360` - // Estimated: `3514` - // Minimum execution time: 21_000_000 picoseconds. 
- Weight::from_parts(22_000_000, 0) - .saturating_add(Weight::from_parts(0, 3514)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `AmbassadorCore::Member` (r:1 w:1) - /// Proof: `AmbassadorCore::Member` (`max_values`: None, `max_size`: Some(49), added: 2524, mode: `MaxEncodedLen`) - /// Storage: `AmbassadorCollective::Members` (r:1 w:1) - /// Proof: `AmbassadorCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`) - /// Storage: `AmbassadorCollective::MemberCount` (r:1 w:1) - /// Proof: `AmbassadorCollective::MemberCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) - /// Storage: `AmbassadorCollective::IndexToId` (r:0 w:1) - /// Proof: `AmbassadorCollective::IndexToId` (`max_values`: None, `max_size`: Some(54), added: 2529, mode: `MaxEncodedLen`) - /// Storage: `AmbassadorCollective::IdToIndex` (r:0 w:1) - /// Proof: `AmbassadorCollective::IdToIndex` (`max_values`: None, `max_size`: Some(54), added: 2529, mode: `MaxEncodedLen`) - fn induct() -> Weight { - // Proof Size summary in bytes: - // Measured: `118` - // Estimated: `3514` - // Minimum execution time: 36_000_000 picoseconds. - Weight::from_parts(36_000_000, 0) - .saturating_add(Weight::from_parts(0, 3514)) - .saturating_add(T::DbWeight::get().reads(3)) - .saturating_add(T::DbWeight::get().writes(5)) - } - /// Storage: `AmbassadorCollective::Members` (r:1 w:1) - /// Proof: `AmbassadorCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`) - /// Storage: `AmbassadorCore::Member` (r:1 w:1) - /// Proof: `AmbassadorCore::Member` (`max_values`: None, `max_size`: Some(49), added: 2524, mode: `MaxEncodedLen`) - /// Storage: `AmbassadorCore::Params` (r:1 w:0) - /// Proof: `AmbassadorCore::Params` (`max_values`: Some(1), `max_size`: Some(364), added: 859, mode: `MaxEncodedLen`) - /// Storage: `AmbassadorCollective::MemberCount` (r:1 w:1) - /// Proof: `AmbassadorCollective::MemberCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) - /// Storage: `AmbassadorCore::MemberEvidence` (r:1 w:1) - /// Proof: `AmbassadorCore::MemberEvidence` (`max_values`: None, `max_size`: Some(65581), added: 68056, mode: `MaxEncodedLen`) - /// Storage: `AmbassadorCollective::IndexToId` (r:0 w:1) - /// Proof: `AmbassadorCollective::IndexToId` (`max_values`: None, `max_size`: Some(54), added: 2529, mode: `MaxEncodedLen`) - /// Storage: `AmbassadorCollective::IdToIndex` (r:0 w:1) - /// Proof: `AmbassadorCollective::IdToIndex` (`max_values`: None, `max_size`: Some(54), added: 2529, mode: `MaxEncodedLen`) - fn promote() -> Weight { - // Proof Size summary in bytes: - // Measured: `65989` - // Estimated: `69046` - // Minimum execution time: 95_000_000 picoseconds. 
- Weight::from_parts(110_000_000, 0) - .saturating_add(Weight::from_parts(0, 69046)) - .saturating_add(T::DbWeight::get().reads(5)) - .saturating_add(T::DbWeight::get().writes(6)) - } - /// Storage: `AmbassadorCollective::Members` (r:1 w:0) - /// Proof: `AmbassadorCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`) - /// Storage: `AmbassadorCore::Member` (r:1 w:1) - /// Proof: `AmbassadorCore::Member` (`max_values`: None, `max_size`: Some(49), added: 2524, mode: `MaxEncodedLen`) - /// Storage: `AmbassadorCore::MemberEvidence` (r:0 w:1) - /// Proof: `AmbassadorCore::MemberEvidence` (`max_values`: None, `max_size`: Some(65581), added: 68056, mode: `MaxEncodedLen`) - fn offboard() -> Weight { - // Proof Size summary in bytes: - // Measured: `331` - // Estimated: `3514` - // Minimum execution time: 21_000_000 picoseconds. - Weight::from_parts(22_000_000, 0) - .saturating_add(Weight::from_parts(0, 3514)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `AmbassadorCore::Member` (r:1 w:1) - /// Proof: `AmbassadorCore::Member` (`max_values`: None, `max_size`: Some(49), added: 2524, mode: `MaxEncodedLen`) - /// Storage: `AmbassadorCollective::Members` (r:1 w:0) - /// Proof: `AmbassadorCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`) - fn import() -> Weight { - // Proof Size summary in bytes: - // Measured: `285` - // Estimated: `3514` - // Minimum execution time: 20_000_000 picoseconds. - Weight::from_parts(21_000_000, 0) - .saturating_add(Weight::from_parts(0, 3514)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `AmbassadorCollective::Members` (r:1 w:0) - /// Proof: `AmbassadorCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`) - /// Storage: `AmbassadorCore::Member` (r:1 w:1) - /// Proof: `AmbassadorCore::Member` (`max_values`: None, `max_size`: Some(49), added: 2524, mode: `MaxEncodedLen`) - /// Storage: `AmbassadorCore::MemberEvidence` (r:1 w:1) - /// Proof: `AmbassadorCore::MemberEvidence` (`max_values`: None, `max_size`: Some(65581), added: 68056, mode: `MaxEncodedLen`) - fn approve() -> Weight { - // Proof Size summary in bytes: - // Measured: `65967` - // Estimated: `69046` - // Minimum execution time: 78_000_000 picoseconds. - Weight::from_parts(104_000_000, 0) - .saturating_add(Weight::from_parts(0, 69046)) - .saturating_add(T::DbWeight::get().reads(3)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `AmbassadorCore::Member` (r:1 w:0) - /// Proof: `AmbassadorCore::Member` (`max_values`: None, `max_size`: Some(49), added: 2524, mode: `MaxEncodedLen`) - /// Storage: `AmbassadorCore::MemberEvidence` (r:1 w:1) - /// Proof: `AmbassadorCore::MemberEvidence` (`max_values`: None, `max_size`: Some(65581), added: 68056, mode: `MaxEncodedLen`) - fn submit_evidence() -> Weight { - // Proof Size summary in bytes: - // Measured: `151` - // Estimated: `69046` - // Minimum execution time: 43_000_000 picoseconds. 
- Weight::from_parts(44_000_000, 0) - .saturating_add(Weight::from_parts(0, 69046)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(1)) - } -} diff --git a/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/pallet_core_fellowship_fellowship_core.rs b/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/pallet_core_fellowship_fellowship_core.rs deleted file mode 100644 index 471ee82ead7..00000000000 --- a/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/pallet_core_fellowship_fellowship_core.rs +++ /dev/null @@ -1,222 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! Autogenerated weights for `pallet_core_fellowship` -//! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-08-11, STEPS: `2`, REPEAT: `2`, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `cob`, CPU: `` -//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("collectives-polkadot-dev")`, DB CACHE: 1024 - -// Executed Command: -// target/release/polkadot-parachain -// benchmark -// pallet -// --chain=collectives-polkadot-dev -// --wasm-execution=compiled -// --pallet=pallet_core_fellowship -// --extrinsic=* -// --steps=2 -// --repeat=2 -// --json -// --header=./file_header.txt -// --output=./parachains/runtimes/collectives/collectives-polkadot/src/weights/ - -#![cfg_attr(rustfmt, rustfmt_skip)] -#![allow(unused_parens)] -#![allow(unused_imports)] -#![allow(missing_docs)] - -use frame_support::{traits::Get, weights::Weight}; -use core::marker::PhantomData; - -/// Weight functions for `pallet_core_fellowship`. -pub struct WeightInfo(PhantomData); -impl pallet_core_fellowship::WeightInfo for WeightInfo { - /// Storage: `FellowshipCore::Params` (r:0 w:1) - /// Proof: `FellowshipCore::Params` (`max_values`: Some(1), `max_size`: Some(364), added: 859, mode: `MaxEncodedLen`) - fn set_params() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 11_000_000 picoseconds. 
- Weight::from_parts(12_000_000, 0) - .saturating_add(Weight::from_parts(0, 0)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `FellowshipCore::Member` (r:1 w:1) - /// Proof: `FellowshipCore::Member` (`max_values`: None, `max_size`: Some(49), added: 2524, mode: `MaxEncodedLen`) - /// Storage: `FellowshipCollective::Members` (r:1 w:1) - /// Proof: `FellowshipCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`) - /// Storage: `FellowshipCore::Params` (r:1 w:0) - /// Proof: `FellowshipCore::Params` (`max_values`: Some(1), `max_size`: Some(364), added: 859, mode: `MaxEncodedLen`) - /// Storage: `FellowshipCollective::MemberCount` (r:1 w:1) - /// Proof: `FellowshipCollective::MemberCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) - /// Storage: `FellowshipCollective::IdToIndex` (r:1 w:0) - /// Proof: `FellowshipCollective::IdToIndex` (`max_values`: None, `max_size`: Some(54), added: 2529, mode: `MaxEncodedLen`) - /// Storage: `FellowshipCore::MemberEvidence` (r:1 w:1) - /// Proof: `FellowshipCore::MemberEvidence` (`max_values`: None, `max_size`: Some(65581), added: 68056, mode: `MaxEncodedLen`) - fn bump_offboard() -> Weight { - // Proof Size summary in bytes: - // Measured: `66144` - // Estimated: `69046` - // Minimum execution time: 109_000_000 picoseconds. - Weight::from_parts(125_000_000, 0) - .saturating_add(Weight::from_parts(0, 69046)) - .saturating_add(T::DbWeight::get().reads(6)) - .saturating_add(T::DbWeight::get().writes(4)) - } - /// Storage: `FellowshipCore::Member` (r:1 w:1) - /// Proof: `FellowshipCore::Member` (`max_values`: None, `max_size`: Some(49), added: 2524, mode: `MaxEncodedLen`) - /// Storage: `FellowshipCollective::Members` (r:1 w:1) - /// Proof: `FellowshipCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`) - /// Storage: `FellowshipCore::Params` (r:1 w:0) - /// Proof: `FellowshipCore::Params` (`max_values`: Some(1), `max_size`: Some(364), added: 859, mode: `MaxEncodedLen`) - /// Storage: `FellowshipCollective::MemberCount` (r:1 w:1) - /// Proof: `FellowshipCollective::MemberCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) - /// Storage: `FellowshipCollective::IdToIndex` (r:1 w:0) - /// Proof: `FellowshipCollective::IdToIndex` (`max_values`: None, `max_size`: Some(54), added: 2529, mode: `MaxEncodedLen`) - /// Storage: `FellowshipCore::MemberEvidence` (r:1 w:1) - /// Proof: `FellowshipCore::MemberEvidence` (`max_values`: None, `max_size`: Some(65581), added: 68056, mode: `MaxEncodedLen`) - fn bump_demote() -> Weight { - // Proof Size summary in bytes: - // Measured: `66254` - // Estimated: `69046` - // Minimum execution time: 112_000_000 picoseconds. - Weight::from_parts(114_000_000, 0) - .saturating_add(Weight::from_parts(0, 69046)) - .saturating_add(T::DbWeight::get().reads(6)) - .saturating_add(T::DbWeight::get().writes(4)) - } - /// Storage: `FellowshipCollective::Members` (r:1 w:0) - /// Proof: `FellowshipCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`) - /// Storage: `FellowshipCore::Member` (r:1 w:1) - /// Proof: `FellowshipCore::Member` (`max_values`: None, `max_size`: Some(49), added: 2524, mode: `MaxEncodedLen`) - fn set_active() -> Weight { - // Proof Size summary in bytes: - // Measured: `493` - // Estimated: `3514` - // Minimum execution time: 22_000_000 picoseconds. 
- Weight::from_parts(27_000_000, 0) - .saturating_add(Weight::from_parts(0, 3514)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `FellowshipCore::Member` (r:1 w:1) - /// Proof: `FellowshipCore::Member` (`max_values`: None, `max_size`: Some(49), added: 2524, mode: `MaxEncodedLen`) - /// Storage: `FellowshipCollective::Members` (r:1 w:1) - /// Proof: `FellowshipCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`) - /// Storage: `FellowshipCollective::MemberCount` (r:1 w:1) - /// Proof: `FellowshipCollective::MemberCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) - /// Storage: `FellowshipCollective::IndexToId` (r:0 w:1) - /// Proof: `FellowshipCollective::IndexToId` (`max_values`: None, `max_size`: Some(54), added: 2529, mode: `MaxEncodedLen`) - /// Storage: `FellowshipCollective::IdToIndex` (r:0 w:1) - /// Proof: `FellowshipCollective::IdToIndex` (`max_values`: None, `max_size`: Some(54), added: 2529, mode: `MaxEncodedLen`) - fn induct() -> Weight { - // Proof Size summary in bytes: - // Measured: `251` - // Estimated: `3514` - // Minimum execution time: 35_000_000 picoseconds. - Weight::from_parts(36_000_000, 0) - .saturating_add(Weight::from_parts(0, 3514)) - .saturating_add(T::DbWeight::get().reads(3)) - .saturating_add(T::DbWeight::get().writes(5)) - } - /// Storage: `FellowshipCollective::Members` (r:1 w:1) - /// Proof: `FellowshipCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`) - /// Storage: `FellowshipCore::Member` (r:1 w:1) - /// Proof: `FellowshipCore::Member` (`max_values`: None, `max_size`: Some(49), added: 2524, mode: `MaxEncodedLen`) - /// Storage: `FellowshipCore::Params` (r:1 w:0) - /// Proof: `FellowshipCore::Params` (`max_values`: Some(1), `max_size`: Some(364), added: 859, mode: `MaxEncodedLen`) - /// Storage: `FellowshipCollective::MemberCount` (r:1 w:1) - /// Proof: `FellowshipCollective::MemberCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) - /// Storage: `FellowshipCore::MemberEvidence` (r:1 w:1) - /// Proof: `FellowshipCore::MemberEvidence` (`max_values`: None, `max_size`: Some(65581), added: 68056, mode: `MaxEncodedLen`) - /// Storage: `FellowshipCollective::IndexToId` (r:0 w:1) - /// Proof: `FellowshipCollective::IndexToId` (`max_values`: None, `max_size`: Some(54), added: 2529, mode: `MaxEncodedLen`) - /// Storage: `FellowshipCollective::IdToIndex` (r:0 w:1) - /// Proof: `FellowshipCollective::IdToIndex` (`max_values`: None, `max_size`: Some(54), added: 2529, mode: `MaxEncodedLen`) - fn promote() -> Weight { - // Proof Size summary in bytes: - // Measured: `66122` - // Estimated: `69046` - // Minimum execution time: 97_000_000 picoseconds. 
- Weight::from_parts(129_000_000, 0) - .saturating_add(Weight::from_parts(0, 69046)) - .saturating_add(T::DbWeight::get().reads(5)) - .saturating_add(T::DbWeight::get().writes(6)) - } - /// Storage: `FellowshipCollective::Members` (r:1 w:0) - /// Proof: `FellowshipCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`) - /// Storage: `FellowshipCore::Member` (r:1 w:1) - /// Proof: `FellowshipCore::Member` (`max_values`: None, `max_size`: Some(49), added: 2524, mode: `MaxEncodedLen`) - /// Storage: `FellowshipCore::MemberEvidence` (r:0 w:1) - /// Proof: `FellowshipCore::MemberEvidence` (`max_values`: None, `max_size`: Some(65581), added: 68056, mode: `MaxEncodedLen`) - fn offboard() -> Weight { - // Proof Size summary in bytes: - // Measured: `464` - // Estimated: `3514` - // Minimum execution time: 22_000_000 picoseconds. - Weight::from_parts(22_000_000, 0) - .saturating_add(Weight::from_parts(0, 3514)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `FellowshipCore::Member` (r:1 w:1) - /// Proof: `FellowshipCore::Member` (`max_values`: None, `max_size`: Some(49), added: 2524, mode: `MaxEncodedLen`) - /// Storage: `FellowshipCollective::Members` (r:1 w:0) - /// Proof: `FellowshipCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`) - fn import() -> Weight { - // Proof Size summary in bytes: - // Measured: `418` - // Estimated: `3514` - // Minimum execution time: 20_000_000 picoseconds. - Weight::from_parts(24_000_000, 0) - .saturating_add(Weight::from_parts(0, 3514)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `FellowshipCollective::Members` (r:1 w:0) - /// Proof: `FellowshipCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`) - /// Storage: `FellowshipCore::Member` (r:1 w:1) - /// Proof: `FellowshipCore::Member` (`max_values`: None, `max_size`: Some(49), added: 2524, mode: `MaxEncodedLen`) - /// Storage: `FellowshipCore::MemberEvidence` (r:1 w:1) - /// Proof: `FellowshipCore::MemberEvidence` (`max_values`: None, `max_size`: Some(65581), added: 68056, mode: `MaxEncodedLen`) - fn approve() -> Weight { - // Proof Size summary in bytes: - // Measured: `66100` - // Estimated: `69046` - // Minimum execution time: 89_000_000 picoseconds. - Weight::from_parts(119_000_000, 0) - .saturating_add(Weight::from_parts(0, 69046)) - .saturating_add(T::DbWeight::get().reads(3)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `FellowshipCore::Member` (r:1 w:0) - /// Proof: `FellowshipCore::Member` (`max_values`: None, `max_size`: Some(49), added: 2524, mode: `MaxEncodedLen`) - /// Storage: `FellowshipCore::MemberEvidence` (r:1 w:1) - /// Proof: `FellowshipCore::MemberEvidence` (`max_values`: None, `max_size`: Some(65581), added: 68056, mode: `MaxEncodedLen`) - fn submit_evidence() -> Weight { - // Proof Size summary in bytes: - // Measured: `184` - // Estimated: `69046` - // Minimum execution time: 43_000_000 picoseconds. 
- Weight::from_parts(52_000_000, 0) - .saturating_add(Weight::from_parts(0, 69046)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(1)) - } -} diff --git a/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/pallet_message_queue.rs b/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/pallet_message_queue.rs deleted file mode 100644 index 4bd71c4e7d4..00000000000 --- a/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/pallet_message_queue.rs +++ /dev/null @@ -1,179 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! Autogenerated weights for `pallet_message_queue` -//! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-03-24, STEPS: `2`, REPEAT: `1`, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `i9`, CPU: `13th Gen Intel(R) Core(TM) i9-13900K` -//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("collectives-polkadot-dev"), DB CACHE: 1024 - -// Executed Command: -// ./target/release/polkadot-parachain -// benchmark -// pallet -// --chain -// collectives-polkadot-dev -// --pallet -// pallet_message_queue -// --extrinsic -// * -// --execution -// wasm -// --wasm-execution -// compiled -// --output -// parachains/runtimes/collectives/collectives-polkadot/src/weights - -#![cfg_attr(rustfmt, rustfmt_skip)] -#![allow(unused_parens)] -#![allow(unused_imports)] - -use frame_support::{traits::Get, weights::Weight}; -use sp_std::marker::PhantomData; - -/// Weight functions for `pallet_message_queue`. -pub struct WeightInfo(PhantomData); -impl pallet_message_queue::WeightInfo for WeightInfo { - /// Storage: MessageQueue ServiceHead (r:1 w:0) - /// Proof: MessageQueue ServiceHead (max_values: Some(1), max_size: Some(5), added: 500, mode: MaxEncodedLen) - /// Storage: MessageQueue BookStateFor (r:2 w:2) - /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) - fn ready_ring_knit() -> Weight { - // Proof Size summary in bytes: - // Measured: `189` - // Estimated: `7534` - // Minimum execution time: 11_440_000 picoseconds. - Weight::from_parts(11_440_000, 0) - .saturating_add(Weight::from_parts(0, 7534)) - .saturating_add(T::DbWeight::get().reads(3)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: MessageQueue BookStateFor (r:2 w:2) - /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) - /// Storage: MessageQueue ServiceHead (r:1 w:1) - /// Proof: MessageQueue ServiceHead (max_values: Some(1), max_size: Some(5), added: 500, mode: MaxEncodedLen) - fn ready_ring_unknit() -> Weight { - // Proof Size summary in bytes: - // Measured: `184` - // Estimated: `7534` - // Minimum execution time: 11_077_000 picoseconds. 
- Weight::from_parts(11_077_000, 0) - .saturating_add(Weight::from_parts(0, 7534)) - .saturating_add(T::DbWeight::get().reads(3)) - .saturating_add(T::DbWeight::get().writes(3)) - } - /// Storage: MessageQueue BookStateFor (r:1 w:1) - /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) - fn service_queue_base() -> Weight { - // Proof Size summary in bytes: - // Measured: `6` - // Estimated: `3517` - // Minimum execution time: 3_977_000 picoseconds. - Weight::from_parts(3_977_000, 0) - .saturating_add(Weight::from_parts(0, 3517)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: MessageQueue Pages (r:1 w:1) - /// Proof: MessageQueue Pages (max_values: None, max_size: Some(65585), added: 68060, mode: MaxEncodedLen) - fn service_page_base_completion() -> Weight { - // Proof Size summary in bytes: - // Measured: `72` - // Estimated: `69050` - // Minimum execution time: 4_831_000 picoseconds. - Weight::from_parts(4_831_000, 0) - .saturating_add(Weight::from_parts(0, 69050)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: MessageQueue Pages (r:1 w:1) - /// Proof: MessageQueue Pages (max_values: None, max_size: Some(65585), added: 68060, mode: MaxEncodedLen) - fn service_page_base_no_completion() -> Weight { - // Proof Size summary in bytes: - // Measured: `72` - // Estimated: `69050` - // Minimum execution time: 5_192_000 picoseconds. - Weight::from_parts(5_192_000, 0) - .saturating_add(Weight::from_parts(0, 69050)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - fn service_page_item() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 58_750_000 picoseconds. - Weight::from_parts(58_750_000, 0) - .saturating_add(Weight::from_parts(0, 0)) - } - /// Storage: MessageQueue ServiceHead (r:1 w:1) - /// Proof: MessageQueue ServiceHead (max_values: Some(1), max_size: Some(5), added: 500, mode: MaxEncodedLen) - /// Storage: MessageQueue BookStateFor (r:1 w:0) - /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) - fn bump_service_head() -> Weight { - // Proof Size summary in bytes: - // Measured: `99` - // Estimated: `5007` - // Minimum execution time: 5_107_000 picoseconds. - Weight::from_parts(5_107_000, 0) - .saturating_add(Weight::from_parts(0, 5007)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: MessageQueue BookStateFor (r:1 w:1) - /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) - /// Storage: MessageQueue Pages (r:1 w:1) - /// Proof: MessageQueue Pages (max_values: None, max_size: Some(65585), added: 68060, mode: MaxEncodedLen) - fn reap_page() -> Weight { - // Proof Size summary in bytes: - // Measured: `65667` - // Estimated: `72567` - // Minimum execution time: 46_814_000 picoseconds. 
- Weight::from_parts(46_814_000, 0) - .saturating_add(Weight::from_parts(0, 72567)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: MessageQueue BookStateFor (r:1 w:1) - /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) - /// Storage: MessageQueue Pages (r:1 w:1) - /// Proof: MessageQueue Pages (max_values: None, max_size: Some(65585), added: 68060, mode: MaxEncodedLen) - fn execute_overweight_page_removed() -> Weight { - // Proof Size summary in bytes: - // Measured: `65667` - // Estimated: `72567` - // Minimum execution time: 52_510_000 picoseconds. - Weight::from_parts(52_510_000, 0) - .saturating_add(Weight::from_parts(0, 72567)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: MessageQueue BookStateFor (r:1 w:1) - /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) - /// Storage: MessageQueue Pages (r:1 w:1) - /// Proof: MessageQueue Pages (max_values: None, max_size: Some(65585), added: 68060, mode: MaxEncodedLen) - fn execute_overweight_page_updated() -> Weight { - // Proof Size summary in bytes: - // Measured: `65667` - // Estimated: `72567` - // Minimum execution time: 71_930_000 picoseconds. - Weight::from_parts(71_930_000, 0) - .saturating_add(Weight::from_parts(0, 72567)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(2)) - } -} diff --git a/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/pallet_multisig.rs b/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/pallet_multisig.rs deleted file mode 100644 index a7827b72009..00000000000 --- a/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/pallet_multisig.rs +++ /dev/null @@ -1,164 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! Autogenerated weights for `pallet_multisig` -//! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-07-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-ynta1nyy-project-238-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! 
EXECUTION: ``, WASM-EXECUTION: `Compiled`, CHAIN: `Some("collectives-polkadot-dev")`, DB CACHE: 1024 - -// Executed Command: -// ./target/production/polkadot-parachain -// benchmark -// pallet -// --chain=collectives-polkadot-dev -// --wasm-execution=compiled -// --pallet=pallet_multisig -// --no-storage-info -// --no-median-slopes -// --no-min-squares -// --extrinsic=* -// --steps=50 -// --repeat=20 -// --json -// --header=./file_header.txt -// --output=./parachains/runtimes/collectives/collectives-polkadot/src/weights/ - -#![cfg_attr(rustfmt, rustfmt_skip)] -#![allow(unused_parens)] -#![allow(unused_imports)] -#![allow(missing_docs)] - -use frame_support::{traits::Get, weights::Weight}; -use core::marker::PhantomData; - -/// Weight functions for `pallet_multisig`. -pub struct WeightInfo(PhantomData); -impl pallet_multisig::WeightInfo for WeightInfo { - /// The range of component `z` is `[0, 10000]`. - fn as_multi_threshold_1(z: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 13_288_000 picoseconds. - Weight::from_parts(14_235_741, 0) - .saturating_add(Weight::from_parts(0, 0)) - // Standard Error: 5 - .saturating_add(Weight::from_parts(500, 0).saturating_mul(z.into())) - } - /// Storage: `Multisig::Multisigs` (r:1 w:1) - /// Proof: `Multisig::Multisigs` (`max_values`: None, `max_size`: Some(3346), added: 5821, mode: `MaxEncodedLen`) - /// The range of component `s` is `[2, 100]`. - /// The range of component `z` is `[0, 10000]`. - fn as_multi_create(s: u32, z: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `328 + s * (2 ±0)` - // Estimated: `6811` - // Minimum execution time: 44_865_000 picoseconds. - Weight::from_parts(33_468_056, 0) - .saturating_add(Weight::from_parts(0, 6811)) - // Standard Error: 1_513 - .saturating_add(Weight::from_parts(130_544, 0).saturating_mul(s.into())) - // Standard Error: 14 - .saturating_add(Weight::from_parts(1_422, 0).saturating_mul(z.into())) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `Multisig::Multisigs` (r:1 w:1) - /// Proof: `Multisig::Multisigs` (`max_values`: None, `max_size`: Some(3346), added: 5821, mode: `MaxEncodedLen`) - /// The range of component `s` is `[3, 100]`. - /// The range of component `z` is `[0, 10000]`. - fn as_multi_approve(s: u32, z: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `348` - // Estimated: `6811` - // Minimum execution time: 29_284_000 picoseconds. - Weight::from_parts(18_708_967, 0) - .saturating_add(Weight::from_parts(0, 6811)) - // Standard Error: 916 - .saturating_add(Weight::from_parts(119_202, 0).saturating_mul(s.into())) - // Standard Error: 8 - .saturating_add(Weight::from_parts(1_447, 0).saturating_mul(z.into())) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `Multisig::Multisigs` (r:1 w:1) - /// Proof: `Multisig::Multisigs` (`max_values`: None, `max_size`: Some(3346), added: 5821, mode: `MaxEncodedLen`) - /// Storage: `System::Account` (r:1 w:1) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - /// The range of component `s` is `[2, 100]`. - /// The range of component `z` is `[0, 10000]`. - fn as_multi_complete(s: u32, z: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `451 + s * (33 ±0)` - // Estimated: `6811` - // Minimum execution time: 49_462_000 picoseconds. 
- Weight::from_parts(34_470_286, 0) - .saturating_add(Weight::from_parts(0, 6811)) - // Standard Error: 1_738 - .saturating_add(Weight::from_parts(178_227, 0).saturating_mul(s.into())) - // Standard Error: 17 - .saturating_add(Weight::from_parts(1_644, 0).saturating_mul(z.into())) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `Multisig::Multisigs` (r:1 w:1) - /// Proof: `Multisig::Multisigs` (`max_values`: None, `max_size`: Some(3346), added: 5821, mode: `MaxEncodedLen`) - /// The range of component `s` is `[2, 100]`. - fn approve_as_multi_create(s: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `329 + s * (2 ±0)` - // Estimated: `6811` - // Minimum execution time: 30_749_000 picoseconds. - Weight::from_parts(31_841_438, 0) - .saturating_add(Weight::from_parts(0, 6811)) - // Standard Error: 1_033 - .saturating_add(Weight::from_parts(123_126, 0).saturating_mul(s.into())) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `Multisig::Multisigs` (r:1 w:1) - /// Proof: `Multisig::Multisigs` (`max_values`: None, `max_size`: Some(3346), added: 5821, mode: `MaxEncodedLen`) - /// The range of component `s` is `[2, 100]`. - fn approve_as_multi_approve(s: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `348` - // Estimated: `6811` - // Minimum execution time: 17_436_000 picoseconds. - Weight::from_parts(18_036_002, 0) - .saturating_add(Weight::from_parts(0, 6811)) - // Standard Error: 829 - .saturating_add(Weight::from_parts(109_450, 0).saturating_mul(s.into())) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `Multisig::Multisigs` (r:1 w:1) - /// Proof: `Multisig::Multisigs` (`max_values`: None, `max_size`: Some(3346), added: 5821, mode: `MaxEncodedLen`) - /// The range of component `s` is `[2, 100]`. - fn cancel_as_multi(s: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `520 + s * (1 ±0)` - // Estimated: `6811` - // Minimum execution time: 31_532_000 picoseconds. - Weight::from_parts(32_818_015, 0) - .saturating_add(Weight::from_parts(0, 6811)) - // Standard Error: 977 - .saturating_add(Weight::from_parts(123_121, 0).saturating_mul(s.into())) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } -} diff --git a/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/pallet_preimage.rs b/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/pallet_preimage.rs deleted file mode 100644 index 9b45c875818..00000000000 --- a/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/pallet_preimage.rs +++ /dev/null @@ -1,232 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! Autogenerated weights for `pallet_preimage` -//! -//! 
THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-07-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-ynta1nyy-project-238-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! EXECUTION: ``, WASM-EXECUTION: `Compiled`, CHAIN: `Some("collectives-polkadot-dev")`, DB CACHE: 1024 - -// Executed Command: -// ./target/production/polkadot-parachain -// benchmark -// pallet -// --chain=collectives-polkadot-dev -// --wasm-execution=compiled -// --pallet=pallet_preimage -// --no-storage-info -// --no-median-slopes -// --no-min-squares -// --extrinsic=* -// --steps=50 -// --repeat=20 -// --json -// --header=./file_header.txt -// --output=./parachains/runtimes/collectives/collectives-polkadot/src/weights/ - -#![cfg_attr(rustfmt, rustfmt_skip)] -#![allow(unused_parens)] -#![allow(unused_imports)] -#![allow(missing_docs)] - -use frame_support::{traits::Get, weights::Weight}; -use core::marker::PhantomData; - -/// Weight functions for `pallet_preimage`. -pub struct WeightInfo(PhantomData); -impl pallet_preimage::WeightInfo for WeightInfo { - fn ensure_updated(n: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `193 + n * (91 ±0)` - // Estimated: `3593 + n * (2566 ±0)` - // Minimum execution time: 2_000_000 picoseconds. - Weight::from_parts(2_000_000, 3593) - // Standard Error: 13_720 - .saturating_add(Weight::from_parts(17_309_199, 0).saturating_mul(n.into())) - .saturating_add(T::DbWeight::get().reads(1_u64)) - .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(n.into()))) - .saturating_add(T::DbWeight::get().writes(1_u64)) - .saturating_add(T::DbWeight::get().writes((2_u64).saturating_mul(n.into()))) - .saturating_add(Weight::from_parts(0, 2566).saturating_mul(n.into())) - } - - /// Storage: `Preimage::StatusFor` (r:1 w:1) - /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) - /// Storage: `Preimage::PreimageFor` (r:0 w:1) - /// Proof: `Preimage::PreimageFor` (`max_values`: None, `max_size`: Some(4194344), added: 4196819, mode: `MaxEncodedLen`) - /// The range of component `s` is `[0, 4194304]`. - fn note_preimage(s: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `177` - // Estimated: `3556` - // Minimum execution time: 29_323_000 picoseconds. - Weight::from_parts(29_793_000, 0) - .saturating_add(Weight::from_parts(0, 3556)) - // Standard Error: 5 - .saturating_add(Weight::from_parts(2_504, 0).saturating_mul(s.into())) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `Preimage::StatusFor` (r:1 w:1) - /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) - /// Storage: `Preimage::PreimageFor` (r:0 w:1) - /// Proof: `Preimage::PreimageFor` (`max_values`: None, `max_size`: Some(4194344), added: 4196819, mode: `MaxEncodedLen`) - /// The range of component `s` is `[0, 4194304]`. - fn note_requested_preimage(s: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `140` - // Estimated: `3556` - // Minimum execution time: 15_581_000 picoseconds. 
- Weight::from_parts(15_659_000, 0) - .saturating_add(Weight::from_parts(0, 3556)) - // Standard Error: 4 - .saturating_add(Weight::from_parts(2_500, 0).saturating_mul(s.into())) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `Preimage::StatusFor` (r:1 w:1) - /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) - /// Storage: `Preimage::PreimageFor` (r:0 w:1) - /// Proof: `Preimage::PreimageFor` (`max_values`: None, `max_size`: Some(4194344), added: 4196819, mode: `MaxEncodedLen`) - /// The range of component `s` is `[0, 4194304]`. - fn note_no_deposit_preimage(s: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `140` - // Estimated: `3556` - // Minimum execution time: 15_028_000 picoseconds. - Weight::from_parts(15_150_000, 0) - .saturating_add(Weight::from_parts(0, 3556)) - // Standard Error: 6 - .saturating_add(Weight::from_parts(2_560, 0).saturating_mul(s.into())) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `Preimage::StatusFor` (r:1 w:1) - /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) - /// Storage: `Preimage::PreimageFor` (r:0 w:1) - /// Proof: `Preimage::PreimageFor` (`max_values`: None, `max_size`: Some(4194344), added: 4196819, mode: `MaxEncodedLen`) - fn unnote_preimage() -> Weight { - // Proof Size summary in bytes: - // Measured: `323` - // Estimated: `3556` - // Minimum execution time: 55_113_000 picoseconds. - Weight::from_parts(59_127_000, 0) - .saturating_add(Weight::from_parts(0, 3556)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `Preimage::StatusFor` (r:1 w:1) - /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) - /// Storage: `Preimage::PreimageFor` (r:0 w:1) - /// Proof: `Preimage::PreimageFor` (`max_values`: None, `max_size`: Some(4194344), added: 4196819, mode: `MaxEncodedLen`) - fn unnote_no_deposit_preimage() -> Weight { - // Proof Size summary in bytes: - // Measured: `178` - // Estimated: `3556` - // Minimum execution time: 38_033_000 picoseconds. - Weight::from_parts(41_203_000, 0) - .saturating_add(Weight::from_parts(0, 3556)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `Preimage::StatusFor` (r:1 w:1) - /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) - fn request_preimage() -> Weight { - // Proof Size summary in bytes: - // Measured: `222` - // Estimated: `3556` - // Minimum execution time: 31_482_000 picoseconds. - Weight::from_parts(34_726_000, 0) - .saturating_add(Weight::from_parts(0, 3556)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `Preimage::StatusFor` (r:1 w:1) - /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) - fn request_no_deposit_preimage() -> Weight { - // Proof Size summary in bytes: - // Measured: `178` - // Estimated: `3556` - // Minimum execution time: 20_724_000 picoseconds. 
- Weight::from_parts(22_928_000, 0) - .saturating_add(Weight::from_parts(0, 3556)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `Preimage::StatusFor` (r:1 w:1) - /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) - fn request_unnoted_preimage() -> Weight { - // Proof Size summary in bytes: - // Measured: `76` - // Estimated: `3556` - // Minimum execution time: 27_015_000 picoseconds. - Weight::from_parts(29_240_000, 0) - .saturating_add(Weight::from_parts(0, 3556)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `Preimage::StatusFor` (r:1 w:1) - /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) - fn request_requested_preimage() -> Weight { - // Proof Size summary in bytes: - // Measured: `140` - // Estimated: `3556` - // Minimum execution time: 10_712_000 picoseconds. - Weight::from_parts(11_317_000, 0) - .saturating_add(Weight::from_parts(0, 3556)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `Preimage::StatusFor` (r:1 w:1) - /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) - /// Storage: `Preimage::PreimageFor` (r:0 w:1) - /// Proof: `Preimage::PreimageFor` (`max_values`: None, `max_size`: Some(4194344), added: 4196819, mode: `MaxEncodedLen`) - fn unrequest_preimage() -> Weight { - // Proof Size summary in bytes: - // Measured: `178` - // Estimated: `3556` - // Minimum execution time: 34_528_000 picoseconds. - Weight::from_parts(35_982_000, 0) - .saturating_add(Weight::from_parts(0, 3556)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `Preimage::StatusFor` (r:1 w:1) - /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) - fn unrequest_unnoted_preimage() -> Weight { - // Proof Size summary in bytes: - // Measured: `140` - // Estimated: `3556` - // Minimum execution time: 11_059_000 picoseconds. - Weight::from_parts(12_458_000, 0) - .saturating_add(Weight::from_parts(0, 3556)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `Preimage::StatusFor` (r:1 w:1) - /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) - fn unrequest_multi_referenced_preimage() -> Weight { - // Proof Size summary in bytes: - // Measured: `140` - // Estimated: `3556` - // Minimum execution time: 11_502_000 picoseconds. - Weight::from_parts(12_180_000, 0) - .saturating_add(Weight::from_parts(0, 3556)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } -} diff --git a/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/pallet_proxy.rs b/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/pallet_proxy.rs deleted file mode 100644 index 59d9f912bf1..00000000000 --- a/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/pallet_proxy.rs +++ /dev/null @@ -1,225 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. 
-// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! Autogenerated weights for `pallet_proxy` -//! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-07-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-ynta1nyy-project-238-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! EXECUTION: ``, WASM-EXECUTION: `Compiled`, CHAIN: `Some("collectives-polkadot-dev")`, DB CACHE: 1024 - -// Executed Command: -// ./target/production/polkadot-parachain -// benchmark -// pallet -// --chain=collectives-polkadot-dev -// --wasm-execution=compiled -// --pallet=pallet_proxy -// --no-storage-info -// --no-median-slopes -// --no-min-squares -// --extrinsic=* -// --steps=50 -// --repeat=20 -// --json -// --header=./file_header.txt -// --output=./parachains/runtimes/collectives/collectives-polkadot/src/weights/ - -#![cfg_attr(rustfmt, rustfmt_skip)] -#![allow(unused_parens)] -#![allow(unused_imports)] -#![allow(missing_docs)] - -use frame_support::{traits::Get, weights::Weight}; -use core::marker::PhantomData; - -/// Weight functions for `pallet_proxy`. -pub struct WeightInfo(PhantomData); -impl pallet_proxy::WeightInfo for WeightInfo { - /// Storage: `Proxy::Proxies` (r:1 w:0) - /// Proof: `Proxy::Proxies` (`max_values`: None, `max_size`: Some(1241), added: 3716, mode: `MaxEncodedLen`) - /// The range of component `p` is `[1, 31]`. - fn proxy(p: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `127 + p * (37 ±0)` - // Estimated: `4706` - // Minimum execution time: 15_597_000 picoseconds. - Weight::from_parts(16_231_993, 0) - .saturating_add(Weight::from_parts(0, 4706)) - // Standard Error: 1_665 - .saturating_add(Weight::from_parts(29_818, 0).saturating_mul(p.into())) - .saturating_add(T::DbWeight::get().reads(1)) - } - /// Storage: `Proxy::Proxies` (r:1 w:0) - /// Proof: `Proxy::Proxies` (`max_values`: None, `max_size`: Some(1241), added: 3716, mode: `MaxEncodedLen`) - /// Storage: `Proxy::Announcements` (r:1 w:1) - /// Proof: `Proxy::Announcements` (`max_values`: None, `max_size`: Some(2233), added: 4708, mode: `MaxEncodedLen`) - /// Storage: `System::Account` (r:1 w:1) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - /// The range of component `a` is `[0, 31]`. - /// The range of component `p` is `[1, 31]`. - fn proxy_announced(a: u32, p: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `454 + a * (68 ±0) + p * (37 ±0)` - // Estimated: `5698` - // Minimum execution time: 36_685_000 picoseconds. 
- Weight::from_parts(36_376_358, 0) - .saturating_add(Weight::from_parts(0, 5698)) - // Standard Error: 3_003 - .saturating_add(Weight::from_parts(133_776, 0).saturating_mul(a.into())) - // Standard Error: 3_103 - .saturating_add(Weight::from_parts(60_315, 0).saturating_mul(p.into())) - .saturating_add(T::DbWeight::get().reads(3)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `Proxy::Announcements` (r:1 w:1) - /// Proof: `Proxy::Announcements` (`max_values`: None, `max_size`: Some(2233), added: 4708, mode: `MaxEncodedLen`) - /// Storage: `System::Account` (r:1 w:1) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - /// The range of component `a` is `[0, 31]`. - /// The range of component `p` is `[1, 31]`. - fn remove_announcement(a: u32, p: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `369 + a * (68 ±0)` - // Estimated: `5698` - // Minimum execution time: 23_835_000 picoseconds. - Weight::from_parts(24_154_219, 0) - .saturating_add(Weight::from_parts(0, 5698)) - // Standard Error: 1_580 - .saturating_add(Weight::from_parts(125_884, 0).saturating_mul(a.into())) - // Standard Error: 1_632 - .saturating_add(Weight::from_parts(21_563, 0).saturating_mul(p.into())) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `Proxy::Announcements` (r:1 w:1) - /// Proof: `Proxy::Announcements` (`max_values`: None, `max_size`: Some(2233), added: 4708, mode: `MaxEncodedLen`) - /// Storage: `System::Account` (r:1 w:1) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - /// The range of component `a` is `[0, 31]`. - /// The range of component `p` is `[1, 31]`. - fn reject_announcement(a: u32, p: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `369 + a * (68 ±0)` - // Estimated: `5698` - // Minimum execution time: 23_997_000 picoseconds. - Weight::from_parts(24_301_638, 0) - .saturating_add(Weight::from_parts(0, 5698)) - // Standard Error: 1_658 - .saturating_add(Weight::from_parts(133_005, 0).saturating_mul(a.into())) - // Standard Error: 1_713 - .saturating_add(Weight::from_parts(20_237, 0).saturating_mul(p.into())) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `Proxy::Proxies` (r:1 w:0) - /// Proof: `Proxy::Proxies` (`max_values`: None, `max_size`: Some(1241), added: 3716, mode: `MaxEncodedLen`) - /// Storage: `Proxy::Announcements` (r:1 w:1) - /// Proof: `Proxy::Announcements` (`max_values`: None, `max_size`: Some(2233), added: 4708, mode: `MaxEncodedLen`) - /// Storage: `System::Account` (r:1 w:1) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - /// The range of component `a` is `[0, 31]`. - /// The range of component `p` is `[1, 31]`. - fn announce(a: u32, p: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `386 + a * (68 ±0) + p * (37 ±0)` - // Estimated: `5698` - // Minimum execution time: 33_604_000 picoseconds. 
- Weight::from_parts(33_322_880, 0) - .saturating_add(Weight::from_parts(0, 5698)) - // Standard Error: 1_840 - .saturating_add(Weight::from_parts(114_037, 0).saturating_mul(a.into())) - // Standard Error: 1_901 - .saturating_add(Weight::from_parts(45_629, 0).saturating_mul(p.into())) - .saturating_add(T::DbWeight::get().reads(3)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `Proxy::Proxies` (r:1 w:1) - /// Proof: `Proxy::Proxies` (`max_values`: None, `max_size`: Some(1241), added: 3716, mode: `MaxEncodedLen`) - /// The range of component `p` is `[1, 31]`. - fn add_proxy(p: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `127 + p * (37 ±0)` - // Estimated: `4706` - // Minimum execution time: 24_634_000 picoseconds. - Weight::from_parts(25_509_118, 0) - .saturating_add(Weight::from_parts(0, 4706)) - // Standard Error: 2_278 - .saturating_add(Weight::from_parts(38_401, 0).saturating_mul(p.into())) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `Proxy::Proxies` (r:1 w:1) - /// Proof: `Proxy::Proxies` (`max_values`: None, `max_size`: Some(1241), added: 3716, mode: `MaxEncodedLen`) - /// The range of component `p` is `[1, 31]`. - fn remove_proxy(p: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `127 + p * (37 ±0)` - // Estimated: `4706` - // Minimum execution time: 24_855_000 picoseconds. - Weight::from_parts(25_753_505, 0) - .saturating_add(Weight::from_parts(0, 4706)) - // Standard Error: 1_819 - .saturating_add(Weight::from_parts(44_357, 0).saturating_mul(p.into())) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `Proxy::Proxies` (r:1 w:1) - /// Proof: `Proxy::Proxies` (`max_values`: None, `max_size`: Some(1241), added: 3716, mode: `MaxEncodedLen`) - /// The range of component `p` is `[1, 31]`. - fn remove_proxies(p: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `127 + p * (37 ±0)` - // Estimated: `4706` - // Minimum execution time: 22_211_000 picoseconds. - Weight::from_parts(23_094_124, 0) - .saturating_add(Weight::from_parts(0, 4706)) - // Standard Error: 2_597 - .saturating_add(Weight::from_parts(36_725, 0).saturating_mul(p.into())) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `Proxy::Proxies` (r:1 w:1) - /// Proof: `Proxy::Proxies` (`max_values`: None, `max_size`: Some(1241), added: 3716, mode: `MaxEncodedLen`) - /// The range of component `p` is `[1, 31]`. - fn create_pure(p: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `139` - // Estimated: `4706` - // Minimum execution time: 26_764_000 picoseconds. - Weight::from_parts(27_667_535, 0) - .saturating_add(Weight::from_parts(0, 4706)) - // Standard Error: 1_111 - .saturating_add(Weight::from_parts(3_422, 0).saturating_mul(p.into())) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `Proxy::Proxies` (r:1 w:1) - /// Proof: `Proxy::Proxies` (`max_values`: None, `max_size`: Some(1241), added: 3716, mode: `MaxEncodedLen`) - /// The range of component `p` is `[0, 30]`. - fn kill_pure(p: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `164 + p * (37 ±0)` - // Estimated: `4706` - // Minimum execution time: 22_632_000 picoseconds. 
- Weight::from_parts(23_678_772, 0) - .saturating_add(Weight::from_parts(0, 4706)) - // Standard Error: 2_136 - .saturating_add(Weight::from_parts(26_492, 0).saturating_mul(p.into())) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } -} diff --git a/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/pallet_ranked_collective_ambassador_collective.rs b/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/pallet_ranked_collective_ambassador_collective.rs deleted file mode 100644 index a6372c4b89d..00000000000 --- a/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/pallet_ranked_collective_ambassador_collective.rs +++ /dev/null @@ -1,177 +0,0 @@ -// Copyright Parity Technologies (UK) Ltd. -// This file is part of Cumulus. - -// Cumulus is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Cumulus is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Cumulus. If not, see . - -//! Autogenerated weights for `pallet_ranked_collective` -//! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-08-11, STEPS: `2`, REPEAT: `2`, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `cob`, CPU: `` -//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("collectives-polkadot-dev")`, DB CACHE: 1024 - -// Executed Command: -// target/release/polkadot-parachain -// benchmark -// pallet -// --chain=collectives-polkadot-dev -// --wasm-execution=compiled -// --pallet=pallet_ranked_collective -// --extrinsic=* -// --steps=2 -// --repeat=2 -// --json -// --header=./file_header.txt -// --output=./parachains/runtimes/collectives/collectives-polkadot/src/weights/ - -#![cfg_attr(rustfmt, rustfmt_skip)] -#![allow(unused_parens)] -#![allow(unused_imports)] -#![allow(missing_docs)] - -use frame_support::{traits::Get, weights::Weight}; -use core::marker::PhantomData; - -/// Weight functions for `pallet_ranked_collective`. -pub struct WeightInfo(PhantomData); -impl pallet_ranked_collective::WeightInfo for WeightInfo { - /// Storage: `AmbassadorCollective::Members` (r:1 w:1) - /// Proof: `AmbassadorCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`) - /// Storage: `AmbassadorCollective::MemberCount` (r:1 w:1) - /// Proof: `AmbassadorCollective::MemberCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) - /// Storage: `AmbassadorCollective::IndexToId` (r:0 w:1) - /// Proof: `AmbassadorCollective::IndexToId` (`max_values`: None, `max_size`: Some(54), added: 2529, mode: `MaxEncodedLen`) - /// Storage: `AmbassadorCollective::IdToIndex` (r:0 w:1) - /// Proof: `AmbassadorCollective::IdToIndex` (`max_values`: None, `max_size`: Some(54), added: 2529, mode: `MaxEncodedLen`) - fn add_member() -> Weight { - // Proof Size summary in bytes: - // Measured: `42` - // Estimated: `3507` - // Minimum execution time: 21_000_000 picoseconds. 
- Weight::from_parts(23_000_000, 0) - .saturating_add(Weight::from_parts(0, 3507)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(4)) - } - /// Storage: `AmbassadorCollective::Members` (r:1 w:1) - /// Proof: `AmbassadorCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`) - /// Storage: `AmbassadorCollective::MemberCount` (r:11 w:11) - /// Proof: `AmbassadorCollective::MemberCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) - /// Storage: `AmbassadorCollective::IdToIndex` (r:11 w:11) - /// Proof: `AmbassadorCollective::IdToIndex` (`max_values`: None, `max_size`: Some(54), added: 2529, mode: `MaxEncodedLen`) - /// Storage: `AmbassadorCollective::IndexToId` (r:11 w:11) - /// Proof: `AmbassadorCollective::IndexToId` (`max_values`: None, `max_size`: Some(54), added: 2529, mode: `MaxEncodedLen`) - /// The range of component `r` is `[0, 10]`. - /// The range of component `r` is `[0, 10]`. - fn remove_member(r: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `508 + r * (281 ±0)` - // Estimated: `3519 + r * (2529 ±0)` - // Minimum execution time: 34_000_000 picoseconds. - Weight::from_parts(36_500_000, 0) - .saturating_add(Weight::from_parts(0, 3519)) - // Standard Error: 158_113 - .saturating_add(Weight::from_parts(16_000_000, 0).saturating_mul(r.into())) - .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().reads((3_u64).saturating_mul(r.into()))) - .saturating_add(T::DbWeight::get().writes(4)) - .saturating_add(T::DbWeight::get().writes((3_u64).saturating_mul(r.into()))) - .saturating_add(Weight::from_parts(0, 2529).saturating_mul(r.into())) - } - /// Storage: `AmbassadorCollective::Members` (r:1 w:1) - /// Proof: `AmbassadorCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`) - /// Storage: `AmbassadorCollective::MemberCount` (r:1 w:1) - /// Proof: `AmbassadorCollective::MemberCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) - /// Storage: `AmbassadorCollective::IndexToId` (r:0 w:1) - /// Proof: `AmbassadorCollective::IndexToId` (`max_values`: None, `max_size`: Some(54), added: 2529, mode: `MaxEncodedLen`) - /// Storage: `AmbassadorCollective::IdToIndex` (r:0 w:1) - /// Proof: `AmbassadorCollective::IdToIndex` (`max_values`: None, `max_size`: Some(54), added: 2529, mode: `MaxEncodedLen`) - /// The range of component `r` is `[0, 10]`. - /// The range of component `r` is `[0, 10]`. - fn promote_member(r: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `210 + r * (17 ±0)` - // Estimated: `3507` - // Minimum execution time: 25_000_000 picoseconds. 
- Weight::from_parts(26_000_000, 0) - .saturating_add(Weight::from_parts(0, 3507)) - // Standard Error: 180_277 - .saturating_add(Weight::from_parts(650_000, 0).saturating_mul(r.into())) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(4)) - } - /// Storage: `AmbassadorCollective::Members` (r:1 w:1) - /// Proof: `AmbassadorCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`) - /// Storage: `AmbassadorCollective::MemberCount` (r:1 w:1) - /// Proof: `AmbassadorCollective::MemberCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) - /// Storage: `AmbassadorCollective::IdToIndex` (r:1 w:1) - /// Proof: `AmbassadorCollective::IdToIndex` (`max_values`: None, `max_size`: Some(54), added: 2529, mode: `MaxEncodedLen`) - /// Storage: `AmbassadorCollective::IndexToId` (r:1 w:1) - /// Proof: `AmbassadorCollective::IndexToId` (`max_values`: None, `max_size`: Some(54), added: 2529, mode: `MaxEncodedLen`) - /// The range of component `r` is `[0, 10]`. - /// The range of component `r` is `[0, 10]`. - fn demote_member(r: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `508 + r * (71 ±0)` - // Estimated: `3519` - // Minimum execution time: 34_000_000 picoseconds. - Weight::from_parts(36_500_000, 0) - .saturating_add(Weight::from_parts(0, 3519)) - // Standard Error: 335_410 - .saturating_add(Weight::from_parts(550_000, 0).saturating_mul(r.into())) - .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(4)) - } - /// Storage: `AmbassadorCollective::Members` (r:1 w:0) - /// Proof: `AmbassadorCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`) - /// Storage: `AmbassadorReferenda::ReferendumInfoFor` (r:1 w:1) - /// Proof: `AmbassadorReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) - /// Storage: `AmbassadorCollective::Voting` (r:1 w:1) - /// Proof: `AmbassadorCollective::Voting` (`max_values`: None, `max_size`: Some(65), added: 2540, mode: `MaxEncodedLen`) - /// Storage: `Scheduler::Agenda` (r:2 w:2) - /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(155814), added: 158289, mode: `MaxEncodedLen`) - fn vote() -> Weight { - // Proof Size summary in bytes: - // Measured: `566` - // Estimated: `317568` - // Minimum execution time: 57_000_000 picoseconds. - Weight::from_parts(60_000_000, 0) - .saturating_add(Weight::from_parts(0, 317568)) - .saturating_add(T::DbWeight::get().reads(5)) - .saturating_add(T::DbWeight::get().writes(4)) - } - /// Storage: `AmbassadorReferenda::ReferendumInfoFor` (r:1 w:0) - /// Proof: `AmbassadorReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) - /// Storage: `AmbassadorCollective::VotingCleanup` (r:1 w:0) - /// Proof: `AmbassadorCollective::VotingCleanup` (`max_values`: None, `max_size`: Some(114), added: 2589, mode: `MaxEncodedLen`) - /// Storage: `AmbassadorCollective::Voting` (r:100 w:100) - /// Proof: `AmbassadorCollective::Voting` (`max_values`: None, `max_size`: Some(65), added: 2540, mode: `MaxEncodedLen`) - /// The range of component `n` is `[0, 100]`. - /// The range of component `n` is `[0, 100]`. - fn cleanup_poll(n: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `209 + n * (52 ±0)` - // Estimated: `4365 + n * (2550 ±0)` - // Minimum execution time: 18_000_000 picoseconds. 
- Weight::from_parts(18_500_000, 0) - .saturating_add(Weight::from_parts(0, 4365)) - // Standard Error: 11_180 - .saturating_add(Weight::from_parts(1_335_000, 0).saturating_mul(n.into())) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(n.into()))) - .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(n.into()))) - .saturating_add(Weight::from_parts(0, 2550).saturating_mul(n.into())) - } -} diff --git a/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/pallet_ranked_collective_fellowship_collective.rs b/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/pallet_ranked_collective_fellowship_collective.rs deleted file mode 100644 index 9c773c56ac3..00000000000 --- a/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/pallet_ranked_collective_fellowship_collective.rs +++ /dev/null @@ -1,176 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! Autogenerated weights for `pallet_ranked_collective` -//! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-08-11, STEPS: `2`, REPEAT: `2`, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `cob`, CPU: `` -//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("collectives-polkadot-dev")`, DB CACHE: 1024 - -// Executed Command: -// target/release/polkadot-parachain -// benchmark -// pallet -// --chain=collectives-polkadot-dev -// --wasm-execution=compiled -// --pallet=pallet_ranked_collective -// --extrinsic=* -// --steps=2 -// --repeat=2 -// --json -// --header=./file_header.txt -// --output=./parachains/runtimes/collectives/collectives-polkadot/src/weights/ - -#![cfg_attr(rustfmt, rustfmt_skip)] -#![allow(unused_parens)] -#![allow(unused_imports)] -#![allow(missing_docs)] - -use frame_support::{traits::Get, weights::Weight}; -use core::marker::PhantomData; - -/// Weight functions for `pallet_ranked_collective`. 
-pub struct WeightInfo(PhantomData); -impl pallet_ranked_collective::WeightInfo for WeightInfo { - /// Storage: `FellowshipCollective::Members` (r:1 w:1) - /// Proof: `FellowshipCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`) - /// Storage: `FellowshipCollective::MemberCount` (r:1 w:1) - /// Proof: `FellowshipCollective::MemberCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) - /// Storage: `FellowshipCollective::IndexToId` (r:0 w:1) - /// Proof: `FellowshipCollective::IndexToId` (`max_values`: None, `max_size`: Some(54), added: 2529, mode: `MaxEncodedLen`) - /// Storage: `FellowshipCollective::IdToIndex` (r:0 w:1) - /// Proof: `FellowshipCollective::IdToIndex` (`max_values`: None, `max_size`: Some(54), added: 2529, mode: `MaxEncodedLen`) - fn add_member() -> Weight { - // Proof Size summary in bytes: - // Measured: `142` - // Estimated: `3507` - // Minimum execution time: 21_000_000 picoseconds. - Weight::from_parts(22_000_000, 0) - .saturating_add(Weight::from_parts(0, 3507)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(4)) - } - /// Storage: `FellowshipCollective::Members` (r:1 w:1) - /// Proof: `FellowshipCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`) - /// Storage: `FellowshipCollective::MemberCount` (r:11 w:11) - /// Proof: `FellowshipCollective::MemberCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) - /// Storage: `FellowshipCollective::IdToIndex` (r:11 w:11) - /// Proof: `FellowshipCollective::IdToIndex` (`max_values`: None, `max_size`: Some(54), added: 2529, mode: `MaxEncodedLen`) - /// Storage: `FellowshipCollective::IndexToId` (r:11 w:11) - /// Proof: `FellowshipCollective::IndexToId` (`max_values`: None, `max_size`: Some(54), added: 2529, mode: `MaxEncodedLen`) - /// The range of component `r` is `[0, 10]`. - /// The range of component `r` is `[0, 10]`. - fn remove_member(r: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `608 + r * (281 ±0)` - // Estimated: `3519 + r * (2529 ±0)` - // Minimum execution time: 35_000_000 picoseconds. - Weight::from_parts(36_500_000, 0) - .saturating_add(Weight::from_parts(0, 3519)) - // Standard Error: 254_950 - .saturating_add(Weight::from_parts(15_900_000, 0).saturating_mul(r.into())) - .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().reads((3_u64).saturating_mul(r.into()))) - .saturating_add(T::DbWeight::get().writes(4)) - .saturating_add(T::DbWeight::get().writes((3_u64).saturating_mul(r.into()))) - .saturating_add(Weight::from_parts(0, 2529).saturating_mul(r.into())) - } - /// Storage: `FellowshipCollective::Members` (r:1 w:1) - /// Proof: `FellowshipCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`) - /// Storage: `FellowshipCollective::MemberCount` (r:1 w:1) - /// Proof: `FellowshipCollective::MemberCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) - /// Storage: `FellowshipCollective::IndexToId` (r:0 w:1) - /// Proof: `FellowshipCollective::IndexToId` (`max_values`: None, `max_size`: Some(54), added: 2529, mode: `MaxEncodedLen`) - /// Storage: `FellowshipCollective::IdToIndex` (r:0 w:1) - /// Proof: `FellowshipCollective::IdToIndex` (`max_values`: None, `max_size`: Some(54), added: 2529, mode: `MaxEncodedLen`) - /// The range of component `r` is `[0, 10]`. 
- /// The range of component `r` is `[0, 10]`. - fn promote_member(r: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `310 + r * (17 ±0)` - // Estimated: `3507` - // Minimum execution time: 25_000_000 picoseconds. - Weight::from_parts(25_500_000, 0) - .saturating_add(Weight::from_parts(0, 3507)) - // Standard Error: 70_710 - .saturating_add(Weight::from_parts(400_000, 0).saturating_mul(r.into())) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(4)) - } - /// Storage: `FellowshipCollective::Members` (r:1 w:1) - /// Proof: `FellowshipCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`) - /// Storage: `FellowshipCollective::MemberCount` (r:1 w:1) - /// Proof: `FellowshipCollective::MemberCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) - /// Storage: `FellowshipCollective::IdToIndex` (r:1 w:1) - /// Proof: `FellowshipCollective::IdToIndex` (`max_values`: None, `max_size`: Some(54), added: 2529, mode: `MaxEncodedLen`) - /// Storage: `FellowshipCollective::IndexToId` (r:1 w:1) - /// Proof: `FellowshipCollective::IndexToId` (`max_values`: None, `max_size`: Some(54), added: 2529, mode: `MaxEncodedLen`) - /// The range of component `r` is `[0, 10]`. - /// The range of component `r` is `[0, 10]`. - fn demote_member(r: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `608 + r * (71 ±0)` - // Estimated: `3519` - // Minimum execution time: 35_000_000 picoseconds. - Weight::from_parts(37_500_000, 0) - .saturating_add(Weight::from_parts(0, 3519)) - // Standard Error: 150_000 - .saturating_add(Weight::from_parts(350_000, 0).saturating_mul(r.into())) - .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(4)) - } - /// Storage: `FellowshipCollective::Members` (r:1 w:0) - /// Proof: `FellowshipCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`) - /// Storage: `FellowshipReferenda::ReferendumInfoFor` (r:1 w:1) - /// Proof: `FellowshipReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) - /// Storage: `FellowshipCollective::Voting` (r:1 w:1) - /// Proof: `FellowshipCollective::Voting` (`max_values`: None, `max_size`: Some(65), added: 2540, mode: `MaxEncodedLen`) - /// Storage: `Scheduler::Agenda` (r:2 w:2) - /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(155814), added: 158289, mode: `MaxEncodedLen`) - fn vote() -> Weight { - // Proof Size summary in bytes: - // Measured: `700` - // Estimated: `317568` - // Minimum execution time: 57_000_000 picoseconds. - Weight::from_parts(57_000_000, 0) - .saturating_add(Weight::from_parts(0, 317568)) - .saturating_add(T::DbWeight::get().reads(5)) - .saturating_add(T::DbWeight::get().writes(4)) - } - /// Storage: `FellowshipReferenda::ReferendumInfoFor` (r:1 w:0) - /// Proof: `FellowshipReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) - /// Storage: `FellowshipCollective::VotingCleanup` (r:1 w:0) - /// Proof: `FellowshipCollective::VotingCleanup` (`max_values`: None, `max_size`: Some(114), added: 2589, mode: `MaxEncodedLen`) - /// Storage: `FellowshipCollective::Voting` (r:100 w:100) - /// Proof: `FellowshipCollective::Voting` (`max_values`: None, `max_size`: Some(65), added: 2540, mode: `MaxEncodedLen`) - /// The range of component `n` is `[0, 100]`. 
- /// The range of component `n` is `[0, 100]`. - fn cleanup_poll(n: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `343 + n * (52 ±0)` - // Estimated: `4365 + n * (2550 ±0)` - // Minimum execution time: 18_000_000 picoseconds. - Weight::from_parts(19_000_000, 0) - .saturating_add(Weight::from_parts(0, 4365)) - // Standard Error: 25_000 - .saturating_add(Weight::from_parts(1_395_000, 0).saturating_mul(n.into())) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(n.into()))) - .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(n.into()))) - .saturating_add(Weight::from_parts(0, 2550).saturating_mul(n.into())) - } -} diff --git a/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/pallet_referenda_ambassador_referenda.rs b/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/pallet_referenda_ambassador_referenda.rs deleted file mode 100644 index fdc451c5d31..00000000000 --- a/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/pallet_referenda_ambassador_referenda.rs +++ /dev/null @@ -1,536 +0,0 @@ -// Copyright Parity Technologies (UK) Ltd. -// This file is part of Cumulus. - -// Cumulus is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Cumulus is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Cumulus. If not, see . - -//! Autogenerated weights for `pallet_referenda` -//! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-08-11, STEPS: `2`, REPEAT: `2`, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `cob`, CPU: `` -//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("collectives-polkadot-dev")`, DB CACHE: 1024 - -// Executed Command: -// target/release/polkadot-parachain -// benchmark -// pallet -// --chain=collectives-polkadot-dev -// --wasm-execution=compiled -// --pallet=pallet_referenda -// --extrinsic=* -// --steps=2 -// --repeat=2 -// --json -// --header=./file_header.txt -// --output=./parachains/runtimes/collectives/collectives-polkadot/src/weights/ - -#![cfg_attr(rustfmt, rustfmt_skip)] -#![allow(unused_parens)] -#![allow(unused_imports)] -#![allow(missing_docs)] - -use frame_support::{traits::Get, weights::Weight}; -use core::marker::PhantomData; - -/// Weight functions for `pallet_referenda`. 
-pub struct WeightInfo(PhantomData); -impl pallet_referenda::WeightInfo for WeightInfo { - /// Storage: `AmbassadorCollective::Members` (r:1 w:0) - /// Proof: `AmbassadorCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`) - /// Storage: `AmbassadorReferenda::ReferendumCount` (r:1 w:1) - /// Proof: `AmbassadorReferenda::ReferendumCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - /// Storage: `Scheduler::Agenda` (r:1 w:1) - /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(155814), added: 158289, mode: `MaxEncodedLen`) - /// Storage: `AmbassadorReferenda::ReferendumInfoFor` (r:0 w:1) - /// Proof: `AmbassadorReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) - fn submit() -> Weight { - // Proof Size summary in bytes: - // Measured: `255` - // Estimated: `159279` - // Minimum execution time: 32_000_000 picoseconds. - Weight::from_parts(34_000_000, 0) - .saturating_add(Weight::from_parts(0, 159279)) - .saturating_add(T::DbWeight::get().reads(3)) - .saturating_add(T::DbWeight::get().writes(3)) - } - /// Storage: `AmbassadorReferenda::ReferendumInfoFor` (r:1 w:1) - /// Proof: `AmbassadorReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) - /// Storage: `Scheduler::Agenda` (r:2 w:2) - /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(155814), added: 158289, mode: `MaxEncodedLen`) - fn place_decision_deposit_preparing() -> Weight { - // Proof Size summary in bytes: - // Measured: `366` - // Estimated: `317568` - // Minimum execution time: 63_000_000 picoseconds. - Weight::from_parts(68_000_000, 0) - .saturating_add(Weight::from_parts(0, 317568)) - .saturating_add(T::DbWeight::get().reads(3)) - .saturating_add(T::DbWeight::get().writes(3)) - } - /// Storage: `AmbassadorReferenda::ReferendumInfoFor` (r:1 w:1) - /// Proof: `AmbassadorReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) - /// Storage: `AmbassadorReferenda::DecidingCount` (r:1 w:0) - /// Proof: `AmbassadorReferenda::DecidingCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) - /// Storage: `AmbassadorReferenda::TrackQueue` (r:1 w:1) - /// Proof: `AmbassadorReferenda::TrackQueue` (`max_values`: None, `max_size`: Some(171), added: 2646, mode: `MaxEncodedLen`) - /// Storage: `Scheduler::Agenda` (r:1 w:1) - /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(155814), added: 158289, mode: `MaxEncodedLen`) - fn place_decision_deposit_queued() -> Weight { - // Proof Size summary in bytes: - // Measured: `1165` - // Estimated: `159279` - // Minimum execution time: 97_000_000 picoseconds. 
- Weight::from_parts(123_000_000, 0) - .saturating_add(Weight::from_parts(0, 159279)) - .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(3)) - } - /// Storage: `AmbassadorReferenda::ReferendumInfoFor` (r:1 w:1) - /// Proof: `AmbassadorReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) - /// Storage: `AmbassadorReferenda::DecidingCount` (r:1 w:0) - /// Proof: `AmbassadorReferenda::DecidingCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) - /// Storage: `AmbassadorReferenda::TrackQueue` (r:1 w:1) - /// Proof: `AmbassadorReferenda::TrackQueue` (`max_values`: None, `max_size`: Some(171), added: 2646, mode: `MaxEncodedLen`) - /// Storage: `Scheduler::Agenda` (r:1 w:1) - /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(155814), added: 158289, mode: `MaxEncodedLen`) - fn place_decision_deposit_not_queued() -> Weight { - // Proof Size summary in bytes: - // Measured: `1173` - // Estimated: `159279` - // Minimum execution time: 104_000_000 picoseconds. - Weight::from_parts(111_000_000, 0) - .saturating_add(Weight::from_parts(0, 159279)) - .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(3)) - } - /// Storage: `AmbassadorReferenda::ReferendumInfoFor` (r:1 w:1) - /// Proof: `AmbassadorReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) - /// Storage: `AmbassadorReferenda::DecidingCount` (r:1 w:1) - /// Proof: `AmbassadorReferenda::DecidingCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) - /// Storage: `AmbassadorCollective::MemberCount` (r:1 w:0) - /// Proof: `AmbassadorCollective::MemberCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) - /// Storage: `Scheduler::Agenda` (r:2 w:2) - /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(155814), added: 158289, mode: `MaxEncodedLen`) - fn place_decision_deposit_passing() -> Weight { - // Proof Size summary in bytes: - // Measured: `702` - // Estimated: `317568` - // Minimum execution time: 140_000_000 picoseconds. - Weight::from_parts(150_000_000, 0) - .saturating_add(Weight::from_parts(0, 317568)) - .saturating_add(T::DbWeight::get().reads(5)) - .saturating_add(T::DbWeight::get().writes(4)) - } - /// Storage: `AmbassadorReferenda::ReferendumInfoFor` (r:1 w:1) - /// Proof: `AmbassadorReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) - /// Storage: `AmbassadorReferenda::DecidingCount` (r:1 w:1) - /// Proof: `AmbassadorReferenda::DecidingCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) - /// Storage: `AmbassadorCollective::MemberCount` (r:1 w:0) - /// Proof: `AmbassadorCollective::MemberCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) - /// Storage: `Scheduler::Agenda` (r:2 w:2) - /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(155814), added: 158289, mode: `MaxEncodedLen`) - fn place_decision_deposit_failing() -> Weight { - // Proof Size summary in bytes: - // Measured: `601` - // Estimated: `317568` - // Minimum execution time: 81_000_000 picoseconds. 
- Weight::from_parts(82_000_000, 0) - .saturating_add(Weight::from_parts(0, 317568)) - .saturating_add(T::DbWeight::get().reads(5)) - .saturating_add(T::DbWeight::get().writes(4)) - } - /// Storage: `AmbassadorReferenda::ReferendumInfoFor` (r:1 w:1) - /// Proof: `AmbassadorReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) - fn refund_decision_deposit() -> Weight { - // Proof Size summary in bytes: - // Measured: `317` - // Estimated: `4365` - // Minimum execution time: 38_000_000 picoseconds. - Weight::from_parts(38_000_000, 0) - .saturating_add(Weight::from_parts(0, 4365)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `AmbassadorReferenda::ReferendumInfoFor` (r:1 w:1) - /// Proof: `AmbassadorReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) - fn refund_submission_deposit() -> Weight { - // Proof Size summary in bytes: - // Measured: `167` - // Estimated: `4365` - // Minimum execution time: 17_000_000 picoseconds. - Weight::from_parts(18_000_000, 0) - .saturating_add(Weight::from_parts(0, 4365)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `AmbassadorReferenda::ReferendumInfoFor` (r:1 w:1) - /// Proof: `AmbassadorReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) - /// Storage: `Scheduler::Agenda` (r:2 w:2) - /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(155814), added: 158289, mode: `MaxEncodedLen`) - fn cancel() -> Weight { - // Proof Size summary in bytes: - // Measured: `311` - // Estimated: `317568` - // Minimum execution time: 44_000_000 picoseconds. 
- Weight::from_parts(45_000_000, 0) - .saturating_add(Weight::from_parts(0, 317568)) - .saturating_add(T::DbWeight::get().reads(3)) - .saturating_add(T::DbWeight::get().writes(3)) - } - /// Storage: `AmbassadorReferenda::ReferendumInfoFor` (r:1 w:1) - /// Proof: `AmbassadorReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) - /// Storage: `Scheduler::Agenda` (r:2 w:2) - /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(155814), added: 158289, mode: `MaxEncodedLen`) - /// Storage: `System::Account` (r:1 w:1) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - /// Storage: `ParachainInfo::ParachainId` (r:1 w:0) - /// Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) - /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) - /// Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) - /// Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) - /// Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) - /// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `AmbassadorReferenda::MetadataOf` (r:1 w:0) - /// Proof: `AmbassadorReferenda::MetadataOf` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) - fn kill() -> Weight { - // Proof Size summary in bytes: - // Measured: `626` - // Estimated: `317568` - // Minimum execution time: 183_000_000 picoseconds. - Weight::from_parts(187_000_000, 0) - .saturating_add(Weight::from_parts(0, 317568)) - .saturating_add(T::DbWeight::get().reads(11)) - .saturating_add(T::DbWeight::get().writes(6)) - } - /// Storage: `AmbassadorReferenda::TrackQueue` (r:1 w:0) - /// Proof: `AmbassadorReferenda::TrackQueue` (`max_values`: None, `max_size`: Some(171), added: 2646, mode: `MaxEncodedLen`) - /// Storage: `AmbassadorReferenda::DecidingCount` (r:1 w:1) - /// Proof: `AmbassadorReferenda::DecidingCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) - fn one_fewer_deciding_queue_empty() -> Weight { - // Proof Size summary in bytes: - // Measured: `140` - // Estimated: `3636` - // Minimum execution time: 12_000_000 picoseconds. 
- Weight::from_parts(12_000_000, 0) - .saturating_add(Weight::from_parts(0, 3636)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `AmbassadorReferenda::TrackQueue` (r:1 w:1) - /// Proof: `AmbassadorReferenda::TrackQueue` (`max_values`: None, `max_size`: Some(171), added: 2646, mode: `MaxEncodedLen`) - /// Storage: `AmbassadorReferenda::ReferendumInfoFor` (r:1 w:1) - /// Proof: `AmbassadorReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) - /// Storage: `AmbassadorCollective::MemberCount` (r:1 w:0) - /// Proof: `AmbassadorCollective::MemberCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) - /// Storage: `Scheduler::Agenda` (r:1 w:1) - /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(155814), added: 158289, mode: `MaxEncodedLen`) - fn one_fewer_deciding_failing() -> Weight { - // Proof Size summary in bytes: - // Measured: `1412` - // Estimated: `159279` - // Minimum execution time: 88_000_000 picoseconds. - Weight::from_parts(97_000_000, 0) - .saturating_add(Weight::from_parts(0, 159279)) - .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(3)) - } - /// Storage: `AmbassadorReferenda::TrackQueue` (r:1 w:1) - /// Proof: `AmbassadorReferenda::TrackQueue` (`max_values`: None, `max_size`: Some(171), added: 2646, mode: `MaxEncodedLen`) - /// Storage: `AmbassadorReferenda::ReferendumInfoFor` (r:1 w:1) - /// Proof: `AmbassadorReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) - /// Storage: `AmbassadorCollective::MemberCount` (r:1 w:0) - /// Proof: `AmbassadorCollective::MemberCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) - /// Storage: `Scheduler::Agenda` (r:1 w:1) - /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(155814), added: 158289, mode: `MaxEncodedLen`) - fn one_fewer_deciding_passing() -> Weight { - // Proof Size summary in bytes: - // Measured: `1412` - // Estimated: `159279` - // Minimum execution time: 87_000_000 picoseconds. - Weight::from_parts(92_000_000, 0) - .saturating_add(Weight::from_parts(0, 159279)) - .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(3)) - } - /// Storage: `AmbassadorReferenda::ReferendumInfoFor` (r:1 w:0) - /// Proof: `AmbassadorReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) - /// Storage: `AmbassadorReferenda::TrackQueue` (r:1 w:1) - /// Proof: `AmbassadorReferenda::TrackQueue` (`max_values`: None, `max_size`: Some(171), added: 2646, mode: `MaxEncodedLen`) - fn nudge_referendum_requeued_insertion() -> Weight { - // Proof Size summary in bytes: - // Measured: `935` - // Estimated: `4365` - // Minimum execution time: 43_000_000 picoseconds. 
- Weight::from_parts(46_000_000, 0) - .saturating_add(Weight::from_parts(0, 4365)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `AmbassadorReferenda::ReferendumInfoFor` (r:1 w:0) - /// Proof: `AmbassadorReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) - /// Storage: `AmbassadorReferenda::TrackQueue` (r:1 w:1) - /// Proof: `AmbassadorReferenda::TrackQueue` (`max_values`: None, `max_size`: Some(171), added: 2646, mode: `MaxEncodedLen`) - fn nudge_referendum_requeued_slide() -> Weight { - // Proof Size summary in bytes: - // Measured: `935` - // Estimated: `4365` - // Minimum execution time: 39_000_000 picoseconds. - Weight::from_parts(43_000_000, 0) - .saturating_add(Weight::from_parts(0, 4365)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `AmbassadorReferenda::ReferendumInfoFor` (r:1 w:1) - /// Proof: `AmbassadorReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) - /// Storage: `AmbassadorReferenda::DecidingCount` (r:1 w:0) - /// Proof: `AmbassadorReferenda::DecidingCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) - /// Storage: `AmbassadorReferenda::TrackQueue` (r:1 w:1) - /// Proof: `AmbassadorReferenda::TrackQueue` (`max_values`: None, `max_size`: Some(171), added: 2646, mode: `MaxEncodedLen`) - fn nudge_referendum_queued() -> Weight { - // Proof Size summary in bytes: - // Measured: `951` - // Estimated: `4365` - // Minimum execution time: 48_000_000 picoseconds. - Weight::from_parts(50_000_000, 0) - .saturating_add(Weight::from_parts(0, 4365)) - .saturating_add(T::DbWeight::get().reads(3)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `AmbassadorReferenda::ReferendumInfoFor` (r:1 w:1) - /// Proof: `AmbassadorReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) - /// Storage: `AmbassadorReferenda::DecidingCount` (r:1 w:0) - /// Proof: `AmbassadorReferenda::DecidingCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) - /// Storage: `AmbassadorReferenda::TrackQueue` (r:1 w:1) - /// Proof: `AmbassadorReferenda::TrackQueue` (`max_values`: None, `max_size`: Some(171), added: 2646, mode: `MaxEncodedLen`) - fn nudge_referendum_not_queued() -> Weight { - // Proof Size summary in bytes: - // Measured: `959` - // Estimated: `4365` - // Minimum execution time: 42_000_000 picoseconds. - Weight::from_parts(48_000_000, 0) - .saturating_add(Weight::from_parts(0, 4365)) - .saturating_add(T::DbWeight::get().reads(3)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `AmbassadorReferenda::ReferendumInfoFor` (r:1 w:1) - /// Proof: `AmbassadorReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) - /// Storage: `Scheduler::Agenda` (r:1 w:1) - /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(155814), added: 158289, mode: `MaxEncodedLen`) - fn nudge_referendum_no_deposit() -> Weight { - // Proof Size summary in bytes: - // Measured: `263` - // Estimated: `159279` - // Minimum execution time: 28_000_000 picoseconds. 
- Weight::from_parts(30_000_000, 0) - .saturating_add(Weight::from_parts(0, 159279)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `AmbassadorReferenda::ReferendumInfoFor` (r:1 w:1) - /// Proof: `AmbassadorReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) - /// Storage: `Scheduler::Agenda` (r:1 w:1) - /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(155814), added: 158289, mode: `MaxEncodedLen`) - fn nudge_referendum_preparing() -> Weight { - // Proof Size summary in bytes: - // Measured: `311` - // Estimated: `159279` - // Minimum execution time: 26_000_000 picoseconds. - Weight::from_parts(28_000_000, 0) - .saturating_add(Weight::from_parts(0, 159279)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `AmbassadorReferenda::ReferendumInfoFor` (r:1 w:1) - /// Proof: `AmbassadorReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) - fn nudge_referendum_timed_out() -> Weight { - // Proof Size summary in bytes: - // Measured: `208` - // Estimated: `4365` - // Minimum execution time: 19_000_000 picoseconds. - Weight::from_parts(20_000_000, 0) - .saturating_add(Weight::from_parts(0, 4365)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `AmbassadorReferenda::ReferendumInfoFor` (r:1 w:1) - /// Proof: `AmbassadorReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) - /// Storage: `AmbassadorReferenda::DecidingCount` (r:1 w:1) - /// Proof: `AmbassadorReferenda::DecidingCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) - /// Storage: `AmbassadorCollective::MemberCount` (r:1 w:0) - /// Proof: `AmbassadorCollective::MemberCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) - /// Storage: `Scheduler::Agenda` (r:1 w:1) - /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(155814), added: 158289, mode: `MaxEncodedLen`) - fn nudge_referendum_begin_deciding_failing() -> Weight { - // Proof Size summary in bytes: - // Measured: `546` - // Estimated: `159279` - // Minimum execution time: 42_000_000 picoseconds. - Weight::from_parts(46_000_000, 0) - .saturating_add(Weight::from_parts(0, 159279)) - .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(3)) - } - /// Storage: `AmbassadorReferenda::ReferendumInfoFor` (r:1 w:1) - /// Proof: `AmbassadorReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) - /// Storage: `AmbassadorReferenda::DecidingCount` (r:1 w:1) - /// Proof: `AmbassadorReferenda::DecidingCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) - /// Storage: `AmbassadorCollective::MemberCount` (r:1 w:0) - /// Proof: `AmbassadorCollective::MemberCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) - /// Storage: `Scheduler::Agenda` (r:1 w:1) - /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(155814), added: 158289, mode: `MaxEncodedLen`) - fn nudge_referendum_begin_deciding_passing() -> Weight { - // Proof Size summary in bytes: - // Measured: `647` - // Estimated: `159279` - // Minimum execution time: 87_000_000 picoseconds. 
- Weight::from_parts(93_000_000, 0) - .saturating_add(Weight::from_parts(0, 159279)) - .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(3)) - } - /// Storage: `AmbassadorReferenda::ReferendumInfoFor` (r:1 w:1) - /// Proof: `AmbassadorReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) - /// Storage: `AmbassadorCollective::MemberCount` (r:1 w:0) - /// Proof: `AmbassadorCollective::MemberCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) - /// Storage: `Scheduler::Agenda` (r:1 w:1) - /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(155814), added: 158289, mode: `MaxEncodedLen`) - fn nudge_referendum_begin_confirming() -> Weight { - // Proof Size summary in bytes: - // Measured: `700` - // Estimated: `159279` - // Minimum execution time: 100_000_000 picoseconds. - Weight::from_parts(120_000_000, 0) - .saturating_add(Weight::from_parts(0, 159279)) - .saturating_add(T::DbWeight::get().reads(3)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `AmbassadorReferenda::ReferendumInfoFor` (r:1 w:1) - /// Proof: `AmbassadorReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) - /// Storage: `AmbassadorCollective::MemberCount` (r:1 w:0) - /// Proof: `AmbassadorCollective::MemberCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) - /// Storage: `Scheduler::Agenda` (r:1 w:1) - /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(155814), added: 158289, mode: `MaxEncodedLen`) - fn nudge_referendum_end_confirming() -> Weight { - // Proof Size summary in bytes: - // Measured: `683` - // Estimated: `159279` - // Minimum execution time: 90_000_000 picoseconds. - Weight::from_parts(100_000_000, 0) - .saturating_add(Weight::from_parts(0, 159279)) - .saturating_add(T::DbWeight::get().reads(3)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `AmbassadorReferenda::ReferendumInfoFor` (r:1 w:1) - /// Proof: `AmbassadorReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) - /// Storage: `AmbassadorCollective::MemberCount` (r:1 w:0) - /// Proof: `AmbassadorCollective::MemberCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) - /// Storage: `Scheduler::Agenda` (r:1 w:1) - /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(155814), added: 158289, mode: `MaxEncodedLen`) - fn nudge_referendum_continue_not_confirming() -> Weight { - // Proof Size summary in bytes: - // Measured: `700` - // Estimated: `159279` - // Minimum execution time: 77_000_000 picoseconds. 
- Weight::from_parts(82_000_000, 0) - .saturating_add(Weight::from_parts(0, 159279)) - .saturating_add(T::DbWeight::get().reads(3)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `AmbassadorReferenda::ReferendumInfoFor` (r:1 w:1) - /// Proof: `AmbassadorReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) - /// Storage: `AmbassadorCollective::MemberCount` (r:1 w:0) - /// Proof: `AmbassadorCollective::MemberCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) - /// Storage: `Scheduler::Agenda` (r:1 w:1) - /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(155814), added: 158289, mode: `MaxEncodedLen`) - fn nudge_referendum_continue_confirming() -> Weight { - // Proof Size summary in bytes: - // Measured: `704` - // Estimated: `159279` - // Minimum execution time: 68_000_000 picoseconds. - Weight::from_parts(77_000_000, 0) - .saturating_add(Weight::from_parts(0, 159279)) - .saturating_add(T::DbWeight::get().reads(3)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `AmbassadorReferenda::ReferendumInfoFor` (r:1 w:1) - /// Proof: `AmbassadorReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) - /// Storage: `AmbassadorCollective::MemberCount` (r:1 w:0) - /// Proof: `AmbassadorCollective::MemberCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) - /// Storage: `Scheduler::Agenda` (r:2 w:2) - /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(155814), added: 158289, mode: `MaxEncodedLen`) - /// Storage: `Scheduler::Lookup` (r:1 w:1) - /// Proof: `Scheduler::Lookup` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`) - fn nudge_referendum_approved() -> Weight { - // Proof Size summary in bytes: - // Measured: `704` - // Estimated: `317568` - // Minimum execution time: 99_000_000 picoseconds. - Weight::from_parts(104_000_000, 0) - .saturating_add(Weight::from_parts(0, 317568)) - .saturating_add(T::DbWeight::get().reads(5)) - .saturating_add(T::DbWeight::get().writes(4)) - } - /// Storage: `AmbassadorReferenda::ReferendumInfoFor` (r:1 w:1) - /// Proof: `AmbassadorReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) - /// Storage: `AmbassadorCollective::MemberCount` (r:1 w:0) - /// Proof: `AmbassadorCollective::MemberCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) - /// Storage: `Scheduler::Agenda` (r:1 w:1) - /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(155814), added: 158289, mode: `MaxEncodedLen`) - fn nudge_referendum_rejected() -> Weight { - // Proof Size summary in bytes: - // Measured: `700` - // Estimated: `159279` - // Minimum execution time: 87_000_000 picoseconds. 
- Weight::from_parts(100_000_000, 0) - .saturating_add(Weight::from_parts(0, 159279)) - .saturating_add(T::DbWeight::get().reads(3)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `AmbassadorReferenda::ReferendumInfoFor` (r:1 w:0) - /// Proof: `AmbassadorReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) - /// Storage: `Preimage::StatusFor` (r:1 w:0) - /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) - /// Storage: `AmbassadorReferenda::MetadataOf` (r:0 w:1) - /// Proof: `AmbassadorReferenda::MetadataOf` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) - fn set_some_metadata() -> Weight { - // Proof Size summary in bytes: - // Measured: `419` - // Estimated: `4365` - // Minimum execution time: 23_000_000 picoseconds. - Weight::from_parts(25_000_000, 0) - .saturating_add(Weight::from_parts(0, 4365)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `AmbassadorReferenda::ReferendumInfoFor` (r:1 w:0) - /// Proof: `AmbassadorReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) - /// Storage: `AmbassadorReferenda::MetadataOf` (r:1 w:1) - /// Proof: `AmbassadorReferenda::MetadataOf` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) - fn clear_metadata() -> Weight { - // Proof Size summary in bytes: - // Measured: `285` - // Estimated: `4365` - // Minimum execution time: 20_000_000 picoseconds. - Weight::from_parts(21_000_000, 0) - .saturating_add(Weight::from_parts(0, 4365)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(1)) - } -} diff --git a/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/pallet_referenda_fellowship_referenda.rs b/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/pallet_referenda_fellowship_referenda.rs deleted file mode 100644 index 63f68833795..00000000000 --- a/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/pallet_referenda_fellowship_referenda.rs +++ /dev/null @@ -1,535 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! Autogenerated weights for `pallet_referenda` -//! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-08-11, STEPS: `2`, REPEAT: `2`, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `cob`, CPU: `` -//! 
WASM-EXECUTION: `Compiled`, CHAIN: `Some("collectives-polkadot-dev")`, DB CACHE: 1024 - -// Executed Command: -// target/release/polkadot-parachain -// benchmark -// pallet -// --chain=collectives-polkadot-dev -// --wasm-execution=compiled -// --pallet=pallet_referenda -// --extrinsic=* -// --steps=2 -// --repeat=2 -// --json -// --header=./file_header.txt -// --output=./parachains/runtimes/collectives/collectives-polkadot/src/weights/ - -#![cfg_attr(rustfmt, rustfmt_skip)] -#![allow(unused_parens)] -#![allow(unused_imports)] -#![allow(missing_docs)] - -use frame_support::{traits::Get, weights::Weight}; -use core::marker::PhantomData; - -/// Weight functions for `pallet_referenda`. -pub struct WeightInfo<T>(PhantomData<T>); -impl<T: frame_system::Config> pallet_referenda::WeightInfo for WeightInfo<T> { - /// Storage: `FellowshipCollective::Members` (r:1 w:0) - /// Proof: `FellowshipCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`) - /// Storage: `FellowshipReferenda::ReferendumCount` (r:1 w:1) - /// Proof: `FellowshipReferenda::ReferendumCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - /// Storage: `Scheduler::Agenda` (r:1 w:1) - /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(155814), added: 158289, mode: `MaxEncodedLen`) - /// Storage: `FellowshipReferenda::ReferendumInfoFor` (r:0 w:1) - /// Proof: `FellowshipReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) - fn submit() -> Weight { - // Proof Size summary in bytes: - // Measured: `389` - // Estimated: `159279` - // Minimum execution time: 34_000_000 picoseconds. - Weight::from_parts(36_000_000, 0) - .saturating_add(Weight::from_parts(0, 159279)) - .saturating_add(T::DbWeight::get().reads(3)) - .saturating_add(T::DbWeight::get().writes(3)) - } - /// Storage: `FellowshipReferenda::ReferendumInfoFor` (r:1 w:1) - /// Proof: `FellowshipReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) - /// Storage: `Scheduler::Agenda` (r:2 w:2) - /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(155814), added: 158289, mode: `MaxEncodedLen`) - fn place_decision_deposit_preparing() -> Weight { - // Proof Size summary in bytes: - // Measured: `400` - // Estimated: `317568` - // Minimum execution time: 64_000_000 picoseconds. - Weight::from_parts(67_000_000, 0) - .saturating_add(Weight::from_parts(0, 317568)) - .saturating_add(T::DbWeight::get().reads(3)) - .saturating_add(T::DbWeight::get().writes(3)) - } - /// Storage: `FellowshipReferenda::ReferendumInfoFor` (r:1 w:1) - /// Proof: `FellowshipReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) - /// Storage: `FellowshipReferenda::DecidingCount` (r:1 w:0) - /// Proof: `FellowshipReferenda::DecidingCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) - /// Storage: `FellowshipReferenda::TrackQueue` (r:1 w:1) - /// Proof: `FellowshipReferenda::TrackQueue` (`max_values`: None, `max_size`: Some(812), added: 3287, mode: `MaxEncodedLen`) - /// Storage: `Scheduler::Agenda` (r:1 w:1) - /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(155814), added: 158289, mode: `MaxEncodedLen`) - fn place_decision_deposit_queued() -> Weight { - // Proof Size summary in bytes: - // Measured: `2038` - // Estimated: `159279` - // Minimum execution time: 99_000_000 picoseconds. 
- Weight::from_parts(109_000_000, 0) - .saturating_add(Weight::from_parts(0, 159279)) - .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(3)) - } - /// Storage: `FellowshipReferenda::ReferendumInfoFor` (r:1 w:1) - /// Proof: `FellowshipReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) - /// Storage: `FellowshipReferenda::DecidingCount` (r:1 w:0) - /// Proof: `FellowshipReferenda::DecidingCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) - /// Storage: `FellowshipReferenda::TrackQueue` (r:1 w:1) - /// Proof: `FellowshipReferenda::TrackQueue` (`max_values`: None, `max_size`: Some(812), added: 3287, mode: `MaxEncodedLen`) - /// Storage: `Scheduler::Agenda` (r:1 w:1) - /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(155814), added: 158289, mode: `MaxEncodedLen`) - fn place_decision_deposit_not_queued() -> Weight { - // Proof Size summary in bytes: - // Measured: `2079` - // Estimated: `159279` - // Minimum execution time: 101_000_000 picoseconds. - Weight::from_parts(111_000_000, 0) - .saturating_add(Weight::from_parts(0, 159279)) - .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(3)) - } - /// Storage: `FellowshipReferenda::ReferendumInfoFor` (r:1 w:1) - /// Proof: `FellowshipReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) - /// Storage: `FellowshipReferenda::DecidingCount` (r:1 w:1) - /// Proof: `FellowshipReferenda::DecidingCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) - /// Storage: `FellowshipCollective::MemberCount` (r:1 w:0) - /// Proof: `FellowshipCollective::MemberCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) - /// Storage: `Scheduler::Agenda` (r:2 w:2) - /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(155814), added: 158289, mode: `MaxEncodedLen`) - fn place_decision_deposit_passing() -> Weight { - // Proof Size summary in bytes: - // Measured: `836` - // Estimated: `317568` - // Minimum execution time: 135_000_000 picoseconds. - Weight::from_parts(153_000_000, 0) - .saturating_add(Weight::from_parts(0, 317568)) - .saturating_add(T::DbWeight::get().reads(5)) - .saturating_add(T::DbWeight::get().writes(4)) - } - /// Storage: `FellowshipReferenda::ReferendumInfoFor` (r:1 w:1) - /// Proof: `FellowshipReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) - /// Storage: `FellowshipReferenda::DecidingCount` (r:1 w:1) - /// Proof: `FellowshipReferenda::DecidingCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) - /// Storage: `FellowshipCollective::MemberCount` (r:1 w:0) - /// Proof: `FellowshipCollective::MemberCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) - /// Storage: `Scheduler::Agenda` (r:2 w:2) - /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(155814), added: 158289, mode: `MaxEncodedLen`) - fn place_decision_deposit_failing() -> Weight { - // Proof Size summary in bytes: - // Measured: `735` - // Estimated: `317568` - // Minimum execution time: 78_000_000 picoseconds. 
- Weight::from_parts(82_000_000, 0) - .saturating_add(Weight::from_parts(0, 317568)) - .saturating_add(T::DbWeight::get().reads(5)) - .saturating_add(T::DbWeight::get().writes(4)) - } - /// Storage: `FellowshipReferenda::ReferendumInfoFor` (r:1 w:1) - /// Proof: `FellowshipReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) - fn refund_decision_deposit() -> Weight { - // Proof Size summary in bytes: - // Measured: `351` - // Estimated: `4365` - // Minimum execution time: 38_000_000 picoseconds. - Weight::from_parts(39_000_000, 0) - .saturating_add(Weight::from_parts(0, 4365)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `FellowshipReferenda::ReferendumInfoFor` (r:1 w:1) - /// Proof: `FellowshipReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) - fn refund_submission_deposit() -> Weight { - // Proof Size summary in bytes: - // Measured: `201` - // Estimated: `4365` - // Minimum execution time: 18_000_000 picoseconds. - Weight::from_parts(19_000_000, 0) - .saturating_add(Weight::from_parts(0, 4365)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `FellowshipReferenda::ReferendumInfoFor` (r:1 w:1) - /// Proof: `FellowshipReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) - /// Storage: `Scheduler::Agenda` (r:2 w:2) - /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(155814), added: 158289, mode: `MaxEncodedLen`) - fn cancel() -> Weight { - // Proof Size summary in bytes: - // Measured: `345` - // Estimated: `317568` - // Minimum execution time: 45_000_000 picoseconds. 
- Weight::from_parts(46_000_000, 0) - .saturating_add(Weight::from_parts(0, 317568)) - .saturating_add(T::DbWeight::get().reads(3)) - .saturating_add(T::DbWeight::get().writes(3)) - } - /// Storage: `FellowshipReferenda::ReferendumInfoFor` (r:1 w:1) - /// Proof: `FellowshipReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) - /// Storage: `Scheduler::Agenda` (r:2 w:2) - /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(155814), added: 158289, mode: `MaxEncodedLen`) - /// Storage: `System::Account` (r:1 w:1) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - /// Storage: `ParachainInfo::ParachainId` (r:1 w:0) - /// Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) - /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) - /// Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) - /// Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) - /// Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) - /// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `FellowshipReferenda::MetadataOf` (r:1 w:0) - /// Proof: `FellowshipReferenda::MetadataOf` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) - fn kill() -> Weight { - // Proof Size summary in bytes: - // Measured: `587` - // Estimated: `317568` - // Minimum execution time: 185_000_000 picoseconds. - Weight::from_parts(196_000_000, 0) - .saturating_add(Weight::from_parts(0, 317568)) - .saturating_add(T::DbWeight::get().reads(11)) - .saturating_add(T::DbWeight::get().writes(6)) - } - /// Storage: `FellowshipReferenda::TrackQueue` (r:1 w:0) - /// Proof: `FellowshipReferenda::TrackQueue` (`max_values`: None, `max_size`: Some(812), added: 3287, mode: `MaxEncodedLen`) - /// Storage: `FellowshipReferenda::DecidingCount` (r:1 w:1) - /// Proof: `FellowshipReferenda::DecidingCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) - fn one_fewer_deciding_queue_empty() -> Weight { - // Proof Size summary in bytes: - // Measured: `174` - // Estimated: `4277` - // Minimum execution time: 12_000_000 picoseconds. 
- Weight::from_parts(15_000_000, 0) - .saturating_add(Weight::from_parts(0, 4277)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `FellowshipReferenda::TrackQueue` (r:1 w:1) - /// Proof: `FellowshipReferenda::TrackQueue` (`max_values`: None, `max_size`: Some(812), added: 3287, mode: `MaxEncodedLen`) - /// Storage: `FellowshipReferenda::ReferendumInfoFor` (r:1 w:1) - /// Proof: `FellowshipReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) - /// Storage: `FellowshipCollective::MemberCount` (r:1 w:0) - /// Proof: `FellowshipCollective::MemberCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) - /// Storage: `Scheduler::Agenda` (r:1 w:1) - /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(155814), added: 158289, mode: `MaxEncodedLen`) - fn one_fewer_deciding_failing() -> Weight { - // Proof Size summary in bytes: - // Measured: `2452` - // Estimated: `159279` - // Minimum execution time: 82_000_000 picoseconds. - Weight::from_parts(90_000_000, 0) - .saturating_add(Weight::from_parts(0, 159279)) - .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(3)) - } - /// Storage: `FellowshipReferenda::TrackQueue` (r:1 w:1) - /// Proof: `FellowshipReferenda::TrackQueue` (`max_values`: None, `max_size`: Some(812), added: 3287, mode: `MaxEncodedLen`) - /// Storage: `FellowshipReferenda::ReferendumInfoFor` (r:1 w:1) - /// Proof: `FellowshipReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) - /// Storage: `FellowshipCollective::MemberCount` (r:1 w:0) - /// Proof: `FellowshipCollective::MemberCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) - /// Storage: `Scheduler::Agenda` (r:1 w:1) - /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(155814), added: 158289, mode: `MaxEncodedLen`) - fn one_fewer_deciding_passing() -> Weight { - // Proof Size summary in bytes: - // Measured: `2452` - // Estimated: `159279` - // Minimum execution time: 91_000_000 picoseconds. - Weight::from_parts(99_000_000, 0) - .saturating_add(Weight::from_parts(0, 159279)) - .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(3)) - } - /// Storage: `FellowshipReferenda::ReferendumInfoFor` (r:1 w:0) - /// Proof: `FellowshipReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) - /// Storage: `FellowshipReferenda::TrackQueue` (r:1 w:1) - /// Proof: `FellowshipReferenda::TrackQueue` (`max_values`: None, `max_size`: Some(812), added: 3287, mode: `MaxEncodedLen`) - fn nudge_referendum_requeued_insertion() -> Weight { - // Proof Size summary in bytes: - // Measured: `1841` - // Estimated: `4365` - // Minimum execution time: 41_000_000 picoseconds. 
- Weight::from_parts(44_000_000, 0) - .saturating_add(Weight::from_parts(0, 4365)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `FellowshipReferenda::ReferendumInfoFor` (r:1 w:0) - /// Proof: `FellowshipReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) - /// Storage: `FellowshipReferenda::TrackQueue` (r:1 w:1) - /// Proof: `FellowshipReferenda::TrackQueue` (`max_values`: None, `max_size`: Some(812), added: 3287, mode: `MaxEncodedLen`) - fn nudge_referendum_requeued_slide() -> Weight { - // Proof Size summary in bytes: - // Measured: `1808` - // Estimated: `4365` - // Minimum execution time: 46_000_000 picoseconds. - Weight::from_parts(55_000_000, 0) - .saturating_add(Weight::from_parts(0, 4365)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `FellowshipReferenda::ReferendumInfoFor` (r:1 w:1) - /// Proof: `FellowshipReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) - /// Storage: `FellowshipReferenda::DecidingCount` (r:1 w:0) - /// Proof: `FellowshipReferenda::DecidingCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) - /// Storage: `FellowshipReferenda::TrackQueue` (r:1 w:1) - /// Proof: `FellowshipReferenda::TrackQueue` (`max_values`: None, `max_size`: Some(812), added: 3287, mode: `MaxEncodedLen`) - fn nudge_referendum_queued() -> Weight { - // Proof Size summary in bytes: - // Measured: `1824` - // Estimated: `4365` - // Minimum execution time: 49_000_000 picoseconds. - Weight::from_parts(53_000_000, 0) - .saturating_add(Weight::from_parts(0, 4365)) - .saturating_add(T::DbWeight::get().reads(3)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `FellowshipReferenda::ReferendumInfoFor` (r:1 w:1) - /// Proof: `FellowshipReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) - /// Storage: `FellowshipReferenda::DecidingCount` (r:1 w:0) - /// Proof: `FellowshipReferenda::DecidingCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) - /// Storage: `FellowshipReferenda::TrackQueue` (r:1 w:1) - /// Proof: `FellowshipReferenda::TrackQueue` (`max_values`: None, `max_size`: Some(812), added: 3287, mode: `MaxEncodedLen`) - fn nudge_referendum_not_queued() -> Weight { - // Proof Size summary in bytes: - // Measured: `1865` - // Estimated: `4365` - // Minimum execution time: 51_000_000 picoseconds. - Weight::from_parts(54_000_000, 0) - .saturating_add(Weight::from_parts(0, 4365)) - .saturating_add(T::DbWeight::get().reads(3)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `FellowshipReferenda::ReferendumInfoFor` (r:1 w:1) - /// Proof: `FellowshipReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) - /// Storage: `Scheduler::Agenda` (r:1 w:1) - /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(155814), added: 158289, mode: `MaxEncodedLen`) - fn nudge_referendum_no_deposit() -> Weight { - // Proof Size summary in bytes: - // Measured: `297` - // Estimated: `159279` - // Minimum execution time: 28_000_000 picoseconds. 
- Weight::from_parts(30_000_000, 0) - .saturating_add(Weight::from_parts(0, 159279)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `FellowshipReferenda::ReferendumInfoFor` (r:1 w:1) - /// Proof: `FellowshipReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) - /// Storage: `Scheduler::Agenda` (r:1 w:1) - /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(155814), added: 158289, mode: `MaxEncodedLen`) - fn nudge_referendum_preparing() -> Weight { - // Proof Size summary in bytes: - // Measured: `345` - // Estimated: `159279` - // Minimum execution time: 28_000_000 picoseconds. - Weight::from_parts(29_000_000, 0) - .saturating_add(Weight::from_parts(0, 159279)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `FellowshipReferenda::ReferendumInfoFor` (r:1 w:1) - /// Proof: `FellowshipReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) - fn nudge_referendum_timed_out() -> Weight { - // Proof Size summary in bytes: - // Measured: `242` - // Estimated: `4365` - // Minimum execution time: 20_000_000 picoseconds. - Weight::from_parts(21_000_000, 0) - .saturating_add(Weight::from_parts(0, 4365)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `FellowshipReferenda::ReferendumInfoFor` (r:1 w:1) - /// Proof: `FellowshipReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) - /// Storage: `FellowshipReferenda::DecidingCount` (r:1 w:1) - /// Proof: `FellowshipReferenda::DecidingCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) - /// Storage: `FellowshipCollective::MemberCount` (r:1 w:0) - /// Proof: `FellowshipCollective::MemberCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) - /// Storage: `Scheduler::Agenda` (r:1 w:1) - /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(155814), added: 158289, mode: `MaxEncodedLen`) - fn nudge_referendum_begin_deciding_failing() -> Weight { - // Proof Size summary in bytes: - // Measured: `680` - // Estimated: `159279` - // Minimum execution time: 42_000_000 picoseconds. - Weight::from_parts(47_000_000, 0) - .saturating_add(Weight::from_parts(0, 159279)) - .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(3)) - } - /// Storage: `FellowshipReferenda::ReferendumInfoFor` (r:1 w:1) - /// Proof: `FellowshipReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) - /// Storage: `FellowshipReferenda::DecidingCount` (r:1 w:1) - /// Proof: `FellowshipReferenda::DecidingCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) - /// Storage: `FellowshipCollective::MemberCount` (r:1 w:0) - /// Proof: `FellowshipCollective::MemberCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) - /// Storage: `Scheduler::Agenda` (r:1 w:1) - /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(155814), added: 158289, mode: `MaxEncodedLen`) - fn nudge_referendum_begin_deciding_passing() -> Weight { - // Proof Size summary in bytes: - // Measured: `781` - // Estimated: `159279` - // Minimum execution time: 90_000_000 picoseconds. 
- Weight::from_parts(95_000_000, 0) - .saturating_add(Weight::from_parts(0, 159279)) - .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(3)) - } - /// Storage: `FellowshipReferenda::ReferendumInfoFor` (r:1 w:1) - /// Proof: `FellowshipReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) - /// Storage: `FellowshipCollective::MemberCount` (r:1 w:0) - /// Proof: `FellowshipCollective::MemberCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) - /// Storage: `Scheduler::Agenda` (r:1 w:1) - /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(155814), added: 158289, mode: `MaxEncodedLen`) - fn nudge_referendum_begin_confirming() -> Weight { - // Proof Size summary in bytes: - // Measured: `834` - // Estimated: `159279` - // Minimum execution time: 84_000_000 picoseconds. - Weight::from_parts(93_000_000, 0) - .saturating_add(Weight::from_parts(0, 159279)) - .saturating_add(T::DbWeight::get().reads(3)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `FellowshipReferenda::ReferendumInfoFor` (r:1 w:1) - /// Proof: `FellowshipReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) - /// Storage: `FellowshipCollective::MemberCount` (r:1 w:0) - /// Proof: `FellowshipCollective::MemberCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) - /// Storage: `Scheduler::Agenda` (r:1 w:1) - /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(155814), added: 158289, mode: `MaxEncodedLen`) - fn nudge_referendum_end_confirming() -> Weight { - // Proof Size summary in bytes: - // Measured: `817` - // Estimated: `159279` - // Minimum execution time: 88_000_000 picoseconds. - Weight::from_parts(98_000_000, 0) - .saturating_add(Weight::from_parts(0, 159279)) - .saturating_add(T::DbWeight::get().reads(3)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `FellowshipReferenda::ReferendumInfoFor` (r:1 w:1) - /// Proof: `FellowshipReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) - /// Storage: `FellowshipCollective::MemberCount` (r:1 w:0) - /// Proof: `FellowshipCollective::MemberCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) - /// Storage: `Scheduler::Agenda` (r:1 w:1) - /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(155814), added: 158289, mode: `MaxEncodedLen`) - fn nudge_referendum_continue_not_confirming() -> Weight { - // Proof Size summary in bytes: - // Measured: `834` - // Estimated: `159279` - // Minimum execution time: 81_000_000 picoseconds. 
- Weight::from_parts(93_000_000, 0) - .saturating_add(Weight::from_parts(0, 159279)) - .saturating_add(T::DbWeight::get().reads(3)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `FellowshipReferenda::ReferendumInfoFor` (r:1 w:1) - /// Proof: `FellowshipReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) - /// Storage: `FellowshipCollective::MemberCount` (r:1 w:0) - /// Proof: `FellowshipCollective::MemberCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) - /// Storage: `Scheduler::Agenda` (r:1 w:1) - /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(155814), added: 158289, mode: `MaxEncodedLen`) - fn nudge_referendum_continue_confirming() -> Weight { - // Proof Size summary in bytes: - // Measured: `838` - // Estimated: `159279` - // Minimum execution time: 74_000_000 picoseconds. - Weight::from_parts(77_000_000, 0) - .saturating_add(Weight::from_parts(0, 159279)) - .saturating_add(T::DbWeight::get().reads(3)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `FellowshipReferenda::ReferendumInfoFor` (r:1 w:1) - /// Proof: `FellowshipReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) - /// Storage: `FellowshipCollective::MemberCount` (r:1 w:0) - /// Proof: `FellowshipCollective::MemberCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) - /// Storage: `Scheduler::Agenda` (r:2 w:2) - /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(155814), added: 158289, mode: `MaxEncodedLen`) - /// Storage: `Scheduler::Lookup` (r:1 w:1) - /// Proof: `Scheduler::Lookup` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`) - fn nudge_referendum_approved() -> Weight { - // Proof Size summary in bytes: - // Measured: `838` - // Estimated: `317568` - // Minimum execution time: 105_000_000 picoseconds. - Weight::from_parts(123_000_000, 0) - .saturating_add(Weight::from_parts(0, 317568)) - .saturating_add(T::DbWeight::get().reads(5)) - .saturating_add(T::DbWeight::get().writes(4)) - } - /// Storage: `FellowshipReferenda::ReferendumInfoFor` (r:1 w:1) - /// Proof: `FellowshipReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) - /// Storage: `FellowshipCollective::MemberCount` (r:1 w:0) - /// Proof: `FellowshipCollective::MemberCount` (`max_values`: None, `max_size`: Some(14), added: 2489, mode: `MaxEncodedLen`) - /// Storage: `Scheduler::Agenda` (r:1 w:1) - /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(155814), added: 158289, mode: `MaxEncodedLen`) - fn nudge_referendum_rejected() -> Weight { - // Proof Size summary in bytes: - // Measured: `834` - // Estimated: `159279` - // Minimum execution time: 90_000_000 picoseconds. 
- Weight::from_parts(100_000_000, 0) - .saturating_add(Weight::from_parts(0, 159279)) - .saturating_add(T::DbWeight::get().reads(3)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `FellowshipReferenda::ReferendumInfoFor` (r:1 w:0) - /// Proof: `FellowshipReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) - /// Storage: `Preimage::StatusFor` (r:1 w:0) - /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) - /// Storage: `FellowshipReferenda::MetadataOf` (r:0 w:1) - /// Proof: `FellowshipReferenda::MetadataOf` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) - fn set_some_metadata() -> Weight { - // Proof Size summary in bytes: - // Measured: `453` - // Estimated: `4365` - // Minimum execution time: 24_000_000 picoseconds. - Weight::from_parts(24_000_000, 0) - .saturating_add(Weight::from_parts(0, 4365)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `FellowshipReferenda::ReferendumInfoFor` (r:1 w:0) - /// Proof: `FellowshipReferenda::ReferendumInfoFor` (`max_values`: None, `max_size`: Some(900), added: 3375, mode: `MaxEncodedLen`) - /// Storage: `FellowshipReferenda::MetadataOf` (r:1 w:1) - /// Proof: `FellowshipReferenda::MetadataOf` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) - fn clear_metadata() -> Weight { - // Proof Size summary in bytes: - // Measured: `319` - // Estimated: `4365` - // Minimum execution time: 21_000_000 picoseconds. - Weight::from_parts(23_000_000, 0) - .saturating_add(Weight::from_parts(0, 4365)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(1)) - } -} diff --git a/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/pallet_salary_ambassador_salary.rs b/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/pallet_salary_ambassador_salary.rs deleted file mode 100644 index 0522420f2f5..00000000000 --- a/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/pallet_salary_ambassador_salary.rs +++ /dev/null @@ -1,190 +0,0 @@ -// Copyright Parity Technologies (UK) Ltd. -// This file is part of Cumulus. - -// Cumulus is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Cumulus is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Cumulus. If not, see . - -//! Autogenerated weights for `pallet_salary` -//! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-08-11, STEPS: `2`, REPEAT: `2`, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `cob`, CPU: `` -//! 
WASM-EXECUTION: `Compiled`, CHAIN: `Some("collectives-polkadot-dev")`, DB CACHE: 1024 - -// Executed Command: -// target/release/polkadot-parachain -// benchmark -// pallet -// --chain=collectives-polkadot-dev -// --wasm-execution=compiled -// --pallet=pallet_salary -// --extrinsic=* -// --steps=2 -// --repeat=2 -// --json -// --header=./file_header.txt -// --output=./parachains/runtimes/collectives/collectives-polkadot/src/weights/ - -#![cfg_attr(rustfmt, rustfmt_skip)] -#![allow(unused_parens)] -#![allow(unused_imports)] -#![allow(missing_docs)] - -use frame_support::{traits::Get, weights::Weight}; -use core::marker::PhantomData; - -/// Weight functions for `pallet_salary`. -pub struct WeightInfo<T>(PhantomData<T>); -impl<T: frame_system::Config> pallet_salary::WeightInfo for WeightInfo<T> { - /// Storage: `AmbassadorSalary::Status` (r:1 w:1) - /// Proof: `AmbassadorSalary::Status` (`max_values`: Some(1), `max_size`: Some(56), added: 551, mode: `MaxEncodedLen`) - fn init() -> Weight { - // Proof Size summary in bytes: - // Measured: `109` - // Estimated: `1541` - // Minimum execution time: 12_000_000 picoseconds. - Weight::from_parts(14_000_000, 0) - .saturating_add(Weight::from_parts(0, 1541)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `AmbassadorSalary::Status` (r:1 w:1) - /// Proof: `AmbassadorSalary::Status` (`max_values`: Some(1), `max_size`: Some(56), added: 551, mode: `MaxEncodedLen`) - fn bump() -> Weight { - // Proof Size summary in bytes: - // Measured: `191` - // Estimated: `1541` - // Minimum execution time: 15_000_000 picoseconds. - Weight::from_parts(16_000_000, 0) - .saturating_add(Weight::from_parts(0, 1541)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `AmbassadorSalary::Status` (r:1 w:0) - /// Proof: `AmbassadorSalary::Status` (`max_values`: Some(1), `max_size`: Some(56), added: 551, mode: `MaxEncodedLen`) - /// Storage: `AmbassadorCollective::Members` (r:1 w:0) - /// Proof: `AmbassadorCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`) - /// Storage: `AmbassadorSalary::Claimant` (r:1 w:1) - /// Proof: `AmbassadorSalary::Claimant` (`max_values`: None, `max_size`: Some(86), added: 2561, mode: `MaxEncodedLen`) - fn induct() -> Weight { - // Proof Size summary in bytes: - // Measured: `400` - // Estimated: `3551` - // Minimum execution time: 23_000_000 picoseconds. - Weight::from_parts(23_000_000, 0) - .saturating_add(Weight::from_parts(0, 3551)) - .saturating_add(T::DbWeight::get().reads(3)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `AmbassadorCollective::Members` (r:1 w:0) - /// Proof: `AmbassadorCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`) - /// Storage: `AmbassadorSalary::Status` (r:1 w:1) - /// Proof: `AmbassadorSalary::Status` (`max_values`: Some(1), `max_size`: Some(56), added: 551, mode: `MaxEncodedLen`) - /// Storage: `AmbassadorSalary::Claimant` (r:1 w:1) - /// Proof: `AmbassadorSalary::Claimant` (`max_values`: None, `max_size`: Some(86), added: 2561, mode: `MaxEncodedLen`) - fn register() -> Weight { - // Proof Size summary in bytes: - // Measured: `467` - // Estimated: `3551` - // Minimum execution time: 27_000_000 picoseconds. 
- Weight::from_parts(28_000_000, 0) - .saturating_add(Weight::from_parts(0, 3551)) - .saturating_add(T::DbWeight::get().reads(3)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `AmbassadorSalary::Status` (r:1 w:1) - /// Proof: `AmbassadorSalary::Status` (`max_values`: Some(1), `max_size`: Some(56), added: 551, mode: `MaxEncodedLen`) - /// Storage: `AmbassadorSalary::Claimant` (r:1 w:1) - /// Proof: `AmbassadorSalary::Claimant` (`max_values`: None, `max_size`: Some(86), added: 2561, mode: `MaxEncodedLen`) - /// Storage: `AmbassadorCollective::Members` (r:1 w:0) - /// Proof: `AmbassadorCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`) - /// Storage: `ParachainInfo::ParachainId` (r:1 w:0) - /// Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - /// Storage: `PolkadotXcm::QueryCounter` (r:1 w:1) - /// Proof: `PolkadotXcm::QueryCounter` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) - /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) - /// Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) - /// Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `ParachainSystem::RelevantMessagingState` (r:1 w:0) - /// Proof: `ParachainSystem::RelevantMessagingState` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `XcmpQueue::OutboundXcmpStatus` (r:1 w:1) - /// Proof: `XcmpQueue::OutboundXcmpStatus` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `XcmpQueue::OutboundXcmpMessages` (r:0 w:1) - /// Proof: `XcmpQueue::OutboundXcmpMessages` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// Storage: `PolkadotXcm::Queries` (r:0 w:1) - /// Proof: `PolkadotXcm::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`) - fn payout() -> Weight { - // Proof Size summary in bytes: - // Measured: `879` - // Estimated: `4344` - // Minimum execution time: 68_000_000 picoseconds. 
- Weight::from_parts(72_000_000, 0) - .saturating_add(Weight::from_parts(0, 4344)) - .saturating_add(T::DbWeight::get().reads(10)) - .saturating_add(T::DbWeight::get().writes(7)) - } - /// Storage: `AmbassadorSalary::Status` (r:1 w:1) - /// Proof: `AmbassadorSalary::Status` (`max_values`: Some(1), `max_size`: Some(56), added: 551, mode: `MaxEncodedLen`) - /// Storage: `AmbassadorSalary::Claimant` (r:1 w:1) - /// Proof: `AmbassadorSalary::Claimant` (`max_values`: None, `max_size`: Some(86), added: 2561, mode: `MaxEncodedLen`) - /// Storage: `AmbassadorCollective::Members` (r:1 w:0) - /// Proof: `AmbassadorCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`) - /// Storage: `ParachainInfo::ParachainId` (r:1 w:0) - /// Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - /// Storage: `PolkadotXcm::QueryCounter` (r:1 w:1) - /// Proof: `PolkadotXcm::QueryCounter` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) - /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) - /// Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) - /// Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `ParachainSystem::RelevantMessagingState` (r:1 w:0) - /// Proof: `ParachainSystem::RelevantMessagingState` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `XcmpQueue::OutboundXcmpStatus` (r:1 w:1) - /// Proof: `XcmpQueue::OutboundXcmpStatus` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `XcmpQueue::OutboundXcmpMessages` (r:0 w:1) - /// Proof: `XcmpQueue::OutboundXcmpMessages` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// Storage: `PolkadotXcm::Queries` (r:0 w:1) - /// Proof: `PolkadotXcm::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`) - fn payout_other() -> Weight { - // Proof Size summary in bytes: - // Measured: `879` - // Estimated: `4344` - // Minimum execution time: 69_000_000 picoseconds. - Weight::from_parts(70_000_000, 0) - .saturating_add(Weight::from_parts(0, 4344)) - .saturating_add(T::DbWeight::get().reads(10)) - .saturating_add(T::DbWeight::get().writes(7)) - } - /// Storage: `AmbassadorSalary::Status` (r:1 w:1) - /// Proof: `AmbassadorSalary::Status` (`max_values`: Some(1), `max_size`: Some(56), added: 551, mode: `MaxEncodedLen`) - /// Storage: `AmbassadorSalary::Claimant` (r:1 w:1) - /// Proof: `AmbassadorSalary::Claimant` (`max_values`: None, `max_size`: Some(86), added: 2561, mode: `MaxEncodedLen`) - /// Storage: `PolkadotXcm::Queries` (r:1 w:1) - /// Proof: `PolkadotXcm::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`) - fn check_payment() -> Weight { - // Proof Size summary in bytes: - // Measured: `479` - // Estimated: `3944` - // Minimum execution time: 27_000_000 picoseconds. 
- Weight::from_parts(28_000_000, 0) - .saturating_add(Weight::from_parts(0, 3944)) - .saturating_add(T::DbWeight::get().reads(3)) - .saturating_add(T::DbWeight::get().writes(3)) - } -} diff --git a/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/pallet_salary_fellowship_salary.rs b/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/pallet_salary_fellowship_salary.rs deleted file mode 100644 index 37680b4e5df..00000000000 --- a/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/pallet_salary_fellowship_salary.rs +++ /dev/null @@ -1,189 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! Autogenerated weights for `pallet_salary` -//! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-08-11, STEPS: `2`, REPEAT: `2`, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `cob`, CPU: `` -//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("collectives-polkadot-dev")`, DB CACHE: 1024 - -// Executed Command: -// target/release/polkadot-parachain -// benchmark -// pallet -// --chain=collectives-polkadot-dev -// --wasm-execution=compiled -// --pallet=pallet_salary -// --extrinsic=* -// --steps=2 -// --repeat=2 -// --json -// --header=./file_header.txt -// --output=./parachains/runtimes/collectives/collectives-polkadot/src/weights/ - -#![cfg_attr(rustfmt, rustfmt_skip)] -#![allow(unused_parens)] -#![allow(unused_imports)] -#![allow(missing_docs)] - -use frame_support::{traits::Get, weights::Weight}; -use core::marker::PhantomData; - -/// Weight functions for `pallet_salary`. -pub struct WeightInfo<T>(PhantomData<T>); -impl<T: frame_system::Config> pallet_salary::WeightInfo for WeightInfo<T> { - /// Storage: `FellowshipSalary::Status` (r:1 w:1) - /// Proof: `FellowshipSalary::Status` (`max_values`: Some(1), `max_size`: Some(56), added: 551, mode: `MaxEncodedLen`) - fn init() -> Weight { - // Proof Size summary in bytes: - // Measured: `142` - // Estimated: `1541` - // Minimum execution time: 13_000_000 picoseconds. - Weight::from_parts(17_000_000, 0) - .saturating_add(Weight::from_parts(0, 1541)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `FellowshipSalary::Status` (r:1 w:1) - /// Proof: `FellowshipSalary::Status` (`max_values`: Some(1), `max_size`: Some(56), added: 551, mode: `MaxEncodedLen`) - fn bump() -> Weight { - // Proof Size summary in bytes: - // Measured: `224` - // Estimated: `1541` - // Minimum execution time: 15_000_000 picoseconds. 
- Weight::from_parts(18_000_000, 0) - .saturating_add(Weight::from_parts(0, 1541)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `FellowshipSalary::Status` (r:1 w:0) - /// Proof: `FellowshipSalary::Status` (`max_values`: Some(1), `max_size`: Some(56), added: 551, mode: `MaxEncodedLen`) - /// Storage: `FellowshipCollective::Members` (r:1 w:0) - /// Proof: `FellowshipCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`) - /// Storage: `FellowshipSalary::Claimant` (r:1 w:1) - /// Proof: `FellowshipSalary::Claimant` (`max_values`: None, `max_size`: Some(86), added: 2561, mode: `MaxEncodedLen`) - fn induct() -> Weight { - // Proof Size summary in bytes: - // Measured: `395` - // Estimated: `3551` - // Minimum execution time: 22_000_000 picoseconds. - Weight::from_parts(25_000_000, 0) - .saturating_add(Weight::from_parts(0, 3551)) - .saturating_add(T::DbWeight::get().reads(3)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `FellowshipCollective::Members` (r:1 w:0) - /// Proof: `FellowshipCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`) - /// Storage: `FellowshipSalary::Status` (r:1 w:1) - /// Proof: `FellowshipSalary::Status` (`max_values`: Some(1), `max_size`: Some(56), added: 551, mode: `MaxEncodedLen`) - /// Storage: `FellowshipSalary::Claimant` (r:1 w:1) - /// Proof: `FellowshipSalary::Claimant` (`max_values`: None, `max_size`: Some(86), added: 2561, mode: `MaxEncodedLen`) - fn register() -> Weight { - // Proof Size summary in bytes: - // Measured: `462` - // Estimated: `3551` - // Minimum execution time: 26_000_000 picoseconds. - Weight::from_parts(29_000_000, 0) - .saturating_add(Weight::from_parts(0, 3551)) - .saturating_add(T::DbWeight::get().reads(3)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `FellowshipSalary::Status` (r:1 w:1) - /// Proof: `FellowshipSalary::Status` (`max_values`: Some(1), `max_size`: Some(56), added: 551, mode: `MaxEncodedLen`) - /// Storage: `FellowshipSalary::Claimant` (r:1 w:1) - /// Proof: `FellowshipSalary::Claimant` (`max_values`: None, `max_size`: Some(86), added: 2561, mode: `MaxEncodedLen`) - /// Storage: `FellowshipCollective::Members` (r:1 w:0) - /// Proof: `FellowshipCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`) - /// Storage: `ParachainInfo::ParachainId` (r:1 w:0) - /// Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - /// Storage: `PolkadotXcm::QueryCounter` (r:1 w:1) - /// Proof: `PolkadotXcm::QueryCounter` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) - /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) - /// Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) - /// Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `ParachainSystem::RelevantMessagingState` (r:1 w:0) - /// Proof: `ParachainSystem::RelevantMessagingState` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `XcmpQueue::OutboundXcmpStatus` (r:1 w:1) - /// Proof: `XcmpQueue::OutboundXcmpStatus` 
(`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `XcmpQueue::OutboundXcmpMessages` (r:0 w:1) - /// Proof: `XcmpQueue::OutboundXcmpMessages` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// Storage: `PolkadotXcm::Queries` (r:0 w:1) - /// Proof: `PolkadotXcm::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`) - fn payout() -> Weight { - // Proof Size summary in bytes: - // Measured: `774` - // Estimated: `4239` - // Minimum execution time: 67_000_000 picoseconds. - Weight::from_parts(74_000_000, 0) - .saturating_add(Weight::from_parts(0, 4239)) - .saturating_add(T::DbWeight::get().reads(10)) - .saturating_add(T::DbWeight::get().writes(7)) - } - /// Storage: `FellowshipSalary::Status` (r:1 w:1) - /// Proof: `FellowshipSalary::Status` (`max_values`: Some(1), `max_size`: Some(56), added: 551, mode: `MaxEncodedLen`) - /// Storage: `FellowshipSalary::Claimant` (r:1 w:1) - /// Proof: `FellowshipSalary::Claimant` (`max_values`: None, `max_size`: Some(86), added: 2561, mode: `MaxEncodedLen`) - /// Storage: `FellowshipCollective::Members` (r:1 w:0) - /// Proof: `FellowshipCollective::Members` (`max_values`: None, `max_size`: Some(42), added: 2517, mode: `MaxEncodedLen`) - /// Storage: `ParachainInfo::ParachainId` (r:1 w:0) - /// Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - /// Storage: `PolkadotXcm::QueryCounter` (r:1 w:1) - /// Proof: `PolkadotXcm::QueryCounter` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) - /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) - /// Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) - /// Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `ParachainSystem::RelevantMessagingState` (r:1 w:0) - /// Proof: `ParachainSystem::RelevantMessagingState` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `XcmpQueue::OutboundXcmpStatus` (r:1 w:1) - /// Proof: `XcmpQueue::OutboundXcmpStatus` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `XcmpQueue::OutboundXcmpMessages` (r:0 w:1) - /// Proof: `XcmpQueue::OutboundXcmpMessages` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// Storage: `PolkadotXcm::Queries` (r:0 w:1) - /// Proof: `PolkadotXcm::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`) - fn payout_other() -> Weight { - // Proof Size summary in bytes: - // Measured: `774` - // Estimated: `4239` - // Minimum execution time: 66_000_000 picoseconds. 
- Weight::from_parts(71_000_000, 0) - .saturating_add(Weight::from_parts(0, 4239)) - .saturating_add(T::DbWeight::get().reads(10)) - .saturating_add(T::DbWeight::get().writes(7)) - } - /// Storage: `FellowshipSalary::Status` (r:1 w:1) - /// Proof: `FellowshipSalary::Status` (`max_values`: Some(1), `max_size`: Some(56), added: 551, mode: `MaxEncodedLen`) - /// Storage: `FellowshipSalary::Claimant` (r:1 w:1) - /// Proof: `FellowshipSalary::Claimant` (`max_values`: None, `max_size`: Some(86), added: 2561, mode: `MaxEncodedLen`) - /// Storage: `PolkadotXcm::Queries` (r:1 w:1) - /// Proof: `PolkadotXcm::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`) - fn check_payment() -> Weight { - // Proof Size summary in bytes: - // Measured: `512` - // Estimated: `3977` - // Minimum execution time: 26_000_000 picoseconds. - Weight::from_parts(27_000_000, 0) - .saturating_add(Weight::from_parts(0, 3977)) - .saturating_add(T::DbWeight::get().reads(3)) - .saturating_add(T::DbWeight::get().writes(3)) - } -} diff --git a/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/pallet_scheduler.rs b/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/pallet_scheduler.rs deleted file mode 100644 index cf5610df665..00000000000 --- a/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/pallet_scheduler.rs +++ /dev/null @@ -1,206 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! Autogenerated weights for `pallet_scheduler` -//! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-07-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-ynta1nyy-project-238-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! EXECUTION: ``, WASM-EXECUTION: `Compiled`, CHAIN: `Some("collectives-polkadot-dev")`, DB CACHE: 1024 - -// Executed Command: -// ./target/production/polkadot-parachain -// benchmark -// pallet -// --chain=collectives-polkadot-dev -// --wasm-execution=compiled -// --pallet=pallet_scheduler -// --no-storage-info -// --no-median-slopes -// --no-min-squares -// --extrinsic=* -// --steps=50 -// --repeat=20 -// --json -// --header=./file_header.txt -// --output=./parachains/runtimes/collectives/collectives-polkadot/src/weights/ - -#![cfg_attr(rustfmt, rustfmt_skip)] -#![allow(unused_parens)] -#![allow(unused_imports)] -#![allow(missing_docs)] - -use frame_support::{traits::Get, weights::Weight}; -use core::marker::PhantomData; - -/// Weight functions for `pallet_scheduler`. 
-pub struct WeightInfo(PhantomData); -impl pallet_scheduler::WeightInfo for WeightInfo { - /// Storage: `Scheduler::IncompleteSince` (r:1 w:1) - /// Proof: `Scheduler::IncompleteSince` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - fn service_agendas_base() -> Weight { - // Proof Size summary in bytes: - // Measured: `31` - // Estimated: `1489` - // Minimum execution time: 3_441_000 picoseconds. - Weight::from_parts(3_604_000, 0) - .saturating_add(Weight::from_parts(0, 1489)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `Scheduler::Agenda` (r:1 w:1) - /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(155814), added: 158289, mode: `MaxEncodedLen`) - /// The range of component `s` is `[0, 200]`. - fn service_agenda_base(s: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `77 + s * (177 ±0)` - // Estimated: `159279` - // Minimum execution time: 2_879_000 picoseconds. - Weight::from_parts(2_963_000, 0) - .saturating_add(Weight::from_parts(0, 159279)) - // Standard Error: 3_764 - .saturating_add(Weight::from_parts(909_557, 0).saturating_mul(s.into())) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - fn service_task_base() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 5_172_000 picoseconds. - Weight::from_parts(5_294_000, 0) - .saturating_add(Weight::from_parts(0, 0)) - } - /// Storage: `Preimage::PreimageFor` (r:1 w:1) - /// Proof: `Preimage::PreimageFor` (`max_values`: None, `max_size`: Some(4194344), added: 4196819, mode: `Measured`) - /// Storage: `Preimage::StatusFor` (r:1 w:1) - /// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`) - /// The range of component `s` is `[128, 4194304]`. - fn service_task_fetched(s: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `213 + s * (1 ±0)` - // Estimated: `3678 + s * (1 ±0)` - // Minimum execution time: 19_704_000 picoseconds. - Weight::from_parts(19_903_000, 0) - .saturating_add(Weight::from_parts(0, 3678)) - // Standard Error: 5 - .saturating_add(Weight::from_parts(1_394, 0).saturating_mul(s.into())) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(2)) - .saturating_add(Weight::from_parts(0, 1).saturating_mul(s.into())) - } - /// Storage: `Scheduler::Lookup` (r:0 w:1) - /// Proof: `Scheduler::Lookup` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`) - fn service_task_named() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 6_359_000 picoseconds. - Weight::from_parts(6_599_000, 0) - .saturating_add(Weight::from_parts(0, 0)) - .saturating_add(T::DbWeight::get().writes(1)) - } - fn service_task_periodic() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 5_217_000 picoseconds. - Weight::from_parts(5_333_000, 0) - .saturating_add(Weight::from_parts(0, 0)) - } - fn execute_dispatch_signed() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 2_406_000 picoseconds. 
- Weight::from_parts(2_541_000, 0) - .saturating_add(Weight::from_parts(0, 0)) - } - fn execute_dispatch_unsigned() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 2_370_000 picoseconds. - Weight::from_parts(2_561_000, 0) - .saturating_add(Weight::from_parts(0, 0)) - } - /// Storage: `Scheduler::Agenda` (r:1 w:1) - /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(155814), added: 158289, mode: `MaxEncodedLen`) - /// The range of component `s` is `[0, 199]`. - fn schedule(s: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `77 + s * (177 ±0)` - // Estimated: `159279` - // Minimum execution time: 11_784_000 picoseconds. - Weight::from_parts(5_574_404, 0) - .saturating_add(Weight::from_parts(0, 159279)) - // Standard Error: 7_217 - .saturating_add(Weight::from_parts(1_035_248, 0).saturating_mul(s.into())) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `Scheduler::Agenda` (r:1 w:1) - /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(155814), added: 158289, mode: `MaxEncodedLen`) - /// Storage: `Scheduler::Lookup` (r:0 w:1) - /// Proof: `Scheduler::Lookup` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`) - /// The range of component `s` is `[1, 200]`. - fn cancel(s: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `77 + s * (177 ±0)` - // Estimated: `159279` - // Minimum execution time: 16_373_000 picoseconds. - Weight::from_parts(3_088_135, 0) - .saturating_add(Weight::from_parts(0, 159279)) - // Standard Error: 7_095 - .saturating_add(Weight::from_parts(1_745_270, 0).saturating_mul(s.into())) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `Scheduler::Lookup` (r:1 w:1) - /// Proof: `Scheduler::Lookup` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`) - /// Storage: `Scheduler::Agenda` (r:1 w:1) - /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(155814), added: 158289, mode: `MaxEncodedLen`) - /// The range of component `s` is `[0, 199]`. - fn schedule_named(s: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `468 + s * (179 ±0)` - // Estimated: `159279` - // Minimum execution time: 14_822_000 picoseconds. - Weight::from_parts(9_591_402, 0) - .saturating_add(Weight::from_parts(0, 159279)) - // Standard Error: 7_151 - .saturating_add(Weight::from_parts(1_058_408, 0).saturating_mul(s.into())) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `Scheduler::Lookup` (r:1 w:1) - /// Proof: `Scheduler::Lookup` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`) - /// Storage: `Scheduler::Agenda` (r:1 w:1) - /// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(155814), added: 158289, mode: `MaxEncodedLen`) - /// The range of component `s` is `[1, 200]`. - fn cancel_named(s: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `509 + s * (179 ±0)` - // Estimated: `159279` - // Minimum execution time: 18_541_000 picoseconds. 
- Weight::from_parts(6_522_239, 0) - .saturating_add(Weight::from_parts(0, 159279)) - // Standard Error: 8_349 - .saturating_add(Weight::from_parts(1_760_431, 0).saturating_mul(s.into())) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(2)) - } -} diff --git a/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/pallet_session.rs b/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/pallet_session.rs deleted file mode 100644 index 2ac0804df89..00000000000 --- a/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/pallet_session.rs +++ /dev/null @@ -1,80 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! Autogenerated weights for `pallet_session` -//! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-07-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-ynta1nyy-project-238-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! EXECUTION: ``, WASM-EXECUTION: `Compiled`, CHAIN: `Some("collectives-polkadot-dev")`, DB CACHE: 1024 - -// Executed Command: -// ./target/production/polkadot-parachain -// benchmark -// pallet -// --chain=collectives-polkadot-dev -// --wasm-execution=compiled -// --pallet=pallet_session -// --no-storage-info -// --no-median-slopes -// --no-min-squares -// --extrinsic=* -// --steps=50 -// --repeat=20 -// --json -// --header=./file_header.txt -// --output=./parachains/runtimes/collectives/collectives-polkadot/src/weights/ - -#![cfg_attr(rustfmt, rustfmt_skip)] -#![allow(unused_parens)] -#![allow(unused_imports)] -#![allow(missing_docs)] - -use frame_support::{traits::Get, weights::Weight}; -use core::marker::PhantomData; - -/// Weight functions for `pallet_session`. -pub struct WeightInfo(PhantomData); -impl pallet_session::WeightInfo for WeightInfo { - /// Storage: `Session::NextKeys` (r:1 w:1) - /// Proof: `Session::NextKeys` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// Storage: `Session::KeyOwner` (r:1 w:1) - /// Proof: `Session::KeyOwner` (`max_values`: None, `max_size`: None, mode: `Measured`) - fn set_keys() -> Weight { - // Proof Size summary in bytes: - // Measured: `270` - // Estimated: `3735` - // Minimum execution time: 16_663_000 picoseconds. 
- Weight::from_parts(17_246_000, 0) - .saturating_add(Weight::from_parts(0, 3735)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `Session::NextKeys` (r:1 w:1) - /// Proof: `Session::NextKeys` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// Storage: `Session::KeyOwner` (r:0 w:1) - /// Proof: `Session::KeyOwner` (`max_values`: None, `max_size`: None, mode: `Measured`) - fn purge_keys() -> Weight { - // Proof Size summary in bytes: - // Measured: `242` - // Estimated: `3707` - // Minimum execution time: 11_850_000 picoseconds. - Weight::from_parts(12_204_000, 0) - .saturating_add(Weight::from_parts(0, 3707)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(2)) - } -} diff --git a/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/pallet_timestamp.rs b/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/pallet_timestamp.rs deleted file mode 100644 index ca06f43f92e..00000000000 --- a/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/pallet_timestamp.rs +++ /dev/null @@ -1,74 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! Autogenerated weights for `pallet_timestamp` -//! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-07-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-ynta1nyy-project-238-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! EXECUTION: ``, WASM-EXECUTION: `Compiled`, CHAIN: `Some("collectives-polkadot-dev")`, DB CACHE: 1024 - -// Executed Command: -// ./target/production/polkadot-parachain -// benchmark -// pallet -// --chain=collectives-polkadot-dev -// --wasm-execution=compiled -// --pallet=pallet_timestamp -// --no-storage-info -// --no-median-slopes -// --no-min-squares -// --extrinsic=* -// --steps=50 -// --repeat=20 -// --json -// --header=./file_header.txt -// --output=./parachains/runtimes/collectives/collectives-polkadot/src/weights/ - -#![cfg_attr(rustfmt, rustfmt_skip)] -#![allow(unused_parens)] -#![allow(unused_imports)] -#![allow(missing_docs)] - -use frame_support::{traits::Get, weights::Weight}; -use core::marker::PhantomData; - -/// Weight functions for `pallet_timestamp`. -pub struct WeightInfo(PhantomData); -impl pallet_timestamp::WeightInfo for WeightInfo { - /// Storage: `Timestamp::Now` (r:1 w:1) - /// Proof: `Timestamp::Now` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) - /// Storage: `Aura::CurrentSlot` (r:1 w:0) - /// Proof: `Aura::CurrentSlot` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) - fn set() -> Weight { - // Proof Size summary in bytes: - // Measured: `49` - // Estimated: `1493` - // Minimum execution time: 7_863_000 picoseconds. 
- Weight::from_parts(8_183_000, 0) - .saturating_add(Weight::from_parts(0, 1493)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(1)) - } - fn on_finalize() -> Weight { - // Proof Size summary in bytes: - // Measured: `57` - // Estimated: `0` - // Minimum execution time: 3_460_000 picoseconds. - Weight::from_parts(3_577_000, 0) - .saturating_add(Weight::from_parts(0, 0)) - } -} diff --git a/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/pallet_utility.rs b/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/pallet_utility.rs deleted file mode 100644 index c60a79d91da..00000000000 --- a/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/pallet_utility.rs +++ /dev/null @@ -1,101 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! Autogenerated weights for `pallet_utility` -//! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-07-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-ynta1nyy-project-238-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! EXECUTION: ``, WASM-EXECUTION: `Compiled`, CHAIN: `Some("collectives-polkadot-dev")`, DB CACHE: 1024 - -// Executed Command: -// ./target/production/polkadot-parachain -// benchmark -// pallet -// --chain=collectives-polkadot-dev -// --wasm-execution=compiled -// --pallet=pallet_utility -// --no-storage-info -// --no-median-slopes -// --no-min-squares -// --extrinsic=* -// --steps=50 -// --repeat=20 -// --json -// --header=./file_header.txt -// --output=./parachains/runtimes/collectives/collectives-polkadot/src/weights/ - -#![cfg_attr(rustfmt, rustfmt_skip)] -#![allow(unused_parens)] -#![allow(unused_imports)] -#![allow(missing_docs)] - -use frame_support::{traits::Get, weights::Weight}; -use core::marker::PhantomData; - -/// Weight functions for `pallet_utility`. -pub struct WeightInfo(PhantomData); -impl pallet_utility::WeightInfo for WeightInfo { - /// The range of component `c` is `[0, 1000]`. - fn batch(c: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 6_650_000 picoseconds. - Weight::from_parts(7_474_437, 0) - .saturating_add(Weight::from_parts(0, 0)) - // Standard Error: 1_625 - .saturating_add(Weight::from_parts(4_996_146, 0).saturating_mul(c.into())) - } - fn as_derivative() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 4_612_000 picoseconds. - Weight::from_parts(4_774_000, 0) - .saturating_add(Weight::from_parts(0, 0)) - } - /// The range of component `c` is `[0, 1000]`. - fn batch_all(c: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 6_744_000 picoseconds. 
- Weight::from_parts(10_889_913, 0) - .saturating_add(Weight::from_parts(0, 0)) - // Standard Error: 1_281 - .saturating_add(Weight::from_parts(5_218_293, 0).saturating_mul(c.into())) - } - fn dispatch_as() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 8_673_000 picoseconds. - Weight::from_parts(8_980_000, 0) - .saturating_add(Weight::from_parts(0, 0)) - } - /// The range of component `c` is `[0, 1000]`. - fn force_batch(c: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 6_744_000 picoseconds. - Weight::from_parts(7_801_721, 0) - .saturating_add(Weight::from_parts(0, 0)) - // Standard Error: 1_395 - .saturating_add(Weight::from_parts(5_000_971, 0).saturating_mul(c.into())) - } -} diff --git a/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/pallet_xcm.rs b/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/pallet_xcm.rs deleted file mode 100644 index 57e50284147..00000000000 --- a/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/pallet_xcm.rs +++ /dev/null @@ -1,323 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Cumulus. - -// Cumulus is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Cumulus is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Cumulus. If not, see . - -//! Autogenerated weights for `pallet_xcm` -//! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-11-10, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-yprdrvc7-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("collectives-polkadot-dev")`, DB CACHE: 1024 - -// Executed Command: -// target/production/polkadot-parachain -// benchmark -// pallet -// --steps=50 -// --repeat=20 -// --extrinsic=* -// --wasm-execution=compiled -// --heap-pages=4096 -// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json -// --pallet=pallet_xcm -// --chain=collectives-polkadot-dev -// --header=./cumulus/file_header.txt -// --output=./cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/ - -#![cfg_attr(rustfmt, rustfmt_skip)] -#![allow(unused_parens)] -#![allow(unused_imports)] -#![allow(missing_docs)] - -use frame_support::{traits::Get, weights::Weight}; -use core::marker::PhantomData; - -/// Weight functions for `pallet_xcm`. 
-pub struct WeightInfo(PhantomData); -impl pallet_xcm::WeightInfo for WeightInfo { - /// Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) - /// Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) - /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) - /// Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) - /// Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) - /// Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) - /// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - fn send() -> Weight { - // Proof Size summary in bytes: - // Measured: `145` - // Estimated: `3610` - // Minimum execution time: 25_050_000 picoseconds. - Weight::from_parts(26_382_000, 0) - .saturating_add(Weight::from_parts(0, 3610)) - .saturating_add(T::DbWeight::get().reads(6)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `ParachainInfo::ParachainId` (r:1 w:0) - /// Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - fn teleport_assets() -> Weight { - // Proof Size summary in bytes: - // Measured: `69` - // Estimated: `1489` - // Minimum execution time: 21_625_000 picoseconds. - Weight::from_parts(22_076_000, 0) - .saturating_add(Weight::from_parts(0, 1489)) - .saturating_add(T::DbWeight::get().reads(1)) - } - /// Storage: `Benchmark::Override` (r:0 w:0) - /// Proof: `Benchmark::Override` (`max_values`: None, `max_size`: None, mode: `Measured`) - fn reserve_transfer_assets() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 18_446_744_073_709_551_000 picoseconds. - Weight::from_parts(18_446_744_073_709_551_000, 0) - .saturating_add(Weight::from_parts(0, 0)) - } - /// Storage: `Benchmark::Override` (r:0 w:0) - /// Proof: `Benchmark::Override` (`max_values`: None, `max_size`: None, mode: `Measured`) - fn execute() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 18_446_744_073_709_551_000 picoseconds. - Weight::from_parts(18_446_744_073_709_551_000, 0) - .saturating_add(Weight::from_parts(0, 0)) - } - /// Storage: `PolkadotXcm::SupportedVersion` (r:0 w:1) - /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) - fn force_xcm_version() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 7_076_000 picoseconds. - Weight::from_parts(7_378_000, 0) - .saturating_add(Weight::from_parts(0, 0)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `PolkadotXcm::SafeXcmVersion` (r:0 w:1) - /// Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - fn force_default_xcm_version() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 2_327_000 picoseconds. 
- Weight::from_parts(2_454_000, 0) - .saturating_add(Weight::from_parts(0, 0)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `PolkadotXcm::VersionNotifiers` (r:1 w:1) - /// Proof: `PolkadotXcm::VersionNotifiers` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// Storage: `PolkadotXcm::QueryCounter` (r:1 w:1) - /// Proof: `PolkadotXcm::QueryCounter` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) - /// Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) - /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) - /// Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) - /// Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) - /// Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) - /// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `PolkadotXcm::Queries` (r:0 w:1) - /// Proof: `PolkadotXcm::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`) - fn force_subscribe_version_notify() -> Weight { - // Proof Size summary in bytes: - // Measured: `145` - // Estimated: `3610` - // Minimum execution time: 29_080_000 picoseconds. - Weight::from_parts(29_886_000, 0) - .saturating_add(Weight::from_parts(0, 3610)) - .saturating_add(T::DbWeight::get().reads(8)) - .saturating_add(T::DbWeight::get().writes(5)) - } - /// Storage: `PolkadotXcm::VersionNotifiers` (r:1 w:1) - /// Proof: `PolkadotXcm::VersionNotifiers` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) - /// Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) - /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) - /// Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) - /// Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) - /// Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) - /// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `PolkadotXcm::Queries` (r:0 w:1) - /// Proof: `PolkadotXcm::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`) - fn force_unsubscribe_version_notify() -> Weight { - // Proof Size summary in bytes: - // Measured: `363` - // Estimated: `3828` - // Minimum execution time: 30_746_000 picoseconds. 
- Weight::from_parts(31_631_000, 0) - .saturating_add(Weight::from_parts(0, 3828)) - .saturating_add(T::DbWeight::get().reads(7)) - .saturating_add(T::DbWeight::get().writes(4)) - } - /// Storage: `PolkadotXcm::XcmExecutionSuspended` (r:0 w:1) - /// Proof: `PolkadotXcm::XcmExecutionSuspended` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - fn force_suspension() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 2_208_000 picoseconds. - Weight::from_parts(2_341_000, 0) - .saturating_add(Weight::from_parts(0, 0)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `PolkadotXcm::SupportedVersion` (r:4 w:2) - /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) - fn migrate_supported_version() -> Weight { - // Proof Size summary in bytes: - // Measured: `162` - // Estimated: `11052` - // Minimum execution time: 16_239_000 picoseconds. - Weight::from_parts(16_881_000, 0) - .saturating_add(Weight::from_parts(0, 11052)) - .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `PolkadotXcm::VersionNotifiers` (r:4 w:2) - /// Proof: `PolkadotXcm::VersionNotifiers` (`max_values`: None, `max_size`: None, mode: `Measured`) - fn migrate_version_notifiers() -> Weight { - // Proof Size summary in bytes: - // Measured: `166` - // Estimated: `11056` - // Minimum execution time: 16_711_000 picoseconds. - Weight::from_parts(16_944_000, 0) - .saturating_add(Weight::from_parts(0, 11056)) - .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:5 w:0) - /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) - fn already_notified_target() -> Weight { - // Proof Size summary in bytes: - // Measured: `173` - // Estimated: `13538` - // Minimum execution time: 18_142_000 picoseconds. - Weight::from_parts(18_470_000, 0) - .saturating_add(Weight::from_parts(0, 13538)) - .saturating_add(T::DbWeight::get().reads(5)) - } - /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:2 w:1) - /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) - /// Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) - /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) - /// Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) - /// Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) - /// Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) - /// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - fn notify_current_targets() -> Weight { - // Proof Size summary in bytes: - // Measured: `212` - // Estimated: `6152` - // Minimum execution time: 27_687_000 picoseconds. 
- Weight::from_parts(28_250_000, 0) - .saturating_add(Weight::from_parts(0, 6152)) - .saturating_add(T::DbWeight::get().reads(8)) - .saturating_add(T::DbWeight::get().writes(3)) - } - /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:3 w:0) - /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) - fn notify_target_migration_fail() -> Weight { - // Proof Size summary in bytes: - // Measured: `206` - // Estimated: `8621` - // Minimum execution time: 9_675_000 picoseconds. - Weight::from_parts(9_992_000, 0) - .saturating_add(Weight::from_parts(0, 8621)) - .saturating_add(T::DbWeight::get().reads(3)) - } - /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:4 w:2) - /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) - fn migrate_version_notify_targets() -> Weight { - // Proof Size summary in bytes: - // Measured: `173` - // Estimated: `11063` - // Minimum execution time: 16_597_000 picoseconds. - Weight::from_parts(17_248_000, 0) - .saturating_add(Weight::from_parts(0, 11063)) - .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `PolkadotXcm::VersionNotifyTargets` (r:4 w:2) - /// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) - /// Proof: `ParachainSystem::UpwardDeliveryFeeFactor` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) - /// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) - /// Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) - /// Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) - /// Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) - /// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - fn migrate_and_notify_old_targets() -> Weight { - // Proof Size summary in bytes: - // Measured: `215` - // Estimated: `11105` - // Minimum execution time: 34_649_000 picoseconds. - Weight::from_parts(35_475_000, 0) - .saturating_add(Weight::from_parts(0, 11105)) - .saturating_add(T::DbWeight::get().reads(10)) - .saturating_add(T::DbWeight::get().writes(4)) - } - /// Storage: `PolkadotXcm::QueryCounter` (r:1 w:1) - /// Proof: `PolkadotXcm::QueryCounter` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `PolkadotXcm::Queries` (r:0 w:1) - /// Proof: `PolkadotXcm::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`) - fn new_query() -> Weight { - // Proof Size summary in bytes: - // Measured: `103` - // Estimated: `1588` - // Minimum execution time: 4_619_000 picoseconds. 
- Weight::from_parts(4_756_000, 0) - .saturating_add(Weight::from_parts(0, 1588)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `PolkadotXcm::Queries` (r:1 w:1) - /// Proof: `PolkadotXcm::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`) - fn take_response() -> Weight { - // Proof Size summary in bytes: - // Measured: `7740` - // Estimated: `11205` - // Minimum execution time: 26_721_000 picoseconds. - Weight::from_parts(27_412_000, 0) - .saturating_add(Weight::from_parts(0, 11205)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } -} diff --git a/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/paritydb_weights.rs b/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/paritydb_weights.rs deleted file mode 100644 index 25679703831..00000000000 --- a/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/paritydb_weights.rs +++ /dev/null @@ -1,63 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -pub mod constants { - use frame_support::{ - parameter_types, - weights::{constants, RuntimeDbWeight}, - }; - - parameter_types! { - /// `ParityDB` can be enabled with a feature flag, but is still experimental. These weights - /// are available for brave runtime engineers who may want to try this out as default. - pub const ParityDbWeight: RuntimeDbWeight = RuntimeDbWeight { - read: 8_000 * constants::WEIGHT_REF_TIME_PER_NANOS, - write: 50_000 * constants::WEIGHT_REF_TIME_PER_NANOS, - }; - } - - #[cfg(test)] - mod test_db_weights { - use super::constants::ParityDbWeight as W; - use frame_support::weights::constants; - - /// Checks that all weights exist and have sane values. - // NOTE: If this test fails but you are sure that the generated values are fine, - // you can delete it. - #[test] - fn sane() { - // At least 1 µs. - assert!( - W::get().reads(1).ref_time() >= constants::WEIGHT_REF_TIME_PER_MICROS, - "Read weight should be at least 1 µs." - ); - assert!( - W::get().writes(1).ref_time() >= constants::WEIGHT_REF_TIME_PER_MICROS, - "Write weight should be at least 1 µs." - ); - // At most 1 ms. - assert!( - W::get().reads(1).ref_time() <= constants::WEIGHT_REF_TIME_PER_MILLIS, - "Read weight should be at most 1 ms." - ); - assert!( - W::get().writes(1).ref_time() <= constants::WEIGHT_REF_TIME_PER_MILLIS, - "Write weight should be at most 1 ms." 
- ); - } - } -} diff --git a/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/rocksdb_weights.rs b/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/rocksdb_weights.rs deleted file mode 100644 index 3dd817aa6f1..00000000000 --- a/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/weights/rocksdb_weights.rs +++ /dev/null @@ -1,63 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -pub mod constants { - use frame_support::{ - parameter_types, - weights::{constants, RuntimeDbWeight}, - }; - - parameter_types! { - /// By default, Substrate uses `RocksDB`, so this will be the weight used throughout - /// the runtime. - pub const RocksDbWeight: RuntimeDbWeight = RuntimeDbWeight { - read: 25_000 * constants::WEIGHT_REF_TIME_PER_NANOS, - write: 100_000 * constants::WEIGHT_REF_TIME_PER_NANOS, - }; - } - - #[cfg(test)] - mod test_db_weights { - use super::constants::RocksDbWeight as W; - use frame_support::weights::constants; - - /// Checks that all weights exist and have sane values. - // NOTE: If this test fails but you are sure that the generated values are fine, - // you can delete it. - #[test] - fn sane() { - // At least 1 µs. - assert!( - W::get().reads(1).ref_time() >= constants::WEIGHT_REF_TIME_PER_MICROS, - "Read weight should be at least 1 µs." - ); - assert!( - W::get().writes(1).ref_time() >= constants::WEIGHT_REF_TIME_PER_MICROS, - "Write weight should be at least 1 µs." - ); - // At most 1 ms. - assert!( - W::get().reads(1).ref_time() <= constants::WEIGHT_REF_TIME_PER_MILLIS, - "Read weight should be at most 1 ms." - ); - assert!( - W::get().writes(1).ref_time() <= constants::WEIGHT_REF_TIME_PER_MILLIS, - "Write weight should be at most 1 ms." - ); - } - } -} diff --git a/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/xcm_config.rs b/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/xcm_config.rs deleted file mode 100644 index 71845650bd6..00000000000 --- a/cumulus/parachains/runtimes/collectives/collectives-polkadot/src/xcm_config.rs +++ /dev/null @@ -1,331 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -use super::{ - AccountId, AllPalletsWithSystem, Balances, BaseDeliveryFee, FeeAssetId, Fellows, ParachainInfo, - ParachainSystem, PolkadotXcm, Runtime, RuntimeCall, RuntimeEvent, RuntimeOrigin, - TransactionByteFee, WeightToFee, XcmpQueue, -}; -use frame_support::{ - match_types, parameter_types, - traits::{ConstU32, Contains, Everything, Nothing}, - weights::Weight, -}; -use frame_system::EnsureRoot; -use pallet_xcm::XcmPassthrough; -use parachains_common::{impls::ToStakingPot, xcm_config::ConcreteAssetFromSystem}; -use polkadot_parachain_primitives::primitives::Sibling; -use polkadot_runtime_common::xcm_sender::ExponentialPrice; -use xcm::latest::prelude::*; -use xcm_builder::{ - AccountId32Aliases, AllowExplicitUnpaidExecutionFrom, AllowKnownQueryResponses, - AllowSubscriptionsFrom, AllowTopLevelPaidExecutionFrom, CurrencyAdapter, - DenyReserveTransferToRelayChain, DenyThenTry, EnsureXcmOrigin, FixedWeightBounds, IsConcrete, - LocatableAssetId, OriginToPluralityVoice, ParentAsSuperuser, ParentIsPreset, - RelayChainAsNative, SiblingParachainAsNative, SiblingParachainConvertsVia, - SignedAccountId32AsNative, SignedToAccountId32, SovereignSignedViaLocation, TakeWeightCredit, - TrailingSetTopicAsId, UsingComponents, WithComputedOrigin, WithUniqueTopic, -}; -use xcm_executor::{traits::WithOriginFilter, XcmExecutor}; - -const FELLOWSHIP_ADMIN_INDEX: u32 = 1; - -parameter_types! { - pub const DotLocation: MultiLocation = MultiLocation::parent(); - pub const RelayNetwork: Option = Some(NetworkId::Polkadot); - pub RelayChainOrigin: RuntimeOrigin = cumulus_pallet_xcm::Origin::Relay.into(); - pub UniversalLocation: InteriorMultiLocation = - X2(GlobalConsensus(RelayNetwork::get().unwrap()), Parachain(ParachainInfo::parachain_id().into())); - pub CheckingAccount: AccountId = PolkadotXcm::check_account(); - pub const GovernanceLocation: MultiLocation = MultiLocation::parent(); - pub const FellowshipAdminBodyId: BodyId = BodyId::Index(FELLOWSHIP_ADMIN_INDEX); - pub AssetHub: MultiLocation = (Parent, Parachain(1000)).into(); - pub AssetHubUsdtId: AssetId = (PalletInstance(50), GeneralIndex(1984)).into(); - pub UsdtAssetHub: LocatableAssetId = LocatableAssetId { - location: AssetHub::get(), - asset_id: AssetHubUsdtId::get(), - }; - pub DotAssetHub: LocatableAssetId = LocatableAssetId { - location: AssetHub::get(), - asset_id: DotLocation::get().into(), - }; -} - -/// Type for specifying how a `MultiLocation` can be converted into an `AccountId`. This is used -/// when determining ownership of accounts for asset transacting and when attempting to use XCM -/// `Transact` in order to determine the dispatch Origin. -pub type LocationToAccountId = ( - // The parent (Relay-chain) origin converts to the parent `AccountId`. - ParentIsPreset, - // Sibling parachain origins convert to AccountId via the `ParaId::into`. - SiblingParachainConvertsVia, - // Straight up local `AccountId32` origins just alias directly to `AccountId`. - AccountId32Aliases, -); - -/// Means for transacting the native currency on this chain. -pub type CurrencyTransactor = CurrencyAdapter< - // Use this currency: - Balances, - // Use this currency when it is a fungible asset matching the given location or name: - IsConcrete, - // Convert an XCM MultiLocation into a local account id: - LocationToAccountId, - // Our chain's account ID type (we can't get away without mentioning it explicitly): - AccountId, - // We don't track any teleports of `Balances`. 
- (), ->; - -/// This is the type we use to convert an (incoming) XCM origin into a local `Origin` instance, -/// ready for dispatching a transaction with Xcm's `Transact`. There is an `OriginKind` which can -/// biases the kind of local `Origin` it will become. -pub type XcmOriginToTransactDispatchOrigin = ( - // Sovereign account converter; this attempts to derive an `AccountId` from the origin location - // using `LocationToAccountId` and then turn that into the usual `Signed` origin. Useful for - // foreign chains who want to have a local sovereign account on this chain which they control. - SovereignSignedViaLocation, - // Native converter for Relay-chain (Parent) location; will convert to a `Relay` origin when - // recognised. - RelayChainAsNative, - // Native converter for sibling Parachains; will convert to a `SiblingPara` origin when - // recognised. - SiblingParachainAsNative, - // Superuser converter for the Relay-chain (Parent) location. This will allow it to issue a - // transaction from the Root origin. - ParentAsSuperuser, - // Native signed account converter; this just converts an `AccountId32` origin into a normal - // `RuntimeOrigin::Signed` origin of the same 32-byte value. - SignedAccountId32AsNative, - // Xcm origins can be represented natively under the Xcm pallet's Xcm origin. - XcmPassthrough, -); - -parameter_types! { - /// The amount of weight an XCM operation takes. This is a safe overestimate. - pub const BaseXcmWeight: Weight = Weight::from_parts(1_000_000_000, 1024); - /// A temporary weight value for each XCM instruction. - /// NOTE: This should be removed after we account for PoV weights. - pub const TempFixedXcmWeight: Weight = Weight::from_parts(1_000_000_000, 0); - pub const MaxInstructions: u32 = 100; - pub const MaxAssetsIntoHolding: u32 = 64; - // Fellows pluralistic body. - pub const FellowsBodyId: BodyId = BodyId::Technical; -} - -match_types! { - pub type ParentOrParentsPlurality: impl Contains = { - MultiLocation { parents: 1, interior: Here } | - MultiLocation { parents: 1, interior: X1(Plurality { .. }) } - }; - pub type ParentOrSiblings: impl Contains = { - MultiLocation { parents: 1, interior: Here } | - MultiLocation { parents: 1, interior: X1(_) } - }; -} - -/// A call filter for the XCM Transact instruction. This is a temporary measure until we properly -/// account for proof size weights. -/// -/// Calls that are allowed through this filter must: -/// 1. Have a fixed weight; -/// 2. Cannot lead to another call being made; -/// 3. Have a defined proof size weight, e.g. no unbounded vecs in call parameters. -pub struct SafeCallFilter; -impl Contains for SafeCallFilter { - fn contains(call: &RuntimeCall) -> bool { - #[cfg(feature = "runtime-benchmarks")] - { - if matches!(call, RuntimeCall::System(frame_system::Call::remark_with_event { .. })) { - return true - } - } - - matches!( - call, - RuntimeCall::System( - frame_system::Call::set_heap_pages { .. } | - frame_system::Call::set_code { .. } | - frame_system::Call::set_code_without_checks { .. } | - frame_system::Call::kill_prefix { .. }, - ) | RuntimeCall::ParachainSystem(..) | - RuntimeCall::Timestamp(..) | - RuntimeCall::Balances(..) | - RuntimeCall::CollatorSelection( - pallet_collator_selection::Call::set_desired_candidates { .. } | - pallet_collator_selection::Call::set_candidacy_bond { .. } | - pallet_collator_selection::Call::register_as_candidate { .. } | - pallet_collator_selection::Call::leave_intent { .. } | - pallet_collator_selection::Call::set_invulnerables { .. 
} | - pallet_collator_selection::Call::add_invulnerable { .. } | - pallet_collator_selection::Call::remove_invulnerable { .. }, - ) | RuntimeCall::Session(pallet_session::Call::purge_keys { .. }) | - RuntimeCall::PolkadotXcm(pallet_xcm::Call::force_xcm_version { .. }) | - RuntimeCall::XcmpQueue(..) | - RuntimeCall::MessageQueue(..) | - RuntimeCall::Alliance( - // `init_members` accepts unbounded vecs as arguments, - // but the call can be initiated only by root origin. - pallet_alliance::Call::init_members { .. } | - pallet_alliance::Call::vote { .. } | - pallet_alliance::Call::disband { .. } | - pallet_alliance::Call::set_rule { .. } | - pallet_alliance::Call::announce { .. } | - pallet_alliance::Call::remove_announcement { .. } | - pallet_alliance::Call::join_alliance { .. } | - pallet_alliance::Call::nominate_ally { .. } | - pallet_alliance::Call::elevate_ally { .. } | - pallet_alliance::Call::give_retirement_notice { .. } | - pallet_alliance::Call::retire { .. } | - pallet_alliance::Call::kick_member { .. } | - pallet_alliance::Call::close { .. } | - pallet_alliance::Call::abdicate_fellow_status { .. }, - ) | RuntimeCall::AllianceMotion( - pallet_collective::Call::vote { .. } | - pallet_collective::Call::disapprove_proposal { .. } | - pallet_collective::Call::close { .. }, - ) | RuntimeCall::FellowshipCollective( - pallet_ranked_collective::Call::add_member { .. } | - pallet_ranked_collective::Call::promote_member { .. } | - pallet_ranked_collective::Call::demote_member { .. } | - pallet_ranked_collective::Call::remove_member { .. }, - ) | RuntimeCall::FellowshipCore( - pallet_core_fellowship::Call::bump { .. } | - pallet_core_fellowship::Call::set_params { .. } | - pallet_core_fellowship::Call::set_active { .. } | - pallet_core_fellowship::Call::approve { .. } | - pallet_core_fellowship::Call::induct { .. } | - pallet_core_fellowship::Call::promote { .. } | - pallet_core_fellowship::Call::offboard { .. } | - pallet_core_fellowship::Call::submit_evidence { .. } | - pallet_core_fellowship::Call::import { .. }, - ) - ) - } -} - -pub type Barrier = TrailingSetTopicAsId< - DenyThenTry< - DenyReserveTransferToRelayChain, - ( - // Allow local users to buy weight credit. - TakeWeightCredit, - // Expected responses are OK. - AllowKnownQueryResponses, - // Allow XCMs with some computed origins to pass through. - WithComputedOrigin< - ( - // If the message is one that immediately attempts to pay for execution, then - // allow it. - AllowTopLevelPaidExecutionFrom, - // Parent and its pluralities (i.e. governance bodies) get free execution. - AllowExplicitUnpaidExecutionFrom, - // Subscriptions for version tracking are OK. - AllowSubscriptionsFrom, - ), - UniversalLocation, - ConstU32<8>, - >, - ), - >, ->; - -/// Cases where a remote origin is accepted as trusted Teleporter for a given asset: -/// - DOT with the parent Relay Chain and sibling parachains. -pub type TrustedTeleporters = ConcreteAssetFromSystem; - -pub struct XcmConfig; -impl xcm_executor::Config for XcmConfig { - type RuntimeCall = RuntimeCall; - type XcmSender = XcmRouter; - type AssetTransactor = CurrencyTransactor; - type OriginConverter = XcmOriginToTransactDispatchOrigin; - // Collectives does not recognize a reserve location for any asset. Users must teleport DOT - // where allowed (e.g. with the Relay Chain). 
- type IsReserve = (); - type IsTeleporter = TrustedTeleporters; - type UniversalLocation = UniversalLocation; - type Barrier = Barrier; - type Weigher = FixedWeightBounds; - type Trader = - UsingComponents>; - type ResponseHandler = PolkadotXcm; - type AssetTrap = PolkadotXcm; - type AssetClaims = PolkadotXcm; - type SubscriptionService = PolkadotXcm; - type PalletInstancesInfo = AllPalletsWithSystem; - type MaxAssetsIntoHolding = MaxAssetsIntoHolding; - type AssetLocker = (); - type AssetExchanger = (); - type FeeManager = (); - type MessageExporter = (); - type UniversalAliases = Nothing; - type CallDispatcher = WithOriginFilter; - type SafeCallFilter = SafeCallFilter; - type Aliasers = Nothing; -} - -/// Converts a local signed origin into an XCM multilocation. -/// Forms the basis for local origins sending/executing XCMs. -pub type LocalOriginToLocation = SignedToAccountId32; - -pub type PriceForParentDelivery = - ExponentialPrice; - -/// The means for routing XCM messages which are not for local execution into the right message -/// queues. -pub type XcmRouter = WithUniqueTopic<( - // Two routers - use UMP to communicate with the relay chain: - cumulus_primitives_utility::ParentAsUmp, - // ..and XCMP to communicate with the sibling chains. - XcmpQueue, -)>; - -/// Type to convert the Fellows origin to a Plurality `MultiLocation` value. -pub type FellowsToPlurality = OriginToPluralityVoice; - -impl pallet_xcm::Config for Runtime { - type RuntimeEvent = RuntimeEvent; - // We only allow the Fellows to send messages. - type SendXcmOrigin = EnsureXcmOrigin; - type XcmRouter = XcmRouter; - // We support local origins dispatching XCM executions in principle... - type ExecuteXcmOrigin = EnsureXcmOrigin; - // ... but disallow generic XCM execution. As a result only teleports are allowed. - type XcmExecuteFilter = Nothing; - type XcmExecutor = XcmExecutor; - type XcmTeleportFilter = Everything; - type XcmReserveTransferFilter = Nothing; // This parachain is not meant as a reserve location. - type Weigher = FixedWeightBounds; - type UniversalLocation = UniversalLocation; - type RuntimeOrigin = RuntimeOrigin; - type RuntimeCall = RuntimeCall; - const VERSION_DISCOVERY_QUEUE_SIZE: u32 = 100; - type AdvertisedXcmVersion = pallet_xcm::CurrentXcmVersion; - type Currency = Balances; - type CurrencyMatcher = (); - type TrustedLockers = (); - type SovereignAccountOf = LocationToAccountId; - type MaxLockers = ConstU32<8>; - type WeightInfo = crate::weights::pallet_xcm::WeightInfo; - type AdminOrigin = EnsureRoot; - type MaxRemoteLockConsumers = ConstU32<0>; - type RemoteLockConsumerIdentifier = (); -} - -impl cumulus_pallet_xcm::Config for Runtime { - type RuntimeEvent = RuntimeEvent; - type XcmExecutor = XcmExecutor; -} diff --git a/cumulus/parachains/runtimes/glutton/glutton-kusama/Cargo.toml b/cumulus/parachains/runtimes/glutton/glutton-kusama/Cargo.toml deleted file mode 100644 index 7f5feb1c880..00000000000 --- a/cumulus/parachains/runtimes/glutton/glutton-kusama/Cargo.toml +++ /dev/null @@ -1,138 +0,0 @@ -[package] -name = "glutton-runtime" -version = "1.0.0" -description = "Glutton parachain runtime." 
-authors.workspace = true -edition.workspace = true -license = "Apache-2.0" - -[dependencies] -codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } - -# Substrate -frame-benchmarking = { path = "../../../../../substrate/frame/benchmarking", default-features = false, optional = true} -frame-executive = { path = "../../../../../substrate/frame/executive", default-features = false} -frame-support = { path = "../../../../../substrate/frame/support", default-features = false} -frame-system = { path = "../../../../../substrate/frame/system", default-features = false} -frame-system-rpc-runtime-api = { path = "../../../../../substrate/frame/system/rpc/runtime-api", default-features = false} -frame-system-benchmarking = { path = "../../../../../substrate/frame/system/benchmarking", default-features = false, optional = true} -frame-try-runtime = { path = "../../../../../substrate/frame/try-runtime", default-features = false, optional = true} -pallet-aura = { path = "../../../../../substrate/frame/aura", default-features = false} -pallet-glutton = { path = "../../../../../substrate/frame/glutton", default-features = false, optional = true} -pallet-sudo = { path = "../../../../../substrate/frame/sudo", default-features = false, optional = true} -pallet-timestamp = { path = "../../../../../substrate/frame/timestamp", default-features = false } -sp-api = { path = "../../../../../substrate/primitives/api", default-features = false} -sp-block-builder = { path = "../../../../../substrate/primitives/block-builder", default-features = false} -sp-consensus-aura = { path = "../../../../../substrate/primitives/consensus/aura", default-features = false } -sp-core = { path = "../../../../../substrate/primitives/core", default-features = false} -sp-genesis-builder = { path = "../../../../../substrate/primitives/genesis-builder", default-features = false } -sp-inherents = { path = "../../../../../substrate/primitives/inherents", default-features = false} -pallet-message-queue = { path = "../../../../../substrate/frame/message-queue", default-features = false } -sp-offchain = { path = "../../../../../substrate/primitives/offchain", default-features = false} -sp-runtime = { path = "../../../../../substrate/primitives/runtime", default-features = false} -sp-session = { path = "../../../../../substrate/primitives/session", default-features = false} -sp-std = { path = "../../../../../substrate/primitives/std", default-features = false} -sp-storage = { path = "../../../../../substrate/primitives/storage", default-features = false} -sp-transaction-pool = { path = "../../../../../substrate/primitives/transaction-pool", default-features = false} -sp-version = { path = "../../../../../substrate/primitives/version", default-features = false} - -# Polkadot -xcm = { package = "staging-xcm", path = "../../../../../polkadot/xcm", default-features = false} -xcm-builder = { package = "staging-xcm-builder", path = "../../../../../polkadot/xcm/xcm-builder", default-features = false} -xcm-executor = { package = "staging-xcm-executor", path = "../../../../../polkadot/xcm/xcm-executor", default-features = false} - -# Cumulus -cumulus-pallet-aura-ext = { path = "../../../../pallets/aura-ext", default-features = false } -cumulus-pallet-parachain-system = { path = "../../../../pallets/parachain-system", default-features = false, features = ["parameterized-consensus-hook",] } -cumulus-pallet-xcm = { 
path = "../../../../pallets/xcm", default-features = false } -cumulus-primitives-aura = { path = "../../../../primitives/aura", default-features = false } -cumulus-primitives-core = { path = "../../../../primitives/core", default-features = false } -cumulus-primitives-timestamp = { path = "../../../../primitives/timestamp", default-features = false } -parachain-info = { package = "staging-parachain-info", path = "../../../pallets/parachain-info", default-features = false } -parachains-common = { path = "../../../common", default-features = false } - -[build-dependencies] -substrate-wasm-builder = { path = "../../../../../substrate/utils/wasm-builder" } - -[features] -default = [ "std" ] -runtime-benchmarks = [ - "cumulus-pallet-parachain-system/runtime-benchmarks", - "cumulus-primitives-core/runtime-benchmarks", - "frame-benchmarking/runtime-benchmarks", - "frame-support/runtime-benchmarks", - "frame-system-benchmarking/runtime-benchmarks", - "frame-system/runtime-benchmarks", - "pallet-glutton/runtime-benchmarks", - "pallet-message-queue/runtime-benchmarks", - "pallet-sudo/runtime-benchmarks", - "pallet-timestamp/runtime-benchmarks", - "parachains-common/runtime-benchmarks", - "sp-runtime/runtime-benchmarks", - "xcm-builder/runtime-benchmarks", - "xcm-executor/runtime-benchmarks", -] -std = [ - "codec/std", - "cumulus-pallet-aura-ext/std", - "cumulus-pallet-parachain-system/std", - "cumulus-pallet-xcm/std", - "cumulus-primitives-aura/std", - "cumulus-primitives-core/std", - "cumulus-primitives-timestamp/std", - "frame-benchmarking?/std", - "frame-executive/std", - "frame-support/std", - "frame-system-benchmarking?/std", - "frame-system-rpc-runtime-api/std", - "frame-system/std", - "frame-try-runtime?/std", - "pallet-aura/std", - "pallet-glutton/std", - "pallet-message-queue/std", - "pallet-sudo/std", - "pallet-timestamp/std", - "parachain-info/std", - "parachains-common/std", - "scale-info/std", - "sp-api/std", - "sp-block-builder/std", - "sp-consensus-aura/std", - "sp-core/std", - "sp-genesis-builder/std", - "sp-inherents/std", - "sp-offchain/std", - "sp-runtime/std", - "sp-session/std", - "sp-std/std", - "sp-storage/std", - "sp-transaction-pool/std", - "sp-version/std", - "xcm-builder/std", - "xcm-executor/std", - "xcm/std", -] -try-runtime = [ - "cumulus-pallet-aura-ext/try-runtime", - "cumulus-pallet-parachain-system/try-runtime", - "cumulus-pallet-xcm/try-runtime", - "frame-executive/try-runtime", - "frame-support/try-runtime", - "frame-system/try-runtime", - "frame-try-runtime/try-runtime", - "pallet-aura/try-runtime", - "pallet-glutton/try-runtime", - "pallet-message-queue/try-runtime", - "pallet-sudo/try-runtime", - "pallet-timestamp/try-runtime", - "parachain-info/try-runtime", - "sp-runtime/try-runtime", -] - -experimental = [ "pallet-aura/experimental" ] - -# A feature that should be enabled when the runtime should be built for on-chain -# deployment. This will disable stuff that shouldn't be part of the on-chain wasm -# to make it smaller like logging for example. -on-chain-release-build = [ "sp-api/disable-logging" ] diff --git a/cumulus/parachains/runtimes/glutton/glutton-kusama/build.rs b/cumulus/parachains/runtimes/glutton/glutton-kusama/build.rs deleted file mode 100644 index 1580e6f07be..00000000000 --- a/cumulus/parachains/runtimes/glutton/glutton-kusama/build.rs +++ /dev/null @@ -1,24 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. 
-// SPDX-License-Identifier: Apache-2.0
-
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// 	http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-use substrate_wasm_builder::WasmBuilder;
-
-fn main() {
-	WasmBuilder::new()
-		.with_current_project()
-		.export_heap_base()
-		.import_memory()
-		.build()
-}
diff --git a/cumulus/parachains/runtimes/glutton/glutton-kusama/src/lib.rs b/cumulus/parachains/runtimes/glutton/glutton-kusama/src/lib.rs
deleted file mode 100644
index ef0734df25f..00000000000
--- a/cumulus/parachains/runtimes/glutton/glutton-kusama/src/lib.rs
+++ /dev/null
@@ -1,528 +0,0 @@
-// Copyright (C) Parity Technologies (UK) Ltd.
-// SPDX-License-Identifier: Apache-2.0
-
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// 	http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-//! # Glutton Runtime
-//!
-//! The purpose of the Glutton parachain is to do stress testing on the Kusama
-//! network.
-//!
-//! There may be multiple instances of the Glutton parachain deployed and
-//! connected to Kusama.
-//!
-//! These parachains do not hold any real value. Their purpose is to stress
-//! test the network.
-//!
-//! ### Governance
-//!
-//! Glutton defers its governance (namely, its `Root` origin) to its Relay
-//! Chain parent, Kusama.
-//!
-//! ### XCM
-//!
-//! Since the main goal of Glutton is solely stress testing, the parachain will
-//! only be able to receive XCM messages from Kusama via DMP. This way the Glutton
-//! parachains will be able to listen for upgrades that are coming from the
-//! Relay chain.
-
-#![cfg_attr(not(feature = "std"), no_std)]
-#![recursion_limit = "256"]
-
-// Make the WASM binary available.
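A hedged sketch of how the generated binary is typically consumed: `substrate-wasm-builder` emits a `wasm_binary.rs` exposing `WASM_BINARY: Option<&'static [u8]>`, which the `include!` just below re-exports. The helper name here is hypothetical and not part of this runtime.

```rust
// Sketch only: assumes the `WASM_BINARY` constant generated by `substrate-wasm-builder`
// and pulled in by the `include!` below. It is `None` when the runtime is built with
// `SKIP_WASM_BUILD`, hence the expect message.
#[cfg(feature = "std")]
pub fn wasm_binary_unwrap() -> &'static [u8] {
    WASM_BINARY.expect("wasm binary not built; rerun the build without `SKIP_WASM_BUILD`")
}
```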
-#[cfg(feature = "std")] -include!(concat!(env!("OUT_DIR"), "/wasm_binary.rs")); - -pub mod weights; -pub mod xcm_config; - -use cumulus_pallet_parachain_system::RelayNumberMonotonicallyIncreases; -use sp_api::impl_runtime_apis; -pub use sp_consensus_aura::sr25519::AuthorityId as AuraId; -use sp_core::{crypto::KeyTypeId, OpaqueMetadata}; -use sp_runtime::{ - create_runtime_str, generic, impl_opaque_keys, - traits::{AccountIdLookup, BlakeTwo256, Block as BlockT}, - transaction_validity::{TransactionSource, TransactionValidity}, - ApplyExtrinsicResult, -}; -use sp_std::prelude::*; -#[cfg(feature = "std")] -use sp_version::NativeVersion; -use sp_version::RuntimeVersion; - -use cumulus_primitives_core::AggregateMessageOrigin; -pub use frame_support::{ - construct_runtime, - dispatch::DispatchClass, - genesis_builder_helper::{build_config, create_default_config}, - parameter_types, - traits::{ - ConstBool, ConstU32, ConstU64, ConstU8, EitherOfDiverse, Everything, IsInVec, Randomness, - }, - weights::{ - constants::{ - BlockExecutionWeight, ExtrinsicBaseWeight, RocksDbWeight, WEIGHT_REF_TIME_PER_SECOND, - }, - IdentityFee, Weight, - }, - PalletId, StorageValue, -}; -use frame_system::{ - limits::{BlockLength, BlockWeights}, - EnsureRoot, -}; -use parachains_common::{AccountId, Signature}; -#[cfg(any(feature = "std", test))] -pub use sp_runtime::BuildStorage; -pub use sp_runtime::{Perbill, Permill}; - -impl_opaque_keys! { - pub struct SessionKeys { - pub aura: Aura, - } -} - -#[sp_version::runtime_version] -pub const VERSION: RuntimeVersion = RuntimeVersion { - spec_name: create_runtime_str!("glutton"), - impl_name: create_runtime_str!("glutton"), - authoring_version: 1, - spec_version: 1_004_000, - impl_version: 0, - apis: RUNTIME_API_VERSIONS, - transaction_version: 1, - state_version: 1, -}; - -/// The version information used to identify this runtime when compiled natively. -#[cfg(feature = "std")] -pub fn native_version() -> NativeVersion { - NativeVersion { runtime_version: VERSION, can_author_with: Default::default() } -} - -/// We assume that ~10% of the block weight is consumed by `on_initialize` handlers. -/// This is used to limit the maximal weight of a single extrinsic. -const AVERAGE_ON_INITIALIZE_RATIO: Perbill = Perbill::from_percent(10); -/// We allow `Normal` extrinsics to fill up the block up to 75%, the rest can be used -/// by Operational extrinsics. -const NORMAL_DISPATCH_RATIO: Perbill = Perbill::from_percent(75); -/// We allow for .5 seconds of compute with a 12 second average block time. -const MAXIMUM_BLOCK_WEIGHT: Weight = Weight::from_parts( - WEIGHT_REF_TIME_PER_SECOND.saturating_mul(2), - cumulus_primitives_core::relay_chain::MAX_POV_SIZE as u64, -); - -/// Maximum number of blocks simultaneously accepted by the Runtime, not yet included -/// into the relay chain. -const UNINCLUDED_SEGMENT_CAPACITY: u32 = 3; -/// How many parachain blocks are processed by the relay chain per parent. Limits the -/// number of blocks authored per slot. -const BLOCK_PROCESSING_VELOCITY: u32 = 2; -/// Relay chain slot duration, in milliseconds. -const RELAY_CHAIN_SLOT_DURATION_MILLIS: u32 = 6000; - -/// This determines the average expected block time that we are targeting. -/// Blocks will be produced at a minimum duration defined by `SLOT_DURATION`. -/// `SLOT_DURATION` is picked up by `pallet_timestamp` which is in turn picked -/// up by `pallet_aura` to implement `fn slot_duration()`. -/// -/// Change this to adjust the block time. 
-pub const MILLISECS_PER_BLOCK: u64 = 6000; -pub const SLOT_DURATION: u64 = MILLISECS_PER_BLOCK; - -parameter_types! { - pub const BlockHashCount: BlockNumber = 4096; - pub const Version: RuntimeVersion = VERSION; - pub RuntimeBlockLength: BlockLength = - BlockLength::max_with_normal_ratio(5 * 1024 * 1024, NORMAL_DISPATCH_RATIO); - pub RuntimeBlockWeights: BlockWeights = BlockWeights::builder() - .base_block(BlockExecutionWeight::get()) - .for_class(DispatchClass::all(), |weights| { - weights.base_extrinsic = ExtrinsicBaseWeight::get(); - }) - .for_class(DispatchClass::Normal, |weights| { - weights.max_total = Some(NORMAL_DISPATCH_RATIO * MAXIMUM_BLOCK_WEIGHT); - }) - .for_class(DispatchClass::Operational, |weights| { - weights.max_total = Some(MAXIMUM_BLOCK_WEIGHT); - // Operational transactions have some extra reserved space, so that they - // are included even if block reached `MAXIMUM_BLOCK_WEIGHT`. - weights.reserved = Some( - MAXIMUM_BLOCK_WEIGHT - NORMAL_DISPATCH_RATIO * MAXIMUM_BLOCK_WEIGHT - ); - }) - .avg_block_initialization(AVERAGE_ON_INITIALIZE_RATIO) - .build_or_panic(); - pub const SS58Prefix: u8 = 2; -} - -impl frame_system::Config for Runtime { - type AccountId = AccountId; - type RuntimeCall = RuntimeCall; - type Lookup = AccountIdLookup; - type Nonce = Nonce; - type Hash = Hash; - type Hashing = BlakeTwo256; - type Block = Block; - type RuntimeEvent = RuntimeEvent; - type RuntimeOrigin = RuntimeOrigin; - type BlockHashCount = BlockHashCount; - type Version = Version; - type PalletInfo = PalletInfo; - type AccountData = (); - type OnNewAccount = (); - type OnKilledAccount = (); - type DbWeight = (); - type BaseCallFilter = frame_support::traits::Everything; - type SystemWeightInfo = (); - type BlockWeights = RuntimeBlockWeights; - type BlockLength = RuntimeBlockLength; - type SS58Prefix = SS58Prefix; - type OnSetCode = cumulus_pallet_parachain_system::ParachainSetCode; - type MaxConsumers = frame_support::traits::ConstU32<16>; -} - -parameter_types! { - // We do anything the parent chain tells us in this runtime. - pub const ReservedDmpWeight: Weight = MAXIMUM_BLOCK_WEIGHT.saturating_div(2); - pub const RelayOrigin: AggregateMessageOrigin = AggregateMessageOrigin::Parent; -} - -type ConsensusHook = cumulus_pallet_aura_ext::FixedVelocityConsensusHook< - Runtime, - RELAY_CHAIN_SLOT_DURATION_MILLIS, - BLOCK_PROCESSING_VELOCITY, - UNINCLUDED_SEGMENT_CAPACITY, ->; - -impl cumulus_pallet_parachain_system::Config for Runtime { - type RuntimeEvent = RuntimeEvent; - type OnSystemEvent = (); - type SelfParaId = parachain_info::Pallet; - type DmpQueue = frame_support::traits::EnqueueWithOrigin; - type OutboundXcmpMessageSource = (); - type ReservedDmpWeight = ReservedDmpWeight; - type XcmpMessageHandler = (); - type ReservedXcmpWeight = (); - type CheckAssociatedRelayNumber = RelayNumberMonotonicallyIncreases; - type ConsensusHook = ConsensusHook; - type WeightInfo = weights::cumulus_pallet_parachain_system::WeightInfo; -} - -parameter_types! 
{ - pub MessageQueueServiceWeight: Weight = Perbill::from_percent(80) * - RuntimeBlockWeights::get().max_block; -} - -impl pallet_message_queue::Config for Runtime { - type RuntimeEvent = RuntimeEvent; - type WeightInfo = weights::pallet_message_queue::WeightInfo; - #[cfg(feature = "runtime-benchmarks")] - type MessageProcessor = pallet_message_queue::mock_helpers::NoopMessageProcessor< - cumulus_primitives_core::AggregateMessageOrigin, - >; - #[cfg(not(feature = "runtime-benchmarks"))] - type MessageProcessor = xcm_builder::ProcessXcmMessage< - AggregateMessageOrigin, - xcm_executor::XcmExecutor, - RuntimeCall, - >; - type Size = u32; - type QueueChangeHandler = (); - type QueuePausedQuery = (); // No XCMP queue pallet deployed. - type HeapSize = sp_core::ConstU32<{ 64 * 1024 }>; - type MaxStale = sp_core::ConstU32<8>; - type ServiceWeight = MessageQueueServiceWeight; -} - -impl parachain_info::Config for Runtime {} - -impl cumulus_pallet_aura_ext::Config for Runtime {} - -impl pallet_timestamp::Config for Runtime { - type Moment = u64; - type OnTimestampSet = Aura; - #[cfg(feature = "experimental")] - type MinimumPeriod = ConstU64<0>; - #[cfg(not(feature = "experimental"))] - type MinimumPeriod = ConstU64<{ SLOT_DURATION / 2 }>; - type WeightInfo = weights::pallet_timestamp::WeightInfo; -} - -impl pallet_aura::Config for Runtime { - type AuthorityId = AuraId; - type DisabledValidators = (); - type MaxAuthorities = ConstU32<100_000>; - type AllowMultipleBlocksPerSlot = ConstBool; - #[cfg(feature = "experimental")] - type SlotDuration = ConstU64; -} - -impl pallet_glutton::Config for Runtime { - type RuntimeEvent = RuntimeEvent; - type WeightInfo = weights::pallet_glutton::WeightInfo; - type AdminOrigin = EnsureRoot; -} - -impl pallet_sudo::Config for Runtime { - type RuntimeEvent = RuntimeEvent; - type RuntimeCall = RuntimeCall; - type WeightInfo = (); -} - -construct_runtime! { - pub enum Runtime - { - System: frame_system::{Pallet, Call, Storage, Config, Event} = 0, - ParachainSystem: cumulus_pallet_parachain_system::{ - Pallet, Call, Config, Storage, Inherent, Event, ValidateUnsigned, - } = 1, - ParachainInfo: parachain_info::{Pallet, Storage, Config} = 2, - Timestamp: pallet_timestamp::{Pallet, Call, Storage, Inherent} = 3, - - // DMP handler. - CumulusXcm: cumulus_pallet_xcm::{Pallet, Call, Storage, Event, Origin} = 10, - MessageQueue: pallet_message_queue::{Pallet, Call, Storage, Event} = 11, - - // The main stage. - Glutton: pallet_glutton::{Pallet, Call, Storage, Event, Config} = 20, - - // Collator support - Aura: pallet_aura::{Pallet, Storage, Config} = 30, - AuraExt: cumulus_pallet_aura_ext::{Pallet, Storage, Config} = 31, - - // Sudo. - Sudo: pallet_sudo::{Pallet, Call, Storage, Event, Config} = 255, - } -} - -/// Index of a transaction in the chain. -pub type Nonce = u32; -/// A hash of some data used by the chain. -pub type Hash = sp_core::H256; -/// An index to a block. -pub type BlockNumber = u32; -/// The address format for describing accounts. -pub type Address = sp_runtime::MultiAddress; -/// Block header type as expected by this runtime. -pub type Header = generic::Header; -/// Block type as expected by this runtime. -pub type Block = generic::Block; -/// A Block signed with a Justification -pub type SignedBlock = generic::SignedBlock; -/// BlockId type as expected by this runtime. -pub type BlockId = generic::BlockId; -/// The SignedExtension to the basic transaction logic. 
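The explicit `= 0`, `= 1`, ..., `= 255` assignments in the `construct_runtime!` block above pin each pallet's index, which becomes the SCALE variant index of the outer `RuntimeCall`/`RuntimeEvent` enums, so encodings stay stable even if pallets are reordered or removed. A toy sketch with invented names (assumes `parity-scale-codec` with its `derive` feature):

```rust
use parity_scale_codec::Encode;

// Invented mini call enum: `#[codec(index = ...)]` plays the same role as the
// `= N` assignments in `construct_runtime!`.
#[derive(Encode)]
enum DemoRuntimeCall {
    #[codec(index = 0)]
    System(u8),
    #[codec(index = 255)]
    Sudo(u8),
}

fn main() {
    // The pinned index is the first byte of the encoding.
    assert_eq!(DemoRuntimeCall::Sudo(7).encode(), vec![255, 7]);
}
```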
-pub type SignedExtra = ( - pallet_sudo::CheckOnlySudoAccount, - frame_system::CheckNonZeroSender, - frame_system::CheckSpecVersion, - frame_system::CheckTxVersion, - frame_system::CheckGenesis, - frame_system::CheckEra, - frame_system::CheckNonce, -); -/// Unchecked extrinsic type as expected by this runtime. -pub type UncheckedExtrinsic = - generic::UncheckedExtrinsic; -/// Executive: handles dispatch to the various modules. -pub type Executive = frame_executive::Executive< - Runtime, - Block, - frame_system::ChainContext, - Runtime, - AllPalletsWithSystem, ->; - -#[cfg(feature = "runtime-benchmarks")] -#[macro_use] -extern crate frame_benchmarking; - -#[cfg(feature = "runtime-benchmarks")] -mod benches { - define_benchmarks!( - [frame_system, SystemBench::] - [pallet_glutton, Glutton] - ); -} - -impl_runtime_apis! { - impl sp_api::Core for Runtime { - fn version() -> RuntimeVersion { - VERSION - } - - fn execute_block(block: Block) { - Executive::execute_block(block) - } - - fn initialize_block(header: &::Header) { - Executive::initialize_block(header) - } - } - - impl sp_api::Metadata for Runtime { - fn metadata() -> OpaqueMetadata { - OpaqueMetadata::new(Runtime::metadata().into()) - } - - fn metadata_at_version(version: u32) -> Option { - Runtime::metadata_at_version(version) - } - - fn metadata_versions() -> sp_std::vec::Vec { - Runtime::metadata_versions() - } - } - - impl sp_consensus_aura::AuraApi for Runtime { - fn slot_duration() -> sp_consensus_aura::SlotDuration { - sp_consensus_aura::SlotDuration::from_millis(Aura::slot_duration()) - } - - fn authorities() -> Vec { - Aura::authorities().into_inner() - } - } - - impl cumulus_primitives_aura::AuraUnincludedSegmentApi for Runtime { - fn can_build_upon( - included_hash: ::Hash, - slot: cumulus_primitives_aura::Slot, - ) -> bool { - ConsensusHook::can_build_upon(included_hash, slot) - } - } - - impl sp_block_builder::BlockBuilder for Runtime { - fn apply_extrinsic( - extrinsic: ::Extrinsic, - ) -> ApplyExtrinsicResult { - Executive::apply_extrinsic(extrinsic) - } - - fn finalize_block() -> ::Header { - Executive::finalize_block() - } - - fn inherent_extrinsics(data: sp_inherents::InherentData) -> Vec<::Extrinsic> { - data.create_extrinsics() - } - - fn check_inherents(block: Block, data: sp_inherents::InherentData) -> sp_inherents::CheckInherentsResult { - data.check_extrinsics(&block) - } - } - - impl sp_transaction_pool::runtime_api::TaggedTransactionQueue for Runtime { - fn validate_transaction( - source: TransactionSource, - tx: ::Extrinsic, - block_hash: ::Hash, - ) -> TransactionValidity { - Executive::validate_transaction(source, tx, block_hash) - } - } - - impl sp_offchain::OffchainWorkerApi for Runtime { - fn offchain_worker(header: &::Header) { - Executive::offchain_worker(header) - } - } - - impl sp_session::SessionKeys for Runtime { - fn generate_session_keys(seed: Option>) -> Vec { - SessionKeys::generate(seed) - } - - fn decode_session_keys( - encoded: Vec, - ) -> Option, KeyTypeId)>> { - SessionKeys::decode_into_raw_public_keys(&encoded) - } - } - - impl cumulus_primitives_core::CollectCollationInfo for Runtime { - fn collect_collation_info(header: &::Header) -> cumulus_primitives_core::CollationInfo { - ParachainSystem::collect_collation_info(header) - } - } - - impl frame_system_rpc_runtime_api::AccountNonceApi for Runtime { - fn account_nonce(account: AccountId) -> Nonce { - System::account_nonce(account) - } - } - - #[cfg(feature = "runtime-benchmarks")] - impl frame_benchmarking::Benchmark for Runtime { - fn 
benchmark_metadata(extra: bool) -> ( - Vec, - Vec, - ) { - use frame_benchmarking::{Benchmarking, BenchmarkList}; - use frame_support::traits::StorageInfoTrait; - use frame_system_benchmarking::Pallet as SystemBench; - - let mut list = Vec::::new(); - list_benchmarks!(list, extra); - - let storage_info = AllPalletsWithSystem::storage_info(); - - (list, storage_info) - } - - fn dispatch_benchmark( - config: frame_benchmarking::BenchmarkConfig - ) -> Result, sp_runtime::RuntimeString> { - use frame_benchmarking::{Benchmarking, BenchmarkBatch, BenchmarkError}; - use sp_storage::TrackedStorageKey; - - use frame_system_benchmarking::Pallet as SystemBench; - impl frame_system_benchmarking::Config for Runtime { - fn setup_set_code_requirements(code: &sp_std::vec::Vec) -> Result<(), BenchmarkError> { - ParachainSystem::initialize_for_set_code_benchmark(code.len() as u32); - Ok(()) - } - - fn verify_set_code() { - System::assert_last_event(cumulus_pallet_parachain_system::Event::::ValidationFunctionStored.into()); - } - } - - use frame_support::traits::WhitelistedStorageKeys; - let whitelist: Vec = AllPalletsWithSystem::whitelisted_storage_keys(); - - let mut batches = Vec::::new(); - let params = (&config, &whitelist); - add_benchmarks!(params, batches); - Ok(batches) - } - } - - impl sp_genesis_builder::GenesisBuilder for Runtime { - fn create_default_config() -> Vec { - create_default_config::() - } - - fn build_config(config: Vec) -> sp_genesis_builder::Result { - build_config::(config) - } - } -} - -cumulus_pallet_parachain_system::register_validate_block! { - Runtime = Runtime, - BlockExecutor = cumulus_pallet_aura_ext::BlockExecutor::, -} diff --git a/cumulus/parachains/runtimes/glutton/glutton-kusama/src/weights/cumulus_pallet_parachain_system.rs b/cumulus/parachains/runtimes/glutton/glutton-kusama/src/weights/cumulus_pallet_parachain_system.rs deleted file mode 100644 index f787aa32701..00000000000 --- a/cumulus/parachains/runtimes/glutton/glutton-kusama/src/weights/cumulus_pallet_parachain_system.rs +++ /dev/null @@ -1,80 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! Autogenerated weights for `cumulus_pallet_parachain_system` -//! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-03-28, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `i9`, CPU: `13th Gen Intel(R) Core(TM) i9-13900K` -//! 
EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("statemint-dev"), DB CACHE: 1024 - -// Executed Command: -// ./target/release/polkadot-parachain -// benchmark -// pallet -// --chain -// statemint-dev -// --pallet -// cumulus_pallet_parachain_system -// --extrinsic -// * -// --execution -// wasm -// --wasm-execution -// compiled -// --output -// parachains/runtimes/assets/statemint/src/weights -// --steps -// 50 -// --repeat -// 20 - -#![cfg_attr(rustfmt, rustfmt_skip)] -#![allow(unused_parens)] -#![allow(unused_imports)] - -use frame_support::{traits::Get, weights::Weight}; -use sp_std::marker::PhantomData; - -/// Weight functions for `cumulus_pallet_parachain_system`. -pub struct WeightInfo(PhantomData); -impl cumulus_pallet_parachain_system::WeightInfo for WeightInfo { - /// Storage: ParachainSystem LastDmqMqcHead (r:1 w:1) - /// Proof Skipped: ParachainSystem LastDmqMqcHead (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: ParachainSystem ReservedDmpWeightOverride (r:1 w:0) - /// Proof Skipped: ParachainSystem ReservedDmpWeightOverride (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: MessageQueue BookStateFor (r:1 w:1) - /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) - /// Storage: MessageQueue ServiceHead (r:1 w:1) - /// Proof: MessageQueue ServiceHead (max_values: Some(1), max_size: Some(5), added: 500, mode: MaxEncodedLen) - /// Storage: ParachainSystem ProcessedDownwardMessages (r:0 w:1) - /// Proof Skipped: ParachainSystem ProcessedDownwardMessages (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: MessageQueue Pages (r:0 w:16) - /// Proof: MessageQueue Pages (max_values: None, max_size: Some(65585), added: 68060, mode: MaxEncodedLen) - /// The range of component `n` is `[0, 1000]`. - fn enqueue_inbound_downward_messages(n: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `12` - // Estimated: `8013` - // Minimum execution time: 1_660_000 picoseconds. - Weight::from_parts(1_720_000, 0) - .saturating_add(Weight::from_parts(0, 8013)) - // Standard Error: 28_418 - .saturating_add(Weight::from_parts(24_636_963, 0).saturating_mul(n.into())) - .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(4)) - } -} diff --git a/cumulus/parachains/runtimes/glutton/glutton-kusama/src/weights/frame_system.rs b/cumulus/parachains/runtimes/glutton/glutton-kusama/src/weights/frame_system.rs deleted file mode 100644 index cf7ef948fd6..00000000000 --- a/cumulus/parachains/runtimes/glutton/glutton-kusama/src/weights/frame_system.rs +++ /dev/null @@ -1,154 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! Autogenerated weights for `frame_system` -//! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! 
DATE: 2023-07-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-ynta1nyy-project-238-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! EXECUTION: ``, WASM-EXECUTION: `Compiled`, CHAIN: `Some("glutton-kusama-dev-1300")`, DB CACHE: 1024 - -// Executed Command: -// ./target/production/polkadot-parachain -// benchmark -// pallet -// --chain=glutton-kusama-dev-1300 -// --wasm-execution=compiled -// --pallet=frame_system -// --no-storage-info -// --no-median-slopes -// --no-min-squares -// --extrinsic=* -// --steps=50 -// --repeat=20 -// --json -// --header=./file_header.txt -// --output=./parachains/runtimes/glutton/glutton-kusama/src/weights/ - -#![cfg_attr(rustfmt, rustfmt_skip)] -#![allow(unused_parens)] -#![allow(unused_imports)] -#![allow(missing_docs)] - -use frame_support::{traits::Get, weights::Weight}; -use core::marker::PhantomData; - -/// Weight functions for `frame_system`. -pub struct WeightInfo(PhantomData); -impl frame_system::WeightInfo for WeightInfo { - /// The range of component `b` is `[0, 3932160]`. - fn remark(b: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 1_717_000 picoseconds. - Weight::from_parts(1_782_325, 0) - .saturating_add(Weight::from_parts(0, 0)) - // Standard Error: 0 - .saturating_add(Weight::from_parts(387, 0).saturating_mul(b.into())) - } - /// The range of component `b` is `[0, 3932160]`. - fn remark_with_event(b: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 6_089_000 picoseconds. - Weight::from_parts(6_353_000, 0) - .saturating_add(Weight::from_parts(0, 0)) - // Standard Error: 2 - .saturating_add(Weight::from_parts(1_788, 0).saturating_mul(b.into())) - } - /// Storage: `System::Digest` (r:1 w:1) - /// Proof: `System::Digest` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: UNKNOWN KEY `0x3a686561707061676573` (r:0 w:1) - /// Proof: UNKNOWN KEY `0x3a686561707061676573` (r:0 w:1) - fn set_heap_pages() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `1485` - // Minimum execution time: 3_389_000 picoseconds. 
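	// (Editorial note, not produced by the benchmark CLI: in these generated files the
	// first argument to `Weight::from_parts` is ref-time in picoseconds and the second
	// is proof size in bytes, while `T::DbWeight::get().reads(n)` / `.writes(n)` add the
	// per-access database cost for the storage operations listed in the doc comments.)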
- Weight::from_parts(3_605_000, 0) - .saturating_add(Weight::from_parts(0, 1485)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: `ParachainSystem::ValidationData` (r:1 w:0) - /// Proof: `ParachainSystem::ValidationData` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `ParachainSystem::UpgradeRestrictionSignal` (r:1 w:0) - /// Proof: `ParachainSystem::UpgradeRestrictionSignal` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `ParachainSystem::PendingValidationCode` (r:1 w:1) - /// Proof: `ParachainSystem::PendingValidationCode` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) - /// Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `ParachainSystem::NewValidationCode` (r:0 w:1) - /// Proof: `ParachainSystem::NewValidationCode` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `ParachainSystem::DidSetValidationCode` (r:0 w:1) - /// Proof: `ParachainSystem::DidSetValidationCode` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - fn set_code() -> Weight { - // Proof Size summary in bytes: - // Measured: `119` - // Estimated: `1604` - // Minimum execution time: 97_701_839_000 picoseconds. - Weight::from_parts(100_104_315_000, 0) - .saturating_add(Weight::from_parts(0, 1604)) - .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(3)) - } - /// Storage: `Skipped::Metadata` (r:0 w:0) - /// Proof: `Skipped::Metadata` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// The range of component `i` is `[0, 1000]`. - fn set_storage(i: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 1_638_000 picoseconds. - Weight::from_parts(1_726_000, 0) - .saturating_add(Weight::from_parts(0, 0)) - // Standard Error: 1_886 - .saturating_add(Weight::from_parts(809_561, 0).saturating_mul(i.into())) - .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(i.into()))) - } - /// Storage: `Skipped::Metadata` (r:0 w:0) - /// Proof: `Skipped::Metadata` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// The range of component `i` is `[0, 1000]`. - fn kill_storage(i: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 1_569_000 picoseconds. - Weight::from_parts(1_690_000, 0) - .saturating_add(Weight::from_parts(0, 0)) - // Standard Error: 963 - .saturating_add(Weight::from_parts(580_145, 0).saturating_mul(i.into())) - .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(i.into()))) - } - /// Storage: `Skipped::Metadata` (r:0 w:0) - /// Proof: `Skipped::Metadata` (`max_values`: None, `max_size`: None, mode: `Measured`) - /// The range of component `p` is `[0, 1000]`. - fn kill_prefix(p: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `52 + p * (69 ±0)` - // Estimated: `46 + p * (70 ±0)` - // Minimum execution time: 3_039_000 picoseconds. 
- Weight::from_parts(3_090_000, 0) - .saturating_add(Weight::from_parts(0, 46)) - // Standard Error: 2_007 - .saturating_add(Weight::from_parts(1_269_045, 0).saturating_mul(p.into())) - .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(p.into()))) - .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(p.into()))) - .saturating_add(Weight::from_parts(0, 70).saturating_mul(p.into())) - } -} diff --git a/cumulus/parachains/runtimes/glutton/glutton-kusama/src/weights/mod.rs b/cumulus/parachains/runtimes/glutton/glutton-kusama/src/weights/mod.rs deleted file mode 100644 index 47f9d1ee105..00000000000 --- a/cumulus/parachains/runtimes/glutton/glutton-kusama/src/weights/mod.rs +++ /dev/null @@ -1,19 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -pub mod cumulus_pallet_parachain_system; -pub mod pallet_glutton; -pub mod pallet_message_queue; -pub mod pallet_timestamp; diff --git a/cumulus/parachains/runtimes/glutton/glutton-kusama/src/weights/pallet_glutton.rs b/cumulus/parachains/runtimes/glutton/glutton-kusama/src/weights/pallet_glutton.rs deleted file mode 100644 index e1b0c5bf232..00000000000 --- a/cumulus/parachains/runtimes/glutton/glutton-kusama/src/weights/pallet_glutton.rs +++ /dev/null @@ -1,179 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! Autogenerated weights for `pallet_glutton` -//! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-07-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-ynta1nyy-project-238-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! 
EXECUTION: ``, WASM-EXECUTION: `Compiled`, CHAIN: `Some("glutton-kusama-dev-1300")`, DB CACHE: 1024 - -// Executed Command: -// ./target/production/polkadot-parachain -// benchmark -// pallet -// --chain=glutton-kusama-dev-1300 -// --wasm-execution=compiled -// --pallet=pallet_glutton -// --no-storage-info -// --no-median-slopes -// --no-min-squares -// --extrinsic=* -// --steps=50 -// --repeat=20 -// --json -// --header=./file_header.txt -// --output=./parachains/runtimes/glutton/glutton-kusama/src/weights/ - -#![cfg_attr(rustfmt, rustfmt_skip)] -#![allow(unused_parens)] -#![allow(unused_imports)] -#![allow(missing_docs)] - -use frame_support::{traits::Get, weights::Weight}; -use core::marker::PhantomData; - -/// Weight functions for `pallet_glutton`. -pub struct WeightInfo(PhantomData); -impl pallet_glutton::WeightInfo for WeightInfo { - /// Storage: `Glutton::TrashDataCount` (r:1 w:1) - /// Proof: `Glutton::TrashDataCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - /// Storage: `Glutton::TrashData` (r:0 w:1000) - /// Proof: `Glutton::TrashData` (`max_values`: Some(65000), `max_size`: Some(1036), added: 3016, mode: `MaxEncodedLen`) - /// The range of component `n` is `[0, 1000]`. - fn initialize_pallet_grow(n: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `87` - // Estimated: `1489` - // Minimum execution time: 8_925_000 picoseconds. - Weight::from_parts(9_186_000, 0) - .saturating_add(Weight::from_parts(0, 1489)) - // Standard Error: 3_091 - .saturating_add(Weight::from_parts(9_666_196, 0).saturating_mul(n.into())) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(n.into()))) - } - /// Storage: `Glutton::TrashDataCount` (r:1 w:1) - /// Proof: `Glutton::TrashDataCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - /// Storage: `Glutton::TrashData` (r:0 w:1000) - /// Proof: `Glutton::TrashData` (`max_values`: Some(65000), `max_size`: Some(1036), added: 3016, mode: `MaxEncodedLen`) - /// The range of component `n` is `[0, 1000]`. - fn initialize_pallet_shrink(n: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `120` - // Estimated: `1489` - // Minimum execution time: 8_924_000 picoseconds. - Weight::from_parts(8_963_000, 0) - .saturating_add(Weight::from_parts(0, 1489)) - // Standard Error: 1_202 - .saturating_add(Weight::from_parts(1_139_080, 0).saturating_mul(n.into())) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(n.into()))) - } - /// The range of component `i` is `[0, 100000]`. - fn waste_ref_time_iter(i: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 708_000 picoseconds. - Weight::from_parts(1_698_031, 0) - .saturating_add(Weight::from_parts(0, 0)) - // Standard Error: 12 - .saturating_add(Weight::from_parts(106_500, 0).saturating_mul(i.into())) - } - /// Storage: `Glutton::TrashData` (r:5000 w:0) - /// Proof: `Glutton::TrashData` (`max_values`: Some(65000), `max_size`: Some(1036), added: 3016, mode: `MaxEncodedLen`) - /// The range of component `i` is `[0, 5000]`. - fn waste_proof_size_some(i: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `119115 + i * (1022 ±0)` - // Estimated: `990 + i * (3016 ±0)` - // Minimum execution time: 698_000 picoseconds. 
- Weight::from_parts(970_000, 0) - .saturating_add(Weight::from_parts(0, 990)) - // Standard Error: 4_022 - .saturating_add(Weight::from_parts(6_320_519, 0).saturating_mul(i.into())) - .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(i.into()))) - .saturating_add(Weight::from_parts(0, 3016).saturating_mul(i.into())) - } - /// Storage: `Glutton::Storage` (r:1 w:0) - /// Proof: `Glutton::Storage` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) - /// Storage: `Glutton::Compute` (r:1 w:0) - /// Proof: `Glutton::Compute` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) - /// Storage: `Glutton::TrashData` (r:1737 w:0) - /// Proof: `Glutton::TrashData` (`max_values`: Some(65000), `max_size`: Some(1036), added: 3016, mode: `MaxEncodedLen`) - fn on_idle_high_proof_waste() -> Weight { - // Proof Size summary in bytes: - // Measured: `1900498` - // Estimated: `5239782` - // Minimum execution time: 100_079_897_000 picoseconds. - Weight::from_parts(100_515_306_000, 0) - .saturating_add(Weight::from_parts(0, 5239782)) - .saturating_add(T::DbWeight::get().reads(1739)) - } - /// Storage: `Glutton::Storage` (r:1 w:0) - /// Proof: `Glutton::Storage` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) - /// Storage: `Glutton::Compute` (r:1 w:0) - /// Proof: `Glutton::Compute` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) - /// Storage: `Glutton::TrashData` (r:5 w:0) - /// Proof: `Glutton::TrashData` (`max_values`: Some(65000), `max_size`: Some(1036), added: 3016, mode: `MaxEncodedLen`) - fn on_idle_low_proof_waste() -> Weight { - // Proof Size summary in bytes: - // Measured: `9548` - // Estimated: `16070` - // Minimum execution time: 100_237_009_000 picoseconds. - Weight::from_parts(100_472_213_000, 0) - .saturating_add(Weight::from_parts(0, 16070)) - .saturating_add(T::DbWeight::get().reads(7)) - } - /// Storage: `Glutton::Storage` (r:1 w:0) - /// Proof: `Glutton::Storage` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) - /// Storage: `Glutton::Compute` (r:1 w:0) - /// Proof: `Glutton::Compute` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) - fn empty_on_idle() -> Weight { - // Proof Size summary in bytes: - // Measured: `87` - // Estimated: `1493` - // Minimum execution time: 5_120_000 picoseconds. - Weight::from_parts(5_262_000, 0) - .saturating_add(Weight::from_parts(0, 1493)) - .saturating_add(T::DbWeight::get().reads(2)) - } - /// Storage: `Glutton::Compute` (r:0 w:1) - /// Proof: `Glutton::Compute` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) - fn set_compute() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 5_947_000 picoseconds. - Weight::from_parts(6_171_000, 0) - .saturating_add(Weight::from_parts(0, 0)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: `Glutton::Storage` (r:0 w:1) - /// Proof: `Glutton::Storage` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) - fn set_storage() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 5_964_000 picoseconds. 
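	// (Editorial note: `set_compute` and `set_storage` are the root-only dials of
	// `pallet_glutton` in this runtime; they store the target fraction of per-block
	// ref-time and proof size, respectively, that `on_idle` then deliberately burns.)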
- Weight::from_parts(6_166_000, 0) - .saturating_add(Weight::from_parts(0, 0)) - .saturating_add(T::DbWeight::get().writes(1)) - } -} diff --git a/cumulus/parachains/runtimes/glutton/glutton-kusama/src/weights/pallet_message_queue.rs b/cumulus/parachains/runtimes/glutton/glutton-kusama/src/weights/pallet_message_queue.rs deleted file mode 100644 index a9f0cb07cfe..00000000000 --- a/cumulus/parachains/runtimes/glutton/glutton-kusama/src/weights/pallet_message_queue.rs +++ /dev/null @@ -1,179 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! Autogenerated weights for `pallet_message_queue` -//! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-03-24, STEPS: `2`, REPEAT: `1`, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `i9`, CPU: `13th Gen Intel(R) Core(TM) i9-13900K` -//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("westmint-dev"), DB CACHE: 1024 - -// Executed Command: -// ./target/release/polkadot-parachain -// benchmark -// pallet -// --chain -// westmint-dev -// --pallet -// pallet_message_queue -// --extrinsic -// * -// --execution -// wasm -// --wasm-execution -// compiled -// --output -// parachains/runtimes/assets/westmint/src/weights - -#![cfg_attr(rustfmt, rustfmt_skip)] -#![allow(unused_parens)] -#![allow(unused_imports)] - -use frame_support::{traits::Get, weights::Weight}; -use sp_std::marker::PhantomData; - -/// Weight functions for `pallet_message_queue`. -pub struct WeightInfo(PhantomData); -impl pallet_message_queue::WeightInfo for WeightInfo { - /// Storage: MessageQueue ServiceHead (r:1 w:0) - /// Proof: MessageQueue ServiceHead (max_values: Some(1), max_size: Some(5), added: 500, mode: MaxEncodedLen) - /// Storage: MessageQueue BookStateFor (r:2 w:2) - /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) - fn ready_ring_knit() -> Weight { - // Proof Size summary in bytes: - // Measured: `189` - // Estimated: `7534` - // Minimum execution time: 12_192_000 picoseconds. - Weight::from_parts(12_192_000, 0) - .saturating_add(Weight::from_parts(0, 7534)) - .saturating_add(T::DbWeight::get().reads(3)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: MessageQueue BookStateFor (r:2 w:2) - /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) - /// Storage: MessageQueue ServiceHead (r:1 w:1) - /// Proof: MessageQueue ServiceHead (max_values: Some(1), max_size: Some(5), added: 500, mode: MaxEncodedLen) - fn ready_ring_unknit() -> Weight { - // Proof Size summary in bytes: - // Measured: `184` - // Estimated: `7534` - // Minimum execution time: 10_447_000 picoseconds. 
- Weight::from_parts(10_447_000, 0) - .saturating_add(Weight::from_parts(0, 7534)) - .saturating_add(T::DbWeight::get().reads(3)) - .saturating_add(T::DbWeight::get().writes(3)) - } - /// Storage: MessageQueue BookStateFor (r:1 w:1) - /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) - fn service_queue_base() -> Weight { - // Proof Size summary in bytes: - // Measured: `6` - // Estimated: `3517` - // Minimum execution time: 4_851_000 picoseconds. - Weight::from_parts(4_851_000, 0) - .saturating_add(Weight::from_parts(0, 3517)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: MessageQueue Pages (r:1 w:1) - /// Proof: MessageQueue Pages (max_values: None, max_size: Some(65585), added: 68060, mode: MaxEncodedLen) - fn service_page_base_completion() -> Weight { - // Proof Size summary in bytes: - // Measured: `72` - // Estimated: `69050` - // Minimum execution time: 6_342_000 picoseconds. - Weight::from_parts(6_342_000, 0) - .saturating_add(Weight::from_parts(0, 69050)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: MessageQueue Pages (r:1 w:1) - /// Proof: MessageQueue Pages (max_values: None, max_size: Some(65585), added: 68060, mode: MaxEncodedLen) - fn service_page_base_no_completion() -> Weight { - // Proof Size summary in bytes: - // Measured: `72` - // Estimated: `69050` - // Minimum execution time: 6_199_000 picoseconds. - Weight::from_parts(6_199_000, 0) - .saturating_add(Weight::from_parts(0, 69050)) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - fn service_page_item() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 58_612_000 picoseconds. - Weight::from_parts(58_612_000, 0) - .saturating_add(Weight::from_parts(0, 0)) - } - /// Storage: MessageQueue ServiceHead (r:1 w:1) - /// Proof: MessageQueue ServiceHead (max_values: Some(1), max_size: Some(5), added: 500, mode: MaxEncodedLen) - /// Storage: MessageQueue BookStateFor (r:1 w:0) - /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) - fn bump_service_head() -> Weight { - // Proof Size summary in bytes: - // Measured: `99` - // Estimated: `5007` - // Minimum execution time: 7_296_000 picoseconds. - Weight::from_parts(7_296_000, 0) - .saturating_add(Weight::from_parts(0, 5007)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(1)) - } - /// Storage: MessageQueue BookStateFor (r:1 w:1) - /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) - /// Storage: MessageQueue Pages (r:1 w:1) - /// Proof: MessageQueue Pages (max_values: None, max_size: Some(65585), added: 68060, mode: MaxEncodedLen) - fn reap_page() -> Weight { - // Proof Size summary in bytes: - // Measured: `65667` - // Estimated: `72567` - // Minimum execution time: 48_345_000 picoseconds. 
- Weight::from_parts(48_345_000, 0) - .saturating_add(Weight::from_parts(0, 72567)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: MessageQueue BookStateFor (r:1 w:1) - /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) - /// Storage: MessageQueue Pages (r:1 w:1) - /// Proof: MessageQueue Pages (max_values: None, max_size: Some(65585), added: 68060, mode: MaxEncodedLen) - fn execute_overweight_page_removed() -> Weight { - // Proof Size summary in bytes: - // Measured: `65667` - // Estimated: `72567` - // Minimum execution time: 56_441_000 picoseconds. - Weight::from_parts(56_441_000, 0) - .saturating_add(Weight::from_parts(0, 72567)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(2)) - } - /// Storage: MessageQueue BookStateFor (r:1 w:1) - /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) - /// Storage: MessageQueue Pages (r:1 w:1) - /// Proof: MessageQueue Pages (max_values: None, max_size: Some(65585), added: 68060, mode: MaxEncodedLen) - fn execute_overweight_page_updated() -> Weight { - // Proof Size summary in bytes: - // Measured: `65667` - // Estimated: `72567` - // Minimum execution time: 70_858_000 picoseconds. - Weight::from_parts(70_858_000, 0) - .saturating_add(Weight::from_parts(0, 72567)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(2)) - } -} diff --git a/cumulus/parachains/runtimes/glutton/glutton-kusama/src/weights/pallet_timestamp.rs b/cumulus/parachains/runtimes/glutton/glutton-kusama/src/weights/pallet_timestamp.rs deleted file mode 100644 index 8edae065f1b..00000000000 --- a/cumulus/parachains/runtimes/glutton/glutton-kusama/src/weights/pallet_timestamp.rs +++ /dev/null @@ -1,75 +0,0 @@ -// Copyright Parity Technologies (UK) Ltd. -// This file is part of Cumulus. - -// Cumulus is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Cumulus is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Cumulus. If not, see . - -//! Autogenerated weights for `pallet_timestamp` -//! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-07-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-ynta1nyy-project-238-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! 
EXECUTION: ``, WASM-EXECUTION: `Compiled`, CHAIN: `Some("asset-hub-kusama-dev")`, DB CACHE: 1024 - -// Executed Command: -// ./target/production/polkadot-parachain -// benchmark -// pallet -// --chain=asset-hub-kusama-dev -// --wasm-execution=compiled -// --pallet=pallet_timestamp -// --no-storage-info -// --no-median-slopes -// --no-min-squares -// --extrinsic=* -// --steps=50 -// --repeat=20 -// --json -// --header=./file_header.txt -// --output=./parachains/runtimes/assets/asset-hub-kusama/src/weights/ - -#![cfg_attr(rustfmt, rustfmt_skip)] -#![allow(unused_parens)] -#![allow(unused_imports)] -#![allow(missing_docs)] - -use frame_support::{traits::Get, weights::Weight}; -use core::marker::PhantomData; - -/// Weight functions for `pallet_timestamp`. -pub struct WeightInfo(PhantomData); -impl pallet_timestamp::WeightInfo for WeightInfo { - /// Storage: `Timestamp::Now` (r:1 w:1) - /// Proof: `Timestamp::Now` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) - /// Storage: `Aura::CurrentSlot` (r:1 w:0) - /// Proof: `Aura::CurrentSlot` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) - fn set() -> Weight { - // Proof Size summary in bytes: - // Measured: `86` - // Estimated: `1493` - // Minimum execution time: 9_313_000 picoseconds. - Weight::from_parts(9_775_000, 0) - .saturating_add(Weight::from_parts(0, 1493)) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().writes(1)) - } - fn on_finalize() -> Weight { - // Proof Size summary in bytes: - // Measured: `57` - // Estimated: `0` - // Minimum execution time: 3_322_000 picoseconds. - Weight::from_parts(3_577_000, 0) - .saturating_add(Weight::from_parts(0, 0)) - } -} diff --git a/cumulus/parachains/runtimes/glutton/glutton-kusama/src/xcm_config.rs b/cumulus/parachains/runtimes/glutton/glutton-kusama/src/xcm_config.rs deleted file mode 100644 index fb7b78b79d2..00000000000 --- a/cumulus/parachains/runtimes/glutton/glutton-kusama/src/xcm_config.rs +++ /dev/null @@ -1,92 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -use super::{ - AccountId, AllPalletsWithSystem, ParachainInfo, Runtime, RuntimeCall, RuntimeEvent, - RuntimeOrigin, -}; -use frame_support::{ - match_types, parameter_types, - traits::{Everything, Nothing}, - weights::Weight, -}; -use xcm::latest::prelude::*; -use xcm_builder::{ - AllowExplicitUnpaidExecutionFrom, FixedWeightBounds, ParentAsSuperuser, ParentIsPreset, - SovereignSignedViaLocation, -}; - -parameter_types! { - pub const KusamaLocation: MultiLocation = MultiLocation::parent(); - pub const KusamaNetwork: Option = Some(NetworkId::Kusama); - pub UniversalLocation: InteriorMultiLocation = X1(Parachain(ParachainInfo::parachain_id().into())); -} - -/// This is the type we use to convert an (incoming) XCM origin into a local `Origin` instance, -/// ready for dispatching a transaction with Xcm's `Transact`. 
There is an `OriginKind` which can -/// bias the kind of local `Origin` it will become. -pub type XcmOriginToTransactDispatchOrigin = ( - // Sovereign account converter; this attempts to derive an `AccountId` from the origin location - // using `LocationToAccountId` and then turn that into the usual `Signed` origin. Useful for - // foreign chains who want to have a local sovereign account on this chain which they control. - SovereignSignedViaLocation, RuntimeOrigin>, - // Superuser converter for the Relay-chain (Parent) location. This will allow it to issue a - // transaction from the Root origin. - ParentAsSuperuser, -); - -match_types! { - pub type JustTheParent: impl Contains = { MultiLocation { parents:1, interior: Here } }; -} - -parameter_types! { - // One XCM operation is 1_000_000_000 weight - almost certainly a conservative estimate. - pub UnitWeightCost: Weight = Weight::from_parts(1_000_000_000, 64 * 1024); - pub const MaxInstructions: u32 = 100; - pub const MaxAssetsIntoHolding: u32 = 64; -} - -pub struct XcmConfig; -impl xcm_executor::Config for XcmConfig { - type RuntimeCall = RuntimeCall; - type XcmSender = (); // sending XCM not supported - type AssetTransactor = (); // balances not supported - type OriginConverter = XcmOriginToTransactDispatchOrigin; - type IsReserve = (); // balances not supported - type IsTeleporter = (); // balances not supported - type UniversalLocation = UniversalLocation; - type Barrier = AllowExplicitUnpaidExecutionFrom; - type Weigher = FixedWeightBounds; // balances not supported - type Trader = (); // balances not supported - type ResponseHandler = (); // Don't handle responses for now. - type AssetTrap = (); // don't trap for now - type AssetClaims = (); // don't claim for now - type SubscriptionService = (); // don't handle subscriptions for now - type PalletInstancesInfo = AllPalletsWithSystem; - type MaxAssetsIntoHolding = MaxAssetsIntoHolding; - type AssetLocker = (); - type AssetExchanger = (); - type FeeManager = (); - type MessageExporter = (); - type UniversalAliases = Nothing; - type CallDispatcher = RuntimeCall; - type SafeCallFilter = Everything; - type Aliasers = Nothing; -} - -impl cumulus_pallet_xcm::Config for Runtime { - type RuntimeEvent = RuntimeEvent; - type XcmExecutor = xcm_executor::XcmExecutor; -} diff --git a/cumulus/polkadot-parachain/Cargo.toml b/cumulus/polkadot-parachain/Cargo.toml index e23a7aeb22b..a7fdfb5d6c9 100644 --- a/cumulus/polkadot-parachain/Cargo.toml +++ b/cumulus/polkadot-parachain/Cargo.toml @@ -25,18 +25,12 @@ serde_json = "1.0.108" rococo-parachain-runtime = { path = "../parachains/runtimes/testing/rococo-parachain" } shell-runtime = { path = "../parachains/runtimes/starters/shell" } glutton-westend-runtime = { path = "../parachains/runtimes/glutton/glutton-westend" } -glutton-runtime = { path = "../parachains/runtimes/glutton/glutton-kusama" } seedling-runtime = { path = "../parachains/runtimes/starters/seedling" } -asset-hub-polkadot-runtime = { path = "../parachains/runtimes/assets/asset-hub-polkadot" } -asset-hub-kusama-runtime = { path = "../parachains/runtimes/assets/asset-hub-kusama" } asset-hub-rococo-runtime = { path = "../parachains/runtimes/assets/asset-hub-rococo" } asset-hub-westend-runtime = { path = "../parachains/runtimes/assets/asset-hub-westend" } -collectives-polkadot-runtime = { path = "../parachains/runtimes/collectives/collectives-polkadot" } collectives-westend-runtime = { path = "../parachains/runtimes/collectives/collectives-westend" } contracts-rococo-runtime = { path = 
"../parachains/runtimes/contracts/contracts-rococo" } bridge-hub-rococo-runtime = { path = "../parachains/runtimes/bridge-hubs/bridge-hub-rococo" } -bridge-hub-kusama-runtime = { path = "../parachains/runtimes/bridge-hubs/bridge-hub-kusama" } -bridge-hub-polkadot-runtime = { path = "../parachains/runtimes/bridge-hubs/bridge-hub-polkadot" } bridge-hub-westend-runtime = { path = "../parachains/runtimes/bridge-hubs/bridge-hub-westend" } penpal-runtime = { path = "../parachains/runtimes/testing/penpal" } jsonrpsee = { version = "0.16.2", features = ["server"] } @@ -121,22 +115,16 @@ wait-timeout = "0.2" [features] default = [] runtime-benchmarks = [ - "asset-hub-kusama-runtime/runtime-benchmarks", - "asset-hub-polkadot-runtime/runtime-benchmarks", "asset-hub-rococo-runtime/runtime-benchmarks", "asset-hub-westend-runtime/runtime-benchmarks", - "bridge-hub-kusama-runtime/runtime-benchmarks", - "bridge-hub-polkadot-runtime/runtime-benchmarks", "bridge-hub-rococo-runtime/runtime-benchmarks", "bridge-hub-westend-runtime/runtime-benchmarks", - "collectives-polkadot-runtime/runtime-benchmarks", "collectives-westend-runtime/runtime-benchmarks", "contracts-rococo-runtime/runtime-benchmarks", "cumulus-primitives-core/runtime-benchmarks", "frame-benchmarking-cli/runtime-benchmarks", "frame-benchmarking/runtime-benchmarks", "frame-support/runtime-benchmarks", - "glutton-runtime/runtime-benchmarks", "glutton-westend-runtime/runtime-benchmarks", "parachains-common/runtime-benchmarks", "penpal-runtime/runtime-benchmarks", @@ -148,20 +136,14 @@ runtime-benchmarks = [ "sp-runtime/runtime-benchmarks", ] try-runtime = [ - "asset-hub-kusama-runtime/try-runtime", - "asset-hub-polkadot-runtime/try-runtime", "asset-hub-rococo-runtime/try-runtime", "asset-hub-westend-runtime/try-runtime", - "bridge-hub-kusama-runtime/try-runtime", - "bridge-hub-polkadot-runtime/try-runtime", "bridge-hub-rococo-runtime/try-runtime", "bridge-hub-westend-runtime/try-runtime", - "collectives-polkadot-runtime/try-runtime", "collectives-westend-runtime/try-runtime", "contracts-rococo-runtime/try-runtime", "frame-support/try-runtime", "frame-try-runtime/try-runtime", - "glutton-runtime/try-runtime", "glutton-westend-runtime/try-runtime", "pallet-transaction-payment/try-runtime", "penpal-runtime/try-runtime", diff --git a/cumulus/polkadot-parachain/src/chain_spec/asset_hubs.rs b/cumulus/polkadot-parachain/src/chain_spec/asset_hubs.rs index 2988d6af0d1..f889e05a166 100644 --- a/cumulus/polkadot-parachain/src/chain_spec/asset_hubs.rs +++ b/cumulus/polkadot-parachain/src/chain_spec/asset_hubs.rs @@ -20,35 +20,15 @@ use crate::chain_spec::{ }; use cumulus_primitives_core::ParaId; use hex_literal::hex; -use parachains_common::{AccountId, AssetHubPolkadotAuraId, AuraId, Balance as AssetHubBalance}; +use parachains_common::{AccountId, AuraId, Balance as AssetHubBalance}; use sc_service::ChainType; use sp_core::{crypto::UncheckedInto, sr25519}; -const ASSET_HUB_POLKADOT_ED: AssetHubBalance = - parachains_common::polkadot::currency::EXISTENTIAL_DEPOSIT; -const ASSET_HUB_KUSAMA_ED: AssetHubBalance = - parachains_common::kusama::currency::EXISTENTIAL_DEPOSIT; const ASSET_HUB_WESTEND_ED: AssetHubBalance = parachains_common::westend::currency::EXISTENTIAL_DEPOSIT; const ASSET_HUB_ROCOCO_ED: AssetHubBalance = parachains_common::westend::currency::EXISTENTIAL_DEPOSIT; -/// Generate the session keys from individual elements. -/// -/// The input must be a tuple of individual keys (a single arg for now since we have just one key). 
-pub fn asset_hub_polkadot_session_keys( - keys: AssetHubPolkadotAuraId, -) -> asset_hub_polkadot_runtime::SessionKeys { - asset_hub_polkadot_runtime::SessionKeys { aura: keys } -} - -/// Generate the session keys from individual elements. -/// -/// The input must be a tuple of individual keys (a single arg for now since we have just one key). -pub fn asset_hub_kusama_session_keys(keys: AuraId) -> asset_hub_kusama_runtime::SessionKeys { - asset_hub_kusama_runtime::SessionKeys { aura: keys } -} - /// Generate the session keys from individual elements. /// /// The input must be a tuple of individual keys (a single arg for now since we have just one key). @@ -63,341 +43,6 @@ pub fn asset_hub_westend_session_keys(keys: AuraId) -> asset_hub_westend_runtime asset_hub_westend_runtime::SessionKeys { aura: keys } } -pub fn asset_hub_polkadot_development_config() -> GenericChainSpec { - let mut properties = sc_chain_spec::Properties::new(); - properties.insert("ss58Format".into(), 0.into()); - properties.insert("tokenSymbol".into(), "DOT".into()); - properties.insert("tokenDecimals".into(), 10.into()); - - GenericChainSpec::builder( - asset_hub_polkadot_runtime::WASM_BINARY - .expect("WASM binary was not built, please build it!"), - Extensions { relay_chain: "polkadot-dev".into(), para_id: 1000 }, - ) - .with_name("Polkadot Asset Hub Development") - .with_id("asset-hub-polkadot-dev") - .with_chain_type(ChainType::Local) - .with_genesis_config_patch(asset_hub_polkadot_genesis( - // initial collators. - vec![( - get_account_id_from_seed::("Alice"), - get_collator_keys_from_seed::("Alice"), - )], - vec![ - get_account_id_from_seed::("Alice"), - get_account_id_from_seed::("Bob"), - get_account_id_from_seed::("Alice//stash"), - get_account_id_from_seed::("Bob//stash"), - ], - 1000.into(), - )) - .with_properties(properties) - .build() -} - -pub fn asset_hub_polkadot_local_config() -> GenericChainSpec { - let mut properties = sc_chain_spec::Properties::new(); - properties.insert("ss58Format".into(), 0.into()); - properties.insert("tokenSymbol".into(), "DOT".into()); - properties.insert("tokenDecimals".into(), 10.into()); - - GenericChainSpec::builder( - asset_hub_polkadot_runtime::WASM_BINARY - .expect("WASM binary was not built, please build it!"), - Extensions { relay_chain: "polkadot-local".into(), para_id: 1000 }, - ) - .with_name("Polkadot Asset Hub Local") - .with_id("asset-hub-polkadot-local") - .with_chain_type(ChainType::Local) - .with_genesis_config_patch(asset_hub_polkadot_genesis( - // initial collators. - vec![ - ( - get_account_id_from_seed::("Alice"), - get_collator_keys_from_seed::("Alice"), - ), - ( - get_account_id_from_seed::("Bob"), - get_collator_keys_from_seed::("Bob"), - ), - ], - vec![ - get_account_id_from_seed::("Alice"), - get_account_id_from_seed::("Bob"), - get_account_id_from_seed::("Charlie"), - get_account_id_from_seed::("Dave"), - get_account_id_from_seed::("Eve"), - get_account_id_from_seed::("Ferdie"), - get_account_id_from_seed::("Alice//stash"), - get_account_id_from_seed::("Bob//stash"), - get_account_id_from_seed::("Charlie//stash"), - get_account_id_from_seed::("Dave//stash"), - get_account_id_from_seed::("Eve//stash"), - get_account_id_from_seed::("Ferdie//stash"), - ], - 1000.into(), - )) - .with_boot_nodes(Vec::new()) - .with_properties(properties) - .build() -} - -// Not used for syncing, but just to determine the genesis values set for the upgrade from shell. 
-pub fn asset_hub_polkadot_config() -> GenericChainSpec { - let mut properties = sc_chain_spec::Properties::new(); - properties.insert("ss58Format".into(), 0.into()); - properties.insert("tokenSymbol".into(), "DOT".into()); - properties.insert("tokenDecimals".into(), 10.into()); - - GenericChainSpec::builder( - asset_hub_polkadot_runtime::WASM_BINARY - .expect("WASM binary was not built, please build it!"), - Extensions { relay_chain: "polkadot".into(), para_id: 1000 }, - ) - .with_name("Polkadot Asset Hub") - .with_id("asset-hub-polkadot") - .with_chain_type(ChainType::Live) - .with_genesis_config_patch(asset_hub_polkadot_genesis( - // initial collators. - vec![ - ( - hex!("4c3d674d2a01060f0ded218e5dcc6f90c1726f43df79885eb3e22d97a20d5421").into(), - hex!("4c3d674d2a01060f0ded218e5dcc6f90c1726f43df79885eb3e22d97a20d5421") - .unchecked_into(), - ), - ( - hex!("c7d7d38d16bc23c6321152c50306212dc22c0efc04a2e52b5cccfc31ab3d7811").into(), - hex!("c7d7d38d16bc23c6321152c50306212dc22c0efc04a2e52b5cccfc31ab3d7811") - .unchecked_into(), - ), - ( - hex!("c5c07ba203d7375675f5c1ebe70f0a5bb729ae57b48bcc877fcc2ab21309b762").into(), - hex!("c5c07ba203d7375675f5c1ebe70f0a5bb729ae57b48bcc877fcc2ab21309b762") - .unchecked_into(), - ), - ( - hex!("0b2d0013fb974794bd7aa452465b567d48ef70373fe231a637c1fb7c547e85b3").into(), - hex!("0b2d0013fb974794bd7aa452465b567d48ef70373fe231a637c1fb7c547e85b3") - .unchecked_into(), - ), - ], - vec![], - 1000u32.into(), - )) - .with_boot_nodes(vec![ - "/ip4/34.65.251.121/tcp/30334/p2p/12D3KooWG3GrM6XKMM4gp3cvemdwUvu96ziYoJmqmetLZBXE8bSa" - .parse() - .unwrap(), - "/ip4/34.65.35.228/tcp/30334/p2p/12D3KooWMRyTLrCEPcAQD6c4EnudL3vVzg9zji3whvsMYPUYevpq" - .parse() - .unwrap(), - "/ip4/34.83.247.146/tcp/30334/p2p/12D3KooWE4jFh5FpJDkWVZhnWtFnbSqRhdjvC7Dp9b8b3FTuubQC" - .parse() - .unwrap(), - "/ip4/104.199.117.230/tcp/30334/p2p/12D3KooWG9R8pVXKumVo2rdkeVD4j5PVhRTqmYgLHY3a4yPYgLqM" - .parse() - .unwrap(), - ]) - .with_properties(properties) - .build() -} - -fn asset_hub_polkadot_genesis( - invulnerables: Vec<(AccountId, AssetHubPolkadotAuraId)>, - endowed_accounts: Vec, - id: ParaId, -) -> serde_json::Value { - serde_json::json!( { - "balances": { - "balances": endowed_accounts - .iter() - .cloned() - .map(|k| (k, ASSET_HUB_POLKADOT_ED * 4096)) - .collect::>(), - }, - "parachainInfo": { - "parachainId": id, - }, - "collatorSelection": { - "invulnerables": invulnerables.iter().cloned().map(|(acc, _)| acc).collect::>(), - "candidacyBond": ASSET_HUB_POLKADOT_ED * 16, - }, - "session": { - "keys": invulnerables - .into_iter() - .map(|(acc, aura)| { - ( - acc.clone(), // account id - acc, // validator id - asset_hub_polkadot_session_keys(aura), // session keys - ) - }) - .collect::>(), - }, - "polkadotXcm": { - "safeXcmVersion": Some(SAFE_XCM_VERSION), - } - }) -} - -pub fn asset_hub_kusama_development_config() -> GenericChainSpec { - let mut properties = sc_chain_spec::Properties::new(); - properties.insert("ss58Format".into(), 2.into()); - properties.insert("tokenSymbol".into(), "KSM".into()); - properties.insert("tokenDecimals".into(), 12.into()); - - GenericChainSpec::builder( - asset_hub_kusama_runtime::WASM_BINARY.expect("WASM binary was not built, please build it!"), - Extensions { relay_chain: "kusama-dev".into(), para_id: 1000 }, - ) - .with_name("Kusama Asset Hub Development") - .with_id("asset-hub-kusama-dev") - .with_chain_type(ChainType::Local) - .with_genesis_config_patch(asset_hub_kusama_genesis( - // initial collators. 
- vec![( - get_account_id_from_seed::("Alice"), - get_collator_keys_from_seed::("Alice"), - )], - vec![ - get_account_id_from_seed::("Alice"), - get_account_id_from_seed::("Bob"), - get_account_id_from_seed::("Alice//stash"), - get_account_id_from_seed::("Bob//stash"), - ], - 1000.into(), - )) - .with_properties(properties) - .build() -} - -pub fn asset_hub_kusama_local_config() -> GenericChainSpec { - let mut properties = sc_chain_spec::Properties::new(); - properties.insert("ss58Format".into(), 2.into()); - properties.insert("tokenSymbol".into(), "KSM".into()); - properties.insert("tokenDecimals".into(), 12.into()); - - GenericChainSpec::builder( - asset_hub_kusama_runtime::WASM_BINARY.expect("WASM binary was not built, please build it!"), - Extensions { relay_chain: "kusama-local".into(), para_id: 1000 }, - ) - .with_name("Kusama Asset Hub Local") - .with_id("asset-hub-kusama-local") - .with_chain_type(ChainType::Local) - .with_genesis_config_patch(asset_hub_kusama_genesis( - // initial collators. - vec![ - ( - get_account_id_from_seed::("Alice"), - get_collator_keys_from_seed::("Alice"), - ), - ( - get_account_id_from_seed::("Bob"), - get_collator_keys_from_seed::("Bob"), - ), - ], - vec![ - get_account_id_from_seed::("Alice"), - get_account_id_from_seed::("Bob"), - get_account_id_from_seed::("Charlie"), - get_account_id_from_seed::("Dave"), - get_account_id_from_seed::("Eve"), - get_account_id_from_seed::("Ferdie"), - get_account_id_from_seed::("Alice//stash"), - get_account_id_from_seed::("Bob//stash"), - get_account_id_from_seed::("Charlie//stash"), - get_account_id_from_seed::("Dave//stash"), - get_account_id_from_seed::("Eve//stash"), - get_account_id_from_seed::("Ferdie//stash"), - ], - 1000.into(), - )) - .with_properties(properties) - .build() -} - -pub fn asset_hub_kusama_config() -> GenericChainSpec { - let mut properties = sc_chain_spec::Properties::new(); - properties.insert("ss58Format".into(), 2.into()); - properties.insert("tokenSymbol".into(), "KSM".into()); - properties.insert("tokenDecimals".into(), 12.into()); - - GenericChainSpec::builder( - asset_hub_kusama_runtime::WASM_BINARY.expect("WASM binary was not built, please build it!"), - Extensions { relay_chain: "kusama".into(), para_id: 1000 }, - ) - .with_name("Kusama Asset Hub") - .with_id("asset-hub-kusama") - .with_chain_type(ChainType::Live) - .with_genesis_config_patch(asset_hub_kusama_genesis( - // initial collators. 
- vec![ - ( - hex!("50673d59020488a4ffc9d8c6de3062a65977046e6990915617f85fef6d349730").into(), - hex!("50673d59020488a4ffc9d8c6de3062a65977046e6990915617f85fef6d349730") - .unchecked_into(), - ), - ( - hex!("fe8102dbc244e7ea2babd9f53236d67403b046154370da5c3ea99def0bd0747a").into(), - hex!("fe8102dbc244e7ea2babd9f53236d67403b046154370da5c3ea99def0bd0747a") - .unchecked_into(), - ), - ( - hex!("38144b5398e5d0da5ec936a3af23f5a96e782f676ab19d45f29075ee92eca76a").into(), - hex!("38144b5398e5d0da5ec936a3af23f5a96e782f676ab19d45f29075ee92eca76a") - .unchecked_into(), - ), - ( - hex!("3253947640e309120ae70fa458dcacb915e2ddd78f930f52bd3679ec63fc4415").into(), - hex!("3253947640e309120ae70fa458dcacb915e2ddd78f930f52bd3679ec63fc4415") - .unchecked_into(), - ), - ], - Vec::new(), - 1000.into(), - )) - .with_properties(properties) - .build() -} - -fn asset_hub_kusama_genesis( - invulnerables: Vec<(AccountId, AuraId)>, - endowed_accounts: Vec, - id: ParaId, -) -> serde_json::Value { - serde_json::json!( { - "balances": { - "balances": endowed_accounts - .iter() - .cloned() - .map(|k| (k, ASSET_HUB_KUSAMA_ED * 524_288)) - .collect::>(), - }, - "parachainInfo": { - "parachainId": id, - }, - "collatorSelection": { - "invulnerables": invulnerables.iter().cloned().map(|(acc, _)| acc).collect::>(), - "candidacyBond": ASSET_HUB_KUSAMA_ED * 16, - }, - "session": { - "keys": invulnerables - .into_iter() - .map(|(acc, aura)| { - ( - acc.clone(), // account id - acc, // validator id - asset_hub_kusama_session_keys(aura), // session keys - ) - }) - .collect::>(), - }, - "polkadotXcm": { - "safeXcmVersion": Some(SAFE_XCM_VERSION), - }, - }) -} - pub fn asset_hub_westend_development_config() -> GenericChainSpec { let mut properties = sc_chain_spec::Properties::new(); properties.insert("tokenSymbol".into(), "WND".into()); diff --git a/cumulus/polkadot-parachain/src/chain_spec/bridge_hubs.rs b/cumulus/polkadot-parachain/src/chain_spec/bridge_hubs.rs index 377ceb20e1e..8dab692c1cd 100644 --- a/cumulus/polkadot-parachain/src/chain_spec/bridge_hubs.rs +++ b/cumulus/polkadot-parachain/src/chain_spec/bridge_hubs.rs @@ -24,21 +24,14 @@ use std::str::FromStr; /// Collects all supported BridgeHub configurations #[derive(Debug, PartialEq)] pub enum BridgeHubRuntimeType { + Kusama, + Polkadot, + Rococo, RococoLocal, // used by benchmarks RococoDevelopment, - Kusama, - KusamaLocal, - // used by benchmarks - KusamaDevelopment, - - Polkadot, - PolkadotLocal, - // used by benchmarks - PolkadotDevelopment, - Westend, WestendLocal, // used by benchmarks @@ -51,12 +44,7 @@ impl FromStr for BridgeHubRuntimeType { fn from_str(value: &str) -> Result { match value { polkadot::BRIDGE_HUB_POLKADOT => Ok(BridgeHubRuntimeType::Polkadot), - polkadot::BRIDGE_HUB_POLKADOT_LOCAL => Ok(BridgeHubRuntimeType::PolkadotLocal), - polkadot::BRIDGE_HUB_POLKADOT_DEVELOPMENT => - Ok(BridgeHubRuntimeType::PolkadotDevelopment), kusama::BRIDGE_HUB_KUSAMA => Ok(BridgeHubRuntimeType::Kusama), - kusama::BRIDGE_HUB_KUSAMA_LOCAL => Ok(BridgeHubRuntimeType::KusamaLocal), - kusama::BRIDGE_HUB_KUSAMA_DEVELOPMENT => Ok(BridgeHubRuntimeType::KusamaDevelopment), westend::BRIDGE_HUB_WESTEND => Ok(BridgeHubRuntimeType::Westend), westend::BRIDGE_HUB_WESTEND_LOCAL => Ok(BridgeHubRuntimeType::WestendLocal), westend::BRIDGE_HUB_WESTEND_DEVELOPMENT => Ok(BridgeHubRuntimeType::WestendDevelopment), @@ -76,33 +64,9 @@ impl BridgeHubRuntimeType { BridgeHubRuntimeType::Polkadot => Ok(Box::new(GenericChainSpec::from_json_bytes( 
&include_bytes!("../../chain-specs/bridge-hub-polkadot.json")[..], )?)), - BridgeHubRuntimeType::PolkadotLocal => Ok(Box::new(polkadot::local_config( - polkadot::BRIDGE_HUB_POLKADOT_LOCAL, - "Polkadot BridgeHub Local", - "polkadot-local", - ParaId::new(1002), - ))), - BridgeHubRuntimeType::PolkadotDevelopment => Ok(Box::new(polkadot::local_config( - polkadot::BRIDGE_HUB_POLKADOT_DEVELOPMENT, - "Polkadot BridgeHub Development", - "polkadot-dev", - ParaId::new(1002), - ))), BridgeHubRuntimeType::Kusama => Ok(Box::new(GenericChainSpec::from_json_bytes( &include_bytes!("../../chain-specs/bridge-hub-kusama.json")[..], )?)), - BridgeHubRuntimeType::KusamaLocal => Ok(Box::new(kusama::local_config( - kusama::BRIDGE_HUB_KUSAMA_LOCAL, - "Kusama BridgeHub Local", - "kusama-local", - ParaId::new(1003), - ))), - BridgeHubRuntimeType::KusamaDevelopment => Ok(Box::new(kusama::local_config( - kusama::BRIDGE_HUB_KUSAMA_DEVELOPMENT, - "Kusama BridgeHub Development", - "kusama-dev", - ParaId::new(1003), - ))), BridgeHubRuntimeType::Westend => Ok(Box::new(GenericChainSpec::from_json_bytes( &include_bytes!("../../chain-specs/bridge-hub-westend.json")[..], )?)), @@ -273,109 +237,7 @@ pub mod rococo { /// Sub-module for Kusama setup pub mod kusama { - use super::{BridgeHubBalance, ParaId}; - use crate::chain_spec::{ - get_account_id_from_seed, get_collator_keys_from_seed, Extensions, GenericChainSpec, - SAFE_XCM_VERSION, - }; - use parachains_common::{AccountId, AuraId}; - use sc_chain_spec::ChainType; - use sp_core::sr25519; - pub(crate) const BRIDGE_HUB_KUSAMA: &str = "bridge-hub-kusama"; - pub(crate) const BRIDGE_HUB_KUSAMA_LOCAL: &str = "bridge-hub-kusama-local"; - pub(crate) const BRIDGE_HUB_KUSAMA_DEVELOPMENT: &str = "bridge-hub-kusama-dev"; - const BRIDGE_HUB_KUSAMA_ED: BridgeHubBalance = - parachains_common::kusama::currency::EXISTENTIAL_DEPOSIT; - - pub fn local_config( - id: &str, - chain_name: &str, - relay_chain: &str, - para_id: ParaId, - ) -> GenericChainSpec { - let mut properties = sc_chain_spec::Properties::new(); - properties.insert("ss58Format".into(), 2.into()); - properties.insert("tokenSymbol".into(), "KSM".into()); - properties.insert("tokenDecimals".into(), 12.into()); - - GenericChainSpec::builder( - bridge_hub_kusama_runtime::WASM_BINARY - .expect("WASM binary was not built, please build it!"), - Extensions { relay_chain: relay_chain.to_string(), para_id: para_id.into() }, - ) - .with_name(chain_name) - .with_id(super::ensure_id(id).expect("invalid id")) - .with_chain_type(ChainType::Local) - .with_genesis_config_patch(genesis( - // initial collators. 
- vec![ - ( - get_account_id_from_seed::("Alice"), - get_collator_keys_from_seed::("Alice"), - ), - ( - get_account_id_from_seed::("Bob"), - get_collator_keys_from_seed::("Bob"), - ), - ], - vec![ - get_account_id_from_seed::("Alice"), - get_account_id_from_seed::("Bob"), - get_account_id_from_seed::("Charlie"), - get_account_id_from_seed::("Dave"), - get_account_id_from_seed::("Eve"), - get_account_id_from_seed::("Ferdie"), - get_account_id_from_seed::("Alice//stash"), - get_account_id_from_seed::("Bob//stash"), - get_account_id_from_seed::("Charlie//stash"), - get_account_id_from_seed::("Dave//stash"), - get_account_id_from_seed::("Eve//stash"), - get_account_id_from_seed::("Ferdie//stash"), - ], - para_id, - )) - .with_properties(properties) - .build() - } - - fn genesis( - invulnerables: Vec<(AccountId, AuraId)>, - endowed_accounts: Vec, - id: ParaId, - ) -> serde_json::Value { - serde_json::json!({ - "balances": { - "balances": endowed_accounts - .iter() - .cloned() - .map(|k| (k, BRIDGE_HUB_KUSAMA_ED * 524_288)) - .collect::>(), - }, - "parachainInfo": { - "parachainId": id, - }, - "collatorSelection": { - "invulnerables": invulnerables.iter().cloned().map(|(acc, _)| acc).collect::>(), - "candidacyBond": BRIDGE_HUB_KUSAMA_ED * 16, - }, - "session": { - "keys": invulnerables - .into_iter() - .map(|(acc, aura)| { - ( - acc.clone(), // account id - acc, // validator id - bridge_hub_kusama_runtime::SessionKeys { aura }, // session keys - ) - }) - .collect::>(), - }, - "polkadotXcm": { - "safeXcmVersion": Some(SAFE_XCM_VERSION), - } - }) - } } /// Sub-module for Westend setup. @@ -491,107 +353,5 @@ pub mod westend { /// Sub-module for Polkadot setup pub mod polkadot { - use super::{BridgeHubBalance, ParaId}; - use crate::chain_spec::{ - get_account_id_from_seed, get_collator_keys_from_seed, Extensions, GenericChainSpec, - SAFE_XCM_VERSION, - }; - use parachains_common::{AccountId, AuraId}; - use sc_chain_spec::ChainType; - use sp_core::sr25519; - pub(crate) const BRIDGE_HUB_POLKADOT: &str = "bridge-hub-polkadot"; - pub(crate) const BRIDGE_HUB_POLKADOT_LOCAL: &str = "bridge-hub-polkadot-local"; - pub(crate) const BRIDGE_HUB_POLKADOT_DEVELOPMENT: &str = "bridge-hub-polkadot-dev"; - const BRIDGE_HUB_POLKADOT_ED: BridgeHubBalance = - parachains_common::polkadot::currency::EXISTENTIAL_DEPOSIT; - - pub fn local_config( - id: &str, - chain_name: &str, - relay_chain: &str, - para_id: ParaId, - ) -> GenericChainSpec { - let mut properties = sc_chain_spec::Properties::new(); - properties.insert("ss58Format".into(), 0.into()); - properties.insert("tokenSymbol".into(), "DOT".into()); - properties.insert("tokenDecimals".into(), 10.into()); - - GenericChainSpec::builder( - bridge_hub_polkadot_runtime::WASM_BINARY - .expect("WASM binary was not built, please build it!"), - Extensions { relay_chain: relay_chain.to_string(), para_id: para_id.into() }, - ) - .with_name(chain_name) - .with_id(super::ensure_id(id).expect("invalid id")) - .with_chain_type(ChainType::Local) - .with_genesis_config_patch(genesis( - // initial collators. 
- vec![ - ( - get_account_id_from_seed::("Alice"), - get_collator_keys_from_seed::("Alice"), - ), - ( - get_account_id_from_seed::("Bob"), - get_collator_keys_from_seed::("Bob"), - ), - ], - vec![ - get_account_id_from_seed::("Alice"), - get_account_id_from_seed::("Bob"), - get_account_id_from_seed::("Charlie"), - get_account_id_from_seed::("Dave"), - get_account_id_from_seed::("Eve"), - get_account_id_from_seed::("Ferdie"), - get_account_id_from_seed::("Alice//stash"), - get_account_id_from_seed::("Bob//stash"), - get_account_id_from_seed::("Charlie//stash"), - get_account_id_from_seed::("Dave//stash"), - get_account_id_from_seed::("Eve//stash"), - get_account_id_from_seed::("Ferdie//stash"), - ], - para_id, - )) - .with_properties(properties) - .build() - } - - fn genesis( - invulnerables: Vec<(AccountId, AuraId)>, - endowed_accounts: Vec, - id: ParaId, - ) -> serde_json::Value { - serde_json::json!({ - "balances": { - "balances": endowed_accounts - .iter() - .cloned() - .map(|k| (k, BRIDGE_HUB_POLKADOT_ED * 4096)) - .collect::>(), - }, - "parachainInfo": { - "parachainId": id, - }, - "collatorSelection": { - "invulnerables": invulnerables.iter().cloned().map(|(acc, _)| acc).collect::>(), - "candidacyBond": BRIDGE_HUB_POLKADOT_ED * 16, - }, - "session": { - "keys": invulnerables - .into_iter() - .map(|(acc, aura)| { - ( - acc.clone(), // account id - acc, // validator id - bridge_hub_polkadot_runtime::SessionKeys { aura }, // session keys - ) - }) - .collect::>(), - }, - "polkadotXcm": { - "safeXcmVersion": Some(SAFE_XCM_VERSION), - } - }) - } } diff --git a/cumulus/polkadot-parachain/src/chain_spec/collectives.rs b/cumulus/polkadot-parachain/src/chain_spec/collectives.rs index ac75a40ebde..dd67bf975f7 100644 --- a/cumulus/polkadot-parachain/src/chain_spec/collectives.rs +++ b/cumulus/polkadot-parachain/src/chain_spec/collectives.rs @@ -23,143 +23,9 @@ use parachains_common::{AccountId, AuraId, Balance as CollectivesBalance}; use sc_service::ChainType; use sp_core::sr25519; -const COLLECTIVES_POLKADOT_ED: CollectivesBalance = - parachains_common::polkadot::currency::EXISTENTIAL_DEPOSIT; const COLLECTIVES_WESTEND_ED: CollectivesBalance = parachains_common::westend::currency::EXISTENTIAL_DEPOSIT; -/// Generate the session keys from individual elements. -/// -/// The input must be a tuple of individual keys (a single arg for now since we have just one key). -pub fn collectives_polkadot_session_keys( - keys: AuraId, -) -> collectives_polkadot_runtime::SessionKeys { - collectives_polkadot_runtime::SessionKeys { aura: keys } -} - -pub fn collectives_polkadot_development_config() -> GenericChainSpec { - let mut properties = sc_chain_spec::Properties::new(); - properties.insert("ss58Format".into(), 0.into()); - properties.insert("tokenSymbol".into(), "DOT".into()); - properties.insert("tokenDecimals".into(), 10.into()); - - GenericChainSpec::builder( - collectives_polkadot_runtime::WASM_BINARY - .expect("WASM binary was not built, please build it!"), - Extensions { relay_chain: "polkadot-dev".into(), para_id: 1002 }, - ) - .with_name("Polkadot Collectives Development") - .with_id("collectives_polkadot_dev") - .with_chain_type(ChainType::Local) - .with_genesis_config_patch(collectives_polkadot_genesis( - // initial collators. 
- vec![( - get_account_id_from_seed::("Alice"), - get_collator_keys_from_seed::("Alice"), - )], - vec![ - get_account_id_from_seed::("Alice"), - get_account_id_from_seed::("Bob"), - get_account_id_from_seed::("Alice//stash"), - get_account_id_from_seed::("Bob//stash"), - ], - // 1002 avoids a potential collision with Kusama-1001 (Encointer) should there ever - // be a collective para on Kusama. - 1002.into(), - )) - .with_boot_nodes(Vec::new()) - .with_properties(properties) - .build() -} - -/// Collectives Polkadot Local Config. -pub fn collectives_polkadot_local_config() -> GenericChainSpec { - let mut properties = sc_chain_spec::Properties::new(); - properties.insert("ss58Format".into(), 0.into()); - properties.insert("tokenSymbol".into(), "DOT".into()); - properties.insert("tokenDecimals".into(), 10.into()); - - GenericChainSpec::builder( - collectives_polkadot_runtime::WASM_BINARY - .expect("WASM binary was not built, please build it!"), - Extensions { relay_chain: "polkadot-local".into(), para_id: 1002 }, - ) - .with_name("Polkadot Collectives Local") - .with_id("collectives_polkadot_local") - .with_chain_type(ChainType::Local) - .with_genesis_config_patch(collectives_polkadot_genesis( - // initial collators. - vec![ - ( - get_account_id_from_seed::("Alice"), - get_collator_keys_from_seed::("Alice"), - ), - ( - get_account_id_from_seed::("Bob"), - get_collator_keys_from_seed::("Bob"), - ), - ], - vec![ - get_account_id_from_seed::("Alice"), - get_account_id_from_seed::("Bob"), - get_account_id_from_seed::("Charlie"), - get_account_id_from_seed::("Dave"), - get_account_id_from_seed::("Eve"), - get_account_id_from_seed::("Ferdie"), - get_account_id_from_seed::("Alice//stash"), - get_account_id_from_seed::("Bob//stash"), - get_account_id_from_seed::("Charlie//stash"), - get_account_id_from_seed::("Dave//stash"), - get_account_id_from_seed::("Eve//stash"), - get_account_id_from_seed::("Ferdie//stash"), - ], - 1002.into(), - )) - .with_boot_nodes(Vec::new()) - .with_properties(properties) - .build() -} - -fn collectives_polkadot_genesis( - invulnerables: Vec<(AccountId, AuraId)>, - endowed_accounts: Vec, - id: ParaId, -) -> serde_json::Value { - serde_json::json!( { - "balances": { - "balances": endowed_accounts - .iter() - .cloned() - .map(|k| (k, COLLECTIVES_POLKADOT_ED * 4096)) - .collect::>(), - }, - "parachainInfo": { - "parachainId": id, - }, - "collatorSelection": { - "invulnerables": invulnerables.iter().cloned().map(|(acc, _)| acc).collect::>(), - "candidacyBond": COLLECTIVES_POLKADOT_ED * 16, - }, - "session": { - "keys": invulnerables - .into_iter() - .map(|(acc, aura)| { - ( - acc.clone(), // account id - acc, // validator id - collectives_polkadot_session_keys(aura), // session keys - ) - }) - .collect::>(), - }, - // no need to pass anything to aura, in fact it will panic if we do. Session will take care - // of this. - "polkadotXcm": { - "safeXcmVersion": Some(SAFE_XCM_VERSION), - }, - }) -} - /// Generate the session keys from individual elements. /// /// The input must be a tuple of individual keys (a single arg for now since we have just one key). 
diff --git a/cumulus/polkadot-parachain/src/chain_spec/glutton.rs b/cumulus/polkadot-parachain/src/chain_spec/glutton.rs index 8eced8d8f81..77a4123b13e 100644 --- a/cumulus/polkadot-parachain/src/chain_spec/glutton.rs +++ b/cumulus/polkadot-parachain/src/chain_spec/glutton.rs @@ -22,62 +22,6 @@ use sp_core::sr25519; use super::get_collator_keys_from_seed; -pub fn glutton_development_config(para_id: ParaId) -> GenericChainSpec { - GenericChainSpec::builder( - glutton_runtime::WASM_BINARY.expect("WASM binary was not built, please build it!"), - Extensions { relay_chain: "kusama-dev".into(), para_id: para_id.into() }, - ) - .with_name("Glutton Development") - .with_id("glutton_dev") - .with_chain_type(ChainType::Local) - .with_genesis_config_patch(glutton_genesis( - para_id, - vec![get_collator_keys_from_seed::("Alice")], - )) - .build() -} - -pub fn glutton_local_config(para_id: ParaId) -> GenericChainSpec { - GenericChainSpec::builder( - glutton_runtime::WASM_BINARY.expect("WASM binary was not built, please build it!"), - Extensions { relay_chain: "kusama-local".into(), para_id: para_id.into() }, - ) - .with_name("Glutton Local") - .with_id("glutton_local") - .with_chain_type(ChainType::Local) - .with_genesis_config_patch(glutton_genesis( - para_id, - vec![ - get_collator_keys_from_seed::("Alice"), - get_collator_keys_from_seed::("Bob"), - ], - )) - .build() -} - -pub fn glutton_config(para_id: ParaId) -> GenericChainSpec { - let mut properties = sc_chain_spec::Properties::new(); - properties.insert("ss58Format".into(), 2.into()); - - GenericChainSpec::builder( - glutton_runtime::WASM_BINARY.expect("WASM binary was not built, please build it!"), - Extensions { relay_chain: "kusama".into(), para_id: para_id.into() }, - ) - .with_name(format!("Glutton {}", para_id).as_str()) - .with_id(format!("glutton-kusama-{}", para_id).as_str()) - .with_chain_type(ChainType::Live) - .with_genesis_config_patch(glutton_genesis( - para_id, - vec![ - get_collator_keys_from_seed::("Alice"), - get_collator_keys_from_seed::("Bob"), - ], - )) - .with_protocol_id(format!("glutton-kusama-{}", para_id).as_str()) - .with_properties(properties) - .build() -} - fn glutton_genesis(parachain_id: ParaId, collators: Vec) -> serde_json::Value { serde_json::json!( { "parachainInfo": { diff --git a/cumulus/polkadot-parachain/src/command.rs b/cumulus/polkadot-parachain/src/command.rs index f966a5db8f3..98dcc2fea4a 100644 --- a/cumulus/polkadot-parachain/src/command.rs +++ b/cumulus/polkadot-parachain/src/command.rs @@ -144,27 +144,11 @@ fn load_spec(id: &str) -> std::result::Result, String> { "seedling" => Box::new(chain_spec::seedling::get_seedling_chain_spec()), // -- Asset Hub Polkadot - "asset-hub-polkadot-dev" | "statemint-dev" => - Box::new(chain_spec::asset_hubs::asset_hub_polkadot_development_config()), - "asset-hub-polkadot-local" | "statemint-local" => - Box::new(chain_spec::asset_hubs::asset_hub_polkadot_local_config()), - // the chain spec as used for generating the upgrade genesis values - "asset-hub-polkadot-genesis" | "statemint-genesis" => - Box::new(chain_spec::asset_hubs::asset_hub_polkadot_config()), - // the shell-based chain spec as used for syncing "asset-hub-polkadot" | "statemint" => Box::new(GenericChainSpec::from_json_bytes( &include_bytes!("../chain-specs/asset-hub-polkadot.json")[..], )?), // -- Asset Hub Kusama - "asset-hub-kusama-dev" | "statemine-dev" => - Box::new(chain_spec::asset_hubs::asset_hub_kusama_development_config()), - "asset-hub-kusama-local" | "statemine-local" => - 
Box::new(chain_spec::asset_hubs::asset_hub_kusama_local_config()), - // the chain spec as used for generating the upgrade genesis values - "asset-hub-kusama-genesis" | "statemine-genesis" => - Box::new(chain_spec::asset_hubs::asset_hub_kusama_config()), - // the shell-based chain spec as used for syncing "asset-hub-kusama" | "statemine" => Box::new(GenericChainSpec::from_json_bytes( &include_bytes!("../chain-specs/asset-hub-kusama.json")[..], )?), @@ -195,13 +179,11 @@ fn load_spec(id: &str) -> std::result::Result, String> { )?), // -- Polkadot Collectives - "collectives-polkadot-dev" => - Box::new(chain_spec::collectives::collectives_polkadot_development_config()), - "collectives-polkadot-local" => - Box::new(chain_spec::collectives::collectives_polkadot_local_config()), "collectives-polkadot" => Box::new(GenericChainSpec::from_json_bytes( &include_bytes!("../chain-specs/collectives-polkadot.json")[..], )?), + + // -- Westend Collectives "collectives-westend-dev" => Box::new(chain_spec::collectives::collectives_westend_development_config()), "collectives-westend-local" => @@ -229,14 +211,14 @@ fn load_spec(id: &str) -> std::result::Result, String> { .expect("invalid value") .load_config()?, - // -- Penpall - "penpal-kusama" => Box::new(chain_spec::penpal::get_penpal_chain_spec( + // -- Penpal + "penpal-rococo" => Box::new(chain_spec::penpal::get_penpal_chain_spec( para_id.expect("Must specify parachain id"), - "kusama-local", + "rococo-local", )), - "penpal-polkadot" => Box::new(chain_spec::penpal::get_penpal_chain_spec( + "penpal-westend" => Box::new(chain_spec::penpal::get_penpal_chain_spec( para_id.expect("Must specify parachain id"), - "polkadot-local", + "westend-local", )), // -- Glutton Westend @@ -251,18 +233,6 @@ fn load_spec(id: &str) -> std::result::Result, String> { para_id.expect("Must specify parachain id"), )), - // -- Glutton - "glutton-kusama-dev" => Box::new(chain_spec::glutton::glutton_development_config( - para_id.expect("Must specify parachain id"), - )), - "glutton-kusama-local" => Box::new(chain_spec::glutton::glutton_local_config( - para_id.expect("Must specify parachain id"), - )), - // the chain spec as used for generating the upgrade genesis values - "glutton-kusama-genesis" => Box::new(chain_spec::glutton::glutton_config( - para_id.expect("Must specify parachain id"), - )), - // -- Fallback (generic chainspec) "" => { log::warn!("No ChainSpec.id specified, so using default one, based on rococo-parachain runtime"); @@ -760,18 +730,14 @@ pub fn run() -> Result<()> { .map(|r| r.0) .map_err(Into::into), Runtime::BridgeHub(bridge_hub_runtime_type) => match bridge_hub_runtime_type { -chain_spec::bridge_hubs::BridgeHubRuntimeType::Polkadot | - chain_spec::bridge_hubs::BridgeHubRuntimeType::PolkadotLocal | - chain_spec::bridge_hubs::BridgeHubRuntimeType::PolkadotDevelopment => + chain_spec::bridge_hubs::BridgeHubRuntimeType::Polkadot => crate::service::start_generic_aura_node::< RuntimeApi, AuraId, >(config, polkadot_config, collator_options, id, hwbench) .await .map(|r| r.0), - chain_spec::bridge_hubs::BridgeHubRuntimeType::Kusama | - chain_spec::bridge_hubs::BridgeHubRuntimeType::KusamaLocal | - chain_spec::bridge_hubs::BridgeHubRuntimeType::KusamaDevelopment => + chain_spec::bridge_hubs::BridgeHubRuntimeType::Kusama => crate::service::start_generic_aura_node::< RuntimeApi, AuraId, @@ -1052,12 +1018,6 @@ mod tests { ); assert_eq!(Runtime::Default, path.runtime()); - let path = store_configuration( - &temp_dir, - 
Box::new(crate::chain_spec::asset_hubs::asset_hub_kusama_local_config()), - ); - assert_eq!(Runtime::AssetHubKusama, path.runtime()); - let path = store_configuration( &temp_dir, Box::new(crate::chain_spec::contracts::contracts_rococo_local_config()), diff --git a/cumulus/polkadot-parachain/src/service.rs b/cumulus/polkadot-parachain/src/service.rs index b0fca518201..6280d86e9f9 100644 --- a/cumulus/polkadot-parachain/src/service.rs +++ b/cumulus/polkadot-parachain/src/service.rs @@ -96,36 +96,6 @@ impl sc_executor::NativeExecutionDispatch for ShellRuntimeExecutor { } } -/// Native Asset Hub Polkadot (Statemint) executor instance. -pub struct AssetHubPolkadotRuntimeExecutor; - -impl sc_executor::NativeExecutionDispatch for AssetHubPolkadotRuntimeExecutor { - type ExtendHostFunctions = frame_benchmarking::benchmarking::HostFunctions; - - fn dispatch(method: &str, data: &[u8]) -> Option> { - asset_hub_polkadot_runtime::api::dispatch(method, data) - } - - fn native_version() -> sc_executor::NativeVersion { - asset_hub_polkadot_runtime::native_version() - } -} - -/// Native Asset Hub Kusama (Statemine) executor instance. -pub struct AssetHubKusamaExecutor; - -impl sc_executor::NativeExecutionDispatch for AssetHubKusamaExecutor { - type ExtendHostFunctions = frame_benchmarking::benchmarking::HostFunctions; - - fn dispatch(method: &str, data: &[u8]) -> Option> { - asset_hub_kusama_runtime::api::dispatch(method, data) - } - - fn native_version() -> sc_executor::NativeVersion { - asset_hub_kusama_runtime::native_version() - } -} - /// Native Asset Hub Westend (Westmint) executor instance. pub struct AssetHubWestendExecutor; @@ -141,21 +111,6 @@ impl sc_executor::NativeExecutionDispatch for AssetHubWestendExecutor { } } -/// Native Polkadot Collectives executor instance. -pub struct CollectivesPolkadotRuntimeExecutor; - -impl sc_executor::NativeExecutionDispatch for CollectivesPolkadotRuntimeExecutor { - type ExtendHostFunctions = frame_benchmarking::benchmarking::HostFunctions; - - fn dispatch(method: &str, data: &[u8]) -> Option> { - collectives_polkadot_runtime::api::dispatch(method, data) - } - - fn native_version() -> sc_executor::NativeVersion { - collectives_polkadot_runtime::native_version() - } -} - /// Native Westend Collectives executor instance. pub struct CollectivesWestendRuntimeExecutor; @@ -171,36 +126,6 @@ impl sc_executor::NativeExecutionDispatch for CollectivesWestendRuntimeExecutor } } -/// Native BridgeHubPolkadot executor instance. -pub struct BridgeHubPolkadotRuntimeExecutor; - -impl sc_executor::NativeExecutionDispatch for BridgeHubPolkadotRuntimeExecutor { - type ExtendHostFunctions = frame_benchmarking::benchmarking::HostFunctions; - - fn dispatch(method: &str, data: &[u8]) -> Option> { - bridge_hub_polkadot_runtime::api::dispatch(method, data) - } - - fn native_version() -> sc_executor::NativeVersion { - bridge_hub_polkadot_runtime::native_version() - } -} - -/// Native BridgeHubKusama executor instance. -pub struct BridgeHubKusamaRuntimeExecutor; - -impl sc_executor::NativeExecutionDispatch for BridgeHubKusamaRuntimeExecutor { - type ExtendHostFunctions = frame_benchmarking::benchmarking::HostFunctions; - - fn dispatch(method: &str, data: &[u8]) -> Option> { - bridge_hub_kusama_runtime::api::dispatch(method, data) - } - - fn native_version() -> sc_executor::NativeVersion { - bridge_hub_kusama_runtime::native_version() - } -} - /// Native BridgeHubRococo executor instance. 
pub struct BridgeHubRococoRuntimeExecutor; @@ -246,21 +171,6 @@ impl sc_executor::NativeExecutionDispatch for GluttonWestendRuntimeExecutor { } } -/// Native Glutton executor instance. -pub struct GluttonRuntimeExecutor; - -impl sc_executor::NativeExecutionDispatch for GluttonRuntimeExecutor { - type ExtendHostFunctions = frame_benchmarking::benchmarking::HostFunctions; - - fn dispatch(method: &str, data: &[u8]) -> Option> { - shell_runtime::api::dispatch(method, data) - } - - fn native_version() -> sc_executor::NativeVersion { - shell_runtime::native_version() - } -} - /// Starts a `ServiceBuilder` for a full service. /// /// Use this macro if you don't actually need the full service, but just the builder in order to diff --git a/cumulus/polkadot-parachain/tests/benchmark_storage_works.rs b/cumulus/polkadot-parachain/tests/benchmark_storage_works.rs index c2850b64e45..c554b5b3d6b 100644 --- a/cumulus/polkadot-parachain/tests/benchmark_storage_works.rs +++ b/cumulus/polkadot-parachain/tests/benchmark_storage_works.rs @@ -24,7 +24,7 @@ use std::{ use tempfile::tempdir; /// The runtimes that this command supports. -static RUNTIMES: [&str; 3] = ["asset-hub-westend", "asset-hub-kusama", "asset-hub-polkadot"]; +static RUNTIMES: [&str; 1] = ["asset-hub-westend"]; /// The `benchmark storage` command works for the dev runtimes. #[test] diff --git a/cumulus/scripts/benchmarks.sh b/cumulus/scripts/benchmarks.sh index 7da18d9440e..58b8419bf4a 100755 --- a/cumulus/scripts/benchmarks.sh +++ b/cumulus/scripts/benchmarks.sh @@ -6,16 +6,7 @@ repeat=${3:-20} __dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" -${__dir}/benchmarks-ci.sh collectives collectives-polkadot target/$target $steps $repeat ${__dir}/benchmarks-ci.sh collectives collectives-westend target/$target $steps $repeat - -${__dir}/benchmarks-ci.sh assets asset-hub-kusama target/$target $steps $repeat -${__dir}/benchmarks-ci.sh assets asset-hub-polkadot target/$target $steps $repeat ${__dir}/benchmarks-ci.sh assets asset-hub-westend target/$target $steps $repeat - -${__dir}/benchmarks-ci.sh bridge-hubs bridge-hub-polkadot target/$target $steps $repeat -${__dir}/benchmarks-ci.sh bridge-hubs bridge-hub-kusama target/$target $steps $repeat ${__dir}/benchmarks-ci.sh bridge-hubs bridge-hub-rococo target/$target $steps $repeat - -${__dir}/benchmarks-ci.sh glutton glutton-kusama target/$target $steps $repeat ${__dir}/benchmarks-ci.sh glutton glutton-westend target/$target $steps $repeat diff --git a/cumulus/scripts/create_bridge_hub_kusama_spec.sh b/cumulus/scripts/create_bridge_hub_kusama_spec.sh deleted file mode 100755 index 813921b079a..00000000000 --- a/cumulus/scripts/create_bridge_hub_kusama_spec.sh +++ /dev/null @@ -1,108 +0,0 @@ -#!/usr/bin/env bash - -usage() { - echo Usage: - echo "$1 " - echo "$2 " - echo "e.g.: ./scripts/create_bridge_hub_kusama_spec.sh ./target/release/wbuild/bridge-hub-kusama-runtime/bridge_hub_kusama_runtime.compact.compressed.wasm 1002" - exit 1 -} - -if [ -z "$1" ]; then - usage -fi - -if [ -z "$2" ]; then - usage -fi - -set -e - -rt_path=$1 -para_id=$2 - -echo "Generating chain spec for runtime: $rt_path and para_id: $para_id" - -binary="./target/release/polkadot-parachain" - -# build the chain spec we'll manipulate -$binary build-spec --chain bridge-hub-kusama-dev > chain-spec-plain.json - -# convert runtime to hex -cat $rt_path | od -A n -v -t x1 | tr -d ' \n' > rt-hex.txt - -# replace the runtime in the spec with the given runtime and set some values to production -cat chain-spec-plain.json | jq --rawfile 
code rt-hex.txt '.genesis.runtime.system.code = ("0x" + $code)' \ - | jq '.name = "Kusama BridgeHub"' \ - | jq '.id = "bridge-hub-kusama"' \ - | jq '.chainType = "Live"' \ - | jq '.bootNodes = [ - "/dns/kusama-bridge-hub-collator-ew1-0.polkadot.io/tcp/30334/p2p/12D3KooWP2Gngt4tt2sz5BgDaAbMTxasPWk3V2Z99bQTmFcAorqa", - "/dns/kusama-bridge-hub-collator-ew1-1.polkadot.io/tcp/30334/p2p/12D3KooWMmL3FQuYmruBui1sbY4MwNmvicinrePi1Yq4QMRSYHoR", - "/dns/kusama-bridge-hub-collator-ue4-0.polkadot.io/tcp/30334/p2p/12D3KooWQpTocTck1tNBzMNTHJ3kSv4vzv8Yf9FpVkfGnungbez4", - "/dns/kusama-bridge-hub-collator-ue4-1.polkadot.io/tcp/30334/p2p/12D3KooWRgtJqKEaMi7hkU4VMiGhpHTJeL8N7JgL7d9gwooPv4eW", - - "/dns/kusama-bridge-hub-connect-ew1-0.polkadot.io/tcp/30334/p2p/12D3KooWPQQPivrqQ51kRTDc2R1mtqwKT4GGtk2rapkY4FrwHrEp", - "/dns/kusama-bridge-hub-connect-ew1-1.polkadot.io/tcp/30334/p2p/12D3KooWPcF9Yk4gYrMju9CyWCV69hAFXbYsnxCLogwLGu9QFTRn", - "/dns/kusama-bridge-hub-connect-ue4-0.polkadot.io/tcp/30334/p2p/12D3KooWMf1sVnJDTkKWtaThqvrgcSPLbfGXttSqbwhM2DJp9BUG", - "/dns/kusama-bridge-hub-connect-ue4-1.polkadot.io/tcp/30334/p2p/12D3KooWQaV7wMfNVKy2aMz4Lds3TTxgSDyZAUEnbAZMfD8rW3ow", - - "/dns/kusama-bridge-hub-connect-ew1-0.polkadot.io/tcp/443/wss/p2p/12D3KooWPQQPivrqQ51kRTDc2R1mtqwKT4GGtk2rapkY4FrwHrEp", - "/dns/kusama-bridge-hub-connect-ew1-1.polkadot.io/tcp/443/wss/p2p/12D3KooWPcF9Yk4gYrMju9CyWCV69hAFXbYsnxCLogwLGu9QFTRn", - "/dns/kusama-bridge-hub-connect-ue4-0.polkadot.io/tcp/443/wss/p2p/12D3KooWMf1sVnJDTkKWtaThqvrgcSPLbfGXttSqbwhM2DJp9BUG", - "/dns/kusama-bridge-hub-connect-ue4-1.polkadot.io/tcp/443/wss/p2p/12D3KooWQaV7wMfNVKy2aMz4Lds3TTxgSDyZAUEnbAZMfD8rW3ow" - - ]' \ - | jq '.relay_chain = "kusama"' \ - | jq --argjson para_id $para_id '.para_id = $para_id' \ - | jq --argjson para_id $para_id '.genesis.runtime.parachainInfo.parachainId = $para_id' \ - | jq '.genesis.runtime.balances.balances = []' \ - | jq '.genesis.runtime.collatorSelection.invulnerables = [ - "DQkekNBt8g6D7bPUEqhgfujADxzzfivr1qQZJkeGzAqnEzF", - "HbUc5qrLtKAZvasioiTSf1CunaN2SyEwvfsgMuYQjXA5sfk", - "JEe4NcVyuWFEwZe4WLfRtynDswyKgvLS8H8r4Wo9d3t61g1", - "FAe4DGhQHKTm35n5MgBFNBZvyEJcm7QAwgnVNQU8KXP2ixn" - ]' \ - | jq '.genesis.runtime.session.keys = [ - [ - "DQkekNBt8g6D7bPUEqhgfujADxzzfivr1qQZJkeGzAqnEzF", - "DQkekNBt8g6D7bPUEqhgfujADxzzfivr1qQZJkeGzAqnEzF", - { - "aura": "5E7AiV9ygGUcfdK3XVoJsew7fsu18uvKQHYhksE5PXDNfRL9" - } - ], - [ - "HbUc5qrLtKAZvasioiTSf1CunaN2SyEwvfsgMuYQjXA5sfk", - "HbUc5qrLtKAZvasioiTSf1CunaN2SyEwvfsgMuYQjXA5sfk", - { - "aura": "5CyXoMh8cA2MSk55JASpCfhCg44iSG5fBwmhvSfXUUS3uhPR" - } - ], - [ - "JEe4NcVyuWFEwZe4WLfRtynDswyKgvLS8H8r4Wo9d3t61g1", - "JEe4NcVyuWFEwZe4WLfRtynDswyKgvLS8H8r4Wo9d3t61g1", - { - "aura": "5Grj5pN52kKU61qK9qP5cf9ADuyowe2WVvYWxMNK1QqAM8qf" - } - ], - [ - "FAe4DGhQHKTm35n5MgBFNBZvyEJcm7QAwgnVNQU8KXP2ixn", - "FAe4DGhQHKTm35n5MgBFNBZvyEJcm7QAwgnVNQU8KXP2ixn", - { - "aura": "5EHTyftGjcHfe71VVuZqCeLbHNf4ptYzgdAMMyqpTNbs5Rrp" - } - ] - ]' \ - > edited-chain-spec-plain.json - -# build a raw spec -$binary build-spec --chain edited-chain-spec-plain.json --raw > chain-spec-raw.json -cp edited-chain-spec-plain.json bridge-hub-kusama-spec.json -cp chain-spec-raw.json ./parachains/chain-specs/bridge-hub-kusama.json -cp chain-spec-raw.json bridge-hub-kusama-spec-raw.json - -# build genesis data -$binary export-genesis-state --chain chain-spec-raw.json > bridge-hub-kusama-genesis-head-data - -# build genesis wasm -$binary export-genesis-wasm --chain chain-spec-raw.json > bridge-hub-kusama-wasm diff --git 
a/cumulus/scripts/create_bridge_hub_polkadot_spec.sh b/cumulus/scripts/create_bridge_hub_polkadot_spec.sh deleted file mode 100755 index 49bc9cee692..00000000000 --- a/cumulus/scripts/create_bridge_hub_polkadot_spec.sh +++ /dev/null @@ -1,108 +0,0 @@ -#!/usr/bin/env bash - -usage() { - echo Usage: - echo "$1 " - echo "$2 " - echo "e.g.: ./scripts/create_bridge_hub_polkadot_spec.sh ./target/release/wbuild/bridge-hub-polkadot-runtime/bridge_hub_polkadot_runtime.compact.compressed.wasm 1002" - exit 1 -} - -if [ -z "$1" ]; then - usage -fi - -if [ -z "$2" ]; then - usage -fi - -set -e - -rt_path=$1 -para_id=$2 - -echo "Generating chain spec for runtime: $rt_path and para_id: $para_id" - -binary="./target/release/polkadot-parachain" - -# build the chain spec we'll manipulate -$binary build-spec --chain bridge-hub-polkadot-dev > chain-spec-plain.json - -# convert runtime to hex -cat $rt_path | od -A n -v -t x1 | tr -d ' \n' > rt-hex.txt - -# replace the runtime in the spec with the given runtime and set some values to production -cat chain-spec-plain.json | jq --rawfile code rt-hex.txt '.genesis.runtime.system.code = ("0x" + $code)' \ - | jq '.name = "Polkadot BridgeHub"' \ - | jq '.id = "bridge-hub-polkadot"' \ - | jq '.chainType = "Live"' \ - | jq '.bootNodes = [ - "/dns/polkadot-bridge-hub-connect-a-0.polkadot.io/tcp/30334/p2p/12D3KooWAVQMhkXmc5ueSYasdsRWQbKus2YGZ6HDZUB4ViJMCxXy", - "/dns/polkadot-bridge-hub-connect-a-1.polkadot.io/tcp/30334/p2p/12D3KooWG4ypDHLKGCv4BZ6PuaGUwQHKAH6p2D6arR2uQ1eiR1T3", - "/dns/polkadot-bridge-hub-connect-b-0.polkadot.io/tcp/30334/p2p/12D3KooWCwGKxjpJXnx1mwXKvaxGQm769EM3b6Pg5vbU33wbhsNw", - "/dns/polkadot-bridge-hub-connect-b-1.polkadot.io/tcp/30334/p2p/12D3KooWLiSEdhriJUPdZKFtAjZrQncxN2ssEoDKVrt5mGM4Qu4J", - - "/dns/polkadot-bridge-hub-connect-a-0.polkadot.io/tcp/443/wss/p2p/12D3KooWAVQMhkXmc5ueSYasdsRWQbKus2YGZ6HDZUB4ViJMCxXy", - "/dns/polkadot-bridge-hub-connect-a-1.polkadot.io/tcp/443/wss/p2p/12D3KooWG4ypDHLKGCv4BZ6PuaGUwQHKAH6p2D6arR2uQ1eiR1T3", - "/dns/polkadot-bridge-hub-connect-b-0.polkadot.io/tcp/443/wss/p2p/12D3KooWCwGKxjpJXnx1mwXKvaxGQm769EM3b6Pg5vbU33wbhsNw", - "/dns/polkadot-bridge-hub-connect-b-1.polkadot.io/tcp/443/wss/p2p/12D3KooWLiSEdhriJUPdZKFtAjZrQncxN2ssEoDKVrt5mGM4Qu4J" - ]' \ - | jq '.relay_chain = "polkadot"' \ - | jq --argjson para_id $para_id '.para_id = $para_id' \ - | jq --argjson para_id $para_id '.genesis.runtime.parachainInfo.parachainId = $para_id' \ - | jq '.genesis.runtime.balances.balances = []' \ - | jq '.genesis.runtime.collatorSelection.invulnerables = [ - "134AK3RiMA97Fx9dLj1CvuLJUa8Yo93EeLA1TkP6CCGnWMSd", - "15dU8Tt7kde2diuHzijGbKGPU5K8BPzrFJfYFozvrS1DdE21", - "1vXMKM8SctM28AQw1wSpd7p9yCUWn1uhbbKSVTuznsw8Q2x", - "15mCQcaj3QP1UdxBF82JRd9v3riZJcVNVEmx8xkFp7DSYR4Y" - ]' \ - | jq '.genesis.runtime.session.keys = [ - [ - "134AK3RiMA97Fx9dLj1CvuLJUa8Yo93EeLA1TkP6CCGnWMSd", - "134AK3RiMA97Fx9dLj1CvuLJUa8Yo93EeLA1TkP6CCGnWMSd", - { - "aura": "5EX6AnyuSPEFQ7HAPjRgzqk1sxgh8cyacGimwJ16y1nJ2w7g" - } - ], - [ - "15dU8Tt7kde2diuHzijGbKGPU5K8BPzrFJfYFozvrS1DdE21", - "15dU8Tt7kde2diuHzijGbKGPU5K8BPzrFJfYFozvrS1DdE21", - { - "aura": "5DZN8UhaJftvKhMMARmJBwrwzuEDpoUzzBvvWMbFXYsJ4CmK" - } - ], - [ - "1vXMKM8SctM28AQw1wSpd7p9yCUWn1uhbbKSVTuznsw8Q2x", - "1vXMKM8SctM28AQw1wSpd7p9yCUWn1uhbbKSVTuznsw8Q2x", - { - "aura": "5FKsn83rXQQiw7HwoeYoLMoYS5GP9YVNHZiCHwA4DSwDcPVa" - } - ], - [ - "15mCQcaj3QP1UdxBF82JRd9v3riZJcVNVEmx8xkFp7DSYR4Y", - "15mCQcaj3QP1UdxBF82JRd9v3riZJcVNVEmx8xkFp7DSYR4Y", - { - "aura": 
"5DCg19ckcJz4m52Th4o1LcSRK3H7NsUcQsRbu7pTDM3mZ26v" - } - ] - ]' \ - > edited-chain-spec-plain.json - -# build a raw spec -$binary build-spec --chain edited-chain-spec-plain.json --raw > chain-spec-raw.json -cp edited-chain-spec-plain.json bridge-hub-polkadot-spec.json -cp chain-spec-raw.json ./parachains/chain-specs/bridge-hub-polkadot.json -cp chain-spec-raw.json bridge-hub-polkadot-spec-raw.json - -# build genesis data -$binary export-genesis-state --chain chain-spec-raw.json > bridge-hub-polkadot-genesis-head-data - -# build genesis wasm -$binary export-genesis-wasm --chain chain-spec-raw.json > bridge-hub-polkadot-wasm - -# cleanup -rm -f rt-hex.txt -rm -f chain-spec-plain.json -rm -f chain-spec-raw.json -rm -f edited-chain-spec-plain.json diff --git a/cumulus/scripts/create_glutton_spec.sh b/cumulus/scripts/create_glutton_spec.sh index c5158392f52..78aafda3bd0 100755 --- a/cumulus/scripts/create_glutton_spec.sh +++ b/cumulus/scripts/create_glutton_spec.sh @@ -55,7 +55,7 @@ for (( para_id=$from_para_id; para_id<=$to_para_id; para_id++ )); do fi # build the chain spec we'll manipulate - $binary_path build-spec --disable-default-bootnode --chain "glutton-kusama-genesis-$para_id" > "$output_para_dir/plain-glutton-$relay_chain-$para_id-spec.json" + $binary_path build-spec --disable-default-bootnode --chain "glutton-westend-genesis-$para_id" > "$output_para_dir/plain-glutton-$relay_chain-$para_id-spec.json" id="glutton-$relay_chain-$para_id" protocol_id="glutton-$relay_chain-$para_id" diff --git a/cumulus/scripts/parachains_integration_tests.sh b/cumulus/scripts/parachains_integration_tests.sh deleted file mode 100755 index 2a06b930e22..00000000000 --- a/cumulus/scripts/parachains_integration_tests.sh +++ /dev/null @@ -1,32 +0,0 @@ -#!/usr/bin/env bash - -tests=( - asset-hub-kusama - asset-hub-polkadot -) - -rm -R logs &> /dev/null - -for t in ${tests[@]} -do - printf "\n🔍 Running $t tests...\n\n" - - mkdir -p logs/$t - - parachains-integration-tests \ - -m zombienet \ - -c ./parachains/integration-tests/$t/config.toml \ - -cl ./logs/$t/chains.log 2> /dev/null & - - parachains-integration-tests \ - -m test \ - -t ./parachains/integration-tests/$t \ - -tl ./logs/$t/tests.log & tests=$! - - wait $tests - - pkill -f polkadot - pkill -f parachain - - printf "\n🎉 $t integration tests finished! 
\n\n" -done diff --git a/cumulus/zombienet/examples/statemine_kusama_local_network.toml b/cumulus/zombienet/examples/asset_hub_westend_local_network.toml similarity index 71% rename from cumulus/zombienet/examples/statemine_kusama_local_network.toml rename to cumulus/zombienet/examples/asset_hub_westend_local_network.toml index 1f3debfb9d2..5b0ac1f17e8 100644 --- a/cumulus/zombienet/examples/statemine_kusama_local_network.toml +++ b/cumulus/zombienet/examples/asset_hub_westend_local_network.toml @@ -1,7 +1,7 @@ [relaychain] -default_command = "../polkadot/target/release/polkadot" +default_command = "../../target/release/polkadot" default_args = [ "-lparachain=debug" ] -chain = "kusama-local" +chain = "westend-local" [[relaychain.nodes]] name = "alice" @@ -21,47 +21,47 @@ chain = "kusama-local" [[parachains]] id = 1000 -chain = "asset-hub-kusama-local" +chain = "asset-hub-westend-local" cumulus_based = true # run alice as parachain collator [[parachains.collators]] name = "alice" validator = true - command = "./target/release/polkadot-parachain" + command = "../../target/release/polkadot-parachain" args = ["-lparachain=debug"] # run bob as parachain collator [[parachains.collators]] name = "bob" validator = true - command = "./target/release/polkadot-parachain" + command = "../../target/release/polkadot-parachain" args = ["-lparachain=debug"] # run charlie as parachain collator [[parachains.collators]] name = "charlie" validator = true - command = "./target/release/polkadot-parachain" + command = "../../target/release/polkadot-parachain" args = ["-lparachain=debug"] # run dave as parachain collator [[parachains.collators]] name = "dave" validator = true - command = "./target/release/polkadot-parachain" + command = "../../target/release/polkadot-parachain" args = ["-lparachain=debug"] # run eve as parachain collator [[parachains.collators]] name = "eve" validator = true - command = "./target/release/polkadot-parachain" + command = "../../target/release/polkadot-parachain" args = ["-lparachain=debug"] # run ferdie as parachain collator [[parachains.collators]] name = "ferdie" validator = true - command = "./target/release/polkadot-parachain" + command = "../../target/release/polkadot-parachain" args = ["-lparachain=debug"] diff --git a/cumulus/zombienet/examples/bridge_hub_kusama_local_network.toml b/cumulus/zombienet/examples/bridge_hub_kusama_local_network.toml deleted file mode 100644 index ae8ae07a75c..00000000000 --- a/cumulus/zombienet/examples/bridge_hub_kusama_local_network.toml +++ /dev/null @@ -1,67 +0,0 @@ -[relaychain] -default_command = "../polkadot/target/release/polkadot" -default_args = [ "-lparachain=debug" ] -chain = "kusama-local" - - [[relaychain.nodes]] - name = "alice" - validator = true - - [[relaychain.nodes]] - name = "bob" - validator = true - - [[relaychain.nodes]] - name = "charlie" - validator = true - - [[relaychain.nodes]] - name = "dave" - validator = true - -[[parachains]] -id = 1003 -chain = "bridge-hub-kusama-local" -cumulus_based = true - - # run alice as parachain collator - [[parachains.collators]] - name = "alice" - validator = true - command = "./target/release/polkadot-parachain" - args = ["-lparachain=debug"] - - # run bob as parachain collator - [[parachains.collators]] - name = "bob" - validator = true - command = "./target/release/polkadot-parachain" - args = ["-lparachain=debug"] - - # run charlie as parachain collator - [[parachains.collators]] - name = "charlie" - validator = true - command = "./target/release/polkadot-parachain" - args = 
["-lparachain=debug"] - - # run dave as parachain collator - [[parachains.collators]] - name = "dave" - validator = true - command = "./target/release/polkadot-parachain" - args = ["-lparachain=debug"] - - # run eve as parachain collator - [[parachains.collators]] - name = "eve" - validator = true - command = "./target/release/polkadot-parachain" - args = ["-lparachain=debug"] - - # run ferdie as parachain collator - [[parachains.collators]] - name = "ferdie" - validator = true - command = "./target/release/polkadot-parachain" - args = ["-lparachain=debug"] diff --git a/cumulus/zombienet/examples/bridge_hub_polkadot_local_network.toml b/cumulus/zombienet/examples/bridge_hub_polkadot_local_network.toml deleted file mode 100644 index 564fece7cae..00000000000 --- a/cumulus/zombienet/examples/bridge_hub_polkadot_local_network.toml +++ /dev/null @@ -1,67 +0,0 @@ -[relaychain] -default_command = "../polkadot/target/release/polkadot" -default_args = [ "-lparachain=debug" ] -chain = "polkadot-local" - - [[relaychain.nodes]] - name = "alice" - validator = true - - [[relaychain.nodes]] - name = "bob" - validator = true - - [[relaychain.nodes]] - name = "charlie" - validator = true - - [[relaychain.nodes]] - name = "dave" - validator = true - -[[parachains]] -id = 1003 -chain = "bridge-hub-polkadot-local" -cumulus_based = true - - # run alice as parachain collator - [[parachains.collators]] - name = "alice" - validator = true - command = "./target/release/polkadot-parachain" - args = ["-lparachain=debug"] - - # run bob as parachain collator - [[parachains.collators]] - name = "bob" - validator = true - command = "./target/release/polkadot-parachain" - args = ["-lparachain=debug"] - - # run charlie as parachain collator - [[parachains.collators]] - name = "charlie" - validator = true - command = "./target/release/polkadot-parachain" - args = ["-lparachain=debug"] - - # run dave as parachain collator - [[parachains.collators]] - name = "dave" - validator = true - command = "./target/release/polkadot-parachain" - args = ["-lparachain=debug"] - - # run eve as parachain collator - [[parachains.collators]] - name = "eve" - validator = true - command = "./target/release/polkadot-parachain" - args = ["-lparachain=debug"] - - # run ferdie as parachain collator - [[parachains.collators]] - name = "ferdie" - validator = true - command = "./target/release/polkadot-parachain" - args = ["-lparachain=debug"] diff --git a/cumulus/zombienet/examples/small_network.toml b/cumulus/zombienet/examples/small_network.toml index 06ac0d0e5e7..ab726571230 100644 --- a/cumulus/zombienet/examples/small_network.toml +++ b/cumulus/zombienet/examples/small_network.toml @@ -14,7 +14,7 @@ chain = "rococo-local" [[parachains]] id = 2000 cumulus_based = true -chain = "asset-hub-kusama-local" +chain = "asset-hub-rococo-local" # run charlie as parachain collator [[parachains.collators]] -- GitLab From f2fe6a4c56ff524d9fd3a35eda8b246740af69e8 Mon Sep 17 00:00:00 2001 From: yjh Date: Wed, 29 Nov 2023 21:48:32 +0800 Subject: [PATCH 154/199] Improve `CodeExecutor` (#2358) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Since `sp-state-machine` and `GenesisConfigBuilderRuntimeCaller` always set `use_native` to be false. We should remove this param and make `NativeElseWasmExecutor` behave like its name. It could make the above components use the correct execution strategy. Maybe polkadot do not need about `NativeElseWasmExecutor` anymore. 
But it is still needed by other chains and it's useful for debugging. --------- Co-authored-by: Bastian Köcher Co-authored-by: command-bot <> Co-authored-by: Michal Kucharczyk <1728078+michalkucharczyk@users.noreply.github.com> --- substrate/bin/node/cli/benches/executor.rs | 12 +--- substrate/bin/node/cli/tests/basic.rs | 69 +++++++------------ substrate/bin/node/cli/tests/common.rs | 9 ++- substrate/bin/node/cli/tests/fees.rs | 14 ++-- .../chain-spec/src/genesis_config_builder.rs | 1 - substrate/client/executor/src/executor.rs | 23 +++++-- substrate/primitives/core/src/traits.rs | 1 - substrate/primitives/state-machine/src/lib.rs | 5 +- substrate/test-utils/client/src/lib.rs | 3 +- 9 files changed, 56 insertions(+), 81 deletions(-) diff --git a/substrate/bin/node/cli/benches/executor.rs b/substrate/bin/node/cli/benches/executor.rs index d654646904b..a326e1a79ea 100644 --- a/substrate/bin/node/cli/benches/executor.rs +++ b/substrate/bin/node/cli/benches/executor.rs @@ -103,14 +103,7 @@ fn construct_block( // execute the block to get the real header. executor - .call( - ext, - &runtime_code, - "Core_initialize_block", - &header.encode(), - true, - CallContext::Offchain, - ) + .call(ext, &runtime_code, "Core_initialize_block", &header.encode(), CallContext::Offchain) .0 .unwrap(); @@ -121,7 +114,6 @@ fn construct_block( &runtime_code, "BlockBuilder_apply_extrinsic", &i.encode(), - true, CallContext::Offchain, ) .0 @@ -135,7 +127,6 @@ fn construct_block( &runtime_code, "BlockBuilder_finalize_block", &[0u8; 0], - true, CallContext::Offchain, ) .0 @@ -200,7 +191,6 @@ fn bench_execute_block(c: &mut Criterion) { &runtime_code, "Core_execute_block", &block.0, - false, CallContext::Offchain, ) .0 diff --git a/substrate/bin/node/cli/tests/basic.rs b/substrate/bin/node/cli/tests/basic.rs index cbceac04e8e..e5a8a397254 100644 --- a/substrate/bin/node/cli/tests/basic.rs +++ b/substrate/bin/node/cli/tests/basic.rs @@ -193,11 +193,9 @@ fn panic_execution_with_foreign_code_gives_error() { t.insert(>::hashed_key().to_vec(), 69_u128.encode()); t.insert(>::hashed_key_for(0), vec![0u8; 32]); - let r = - executor_call(&mut t, "Core_initialize_block", &vec![].and(&from_block_number(1u32)), true) - .0; + let r = executor_call(&mut t, "Core_initialize_block", &vec![].and(&from_block_number(1u32))).0; assert!(r.is_ok()); - let v = executor_call(&mut t, "BlockBuilder_apply_extrinsic", &vec![].and(&xt()), true) + let v = executor_call(&mut t, "BlockBuilder_apply_extrinsic", &vec![].and(&xt())) .0 .unwrap(); let r = ApplyExtrinsicResult::decode(&mut &v[..]).unwrap(); @@ -219,11 +217,9 @@ fn bad_extrinsic_with_native_equivalent_code_gives_error() { t.insert(>::hashed_key().to_vec(), 69u128.encode()); t.insert(>::hashed_key_for(0), vec![0u8; 32]); - let r = - executor_call(&mut t, "Core_initialize_block", &vec![].and(&from_block_number(1u32)), true) - .0; + let r = executor_call(&mut t, "Core_initialize_block", &vec![].and(&from_block_number(1u32))).0; assert!(r.is_ok()); - let v = executor_call(&mut t, "BlockBuilder_apply_extrinsic", &vec![].and(&xt()), true) + let v = executor_call(&mut t, "BlockBuilder_apply_extrinsic", &vec![].and(&xt())) .0 .unwrap(); let r = ApplyExtrinsicResult::decode(&mut &v[..]).unwrap(); @@ -256,14 +252,12 @@ fn successful_execution_with_native_equivalent_code_gives_ok() { ); t.insert(>::hashed_key_for(0), vec![0u8; 32]); - let r = - executor_call(&mut t, "Core_initialize_block", &vec![].and(&from_block_number(1u32)), true) - .0; + let r = executor_call(&mut t, "Core_initialize_block", 
&vec![].and(&from_block_number(1u32))).0; assert!(r.is_ok()); let fees = t.execute_with(|| transfer_fee(&xt())); - let r = executor_call(&mut t, "BlockBuilder_apply_extrinsic", &vec![].and(&xt()), true).0; + let r = executor_call(&mut t, "BlockBuilder_apply_extrinsic", &vec![].and(&xt())).0; assert!(r.is_ok()); t.execute_with(|| { @@ -298,14 +292,12 @@ fn successful_execution_with_foreign_code_gives_ok() { ); t.insert(>::hashed_key_for(0), vec![0u8; 32]); - let r = - executor_call(&mut t, "Core_initialize_block", &vec![].and(&from_block_number(1u32)), true) - .0; + let r = executor_call(&mut t, "Core_initialize_block", &vec![].and(&from_block_number(1u32))).0; assert!(r.is_ok()); let fees = t.execute_with(|| transfer_fee(&xt())); - let r = executor_call(&mut t, "BlockBuilder_apply_extrinsic", &vec![].and(&xt()), true).0; + let r = executor_call(&mut t, "BlockBuilder_apply_extrinsic", &vec![].and(&xt())).0; assert!(r.is_ok()); t.execute_with(|| { @@ -337,7 +329,7 @@ fn full_native_block_import_works() { .base_extrinsic, ); - executor_call(&mut t, "Core_execute_block", &block1.0, true).0.unwrap(); + executor_call(&mut t, "Core_execute_block", &block1.0).0.unwrap(); t.execute_with(|| { assert_eq!(Balances::total_balance(&alice()), 42 * DOLLARS - fees); @@ -412,7 +404,7 @@ fn full_native_block_import_works() { fees = t.execute_with(|| transfer_fee(&xt())); let pot = t.execute_with(|| Treasury::pot()); - executor_call(&mut t, "Core_execute_block", &block2.0, true).0.unwrap(); + executor_call(&mut t, "Core_execute_block", &block2.0).0.unwrap(); t.execute_with(|| { assert_eq!( @@ -554,7 +546,7 @@ fn full_wasm_block_import_works() { let mut alice_last_known_balance: Balance = Default::default(); let mut fees = t.execute_with(|| transfer_fee(&xt())); - executor_call(&mut t, "Core_execute_block", &block1.0, false).0.unwrap(); + executor_call(&mut t, "Core_execute_block", &block1.0).0.unwrap(); t.execute_with(|| { assert_eq!(Balances::total_balance(&alice()), 42 * DOLLARS - fees); @@ -564,7 +556,7 @@ fn full_wasm_block_import_works() { fees = t.execute_with(|| transfer_fee(&xt())); - executor_call(&mut t, "Core_execute_block", &block2.0, false).0.unwrap(); + executor_call(&mut t, "Core_execute_block", &block2.0).0.unwrap(); t.execute_with(|| { assert_eq!( @@ -717,7 +709,7 @@ fn deploying_wasm_contract_should_work() { let mut t = new_test_ext(compact_code_unwrap()); - executor_call(&mut t, "Core_execute_block", &b.0, false).0.unwrap(); + executor_call(&mut t, "Core_execute_block", &b.0).0.unwrap(); t.execute_with(|| { // Verify that the contract does exist by querying some of its storage items @@ -732,8 +724,7 @@ fn wasm_big_block_import_fails() { set_heap_pages(&mut t.ext(), 4); - let result = - executor_call(&mut t, "Core_execute_block", &block_with_size(42, 0, 120_000).0, false).0; + let result = executor_call(&mut t, "Core_execute_block", &block_with_size(42, 0, 120_000).0).0; assert!(result.is_err()); // Err(Wasmi(Trap(Trap { kind: Host(AllocatorOutOfSpace) }))) } @@ -741,7 +732,7 @@ fn wasm_big_block_import_fails() { fn native_big_block_import_succeeds() { let mut t = new_test_ext(compact_code_unwrap()); - executor_call(&mut t, "Core_execute_block", &block_with_size(42, 0, 120_000).0, true) + executor_call(&mut t, "Core_execute_block", &block_with_size(42, 0, 120_000).0) .0 .unwrap(); } @@ -754,11 +745,9 @@ fn native_big_block_import_fails_on_fallback() { // block. 
set_heap_pages(&mut t.ext(), 8); - assert!( - executor_call(&mut t, "Core_execute_block", &block_with_size(42, 0, 120_000).0, false,) - .0 - .is_err() - ); + assert!(executor_call(&mut t, "Core_execute_block", &block_with_size(42, 0, 120_000).0) + .0 + .is_err()); } #[test] @@ -775,15 +764,9 @@ fn panic_execution_gives_error() { t.insert(>::hashed_key().to_vec(), 0_u128.encode()); t.insert(>::hashed_key_for(0), vec![0u8; 32]); - let r = executor_call( - &mut t, - "Core_initialize_block", - &vec![].and(&from_block_number(1u32)), - false, - ) - .0; + let r = executor_call(&mut t, "Core_initialize_block", &vec![].and(&from_block_number(1u32))).0; assert!(r.is_ok()); - let r = executor_call(&mut t, "BlockBuilder_apply_extrinsic", &vec![].and(&xt()), false) + let r = executor_call(&mut t, "BlockBuilder_apply_extrinsic", &vec![].and(&xt())) .0 .unwrap(); let r = ApplyExtrinsicResult::decode(&mut &r[..]).unwrap(); @@ -816,13 +799,7 @@ fn successful_execution_gives_ok() { ); t.insert(>::hashed_key_for(0), vec![0u8; 32]); - let r = executor_call( - &mut t, - "Core_initialize_block", - &vec![].and(&from_block_number(1u32)), - false, - ) - .0; + let r = executor_call(&mut t, "Core_initialize_block", &vec![].and(&from_block_number(1u32))).0; assert!(r.is_ok()); t.execute_with(|| { assert_eq!(Balances::total_balance(&alice()), 111 * DOLLARS); @@ -830,7 +807,7 @@ fn successful_execution_gives_ok() { let fees = t.execute_with(|| transfer_fee(&xt())); - let r = executor_call(&mut t, "BlockBuilder_apply_extrinsic", &vec![].and(&xt()), false) + let r = executor_call(&mut t, "BlockBuilder_apply_extrinsic", &vec![].and(&xt())) .0 .unwrap(); ApplyExtrinsicResult::decode(&mut &r[..]) @@ -861,7 +838,7 @@ fn should_import_block_with_test_client() { #[test] fn default_config_as_json_works() { let mut t = new_test_ext(compact_code_unwrap()); - let r = executor_call(&mut t, "GenesisBuilder_create_default_config", &vec![], false) + let r = executor_call(&mut t, "GenesisBuilder_create_default_config", &vec![]) .0 .unwrap(); let r = Vec::::decode(&mut &r[..]).unwrap(); diff --git a/substrate/bin/node/cli/tests/common.rs b/substrate/bin/node/cli/tests/common.rs index 7d8f6a9a0e7..9019594ff62 100644 --- a/substrate/bin/node/cli/tests/common.rs +++ b/substrate/bin/node/cli/tests/common.rs @@ -105,7 +105,6 @@ pub fn executor_call( t: &mut TestExternalities, method: &str, data: &[u8], - use_native: bool, ) -> (Result>, bool) { let mut t = t.ext(); @@ -117,7 +116,7 @@ pub fn executor_call( heap_pages: heap_pages.and_then(|hp| Decode::decode(&mut &hp[..]).ok()), }; sp_tracing::try_init_simple(); - executor().call(&mut t, &runtime_code, method, data, use_native, CallContext::Onchain) + executor().call(&mut t, &runtime_code, method, data, CallContext::Onchain) } pub fn new_test_ext(code: &[u8]) -> TestExternalities { @@ -168,12 +167,12 @@ pub fn construct_block( }; // execute the block to get the real header. - executor_call(env, "Core_initialize_block", &header.encode(), true).0.unwrap(); + executor_call(env, "Core_initialize_block", &header.encode()).0.unwrap(); for extrinsic in extrinsics.iter() { // Try to apply the `extrinsic`. It should be valid, in the sense that it passes // all pre-inclusion checks. 
- let r = executor_call(env, "BlockBuilder_apply_extrinsic", &extrinsic.encode(), true) + let r = executor_call(env, "BlockBuilder_apply_extrinsic", &extrinsic.encode()) .0 .expect("application of an extrinsic failed"); @@ -186,7 +185,7 @@ pub fn construct_block( } let header = Header::decode( - &mut &executor_call(env, "BlockBuilder_finalize_block", &[0u8; 0], true).0.unwrap()[..], + &mut &executor_call(env, "BlockBuilder_finalize_block", &[0u8; 0]).0.unwrap()[..], ) .unwrap(); diff --git a/substrate/bin/node/cli/tests/fees.rs b/substrate/bin/node/cli/tests/fees.rs index 7519ce6e8b1..8c7b3c87315 100644 --- a/substrate/bin/node/cli/tests/fees.rs +++ b/substrate/bin/node/cli/tests/fees.rs @@ -95,7 +95,7 @@ fn fee_multiplier_increases_and_decreases_on_big_weight() { ); // execute a big block. - executor_call(&mut t, "Core_execute_block", &block1.0, true).0.unwrap(); + executor_call(&mut t, "Core_execute_block", &block1.0).0.unwrap(); // weight multiplier is increased for next block. t.execute_with(|| { @@ -106,7 +106,7 @@ fn fee_multiplier_increases_and_decreases_on_big_weight() { }); // execute a big block. - executor_call(&mut t, "Core_execute_block", &block2.0, true).0.unwrap(); + executor_call(&mut t, "Core_execute_block", &block2.0).0.unwrap(); // weight multiplier is increased for next block. t.execute_with(|| { @@ -151,12 +151,10 @@ fn transaction_fee_is_correct() { function: RuntimeCall::Balances(default_transfer_call()), }); - let r = - executor_call(&mut t, "Core_initialize_block", &vec![].and(&from_block_number(1u32)), true) - .0; + let r = executor_call(&mut t, "Core_initialize_block", &vec![].and(&from_block_number(1u32))).0; assert!(r.is_ok()); - let r = executor_call(&mut t, "BlockBuilder_apply_extrinsic", &vec![].and(&xt.clone()), true).0; + let r = executor_call(&mut t, "BlockBuilder_apply_extrinsic", &vec![].and(&xt.clone())).0; assert!(r.is_ok()); t.execute_with(|| { @@ -247,7 +245,7 @@ fn block_weight_capacity_report() { len / 1024 / 1024, ); - let r = executor_call(&mut t, "Core_execute_block", &block.0, true).0; + let r = executor_call(&mut t, "Core_execute_block", &block.0).0; println!(" || Result = {:?}", r); assert!(r.is_ok()); @@ -310,7 +308,7 @@ fn block_length_capacity_report() { len / 1024 / 1024, ); - let r = executor_call(&mut t, "Core_execute_block", &block.0, true).0; + let r = executor_call(&mut t, "Core_execute_block", &block.0).0; println!(" || Result = {:?}", r); assert!(r.is_ok()); diff --git a/substrate/client/chain-spec/src/genesis_config_builder.rs b/substrate/client/chain-spec/src/genesis_config_builder.rs index 9ccf6b4efb2..68f3d886049 100644 --- a/substrate/client/chain-spec/src/genesis_config_builder.rs +++ b/substrate/client/chain-spec/src/genesis_config_builder.rs @@ -76,7 +76,6 @@ where &RuntimeCode { heap_pages: None, code_fetcher: self, hash: self.code_hash.clone() }, method, data, - false, CallContext::Offchain, ) .0 diff --git a/substrate/client/executor/src/executor.rs b/substrate/client/executor/src/executor.rs index 7c292a83da0..499bb704b16 100644 --- a/substrate/client/executor/src/executor.rs +++ b/substrate/client/executor/src/executor.rs @@ -492,7 +492,6 @@ where runtime_code: &RuntimeCode, method: &str, data: &[u8], - _use_native: bool, context: CallContext, ) -> (Result>, bool) { tracing::trace!( @@ -565,6 +564,8 @@ pub struct NativeElseWasmExecutor { /// Fallback wasm executor. 
wasm: WasmExecutor>, + + use_native: bool, } impl NativeElseWasmExecutor { @@ -601,7 +602,7 @@ impl NativeElseWasmExecutor { .with_runtime_cache_size(runtime_cache_size) .build(); - NativeElseWasmExecutor { native_version: D::native_version(), wasm } + NativeElseWasmExecutor { native_version: D::native_version(), wasm, use_native: true } } /// Create a new instance using the given [`WasmExecutor`]. @@ -610,7 +611,14 @@ impl NativeElseWasmExecutor { ExtendedHostFunctions, >, ) -> Self { - Self { native_version: D::native_version(), wasm: executor } + Self { native_version: D::native_version(), wasm: executor, use_native: true } + } + + /// Disable to use native runtime when possible just behave like `WasmExecutor`. + /// + /// Default to enabled. + pub fn disable_use_native(&mut self) { + self.use_native = false; } /// Ignore missing function imports if set true. @@ -645,9 +653,10 @@ impl CodeExecutor for NativeElseWasmExecut runtime_code: &RuntimeCode, method: &str, data: &[u8], - use_native: bool, context: CallContext, ) -> (Result>, bool) { + let use_native = self.use_native; + tracing::trace!( target: "executor", function = %method, @@ -711,7 +720,11 @@ impl CodeExecutor for NativeElseWasmExecut impl Clone for NativeElseWasmExecutor { fn clone(&self) -> Self { - NativeElseWasmExecutor { native_version: D::native_version(), wasm: self.wasm.clone() } + NativeElseWasmExecutor { + native_version: D::native_version(), + wasm: self.wasm.clone(), + use_native: self.use_native, + } } } diff --git a/substrate/primitives/core/src/traits.rs b/substrate/primitives/core/src/traits.rs index 9815c84f339..851d8910391 100644 --- a/substrate/primitives/core/src/traits.rs +++ b/substrate/primitives/core/src/traits.rs @@ -51,7 +51,6 @@ pub trait CodeExecutor: Sized + Send + Sync + ReadRuntimeVersion + Clone + 'stat runtime_code: &RuntimeCode, method: &str, data: &[u8], - use_native: bool, context: CallContext, ) -> (Result, Self::Error>, bool); } diff --git a/substrate/primitives/state-machine/src/lib.rs b/substrate/primitives/state-machine/src/lib.rs index 0e2b9bfdfff..1345097e8f6 100644 --- a/substrate/primitives/state-machine/src/lib.rs +++ b/substrate/primitives/state-machine/src/lib.rs @@ -289,7 +289,7 @@ mod execution { let result = self .exec - .call(&mut ext, self.runtime_code, self.method, self.call_data, false, self.context) + .call(&mut ext, self.runtime_code, self.method, self.call_data, self.context) .0; self.overlay @@ -1120,10 +1120,9 @@ mod tests { _: &RuntimeCode, _method: &str, _data: &[u8], - use_native: bool, _: CallContext, ) -> (CallResult, bool) { - let using_native = use_native && self.native_available; + let using_native = self.native_available; match (using_native, self.native_succeeds, self.fallback_succeeds) { (true, true, _) | (false, _, true) => ( Ok(vec![ diff --git a/substrate/test-utils/client/src/lib.rs b/substrate/test-utils/client/src/lib.rs index 084dd2a1861..f383f7c3dc3 100644 --- a/substrate/test-utils/client/src/lib.rs +++ b/substrate/test-utils/client/src/lib.rs @@ -263,9 +263,10 @@ impl D: sc_executor::NativeExecutionDispatch + 'static, Backend: sc_client_api::backend::Backend + 'static, { - let executor = executor.into().unwrap_or_else(|| { + let mut executor = executor.into().unwrap_or_else(|| { NativeElseWasmExecutor::new_with_wasm_executor(WasmExecutor::builder().build()) }); + executor.disable_use_native(); let executor = LocalCallExecutor::new( self.backend.clone(), executor.clone(), -- GitLab From 19f665f27902bfea62d24cd06723cdab40f060ab Mon Sep 17 00:00:00 
2001 From: Tom Mi Date: Wed, 29 Nov 2023 21:49:11 +0800 Subject: [PATCH 155/199] add Rotko common good parachain nodes (#2533) rotko networks parachain bootnodes ``` # array of commands for testing parachains=( "./polkadot-parachain --chain asset-hub-polkadot --reserved-only --reserved-nodes /dns/mint14.rotko.net/tcp/33514/p2p/12D3KooWKkzLjYF6M5eEs7nYiqEtRqY8SGVouoCwo3nCWsRnThDW --no-hardware-benchmarks" "./polkadot-parachain --chain asset-hub-polkadot --reserved-only --reserved-nodes /dns/mint14.rotko.net/tcp/34514/ws/p2p/12D3KooWKkzLjYF6M5eEs7nYiqEtRqY8SGVouoCwo3nCWsRnThDW --no-hardware-benchmarks" "./polkadot-parachain --chain asset-hub-polkadot --reserved-only --reserved-nodes /dns/mint14.rotko.net/tcp/35514/wss/p2p/12D3KooWKkzLjYF6M5eEs7nYiqEtRqY8SGVouoCwo3nCWsRnThDW --no-hardware-benchmarks" "./polkadot-parachain --chain asset-hub-kusama --reserved-only --reserved-nodes /dns/mine14.rotko.net/tcp/33524/p2p/12D3KooWJUFnjR2PNbsJhudwPVaWCoZy1acPGKjM2cSuGj345BBu --no-hardware-benchmarks" "./polkadot-parachain --chain asset-hub-kusama --reserved-only --reserved-nodes /dns/mine14.rotko.net/tcp/34524/ws/p2p/12D3KooWJUFnjR2PNbsJhudwPVaWCoZy1acPGKjM2cSuGj345BBu --no-hardware-benchmarks" "./polkadot-parachain --chain asset-hub-kusama --reserved-only --reserved-nodes /dns/mine14.rotko.net/tcp/35524/wss/p2p/12D3KooWJUFnjR2PNbsJhudwPVaWCoZy1acPGKjM2cSuGj345BBu --no-hardware-benchmarks" "./polkadot-parachain --chain asset-hub-westend --reserved-only --reserved-nodes /dns/wmint14.rotko.net/tcp/33534/p2p/12D3KooWE4UDXqgtTcMCyUQ8S4uvaT8VMzzTBA6NWmKuYwTacWuN --no-hardware-benchmarks" "./polkadot-parachain --chain asset-hub-westend --reserved-only --reserved-nodes /dns/wmint14.rotko.net/tcp/34534/ws/p2p/12D3KooWE4UDXqgtTcMCyUQ8S4uvaT8VMzzTBA6NWmKuYwTacWuN --no-hardware-benchmarks" "./polkadot-parachain --chain asset-hub-westend --reserved-only --reserved-nodes /dns/wmint14.rotko.net/tcp/35534/wss/p2p/12D3KooWE4UDXqgtTcMCyUQ8S4uvaT8VMzzTBA6NWmKuYwTacWuN --no-hardware-benchmarks" "./polkadot-parachain --chain bridge-hub-polkadot --reserved-only --reserved-nodes /dns/pbr13.rotko.net/tcp/33543/p2p/12D3KooWMxZY7tDc2Rh454VaJJ7RexKAXVS6xSBEvTnXSGCnuGDw --no-hardware-benchmarks" "./polkadot-parachain --chain bridge-hub-polkadot --reserved-only --reserved-nodes /dns/pbr13.rotko.net/tcp/34543/ws/p2p/12D3KooWMxZY7tDc2Rh454VaJJ7RexKAXVS6xSBEvTnXSGCnuGDw --no-hardware-benchmarks" "./polkadot-parachain --chain bridge-hub-polkadot --reserved-only --reserved-nodes /dns/pbr13.rotko.net/tcp/35543/wss/p2p/12D3KooWMxZY7tDc2Rh454VaJJ7RexKAXVS6xSBEvTnXSGCnuGDw --no-hardware-benchmarks" "./polkadot-parachain --chain bridge-hub-kusama --reserved-only --reserved-nodes /dns/kbr13.rotko.net/tcp/33553/p2p/12D3KooWAmBp54mUEYtvsk2kxNEsDbAvdUMcaghxKXgUQxmPEQ66 --no-hardware-benchmarks" "./polkadot-parachain --chain bridge-hub-kusama --reserved-only --reserved-nodes /dns/kbr13.rotko.net/tcp/34553/ws/p2p/12D3KooWAmBp54mUEYtvsk2kxNEsDbAvdUMcaghxKXgUQxmPEQ66 --no-hardware-benchmarks" "./polkadot-parachain --chain bridge-hub-kusama --reserved-only --reserved-nodes /dns/kbr13.rotko.net/tcp/35553/wss/p2p/12D3KooWAmBp54mUEYtvsk2kxNEsDbAvdUMcaghxKXgUQxmPEQ66 --no-hardware-benchmarks" "./polkadot-parachain --chain bridge-hub-westend --reserved-only --reserved-nodes /dns/wbr13.rotko.net/tcp/33563/p2p/12D3KooWJyeRHpxZZbfBCNEgeUFzmRC5AMSAs2tJhjJS1k5hULkD --no-hardware-benchmarks" "./polkadot-parachain --chain bridge-hub-westend --reserved-only --reserved-nodes 
/dns/wbr13.rotko.net/tcp/34563/ws/p2p/12D3KooWJyeRHpxZZbfBCNEgeUFzmRC5AMSAs2tJhjJS1k5hULkD --no-hardware-benchmarks" "./polkadot-parachain --chain bridge-hub-westend --reserved-only --reserved-nodes /dns/wbr13.rotko.net/tcp/35563/wss/p2p/12D3KooWJyeRHpxZZbfBCNEgeUFzmRC5AMSAs2tJhjJS1k5hULkD --no-hardware-benchmarks" "./polkadot-parachain --chain collectives-polkadot --reserved-only --reserved-nodes /dns/pch13.rotko.net/tcp/33573/wss/p2p/12D3KooWRXudHoazPZ9osMfdY38e8CBxQLD4RhrVeHpRSNNpcDtH --no-hardware-benchmarks" "./polkadot-parachain --chain collectives-polkadot --reserved-only --reserved-nodes /dns/pch13.rotko.net/tcp/34573/wss/p2p/12D3KooWRXudHoazPZ9osMfdY38e8CBxQLD4RhrVeHpRSNNpcDtH --no-hardware-benchmarks" "./polkadot-parachain --chain collectives-polkadot --reserved-only --reserved-nodes /dns/pch13.rotko.net/tcp/35573/wss/p2p/12D3KooWRXudHoazPZ9osMfdY38e8CBxQLD4RhrVeHpRSNNpcDtH --no-hardware-benchmarks" "./polkadot-parachain --chain collectives-westend --reserved-only --reserved-nodes /dns/wch13.rotko.net/tcp/33593/p2p/12D3KooWPG85zhuSRoyptjLkFD4iJFistjiBmc15JgQ96B4fdXYr --no-hardware-benchmarks" "./polkadot-parachain --chain collectives-westend --reserved-only --reserved-nodes /dns/wch13.rotko.net/tcp/34593/ws/p2p/12D3KooWPG85zhuSRoyptjLkFD4iJFistjiBmc15JgQ96B4fdXYr --no-hardware-benchmarks" "./polkadot-parachain --chain collectives-westend --reserved-only --reserved-nodes /dns/wch13.rotko.net/tcp/35593/wss/p2p/12D3KooWPG85zhuSRoyptjLkFD4iJFistjiBmc15JgQ96B4fdXYr --no-hardware-benchmarks" ) ``` --- cumulus/parachains/chain-specs/asset-hub-kusama.json | 5 ++++- cumulus/parachains/chain-specs/asset-hub-polkadot.json | 5 ++++- cumulus/parachains/chain-specs/asset-hub-westend.json | 5 ++++- cumulus/parachains/chain-specs/bridge-hub-kusama.json | 5 ++++- cumulus/parachains/chain-specs/bridge-hub-polkadot.json | 5 ++++- cumulus/parachains/chain-specs/bridge-hub-westend.json | 5 ++++- cumulus/parachains/chain-specs/collectives-polkadot.json | 5 ++++- cumulus/parachains/chain-specs/collectives-westend.json | 5 ++++- 8 files changed, 32 insertions(+), 8 deletions(-) diff --git a/cumulus/parachains/chain-specs/asset-hub-kusama.json b/cumulus/parachains/chain-specs/asset-hub-kusama.json index 946bfc5983d..fba74b17f96 100644 --- a/cumulus/parachains/chain-specs/asset-hub-kusama.json +++ b/cumulus/parachains/chain-specs/asset-hub-kusama.json @@ -22,7 +22,10 @@ "/dns/statemine-boot-ng.dwellir.com/tcp/30343/p2p/12D3KooWQNJKBaNfW6Nn7HZDi5pSSEFmHL2Qz7chr9RksQUDR1Wk", "/dns/statemine-boot-ng.dwellir.com/tcp/443/wss/p2p/12D3KooWQNJKBaNfW6Nn7HZDi5pSSEFmHL2Qz7chr9RksQUDR1Wk", "/dns/statemine-bootnode.radiumblock.com/tcp/30333/p2p/12D3KooWCKUrE5uaXQ288ko3Ex3zCyozyJLG47KEYTopinnXNtYL", - "/dns/statemine-bootnode.radiumblock.com/tcp/30336/wss/p2p/12D3KooWCKUrE5uaXQ288ko3Ex3zCyozyJLG47KEYTopinnXNtYL" + "/dns/statemine-bootnode.radiumblock.com/tcp/30336/wss/p2p/12D3KooWCKUrE5uaXQ288ko3Ex3zCyozyJLG47KEYTopinnXNtYL", + "/dns/mine14.rotko.net/tcp/33524/p2p/12D3KooWJUFnjR2PNbsJhudwPVaWCoZy1acPGKjM2cSuGj345BBu", + "/dns/mine14.rotko.net/tcp/34524/ws/p2p/12D3KooWJUFnjR2PNbsJhudwPVaWCoZy1acPGKjM2cSuGj345BBu", + "/dns/mine14.rotko.net/tcp/35524/wss/p2p/12D3KooWJUFnjR2PNbsJhudwPVaWCoZy1acPGKjM2cSuGj345BBu" ], "telemetryEndpoints": null, "protocolId": null, diff --git a/cumulus/parachains/chain-specs/asset-hub-polkadot.json b/cumulus/parachains/chain-specs/asset-hub-polkadot.json index c26506eb995..685a00ddc71 100644 --- a/cumulus/parachains/chain-specs/asset-hub-polkadot.json +++ 
b/cumulus/parachains/chain-specs/asset-hub-polkadot.json @@ -22,7 +22,10 @@ "/dns/statemint-boot-ng.dwellir.com/tcp/30344/p2p/12D3KooWEFrNuNk8fPdQS2hf34Gmqi6dGSvrETshGJUrqrvfRDZr", "/dns/statemint-boot-ng.dwellir.com/tcp/443/wss/p2p/12D3KooWEFrNuNk8fPdQS2hf34Gmqi6dGSvrETshGJUrqrvfRDZr", "/dns/statemint-bootnode.radiumblock.com/tcp/30336/wss/p2p/12D3KooWLKxHom7f3XawRJqrF8RwiKK5Sj3qZqz5c7hF6eJeXhTx", - "/dns/statemint-bootnode.radiumblock.com/tcp/30333/p2p/12D3KooWLKxHom7f3XawRJqrF8RwiKK5Sj3qZqz5c7hF6eJeXhTx" + "/dns/statemint-bootnode.radiumblock.com/tcp/30333/p2p/12D3KooWLKxHom7f3XawRJqrF8RwiKK5Sj3qZqz5c7hF6eJeXhTx", + "/dns/mint14.rotko.net/tcp/33514/p2p/12D3KooWKkzLjYF6M5eEs7nYiqEtRqY8SGVouoCwo3nCWsRnThDW", + "/dns/mint14.rotko.net/tcp/34514/ws/p2p/12D3KooWKkzLjYF6M5eEs7nYiqEtRqY8SGVouoCwo3nCWsRnThDW", + "/dns/mint14.rotko.net/tcp/35514/wss/p2p/12D3KooWKkzLjYF6M5eEs7nYiqEtRqY8SGVouoCwo3nCWsRnThDW" ], "telemetryEndpoints": null, "protocolId": null, diff --git a/cumulus/parachains/chain-specs/asset-hub-westend.json b/cumulus/parachains/chain-specs/asset-hub-westend.json index f0e71981e7a..6f42b5f7d8b 100644 --- a/cumulus/parachains/chain-specs/asset-hub-westend.json +++ b/cumulus/parachains/chain-specs/asset-hub-westend.json @@ -20,7 +20,10 @@ "/dns/westmint-boot-ng.dwellir.com/tcp/30345/p2p/12D3KooWFZ9xqApB1wnFYkbe1qJ5Jqwxe2f3i8W25F3tKNXy59ux", "/dns/westmint-boot-ng.dwellir.com/tcp/443/wss/p2p/12D3KooWFZ9xqApB1wnFYkbe1qJ5Jqwxe2f3i8W25F3tKNXy59ux", "/dns/westmint-bootnode.radiumblock.com/tcp/30336/wss/p2p/12D3KooWDoq4PVdWm5nzRSvEz3DSSKjVgRhWVUaKyi5JMKwJKYbk", - "/dns/westmint-bootnode.radiumblock.com/tcp/30333/p2p/12D3KooWDoq4PVdWm5nzRSvEz3DSSKjVgRhWVUaKyi5JMKwJKYbk" + "/dns/westmint-bootnode.radiumblock.com/tcp/30333/p2p/12D3KooWDoq4PVdWm5nzRSvEz3DSSKjVgRhWVUaKyi5JMKwJKYbk", + "/dns/wmint14.rotko.net/tcp/33534/p2p/12D3KooWE4UDXqgtTcMCyUQ8S4uvaT8VMzzTBA6NWmKuYwTacWuN", + "/dns/wmint14.rotko.net/tcp/34534/ws/p2p/12D3KooWE4UDXqgtTcMCyUQ8S4uvaT8VMzzTBA6NWmKuYwTacWuN", + "/dns/wmint14.rotko.net/tcp/35534/wss/p2p/12D3KooWE4UDXqgtTcMCyUQ8S4uvaT8VMzzTBA6NWmKuYwTacWuN" ], "telemetryEndpoints": null, "protocolId": null, diff --git a/cumulus/parachains/chain-specs/bridge-hub-kusama.json b/cumulus/parachains/chain-specs/bridge-hub-kusama.json index 9daa60fa263..0ef81806cc5 100644 --- a/cumulus/parachains/chain-specs/bridge-hub-kusama.json +++ b/cumulus/parachains/chain-specs/bridge-hub-kusama.json @@ -22,7 +22,10 @@ "/dns/kusama-bridge-hub-boot-ng.dwellir.com/tcp/30337/p2p/12D3KooWBFskNCQDVjuUeBh6vrszWrUvYMBBhtZRLnoTZDdLYbW5", "/dns/kusama-bridge-hub-boot-ng.dwellir.com/tcp/443/wss/p2p/12D3KooWBFskNCQDVjuUeBh6vrszWrUvYMBBhtZRLnoTZDdLYbW5", "/dns/bridgehub-kusama-bootnode.radiumblock.com/tcp/30333/p2p/12D3KooWQMWofXj8v3RroDNnrhv1iURqm8vnaG98AdGnCn2YoDcW", - "/dns/bridgehub-kusama-bootnode.radiumblock.com/tcp/30336/wss/p2p/12D3KooWQMWofXj8v3RroDNnrhv1iURqm8vnaG98AdGnCn2YoDcW" + "/dns/bridgehub-kusama-bootnode.radiumblock.com/tcp/30336/wss/p2p/12D3KooWQMWofXj8v3RroDNnrhv1iURqm8vnaG98AdGnCn2YoDcW", + "/dns/kbr13.rotko.net/tcp/33553/p2p/12D3KooWAmBp54mUEYtvsk2kxNEsDbAvdUMcaghxKXgUQxmPEQ66", + "/dns/kbr13.rotko.net/tcp/34553/ws/p2p/12D3KooWAmBp54mUEYtvsk2kxNEsDbAvdUMcaghxKXgUQxmPEQ66", + "/dns/kbr13.rotko.net/tcp/35553/wss/p2p/12D3KooWAmBp54mUEYtvsk2kxNEsDbAvdUMcaghxKXgUQxmPEQ66" ], "telemetryEndpoints": null, "protocolId": null, diff --git a/cumulus/parachains/chain-specs/bridge-hub-polkadot.json b/cumulus/parachains/chain-specs/bridge-hub-polkadot.json index d3e884284b4..130bdf31ef2 100644 --- 
a/cumulus/parachains/chain-specs/bridge-hub-polkadot.json +++ b/cumulus/parachains/chain-specs/bridge-hub-polkadot.json @@ -18,7 +18,10 @@ "/dns/boot-node.helikon.io/tcp/8220/p2p/12D3KooWC38TZJA8ZBXZgAYVrceoJ56jNNLJPdpk3ojeFkTAwZVp", "/dns/boot-node.helikon.io/tcp/8222/wss/p2p/12D3KooWC38TZJA8ZBXZgAYVrceoJ56jNNLJPdpk3ojeFkTAwZVp", "/dns/bridgehub-polkadot-bootnode.radiumblock.com/tcp/30336/wss/p2p/12D3KooWPNZm78tWUmKbta3SXdkqTPsquRc8ekEbJjZsGGi7YiRi", - "/dns/bridgehub-polkadot-bootnode.radiumblock.com/tcp/30333/p2p/12D3KooWPNZm78tWUmKbta3SXdkqTPsquRc8ekEbJjZsGGi7YiRi" + "/dns/bridgehub-polkadot-bootnode.radiumblock.com/tcp/30333/p2p/12D3KooWPNZm78tWUmKbta3SXdkqTPsquRc8ekEbJjZsGGi7YiRi", + "/dns/pbr13.rotko.net/tcp/33543/p2p/12D3KooWMxZY7tDc2Rh454VaJJ7RexKAXVS6xSBEvTnXSGCnuGDw", + "/dns/pbr13.rotko.net/tcp/34543/ws/p2p/12D3KooWMxZY7tDc2Rh454VaJJ7RexKAXVS6xSBEvTnXSGCnuGDw", + "/dns/pbr13.rotko.net/tcp/35543/wss/p2p/12D3KooWMxZY7tDc2Rh454VaJJ7RexKAXVS6xSBEvTnXSGCnuGDw" ], "telemetryEndpoints": null, "protocolId": null, diff --git a/cumulus/parachains/chain-specs/bridge-hub-westend.json b/cumulus/parachains/chain-specs/bridge-hub-westend.json index dde3d437f41..018ab0ee6fd 100644 --- a/cumulus/parachains/chain-specs/bridge-hub-westend.json +++ b/cumulus/parachains/chain-specs/bridge-hub-westend.json @@ -16,7 +16,10 @@ "/dns/boot-node.helikon.io/tcp/9220/p2p/12D3KooWK3K1Mu5Jjg96Lt9DUzg84KsWnZo44V4KB7mvhGqi6xnp", "/dns/boot-node.helikon.io/tcp/9222/wss/p2p/12D3KooWK3K1Mu5Jjg96Lt9DUzg84KsWnZo44V4KB7mvhGqi6xnp", "/dns/bridgehub-westend-bootnode.radiumblock.com/tcp/30333/p2p/12D3KooWBsBArCMxmQyo3feCEqMWuwyhb2LTRK8hmCCJxgrNeMke", - "/dns/bridgehub-westend-bootnode.radiumblock.com/tcp/30336/wss/p2p/12D3KooWBsBArCMxmQyo3feCEqMWuwyhb2LTRK8hmCCJxgrNeMke" + "/dns/bridgehub-westend-bootnode.radiumblock.com/tcp/30336/wss/p2p/12D3KooWBsBArCMxmQyo3feCEqMWuwyhb2LTRK8hmCCJxgrNeMke", + "/dns/wbr13.rotko.net/tcp/33563/p2p/12D3KooWJyeRHpxZZbfBCNEgeUFzmRC5AMSAs2tJhjJS1k5hULkD", + "/dns/wbr13.rotko.net/tcp/34563/ws/p2p/12D3KooWJyeRHpxZZbfBCNEgeUFzmRC5AMSAs2tJhjJS1k5hULkD", + "/dns/wbr13.rotko.net/tcp/35563/wss/p2p/12D3KooWJyeRHpxZZbfBCNEgeUFzmRC5AMSAs2tJhjJS1k5hULkD" ], "telemetryEndpoints": null, "protocolId": null, diff --git a/cumulus/parachains/chain-specs/collectives-polkadot.json b/cumulus/parachains/chain-specs/collectives-polkadot.json index 003c6373429..e9f690234e4 100644 --- a/cumulus/parachains/chain-specs/collectives-polkadot.json +++ b/cumulus/parachains/chain-specs/collectives-polkadot.json @@ -22,7 +22,10 @@ "/dns/polkadot-collectives-boot-ng.dwellir.com/tcp/30341/p2p/12D3KooWDMFYCNRAQcSRNV7xu2xv8319goSEbSHW4TnXRz6EpPKc", "/dns/polkadot-collectives-boot-ng.dwellir.com/tcp/443/wss/p2p/12D3KooWDMFYCNRAQcSRNV7xu2xv8319goSEbSHW4TnXRz6EpPKc", "/dns/collectives-polkadot-bootnode.radiumblock.com/tcp/30333/p2p/12D3KooWDumvnNwPbBg5inBEapgjKU7ECdMHHgwfYeGWUkzYUE1c", - "/dns/collectives-polkadot-bootnode.radiumblock.com/tcp/30336/wss/p2p/12D3KooWDumvnNwPbBg5inBEapgjKU7ECdMHHgwfYeGWUkzYUE1c" + "/dns/collectives-polkadot-bootnode.radiumblock.com/tcp/30336/wss/p2p/12D3KooWDumvnNwPbBg5inBEapgjKU7ECdMHHgwfYeGWUkzYUE1c", + "/dns/pch13.rotko.net/tcp/33573/p2p/12D3KooWRXudHoazPZ9osMfdY38e8CBxQLD4RhrVeHpRSNNpcDtH", + "/dns/pch13.rotko.net/tcp/34573/ws/p2p/12D3KooWRXudHoazPZ9osMfdY38e8CBxQLD4RhrVeHpRSNNpcDtH", + "/dns/pch13.rotko.net/tcp/35573/wss/p2p/12D3KooWRXudHoazPZ9osMfdY38e8CBxQLD4RhrVeHpRSNNpcDtH" ], "telemetryEndpoints": null, "protocolId": null, diff --git 
a/cumulus/parachains/chain-specs/collectives-westend.json b/cumulus/parachains/chain-specs/collectives-westend.json index e06671faa04..7385889f0ec 100644 --- a/cumulus/parachains/chain-specs/collectives-westend.json +++ b/cumulus/parachains/chain-specs/collectives-westend.json @@ -20,7 +20,10 @@ "/dns/collectives-westend.bootnode.amforc.com/tcp/30340/p2p/12D3KooWERPzUhHau6o2XZRUi3tn7544rYiaHL418Nw5t8fYWP1F", "/dns/collectives-westend.bootnode.amforc.com/tcp/30333/wss/p2p/12D3KooWERPzUhHau6o2XZRUi3tn7544rYiaHL418Nw5t8fYWP1F", "/dns/westend-collectives-boot-ng.dwellir.com/tcp/30340/p2p/12D3KooWPFM93jgm4pgxx8PM8WJKAJF49qia8jRB95uciUQwYh7m", - "/dns/westend-collectives-boot-ng.dwellir.com/tcp/443/wss/p2p/12D3KooWPFM93jgm4pgxx8PM8WJKAJF49qia8jRB95uciUQwYh7m" + "/dns/westend-collectives-boot-ng.dwellir.com/tcp/443/wss/p2p/12D3KooWPFM93jgm4pgxx8PM8WJKAJF49qia8jRB95uciUQwYh7m", + "/dns/wch13.rotko.net/tcp/33593/p2p/12D3KooWPG85zhuSRoyptjLkFD4iJFistjiBmc15JgQ96B4fdXYr", + "/dns/wch13.rotko.net/tcp/34593/ws/p2p/12D3KooWPG85zhuSRoyptjLkFD4iJFistjiBmc15JgQ96B4fdXYr", + "/dns/wch13.rotko.net/tcp/35593/wss/p2p/12D3KooWPG85zhuSRoyptjLkFD4iJFistjiBmc15JgQ96B4fdXYr" ], "telemetryEndpoints": null, "protocolId": null, -- GitLab From 8f03570a461c5a05c09ef42ae6edcaea5ef4c503 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 29 Nov 2023 16:31:31 +0200 Subject: [PATCH 156/199] Bump fs4 from 0.6.6 to 0.7.0 (#1844) --- Cargo.lock | 4 ++-- substrate/client/storage-monitor/Cargo.toml | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index db5717a67ec..f94c87dad48 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5632,9 +5632,9 @@ dependencies = [ [[package]] name = "fs4" -version = "0.6.6" +version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2eeb4ed9e12f43b7fa0baae3f9cdda28352770132ef2e09a23760c29cae8bd47" +checksum = "29f9df8a11882c4e3335eb2d18a0137c505d9ca927470b0cac9c6f0ae07d28f7" dependencies = [ "rustix 0.38.21", "windows-sys 0.48.0", diff --git a/substrate/client/storage-monitor/Cargo.toml b/substrate/client/storage-monitor/Cargo.toml index 021ee76240b..4c1f2e46504 100644 --- a/substrate/client/storage-monitor/Cargo.toml +++ b/substrate/client/storage-monitor/Cargo.toml @@ -11,7 +11,7 @@ homepage = "https://substrate.io" [dependencies] clap = { version = "4.4.6", features = ["derive", "string"] } log = "0.4.17" -fs4 = "0.6.3" +fs4 = "0.7.0" sc-client-db = { path = "../db", default-features = false} sp-core = { path = "../../primitives/core" } tokio = "1.22.0" -- GitLab From d3d301fa422f113ee7fa4d62fa45b155bd42a20a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Wed, 29 Nov 2023 15:31:51 +0100 Subject: [PATCH 157/199] ParachainHost: No need to be generic over the block or hash type (#2537) The `BlockNumber` and `Hash` are fixed types any way. --- polkadot/node/service/src/fake_runtime_api.rs | 2 +- polkadot/primitives/src/runtime_api.rs | 35 ++++++++++--------- polkadot/runtime/rococo/src/lib.rs | 2 +- polkadot/runtime/test-runtime/src/lib.rs | 2 +- polkadot/runtime/westend/src/lib.rs | 2 +- 5 files changed, 23 insertions(+), 20 deletions(-) diff --git a/polkadot/node/service/src/fake_runtime_api.rs b/polkadot/node/service/src/fake_runtime_api.rs index d9553afa024..ccc3da22400 100644 --- a/polkadot/node/service/src/fake_runtime_api.rs +++ b/polkadot/node/service/src/fake_runtime_api.rs @@ -116,7 +116,7 @@ sp_api::impl_runtime_apis! 
{ } } - impl runtime_api::ParachainHost for Runtime { + impl runtime_api::ParachainHost for Runtime { fn validators() -> Vec { unimplemented!() } diff --git a/polkadot/primitives/src/runtime_api.rs b/polkadot/primitives/src/runtime_api.rs index e4c1d590f45..331728b2590 100644 --- a/polkadot/primitives/src/runtime_api.rs +++ b/polkadot/primitives/src/runtime_api.rs @@ -116,11 +116,10 @@ use crate::{ async_backing, slashing, vstaging, AsyncBackingParams, BlockNumber, CandidateCommitments, CandidateEvent, CandidateHash, CommittedCandidateReceipt, CoreState, DisputeState, - ExecutorParams, GroupRotationInfo, OccupiedCoreAssumption, PersistedValidationData, + ExecutorParams, GroupRotationInfo, Hash, OccupiedCoreAssumption, PersistedValidationData, PvfCheckStatement, ScrapedOnChainVotes, SessionIndex, SessionInfo, ValidatorId, ValidatorIndex, ValidatorSignature, }; -use parity_scale_codec::{Decode, Encode}; use polkadot_core_primitives as pcp; use polkadot_parachain_primitives::primitives as ppp; use sp_std::{collections::btree_map::BTreeMap, prelude::*}; @@ -128,18 +127,18 @@ use sp_std::{collections::btree_map::BTreeMap, prelude::*}; sp_api::decl_runtime_apis! { /// The API for querying the state of parachains on-chain. #[api_version(5)] - pub trait ParachainHost { + pub trait ParachainHost { /// Get the current validators. fn validators() -> Vec; /// Returns the validator groups and rotation info localized based on the hypothetical child /// of a block whose state this is invoked on. Note that `now` in the `GroupRotationInfo` /// should be the successor of the number of the block. - fn validator_groups() -> (Vec>, GroupRotationInfo); + fn validator_groups() -> (Vec>, GroupRotationInfo); /// Yields information on all availability cores as relevant to the child block. /// Cores are either free or occupied. Free cores can have paras assigned to them. - fn availability_cores() -> Vec>; + fn availability_cores() -> Vec>; /// Yields the persisted validation data for the given `ParaId` along with an assumption that /// should be used if the para currently occupies a core. @@ -147,15 +146,15 @@ sp_api::decl_runtime_apis! { /// Returns `None` if either the para is not registered or the assumption is `Freed` /// and the para already occupies a core. fn persisted_validation_data(para_id: ppp::Id, assumption: OccupiedCoreAssumption) - -> Option>; + -> Option>; /// Returns the persisted validation data for the given `ParaId` along with the corresponding /// validation code hash. Instead of accepting assumption about the para, matches the validation /// data hash against an expected one and yields `None` if they're not equal. fn assumed_validation_data( para_id: ppp::Id, - expected_persisted_validation_data_hash: pcp::v2::Hash, - ) -> Option<(PersistedValidationData, ppp::ValidationCodeHash)>; + expected_persisted_validation_data_hash: Hash, + ) -> Option<(PersistedValidationData, ppp::ValidationCodeHash)>; /// Checks if the given validation outputs pass the acceptance criteria. fn check_validation_outputs(para_id: ppp::Id, outputs: CandidateCommitments) -> bool; @@ -169,30 +168,34 @@ sp_api::decl_runtime_apis! { /// /// Returns `None` if either the para is not registered or the assumption is `Freed` /// and the para already occupies a core. - fn validation_code(para_id: ppp::Id, assumption: OccupiedCoreAssumption) - -> Option; + fn validation_code( + para_id: ppp::Id, + assumption: OccupiedCoreAssumption, + ) -> Option; /// Get the receipt of a candidate pending availability. 
This returns `Some` for any paras /// assigned to occupied cores in `availability_cores` and `None` otherwise. - fn candidate_pending_availability(para_id: ppp::Id) -> Option>; + fn candidate_pending_availability(para_id: ppp::Id) -> Option>; /// Get a vector of events concerning candidates that occurred within a block. - fn candidate_events() -> Vec>; + fn candidate_events() -> Vec>; /// Get all the pending inbound messages in the downward message queue for a para. fn dmq_contents( recipient: ppp::Id, - ) -> Vec>; + ) -> Vec>; /// Get the contents of all channels addressed to the given recipient. Channels that have no /// messages in them are also included. - fn inbound_hrmp_channels_contents(recipient: ppp::Id) -> BTreeMap>>; + fn inbound_hrmp_channels_contents( + recipient: ppp::Id, + ) -> BTreeMap>>; /// Get the validation code from its hash. fn validation_code_by_hash(hash: ppp::ValidationCodeHash) -> Option; /// Scrape dispute relevant from on-chain, backing votes and resolved disputes. - fn on_chain_votes() -> Option>; + fn on_chain_votes() -> Option>; /***** Added in v2 *****/ @@ -253,7 +256,7 @@ sp_api::decl_runtime_apis! { /// Returns the state of parachain backing for a given para. #[api_version(7)] - fn para_backing_state(_: ppp::Id) -> Option>; + fn para_backing_state(_: ppp::Id) -> Option>; /// Returns candidate's acceptance limitations for asynchronous backing for a relay parent. #[api_version(7)] diff --git a/polkadot/runtime/rococo/src/lib.rs b/polkadot/runtime/rococo/src/lib.rs index 5ac92a73732..23d32b59a51 100644 --- a/polkadot/runtime/rococo/src/lib.rs +++ b/polkadot/runtime/rococo/src/lib.rs @@ -1802,7 +1802,7 @@ sp_api::impl_runtime_apis! { } #[api_version(9)] - impl primitives::runtime_api::ParachainHost for Runtime { + impl primitives::runtime_api::ParachainHost for Runtime { fn validators() -> Vec { parachains_runtime_api_impl::validators::() } diff --git a/polkadot/runtime/test-runtime/src/lib.rs b/polkadot/runtime/test-runtime/src/lib.rs index 596e65eca06..7ec9f63748c 100644 --- a/polkadot/runtime/test-runtime/src/lib.rs +++ b/polkadot/runtime/test-runtime/src/lib.rs @@ -826,7 +826,7 @@ sp_api::impl_runtime_apis! { } } - impl primitives::runtime_api::ParachainHost for Runtime { + impl primitives::runtime_api::ParachainHost for Runtime { fn validators() -> Vec { runtime_impl::validators::() } diff --git a/polkadot/runtime/westend/src/lib.rs b/polkadot/runtime/westend/src/lib.rs index f7ff2d5e9e1..36c8c12be21 100644 --- a/polkadot/runtime/westend/src/lib.rs +++ b/polkadot/runtime/westend/src/lib.rs @@ -1796,7 +1796,7 @@ sp_api::impl_runtime_apis! { } #[api_version(9)] - impl primitives::runtime_api::ParachainHost for Runtime { + impl primitives::runtime_api::ParachainHost for Runtime { fn validators() -> Vec { parachains_runtime_api_impl::validators::() } -- GitLab From 2858cbc2480b6747978b9fec75b620688fe0a5b2 Mon Sep 17 00:00:00 2001 From: "Will | Paradox | ParaNodes.io" <79228812+paradox-tt@users.noreply.github.com> Date: Wed, 29 Nov 2023 14:40:26 +0000 Subject: [PATCH 158/199] Adding LuckyFriday's Bootnodes per IBP Application (#2538) Good day, This PR requests the inclusion of two bootnode entries for Polkadot, Kusama and Westend as part of LuckyFriday's IBP application. The nodes are hosted on self-owned hardware in a co-located facility. We've undertaken connectivity tests ourselves and also from members of the IBP. 
The test commands used are as follows: ``` polkadot --chain westend --base-path /tmp/node --name "Boot" --reserved-only --reserved-nodes "/dns/ibp-boot-westend.luckyfriday.io/tcp/30334/wss/p2p/12D3KooWDg1YEytdwFFNWroFj6gio4YFsMB3miSbHKgdpJteUMB9" --no-hardware-benchmarks polkadot --chain westend --base-path /tmp/node --name "Boot" --reserved-only --reserved-nodes "/dns/ibp-boot-westend.luckyfriday.io/tcp/30333/p2p/12D3KooWDg1YEytdwFFNWroFj6gio4YFsMB3miSbHKgdpJteUMB9" --no-hardware-benchmarks polkadot --chain kusama --base-path /tmp/node --name "Boot" --reserved-only --reserved-nodes "/dns/ibp-boot-kusama.luckyfriday.io/tcp/30334/wss/p2p/12D3KooW9vu1GWHBuxyhm7rZgD3fhGZpNajPXFexadvhujWMgwfT" --no-hardware-benchmarks polkadot --chain kusama --base-path /tmp/node --name "Boot" --reserved-only --reserved-nodes "/dns/ibp-boot-kusama.luckyfriday.io/tcp/30333/p2p/12D3KooW9vu1GWHBuxyhm7rZgD3fhGZpNajPXFexadvhujWMgwfT" --no-hardware-benchmarks polkadot --chain polkadot --base-path /tmp/node --name "Boot" --reserved-only --reserved-nodes "/dns/ibp-boot-polkadot.luckyfriday.io/tcp/30334/wss/p2p/12D3KooWEjk6QXrZJ26fLpaajisJGHiz6WiQsR8k7mkM9GmWKnRZ" --no-hardware-benchmarks polkadot --chain polkadot --base-path /tmp/node --name "Boot" --reserved-only --reserved-nodes "/dns/ibp-boot-polkadot.luckyfriday.io/tcp/30333/p2p/12D3KooWEjk6QXrZJ26fLpaajisJGHiz6WiQsR8k7mkM9GmWKnRZ" --no-hardware-benchmarks ``` All tests yielded syncing with 1 peer, a positive result. We have also backed up our node-key in the event that restoration is required. I hope that the aforementioned meets the requirement for inclusion and look forward to a speedy turnaround. Kind Regards, Will | Paradox --- polkadot/node/service/chain-specs/kusama.json | 4 +++- polkadot/node/service/chain-specs/polkadot.json | 4 +++- polkadot/node/service/chain-specs/westend.json | 4 +++- 3 files changed, 9 insertions(+), 3 deletions(-) diff --git a/polkadot/node/service/chain-specs/kusama.json b/polkadot/node/service/chain-specs/kusama.json index 6676bbe154b..979550c7570 100644 --- a/polkadot/node/service/chain-specs/kusama.json +++ b/polkadot/node/service/chain-specs/kusama.json @@ -33,7 +33,9 @@ "/dns/ksm-bootnode.stakeworld.io/tcp/30301/ws/p2p/12D3KooWFRin7WWVS6RgUsSpkfUHSv4tfGKnr2zJPmf1pbMv118H", "/dns/ksm-bootnode.stakeworld.io/tcp/30302/wss/p2p/12D3KooWFRin7WWVS6RgUsSpkfUHSv4tfGKnr2zJPmf1pbMv118H", "/dns/ksm14.rotko.net/tcp/35224/wss/p2p/12D3KooWAa5THTw8HPfnhEei23HdL8P9McBXdozG2oTtMMksjZkK", - "/dns/ksm14.rotko.net/tcp/33224/p2p/12D3KooWAa5THTw8HPfnhEei23HdL8P9McBXdozG2oTtMMksjZkK" + "/dns/ksm14.rotko.net/tcp/33224/p2p/12D3KooWAa5THTw8HPfnhEei23HdL8P9McBXdozG2oTtMMksjZkK", + "/dns/ibp-boot-kusama.luckyfriday.io/tcp/30333/p2p/12D3KooW9vu1GWHBuxyhm7rZgD3fhGZpNajPXFexadvhujWMgwfT", + "/dns/ibp-boot-kusama.luckyfriday.io/tcp/30334/wss/p2p/12D3KooW9vu1GWHBuxyhm7rZgD3fhGZpNajPXFexadvhujWMgwfT" ], "telemetryEndpoints": [ [ diff --git a/polkadot/node/service/chain-specs/polkadot.json b/polkadot/node/service/chain-specs/polkadot.json index 53349208816..71dbb900403 100644 --- a/polkadot/node/service/chain-specs/polkadot.json +++ b/polkadot/node/service/chain-specs/polkadot.json @@ -34,7 +34,9 @@ "/dns/dot-bootnode.stakeworld.io/tcp/30311/ws/p2p/12D3KooWAb5MyC1UJiEQJk4Hg4B2Vi3AJdqSUhTGYUqSnEqCFMFg", "/dns/dot-bootnode.stakeworld.io/tcp/30312/wss/p2p/12D3KooWAb5MyC1UJiEQJk4Hg4B2Vi3AJdqSUhTGYUqSnEqCFMFg", "/dns/dot14.rotko.net/tcp/35214/wss/p2p/12D3KooWPyEvPEXghnMC67Gff6PuZiSvfx3fmziKiPZcGStZ5xff", - 
"/dns/dot14.rotko.net/tcp/33214/p2p/12D3KooWPyEvPEXghnMC67Gff6PuZiSvfx3fmziKiPZcGStZ5xff" + "/dns/dot14.rotko.net/tcp/33214/p2p/12D3KooWPyEvPEXghnMC67Gff6PuZiSvfx3fmziKiPZcGStZ5xff", + "/dns/ibp-boot-polkadot.luckyfriday.io/tcp/30333/p2p/12D3KooWEjk6QXrZJ26fLpaajisJGHiz6WiQsR8k7mkM9GmWKnRZ", + "/dns/ibp-boot-polkadot.luckyfriday.io/tcp/30334/wss/p2p/12D3KooWEjk6QXrZJ26fLpaajisJGHiz6WiQsR8k7mkM9GmWKnRZ" ], "telemetryEndpoints": [ [ diff --git a/polkadot/node/service/chain-specs/westend.json b/polkadot/node/service/chain-specs/westend.json index b2ffba9304b..697675871fc 100644 --- a/polkadot/node/service/chain-specs/westend.json +++ b/polkadot/node/service/chain-specs/westend.json @@ -31,7 +31,9 @@ "/dns/wnd-bootnode.stakeworld.io/tcp/30321/ws/p2p/12D3KooWBYdKipcNbrV5rCbgT5hco8HMLME7cE9hHC3ckqCKDuzP", "/dns/wnd-bootnode.stakeworld.io/tcp/30322/wss/p2p/12D3KooWBYdKipcNbrV5rCbgT5hco8HMLME7cE9hHC3ckqCKDuzP", "/dns/wnd14.rotko.net/tcp/35234/wss/p2p/12D3KooWLK8Zj1uZ46phU3vQwiDVda8tB76S8J26rXZQLHpwWkDJ", - "/dns/wnd14.rotko.net/tcp/33234/p2p/12D3KooWLK8Zj1uZ46phU3vQwiDVda8tB76S8J26rXZQLHpwWkDJ" + "/dns/wnd14.rotko.net/tcp/33234/p2p/12D3KooWLK8Zj1uZ46phU3vQwiDVda8tB76S8J26rXZQLHpwWkDJ", + "/dns/ibp-boot-westend.luckyfriday.io/tcp/30333/p2p/12D3KooWDg1YEytdwFFNWroFj6gio4YFsMB3miSbHKgdpJteUMB9", + "/dns/ibp-boot-westend.luckyfriday.io/tcp/30334/wss/p2p/12D3KooWDg1YEytdwFFNWroFj6gio4YFsMB3miSbHKgdpJteUMB9" ], "telemetryEndpoints": [ [ -- GitLab From 08f29af6cd7866e1010a15c2a0ddfc1a07f3a597 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Wed, 29 Nov 2023 17:29:50 +0100 Subject: [PATCH 159/199] sp-api: Sprinkle some `automatically_derived` attributes This attribute is informing tooling that the code is automatically derived and thus, should not enable any linting. 
--- .../primitives/api/proc-macro/src/impl_runtime_apis.rs | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/substrate/primitives/api/proc-macro/src/impl_runtime_apis.rs b/substrate/primitives/api/proc-macro/src/impl_runtime_apis.rs index e439a796e28..e97291bc58a 100644 --- a/substrate/primitives/api/proc-macro/src/impl_runtime_apis.rs +++ b/substrate/primitives/api/proc-macro/src/impl_runtime_apis.rs @@ -275,6 +275,7 @@ fn generate_runtime_api_base_structures() -> Result { extensions_generated_for: std::cell::RefCell>, } + #[automatically_derived] impl> #crate_::ApiExt for RuntimeApiImpl { @@ -367,6 +368,7 @@ fn generate_runtime_api_base_structures() -> Result { } } + #[automatically_derived] impl #crate_::ConstructRuntimeApi for RuntimeApi where @@ -389,6 +391,7 @@ fn generate_runtime_api_base_structures() -> Result { } } + #[automatically_derived] impl> RuntimeApiImpl { fn commit_or_rollback_transaction(&self, commit: bool) { let proof = "\ @@ -685,9 +688,11 @@ fn generate_api_impl_for_runtime_api(impls: &[ItemImpl]) -> Result // remove the trait to get just the module path runtime_mod_path.segments.pop(); - let processed_impl = + let mut processed_impl = ApiRuntimeImplToApiRuntimeApiImpl { runtime_block }.process(impl_.clone()); + processed_impl.attrs.push(parse_quote!(#[automatically_derived])); + result.push(processed_impl); } -- GitLab From eb46b99b29e80870caa96bb46158bc955429d105 Mon Sep 17 00:00:00 2001 From: Vladimir Istyufeev Date: Wed, 29 Nov 2023 21:20:05 +0400 Subject: [PATCH 160/199] Disable trace-level logging from the `test-linux-stable-int` (#2546) --- .gitlab/pipeline/test.yml | 1 - 1 file changed, 1 deletion(-) diff --git a/.gitlab/pipeline/test.yml b/.gitlab/pipeline/test.yml index 190249d2d0f..265b378ef5b 100644 --- a/.gitlab/pipeline/test.yml +++ b/.gitlab/pipeline/test.yml @@ -395,7 +395,6 @@ test-linux-stable-int: RUN_UI_TESTS: 1 script: - WASM_BUILD_NO_COLOR=1 - RUST_LOG=sync=trace,consensus=trace,client=trace,state-db=trace,db=trace,forks=trace,state_db=trace,storage_cache=trace time cargo test -p staging-node-cli --release --locked -- --ignored # more information about this job can be found here: -- GitLab From ecdf34392527e5b3fd50879bb57f8d860eafabe3 Mon Sep 17 00:00:00 2001 From: Alexandru Vasile <60601340+lexnv@users.noreply.github.com> Date: Wed, 29 Nov 2023 20:15:33 +0200 Subject: [PATCH 161/199] chainHead: Backport error codes from spec (#2539) This PR backports the error codes from the spec. This relies on two specs for defining the error codes: - Our rpc-spec-v2 https://github.com/paritytech/json-rpc-interface-spec. - JSON-RPC spec https://www.jsonrpc.org/specification#error_object. To better describe the error codes, they are divided into two separate modules `rpc_spec_v2` and `json_rpc_spec` respectively. The `InvalidSubscriptionID` and `FetchBlockHeader` are merged into the JSON-RPC spec `INTERNAL_ERROR`. While the other error codes are adjusted from spec. 
Errors that are currently in use: - -32801 block hash not reported by chainHead_follow or block hash has been unpinned - -32802 chainHead_follow started with withRuntime == false - -32803 chainHead_follow did not generate an operationWaitingForContinue event The following are errors defined in the [JSON-RPC spec](https://www.jsonrpc.org/specification#error_object): - -32602 The provided parameter isn't one of the expected values, has different format or is missing - -32603 Internal server error Note: Error `-32801` must be introduced and generated by the outstanding https://github.com/paritytech/polkadot-sdk/issues/1505 Closes: https://github.com/paritytech/polkadot-sdk/issues/2530 --------- Signed-off-by: Alexandru Vasile --- .../rpc-spec-v2/src/chain_head/chain_head.rs | 8 ++- .../rpc-spec-v2/src/chain_head/error.rs | 64 +++++++++++-------- .../rpc-spec-v2/src/chain_head/tests.rs | 24 +++---- 3 files changed, 54 insertions(+), 42 deletions(-) diff --git a/substrate/client/rpc-spec-v2/src/chain_head/chain_head.rs b/substrate/client/rpc-spec-v2/src/chain_head/chain_head.rs index 866701a7dbf..8e04ac7b177 100644 --- a/substrate/client/rpc-spec-v2/src/chain_head/chain_head.rs +++ b/substrate/client/rpc-spec-v2/src/chain_head/chain_head.rs @@ -198,7 +198,9 @@ where let sub_id = match self.accept_subscription(&mut sink) { Ok(sub_id) => sub_id, Err(err) => { - sink.close(ChainHeadRpcError::InvalidSubscriptionID); + sink.close(ChainHeadRpcError::InternalError( + "Cannot generate subscription ID".into(), + )); return Err(err) }, }; @@ -306,7 +308,7 @@ where self.client .header(hash) .map(|opt_header| opt_header.map(|h| hex_string(&h.encode()))) - .map_err(ChainHeadRpcError::FetchBlockHeader) + .map_err(|err| ChainHeadRpcError::InternalError(err.to_string())) .map_err(Into::into) } @@ -393,7 +395,7 @@ where // Reject subscription if with_runtime is false. if !block_guard.has_runtime() { - return Err(ChainHeadRpcError::InvalidParam( + return Err(ChainHeadRpcError::InvalidRuntimeCall( "The runtime updates flag must be set".to_string(), ) .into()) diff --git a/substrate/client/rpc-spec-v2/src/chain_head/error.rs b/substrate/client/rpc-spec-v2/src/chain_head/error.rs index 811666428c5..a9b7d7f96e4 100644 --- a/substrate/client/rpc-spec-v2/src/chain_head/error.rs +++ b/substrate/client/rpc-spec-v2/src/chain_head/error.rs @@ -22,7 +22,6 @@ use jsonrpsee::{ core::Error as RpcError, types::error::{CallError, ErrorObject}, }; -use sp_blockchain::Error as BlockchainError; /// ChainHead RPC errors. #[derive(Debug, thiserror::Error)] @@ -30,44 +29,55 @@ pub enum Error { /// The provided block hash is invalid. #[error("Invalid block hash")] InvalidBlock, - /// Fetch block header error. - #[error("Could not fetch block header: {0}")] - FetchBlockHeader(BlockchainError), + /// The follow subscription was started with `withRuntime` set to `false`. + #[error("The `chainHead_follow` subscription was started with `withRuntime` set to `false`")] + InvalidRuntimeCall(String), + /// Wait-for-continue event not generated. + #[error("Wait for continue event was not generated for the subscription")] + InvalidContinue, /// Invalid parameter provided to the RPC method. #[error("Invalid parameter: {0}")] InvalidParam(String), - /// Invalid subscription ID provided by the RPC server. - #[error("Invalid subscription ID")] - InvalidSubscriptionID, + /// Internal error. + #[error("Internal error: {0}")] + InternalError(String), +} + +/// Errors for `chainHead` RPC module, as defined in +/// . 
+pub mod rpc_spec_v2 { + /// The provided block hash is invalid. + pub const INVALID_BLOCK_ERROR: i32 = -32801; + /// The follow subscription was started with `withRuntime` set to `false`. + pub const INVALID_RUNTIME_CALL: i32 = -32802; /// Wait-for-continue event not generated. - #[error("Wait for continue event was not generated for the subscription")] - InvalidContinue, + pub const INVALID_CONTINUE: i32 = -32803; } -// Base code for all `chainHead` errors. -const BASE_ERROR: i32 = 2000; -/// The provided block hash is invalid. -const INVALID_BLOCK_ERROR: i32 = BASE_ERROR + 1; -/// Fetch block header error. -const FETCH_BLOCK_HEADER_ERROR: i32 = BASE_ERROR + 2; -/// Invalid parameter error. -const INVALID_PARAM_ERROR: i32 = BASE_ERROR + 3; -/// Invalid subscription ID. -const INVALID_SUB_ID: i32 = BASE_ERROR + 4; -/// Wait-for-continue event not generated. -const INVALID_CONTINUE: i32 = BASE_ERROR + 5; +/// General purpose errors, as defined in +/// . +pub mod json_rpc_spec { + /// Invalid parameter error. + pub const INVALID_PARAM_ERROR: i32 = -32602; + /// Internal error. + pub const INTERNAL_ERROR: i32 = -32603; +} impl From for ErrorObject<'static> { fn from(e: Error) -> Self { let msg = e.to_string(); match e { - Error::InvalidBlock => ErrorObject::owned(INVALID_BLOCK_ERROR, msg, None::<()>), - Error::FetchBlockHeader(_) => - ErrorObject::owned(FETCH_BLOCK_HEADER_ERROR, msg, None::<()>), - Error::InvalidParam(_) => ErrorObject::owned(INVALID_PARAM_ERROR, msg, None::<()>), - Error::InvalidSubscriptionID => ErrorObject::owned(INVALID_SUB_ID, msg, None::<()>), - Error::InvalidContinue => ErrorObject::owned(INVALID_CONTINUE, msg, None::<()>), + Error::InvalidBlock => + ErrorObject::owned(rpc_spec_v2::INVALID_BLOCK_ERROR, msg, None::<()>), + Error::InvalidRuntimeCall(_) => + ErrorObject::owned(rpc_spec_v2::INVALID_RUNTIME_CALL, msg, None::<()>), + Error::InvalidContinue => + ErrorObject::owned(rpc_spec_v2::INVALID_CONTINUE, msg, None::<()>), + Error::InvalidParam(_) => + ErrorObject::owned(json_rpc_spec::INVALID_PARAM_ERROR, msg, None::<()>), + Error::InternalError(_) => + ErrorObject::owned(json_rpc_spec::INTERNAL_ERROR, msg, None::<()>), } } } diff --git a/substrate/client/rpc-spec-v2/src/chain_head/tests.rs b/substrate/client/rpc-spec-v2/src/chain_head/tests.rs index 518b7da432b..c8f2362b9eb 100644 --- a/substrate/client/rpc-spec-v2/src/chain_head/tests.rs +++ b/substrate/client/rpc-spec-v2/src/chain_head/tests.rs @@ -360,7 +360,7 @@ async fn get_header() { .await .unwrap_err(); assert_matches!(err, - Error::Call(CallError::Custom(ref err)) if err.code() == 2001 && err.message() == "Invalid block hash" + Error::Call(CallError::Custom(ref err)) if err.code() == super::error::rpc_spec_v2::INVALID_BLOCK_ERROR && err.message() == "Invalid block hash" ); // Obtain the valid header. @@ -389,7 +389,7 @@ async fn get_body() { .await .unwrap_err(); assert_matches!(err, - Error::Call(CallError::Custom(ref err)) if err.code() == 2001 && err.message() == "Invalid block hash" + Error::Call(CallError::Custom(ref err)) if err.code() == super::error::rpc_spec_v2::INVALID_BLOCK_ERROR && err.message() == "Invalid block hash" ); // Valid call. 
@@ -474,7 +474,7 @@ async fn call_runtime() { .await .unwrap_err(); assert_matches!(err, - Error::Call(CallError::Custom(ref err)) if err.code() == 2001 && err.message() == "Invalid block hash" + Error::Call(CallError::Custom(ref err)) if err.code() == super::error::rpc_spec_v2::INVALID_BLOCK_ERROR && err.message() == "Invalid block hash" ); // Pass an invalid parameters that cannot be decode. @@ -487,7 +487,7 @@ async fn call_runtime() { .await .unwrap_err(); assert_matches!(err, - Error::Call(CallError::Custom(ref err)) if err.code() == 2003 && err.message().contains("Invalid parameter") + Error::Call(CallError::Custom(ref err)) if err.code() == super::error::json_rpc_spec::INVALID_PARAM_ERROR && err.message().contains("Invalid parameter") ); // Valid call. @@ -590,7 +590,7 @@ async fn call_runtime_without_flag() { .unwrap_err(); assert_matches!(err, - Error::Call(CallError::Custom(ref err)) if err.code() == 2003 && err.message().contains("The runtime updates flag must be set") + Error::Call(CallError::Custom(ref err)) if err.code() == super::error::rpc_spec_v2::INVALID_RUNTIME_CALL && err.message().contains("subscription was started with `withRuntime` set to `false`") ); } @@ -628,7 +628,7 @@ async fn get_storage_hash() { .await .unwrap_err(); assert_matches!(err, - Error::Call(CallError::Custom(ref err)) if err.code() == 2001 && err.message() == "Invalid block hash" + Error::Call(CallError::Custom(ref err)) if err.code() == super::error::rpc_spec_v2::INVALID_BLOCK_ERROR && err.message() == "Invalid block hash" ); // Valid call without storage at the key. @@ -896,7 +896,7 @@ async fn get_storage_value() { .await .unwrap_err(); assert_matches!(err, - Error::Call(CallError::Custom(ref err)) if err.code() == 2001 && err.message() == "Invalid block hash" + Error::Call(CallError::Custom(ref err)) if err.code() == super::error::rpc_spec_v2::INVALID_BLOCK_ERROR && err.message() == "Invalid block hash" ); // Valid call without storage at the key. @@ -1571,7 +1571,7 @@ async fn follow_with_unpin() { .await .unwrap_err(); assert_matches!(err, - Error::Call(CallError::Custom(ref err)) if err.code() == 2001 && err.message() == "Invalid block hash" + Error::Call(CallError::Custom(ref err)) if err.code() == super::error::rpc_spec_v2::INVALID_BLOCK_ERROR && err.message() == "Invalid block hash" ); // To not exceed the number of pinned blocks, we need to unpin before the next import. @@ -1720,7 +1720,7 @@ async fn follow_with_multiple_unpin_hashes() { .await .unwrap_err(); assert_matches!(err, - Error::Call(CallError::Custom(ref err)) if err.code() == 2001 && err.message() == "Invalid block hash" + Error::Call(CallError::Custom(ref err)) if err.code() == super::error::rpc_spec_v2::INVALID_BLOCK_ERROR && err.message() == "Invalid block hash" ); let _res: () = api @@ -1737,7 +1737,7 @@ async fn follow_with_multiple_unpin_hashes() { .await .unwrap_err(); assert_matches!(err, - Error::Call(CallError::Custom(ref err)) if err.code() == 2001 && err.message() == "Invalid block hash" + Error::Call(CallError::Custom(ref err)) if err.code() == super::error::rpc_spec_v2::INVALID_BLOCK_ERROR && err.message() == "Invalid block hash" ); // Unpin multiple blocks. 
@@ -1755,7 +1755,7 @@ async fn follow_with_multiple_unpin_hashes() { .await .unwrap_err(); assert_matches!(err, - Error::Call(CallError::Custom(ref err)) if err.code() == 2001 && err.message() == "Invalid block hash" + Error::Call(CallError::Custom(ref err)) if err.code() == super::error::rpc_spec_v2::INVALID_BLOCK_ERROR && err.message() == "Invalid block hash" ); let err = api @@ -1766,7 +1766,7 @@ async fn follow_with_multiple_unpin_hashes() { .await .unwrap_err(); assert_matches!(err, - Error::Call(CallError::Custom(ref err)) if err.code() == 2001 && err.message() == "Invalid block hash" + Error::Call(CallError::Custom(ref err)) if err.code() == super::error::rpc_spec_v2::INVALID_BLOCK_ERROR && err.message() == "Invalid block hash" ); } -- GitLab From c68ce6e1472be821343ba7057857bec5626dc791 Mon Sep 17 00:00:00 2001 From: Michal Kucharczyk <1728078+michalkucharczyk@users.noreply.github.com> Date: Wed, 29 Nov 2023 20:19:44 +0100 Subject: [PATCH 162/199] state-db: log_target usage fixed (#2547) Use `LOG_TARGET/LOG_TARGET_PIN` in logs. --- substrate/client/state-db/src/lib.rs | 6 +++--- substrate/client/state-db/src/pruning.rs | 6 +++--- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/substrate/client/state-db/src/lib.rs b/substrate/client/state-db/src/lib.rs index c656f126ae6..41c231c31aa 100644 --- a/substrate/client/state-db/src/lib.rs +++ b/substrate/client/state-db/src/lib.rs @@ -474,7 +474,7 @@ impl StateDbSync { if have_block { let refs = self.pinned.entry(hash.clone()).or_default(); if *refs == 0 { - trace!(target: "state-db-pin", "Pinned block: {:?}", hash); + trace!(target: LOG_TARGET_PIN, "Pinned block: {:?}", hash); self.non_canonical.pin(hash); } *refs += 1; @@ -491,11 +491,11 @@ impl StateDbSync { Entry::Occupied(mut entry) => { *entry.get_mut() -= 1; if *entry.get() == 0 { - trace!(target: "state-db-pin", "Unpinned block: {:?}", hash); + trace!(target: LOG_TARGET_PIN, "Unpinned block: {:?}", hash); entry.remove(); self.non_canonical.unpin(hash); } else { - trace!(target: "state-db-pin", "Releasing reference for {:?}", hash); + trace!(target: LOG_TARGET_PIN, "Releasing reference for {:?}", hash); } }, Entry::Vacant(_) => {}, diff --git a/substrate/client/state-db/src/pruning.rs b/substrate/client/state-db/src/pruning.rs index 623d30b098b..ae8a9a12490 100644 --- a/substrate/client/state-db/src/pruning.rs +++ b/substrate/client/state-db/src/pruning.rs @@ -385,7 +385,7 @@ impl RefWindow { /// Prune next block. Expects at least one block in the window. Adds changes to `commit`. pub fn prune_one(&mut self, commit: &mut CommitSet) -> Result<(), Error> { if let Some(pruned) = self.queue.pop_front(self.base)? 
{ - trace!(target: "state-db", "Pruning {:?} ({} deleted)", pruned.hash, pruned.deleted.len()); + trace!(target: LOG_TARGET, "Pruning {:?} ({} deleted)", pruned.hash, pruned.deleted.len()); let index = self.base; commit.data.deleted.extend(pruned.deleted.into_iter()); commit.meta.inserted.push((to_meta_key(LAST_PRUNED, &()), index.encode())); @@ -393,7 +393,7 @@ impl RefWindow { self.base += 1; Ok(()) } else { - trace!(target: "state-db", "Trying to prune when there's nothing to prune"); + trace!(target: LOG_TARGET, "Trying to prune when there's nothing to prune"); Err(Error::StateDb(StateDbError::BlockUnavailable)) } } @@ -418,7 +418,7 @@ impl RefWindow { return Err(Error::StateDb(StateDbError::InvalidBlockNumber)) } trace!( - target: "state-db", + target: LOG_TARGET, "Adding to pruning window: {:?} ({} inserted, {} deleted)", hash, commit.data.inserted.len(), -- GitLab From f69069bd4202ae35b2022fe4b78963197ecfcbc0 Mon Sep 17 00:00:00 2001 From: Joshy Orndorff Date: Wed, 29 Nov 2023 15:27:04 -0500 Subject: [PATCH 163/199] Parachain Template: Prune `AuxStore` bound and corresponding crate dependency (#2303) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This small PR removes an unnecessary trait bound to the `AuxStore` trait from the Parachain template's `rpc.rs` file. With that bound removed, the entire dependency on `sc-client-api` can also be removed. --------- Co-authored-by: Joshy Orndorff Co-authored-by: Bastian Köcher --- cumulus/parachain-template/node/src/rpc.rs | 2 -- 1 file changed, 2 deletions(-) diff --git a/cumulus/parachain-template/node/src/rpc.rs b/cumulus/parachain-template/node/src/rpc.rs index b5ca484fa48..ed4003ed6d2 100644 --- a/cumulus/parachain-template/node/src/rpc.rs +++ b/cumulus/parachain-template/node/src/rpc.rs @@ -9,7 +9,6 @@ use std::sync::Arc; use parachain_template_runtime::{opaque::Block, AccountId, Balance, Nonce}; -use sc_client_api::AuxStore; pub use sc_rpc::{DenyUnsafe, SubscriptionTaskExecutor}; use sc_transaction_pool_api::TransactionPool; use sp_api::ProvideRuntimeApi; @@ -36,7 +35,6 @@ pub fn create_full( where C: ProvideRuntimeApi + HeaderBackend - + AuxStore + HeaderMetadata + Send + Sync -- GitLab From 2135fa872b810799e997657cc621579e703774ca Mon Sep 17 00:00:00 2001 From: PG Herveou Date: Wed, 29 Nov 2023 22:12:19 +0100 Subject: [PATCH 164/199] Contracts: use compiled rust tests (#2347) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit see #2189 This PR does the following: - Bring the user api functions into a new pallet-contracts-uapi (They are currently defined in ink! [here])(https://github.com/paritytech/ink/blob/master/crates/env/src/engine/on_chain/ext.rs) - Add older api versions and unstable to the user api trait. - Remove pallet-contracts-primitives and bring the types it defined in uapi / pallet-contracts - Add the infrastructure to build fixtures from Rust files and test it works by replacing `dummy.wat` and `call.wat` - Move all the doc from wasm/runtime.rs to pallet-contracts-uapi. 
This will be done in a follow up: - convert the rest of the test from .wat to rust - bring risc-v uapi up to date with wasm - finalize the uapi host fns, making sure everything is codegen from the source host fns in pallet-contracts --------- Co-authored-by: Alexander Theißen --- Cargo.lock | 92 +- Cargo.toml | 3 +- .../contracts/contracts-rococo/Cargo.toml | 2 - .../contracts/contracts-rococo/src/lib.rs | 10 +- substrate/bin/node/runtime/Cargo.toml | 2 - substrate/bin/node/runtime/src/lib.rs | 10 +- substrate/frame/contracts/Cargo.toml | 4 +- substrate/frame/contracts/build.rs | 1 - substrate/frame/contracts/fixtures/Cargo.toml | 16 +- substrate/frame/contracts/fixtures/build.rs | 277 ++++++ .../contracts/fixtures/contracts/call.rs | 48 + .../fixtures/contracts/common/Cargo.toml | 9 + .../fixtures/contracts/common/src/lib.rs | 31 + .../contracts/fixtures/contracts/dummy.rs | 26 + .../frame/contracts/fixtures/data/call.wat | 39 - .../frame/contracts/fixtures/data/dummy.wat | 6 - substrate/frame/contracts/fixtures/src/lib.rs | 40 +- .../frame/contracts/mock-network/Cargo.toml | 3 +- .../frame/contracts/mock-network/src/tests.rs | 3 +- .../frame/contracts/primitives/Cargo.toml | 33 - .../frame/contracts/primitives/README.md | 3 - .../frame/contracts/proc-macro/src/lib.rs | 12 +- .../frame/contracts/src/benchmarking/mod.rs | 2 +- .../frame/contracts/src/chain_extension.rs | 2 +- substrate/frame/contracts/src/debug.rs | 6 +- substrate/frame/contracts/src/exec.rs | 4 +- substrate/frame/contracts/src/lib.rs | 8 +- .../src/lib.rs => src/primitives.rs} | 15 +- substrate/frame/contracts/src/tests.rs | 4 +- .../frame/contracts/src/tests/test_debug.rs | 2 +- substrate/frame/contracts/src/wasm/mod.rs | 23 +- substrate/frame/contracts/src/wasm/runtime.rs | 908 +++--------------- substrate/frame/contracts/uapi/Cargo.toml | 23 + substrate/frame/contracts/uapi/src/flags.rs | 73 ++ substrate/frame/contracts/uapi/src/host.rs | 793 +++++++++++++++ .../frame/contracts/uapi/src/host/riscv32.rs | 14 + .../frame/contracts/uapi/src/host/wasm32.rs | 811 ++++++++++++++++ substrate/frame/contracts/uapi/src/lib.rs | 139 +++ 38 files changed, 2520 insertions(+), 977 deletions(-) create mode 100644 substrate/frame/contracts/fixtures/build.rs create mode 100644 substrate/frame/contracts/fixtures/contracts/call.rs create mode 100644 substrate/frame/contracts/fixtures/contracts/common/Cargo.toml create mode 100644 substrate/frame/contracts/fixtures/contracts/common/src/lib.rs create mode 100644 substrate/frame/contracts/fixtures/contracts/dummy.rs delete mode 100644 substrate/frame/contracts/fixtures/data/call.wat delete mode 100644 substrate/frame/contracts/fixtures/data/dummy.wat delete mode 100644 substrate/frame/contracts/primitives/Cargo.toml delete mode 100644 substrate/frame/contracts/primitives/README.md rename substrate/frame/contracts/{primitives/src/lib.rs => src/primitives.rs} (96%) create mode 100644 substrate/frame/contracts/uapi/Cargo.toml create mode 100644 substrate/frame/contracts/uapi/src/flags.rs create mode 100644 substrate/frame/contracts/uapi/src/host.rs create mode 100644 substrate/frame/contracts/uapi/src/host/riscv32.rs create mode 100644 substrate/frame/contracts/uapi/src/host/wasm32.rs create mode 100644 substrate/frame/contracts/uapi/src/lib.rs diff --git a/Cargo.lock b/Cargo.lock index f94c87dad48..346ce2f933f 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2834,7 +2834,6 @@ dependencies = [ "pallet-balances", "pallet-collator-selection", "pallet-contracts", - "pallet-contracts-primitives", 
"pallet-insecure-randomness-collective-flip", "pallet-message-queue", "pallet-multisig", @@ -6828,7 +6827,6 @@ dependencies = [ "pallet-child-bounties", "pallet-collective", "pallet-contracts", - "pallet-contracts-primitives", "pallet-conviction-voting", "pallet-core-fellowship", "pallet-democracy", @@ -8465,7 +8463,7 @@ dependencies = [ "itertools 0.10.5", "tar", "tempfile", - "toml_edit", + "toml_edit 0.19.14", ] [[package]] @@ -9380,8 +9378,8 @@ dependencies = [ "pallet-assets", "pallet-balances", "pallet-contracts-fixtures", - "pallet-contracts-primitives", "pallet-contracts-proc-macro", + "pallet-contracts-uapi", "pallet-insecure-randomness-collective-flip", "pallet-message-queue", "pallet-proxy", @@ -9412,11 +9410,21 @@ dependencies = [ name = "pallet-contracts-fixtures" version = "1.0.0" dependencies = [ + "anyhow", + "cfg-if", "frame-system", + "parity-wasm", "sp-runtime", + "tempfile", + "toml 0.8.8", + "twox-hash", "wat", ] +[[package]] +name = "pallet-contracts-fixtures-common" +version = "1.0.0" + [[package]] name = "pallet-contracts-mock-network" version = "1.0.0" @@ -9428,8 +9436,8 @@ dependencies = [ "pallet-balances", "pallet-contracts", "pallet-contracts-fixtures", - "pallet-contracts-primitives", "pallet-contracts-proc-macro", + "pallet-contracts-uapi", "pallet-insecure-randomness-collective-flip", "pallet-message-queue", "pallet-proxy", @@ -9455,18 +9463,6 @@ dependencies = [ "xcm-simulator", ] -[[package]] -name = "pallet-contracts-primitives" -version = "24.0.0" -dependencies = [ - "bitflags 1.3.2", - "parity-scale-codec", - "scale-info", - "sp-runtime", - "sp-std 8.0.0", - "sp-weights", -] - [[package]] name = "pallet-contracts-proc-macro" version = "4.0.0-dev" @@ -9476,6 +9472,16 @@ dependencies = [ "syn 2.0.38", ] +[[package]] +name = "pallet-contracts-uapi" +version = "4.0.0-dev" +dependencies = [ + "bitflags 1.3.2", + "parity-scale-codec", + "paste", + "scale-info", +] + [[package]] name = "pallet-conviction-voting" version = "4.0.0-dev" @@ -13415,7 +13421,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7f4c021e1093a56626774e81216a4ce732a735e5bad4868a03f3ed65ca0c3919" dependencies = [ "once_cell", - "toml_edit", + "toml_edit 0.19.14", ] [[package]] @@ -13878,6 +13884,15 @@ dependencies = [ "bitflags 1.3.2", ] +[[package]] +name = "redox_syscall" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4722d768eff46b75989dd134e5c353f0d6296e5aaa3132e776cbdb56be7731aa" +dependencies = [ + "bitflags 1.3.2", +] + [[package]] name = "redox_users" version = "0.4.3" @@ -16400,9 +16415,9 @@ dependencies = [ [[package]] name = "serde_spanned" -version = "0.6.3" +version = "0.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "96426c9936fd7a0124915f9185ea1d20aa9445cc9821142f0a73bc9207a2e186" +checksum = "12022b835073e5b11e90a14f86838ceb1c8fb0325b72416845c487ac0fa95e80" dependencies = [ "serde", ] @@ -18758,13 +18773,13 @@ checksum = "9d0e916b1148c8e263850e1ebcbd046f333e0683c724876bb0da63ea4373dc8a" [[package]] name = "tempfile" -version = "3.8.0" +version = "3.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cb94d2f3cc536af71caac6b6fcebf65860b347e7ce0cc9ebe8f70d3e521054ef" +checksum = "7ef1adac450ad7f4b3c28589471ade84f25f731a7a0fe30d71dfa9f60fd808e5" dependencies = [ "cfg-if", "fastrand 2.0.0", - "redox_syscall 0.3.5", + "redox_syscall 0.4.1", "rustix 0.38.21", "windows-sys 0.48.0", ] @@ -19227,14 +19242,26 @@ dependencies = [ "serde", 
"serde_spanned", "toml_datetime", - "toml_edit", + "toml_edit 0.19.14", +] + +[[package]] +name = "toml" +version = "0.8.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a1a195ec8c9da26928f773888e0742ca3ca1040c6cd859c919c9f59c1954ab35" +dependencies = [ + "serde", + "serde_spanned", + "toml_datetime", + "toml_edit 0.21.0", ] [[package]] name = "toml_datetime" -version = "0.6.3" +version = "0.6.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7cda73e2f1397b1262d6dfdcef8aafae14d1de7748d66822d3bfeeb6d03e5e4b" +checksum = "3550f4e9685620ac18a50ed434eb3aec30db8ba93b0287467bca5826ea25baf1" dependencies = [ "serde", ] @@ -19252,6 +19279,19 @@ dependencies = [ "winnow", ] +[[package]] +name = "toml_edit" +version = "0.21.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d34d383cd00a163b4a5b85053df514d45bc330f6de7737edfe0a93311d1eaa03" +dependencies = [ + "indexmap 2.0.0", + "serde", + "serde_spanned", + "toml_datetime", + "winnow", +] + [[package]] name = "tower" version = "0.4.13" diff --git a/Cargo.toml b/Cargo.toml index e154c2f3bc8..ac474ae5686 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -283,8 +283,9 @@ members = [ "substrate/frame/collective", "substrate/frame/contracts", "substrate/frame/contracts/fixtures", + "substrate/frame/contracts/fixtures/contracts/common", + "substrate/frame/contracts/uapi", "substrate/frame/contracts/mock-network", - "substrate/frame/contracts/primitives", "substrate/frame/contracts/proc-macro", "substrate/frame/conviction-voting", "substrate/frame/core-fellowship", diff --git a/cumulus/parachains/runtimes/contracts/contracts-rococo/Cargo.toml b/cumulus/parachains/runtimes/contracts/contracts-rococo/Cargo.toml index eded360436b..f5626a90f36 100644 --- a/cumulus/parachains/runtimes/contracts/contracts-rococo/Cargo.toml +++ b/cumulus/parachains/runtimes/contracts/contracts-rococo/Cargo.toml @@ -52,7 +52,6 @@ pallet-transaction-payment-rpc-runtime-api = { path = "../../../../../substrate/ pallet-utility = { path = "../../../../../substrate/frame/utility", default-features = false} pallet-sudo = { path = "../../../../../substrate/frame/sudo", default-features = false} pallet-contracts = { path = "../../../../../substrate/frame/contracts", default-features = false} -pallet-contracts-primitives = { path = "../../../../../substrate/frame/contracts/primitives", default-features = false} # Polkadot pallet-xcm = { path = "../../../../../polkadot/xcm/pallet-xcm", default-features = false } @@ -102,7 +101,6 @@ std = [ "pallet-authorship/std", "pallet-balances/std", "pallet-collator-selection/std", - "pallet-contracts-primitives/std", "pallet-contracts/std", "pallet-insecure-randomness-collective-flip/std", "pallet-message-queue/std", diff --git a/cumulus/parachains/runtimes/contracts/contracts-rococo/src/lib.rs b/cumulus/parachains/runtimes/contracts/contracts-rococo/src/lib.rs index a7c06103c4a..77bc93fb968 100644 --- a/cumulus/parachains/runtimes/contracts/contracts-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/contracts/contracts-rococo/src/lib.rs @@ -588,7 +588,7 @@ impl_runtime_apis! { gas_limit: Option, storage_deposit_limit: Option, input_data: Vec, - ) -> pallet_contracts_primitives::ContractExecResult { + ) -> pallet_contracts::ContractExecResult { let gas_limit = gas_limit.unwrap_or(RuntimeBlockWeights::get().max_block); Contracts::bare_call( origin, @@ -608,10 +608,10 @@ impl_runtime_apis! 
{ value: Balance, gas_limit: Option, storage_deposit_limit: Option, - code: pallet_contracts_primitives::Code, + code: pallet_contracts::Code, data: Vec, salt: Vec, - ) -> pallet_contracts_primitives::ContractInstantiateResult { + ) -> pallet_contracts::ContractInstantiateResult { let gas_limit = gas_limit.unwrap_or(RuntimeBlockWeights::get().max_block); Contracts::bare_instantiate( origin, @@ -631,7 +631,7 @@ impl_runtime_apis! { code: Vec, storage_deposit_limit: Option, determinism: pallet_contracts::Determinism, - ) -> pallet_contracts_primitives::CodeUploadResult { + ) -> pallet_contracts::CodeUploadResult { Contracts::bare_upload_code( origin, code, @@ -643,7 +643,7 @@ impl_runtime_apis! { fn get_storage( address: AccountId, key: Vec, - ) -> pallet_contracts_primitives::GetStorageResult { + ) -> pallet_contracts::GetStorageResult { Contracts::get_storage(address, key) } } diff --git a/substrate/bin/node/runtime/Cargo.toml b/substrate/bin/node/runtime/Cargo.toml index 2414358b60b..cb4970e7c9d 100644 --- a/substrate/bin/node/runtime/Cargo.toml +++ b/substrate/bin/node/runtime/Cargo.toml @@ -74,7 +74,6 @@ pallet-broker = { path = "../../../frame/broker", default-features = false} pallet-child-bounties = { path = "../../../frame/child-bounties", default-features = false} pallet-collective = { path = "../../../frame/collective", default-features = false} pallet-contracts = { path = "../../../frame/contracts", default-features = false} -pallet-contracts-primitives = { path = "../../../frame/contracts/primitives", default-features = false} pallet-conviction-voting = { path = "../../../frame/conviction-voting", default-features = false} pallet-core-fellowship = { path = "../../../frame/core-fellowship", default-features = false} pallet-democracy = { path = "../../../frame/democracy", default-features = false} @@ -171,7 +170,6 @@ std = [ "pallet-broker/std", "pallet-child-bounties/std", "pallet-collective/std", - "pallet-contracts-primitives/std", "pallet-contracts/std", "pallet-conviction-voting/std", "pallet-core-fellowship/std", diff --git a/substrate/bin/node/runtime/src/lib.rs b/substrate/bin/node/runtime/src/lib.rs index d7beb29becf..e2746b1c35b 100644 --- a/substrate/bin/node/runtime/src/lib.rs +++ b/substrate/bin/node/runtime/src/lib.rs @@ -2497,7 +2497,7 @@ impl_runtime_apis! { gas_limit: Option, storage_deposit_limit: Option, input_data: Vec, - ) -> pallet_contracts_primitives::ContractExecResult { + ) -> pallet_contracts::ContractExecResult { let gas_limit = gas_limit.unwrap_or(RuntimeBlockWeights::get().max_block); Contracts::bare_call( origin, @@ -2517,10 +2517,10 @@ impl_runtime_apis! { value: Balance, gas_limit: Option, storage_deposit_limit: Option, - code: pallet_contracts_primitives::Code, + code: pallet_contracts::Code, data: Vec, salt: Vec, - ) -> pallet_contracts_primitives::ContractInstantiateResult + ) -> pallet_contracts::ContractInstantiateResult { let gas_limit = gas_limit.unwrap_or(RuntimeBlockWeights::get().max_block); Contracts::bare_instantiate( @@ -2541,7 +2541,7 @@ impl_runtime_apis! { code: Vec, storage_deposit_limit: Option, determinism: pallet_contracts::Determinism, - ) -> pallet_contracts_primitives::CodeUploadResult + ) -> pallet_contracts::CodeUploadResult { Contracts::bare_upload_code( origin, @@ -2554,7 +2554,7 @@ impl_runtime_apis! 
{ fn get_storage( address: AccountId, key: Vec, - ) -> pallet_contracts_primitives::GetStorageResult { + ) -> pallet_contracts::GetStorageResult { Contracts::get_storage( address, key diff --git a/substrate/frame/contracts/Cargo.toml b/substrate/frame/contracts/Cargo.toml index 239b0865e0f..417f719d803 100644 --- a/substrate/frame/contracts/Cargo.toml +++ b/substrate/frame/contracts/Cargo.toml @@ -40,7 +40,7 @@ frame-benchmarking = { path = "../benchmarking", default-features = false, optio frame-support = { path = "../support", default-features = false} frame-system = { path = "../system", default-features = false} pallet-balances = { path = "../balances", default-features = false , optional = true} -pallet-contracts-primitives = { path = "primitives", default-features = false} +pallet-contracts-uapi = { path = "uapi" } pallet-contracts-proc-macro = { path = "proc-macro" } sp-api = { path = "../../primitives/api", default-features = false} sp-core = { path = "../../primitives/core", default-features = false} @@ -83,8 +83,6 @@ std = [ "frame-system/std", "log/std", "pallet-balances?/std", - "pallet-contracts-fixtures/std", - "pallet-contracts-primitives/std", "pallet-contracts-proc-macro/full", "pallet-insecure-randomness-collective-flip/std", "pallet-proxy/std", diff --git a/substrate/frame/contracts/build.rs b/substrate/frame/contracts/build.rs index 42bc45d563d..83d5d368d4b 100644 --- a/substrate/frame/contracts/build.rs +++ b/substrate/frame/contracts/build.rs @@ -68,6 +68,5 @@ fn main() -> Result<(), Box> { version - 1, )?; - println!("cargo:rerun-if-changed=src/migration"); Ok(()) } diff --git a/substrate/frame/contracts/fixtures/Cargo.toml b/substrate/frame/contracts/fixtures/Cargo.toml index b44f36f2a5f..257e01e50fa 100644 --- a/substrate/frame/contracts/fixtures/Cargo.toml +++ b/substrate/frame/contracts/fixtures/Cargo.toml @@ -9,10 +9,16 @@ description = "Fixtures for testing contracts pallet." [dependencies] wat = "1" -frame-system = { path = "../../system", default-features = false} -sp-runtime = { path = "../../../primitives/runtime", default-features = false} +frame-system = { path = "../../system" } +sp-runtime = { path = "../../../primitives/runtime" } +anyhow = "1.0.0" + +[build-dependencies] +parity-wasm = "0.45.0" +tempfile = "3.8.1" +toml = "0.8.8" +twox-hash = "1.6.3" +anyhow = "1.0.0" +cfg-if = { version = "1.0", default-features = false } -[features] -default = [ "std" ] -std = [ "frame-system/std", "sp-runtime/std" ] diff --git a/substrate/frame/contracts/fixtures/build.rs b/substrate/frame/contracts/fixtures/build.rs new file mode 100644 index 00000000000..ada2650c6db --- /dev/null +++ b/substrate/frame/contracts/fixtures/build.rs @@ -0,0 +1,277 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Compile contracts to wasm and RISC-V binaries. 
+use anyhow::Result; +use parity_wasm::elements::{deserialize_file, serialize_to_file, Internal}; +use std::{ + env, fs, + hash::Hasher, + path::{Path, PathBuf}, + process::Command, +}; +use twox_hash::XxHash32; + +/// Read the file at `path` and return its hash as a hex string. +fn file_hash(path: &Path) -> String { + let data = fs::read(path).expect("file exists; qed"); + let mut hasher = XxHash32::default(); + hasher.write(&data); + hasher.write(include_bytes!("build.rs")); + let hash = hasher.finish(); + format!("{:x}", hash) +} + +/// A contract entry. +struct Entry { + /// The path to the contract source file. + path: PathBuf, + /// The hash of the contract source file. + hash: String, +} + +impl Entry { + /// Create a new contract entry from the given path. + fn new(path: PathBuf) -> Self { + let hash = file_hash(&path); + Self { path, hash } + } + + /// Return the path to the contract source file. + fn path(&self) -> &str { + self.path.to_str().expect("path is valid unicode; qed") + } + + /// Return the name of the contract. + fn name(&self) -> &str { + self.path + .file_stem() + .expect("file exits; qed") + .to_str() + .expect("name is valid unicode; qed") + } + + /// Return the name of the output wasm file. + fn out_wasm_filename(&self) -> String { + format!("{}.wasm", self.name()) + } +} + +/// Collect all contract entries from the given source directory. +/// Contracts that have already been compiled are filtered out. +fn collect_entries(contracts_dir: &Path, out_dir: &Path) -> Vec { + fs::read_dir(&contracts_dir) + .expect("src dir exists; qed") + .filter_map(|file| { + let path = file.expect("file exists; qed").path(); + if path.extension().map_or(true, |ext| ext != "rs") { + return None; + } + + let entry = Entry::new(path); + if out_dir.join(&entry.hash).exists() { + None + } else { + Some(entry) + } + }) + .collect::>() +} + +/// Create a `Cargo.toml` to compile the given contract entries. +fn create_cargo_toml<'a>( + fixtures_dir: &Path, + entries: impl Iterator, + output_dir: &Path, +) -> Result<()> { + let uapi_path = fixtures_dir.join("../uapi").canonicalize()?; + let common_path = fixtures_dir.join("./contracts/common").canonicalize()?; + let mut cargo_toml: toml::Value = toml::from_str(&format!( + " +[package] +name = 'contracts' +version = '0.1.0' +edition = '2021' + +# Binary targets are injected below. +[[bin]] + +[dependencies] +uapi = {{ package = 'pallet-contracts-uapi', default-features = false, path = {uapi_path:?}}} +common = {{ package = 'pallet-contracts-fixtures-common', path = {common_path:?}}} + +[profile.release] +opt-level = 3 +lto = true +codegen-units = 1 +" + ))?; + + let binaries = entries + .map(|entry| { + let name = entry.name(); + let path = entry.path(); + toml::Value::Table(toml::toml! { + name = name + path = path + }) + }) + .collect::>(); + + cargo_toml["bin"] = toml::Value::Array(binaries); + let cargo_toml = toml::to_string_pretty(&cargo_toml)?; + fs::write(output_dir.join("Cargo.toml"), cargo_toml).map_err(Into::into) +} + +/// Invoke `cargo fmt` to check that fixtures files are formatted. +fn invoke_cargo_fmt<'a>( + config_path: &Path, + files: impl Iterator, + contract_dir: &Path, +) -> Result<()> { + // If rustfmt is not installed, skip the check. 
+ if !Command::new("rustup") + .args(&["run", "nightly", "rustfmt", "--version"]) + .output() + .expect("failed to execute process") + .status + .success() + { + return Ok(()) + } + + let fmt_res = Command::new("rustup") + .args(&["run", "nightly", "rustfmt", "--check", "--config-path"]) + .arg(config_path) + .args(files) + .output() + .expect("failed to execute process"); + + if fmt_res.status.success() { + return Ok(()) + } + + let stdout = String::from_utf8_lossy(&fmt_res.stdout); + let stderr = String::from_utf8_lossy(&fmt_res.stderr); + eprintln!("{}\n{}", stdout, stderr); + eprintln!( + "Fixtures files are not formatted.\nPlease run `rustup run nightly rustfmt --config-path {} {}/*.rs`", + config_path.display(), + contract_dir.display() + ); + anyhow::bail!("Fixtures files are not formatted") +} + +/// Invoke `cargo build` to compile the contracts. +fn invoke_build(current_dir: &Path) -> Result<()> { + let encoded_rustflags = [ + "-Clink-arg=-zstack-size=65536", + "-Clink-arg=--import-memory", + "-Clinker-plugin-lto", + "-Ctarget-cpu=mvp", + "-Dwarnings", + ] + .join("\x1f"); + + let build_res = Command::new(env::var("CARGO")?) + .current_dir(current_dir) + .env("CARGO_ENCODED_RUSTFLAGS", encoded_rustflags) + .args(&["build", "--release", "--target=wasm32-unknown-unknown"]) + .output() + .expect("failed to execute process"); + + if build_res.status.success() { + return Ok(()) + } + + let stderr = String::from_utf8_lossy(&build_res.stderr); + eprintln!("{}", stderr); + anyhow::bail!("Failed to build contracts"); +} + +/// Post-process the compiled wasm contracts. +fn post_process_wasm(input_path: &Path, output_path: &Path) -> Result<()> { + let mut module = deserialize_file(input_path)?; + if let Some(section) = module.export_section_mut() { + section.entries_mut().retain(|entry| { + matches!(entry.internal(), Internal::Function(_)) && + (entry.field() == "call" || entry.field() == "deploy") + }); + } + + serialize_to_file(output_path, module).map_err(Into::into) +} + +/// Write the compiled contracts to the given output directory. +fn write_output(build_dir: &Path, out_dir: &Path, entries: Vec) -> Result<()> { + for entry in entries { + let wasm_output = entry.out_wasm_filename(); + post_process_wasm( + &build_dir.join("target/wasm32-unknown-unknown/release").join(&wasm_output), + &out_dir.join(&wasm_output), + )?; + fs::write(out_dir.join(&entry.hash), "")?; + } + + Ok(()) +} + +/// Returns the root path of the wasm workspace. 
+fn find_workspace_root(current_dir: &Path) -> Option { + let mut current_dir = current_dir.to_path_buf(); + + while current_dir.parent().is_some() { + if current_dir.join("Cargo.toml").exists() { + let cargo_toml_contents = + std::fs::read_to_string(current_dir.join("Cargo.toml")).ok()?; + if cargo_toml_contents.contains("[workspace]") { + return Some(current_dir); + } + } + + current_dir.pop(); + } + + None +} + +fn main() -> Result<()> { + let fixtures_dir: PathBuf = env::var("CARGO_MANIFEST_DIR")?.into(); + let contracts_dir = fixtures_dir.join("contracts"); + let out_dir: PathBuf = env::var("OUT_DIR")?.into(); + let workspace_root = find_workspace_root(&fixtures_dir).expect("workspace root exists; qed"); + + let entries = collect_entries(&contracts_dir, &out_dir); + if entries.is_empty() { + return Ok(()); + } + + let tmp_dir = tempfile::tempdir()?; + let tmp_dir_path = tmp_dir.path(); + + create_cargo_toml(&fixtures_dir, entries.iter(), tmp_dir.path())?; + invoke_cargo_fmt( + &workspace_root.join(".rustfmt.toml"), + entries.iter().map(|entry| &entry.path as _), + &contracts_dir, + )?; + + invoke_build(tmp_dir_path)?; + write_output(tmp_dir_path, &out_dir, entries)?; + + Ok(()) +} diff --git a/substrate/frame/contracts/fixtures/contracts/call.rs b/substrate/frame/contracts/fixtures/contracts/call.rs new file mode 100644 index 00000000000..396b71d5e96 --- /dev/null +++ b/substrate/frame/contracts/fixtures/contracts/call.rs @@ -0,0 +1,48 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! This calls another contract as passed as its account id. +#![no_std] +#![no_main] + +extern crate common; +use uapi::{CallFlags, HostFn, HostFnImpl as api}; + +#[no_mangle] +pub extern "C" fn deploy() {} + +#[no_mangle] +pub extern "C" fn call() { + let mut buffer = [0u8; 40]; + let callee_input = 0..4; + let callee_addr = 4..36; + let value = 36..40; + + // Read the input data. + api::input(&mut &mut buffer[..]); + + // Call the callee + api::call_v1( + CallFlags::empty(), + &buffer[callee_addr], + 0u64, // How much gas to devote for the execution. 0 = all. + &buffer[value], + &buffer[callee_input], + None, + ) + .unwrap(); +} diff --git a/substrate/frame/contracts/fixtures/contracts/common/Cargo.toml b/substrate/frame/contracts/fixtures/contracts/common/Cargo.toml new file mode 100644 index 00000000000..b70db7cde45 --- /dev/null +++ b/substrate/frame/contracts/fixtures/contracts/common/Cargo.toml @@ -0,0 +1,9 @@ +[package] +name = "pallet-contracts-fixtures-common" +publish = false +version = "1.0.0" +authors.workspace = true +edition.workspace = true +license.workspace = true +description = "Common utilities for pallet-contracts-fixtures." 
+ diff --git a/substrate/frame/contracts/fixtures/contracts/common/src/lib.rs b/substrate/frame/contracts/fixtures/contracts/common/src/lib.rs new file mode 100644 index 00000000000..29bdbfbb042 --- /dev/null +++ b/substrate/frame/contracts/fixtures/contracts/common/src/lib.rs @@ -0,0 +1,31 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +#![no_std] +#![cfg(any(target_arch = "wasm32", target_arch = "riscv32"))] + +#[panic_handler] +fn panic(_info: &core::panic::PanicInfo) -> ! { + #[cfg(target_arch = "wasm32")] + core::arch::wasm32::unreachable(); + + #[cfg(target_arch = "riscv32")] + // Safety: The unimp instruction is guaranteed to trap + unsafe { + core::arch::asm!("unimp"); + core::hint::unreachable_unchecked(); + } +} diff --git a/substrate/frame/contracts/fixtures/contracts/dummy.rs b/substrate/frame/contracts/fixtures/contracts/dummy.rs new file mode 100644 index 00000000000..98b9d494bbc --- /dev/null +++ b/substrate/frame/contracts/fixtures/contracts/dummy.rs @@ -0,0 +1,26 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +#![no_std] +#![no_main] + +extern crate common; + +#[no_mangle] +pub extern "C" fn deploy() {} + +#[no_mangle] +pub extern "C" fn call() {} diff --git a/substrate/frame/contracts/fixtures/data/call.wat b/substrate/frame/contracts/fixtures/data/call.wat deleted file mode 100644 index 43b32049c88..00000000000 --- a/substrate/frame/contracts/fixtures/data/call.wat +++ /dev/null @@ -1,39 +0,0 @@ -;; This calls another contract as passed as its account id. -(module - (import "seal0" "seal_input" (func $seal_input (param i32 i32))) - (import "seal1" "seal_call" (func $seal_call (param i32 i32 i64 i32 i32 i32 i32 i32) (result i32))) - (import "env" "memory" (memory 1 1)) - - (func $assert (param i32) - (block $ok - (br_if $ok - (local.get 0) - ) - (unreachable) - ) - ) - - (func (export "deploy")) - - (func (export "call") - ;; Store length of input buffer. - (i32.store (i32.const 0) (i32.const 512)) - - ;; Copy input at address 4. - (call $seal_input (i32.const 4) (i32.const 0)) - - ;; Call passed contract. - (call $assert (i32.eqz - (call $seal_call - (i32.const 0) ;; No flags - (i32.const 8) ;; Pointer to "callee" address. - (i64.const 0) ;; How much gas to devote for the execution. 0 = all. 
- (i32.const 512) ;; Pointer to the buffer with value to transfer - (i32.const 4) ;; Pointer to input data buffer address - (i32.const 4) ;; Length of input data buffer - (i32.const 4294967295) ;; u32 max value is the sentinel value: do not copy output - (i32.const 0) ;; Length is ignored in this case - ) - )) - ) -) diff --git a/substrate/frame/contracts/fixtures/data/dummy.wat b/substrate/frame/contracts/fixtures/data/dummy.wat deleted file mode 100644 index a6435e49df2..00000000000 --- a/substrate/frame/contracts/fixtures/data/dummy.wat +++ /dev/null @@ -1,6 +0,0 @@ -;; A valid contract which does nothing at all -(module - (import "env" "memory" (memory 1 1)) - (func (export "deploy")) - (func (export "call")) -) diff --git a/substrate/frame/contracts/fixtures/src/lib.rs b/substrate/frame/contracts/fixtures/src/lib.rs index 48117f7ca94..fbc2647709d 100644 --- a/substrate/frame/contracts/fixtures/src/lib.rs +++ b/substrate/frame/contracts/fixtures/src/lib.rs @@ -16,9 +16,9 @@ // limitations under the License. use sp_runtime::traits::Hash; -use std::{env::var, path::PathBuf}; +use std::{env::var, fs, path::PathBuf}; -fn fixtures_root_dir() -> PathBuf { +fn wat_root_dir() -> PathBuf { match (var("CARGO_MANIFEST_DIR"), var("CARGO_PKG_NAME")) { // When `CARGO_MANIFEST_DIR` is not set, Rust resolves relative paths from the root folder (Err(_), _) => "substrate/frame/contracts/fixtures/data".into(), @@ -33,12 +33,44 @@ fn fixtures_root_dir() -> PathBuf { /// with it's hash. /// /// The fixture files are located under the `fixtures/` directory. -pub fn compile_module(fixture_name: &str) -> wat::Result<(Vec, ::Output)> +fn legacy_compile_module( + fixture_name: &str, +) -> anyhow::Result<(Vec, ::Output)> where T: frame_system::Config, { - let fixture_path = fixtures_root_dir().join(format!("{fixture_name}.wat")); + let fixture_path = wat_root_dir().join(format!("{fixture_name}.wat")); let wasm_binary = wat::parse_file(fixture_path)?; let code_hash = T::Hashing::hash(&wasm_binary); Ok((wasm_binary, code_hash)) } + +/// Load a given wasm module and returns a wasm binary contents along with it's hash. +/// Use the legacy compile_module as fallback, if the rust fixture does not exist yet. +pub fn compile_module( + fixture_name: &str, +) -> anyhow::Result<(Vec, ::Output)> +where + T: frame_system::Config, +{ + let out_dir: std::path::PathBuf = env!("OUT_DIR").into(); + let fixture_path = out_dir.join(format!("{fixture_name}.wasm")); + match fs::read(fixture_path) { + Ok(wasm_binary) => { + let code_hash = T::Hashing::hash(&wasm_binary); + Ok((wasm_binary, code_hash)) + }, + Err(_) => legacy_compile_module::(fixture_name), + } +} + +#[cfg(test)] +mod test { + #[test] + fn out_dir_should_have_compiled_mocks() { + let out_dir: std::path::PathBuf = env!("OUT_DIR").into(); + let dummy_wasm = out_dir.join("dummy.wasm"); + println!("dummy_wasm: {:?}", dummy_wasm); + assert!(dummy_wasm.exists()); + } +} diff --git a/substrate/frame/contracts/mock-network/Cargo.toml b/substrate/frame/contracts/mock-network/Cargo.toml index 9d5fe1aaf4e..28e24dd15e4 100644 --- a/substrate/frame/contracts/mock-network/Cargo.toml +++ b/substrate/frame/contracts/mock-network/Cargo.toml @@ -16,7 +16,7 @@ frame-system = { path = "../../system", default-features = false} pallet-assets = { path = "../../assets" } pallet-balances = { path = "../../balances" } pallet-contracts = { path = ".." 
} -pallet-contracts-primitives = { path = "../primitives", default-features = false} +pallet-contracts-uapi = { path = "../uapi", default-features = false} pallet-contracts-proc-macro = { path = "../proc-macro" } pallet-insecure-randomness-collective-flip = { path = "../../insecure-randomness-collective-flip" } pallet-message-queue = { path = "../../message-queue" } @@ -52,7 +52,6 @@ std = [ "frame-support/std", "frame-system/std", "pallet-balances/std", - "pallet-contracts-primitives/std", "pallet-contracts-proc-macro/full", "pallet-contracts/std", "pallet-insecure-randomness-collective-flip/std", diff --git a/substrate/frame/contracts/mock-network/src/tests.rs b/substrate/frame/contracts/mock-network/src/tests.rs index 5193f657055..a66b2b08019 100644 --- a/substrate/frame/contracts/mock-network/src/tests.rs +++ b/substrate/frame/contracts/mock-network/src/tests.rs @@ -29,9 +29,8 @@ use frame_support::{ traits::{fungibles::Mutate, Currency}, }; use pallet_balances::{BalanceLock, Reasons}; -use pallet_contracts::{CollectEvents, DebugInfo, Determinism}; +use pallet_contracts::{Code, CollectEvents, DebugInfo, Determinism}; use pallet_contracts_fixtures::compile_module; -use pallet_contracts_primitives::Code; use xcm::{v3::prelude::*, VersionedMultiLocation, VersionedXcm}; use xcm_simulator::TestExt; diff --git a/substrate/frame/contracts/primitives/Cargo.toml b/substrate/frame/contracts/primitives/Cargo.toml deleted file mode 100644 index 0394841aa1f..00000000000 --- a/substrate/frame/contracts/primitives/Cargo.toml +++ /dev/null @@ -1,33 +0,0 @@ -[package] -name = "pallet-contracts-primitives" -version = "24.0.0" -authors.workspace = true -edition.workspace = true -license = "Apache-2.0" -homepage = "https://substrate.io" -repository.workspace = true -description = "A crate that hosts a common definitions that are relevant for the pallet-contracts." -readme = "README.md" - -[package.metadata.docs.rs] -targets = ["x86_64-unknown-linux-gnu"] - -[dependencies] -bitflags = "1.0" -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } - -# Substrate Dependencies (This crate should not rely on frame) -sp-std = { path = "../../../primitives/std", default-features = false} -sp-runtime = { path = "../../../primitives/runtime", default-features = false} -sp-weights = { path = "../../../primitives/weights", default-features = false} - -[features] -default = [ "std" ] -std = [ - "codec/std", - "scale-info/std", - "sp-runtime/std", - "sp-std/std", - "sp-weights/std", -] diff --git a/substrate/frame/contracts/primitives/README.md b/substrate/frame/contracts/primitives/README.md deleted file mode 100644 index c84cfbfe1a8..00000000000 --- a/substrate/frame/contracts/primitives/README.md +++ /dev/null @@ -1,3 +0,0 @@ -A crate that hosts a common definitions that are relevant for the pallet-contracts. 
- -License: Apache-2.0 diff --git a/substrate/frame/contracts/proc-macro/src/lib.rs b/substrate/frame/contracts/proc-macro/src/lib.rs index ad9cd2dadec..4ef02497b8e 100644 --- a/substrate/frame/contracts/proc-macro/src/lib.rs +++ b/substrate/frame/contracts/proc-macro/src/lib.rs @@ -271,7 +271,7 @@ impl HostFn { // process return type let msg = r#"Should return one of the following: - Result<(), TrapReason>, - - Result, + - Result, - Result, - Result"#; let ret_ty = match item.clone().sig.output { @@ -336,7 +336,7 @@ impl HostFn { "()" => Ok(HostFnReturn::Unit), "u32" => Ok(HostFnReturn::U32), "u64" => Ok(HostFnReturn::U64), - "ReturnCode" => Ok(HostFnReturn::ReturnCode), + "ReturnErrorCode" => Ok(HostFnReturn::ReturnCode), _ => Err(err(arg1.span(), &msg)), }?; @@ -550,7 +550,7 @@ fn expand_env(def: &EnvDef, docs: bool) -> TokenStream2 { /// consumed by humans through rustdoc. #[cfg(doc)] pub mod api_doc { - use super::{TrapReason, ReturnCode}; + use super::{TrapReason, ReturnErrorCode}; #docs } } @@ -767,7 +767,7 @@ fn expand_functions(def: &EnvDef, expand_blocks: bool, host_state: TokenStream2) /// #[define_env] /// pub mod some_env { /// #[version(2)] -/// fn foo(ctx: _, memory: _, key_ptr: u32, value_ptr: u32, value_len: u32) -> Result { +/// fn foo(ctx: _, memory: _, key_ptr: u32, value_ptr: u32, value_len: u32) -> Result { /// ctx.some_host_fn(KeyType::Fix, key_ptr, value_ptr, value_len).map(|_| ()) /// } /// @@ -793,7 +793,7 @@ fn expand_functions(def: &EnvDef, expand_blocks: bool, host_state: TokenStream2) /// pub mod some_env { /// #[version(1)] /// #[prefixed_alias] -/// fn foo(ctx: _, memory: _, key_ptr: u32, value_ptr: u32, value_len: u32) -> Result { +/// fn foo(ctx: _, memory: _, key_ptr: u32, value_ptr: u32, value_len: u32) -> Result { /// ctx.some_host_fn(KeyType::Fix, key_ptr, value_ptr, value_len).map(|_| ()) /// } /// @@ -811,7 +811,7 @@ fn expand_functions(def: &EnvDef, expand_blocks: bool, host_state: TokenStream2) /// /// Only following return types are allowed for the host functions defined with the macro: /// - `Result<(), TrapReason>`, -/// - `Result`, +/// - `Result`, /// - `Result`. 
/// /// The macro expands to `pub struct Env` declaration, with the following traits implementations: diff --git a/substrate/frame/contracts/src/benchmarking/mod.rs b/substrate/frame/contracts/src/benchmarking/mod.rs index ac5787e2340..c532b354ab6 100644 --- a/substrate/frame/contracts/src/benchmarking/mod.rs +++ b/substrate/frame/contracts/src/benchmarking/mod.rs @@ -33,7 +33,6 @@ use crate::{ migration::{ codegen::LATEST_MIGRATION_VERSION, v09, v10, v11, v12, v13, v14, v15, MigrationStep, }, - wasm::CallFlags, Pallet as Contracts, *, }; use codec::{Encode, MaxEncodedLen}; @@ -46,6 +45,7 @@ use frame_support::{ }; use frame_system::RawOrigin; use pallet_balances; +use pallet_contracts_uapi::CallFlags; use sp_runtime::traits::{Bounded, Hash}; use sp_std::prelude::*; use wasm_instrument::parity_wasm::elements::{BlockType, Instruction, ValueType}; diff --git a/substrate/frame/contracts/src/chain_extension.rs b/substrate/frame/contracts/src/chain_extension.rs index 664504d207f..8a7243d6bb3 100644 --- a/substrate/frame/contracts/src/chain_extension.rs +++ b/substrate/frame/contracts/src/chain_extension.rs @@ -81,7 +81,7 @@ use sp_std::{marker::PhantomData, vec::Vec}; pub use crate::{exec::Ext, gas::ChargedAmount, storage::meter::Diff, Config}; pub use frame_system::Config as SysConfig; -pub use pallet_contracts_primitives::ReturnFlags; +pub use pallet_contracts_uapi::ReturnFlags; /// Result that returns a [`DispatchError`] on error. pub type Result = sp_std::result::Result; diff --git a/substrate/frame/contracts/src/debug.rs b/substrate/frame/contracts/src/debug.rs index e22a841e6fb..6cdca7aa4c7 100644 --- a/substrate/frame/contracts/src/debug.rs +++ b/substrate/frame/contracts/src/debug.rs @@ -15,9 +15,11 @@ // See the License for the specific language governing permissions and // limitations under the License. -pub use crate::exec::{ExecResult, ExportedFunction}; +pub use crate::{ + exec::{ExecResult, ExportedFunction}, + primitives::ExecReturnValue, +}; use crate::{Config, LOG_TARGET}; -pub use pallet_contracts_primitives::ExecReturnValue; /// Umbrella trait for all interfaces that serves for debugging. 
pub trait Debugger: Tracing + CallInterceptor {} diff --git a/substrate/frame/contracts/src/exec.rs b/substrate/frame/contracts/src/exec.rs index c26d82f7f11..2183d6b96cc 100644 --- a/substrate/frame/contracts/src/exec.rs +++ b/substrate/frame/contracts/src/exec.rs @@ -18,6 +18,7 @@ use crate::{ debug::{CallInterceptor, CallSpan, Tracing}, gas::GasMeter, + primitives::{ExecReturnValue, StorageDeposit}, storage::{self, meter::Diff, WriteOutcome}, BalanceOf, CodeHash, CodeInfo, CodeInfoOf, Config, ContractInfo, ContractInfoOf, DebugBufferVec, Determinism, Error, Event, Nonce, Origin, Pallet as Contracts, Schedule, @@ -37,7 +38,6 @@ use frame_support::{ Blake2_128Concat, BoundedVec, StorageHasher, }; use frame_system::{pallet_prelude::BlockNumberFor, RawOrigin}; -use pallet_contracts_primitives::{ExecReturnValue, StorageDeposit}; use smallvec::{Array, SmallVec}; use sp_core::{ ecdsa::Public as ECDSAPublic, @@ -1618,7 +1618,7 @@ mod tests { use codec::{Decode, Encode}; use frame_support::{assert_err, assert_ok, parameter_types}; use frame_system::{EventRecord, Phase}; - use pallet_contracts_primitives::ReturnFlags; + use pallet_contracts_uapi::ReturnFlags; use pretty_assertions::assert_eq; use sp_runtime::{traits::Hash, DispatchError}; use std::{cell::RefCell, collections::hash_map::HashMap, rc::Rc}; diff --git a/substrate/frame/contracts/src/lib.rs b/substrate/frame/contracts/src/lib.rs index 188679dbf49..a15006e6388 100644 --- a/substrate/frame/contracts/src/lib.rs +++ b/substrate/frame/contracts/src/lib.rs @@ -91,6 +91,9 @@ mod address; mod benchmarking; mod exec; mod gas; +mod primitives; +pub use primitives::*; + mod schedule; mod storage; mod wasm; @@ -128,11 +131,6 @@ use frame_system::{ pallet_prelude::{BlockNumberFor, OriginFor}, EventRecord, Pallet as System, }; -use pallet_contracts_primitives::{ - Code, CodeUploadResult, CodeUploadReturnValue, ContractAccessError, ContractExecResult, - ContractInstantiateResult, ContractResult, ExecReturnValue, GetStorageResult, - InstantiateReturnValue, StorageDeposit, -}; use scale_info::TypeInfo; use smallvec::Array; use sp_runtime::{ diff --git a/substrate/frame/contracts/primitives/src/lib.rs b/substrate/frame/contracts/src/primitives.rs similarity index 96% rename from substrate/frame/contracts/primitives/src/lib.rs rename to substrate/frame/contracts/src/primitives.rs index c3314928500..ab73b28e8c4 100644 --- a/substrate/frame/contracts/primitives/src/lib.rs +++ b/substrate/frame/contracts/src/primitives.rs @@ -17,17 +17,15 @@ //! A crate that hosts a common definitions that are relevant for the pallet-contracts. -#![cfg_attr(not(feature = "std"), no_std)] - -use bitflags::bitflags; use codec::{Decode, Encode, MaxEncodedLen}; +use frame_support::weights::Weight; +use pallet_contracts_uapi::ReturnFlags; use scale_info::TypeInfo; use sp_runtime::{ traits::{Saturating, Zero}, DispatchError, RuntimeDebug, }; use sp_std::prelude::*; -use sp_weights::Weight; /// Result type of a `bare_call` or `bare_instantiate` call as well as `ContractsApi::call` and /// `ContractsApi::instantiate`. @@ -109,15 +107,6 @@ pub enum ContractAccessError { MigrationInProgress, } -bitflags! { - /// Flags used by a contract to customize exit behaviour. - #[derive(Encode, Decode, TypeInfo)] - pub struct ReturnFlags: u32 { - /// If this bit is set all changes made by the contract execution are rolled back. - const REVERT = 0x0000_0001; - } -} - /// Output of a contract call or instantiation which ran to completion. 
#[derive(Clone, PartialEq, Eq, Encode, Decode, RuntimeDebug, TypeInfo)] pub struct ExecReturnValue { diff --git a/substrate/frame/contracts/src/tests.rs b/substrate/frame/contracts/src/tests.rs index 79ad451964d..4f63104ef26 100644 --- a/substrate/frame/contracts/src/tests.rs +++ b/substrate/frame/contracts/src/tests.rs @@ -30,9 +30,10 @@ use crate::{ }, exec::{Frame, Key}, migration::codegen::LATEST_MIGRATION_VERSION, + primitives::CodeUploadReturnValue, storage::DeletionQueueManager, tests::test_utils::{get_contract, get_contract_checked}, - wasm::{Determinism, ReturnCode as RuntimeReturnCode}, + wasm::{Determinism, ReturnErrorCode as RuntimeReturnCode}, weights::WeightInfo, BalanceOf, Code, CodeHash, CodeInfoOf, CollectEvents, Config, ContractInfo, ContractInfoOf, DebugInfo, DefaultAddressGenerator, DeletionQueueCounter, Error, HoldReason, @@ -55,7 +56,6 @@ use frame_support::{ }; use frame_system::{EventRecord, Phase}; use pallet_contracts_fixtures::compile_module; -use pallet_contracts_primitives::CodeUploadReturnValue; use pretty_assertions::{assert_eq, assert_ne}; use sp_core::ByteArray; use sp_io::hashing::blake2_256; diff --git a/substrate/frame/contracts/src/tests/test_debug.rs b/substrate/frame/contracts/src/tests/test_debug.rs index 2d7ed474365..c9b6557bbb9 100644 --- a/substrate/frame/contracts/src/tests/test_debug.rs +++ b/substrate/frame/contracts/src/tests/test_debug.rs @@ -18,10 +18,10 @@ use super::*; use crate::{ debug::{CallInterceptor, CallSpan, ExecResult, ExportedFunction, Tracing}, + primitives::ExecReturnValue, AccountIdOf, }; use frame_support::traits::Currency; -use pallet_contracts_primitives::ExecReturnValue; use pretty_assertions::assert_eq; use std::cell::RefCell; diff --git a/substrate/frame/contracts/src/wasm/mod.rs b/substrate/frame/contracts/src/wasm/mod.rs index f73655e9920..448c0dd7496 100644 --- a/substrate/frame/contracts/src/wasm/mod.rs +++ b/substrate/frame/contracts/src/wasm/mod.rs @@ -24,13 +24,13 @@ mod runtime; #[cfg(doc)] pub use crate::wasm::runtime::api_doc; -#[cfg(test)] -pub use tests::MockExt; - pub use crate::wasm::runtime::{ - AllowDeprecatedInterface, AllowUnstableInterface, CallFlags, Environment, ReturnCode, Runtime, + AllowDeprecatedInterface, AllowUnstableInterface, Environment, ReturnErrorCode, Runtime, RuntimeCosts, }; +pub use pallet_contracts_uapi::ReturnFlags; +#[cfg(test)] +pub use tests::MockExt; use crate::{ exec::{ExecResult, Executable, ExportedFunction, Ext}, @@ -436,6 +436,7 @@ mod tests { use crate::{ exec::{AccountIdOf, ErrorOrigin, ExecError, Executable, Ext, Key, SeedOf}, gas::GasMeter, + primitives::ExecReturnValue, storage::WriteOutcome, tests::{RuntimeCall, Test, ALICE, BOB}, BalanceOf, CodeHash, Error, Origin, Pallet as Contracts, @@ -445,7 +446,7 @@ mod tests { assert_err, assert_ok, dispatch::DispatchResultWithPostInfo, weights::Weight, }; use frame_system::pallet_prelude::BlockNumberFor; - use pallet_contracts_primitives::{ExecReturnValue, ReturnFlags}; + use pallet_contracts_uapi::ReturnFlags; use pretty_assertions::assert_eq; use sp_core::H256; use sp_runtime::DispatchError; @@ -2739,7 +2740,7 @@ mod tests { let result = execute(CODE, input, &mut ext).unwrap(); assert_eq!( u32::from_le_bytes(result.data[0..4].try_into().unwrap()), - ReturnCode::KeyNotFound as u32 + ReturnErrorCode::KeyNotFound as u32 ); // value exists @@ -2747,7 +2748,7 @@ mod tests { let result = execute(CODE, input, &mut ext).unwrap(); assert_eq!( u32::from_le_bytes(result.data[0..4].try_into().unwrap()), - ReturnCode::Success as u32 + 
ReturnErrorCode::Success as u32 ); assert_eq!(ext.storage.get(&[1u8; 64].to_vec()).unwrap(), &[42u8]); assert_eq!(&result.data[4..], &[42u8]); @@ -2757,7 +2758,7 @@ mod tests { let result = execute(CODE, input, &mut ext).unwrap(); assert_eq!( u32::from_le_bytes(result.data[0..4].try_into().unwrap()), - ReturnCode::Success as u32 + ReturnErrorCode::Success as u32 ); assert_eq!(ext.storage.get(&[2u8; 19].to_vec()), Some(&vec![])); assert_eq!(&result.data[4..], &([] as [u8; 0])); @@ -2920,7 +2921,7 @@ mod tests { let result = execute(CODE, input, &mut ext).unwrap(); assert_eq!( u32::from_le_bytes(result.data[0..4].try_into().unwrap()), - ReturnCode::KeyNotFound as u32 + ReturnErrorCode::KeyNotFound as u32 ); // value did exist -> value returned @@ -2928,7 +2929,7 @@ mod tests { let result = execute(CODE, input, &mut ext).unwrap(); assert_eq!( u32::from_le_bytes(result.data[0..4].try_into().unwrap()), - ReturnCode::Success as u32 + ReturnErrorCode::Success as u32 ); assert_eq!(ext.storage.get(&[1u8; 64].to_vec()), None); assert_eq!(&result.data[4..], &[42u8]); @@ -2938,7 +2939,7 @@ mod tests { let result = execute(CODE, input, &mut ext).unwrap(); assert_eq!( u32::from_le_bytes(result.data[0..4].try_into().unwrap()), - ReturnCode::Success as u32 + ReturnErrorCode::Success as u32 ); assert_eq!(ext.storage.get(&[2u8; 19].to_vec()), None); assert_eq!(&result.data[4..], &[0u8; 0]); diff --git a/substrate/frame/contracts/src/wasm/runtime.rs b/substrate/frame/contracts/src/wasm/runtime.rs index b3013adb790..871ef05c37e 100644 --- a/substrate/frame/contracts/src/wasm/runtime.rs +++ b/substrate/frame/contracts/src/wasm/runtime.rs @@ -20,10 +20,10 @@ use crate::{ exec::{ExecError, ExecResult, Ext, Key, TopicOf}, gas::{ChargedAmount, Token}, + primitives::ExecReturnValue, schedule::HostFnWeights, BalanceOf, CodeHash, Config, DebugBufferVec, Error, SENTINEL, }; -use bitflags::bitflags; use codec::{Decode, DecodeLimit, Encode, MaxEncodedLen}; use frame_support::{ dispatch::DispatchInfo, @@ -33,8 +33,8 @@ use frame_support::{ traits::Get, weights::Weight, }; -use pallet_contracts_primitives::{ExecReturnValue, ReturnFlags}; use pallet_contracts_proc_macro::define_env; +use pallet_contracts_uapi::{CallFlags, ReturnFlags}; use sp_io::hashing::{blake2_128, blake2_256, keccak_256, sha2_256}; use sp_runtime::{ traits::{Bounded, Zero}, @@ -87,55 +87,16 @@ enum KeyType { Var(u32), } -/// Every error that can be returned to a contract when it calls any of the host functions. -/// -/// # Note -/// -/// This enum can be extended in the future: New codes can be added but existing codes -/// will not be changed or removed. This means that any contract **must not** exhaustively -/// match return codes. Instead, contracts should prepare for unknown variants and deal with -/// those errors gracefully in order to be forward compatible. -#[derive(Debug)] -#[repr(u32)] -pub enum ReturnCode { - /// API call successful. - Success = 0, - /// The called function trapped and has its state changes reverted. - /// In this case no output buffer is returned. - CalleeTrapped = 1, - /// The called function ran to completion but decided to revert its state. - /// An output buffer is returned when one was supplied. - CalleeReverted = 2, - /// The passed key does not exist in storage. - KeyNotFound = 3, - /// See [`Error::TransferFailed`]. - TransferFailed = 5, - /// No code could be found at the supplied code hash. - CodeNotFound = 7, - /// The contract that was called is no contract (a plain account). 
- NotCallable = 8, - /// The call dispatched by `seal_call_runtime` was executed but returned an error. - CallRuntimeFailed = 10, - /// ECDSA pubkey recovery failed (most probably wrong recovery id or signature), or - /// ECDSA compressed pubkey conversion into Ethereum address failed (most probably - /// wrong pubkey provided). - EcdsaRecoverFailed = 11, - /// sr25519 signature verification failed. - Sr25519VerifyFailed = 12, - /// The `xcm_execute` call failed. - XcmExecutionFailed = 13, - /// The `xcm_send` call failed. - XcmSendFailed = 14, -} +pub use pallet_contracts_uapi::ReturnErrorCode; parameter_types! { /// Getter types used by [`crate::api_doc::Current::call_runtime`] - const CallRuntimeFailed: ReturnCode = ReturnCode::CallRuntimeFailed; + const CallRuntimeFailed: ReturnErrorCode = ReturnErrorCode::CallRuntimeFailed; /// Getter types used by [`crate::api_doc::Current::xcm_execute`] - const XcmExecutionFailed: ReturnCode = ReturnCode::XcmExecutionFailed; + const XcmExecutionFailed: ReturnErrorCode = ReturnErrorCode::XcmExecutionFailed; } -impl From for ReturnCode { +impl From for ReturnErrorCode { fn from(from: ExecReturnValue) -> Self { if from.flags.contains(ReturnFlags::REVERT) { Self::CalleeReverted @@ -145,12 +106,6 @@ impl From for ReturnCode { } } -impl From for u32 { - fn from(code: ReturnCode) -> u32 { - code as u32 - } -} - /// The data passed through when a contract uses `seal_return`. #[derive(RuntimeDebug)] pub struct ReturnData { @@ -411,52 +366,6 @@ impl Token for RuntimeToken { } } -bitflags! { - /// Flags used to change the behaviour of `seal_call` and `seal_delegate_call`. - pub struct CallFlags: u32 { - /// Forward the input of current function to the callee. - /// - /// Supplied input pointers are ignored when set. - /// - /// # Note - /// - /// A forwarding call will consume the current contracts input. Any attempt to - /// access the input after this call returns will lead to [`Error::InputForwarded`]. - /// It does not matter if this is due to calling `seal_input` or trying another - /// forwarding call. Consider using [`Self::CLONE_INPUT`] in order to preserve - /// the input. - const FORWARD_INPUT = 0b0000_0001; - /// Identical to [`Self::FORWARD_INPUT`] but without consuming the input. - /// - /// This adds some additional weight costs to the call. - /// - /// # Note - /// - /// This implies [`Self::FORWARD_INPUT`] and takes precedence when both are set. - const CLONE_INPUT = 0b0000_0010; - /// Do not return from the call but rather return the result of the callee to the - /// callers caller. - /// - /// # Note - /// - /// This makes the current contract completely transparent to its caller by replacing - /// this contracts potential output by the callee ones. Any code after `seal_call` - /// can be safely considered unreachable. - const TAIL_CALL = 0b0000_0100; - /// Allow the callee to reenter into the current contract. - /// - /// Without this flag any reentrancy into the current contract that originates from - /// the callee (or any of its callees) is denied. This includes the first callee: - /// You cannot call into yourself with this flag set. - /// - /// # Note - /// - /// For `seal_delegate_call` should be always unset, otherwise - /// [`Error::InvalidCallFlags`] is returned. - const ALLOW_REENTRY = 0b0000_1000; - } -} - /// The kind of call that should be performed. 
enum CallType { /// Execute another instantiated contract @@ -603,20 +512,20 @@ impl<'a, E: Ext + 'a> Runtime<'a, E> { /// Charge, Run and adjust gas, for executing the given dispatchable. fn call_dispatchable< - ErrorReturnCode: Get, + ErrorReturnCode: Get, F: FnOnce(&mut Self) -> DispatchResultWithPostInfo, >( &mut self, dispatch_info: DispatchInfo, run: F, - ) -> Result { + ) -> Result { use frame_support::dispatch::extract_actual_weight; let charged = self.charge_gas(RuntimeCosts::CallRuntime(dispatch_info.weight))?; let result = run(self); let actual_weight = extract_actual_weight(&result, &dispatch_info); self.adjust_gas(charged, RuntimeCosts::CallRuntime(actual_weight)); match result { - Ok(_) => Ok(ReturnCode::Success), + Ok(_) => Ok(ReturnErrorCode::Success), Err(e) => { if self.ext.append_debug_buffer("") { self.ext.append_debug_buffer("call failed with: "); @@ -807,9 +716,9 @@ impl<'a, E: Ext + 'a> Runtime<'a, E> { Ok(()) } - /// Fallible conversion of `DispatchError` to `ReturnCode`. - fn err_into_return_code(from: DispatchError) -> Result { - use ReturnCode::*; + /// Fallible conversion of `DispatchError` to `ReturnErrorCode`. + fn err_into_return_code(from: DispatchError) -> Result { + use ReturnErrorCode::*; let transfer_failed = Error::::TransferFailed.into(); let no_code = Error::::CodeNotFound.into(); @@ -823,8 +732,8 @@ impl<'a, E: Ext + 'a> Runtime<'a, E> { } } - /// Fallible conversion of a `ExecResult` to `ReturnCode`. - fn exec_into_return_code(from: ExecResult) -> Result { + /// Fallible conversion of a `ExecResult` to `ReturnErrorCode`. + fn exec_into_return_code(from: ExecResult) -> Result { use crate::exec::ErrorOrigin::Callee; let ExecError { error, origin } = match from { @@ -833,7 +742,7 @@ impl<'a, E: Ext + 'a> Runtime<'a, E> { }; match (error, origin) { - (_, Callee) => Ok(ReturnCode::CalleeTrapped), + (_, Callee) => Ok(ReturnErrorCode::CalleeTrapped), (err, _) => Self::err_into_return_code(err), } } @@ -907,7 +816,7 @@ impl<'a, E: Ext + 'a> Runtime<'a, E> { key_ptr: u32, out_ptr: u32, out_len_ptr: u32, - ) -> Result { + ) -> Result { let charged = self.charge_gas(RuntimeCosts::GetStorage(self.ext.max_value_size()))?; let key = self.decode_key(memory, key_type, key_ptr)?; let outcome = self.ext.get_storage(&key); @@ -922,10 +831,10 @@ impl<'a, E: Ext + 'a> Runtime<'a, E> { false, already_charged, )?; - Ok(ReturnCode::Success) + Ok(ReturnErrorCode::Success) } else { self.adjust_gas(charged, RuntimeCosts::GetStorage(0)); - Ok(ReturnCode::KeyNotFound) + Ok(ReturnErrorCode::KeyNotFound) } } @@ -952,7 +861,7 @@ impl<'a, E: Ext + 'a> Runtime<'a, E> { input_data_len: u32, output_ptr: u32, output_len_ptr: u32, - ) -> Result { + ) -> Result { self.charge_gas(call_type.cost())?; let input_data = if flags.contains(CallFlags::CLONE_INPUT) { let input = self.input_data.as_ref().ok_or(Error::::InputForwarded)?; @@ -1036,7 +945,7 @@ impl<'a, E: Ext + 'a> Runtime<'a, E> { output_len_ptr: u32, salt_ptr: u32, salt_len: u32, - ) -> Result { + ) -> Result { self.charge_gas(RuntimeCosts::InstantiateBase { input_data_len, salt_len })?; let deposit_limit: BalanceOf<::T> = if deposit_ptr == SENTINEL { BalanceOf::<::T>::zero() @@ -1096,10 +1005,7 @@ impl<'a, E: Ext + 'a> Runtime<'a, E> { pub mod env { /// Set the value at the given key in the contract storage. - /// - /// Equivalent to the newer [`seal1`][`super::api_doc::Version1::set_storage`] version with the - /// exception of the return type. Still a valid thing to call when not interested in the return - /// value. 
+ /// See [`pallet_contracts_uapi::HostFn::set_storage`] #[prefixed_alias] fn set_storage( ctx: _, @@ -1112,23 +1018,7 @@ pub mod env { } /// Set the value at the given key in the contract storage. - /// - /// This version is to be used with a fixed sized storage key. For runtimes supporting - /// transparent hashing, please use the newer version of this function. - /// - /// The value length must not exceed the maximum defined by the contracts module parameters. - /// Specifying a `value_len` of zero will store an empty value. - /// - /// # Parameters - /// - /// - `key_ptr`: pointer into the linear memory where the location to store the value is placed. - /// - `value_ptr`: pointer into the linear memory where the value to set is placed. - /// - `value_len`: the length of the value in bytes. - /// - /// # Return Value - /// - /// Returns the size of the pre-existing value at the specified key if any. Otherwise - /// `SENTINEL` is returned as a sentinel value. + /// See [`pallet_contracts_uapi::HostFn::set_storage_v1`] #[version(1)] #[prefixed_alias] fn set_storage( @@ -1142,21 +1032,7 @@ pub mod env { } /// Set the value at the given key in the contract storage. - /// - /// The key and value lengths must not exceed the maximums defined by the contracts module - /// parameters. Specifying a `value_len` of zero will store an empty value. - /// - /// # Parameters - /// - /// - `key_ptr`: pointer into the linear memory where the location to store the value is placed. - /// - `key_len`: the length of the key in bytes. - /// - `value_ptr`: pointer into the linear memory where the value to set is placed. - /// - `value_len`: the length of the value in bytes. - /// - /// # Return Value - /// - /// Returns the size of the pre-existing value at the specified key if any. Otherwise - /// `SENTINEL` is returned as a sentinel value. + /// See [`pallet_contracts_uapi::HostFn::set_storage_v2`] #[version(2)] #[prefixed_alias] fn set_storage( @@ -1171,26 +1047,14 @@ pub mod env { } /// Clear the value at the given key in the contract storage. - /// - /// Equivalent to the newer [`seal1`][`super::api_doc::Version1::clear_storage`] version with - /// the exception of the return type. Still a valid thing to call when not interested in the - /// return value. + /// See [`pallet_contracts_uapi::HostFn::clear_storage`] #[prefixed_alias] fn clear_storage(ctx: _, memory: _, key_ptr: u32) -> Result<(), TrapReason> { ctx.clear_storage(memory, KeyType::Fix, key_ptr).map(|_| ()) } /// Clear the value at the given key in the contract storage. - /// - /// # Parameters - /// - /// - `key_ptr`: pointer into the linear memory where the key is placed. - /// - `key_len`: the length of the key in bytes. - /// - /// # Return Value - /// - /// Returns the size of the pre-existing value at the specified key if any. Otherwise - /// `SENTINEL` is returned as a sentinel value. + /// See [`pallet_contracts_uapi::HostFn::clear_storage_v1`] #[version(1)] #[prefixed_alias] fn clear_storage(ctx: _, memory: _, key_ptr: u32, key_len: u32) -> Result { @@ -1198,20 +1062,7 @@ pub mod env { } /// Retrieve the value under the given key from storage. - /// - /// This version is to be used with a fixed sized storage key. For runtimes supporting - /// transparent hashing, please use the newer version of this function. - /// - /// # Parameters - /// - /// - `key_ptr`: pointer into the linear memory where the key of the requested value is placed. - /// - `out_ptr`: pointer to the linear memory where the value is written to. 
- /// - `out_len_ptr`: in-out pointer into linear memory where the buffer length is read from and - /// the value length is written to. - /// - /// # Errors - /// - /// `ReturnCode::KeyNotFound` + /// See [`pallet_contracts_uapi::HostFn::get_storage`] #[prefixed_alias] fn get_storage( ctx: _, @@ -1219,28 +1070,12 @@ pub mod env { key_ptr: u32, out_ptr: u32, out_len_ptr: u32, - ) -> Result { + ) -> Result { ctx.get_storage(memory, KeyType::Fix, key_ptr, out_ptr, out_len_ptr) } /// Retrieve the value under the given key from storage. - /// - /// This version is to be used with a fixed sized storage key. For runtimes supporting - /// transparent hashing, please use the newer version of this function. - /// - /// The key length must not exceed the maximum defined by the contracts module parameter. - /// - /// # Parameters - /// - /// - `key_ptr`: pointer into the linear memory where the key of the requested value is placed. - /// - `key_len`: the length of the key in bytes. - /// - `out_ptr`: pointer to the linear memory where the value is written to. - /// - `out_len_ptr`: in-out pointer into linear memory where the buffer length is read from and - /// the value length is written to. - /// - /// # Errors - /// - /// - `ReturnCode::KeyNotFound` + /// See [`pallet_contracts_uapi::HostFn::get_storage_v1`] #[version(1)] #[prefixed_alias] fn get_storage( @@ -1250,41 +1085,19 @@ pub mod env { key_len: u32, out_ptr: u32, out_len_ptr: u32, - ) -> Result { + ) -> Result { ctx.get_storage(memory, KeyType::Var(key_len), key_ptr, out_ptr, out_len_ptr) } /// Checks whether there is a value stored under the given key. - /// - /// This version is to be used with a fixed sized storage key. For runtimes supporting - /// transparent hashing, please use the newer version of this function. - /// - /// # Parameters - /// - /// - `key_ptr`: pointer into the linear memory where the key of the requested value is placed. - /// - /// # Return Value - /// - /// Returns the size of the pre-existing value at the specified key if any. Otherwise - /// `SENTINEL` is returned as a sentinel value. + /// See [`pallet_contracts_uapi::HostFn::contains_storage`] #[prefixed_alias] fn contains_storage(ctx: _, memory: _, key_ptr: u32) -> Result { ctx.contains_storage(memory, KeyType::Fix, key_ptr) } /// Checks whether there is a value stored under the given key. - /// - /// The key length must not exceed the maximum defined by the contracts module parameter. - /// - /// # Parameters - /// - /// - `key_ptr`: pointer into the linear memory where the key of the requested value is placed. - /// - `key_len`: the length of the key in bytes. - /// - /// # Return Value - /// - /// Returns the size of the pre-existing value at the specified key if any. Otherwise - /// `SENTINEL` is returned as a sentinel value. + /// See [`pallet_contracts_uapi::HostFn::contains_storage_v1`] #[version(1)] #[prefixed_alias] fn contains_storage(ctx: _, memory: _, key_ptr: u32, key_len: u32) -> Result { @@ -1292,18 +1105,7 @@ pub mod env { } /// Retrieve and remove the value under the given key from storage. - /// - /// # Parameters - /// - /// - `key_ptr`: pointer into the linear memory where the key of the requested value is placed. - /// - `key_len`: the length of the key in bytes. - /// - `out_ptr`: pointer to the linear memory where the value is written to. - /// - `out_len_ptr`: in-out pointer into linear memory where the buffer length is read from and - /// the value length is written to. 
- /// - /// # Errors - /// - /// - `ReturnCode::KeyNotFound` + /// See [`pallet_contracts_uapi::HostFn::take_storage`] #[prefixed_alias] fn take_storage( ctx: _, @@ -1312,7 +1114,7 @@ pub mod env { key_len: u32, out_ptr: u32, out_len_ptr: u32, - ) -> Result { + ) -> Result { let charged = ctx.charge_gas(RuntimeCosts::TakeStorage(ctx.ext.max_value_size()))?; ensure!( key_len <= <::T as Config>::MaxStorageKeyLen::get(), @@ -1326,27 +1128,15 @@ pub mod env { )? { ctx.adjust_gas(charged, RuntimeCosts::TakeStorage(value.len() as u32)); ctx.write_sandbox_output(memory, out_ptr, out_len_ptr, &value, false, already_charged)?; - Ok(ReturnCode::Success) + Ok(ReturnErrorCode::Success) } else { ctx.adjust_gas(charged, RuntimeCosts::TakeStorage(0)); - Ok(ReturnCode::KeyNotFound) + Ok(ReturnErrorCode::KeyNotFound) } } /// Transfer some value to another account. - /// - /// # Parameters - /// - /// - `account_ptr`: a pointer to the address of the beneficiary account Should be decodable as - /// an `T::AccountId`. Traps otherwise. - /// - `account_len`: length of the address buffer. - /// - `value_ptr`: a pointer to the buffer with value, how much value to send. Should be - /// decodable as a `T::Balance`. Traps otherwise. - /// - `value_len`: length of the value buffer. - /// - /// # Errors - /// - /// - `ReturnCode::TransferFailed` + /// See [`pallet_contracts_uapi::HostFn::transfer`]. #[prefixed_alias] fn transfer( ctx: _, @@ -1355,14 +1145,14 @@ pub mod env { _account_len: u32, value_ptr: u32, _value_len: u32, - ) -> Result { + ) -> Result { ctx.charge_gas(RuntimeCosts::Transfer)?; let callee: <::T as frame_system::Config>::AccountId = ctx.read_sandbox_memory_as(memory, account_ptr)?; let value: BalanceOf<::T> = ctx.read_sandbox_memory_as(memory, value_ptr)?; let result = ctx.ext.transfer(&callee, value); match result { - Ok(()) => Ok(ReturnCode::Success), + Ok(()) => Ok(ReturnErrorCode::Success), Err(err) => { let code = Runtime::::err_into_return_code(err)?; Ok(code) @@ -1372,17 +1162,11 @@ pub mod env { /// Make a call to another contract. /// - /// # New version available - /// - /// This is equivalent to calling the newer version of this function with - /// `flags` set to `ALLOW_REENTRY`. See the newer version for documentation. - /// /// # Note /// - /// The values `_callee_len` and `_value_len` are ignored because the encoded sizes - /// of those types are fixed through - /// [`codec::MaxEncodedLen`]. The fields exist - /// for backwards compatibility. Consider switching to the newest version of this function. + /// The values `_callee_len` and `_value_len` are ignored because the encoded sizes of those + /// types are fixed through [`codec::MaxEncodedLen`]. The fields exist for backwards + /// compatibility. Consider switching to the newest version of this function. #[prefixed_alias] fn call( ctx: _, @@ -1396,7 +1180,7 @@ pub mod env { input_data_len: u32, output_ptr: u32, output_len_ptr: u32, - ) -> Result { + ) -> Result { ctx.call( memory, CallFlags::ALLOW_REENTRY, @@ -1414,10 +1198,7 @@ pub mod env { } /// Make a call to another contract. - /// - /// Equivalent to the newer [`seal2`][`super::api_doc::Version2::call`] version but works with - /// *ref_time* Weight only. It is recommended to switch to the latest version, once it's - /// stabilized. + /// See [`pallet_contracts_uapi::HostFn::call_v1`]. 
#[version(1)] #[prefixed_alias] fn call( @@ -1431,7 +1212,7 @@ pub mod env { input_data_len: u32, output_ptr: u32, output_len_ptr: u32, - ) -> Result { + ) -> Result { ctx.call( memory, CallFlags::from_bits(flags).ok_or(Error::::InvalidCallFlags)?, @@ -1449,39 +1230,7 @@ pub mod env { } /// Make a call to another contract. - /// - /// The callees output buffer is copied to `output_ptr` and its length to `output_len_ptr`. - /// The copy of the output buffer can be skipped by supplying the sentinel value - /// of `SENTINEL` to `output_ptr`. - /// - /// # Parameters - /// - /// - `flags`: See `crate::wasm::runtime::CallFlags` for a documentation of the supported flags. - /// - `callee_ptr`: a pointer to the address of the callee contract. Should be decodable as an - /// `T::AccountId`. Traps otherwise. - /// - `ref_time_limit`: how much *ref_time* Weight to devote to the execution. - /// - `proof_size_limit`: how much *proof_size* Weight to devote to the execution. - /// - `deposit_ptr`: a pointer to the buffer with value of the storage deposit limit for the - /// call. Should be decodable as a `T::Balance`. Traps otherwise. Passing `SENTINEL` means - /// setting no specific limit for the call, which implies storage usage up to the limit of the - /// parent call. - /// - `value_ptr`: a pointer to the buffer with value, how much value to send. Should be - /// decodable as a `T::Balance`. Traps otherwise. - /// - `input_data_ptr`: a pointer to a buffer to be used as input data to the callee. - /// - `input_data_len`: length of the input data buffer. - /// - `output_ptr`: a pointer where the output buffer is copied to. - /// - `output_len_ptr`: in-out pointer to where the length of the buffer is read from and the - /// actual length is written to. - /// - /// # Errors - /// - /// An error means that the call wasn't successful output buffer is returned unless - /// stated otherwise. - /// - /// - `ReturnCode::CalleeReverted`: Output buffer is returned. - /// - `ReturnCode::CalleeTrapped` - /// - `ReturnCode::TransferFailed` - /// - `ReturnCode::NotCallable` + /// See [`pallet_contracts_uapi::HostFn::call_v2`]. #[version(2)] #[unstable] fn call( @@ -1497,7 +1246,7 @@ pub mod env { input_data_len: u32, output_ptr: u32, output_len_ptr: u32, - ) -> Result { + ) -> Result { ctx.call( memory, CallFlags::from_bits(flags).ok_or(Error::::InvalidCallFlags)?, @@ -1515,29 +1264,7 @@ pub mod env { } /// Execute code in the context (storage, caller, value) of the current contract. - /// - /// Reentrancy protection is always disabled since the callee is allowed - /// to modify the callers storage. This makes going through a reentrancy attack - /// unnecessary for the callee when it wants to exploit the caller. - /// - /// # Parameters - /// - /// - `flags`: see `crate::wasm::runtime::CallFlags` for a documentation of the supported flags. - /// - `code_hash`: a pointer to the hash of the code to be called. - /// - `input_data_ptr`: a pointer to a buffer to be used as input data to the callee. - /// - `input_data_len`: length of the input data buffer. - /// - `output_ptr`: a pointer where the output buffer is copied to. - /// - `output_len_ptr`: in-out pointer to where the length of the buffer is read from and the - /// actual length is written to. - /// - /// # Errors - /// - /// An error means that the call wasn't successful and no output buffer is returned unless - /// stated otherwise. - /// - /// - `ReturnCode::CalleeReverted`: Output buffer is returned. 
- /// - `ReturnCode::CalleeTrapped` - /// - `ReturnCode::CodeNotFound` + /// See [`pallet_contracts_uapi::HostFn::delegate_call`]. #[prefixed_alias] fn delegate_call( ctx: _, @@ -1548,7 +1275,7 @@ pub mod env { input_data_len: u32, output_ptr: u32, output_len_ptr: u32, - ) -> Result { + ) -> Result { ctx.call( memory, CallFlags::from_bits(flags).ok_or(Error::::InvalidCallFlags)?, @@ -1561,11 +1288,7 @@ pub mod env { } /// Instantiate a contract with the specified code hash. - /// - /// # New version available - /// - /// This is equivalent to calling the newer version of this function. The newer version - /// drops the now unnecessary length fields. + /// See [`pallet_contracts_uapi::HostFn::instantiate`]. /// /// # Note /// @@ -1589,7 +1312,7 @@ pub mod env { output_len_ptr: u32, salt_ptr: u32, salt_len: u32, - ) -> Result { + ) -> Result { ctx.instantiate( memory, code_hash_ptr, @@ -1608,10 +1331,7 @@ pub mod env { } /// Instantiate a contract with the specified code hash. - /// - /// Equivalent to the newer [`seal2`][`super::api_doc::Version2::instantiate`] version but works - /// with *ref_time* Weight only. It is recommended to switch to the latest version, once it's - /// stabilized. + /// See [`pallet_contracts_uapi::HostFn::instantiate_v1`]. #[version(1)] #[prefixed_alias] fn instantiate( @@ -1628,7 +1348,7 @@ pub mod env { output_len_ptr: u32, salt_ptr: u32, salt_len: u32, - ) -> Result { + ) -> Result { ctx.instantiate( memory, code_hash_ptr, @@ -1647,48 +1367,7 @@ pub mod env { } /// Instantiate a contract with the specified code hash. - /// - /// This function creates an account and executes the constructor defined in the code specified - /// by the code hash. The address of this new account is copied to `address_ptr` and its length - /// to `address_len_ptr`. The constructors output buffer is copied to `output_ptr` and its - /// length to `output_len_ptr`. The copy of the output buffer and address can be skipped by - /// supplying the sentinel value of `SENTINEL` to `output_ptr` or `address_ptr`. - /// - /// # Parameters - /// - /// - `code_hash_ptr`: a pointer to the buffer that contains the initializer code. - /// - `ref_time_limit`: how much *ref_time* Weight to devote to the execution. - /// - `proof_size_limit`: how much *proof_size* Weight to devote to the execution. - /// - `deposit_ptr`: a pointer to the buffer with value of the storage deposit limit for - /// instantiation. Should be decodable as a `T::Balance`. Traps otherwise. Passing `SENTINEL` - /// means setting no specific limit for the call, which implies storage usage up to the limit - /// of the parent call. - /// - `value_ptr`: a pointer to the buffer with value, how much value to send. Should be - /// decodable as a `T::Balance`. Traps otherwise. - /// - `input_data_ptr`: a pointer to a buffer to be used as input data to the initializer code. - /// - `input_data_len`: length of the input data buffer. - /// - `address_ptr`: a pointer where the new account's address is copied to. `SENTINEL` means - /// not to copy. - /// - `address_len_ptr`: pointer to where put the length of the address. - /// - `output_ptr`: a pointer where the output buffer is copied to. `SENTINEL` means not to - /// copy. - /// - `output_len_ptr`: in-out pointer to where the length of the buffer is read from and the - /// actual length is written to. - /// - `salt_ptr`: Pointer to raw bytes used for address derivation. See `fn contract_address`. - /// - `salt_len`: length in bytes of the supplied salt. 
- /// - /// # Errors - /// - /// Please consult the `ReturnCode` enum declaration for more information on those - /// errors. Here we only note things specific to this function. - /// - /// An error means that the account wasn't created and no address or output buffer - /// is returned unless stated otherwise. - /// - /// - `ReturnCode::CalleeReverted`: Output buffer is returned. - /// - `ReturnCode::CalleeTrapped` - /// - `ReturnCode::TransferFailed` - /// - `ReturnCode::CodeNotFound` + /// See [`pallet_contracts_uapi::HostFn::instantiate_v2`]. #[version(2)] #[unstable] fn instantiate( @@ -1707,7 +1386,7 @@ pub mod env { output_len_ptr: u32, salt_ptr: u32, salt_len: u32, - ) -> Result { + ) -> Result { ctx.instantiate( memory, code_hash_ptr, @@ -1726,11 +1405,7 @@ pub mod env { } /// Remove the calling account and transfer remaining balance. - /// - /// # New version available - /// - /// This is equivalent to calling the newer version of this function. The newer version - /// drops the now unnecessary length fields. + /// See [`pallet_contracts_uapi::HostFn::terminate`]. /// /// # Note /// @@ -1748,20 +1423,7 @@ pub mod env { } /// Remove the calling account and transfer remaining **free** balance. - /// - /// This function never returns. Either the termination was successful and the - /// execution of the destroyed contract is halted. Or it failed during the termination - /// which is considered fatal and results in a trap + rollback. - /// - /// - `beneficiary_ptr`: a pointer to the address of the beneficiary account where all where all - /// remaining funds of the caller are transferred. Should be decodable as an `T::AccountId`. - /// Traps otherwise. - /// - /// # Traps - /// - /// - The contract is live i.e is already on the call stack. - /// - Failed to send the balance to the beneficiary. - /// - The deletion queue is full. + /// See [`pallet_contracts_uapi::HostFn::terminate_v1`]. #[version(1)] #[prefixed_alias] fn terminate(ctx: _, memory: _, beneficiary_ptr: u32) -> Result<(), TrapReason> { @@ -1769,15 +1431,7 @@ pub mod env { } /// Stores the input passed by the caller into the supplied buffer. - /// - /// The value is stored to linear memory at the address pointed to by `out_ptr`. - /// `out_len_ptr` must point to a u32 value that describes the available space at - /// `out_ptr`. This call overwrites it with the size of the value. If the available - /// space at `out_ptr` is less than the size of the value a trap is triggered. - /// - /// # Note - /// - /// This function traps if the input was previously forwarded by a [`call()`][`Self::call()`]. + /// See [`pallet_contracts_uapi::HostFn::input`]. #[prefixed_alias] fn input(ctx: _, memory: _, out_ptr: u32, out_len_ptr: u32) -> Result<(), TrapReason> { ctx.charge_gas(RuntimeCosts::InputBase)?; @@ -1793,22 +1447,7 @@ pub mod env { } /// Cease contract execution and save a data buffer as a result of the execution. - /// - /// This function never returns as it stops execution of the caller. - /// This is the only way to return a data buffer to the caller. Returning from - /// execution without calling this function is equivalent to calling: - /// ```nocompile - /// seal_return(0, 0, 0); - /// ``` - /// - /// The flags argument is a bitfield that can be used to signal special return - /// conditions to the supervisor: - /// --- lsb --- - /// bit 0 : REVERT - Revert all storage changes made by the caller. - /// bit [1, 31]: Reserved for future use. - /// --- msb --- - /// - /// Using a reserved bit triggers a trap. 
+ /// See [`pallet_contracts_uapi::HostFn::return_value`]. fn seal_return( ctx: _, memory: _, @@ -1824,18 +1463,7 @@ pub mod env { } /// Stores the address of the caller into the supplied buffer. - /// - /// The value is stored to linear memory at the address pointed to by `out_ptr`. - /// `out_len_ptr` must point to a u32 value that describes the available space at - /// `out_ptr`. This call overwrites it with the size of the value. If the available - /// space at `out_ptr` is less than the size of the value a trap is triggered. - /// - /// If this is a top-level call (i.e. initiated by an extrinsic) the origin address of the - /// extrinsic will be returned. Otherwise, if this call is initiated by another contract then - /// the address of the contract will be returned. The value is encoded as T::AccountId. - /// - /// If there is no address associated with the caller (e.g. because the caller is root) then - /// it traps with `BadOrigin`. + /// See [`pallet_contracts_uapi::HostFn::caller`]. #[prefixed_alias] fn caller(ctx: _, memory: _, out_ptr: u32, out_len_ptr: u32) -> Result<(), TrapReason> { ctx.charge_gas(RuntimeCosts::Caller)?; @@ -1851,13 +1479,7 @@ pub mod env { } /// Checks whether a specified address belongs to a contract. - /// - /// # Parameters - /// - /// - `account_ptr`: a pointer to the address of the beneficiary account Should be decodable as - /// an `T::AccountId`. Traps otherwise. - /// - /// Returned value is a `u32`-encoded boolean: (0 = false, 1 = true). + /// See [`pallet_contracts_uapi::HostFn::is_contract`]. #[prefixed_alias] fn is_contract(ctx: _, memory: _, account_ptr: u32) -> Result { ctx.charge_gas(RuntimeCosts::IsContract)?; @@ -1868,18 +1490,7 @@ pub mod env { } /// Retrieve the code hash for a specified contract address. - /// - /// # Parameters - /// - /// - `account_ptr`: a pointer to the address in question. Should be decodable as an - /// `T::AccountId`. Traps otherwise. - /// - `out_ptr`: pointer to the linear memory where the returning value is written to. - /// - `out_len_ptr`: in-out pointer into linear memory where the buffer length is read from and - /// the value length is written to. - /// - /// # Errors - /// - /// - `ReturnCode::KeyNotFound` + /// See [`pallet_contracts_uapi::HostFn::code_hash`]. #[prefixed_alias] fn code_hash( ctx: _, @@ -1887,7 +1498,7 @@ pub mod env { account_ptr: u32, out_ptr: u32, out_len_ptr: u32, - ) -> Result { + ) -> Result { ctx.charge_gas(RuntimeCosts::CodeHash)?; let address: <::T as frame_system::Config>::AccountId = ctx.read_sandbox_memory_as(memory, account_ptr)?; @@ -1900,19 +1511,14 @@ pub mod env { false, already_charged, )?; - Ok(ReturnCode::Success) + Ok(ReturnErrorCode::Success) } else { - Ok(ReturnCode::KeyNotFound) + Ok(ReturnErrorCode::KeyNotFound) } } /// Retrieve the code hash of the currently executing contract. - /// - /// # Parameters - /// - /// - `out_ptr`: pointer to the linear memory where the returning value is written to. - /// - `out_len_ptr`: in-out pointer into linear memory where the buffer length is read from and - /// the value length is written to. + /// See [`pallet_contracts_uapi::HostFn::own_code_hash`]. #[prefixed_alias] fn own_code_hash(ctx: _, memory: _, out_ptr: u32, out_len_ptr: u32) -> Result<(), TrapReason> { ctx.charge_gas(RuntimeCosts::OwnCodeHash)?; @@ -1928,15 +1534,7 @@ pub mod env { } /// Checks whether the caller of the current contract is the origin of the whole call stack. 
- /// - /// Prefer this over [`is_contract()`][`Self::is_contract`] when checking whether your contract - /// is being called by a contract or a plain account. The reason is that it performs better - /// since it does not need to do any storage lookups. - /// - /// A return value of `true` indicates that this contract is being called by a plain account - /// and `false` indicates that the caller is another contract. - /// - /// Returned value is a `u32`-encoded boolean: (`0 = false`, `1 = true`). + /// See [`pallet_contracts_uapi::HostFn::caller_is_origin`]. #[prefixed_alias] fn caller_is_origin(ctx: _, _memory: _) -> Result { ctx.charge_gas(RuntimeCosts::CallerIsOrigin)?; @@ -1944,14 +1542,7 @@ pub mod env { } /// Checks whether the caller of the current contract is root. - /// - /// Note that only the origin of the call stack can be root. Hence this function returning - /// `true` implies that the contract is being called by the origin. - /// - /// A return value of `true` indicates that this contract is being called by a root origin, - /// and `false` indicates that the caller is a signed origin. - /// - /// Returned value is a `u32`-encoded boolean: (`0 = false`, `1 = true`). + /// See [`pallet_contracts_uapi::HostFn::caller_is_root`]. #[unstable] fn caller_is_root(ctx: _, _memory: _) -> Result { ctx.charge_gas(RuntimeCosts::CallerIsRoot)?; @@ -1959,11 +1550,7 @@ pub mod env { } /// Stores the address of the current contract into the supplied buffer. - /// - /// The value is stored to linear memory at the address pointed to by `out_ptr`. - /// `out_len_ptr` must point to a u32 value that describes the available space at - /// `out_ptr`. This call overwrites it with the size of the value. If the available - /// space at `out_ptr` is less than the size of the value a trap is triggered. + /// See [`pallet_contracts_uapi::HostFn::address`]. #[prefixed_alias] fn address(ctx: _, memory: _, out_ptr: u32, out_len_ptr: u32) -> Result<(), TrapReason> { ctx.charge_gas(RuntimeCosts::Address)?; @@ -1978,10 +1565,7 @@ pub mod env { } /// Stores the price for the specified amount of gas into the supplied buffer. - /// - /// Equivalent to the newer [`seal1`][`super::api_doc::Version2::weight_to_fee`] version but - /// works with *ref_time* Weight only. It is recommended to switch to the latest version, once - /// it's stabilized. + /// See [`pallet_contracts_uapi::HostFn::weight_to_fee`]. #[prefixed_alias] fn weight_to_fee( ctx: _, @@ -2003,21 +1587,7 @@ pub mod env { } /// Stores the price for the specified amount of weight into the supplied buffer. - /// - /// # Parameters - /// - /// - `out_ptr`: pointer to the linear memory where the returning value is written to. If the - /// available space at `out_ptr` is less than the size of the value a trap is triggered. - /// - `out_len_ptr`: in-out pointer into linear memory where the buffer length is read from and - /// the value length is written to. - /// - /// The data is encoded as `T::Balance`. - /// - /// # Note - /// - /// It is recommended to avoid specifying very small values for `ref_time_limit` and - /// `proof_size_limit` as the prices for a single gas can be smaller than the basic balance - /// unit. + /// See [`pallet_contracts_uapi::HostFn::weight_to_fee_v1`]. #[version(1)] #[unstable] fn weight_to_fee( @@ -2041,10 +1611,7 @@ pub mod env { } /// Stores the weight left into the supplied buffer. - /// - /// Equivalent to the newer [`seal1`][`super::api_doc::Version2::gas_left`] version but - /// works with *ref_time* Weight only. 
It is recommended to switch to the latest version, once - /// it's stabilized. + /// See [`pallet_contracts_uapi::HostFn::gas_left`]. #[prefixed_alias] fn gas_left(ctx: _, memory: _, out_ptr: u32, out_len_ptr: u32) -> Result<(), TrapReason> { ctx.charge_gas(RuntimeCosts::GasLeft)?; @@ -2060,13 +1627,7 @@ pub mod env { } /// Stores the amount of weight left into the supplied buffer. - /// - /// The value is stored to linear memory at the address pointed to by `out_ptr`. - /// `out_len_ptr` must point to a u32 value that describes the available space at - /// `out_ptr`. This call overwrites it with the size of the value. If the available - /// space at `out_ptr` is less than the size of the value a trap is triggered. - /// - /// The data is encoded as Weight. + /// See [`pallet_contracts_uapi::HostFn::gas_left_v1`]. #[version(1)] #[unstable] fn gas_left(ctx: _, memory: _, out_ptr: u32, out_len_ptr: u32) -> Result<(), TrapReason> { @@ -2083,13 +1644,7 @@ pub mod env { } /// Stores the *free* balance of the current account into the supplied buffer. - /// - /// The value is stored to linear memory at the address pointed to by `out_ptr`. - /// `out_len_ptr` must point to a u32 value that describes the available space at - /// `out_ptr`. This call overwrites it with the size of the value. If the available - /// space at `out_ptr` is less than the size of the value a trap is triggered. - /// - /// The data is encoded as `T::Balance`. + /// See [`pallet_contracts_uapi::HostFn::balance`]. #[prefixed_alias] fn balance(ctx: _, memory: _, out_ptr: u32, out_len_ptr: u32) -> Result<(), TrapReason> { ctx.charge_gas(RuntimeCosts::Balance)?; @@ -2104,13 +1659,7 @@ pub mod env { } /// Stores the value transferred along with this call/instantiate into the supplied buffer. - /// - /// The value is stored to linear memory at the address pointed to by `out_ptr`. - /// `out_len_ptr` must point to a `u32` value that describes the available space at - /// `out_ptr`. This call overwrites it with the size of the value. If the available - /// space at `out_ptr` is less than the size of the value a trap is triggered. - /// - /// The data is encoded as `T::Balance`. + /// See [`pallet_contracts_uapi::HostFn::value_transferred`]. #[prefixed_alias] fn value_transferred( ctx: _, @@ -2210,11 +1759,7 @@ pub mod env { } /// Load the latest block timestamp into the supplied buffer - /// - /// The value is stored to linear memory at the address pointed to by `out_ptr`. - /// `out_len_ptr` must point to a u32 value that describes the available space at - /// `out_ptr`. This call overwrites it with the size of the value. If the available - /// space at `out_ptr` is less than the size of the value a trap is triggered. + /// See [`pallet_contracts_uapi::HostFn::now`]. #[prefixed_alias] fn now(ctx: _, memory: _, out_ptr: u32, out_len_ptr: u32) -> Result<(), TrapReason> { ctx.charge_gas(RuntimeCosts::Now)?; @@ -2229,8 +1774,7 @@ pub mod env { } /// Stores the minimum balance (a.k.a. existential deposit) into the supplied buffer. - /// - /// The data is encoded as `T::Balance`. + /// See [`pallet_contracts_uapi::HostFn::minimum_balance`]. #[prefixed_alias] fn minimum_balance( ctx: _, @@ -2378,15 +1922,8 @@ pub mod env { )?) } - /// Deposit a contract event with the data buffer and optional list of topics. There is a limit - /// on the maximum number of topics specified by `event_topics`. - /// - /// - `topics_ptr`: a pointer to the buffer of topics encoded as `Vec`. The value of - /// this is ignored if `topics_len` is set to `0`. 
The topics list can't contain duplicates. - /// - `topics_len`: the length of the topics buffer. Pass 0 if you want to pass an empty - /// vector. - /// - `data_ptr`: a pointer to a raw data buffer which will saved along the event. - /// - `data_len`: the length of the data buffer. + /// Deposit a contract event with the data buffer and optional list of topics. + /// See [pallet_contracts_uapi::HostFn::deposit_event] #[prefixed_alias] fn deposit_event( ctx: _, @@ -2422,11 +1959,7 @@ pub mod env { } /// Stores the current block number of the current contract into the supplied buffer. - /// - /// The value is stored to linear memory at the address pointed to by `out_ptr`. - /// `out_len_ptr` must point to a u32 value that describes the available space at - /// `out_ptr`. This call overwrites it with the size of the value. If the available - /// space at `out_ptr` is less than the size of the value a trap is triggered. + /// See [`pallet_contracts_uapi::HostFn::block_number`]. #[prefixed_alias] fn block_number(ctx: _, memory: _, out_ptr: u32, out_len_ptr: u32) -> Result<(), TrapReason> { ctx.charge_gas(RuntimeCosts::BlockNumber)?; @@ -2441,22 +1974,7 @@ pub mod env { } /// Computes the SHA2 256-bit hash on the given input buffer. - /// - /// Returns the result directly into the given output buffer. - /// - /// # Note - /// - /// - The `input` and `output` buffer may overlap. - /// - The output buffer is expected to hold at least 32 bytes (256 bits). - /// - It is the callers responsibility to provide an output buffer that is large enough to hold - /// the expected amount of bytes returned by the chosen hash function. - /// - /// # Parameters - /// - /// - `input_ptr`: the pointer into the linear memory where the input data is placed. - /// - `input_len`: the length of the input data in bytes. - /// - `output_ptr`: the pointer into the linear memory where the output data is placed. The - /// function will write the result directly into this buffer. + /// See [`pallet_contracts_uapi::HostFn::hash_sha2_256`]. #[prefixed_alias] fn hash_sha2_256( ctx: _, @@ -2472,22 +1990,7 @@ pub mod env { } /// Computes the KECCAK 256-bit hash on the given input buffer. - /// - /// Returns the result directly into the given output buffer. - /// - /// # Note - /// - /// - The `input` and `output` buffer may overlap. - /// - The output buffer is expected to hold at least 32 bytes (256 bits). - /// - It is the callers responsibility to provide an output buffer that is large enough to hold - /// the expected amount of bytes returned by the chosen hash function. - /// - /// # Parameters - /// - /// - `input_ptr`: the pointer into the linear memory where the input data is placed. - /// - `input_len`: the length of the input data in bytes. - /// - `output_ptr`: the pointer into the linear memory where the output data is placed. The - /// function will write the result directly into this buffer. + /// See [`pallet_contracts_uapi::HostFn::hash_keccak_256`]. #[prefixed_alias] fn hash_keccak_256( ctx: _, @@ -2503,22 +2006,7 @@ pub mod env { } /// Computes the BLAKE2 256-bit hash on the given input buffer. - /// - /// Returns the result directly into the given output buffer. - /// - /// # Note - /// - /// - The `input` and `output` buffer may overlap. - /// - The output buffer is expected to hold at least 32 bytes (256 bits). - /// - It is the callers responsibility to provide an output buffer that is large enough to hold - /// the expected amount of bytes returned by the chosen hash function. 
- /// - /// # Parameters - /// - /// - `input_ptr`: the pointer into the linear memory where the input data is placed. - /// - `input_len`: the length of the input data in bytes. - /// - `output_ptr`: the pointer into the linear memory where the output data is placed. The - /// function will write the result directly into this buffer. + /// See [`pallet_contracts_uapi::HostFn::hash_blake2_256`]. #[prefixed_alias] fn hash_blake2_256( ctx: _, @@ -2534,22 +2022,7 @@ pub mod env { } /// Computes the BLAKE2 128-bit hash on the given input buffer. - /// - /// Returns the result directly into the given output buffer. - /// - /// # Note - /// - /// - The `input` and `output` buffer may overlap. - /// - The output buffer is expected to hold at least 16 bytes (128 bits). - /// - It is the callers responsibility to provide an output buffer that is large enough to hold - /// the expected amount of bytes returned by the chosen hash function. - /// - /// # Parameters - /// - /// - `input_ptr`: the pointer into the linear memory where the input data is placed. - /// - `input_len`: the length of the input data in bytes. - /// - `output_ptr`: the pointer into the linear memory where the output data is placed. The - /// function will write the result directly into this buffer. + /// See [`pallet_contracts_uapi::HostFn::hash_blake2_128`]. #[prefixed_alias] fn hash_blake2_128( ctx: _, @@ -2565,16 +2038,7 @@ pub mod env { } /// Call into the chain extension provided by the chain if any. - /// - /// Handling of the input values is up to the specific chain extension and so is the - /// return value. The extension can decide to use the inputs as primitive inputs or as - /// in/out arguments by interpreting them as pointers. Any caller of this function - /// must therefore coordinate with the chain that it targets. - /// - /// # Note - /// - /// If no chain extension exists the contract will trap with the `NoChainExtension` - /// module error. + /// See [`pallet_contracts_uapi::HostFn::call_chain_extension`]. #[prefixed_alias] fn call_chain_extension( ctx: _, @@ -2627,7 +2091,7 @@ pub mod env { memory: _, str_ptr: u32, str_len: u32, - ) -> Result { + ) -> Result { let str_len = str_len.min(DebugBufferVec::::bound() as u32); ctx.charge_gas(RuntimeCosts::DebugMessage(str_len))?; if ctx.ext.append_debug_buffer("") { @@ -2636,47 +2100,17 @@ pub mod env { ctx.ext.append_debug_buffer(msg); } } - Ok(ReturnCode::Success) + Ok(ReturnErrorCode::Success) } /// Call some dispatchable of the runtime. - /// - /// This function decodes the passed in data as the overarching `Call` type of the - /// runtime and dispatches it. The weight as specified in the runtime is charged - /// from the gas meter. Any weight refunds made by the dispatchable are considered. - /// - /// The filter specified by `Config::CallFilter` is attached to the origin of - /// the dispatched call. - /// - /// # Parameters - /// - /// - `call_ptr`: the pointer into the linear memory where the input data is placed. - /// - `call_len`: the length of the input data in bytes. - /// - /// # Return Value - /// - /// Returns `ReturnCode::Success` when the dispatchable was successfully executed and - /// returned `Ok`. When the dispatchable was executed but returned an error - /// `ReturnCode::CallRuntimeFailed` is returned. The full error is not - /// provided because it is not guaranteed to be stable. - /// - /// # Comparison with `ChainExtension` - /// - /// Just as a chain extension this API allows the runtime to extend the functionality - /// of contracts. 
While making use of this function is generally easier it cannot be - /// used in all cases. Consider writing a chain extension if you need to do perform - /// one of the following tasks: - /// - /// - Return data. - /// - Provide functionality **exclusively** to contracts. - /// - Provide custom weights. - /// - Avoid the need to keep the `Call` data structure stable. + /// See [`frame_support::traits::call_runtime`]. fn call_runtime( ctx: _, memory: _, call_ptr: u32, call_len: u32, - ) -> Result { + ) -> Result { use frame_support::dispatch::GetDispatchInfo; ctx.charge_gas(RuntimeCosts::CopyFromContract(call_len))?; let call: ::RuntimeCall = @@ -2687,21 +2121,7 @@ pub mod env { } /// Execute an XCM program locally, using the contract's address as the origin. - /// This is equivalent to dispatching `pallet_xcm::execute` through call_runtime, except that - /// the function is called directly instead of being dispatched. - /// - /// # Parameters - /// - /// - `msg_ptr`: the pointer into the linear memory where the [`xcm::prelude::VersionedXcm`] is - /// placed. - /// - `msg_len`: the length of the message in bytes. - /// - `output_ptr`: the pointer into the linear memory where the [`xcm::prelude::Outcome`] - /// message id is placed. - /// - /// # Return Value - /// - /// Returns `ReturnCode::Success` when the XCM was successfully executed. When the XCM - /// execution fails, `ReturnCode::XcmExecutionFailed` is returned. + /// See [`pallet_contracts_uapi::HostFn::execute_xcm`]. #[unstable] fn xcm_execute( ctx: _, @@ -2709,7 +2129,7 @@ pub mod env { msg_ptr: u32, msg_len: u32, output_ptr: u32, - ) -> Result { + ) -> Result { use frame_support::dispatch::DispatchInfo; use xcm::VersionedXcm; use xcm_builder::{ExecuteController, ExecuteControllerWeightInfo}; @@ -2740,23 +2160,7 @@ pub mod env { } /// Send an XCM program from the contract to the specified destination. - /// This is equivalent to dispatching `pallet_xcm::send` through `call_runtime`, except that - /// the function is called directly instead of being dispatched. - /// - /// # Parameters - /// - /// - `dest_ptr`: the pointer into the linear memory where the - /// [`xcm::prelude::VersionedMultiLocation`] is placed. - /// - `msg_ptr`: the pointer into the linear memory where the [`xcm::prelude::VersionedXcm`] is - /// placed. - /// - `msg_len`: the length of the message in bytes. - /// - `output_ptr`: the pointer into the linear memory where the [`xcm::v3::XcmHash`] message id - /// is placed. - /// - /// # Return Value - /// - /// Returns `ReturnCode::Success` when the message was successfully sent. When the XCM - /// execution fails, `ReturnCode::CallRuntimeFailed` is returned. + /// See [`pallet_contracts_uapi::HostFn::send_xcm`]. #[unstable] fn xcm_send( ctx: _, @@ -2765,7 +2169,7 @@ pub mod env { msg_ptr: u32, msg_len: u32, output_ptr: u32, - ) -> Result { + ) -> Result { use xcm::{VersionedMultiLocation, VersionedXcm}; use xcm_builder::{SendController, SendControllerWeightInfo}; @@ -2781,35 +2185,20 @@ pub mod env { match <::Xcm>::send(origin, dest.into(), message.into()) { Ok(message_id) => { ctx.write_sandbox_memory(memory, output_ptr, &message_id.encode())?; - Ok(ReturnCode::Success) + Ok(ReturnErrorCode::Success) }, Err(e) => { if ctx.ext.append_debug_buffer("") { ctx.ext.append_debug_buffer("seal0::xcm_send failed with: "); ctx.ext.append_debug_buffer(e.into()); }; - Ok(ReturnCode::XcmSendFailed) + Ok(ReturnErrorCode::XcmSendFailed) }, } } /// Recovers the ECDSA public key from the given message hash and signature. 
- /// - /// Writes the public key into the given output buffer. - /// Assumes the secp256k1 curve. - /// - /// # Parameters - /// - /// - `signature_ptr`: the pointer into the linear memory where the signature is placed. Should - /// be decodable as a 65 bytes. Traps otherwise. - /// - `message_hash_ptr`: the pointer into the linear memory where the message hash is placed. - /// Should be decodable as a 32 bytes. Traps otherwise. - /// - `output_ptr`: the pointer into the linear memory where the output data is placed. The - /// buffer should be 33 bytes. The function will write the result directly into this buffer. - /// - /// # Errors - /// - /// - `ReturnCode::EcdsaRecoverFailed` + /// See [`pallet_contracts_uapi::HostFn::ecdsa_recover`]. #[prefixed_alias] fn ecdsa_recover( ctx: _, @@ -2817,7 +2206,7 @@ pub mod env { signature_ptr: u32, message_hash_ptr: u32, output_ptr: u32, - ) -> Result { + ) -> Result { ctx.charge_gas(RuntimeCosts::EcdsaRecovery)?; let mut signature: [u8; 65] = [0; 65]; @@ -2833,26 +2222,14 @@ pub mod env { // buffer. ctx.write_sandbox_memory(memory, output_ptr, pub_key.as_ref())?; - Ok(ReturnCode::Success) + Ok(ReturnErrorCode::Success) }, - Err(_) => Ok(ReturnCode::EcdsaRecoverFailed), + Err(_) => Ok(ReturnErrorCode::EcdsaRecoveryFailed), } } /// Verify a sr25519 signature - /// - /// # Parameters - /// - /// - `signature_ptr`: the pointer into the linear memory where the signature is placed. Should - /// be a value of 64 bytes. - /// - `pub_key_ptr`: the pointer into the linear memory where the public key is placed. Should - /// be a value of 32 bytes. - /// - `message_len`: the length of the message payload. - /// - `message_ptr`: the pointer into the linear memory where the message is placed. - /// - /// # Errors - /// - /// - `ReturnCode::Sr25519VerifyFailed + /// See [`pallet_contracts_uapi::HostFn::sr25519_verify`]. #[unstable] fn sr25519_verify( ctx: _, @@ -2861,7 +2238,7 @@ pub mod env { pub_key_ptr: u32, message_len: u32, message_ptr: u32, - ) -> Result { + ) -> Result { ctx.charge_gas(RuntimeCosts::Sr25519Verify(message_len))?; let mut signature: [u8; 64] = [0; 64]; @@ -2873,41 +2250,16 @@ pub mod env { let message: Vec = ctx.read_sandbox_memory(memory, message_ptr, message_len)?; if ctx.ext.sr25519_verify(&signature, &message, &pub_key) { - Ok(ReturnCode::Success) + Ok(ReturnErrorCode::Success) } else { - Ok(ReturnCode::Sr25519VerifyFailed) + Ok(ReturnErrorCode::Sr25519VerifyFailed) } } /// Replace the contract code at the specified address with new code. - /// - /// # Note - /// - /// There are a couple of important considerations which must be taken into account when - /// using this API: - /// - /// 1. The storage at the code address will remain untouched. This means that contract - /// developers must ensure that the storage layout of the new code is compatible with that of - /// the old code. - /// - /// 2. Contracts using this API can't be assumed as having deterministic addresses. Said another - /// way, when using this API you lose the guarantee that an address always identifies a specific - /// code hash. - /// - /// 3. If a contract calls into itself after changing its code the new call would use - /// the new code. However, if the original caller panics after returning from the sub call it - /// would revert the changes made by [`set_code_hash()`][`Self::set_code_hash`] and the next - /// caller would use the old code. - /// - /// # Parameters - /// - /// - `code_hash_ptr`: A pointer to the buffer that contains the new code hash. 
- /// - /// # Errors - /// - /// - `ReturnCode::CodeNotFound` + /// See [`pallet_contracts_uapi::HostFn::set_code_hash`]. #[prefixed_alias] - fn set_code_hash(ctx: _, memory: _, code_hash_ptr: u32) -> Result { + fn set_code_hash(ctx: _, memory: _, code_hash_ptr: u32) -> Result { ctx.charge_gas(RuntimeCosts::SetCodeHash)?; let code_hash: CodeHash<::T> = ctx.read_sandbox_memory_as(memory, code_hash_ptr)?; @@ -2916,33 +2268,19 @@ pub mod env { let code = Runtime::::err_into_return_code(err)?; Ok(code) }, - Ok(()) => Ok(ReturnCode::Success), + Ok(()) => Ok(ReturnErrorCode::Success), } } /// Calculates Ethereum address from the ECDSA compressed public key and stores - /// it into the supplied buffer. - /// - /// # Parameters - /// - /// - `key_ptr`: a pointer to the ECDSA compressed public key. Should be decodable as a 33 bytes - /// value. Traps otherwise. - /// - `out_ptr`: the pointer into the linear memory where the output data is placed. The - /// function will write the result directly into this buffer. - /// - /// The value is stored to linear memory at the address pointed to by `out_ptr`. - /// If the available space at `out_ptr` is less than the size of the value a trap is triggered. - /// - /// # Errors - /// - /// - `ReturnCode::EcdsaRecoverFailed` + /// See [`pallet_contracts_uapi::HostFn::ecdsa_to_eth_address`]. #[prefixed_alias] fn ecdsa_to_eth_address( ctx: _, memory: _, key_ptr: u32, out_ptr: u32, - ) -> Result { + ) -> Result { ctx.charge_gas(RuntimeCosts::EcdsaToEthAddress)?; let mut compressed_key: [u8; 33] = [0; 33]; ctx.read_sandbox_memory_into_buf(memory, key_ptr, &mut compressed_key)?; @@ -2950,18 +2288,15 @@ pub mod env { match result { Ok(eth_address) => { ctx.write_sandbox_memory(memory, out_ptr, eth_address.as_ref())?; - Ok(ReturnCode::Success) + Ok(ReturnErrorCode::Success) }, - Err(_) => Ok(ReturnCode::EcdsaRecoverFailed), + Err(_) => Ok(ReturnErrorCode::EcdsaRecoveryFailed), } } /// Returns the number of times the currently executing contract exists on the call stack in /// addition to the calling instance. - /// - /// # Return Value - /// - /// Returns `0` when there is no reentrancy. + /// See [`pallet_contracts_uapi::HostFn::reentrance_count`]. #[unstable] fn reentrance_count(ctx: _, memory: _) -> Result { ctx.charge_gas(RuntimeCosts::ReentrantCount)?; @@ -2970,14 +2305,7 @@ pub mod env { /// Returns the number of times specified contract exists on the call stack. Delegated calls are /// not counted as separate calls. - /// - /// # Parameters - /// - /// - `account_ptr`: a pointer to the contract address. - /// - /// # Return Value - /// - /// Returns `0` when the contract does not exist on the call stack. + /// See [`pallet_contracts_uapi::HostFn::account_reentrance_count`]. #[unstable] fn account_reentrance_count(ctx: _, memory: _, account_ptr: u32) -> Result { ctx.charge_gas(RuntimeCosts::AccountEntranceCount)?; @@ -2987,19 +2315,14 @@ pub mod env { } /// Returns a nonce that is unique per contract instantiation. - /// - /// The nonce is incremented for each successful contract instantiation. This is a - /// sensible default salt for contract instantiations. + /// See [`pallet_contracts_uapi::HostFn::instantiation_nonce`]. fn instantiation_nonce(ctx: _, _memory: _) -> Result { ctx.charge_gas(RuntimeCosts::InstantationNonce)?; Ok(ctx.ext.nonce()) } /// Adds a new delegate dependency to the contract. - /// - /// # Parameters - /// - /// - `code_hash_ptr`: A pointer to the code hash of the dependency. 
+ /// See [`pallet_contracts_uapi::HostFn::add_delegate_dependency`]. #[unstable] fn add_delegate_dependency(ctx: _, memory: _, code_hash_ptr: u32) -> Result<(), TrapReason> { ctx.charge_gas(RuntimeCosts::AddDelegateDependency)?; @@ -3009,10 +2332,7 @@ pub mod env { } /// Removes the delegate dependency from the contract. - /// - /// # Parameters - /// - /// - `code_hash_ptr`: A pointer to the code hash of the dependency. + /// see [`pallet_contracts_uapi::HostFn::remove_delegate_dependency`]. #[unstable] fn remove_delegate_dependency(ctx: _, memory: _, code_hash_ptr: u32) -> Result<(), TrapReason> { ctx.charge_gas(RuntimeCosts::RemoveDelegateDependency)?; diff --git a/substrate/frame/contracts/uapi/Cargo.toml b/substrate/frame/contracts/uapi/Cargo.toml new file mode 100644 index 00000000000..6717d574dfe --- /dev/null +++ b/substrate/frame/contracts/uapi/Cargo.toml @@ -0,0 +1,23 @@ +[package] +name = "pallet-contracts-uapi" +version = "4.0.0-dev" +authors.workspace = true +edition.workspace = true +license = "Apache-2.0" +homepage = "https://substrate.io" +repository.workspace = true +description = "Exposes all the host functions that a contract can import." + +[dependencies] +paste = { version = "1.0", default-features = false } +bitflags = "1.0" +scale-info = { version = "2.10.0", default-features = false, features = ["derive"], optional = true } +scale = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = [ + "derive", + "max-encoded-len", +], optional = true } + +[features] +default = [ "scale" ] +scale = [ "dep:scale", "scale-info" ] + diff --git a/substrate/frame/contracts/uapi/src/flags.rs b/substrate/frame/contracts/uapi/src/flags.rs new file mode 100644 index 00000000000..32553817fb7 --- /dev/null +++ b/substrate/frame/contracts/uapi/src/flags.rs @@ -0,0 +1,73 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use bitflags::bitflags; + +bitflags! { + /// Flags used by a contract to customize exit behaviour. + #[cfg_attr(feature = "scale", derive(scale::Encode, scale::Decode, scale_info::TypeInfo))] + pub struct ReturnFlags: u32 { + /// If this bit is set all changes made by the contract execution are rolled back. + const REVERT = 0x0000_0001; + } +} + +bitflags! { + /// Flags used to change the behaviour of `seal_call` and `seal_delegate_call`. + pub struct CallFlags: u32 { + /// Forward the input of current function to the callee. + /// + /// Supplied input pointers are ignored when set. + /// + /// # Note + /// + /// A forwarding call will consume the current contracts input. Any attempt to + /// access the input after this call returns will lead to [`Error::InputForwarded`]. + /// It does not matter if this is due to calling `seal_input` or trying another + /// forwarding call. Consider using [`Self::CLONE_INPUT`] in order to preserve + /// the input. 
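The `CallFlags` bits defined in this file are what contract code passes to `call_v1`/`call_v2` and `delegate_call` through the `HostFn` trait further below. As a quick, editor-added illustration (not part of the patch; the helper name is invented), individual flags combine with the ordinary `bitflags` operators:

```rust
use pallet_contracts_uapi::CallFlags;

/// Flags for a thin forwarding proxy: hand our own input to the callee and
/// return its output directly to our caller.
fn forwarding_proxy_flags() -> CallFlags {
    CallFlags::FORWARD_INPUT | CallFlags::TAIL_CALL
}
```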
+ const FORWARD_INPUT = 0b0000_0001; + /// Identical to [`Self::FORWARD_INPUT`] but without consuming the input. + /// + /// This adds some additional weight costs to the call. + /// + /// # Note + /// + /// This implies [`Self::FORWARD_INPUT`] and takes precedence when both are set. + const CLONE_INPUT = 0b0000_0010; + /// Do not return from the call but rather return the result of the callee to the + /// callers caller. + /// + /// # Note + /// + /// This makes the current contract completely transparent to its caller by replacing + /// this contracts potential output by the callee ones. Any code after `seal_call` + /// can be safely considered unreachable. + const TAIL_CALL = 0b0000_0100; + /// Allow the callee to reenter into the current contract. + /// + /// Without this flag any reentrancy into the current contract that originates from + /// the callee (or any of its callees) is denied. This includes the first callee: + /// You cannot call into yourself with this flag set. + /// + /// # Note + /// + /// For `seal_delegate_call` should be always unset, otherwise + /// [`Error::InvalidCallFlags`] is returned. + const ALLOW_REENTRY = 0b0000_1000; + } +} diff --git a/substrate/frame/contracts/uapi/src/host.rs b/substrate/frame/contracts/uapi/src/host.rs new file mode 100644 index 00000000000..f8b55d3822e --- /dev/null +++ b/substrate/frame/contracts/uapi/src/host.rs @@ -0,0 +1,793 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +use crate::{CallFlags, Result, ReturnFlags, SENTINEL}; +use paste::paste; + +#[cfg(target_arch = "wasm32")] +mod wasm32; + +#[cfg(target_arch = "riscv32")] +mod riscv32; + +macro_rules! hash_fn { + ( $name:ident, $bytes:literal ) => { + paste! { + #[doc = "Computes the " $name " " $bytes "-bit hash on the given input buffer."] + #[doc = "\n# Notes\n"] + #[doc = "- The `input` and `output` buffer may overlap."] + #[doc = "- The output buffer is expected to hold at least " $bytes " bits."] + #[doc = "- It is the callers responsibility to provide an output buffer that is large enough to hold the expected amount of bytes returned by the hash function."] + #[doc = "\n# Parameters\n"] + #[doc = "- `input`: The input data buffer."] + #[doc = "- `output`: The output buffer to write the hash result to."] + fn [](input: &[u8], output: &mut [u8; $bytes]); + } + }; +} + +fn extract_from_slice(output: &mut &mut [u8], new_len: usize) { + debug_assert!(new_len <= output.len()); + let tmp = core::mem::take(output); + *output = &mut tmp[..new_len]; +} + +fn ptr_len_or_sentinel(data: &mut Option<&mut [u8]>) -> (*mut u8, u32) { + match data { + Some(ref mut data) => (data.as_mut_ptr(), data.len() as _), + None => (SENTINEL as _, 0), + } +} + +fn ptr_or_sentinel(data: &Option<&[u8]>) -> *const u8 { + match data { + Some(ref data) => data.as_ptr(), + None => SENTINEL as _, + } +} + +/// Implements [`HostFn`] for each supported target architecture. 
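The small helpers just above (`ptr_len_or_sentinel`, `ptr_or_sentinel`, `extract_from_slice`) encode the crate's buffer-passing convention: an optional output buffer is flattened into a raw `(ptr, len)` pair, with the `SENTINEL` constant (`u32::MAX`, defined later in `lib.rs`) standing in for "no buffer, don't copy the output", and afterwards the caller's slice is shrunk to the number of bytes the host actually wrote. A self-contained sketch of that convention (editor's illustration; the `main` driver and the 4-byte write are invented for the demo):

```rust
const SENTINEL: u32 = u32::MAX;

fn ptr_len_or_sentinel(data: &mut Option<&mut [u8]>) -> (*mut u8, u32) {
    match data {
        Some(ref mut data) => (data.as_mut_ptr(), data.len() as u32),
        None => (SENTINEL as *mut u8, 0),
    }
}

fn extract_from_slice(output: &mut &mut [u8], new_len: usize) {
    debug_assert!(new_len <= output.len());
    let tmp = core::mem::take(output);
    *output = &mut tmp[..new_len];
}

fn main() {
    // `None` becomes the SENTINEL pointer: the host side skips copying output.
    let mut no_output: Option<&mut [u8]> = None;
    let (ptr, len) = ptr_len_or_sentinel(&mut no_output);
    assert_eq!((ptr as usize, len), (SENTINEL as usize, 0));

    // A supplied buffer is passed as (ptr, len); afterwards the caller's view
    // is shrunk to the length the host reported back (4 bytes here).
    let mut buf = [0u8; 32];
    let mut output: &mut [u8] = &mut buf[..];
    let (_ptr, capacity) = ptr_len_or_sentinel(&mut Some(&mut *output));
    assert_eq!(capacity, 32);
    extract_from_slice(&mut output, 4);
    assert_eq!(output.len(), 4);
}
```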
+pub enum HostFnImpl {} + +/// Defines all the host apis implemented by both wasm and RISC-V vms. +pub trait HostFn { + /// Returns the number of times specified contract exists on the call stack. Delegated calls are + /// not counted as separate calls. + /// + /// # Parameters + /// + /// - `account`: The contract address. Should be decodable as an `T::AccountId`. Traps + /// otherwise. + /// + /// # Return + /// + /// Returns the number of times specified contract exists on the call stack. + #[deprecated( + note = "Unstable function. Behaviour can change without further notice. Use only for testing." + )] + fn account_reentrance_count(account: &[u8]) -> u32; + + /// Stores the address of the current contract into the supplied buffer. + /// + /// If the available space in `output` is less than the size of the value a trap is triggered. + /// + /// # Parameters + /// + /// - `output`: A reference to the output data buffer to write the address. + fn address(output: &mut &mut [u8]); + + /// Adds a new delegate dependency to the contract. + /// + /// Traps if the maximum number of delegate_dependencies is reached or if + /// the delegate dependency already exists. + /// + /// # Parameters + /// + /// - `code_hash`: The code hash of the dependency. Should be decodable as an `T::Hash`. Traps + /// otherwise. + #[deprecated( + note = "Unstable function. Behaviour can change without further notice. Use only for testing." + )] + fn add_delegate_dependency(code_hash: &[u8]); + + /// Stores the *free* balance of the current account into the supplied buffer. + /// + /// If the available space in `output` is less than the size of the value a trap is triggered. + /// + /// # Parameters + /// + /// - `output`: A reference to the output data buffer to write the balance. + fn balance(output: &mut &mut [u8]); + + /// Stores the current block number of the current contract into the supplied buffer. + /// + /// If the available space in `output` is less than the size of the value a trap is triggered. + /// + /// # Parameters + /// + /// - `output`: A reference to the output data buffer to write the block number. + fn block_number(output: &mut &mut [u8]); + + /// Make a call to another contract. + /// + /// This is equivalent to calling the newer version of this function with + /// `flags` set to [`CallFlags::ALLOW_REENTRY`]. See the newer version for documentation. + #[deprecated(note = "Deprecated, use newer version instead")] + fn call( + callee: &[u8], + gas: u64, + value: &[u8], + input_data: &[u8], + output: Option<&mut [u8]>, + ) -> Result; + + /// Make a call to another contract. + /// + /// Equivalent to the newer [`Self::call_v2`] version but works with + /// *ref_time* Weight only + fn call_v1( + flags: CallFlags, + callee: &[u8], + gas: u64, + value: &[u8], + input_data: &[u8], + output: Option<&mut [u8]>, + ) -> Result; + + /// Call (possibly transferring some amount of funds) into the specified account. + /// + /// # Parameters + /// + /// - `flags`: See [`CallFlags`] for a documentation of the supported flags. + /// - `callee`: The address of the callee. Should be decodable as an `T::AccountId`. Traps + /// otherwise. + /// - `ref_time_limit`: how much *ref_time* Weight to devote to the execution. + /// - `proof_size_limit`: how much *proof_size* Weight to devote to the execution. + /// - `deposit`: The storage deposit limit for instantiation. Should be decodable as a + /// `Option`. Traps otherwise. 
Passing `None` means setting no specific limit for + /// the call, which implies storage usage up to the limit of the parent call. + /// - `value`: The value to transfer into the contract. Should be decodable as a `T::Balance`. + /// Traps otherwise. + /// - `input`: The input data buffer used to call the contract. + /// - `output`: A reference to the output data buffer to write the call output buffer. If `None` + /// is provided then the output buffer is not copied. + /// + /// # Errors + /// + /// An error means that the call wasn't successful output buffer is returned unless + /// stated otherwise. + /// + /// - [CalleeReverted][`crate::ReturnErrorCode::CalleeReverted]: Output buffer is returned. + /// - [CalleeTrapped][`crate::ReturnErrorCode::CalleeTrapped] + /// - [TransferFailed][`crate::ReturnErrorCode::TransferFailed] + /// - [NotCallable][`crate::ReturnErrorCode::NotCallable] + #[deprecated( + note = "Unstable function. Behaviour can change without further notice. Use only for testing." + )] + fn call_v2( + flags: CallFlags, + callee: &[u8], + ref_time_limit: u64, + proof_time_limit: u64, + deposit: Option<&[u8]>, + value: &[u8], + input_data: &[u8], + output: Option<&mut [u8]>, + ) -> Result; + + /// Call into the chain extension provided by the chain if any. + /// + /// Handling of the input values is up to the specific chain extension and so is the + /// return value. The extension can decide to use the inputs as primitive inputs or as + /// in/out arguments by interpreting them as pointers. Any caller of this function + /// must therefore coordinate with the chain that it targets. + /// + /// # Note + /// + /// If no chain extension exists the contract will trap with the `NoChainExtension` + /// module error. + /// + /// # Parameters + /// + /// - `func_id`: The function id of the chain extension. + /// - `input`: The input data buffer. + /// - `output`: A reference to the output data buffer to write the output data. + /// + /// # Return + /// + /// The chain extension returned value, if executed successfully. + fn call_chain_extension(func_id: u32, input: &[u8], output: &mut &mut [u8]) -> u32; + + /// Call some dispatchable of the runtime. + /// + /// # Parameters + /// + /// - `call`: The call data. + /// + /// # Return + /// + /// Returns `Error::Success` when the dispatchable was successfully executed and + /// returned `Ok`. When the dispatchable was executed but returned an error + /// `Error::CallRuntimeFailed` is returned. The full error is not + /// provided because it is not guaranteed to be stable. + /// + /// # Comparison with `ChainExtension` + /// + /// Just as a chain extension this API allows the runtime to extend the functionality + /// of contracts. While making use of this function is generally easier it cannot be + /// used in all cases. Consider writing a chain extension if you need to do perform + /// one of the following tasks: + /// + /// - Return data. + /// - Provide functionality **exclusively** to contracts. + /// - Provide custom weights. + /// - Avoid the need to keep the `Call` data structure stable. + fn call_runtime(call: &[u8]) -> Result; + + /// Stores the address of the caller into the supplied buffer. + /// + /// If the available space in `output` is less than the size of the value a trap is triggered. + /// + /// If this is a top-level call (i.e. initiated by an extrinsic) the origin address of the + /// extrinsic will be returned. 
Otherwise, if this call is initiated by another contract then + /// the address of the contract will be returned. + /// + /// If there is no address associated with the caller (e.g. because the caller is root) then + /// it traps with `BadOrigin`. + /// + /// # Parameters + /// + /// - `output`: A reference to the output data buffer to write the caller address. + fn caller(output: &mut &mut [u8]); + + /// Checks whether the caller of the current contract is the origin of the whole call stack. + /// + /// Prefer this over [`is_contract()`][`Self::is_contract`] when checking whether your contract + /// is being called by a contract or a plain account. The reason is that it performs better + /// since it does not need to do any storage lookups. + /// + /// # Return + /// + /// A return value of `true` indicates that this contract is being called by a plain account + /// and `false` indicates that the caller is another contract. + fn caller_is_origin() -> bool; + + /// Checks whether the caller of the current contract is root. + /// + /// Note that only the origin of the call stack can be root. Hence this function returning + /// `true` implies that the contract is being called by the origin. + /// + /// A return value of `true` indicates that this contract is being called by a root origin, + /// and `false` indicates that the caller is a signed origin. + #[deprecated( + note = "Unstable function. Behaviour can change without further notice. Use only for testing." + )] + fn caller_is_root() -> u32; + + /// Clear the value at the given key in the contract storage. + /// + /// Equivalent to the newer [`Self::clear_storage_v1`] version with + /// the exception of the return type. Still a valid thing to call when not interested in the + /// return value. + fn clear_storage(key: &[u8]); + + /// Clear the value at the given key in the contract storage. + /// + /// # Parameters + /// + /// - `key`: The storage key. + /// + /// # Return + /// + /// Returns the size of the pre-existing value at the specified key if any. + fn clear_storage_v1(key: &[u8]) -> Option; + + /// Retrieve the code hash for a specified contract address. + /// + /// # Parameters + /// + /// - `account_id`: The address of the contract.Should be decodable as an `T::AccountId`. Traps + /// otherwise. + /// - `output`: A reference to the output data buffer to write the code hash. + /// + /// + /// # Errors + /// + /// - [CodeNotFound][`crate::ReturnErrorCode::CodeNotFound] + fn code_hash(account_id: &[u8], output: &mut [u8]) -> Result; + + /// Checks whether there is a value stored under the given key. + /// + /// This version is to be used with a fixed sized storage key. For runtimes supporting + /// transparent hashing, please use the newer version of this function. + fn contains_storage(key: &[u8]) -> Option; + + /// Checks whether there is a value stored under the given key. + /// + /// The key length must not exceed the maximum defined by the contracts module parameter. + /// + /// # Parameters + /// - `key`: The storage key. + /// + /// # Return + /// + /// Returns the size of the pre-existing value at the specified key if any. + fn contains_storage_v1(key: &[u8]) -> Option; + + /// Execute code in the context (storage, caller, value) of the current contract. + /// + /// Reentrancy protection is always disabled since the callee is allowed + /// to modify the callers storage. This makes going through a reentrancy attack + /// unnecessary for the callee when it wants to exploit the caller. 
+ /// + /// # Parameters + /// + /// - `flags`: See [`CallFlags`] for a documentation of the supported flags. + /// - `code_hash`: The hash of the code to be executed. + /// - `input`: The input data buffer used to call the contract. + /// - `output`: A reference to the output data buffer to write the call output buffer. If `None` + /// is provided then the output buffer is not copied. + /// + /// # Errors + /// + /// An error means that the call wasn't successful and no output buffer is returned unless + /// stated otherwise. + /// + /// - [CalleeReverted][`crate::ReturnErrorCode::CalleeReverted]: Output buffer is returned. + /// - [CalleeTrapped][`crate::ReturnErrorCode::CalleeTrapped] + /// - [CodeNotFound][`crate::ReturnErrorCode::CodeNotFound] + fn delegate_call( + flags: CallFlags, + code_hash: &[u8], + input_data: &[u8], + output: Option<&mut [u8]>, + ) -> Result; + + /// Deposit a contract event with the data buffer and optional list of topics. There is a limit + /// on the maximum number of topics specified by `event_topics`. + /// + /// There should not be any duplicates in `topics`. + /// + /// # Parameters + /// + /// - `topics`: The topics list encoded as `Vec`. It can't contain duplicates. + fn deposit_event(topics: &[u8], data: &[u8]); + + /// Recovers the ECDSA public key from the given message hash and signature. + /// + /// Writes the public key into the given output buffer. + /// Assumes the secp256k1 curve. + /// + /// # Parameters + /// + /// - `signature`: The signature bytes. + /// - `message_hash`: The message hash bytes. + /// - `output`: A reference to the output data buffer to write the public key. + /// + /// # Errors + /// + /// - [EcdsaRecoveryFailed][`crate::ReturnErrorCode::EcdsaRecoveryFailed] + fn ecdsa_recover( + signature: &[u8; 65], + message_hash: &[u8; 32], + output: &mut [u8; 33], + ) -> Result; + + /// Calculates Ethereum address from the ECDSA compressed public key and stores + /// it into the supplied buffer. + /// + /// # Parameters + /// + /// - `pubkey`: The public key bytes. + /// - `output`: A reference to the output data buffer to write the address. + /// + /// # Errors + /// + /// - [EcdsaRecoveryFailed][`crate::ReturnErrorCode::EcdsaRecoveryFailed] + fn ecdsa_to_eth_address(pubkey: &[u8; 33], output: &mut [u8; 20]) -> Result; + + /// Stores the weight left into the supplied buffer. + /// + /// Equivalent to the newer [`Self::gas_left_v1`] version but + /// works with *ref_time* Weight only. + fn gas_left(out: &mut &mut [u8]); + + /// Stores the amount of weight left into the supplied buffer. + /// The data is encoded as Weight. + /// + /// If the available space in `output` is less than the size of the value a trap is triggered. + /// + /// # Parameters + /// + /// - `output`: A reference to the output data buffer to write the weight left. + #[deprecated( + note = "Unstable function. Behaviour can change without further notice. Use only for testing." + )] + fn gas_left_v1(output: &mut &mut [u8]); + + /// Retrieve the value under the given key from storage. + /// + /// This version is to be used with a fixed sized storage key. For runtimes supporting + /// transparent hashing, please use the newer version of this function. + fn get_storage(key: &[u8], output: &mut &mut [u8]) -> Result; + + /// Retrieve the value under the given key from storage. + /// + /// The key length must not exceed the maximum defined by the contracts module parameter. + /// + /// # Parameters + /// - `key`: The storage key. 
+ /// - `output`: A reference to the output data buffer to write the storage entry. + /// + /// # Errors + /// + /// [KeyNotFound][`crate::ReturnErrorCode::KeyNotFound] + fn get_storage_v1(key: &[u8], output: &mut &mut [u8]) -> Result; + + hash_fn!(sha2_256, 32); + hash_fn!(keccak_256, 32); + hash_fn!(blake2_256, 32); + hash_fn!(blake2_128, 16); + + /// Stores the input passed by the caller into the supplied buffer. + /// + /// # Note + /// + /// This function traps if: + /// - the input is larger than the available space. + /// - the input was previously forwarded by a [`call()`][`Self::call()`]. + /// + /// # Parameters + /// + /// - `output`: A reference to the output data buffer to write the input data. + fn input(output: &mut &mut [u8]); + + /// Instantiate a contract with the specified code hash. + /// + /// Equivalent to the newer [`Self::instantiate_v2`] version but works + /// with *ref_time* Weight only. + fn instantiate_v1( + code_hash: &[u8], + gas: u64, + value: &[u8], + input: &[u8], + address: Option<&mut [u8]>, + output: Option<&mut [u8]>, + salt: &[u8], + ) -> Result; + + /// Instantiate a contract with the specified code hash. + /// + /// This function creates an account and executes the constructor defined in the code specified + /// by the code hash. + /// + /// # Parameters + /// + /// - `code_hash`: The hash of the code to be instantiated. + /// - `ref_time_limit`: how much *ref_time* Weight to devote to the execution. + /// - `proof_size_limit`: how much *proof_size* Weight to devote to the execution. + /// - `deposit`: The storage deposit limit for instantiation. Should be decodable as a + /// `Option`. Traps otherwise. Passing `None` means setting no specific limit for + /// the call, which implies storage usage up to the limit of the parent call. + /// - `value`: The value to transfer into the contract. Should be decodable as a `T::Balance`. + /// Traps otherwise. + /// - `input`: The input data buffer. + /// - `address`: A reference to the address buffer to write the address of the contract. If + /// `None` is provided then the output buffer is not copied. + /// - `output`: A reference to the return value buffer to write the constructor output buffer. + /// If `None` is provided then the output buffer is not copied. + /// - `salt`: The salt bytes to use for this instantiation. + /// + /// # Errors + /// + /// Please consult the [ReturnErrorCode][`crate::ReturnErrorCode`] enum declaration for more + /// information on those errors. Here we only note things specific to this function. + /// + /// An error means that the account wasn't created and no address or output buffer + /// is returned unless stated otherwise. + /// + /// - [CalleeReverted][`crate::ReturnErrorCode::CalleeReverted]: Output buffer is returned. + /// - [CalleeTrapped][`crate::ReturnErrorCode::CalleeTrapped] + /// - [TransferFailed][`crate::ReturnErrorCode::TransferFailed] + /// - [CodeNotFound][`crate::ReturnErrorCode::CodeNotFound] + #[deprecated( + note = "Unstable function. Behaviour can change without further notice. Use only for testing." + )] + fn instantiate_v2( + code_hash: &[u8], + ref_time_limit: u64, + proof_size_limit: u64, + deposit: Option<&[u8]>, + value: &[u8], + input: &[u8], + address: Option<&mut [u8]>, + output: Option<&mut [u8]>, + salt: &[u8], + ) -> Result; + + /// Returns a nonce that is unique per contract instantiation. + /// + /// The nonce is incremented for each successful contract instantiation. This is a + /// sensible default salt for contract instantiations. 
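To make the "sensible default salt" remark concrete, here is an editor-added sketch (not from the patch) of contract code that feeds the instantiation nonce into `instantiate_v2`. It assumes a wasm32 contract build where the `HostFn`/`HostFnImpl` items are available; the helper name, the 32-byte address buffer and the caller-supplied weight limits are illustrative choices:

```rust
use pallet_contracts_uapi::{HostFn, HostFnImpl, ReturnErrorCode};

/// Instantiate `code_hash`, using the instantiation nonce as the salt.
#[allow(deprecated)] // `instantiate_v2` is still flagged as unstable
fn instantiate_with_default_salt(
    code_hash: &[u8; 32],
    ref_time_limit: u64,
    proof_size_limit: u64,
    encoded_value: &[u8],
    constructor_input: &[u8],
) -> Result<[u8; 32], ReturnErrorCode> {
    let salt = HostFnImpl::instantiation_nonce().to_le_bytes();
    let mut address = [0u8; 32];
    HostFnImpl::instantiate_v2(
        code_hash,
        ref_time_limit,
        proof_size_limit,
        None,                   // deposit: no explicit storage deposit limit
        encoded_value,          // SCALE-encoded `T::Balance`
        constructor_input,
        Some(&mut address[..]), // receive the new contract's address
        None,                   // constructor output is not needed here
        &salt,                  // distinct for every successful instantiation
    )?;
    Ok(address)
}
```

The salt's uniqueness relies on the nonce being bumped after every successful instantiation, as described above.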
+ fn instantiation_nonce() -> u64; + + /// Checks whether a specified address belongs to a contract. + /// + /// # Parameters + /// + /// - `account_id`: The address to check. Should be decodable as an `T::AccountId`. Traps + /// otherwise. + /// + /// # Return + /// + /// Returns `true` if the address belongs to a contract. + fn is_contract(account_id: &[u8]) -> bool; + + /// Stores the minimum balance (a.k.a. existential deposit) into the supplied buffer. + /// The data is encoded as `T::Balance`. + /// + /// If the available space in `output` is less than the size of the value a trap is triggered. + /// + /// # Parameters + /// + /// - `output`: A reference to the output data buffer to write the minimum balance. + fn minimum_balance(output: &mut &mut [u8]); + + /// Retrieve the code hash of the currently executing contract. + /// + /// # Parameters + /// + /// - `output`: A reference to the output data buffer to write the code hash. + fn own_code_hash(output: &mut [u8]); + + /// Load the latest block timestamp into the supplied buffer + /// + /// If the available space in `output` is less than the size of the value a trap is triggered. + /// + /// # Parameters + /// + /// - `output`: A reference to the output data buffer to write the timestamp. + fn now(output: &mut &mut [u8]); + + /// Returns the number of times the currently executing contract exists on the call stack in + /// addition to the calling instance. + /// + /// # Return + /// + /// Returns `0` when there is no reentrancy. + #[deprecated( + note = "Unstable function. Behaviour can change without further notice. Use only for testing." + )] + fn reentrance_count() -> u32; + + /// Removes the delegate dependency from the contract. + /// + /// Traps if the delegate dependency does not exist. + /// + /// # Parameters + /// + /// - `code_hash`: The code hash of the dependency. Should be decodable as an `T::Hash`. Traps + /// otherwise. + #[deprecated( + note = "Unstable function. Behaviour can change without further notice. Use only for testing." + )] + fn remove_delegate_dependency(code_hash: &[u8]); + + /// Cease contract execution and save a data buffer as a result of the execution. + /// + /// This function never returns as it stops execution of the caller. + /// This is the only way to return a data buffer to the caller. Returning from + /// execution without calling this function is equivalent to calling: + /// ```nocompile + /// return_value(ReturnFlags::empty(), &[]) + /// ``` + /// + /// Using an unnamed non empty `ReturnFlags` triggers a trap. + /// + /// # Parameters + /// + /// - `flags`: Flag used to signal special return conditions to the supervisor. See + /// [`ReturnFlags`] for a documentation of the supported flags. + /// - `return_value`: The return value buffer. + fn return_value(flags: ReturnFlags, return_value: &[u8]) -> !; + + /// Replace the contract code at the specified address with new code. + /// + /// # Note + /// + /// There are a couple of important considerations which must be taken into account when + /// using this API: + /// + /// 1. The storage at the code address will remain untouched. This means that contract + /// developers must ensure that the storage layout of the new code is compatible with that of + /// the old code. + /// + /// 2. Contracts using this API can't be assumed as having deterministic addresses. Said another + /// way, when using this API you lose the guarantee that an address always identifies a specific + /// code hash. + /// + /// 3. 
If a contract calls into itself after changing its code the new call would use + /// the new code. However, if the original caller panics after returning from the sub call it + /// would revert the changes made by [`set_code_hash()`][`Self::set_code_hash`] and the next + /// caller would use the old code. + /// + /// # Parameters + /// + /// - `code_hash`: The hash of the new code. Should be decodable as an `T::Hash`. Traps + /// otherwise. + /// + /// # Errors + /// + /// - [CodeNotFound][`crate::ReturnErrorCode::CodeNotFound] + fn set_code_hash(code_hash: &[u8]) -> Result; + + /// Set the value at the given key in the contract storage. + /// + /// Equivalent to [`Self::set_storage_v1`] version with the + /// exception of the return type. Still a valid thing to call for fixed sized storage key, when + /// not interested in the return value. + fn set_storage(key: &[u8], value: &[u8]); + + /// Set the value at the given key in the contract storage. + /// + /// This version is to be used with a fixed sized storage key. For runtimes supporting + /// transparent hashing, please use the newer version of this function. + fn set_storage_v1(key: &[u8], value: &[u8]) -> Option; + + /// Set the value at the given key in the contract storage. + /// + /// The key and value lengths must not exceed the maximums defined by the contracts module + /// parameters. + /// + /// # Parameters + /// + /// - `key`: The storage key. + /// - `encoded_value`: The storage value. + /// + /// # Return + /// + /// Returns the size of the pre-existing value at the specified key if any. + fn set_storage_v2(key: &[u8], value: &[u8]) -> Option; + + /// Verify a sr25519 signature + /// + /// # Parameters + /// + /// - `signature`: The signature bytes. + /// - `message`: The message bytes. + /// + /// # Errors + /// + /// - [Sr25519VerifyFailed][`crate::ReturnErrorCode::Sr25519VerifyFailed] + fn sr25519_verify(signature: &[u8; 64], message: &[u8], pub_key: &[u8; 32]) -> Result; + + /// Retrieve and remove the value under the given key from storage. + /// + /// # Parameters + /// - `key`: The storage key. + /// - `output`: A reference to the output data buffer to write the storage entry. + /// + /// # Errors + /// + /// [KeyNotFound][`crate::ReturnErrorCode::KeyNotFound] + fn take_storage(key: &[u8], output: &mut &mut [u8]) -> Result; + + /// Transfer some amount of funds into the specified account. + /// + /// # Parameters + /// + /// - `account_id`: The address of the account to transfer funds to. Should be decodable as an + /// `T::AccountId`. Traps otherwise. + /// - `value`: The value to transfer. Should be decodable as a `T::Balance`. Traps otherwise. + /// + /// # Errors + /// + /// - [TransferFailed][`crate::ReturnErrorCode::TransferFailed] + fn transfer(account_id: &[u8], value: &[u8]) -> Result; + + /// Remove the calling account and transfer remaining balance. + /// + /// This is equivalent to calling the newer version of this function + #[deprecated(note = "Deprecated, use newer version instead")] + fn terminate(beneficiary: &[u8]) -> !; + + /// Remove the calling account and transfer remaining **free** balance. + /// + /// This function never returns. Either the termination was successful and the + /// execution of the destroyed contract is halted. Or it failed during the termination + /// which is considered fatal and results in a trap + rollback. + /// + /// # Parameters + /// + /// - `beneficiary`: The address of the beneficiary account, Should be decodable as an + /// `T::AccountId`. 
+ /// + /// # Traps + /// + /// - The contract is live i.e is already on the call stack. + /// - Failed to send the balance to the beneficiary. + /// - The deletion queue is full. + fn terminate_v1(beneficiary: &[u8]) -> !; + + /// Stores the value transferred along with this call/instantiate into the supplied buffer. + /// The data is encoded as `T::Balance`. + /// + /// If the available space in `output` is less than the size of the value a trap is triggered. + /// + /// # Parameters + /// + /// - `output`: A reference to the output data buffer to write the transferred value. + fn value_transferred(output: &mut &mut [u8]); + + /// Stores the price for the specified amount of gas into the supplied buffer. + /// + /// Equivalent to the newer [`Self::weight_to_fee_v1`] version but + /// works with *ref_time* Weight only. It is recommended to switch to the latest version, once + /// it's stabilized. + fn weight_to_fee(gas: u64, output: &mut &mut [u8]); + + /// Stores the price for the specified amount of gas into the supplied buffer. + /// The data is encoded as `T::Balance`. + /// + /// If the available space in `output` is less than the size of the value a trap is triggered. + /// + /// # Parameters + /// + /// - `ref_time_limit`: The *ref_time* Weight limit to query the price for. + /// - `proof_size_limit`: The *proof_size* Weight limit to query the price for. + /// - `output`: A reference to the output data buffer to write the price. + #[deprecated( + note = "Unstable function. Behaviour can change without further notice. Use only for testing." + )] + fn weight_to_fee_v1(ref_time_limit: u64, proof_size_limit: u64, output: &mut &mut [u8]); + + /// Execute an XCM program locally, using the contract's address as the origin. + /// This is equivalent to dispatching `pallet_xcm::execute` through call_runtime, except that + /// the function is called directly instead of being dispatched. + /// + /// # Parameters + /// + /// - `msg`: The message, should be decodable as a [VersionedXcm](https://paritytech.github.io/polkadot-sdk/master/staging_xcm/enum.VersionedXcm.html), + /// traps otherwise. + /// - `output`: A reference to the output data buffer to write the [Outcome](https://paritytech.github.io/polkadot-sdk/master/staging_xcm/v3/enum.Outcome.html) + /// + /// # Return + /// + /// Returns `Error::Success` when the XCM execution attempt is successful. When the XCM + /// execution fails, `ReturnCode::XcmExecutionFailed` is returned + #[deprecated( + note = "Unstable function. Behaviour can change without further notice. Use only for testing." + )] + fn xcm_execute(msg: &[u8], output: &mut &mut [u8]) -> Result; + + /// Send an XCM program from the contract to the specified destination. + /// This is equivalent to dispatching `pallet_xcm::send` through `call_runtime`, except that + /// the function is called directly instead of being dispatched. + /// + /// # Parameters + /// + /// - `dest`: The XCM destination, should be decodable as [VersionedMultiLocation](https://paritytech.github.io/polkadot-sdk/master/staging_xcm/enum.VersionedMultiLocation.html), + /// traps otherwise. + /// - `msg`: The message, should be decodable as a [VersionedXcm](https://paritytech.github.io/polkadot-sdk/master/staging_xcm/enum.VersionedXcm.html), + /// traps otherwise. 
+ /// - `output`: A reference to the output data buffer to write the [XcmHash](https://paritytech.github.io/polkadot-sdk/master/staging_xcm/v3/type.XcmHash.html) + /// + /// # Return + /// + /// Returns `ReturnCode::Success` when the message was successfully sent. When the XCM + /// execution fails, `ReturnErrorCode::XcmSendFailed` is returned. + #[deprecated( + note = "Unstable function. Behaviour can change without further notice. Use only for testing." + )] + fn xcm_send(dest: &[u8], msg: &[u8], output: &mut &mut [u8]) -> Result; +} diff --git a/substrate/frame/contracts/uapi/src/host/riscv32.rs b/substrate/frame/contracts/uapi/src/host/riscv32.rs new file mode 100644 index 00000000000..f58b8831f06 --- /dev/null +++ b/substrate/frame/contracts/uapi/src/host/riscv32.rs @@ -0,0 +1,14 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// TODO: bring up to date with wasm32.rs diff --git a/substrate/frame/contracts/uapi/src/host/wasm32.rs b/substrate/frame/contracts/uapi/src/host/wasm32.rs new file mode 100644 index 00000000000..d30058daf3d --- /dev/null +++ b/substrate/frame/contracts/uapi/src/host/wasm32.rs @@ -0,0 +1,811 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
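Before the `wasm32` backend below wires these trait methods to the raw `seal*` imports, a short editor-added sketch (not part of the patch) of how the unstable `xcm_execute` entry documented above might be used from contract code. It assumes `encoded_msg` is already a SCALE-encoded `VersionedXcm`, and the 64-byte outcome buffer is an arbitrary size chosen for the example:

```rust
use pallet_contracts_uapi::{HostFn, HostFnImpl, ReturnErrorCode};

/// Execute an already-encoded XCM program with the contract as origin.
#[allow(deprecated)] // `xcm_execute` is still flagged as unstable
fn execute_xcm(encoded_msg: &[u8]) -> Result<(), ReturnErrorCode> {
    let mut outcome_buf = [0u8; 64];
    let mut outcome: &mut [u8] = &mut outcome_buf[..];
    // On success `outcome` holds the SCALE-encoded `Outcome`, which a real
    // contract would decode and inspect instead of discarding.
    HostFnImpl::xcm_execute(encoded_msg, &mut outcome)
}
```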
+use super::{ + extract_from_slice, ptr_len_or_sentinel, ptr_or_sentinel, CallFlags, HostFn, HostFnImpl, Result, +}; +use crate::{ReturnCode, ReturnFlags}; + +mod sys { + use super::ReturnCode; + + #[link(wasm_import_module = "seal0")] + extern "C" { + pub fn account_reentrance_count(account_ptr: *const u8) -> u32; + + pub fn add_delegate_dependency(code_hash_ptr: *const u8); + + pub fn address(output_ptr: *mut u8, output_len_ptr: *mut u32); + + pub fn balance(output_ptr: *mut u8, output_len_ptr: *mut u32); + + pub fn block_number(output_ptr: *mut u8, output_len_ptr: *mut u32); + + pub fn call( + callee_ptr: *const u8, + callee_len: u32, + gas: u64, + value_ptr: *const u8, + value_len: u32, + input_data_ptr: *const u8, + input_data_len: u32, + output_ptr: *mut u8, + output_len_ptr: *mut u32, + ) -> ReturnCode; + + pub fn call_chain_extension( + func_id: u32, + input_ptr: *const u8, + input_len: u32, + output_ptr: *mut u8, + output_len_ptr: *mut u32, + ) -> ReturnCode; + + pub fn call_runtime(call_ptr: *const u8, call_len: u32) -> ReturnCode; + + pub fn caller(output_ptr: *mut u8, output_len_ptr: *mut u32); + + pub fn caller_is_origin() -> ReturnCode; + + pub fn caller_is_root() -> ReturnCode; + + pub fn clear_storage(key_ptr: *const u8, key_len: u32) -> ReturnCode; + + pub fn code_hash( + account_id_ptr: *const u8, + output_ptr: *mut u8, + output_len_ptr: *mut u32, + ) -> ReturnCode; + + pub fn contains_storage(key_ptr: *const u8, key_len: u32) -> ReturnCode; + + pub fn delegate_call( + flags: u32, + code_hash_ptr: *const u8, + input_data_ptr: *const u8, + input_data_len: u32, + output_ptr: *mut u8, + output_len_ptr: *mut u32, + ) -> ReturnCode; + + pub fn deposit_event( + topics_ptr: *const u8, + topics_len: u32, + data_ptr: *const u8, + data_len: u32, + ); + + pub fn ecdsa_recover( + signature_ptr: *const u8, + message_hash_ptr: *const u8, + output_ptr: *mut u8, + ) -> ReturnCode; + + pub fn ecdsa_to_eth_address(public_key_ptr: *const u8, output_ptr: *mut u8) -> ReturnCode; + + pub fn gas_left(output_ptr: *mut u8, output_len_ptr: *mut u32); + + pub fn get_storage( + key_ptr: *const u8, + key_len: u32, + out_ptr: *mut u8, + out_len_ptr: *mut u32, + ) -> ReturnCode; + + pub fn hash_blake2_128(input_ptr: *const u8, input_len: u32, output_ptr: *mut u8); + + pub fn hash_blake2_256(input_ptr: *const u8, input_len: u32, output_ptr: *mut u8); + + pub fn hash_keccak_256(input_ptr: *const u8, input_len: u32, output_ptr: *mut u8); + + pub fn hash_sha2_256(input_ptr: *const u8, input_len: u32, output_ptr: *mut u8); + + pub fn input(buf_ptr: *mut u8, buf_len_ptr: *mut u32); + + pub fn instantiation_nonce() -> u64; + + pub fn is_contract(account_id_ptr: *const u8) -> ReturnCode; + + pub fn minimum_balance(output_ptr: *mut u8, output_len_ptr: *mut u32); + + pub fn now(output_ptr: *mut u8, output_len_ptr: *mut u32); + + pub fn own_code_hash(output_ptr: *mut u8, output_len_ptr: *mut u32); + + pub fn reentrance_count() -> u32; + + pub fn remove_delegate_dependency(code_hash_ptr: *const u8); + + pub fn seal_return(flags: u32, data_ptr: *const u8, data_len: u32) -> !; + + pub fn set_code_hash(code_hash_ptr: *const u8) -> ReturnCode; + + pub fn set_storage( + key_ptr: *const u8, + key_len: u32, + value_ptr: *const u8, + value_len: u32, + ) -> ReturnCode; + + pub fn sr25519_verify( + signature_ptr: *const u8, + public_key_ptr: *const u8, + message_len: u32, + message_ptr: *const u8, + ) -> ReturnCode; + + pub fn take_storage( + key_ptr: *const u8, + key_len: u32, + out_ptr: *mut u8, + out_len_ptr: *mut u32, + 
) -> ReturnCode; + + pub fn terminate(beneficiary_ptr: *const u8) -> !; + + pub fn transfer( + account_id_ptr: *const u8, + account_id_len: u32, + transferred_value_ptr: *const u8, + transferred_value_len: u32, + ) -> ReturnCode; + + pub fn value_transferred(output_ptr: *mut u8, output_len_ptr: *mut u32); + + pub fn weight_to_fee(gas: u64, output_ptr: *mut u8, output_len_ptr: *mut u32); + + pub fn xcm_execute(msg_ptr: *const u8, msg_len: u32, output_ptr: *mut u8) -> ReturnCode; + + pub fn xcm_send( + dest_ptr: *const u8, + msg_ptr: *const u8, + msg_len: u32, + output_ptr: *mut u8, + ) -> ReturnCode; + } + + pub mod v1 { + use crate::ReturnCode; + + #[link(wasm_import_module = "seal1")] + extern "C" { + pub fn call( + flags: u32, + callee_ptr: *const u8, + gas: u64, + transferred_value_ptr: *const u8, + input_data_ptr: *const u8, + input_data_len: u32, + output_ptr: *mut u8, + output_len_ptr: *mut u32, + ) -> ReturnCode; + + pub fn clear_storage(key_ptr: *const u8, key_len: u32) -> ReturnCode; + + pub fn contains_storage(key_ptr: *const u8, key_len: u32) -> ReturnCode; + + pub fn gas_left(output_ptr: *mut u8, output_len_ptr: *mut u32); + + pub fn get_storage( + key_ptr: *const u8, + key_len: u32, + out_ptr: *mut u8, + out_len_ptr: *mut u32, + ) -> ReturnCode; + + pub fn instantiate( + code_hash_ptr: *const u8, + gas: u64, + value_ptr: *const u8, + input_ptr: *const u8, + input_len: u32, + address_ptr: *mut u8, + address_len_ptr: *mut u32, + output_ptr: *mut u8, + output_len_ptr: *mut u32, + salt_ptr: *const u8, + salt_len: u32, + ) -> ReturnCode; + + pub fn set_storage( + key_ptr: *const u8, + key_len: u32, + value_ptr: *const u8, + value_len: u32, + ) -> ReturnCode; + + pub fn terminate(beneficiary_ptr: *const u8) -> !; + + pub fn weight_to_fee( + ref_time_limit: u64, + proof_time_limit: u64, + output_ptr: *mut u8, + output_len_ptr: *mut u32, + ); + } + } + + pub mod v2 { + use crate::ReturnCode; + + #[link(wasm_import_module = "seal2")] + extern "C" { + pub fn call( + flags: u32, + callee_ptr: *const u8, + ref_time_limit: u64, + proof_time_limit: u64, + deposit_ptr: *const u8, + transferred_value_ptr: *const u8, + input_data_ptr: *const u8, + input_data_len: u32, + output_ptr: *mut u8, + output_len_ptr: *mut u32, + ) -> ReturnCode; + + pub fn instantiate( + code_hash_ptr: *const u8, + ref_time_limit: u64, + proof_time_limit: u64, + deposit_ptr: *const u8, + value_ptr: *const u8, + input_ptr: *const u8, + input_len: u32, + address_ptr: *mut u8, + address_len_ptr: *mut u32, + output_ptr: *mut u8, + output_len_ptr: *mut u32, + salt_ptr: *const u8, + salt_len: u32, + ) -> ReturnCode; + + pub fn set_storage( + key_ptr: *const u8, + key_len: u32, + value_ptr: *const u8, + value_len: u32, + ) -> ReturnCode; + } + } +} + +/// A macro to implement all Host functions with a signature of `fn(&mut &mut [u8])`. +macro_rules! impl_wrapper_for { + (@impl_fn $( $mod:ident )::*, $suffix:literal, $name:ident) => { + paste::paste! { + fn [<$name $suffix>](output: &mut &mut [u8]) { + let mut output_len = output.len() as u32; + unsafe { + $( $mod )::*::$name(output.as_mut_ptr(), &mut output_len); + } + } + } + }; + + () => {}; + + (($mod:ident, $suffix:literal) => [$( $name:ident),*], $($tail:tt)*) => { + $(impl_wrapper_for!(@impl_fn sys::$mod, $suffix, $name);)* + impl_wrapper_for!($($tail)*); + }; + + (() => [$( $name:ident),*], $($tail:tt)*) => { + $(impl_wrapper_for!(@impl_fn sys, "", $name);)* + impl_wrapper_for!($($tail)*); + }; +} + +/// A macro to implement all the hash functions Apis. +macro_rules! 
impl_hash_fn { + ( $name:ident, $bytes_result:literal ) => { + paste::item! { + fn [](input: &[u8], output: &mut [u8; $bytes_result]) { + unsafe { + sys::[]( + input.as_ptr(), + input.len() as u32, + output.as_mut_ptr(), + ) + } + } + } + }; +} + +/// A macro to implement the get_storage functions. +macro_rules! impl_get_storage { + ($fn_name:ident, $sys_get_storage:path) => { + fn $fn_name(key: &[u8], output: &mut &mut [u8]) -> Result { + let mut output_len = output.len() as u32; + let ret_code = { + unsafe { + $sys_get_storage( + key.as_ptr(), + key.len() as u32, + output.as_mut_ptr(), + &mut output_len, + ) + } + }; + extract_from_slice(output, output_len as usize); + ret_code.into() + } + }; +} + +impl HostFn for HostFnImpl { + fn instantiate_v1( + code_hash: &[u8], + gas: u64, + value: &[u8], + input: &[u8], + mut address: Option<&mut [u8]>, + mut output: Option<&mut [u8]>, + salt: &[u8], + ) -> Result { + let (address_ptr, mut address_len) = ptr_len_or_sentinel(&mut address); + let (output_ptr, mut output_len) = ptr_len_or_sentinel(&mut output); + let ret_code = unsafe { + sys::v1::instantiate( + code_hash.as_ptr(), + gas, + value.as_ptr(), + input.as_ptr(), + input.len() as u32, + address_ptr, + &mut address_len, + output_ptr, + &mut output_len, + salt.as_ptr(), + salt.len() as u32, + ) + }; + + if let Some(ref mut address) = address { + extract_from_slice(address, address_len as usize); + } + if let Some(ref mut output) = output { + extract_from_slice(output, output_len as usize); + } + ret_code.into() + } + + fn instantiate_v2( + code_hash: &[u8], + ref_time_limit: u64, + proof_size_limit: u64, + deposit: Option<&[u8]>, + value: &[u8], + input: &[u8], + mut address: Option<&mut [u8]>, + mut output: Option<&mut [u8]>, + salt: &[u8], + ) -> Result { + let (address_ptr, mut address_len) = ptr_len_or_sentinel(&mut address); + let (output_ptr, mut output_len) = ptr_len_or_sentinel(&mut output); + let deposit_ptr = ptr_or_sentinel(&deposit); + + let ret_code = { + unsafe { + sys::v2::instantiate( + code_hash.as_ptr(), + ref_time_limit, + proof_size_limit, + deposit_ptr, + value.as_ptr(), + input.as_ptr(), + input.len() as u32, + address_ptr, + &mut address_len, + output_ptr, + &mut output_len, + salt.as_ptr(), + salt.len() as u32, + ) + } + }; + + if let Some(ref mut address) = address { + extract_from_slice(address, address_len as usize); + } + + if let Some(ref mut output) = output { + extract_from_slice(output, output_len as usize); + } + + ret_code.into() + } + + fn call( + callee: &[u8], + gas: u64, + value: &[u8], + input_data: &[u8], + mut output: Option<&mut [u8]>, + ) -> Result { + let (output_ptr, mut output_len) = ptr_len_or_sentinel(&mut output); + let ret_code = { + unsafe { + sys::call( + callee.as_ptr(), + callee.len() as u32, + gas, + value.as_ptr(), + value.len() as u32, + input_data.as_ptr(), + input_data.len() as u32, + output_ptr, + &mut output_len, + ) + } + }; + + if let Some(ref mut output) = output { + extract_from_slice(output, output_len as usize); + } + + ret_code.into() + } + + fn call_v1( + flags: CallFlags, + callee: &[u8], + gas: u64, + value: &[u8], + input_data: &[u8], + mut output: Option<&mut [u8]>, + ) -> Result { + let (output_ptr, mut output_len) = ptr_len_or_sentinel(&mut output); + let ret_code = { + unsafe { + sys::v1::call( + flags.bits(), + callee.as_ptr(), + gas, + value.as_ptr(), + input_data.as_ptr(), + input_data.len() as u32, + output_ptr, + &mut output_len, + ) + } + }; + + if let Some(ref mut output) = output { + 
extract_from_slice(output, output_len as usize); + } + + ret_code.into() + } + + fn call_v2( + flags: CallFlags, + callee: &[u8], + ref_time_limit: u64, + proof_time_limit: u64, + deposit: Option<&[u8]>, + value: &[u8], + input_data: &[u8], + mut output: Option<&mut [u8]>, + ) -> Result { + let (output_ptr, mut output_len) = ptr_len_or_sentinel(&mut output); + let deposit_ptr = ptr_or_sentinel(&deposit); + let ret_code = { + unsafe { + sys::v2::call( + flags.bits(), + callee.as_ptr(), + ref_time_limit, + proof_time_limit, + deposit_ptr, + value.as_ptr(), + input_data.as_ptr(), + input_data.len() as u32, + output_ptr, + &mut output_len, + ) + } + }; + + if let Some(ref mut output) = output { + extract_from_slice(output, output_len as usize); + } + + ret_code.into() + } + + fn caller_is_root() -> u32 { + unsafe { sys::caller_is_root() }.into_u32() + } + + fn delegate_call( + flags: CallFlags, + code_hash: &[u8], + input: &[u8], + mut output: Option<&mut [u8]>, + ) -> Result { + let (output_ptr, mut output_len) = ptr_len_or_sentinel(&mut output); + let ret_code = { + unsafe { + sys::delegate_call( + flags.bits(), + code_hash.as_ptr(), + input.as_ptr(), + input.len() as u32, + output_ptr, + &mut output_len, + ) + } + }; + + if let Some(ref mut output) = output { + extract_from_slice(output, output_len as usize); + } + + ret_code.into() + } + + fn transfer(account_id: &[u8], value: &[u8]) -> Result { + let ret_code = unsafe { + sys::transfer( + account_id.as_ptr(), + account_id.len() as u32, + value.as_ptr(), + value.len() as u32, + ) + }; + ret_code.into() + } + + fn deposit_event(topics: &[u8], data: &[u8]) { + unsafe { + sys::deposit_event( + topics.as_ptr(), + topics.len() as u32, + data.as_ptr(), + data.len() as u32, + ) + } + } + + fn set_storage(key: &[u8], value: &[u8]) { + unsafe { + sys::set_storage(key.as_ptr(), key.len() as u32, value.as_ptr(), value.len() as u32) + }; + } + + fn set_storage_v1(key: &[u8], encoded_value: &[u8]) -> Option { + let ret_code = unsafe { + sys::v1::set_storage( + key.as_ptr(), + key.len() as u32, + encoded_value.as_ptr(), + encoded_value.len() as u32, + ) + }; + ret_code.into() + } + + fn set_storage_v2(key: &[u8], encoded_value: &[u8]) -> Option { + let ret_code = unsafe { + sys::v2::set_storage( + key.as_ptr(), + key.len() as u32, + encoded_value.as_ptr(), + encoded_value.len() as u32, + ) + }; + ret_code.into() + } + + fn clear_storage(key: &[u8]) { + unsafe { sys::clear_storage(key.as_ptr(), key.len() as u32) }; + } + + fn clear_storage_v1(key: &[u8]) -> Option { + let ret_code = unsafe { sys::v1::clear_storage(key.as_ptr(), key.len() as u32) }; + ret_code.into() + } + + impl_get_storage!(get_storage, sys::get_storage); + impl_get_storage!(get_storage_v1, sys::v1::get_storage); + + fn take_storage(key: &[u8], output: &mut &mut [u8]) -> Result { + let mut output_len = output.len() as u32; + let ret_code = { + unsafe { + sys::take_storage( + key.as_ptr(), + key.len() as u32, + output.as_mut_ptr(), + &mut output_len, + ) + } + }; + extract_from_slice(output, output_len as usize); + ret_code.into() + } + + fn contains_storage(key: &[u8]) -> Option { + let ret_code = unsafe { sys::contains_storage(key.as_ptr(), key.len() as u32) }; + ret_code.into() + } + + fn contains_storage_v1(key: &[u8]) -> Option { + let ret_code = unsafe { sys::v1::contains_storage(key.as_ptr(), key.len() as u32) }; + ret_code.into() + } + + fn terminate(beneficiary: &[u8]) -> ! { + unsafe { sys::terminate(beneficiary.as_ptr()) } + } + + fn terminate_v1(beneficiary: &[u8]) -> ! 
{ + unsafe { sys::v1::terminate(beneficiary.as_ptr()) } + } + + fn call_chain_extension(func_id: u32, input: &[u8], output: &mut &mut [u8]) -> u32 { + let mut output_len = output.len() as u32; + let ret_code = { + unsafe { + sys::call_chain_extension( + func_id, + input.as_ptr(), + input.len() as u32, + output.as_mut_ptr(), + &mut output_len, + ) + } + }; + extract_from_slice(output, output_len as usize); + ret_code.into_u32() + } + + fn input(output: &mut &mut [u8]) { + let mut output_len = output.len() as u32; + { + unsafe { sys::input(output.as_mut_ptr(), &mut output_len) }; + } + extract_from_slice(output, output_len as usize); + } + + fn return_value(flags: ReturnFlags, return_value: &[u8]) -> ! { + unsafe { sys::seal_return(flags.bits(), return_value.as_ptr(), return_value.len() as u32) } + } + + fn call_runtime(call: &[u8]) -> Result { + let ret_code = unsafe { sys::call_runtime(call.as_ptr(), call.len() as u32) }; + ret_code.into() + } + + impl_wrapper_for! { + () => [caller, block_number, address, balance, gas_left, value_transferred, now, minimum_balance], + (v1, "_v1") => [gas_left], + } + + fn weight_to_fee(gas: u64, output: &mut &mut [u8]) { + let mut output_len = output.len() as u32; + { + unsafe { sys::weight_to_fee(gas, output.as_mut_ptr(), &mut output_len) }; + } + extract_from_slice(output, output_len as usize); + } + + fn weight_to_fee_v1(ref_time_limit: u64, proof_size_limit: u64, output: &mut &mut [u8]) { + let mut output_len = output.len() as u32; + { + unsafe { + sys::v1::weight_to_fee( + ref_time_limit, + proof_size_limit, + output.as_mut_ptr(), + &mut output_len, + ) + }; + } + extract_from_slice(output, output_len as usize); + } + + impl_hash_fn!(sha2_256, 32); + impl_hash_fn!(keccak_256, 32); + impl_hash_fn!(blake2_256, 32); + impl_hash_fn!(blake2_128, 16); + + fn ecdsa_recover( + signature: &[u8; 65], + message_hash: &[u8; 32], + output: &mut [u8; 33], + ) -> Result { + let ret_code = unsafe { + sys::ecdsa_recover(signature.as_ptr(), message_hash.as_ptr(), output.as_mut_ptr()) + }; + ret_code.into() + } + + fn ecdsa_to_eth_address(pubkey: &[u8; 33], output: &mut [u8; 20]) -> Result { + let ret_code = unsafe { sys::ecdsa_to_eth_address(pubkey.as_ptr(), output.as_mut_ptr()) }; + ret_code.into() + } + + fn sr25519_verify(signature: &[u8; 64], message: &[u8], pub_key: &[u8; 32]) -> Result { + let ret_code = unsafe { + sys::sr25519_verify( + signature.as_ptr(), + pub_key.as_ptr(), + message.len() as u32, + message.as_ptr(), + ) + }; + ret_code.into() + } + + fn is_contract(account_id: &[u8]) -> bool { + let ret_val = unsafe { sys::is_contract(account_id.as_ptr()) }; + ret_val.into_bool() + } + + fn caller_is_origin() -> bool { + let ret_val = unsafe { sys::caller_is_origin() }; + ret_val.into_bool() + } + + fn set_code_hash(code_hash: &[u8]) -> Result { + let ret_val = unsafe { sys::set_code_hash(code_hash.as_ptr()) }; + ret_val.into() + } + + fn code_hash(account_id: &[u8], output: &mut [u8]) -> Result { + let mut output_len = output.len() as u32; + let ret_val = + unsafe { sys::code_hash(account_id.as_ptr(), output.as_mut_ptr(), &mut output_len) }; + ret_val.into() + } + + fn own_code_hash(output: &mut [u8]) { + let mut output_len = output.len() as u32; + unsafe { sys::own_code_hash(output.as_mut_ptr(), &mut output_len) } + } + + fn account_reentrance_count(account: &[u8]) -> u32 { + unsafe { sys::account_reentrance_count(account.as_ptr()) } + } + + fn add_delegate_dependency(code_hash: &[u8]) { + unsafe { sys::add_delegate_dependency(code_hash.as_ptr()) } + } + + 
fn remove_delegate_dependency(code_hash: &[u8]) { + unsafe { sys::remove_delegate_dependency(code_hash.as_ptr()) } + } + + fn instantiation_nonce() -> u64 { + unsafe { sys::instantiation_nonce() } + } + + fn reentrance_count() -> u32 { + unsafe { sys::reentrance_count() } + } + + fn xcm_execute(msg: &[u8], output: &mut &mut [u8]) -> Result { + let ret_code = + unsafe { sys::xcm_execute(msg.as_ptr(), msg.len() as _, output.as_mut_ptr()) }; + ret_code.into() + } + + fn xcm_send(dest: &[u8], msg: &[u8], output: &mut &mut [u8]) -> Result { + let ret_code = unsafe { + sys::xcm_send(dest.as_ptr(), msg.as_ptr(), msg.len() as _, output.as_mut_ptr()) + }; + ret_code.into() + } +} diff --git a/substrate/frame/contracts/uapi/src/lib.rs b/substrate/frame/contracts/uapi/src/lib.rs new file mode 100644 index 00000000000..3d384bbb85d --- /dev/null +++ b/substrate/frame/contracts/uapi/src/lib.rs @@ -0,0 +1,139 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! External C API to communicate with substrate contracts runtime module. +//! +//! Refer to substrate FRAME contract module for more documentation. + +#![no_std] + +mod flags; +pub use flags::*; + +#[cfg(any(target_arch = "wasm32", target_arch = "riscv32"))] +mod host; + +#[cfg(any(target_arch = "wasm32", target_arch = "riscv32"))] +pub use host::*; + +macro_rules! define_error_codes { + ( + $( + $( #[$attr:meta] )* + $name:ident = $discr:literal, + )* + ) => { + /// Every error that can be returned to a contract when it calls any of the host functions. + #[derive(Debug)] + #[repr(u32)] + pub enum ReturnErrorCode { + /// API call successful. + Success = 0, + $( + $( #[$attr] )* + $name = $discr, + )* + /// Returns if an unknown error was received from the host module. + Unknown, + } + + impl From for Result { + #[inline] + fn from(return_code: ReturnCode) -> Self { + match return_code.0 { + 0 => Ok(()), + $( + $discr => Err(ReturnErrorCode::$name), + )* + _ => Err(ReturnErrorCode::Unknown), + } + } + } + }; +} + +impl From for u32 { + fn from(code: ReturnErrorCode) -> u32 { + code as u32 + } +} + +define_error_codes! { + /// The called function trapped and has its state changes reverted. + /// In this case no output buffer is returned. + /// Can only be returned from `call` and `instantiate`. + CalleeTrapped = 1, + /// The called function ran to completion but decided to revert its state. + /// An output buffer is returned when one was supplied. + /// Can only be returned from `call` and `instantiate`. + CalleeReverted = 2, + /// The passed key does not exist in storage. + KeyNotFound = 3, + /// Deprecated and no longer returned: There is only the minimum balance. + _BelowSubsistenceThreshold = 4, + /// Transfer failed for other not further specified reason. Most probably + /// reserved or locked balance of the sender that was preventing the transfer. + TransferFailed = 5, + /// Deprecated and no longer returned: Endowment is no longer required. 
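These error codes are what every fallible `HostFn` method surfaces to the contract as the `Err` arm of its `Result`. An editor-added sketch (not part of the patch) of how contract code typically consumes them, using `transfer` as the example; it assumes a wasm32 contract build and that the destination and amount are already SCALE-encoded by the caller:

```rust
use pallet_contracts_uapi::{HostFn, HostFnImpl, ReturnErrorCode};

/// Returns `true` if the transfer succeeded and `false` if it failed for a
/// balance-related reason; anything else is treated as fatal and traps.
fn try_transfer(encoded_dest: &[u8], encoded_amount: &[u8]) -> bool {
    match HostFnImpl::transfer(encoded_dest, encoded_amount) {
        Ok(()) => true,
        // Most probably reserved or locked balance of the sender.
        Err(ReturnErrorCode::TransferFailed) => false,
        // `transfer` is not documented to return other codes; trap on them.
        Err(_) => panic!("unexpected error code from `transfer`"),
    }
}
```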
+ _EndowmentTooLow = 6, + /// No code could be found at the supplied code hash. + CodeNotFound = 7, + /// The account that was called is no contract. + NotCallable = 8, + /// The call to `debug_message` had no effect because debug message + /// recording was disabled. + LoggingDisabled = 9, + /// The call dispatched by `call_runtime` was executed but returned an error. + CallRuntimeFailed = 10, + /// ECDSA public key recovery failed. Most probably wrong recovery id or signature. + EcdsaRecoveryFailed = 11, + /// sr25519 signature verification failed. + Sr25519VerifyFailed = 12, + /// The `xcm_execute` call failed. + XcmExecutionFailed = 13, + /// The `xcm_send` call failed. + XcmSendFailed = 14, +} + +/// The raw return code returned by the host side. +#[repr(transparent)] +pub struct ReturnCode(u32); + +/// Used as a sentinel value when reading and writing contract memory. +/// +/// We use this value to signal `None` to a contract when only a primitive is +/// allowed and we don't want to go through encoding a full Rust type. +/// Using `u32::Max` is a safe sentinel because contracts are never +/// allowed to use such a large amount of resources. So this value doesn't +/// make sense for a memory location or length. +const SENTINEL: u32 = u32::MAX; + +impl From for Option { + fn from(code: ReturnCode) -> Self { + (code.0 < SENTINEL).then_some(code.0) + } +} + +impl ReturnCode { + /// Returns the raw underlying `u32` representation. + pub fn into_u32(self) -> u32 { + self.0 + } + /// Returns the underlying `u32` converted into `bool`. + pub fn into_bool(self) -> bool { + self.0.ne(&0) + } +} + +type Result = core::result::Result<(), ReturnErrorCode>; -- GitLab From e41a0e728bc300a3f384414559bb906adedc5125 Mon Sep 17 00:00:00 2001 From: Javier Bullrich Date: Thu, 30 Nov 2023 09:03:47 +0100 Subject: [PATCH 165/199] upgraded review bot to 2.3.0 (#2549) Upgraded to version 2.3.0 which includes: - paritytech/review-bot#103 - paritytech/review-bot#102 --- .github/workflows/review-bot.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/review-bot.yml b/.github/workflows/review-bot.yml index 5970989cde0..0a7e80f007c 100644 --- a/.github/workflows/review-bot.yml +++ b/.github/workflows/review-bot.yml @@ -23,7 +23,7 @@ jobs: app_id: ${{ secrets.REVIEW_APP_ID }} private_key: ${{ secrets.REVIEW_APP_KEY }} - name: "Evaluates PR reviews and assigns reviewers" - uses: paritytech/review-bot@v2.2.0 + uses: paritytech/review-bot@v2.3.0 with: repo-token: ${{ steps.app_token.outputs.token }} team-token: ${{ steps.app_token.outputs.token }} -- GitLab From 180a6ad9b0e7f6225304b811f4accf1d13c5dd11 Mon Sep 17 00:00:00 2001 From: Aaro Altonen <48052676+altonen@users.noreply.github.com> Date: Thu, 30 Nov 2023 11:52:20 +0200 Subject: [PATCH 166/199] Register metrics for the notification handles (#2562) Add metrics for notification handles so substream events are correctly reported to Prometheus --- substrate/client/network/src/protocol.rs | 11 ++++++++--- .../client/network/src/protocol/notifications.rs | 2 +- .../network/src/protocol/notifications/behaviour.rs | 6 ++---- .../network/src/protocol/notifications/tests.rs | 2 +- 4 files changed, 12 insertions(+), 9 deletions(-) diff --git a/substrate/client/network/src/protocol.rs b/substrate/client/network/src/protocol.rs index ea7977cc9ae..73b1cd97279 100644 --- a/substrate/client/network/src/protocol.rs +++ b/substrate/client/network/src/protocol.rs @@ -43,7 +43,7 @@ use sp_runtime::traits::Block as BlockT; use 
std::{collections::HashSet, iter, task::Poll}; -use notifications::{Notifications, NotificationsOut}; +use notifications::{metrics, Notifications, NotificationsOut}; pub(crate) use notifications::ProtocolHandle; @@ -95,7 +95,7 @@ impl Protocol { // NOTE: Block announcement protocol is still very much hardcoded into // `Protocol`. This protocol must be the first notification protocol given to // `Notifications` - let (protocol_configs, handles): (Vec<_>, Vec<_>) = iter::once({ + let (protocol_configs, mut handles): (Vec<_>, Vec<_>) = iter::once({ let config = notifications::ProtocolConfig { name: block_announces_protocol.protocol_name().clone(), fallback_names: block_announces_protocol.fallback_names().cloned().collect(), @@ -122,11 +122,16 @@ impl Protocol { })) .unzip(); + let metrics = registry.as_ref().and_then(|registry| metrics::register(®istry).ok()); + handles.iter_mut().for_each(|handle| { + handle.set_metrics(metrics.clone()); + }); + ( Notifications::new( protocol_controller_handles, from_protocol_controllers, - registry, + metrics, protocol_configs.into_iter(), ), installed_protocols, diff --git a/substrate/client/network/src/protocol/notifications.rs b/substrate/client/network/src/protocol/notifications.rs index 10fa329097d..8becc1390e7 100644 --- a/substrate/client/network/src/protocol/notifications.rs +++ b/substrate/client/network/src/protocol/notifications.rs @@ -25,7 +25,7 @@ pub use self::{ service::{notification_service, ProtocolHandlePair}, }; -pub(crate) use self::service::ProtocolHandle; +pub(crate) use self::service::{metrics, ProtocolHandle}; mod behaviour; mod handler; diff --git a/substrate/client/network/src/protocol/notifications/behaviour.rs b/substrate/client/network/src/protocol/notifications/behaviour.rs index ef0c6540eee..cdbf2a71b93 100644 --- a/substrate/client/network/src/protocol/notifications/behaviour.rs +++ b/substrate/client/network/src/protocol/notifications/behaviour.rs @@ -40,7 +40,6 @@ use libp2p::{ }; use log::{debug, error, trace, warn}; use parking_lot::RwLock; -use prometheus_endpoint::Registry; use rand::distributions::{Distribution as _, Uniform}; use sc_utils::mpsc::TracingUnboundedReceiver; use smallvec::SmallVec; @@ -405,7 +404,7 @@ impl Notifications { pub(crate) fn new( protocol_controller_handles: Vec, from_protocol_controllers: TracingUnboundedReceiver, - registry: &Option, + metrics: Option, notif_protocols: impl Iterator< Item = ( ProtocolConfig, @@ -429,7 +428,6 @@ impl Notifications { .unzip(); assert!(!notif_protocols.is_empty()); - let metrics = registry.as_ref().and_then(|registry| metrics::register(®istry).ok()); let (mut protocol_handles, command_streams): (Vec<_>, Vec<_>) = protocol_handles .into_iter() .enumerate() @@ -2452,7 +2450,7 @@ mod tests { Notifications::new( vec![handle], from_controller, - &None, + None, iter::once(( ProtocolConfig { name: "/foo".into(), diff --git a/substrate/client/network/src/protocol/notifications/tests.rs b/substrate/client/network/src/protocol/notifications/tests.rs index 92d269f89c3..0178bd75e8b 100644 --- a/substrate/client/network/src/protocol/notifications/tests.rs +++ b/substrate/client/network/src/protocol/notifications/tests.rs @@ -99,7 +99,7 @@ fn build_nodes() -> (Swarm, Swarm) { inner: Notifications::new( vec![controller_handle], from_controller, - &None, + None, iter::once(( ProtocolConfig { name: "/foo".into(), -- GitLab From eaf1bc5633ebbacce97e4f167ebe1d0d268c4b24 Mon Sep 17 00:00:00 2001 From: Kian Paimani <5588131+kianenigma@users.noreply.github.com> Date: Thu, 30 Nov 2023 
14:15:46 +0300 Subject: [PATCH 167/199] Introduce Polkadot-Sdk `developer_hub` (#2102) This PR introduces the new crate `developer_hub` into the polkadot-sdk repo. The vision for the developer-hub crate is detailed in [this document](https://docs.google.com/document/d/1XLLkFNE8v8HLvZpI2rzsa8N2IN1FcKntc8q-Sc4xBAk/edit?usp=sharing). Screenshot 2023-11-02 at 10 45 48 Other than adding the new crate, it also does the following: * Remove the `substrate` crate, as there is now a unique umbrella crate for multiple things in `developer_hub::polkadot_sdk`. * (backport candidate) A minor change to `frame-support` macros that allows `T::RuntimeOrigin` to also be acceptable as the origin type. * (backport candidate) A minor change to `frame-system` that allows us to deposit events at genesis because now the real genesis config is generated via wasm, and we can safely assume `cfg!(feature = "std")` means only testing. related to #62. * (backport candidate) Introduces a small `read_events_for_pallet` to `frame_system` for easier event reading in tests. * From https://github.com/paritytech/polkadot-sdk-docs/issues/31, it takes action on improving the `pallet::call` docs. * From https://github.com/paritytech/polkadot-sdk-docs/issues/31, it takes action on improving the `UncheckedExtrinsic` docs. ## Way Forward First, a version of this is deployed temporarily [here](https://blog.kianenigma.nl/polkadot-sdk/developer_hub/index.html). I will keep it up to date on a daily basis. ### This Pull Request I see two ways forward: 1. We acknowledge that everything in `developer-hub` is going to be WIP, and merge this asap. We should not yet use links to this crate anywhere. 2. We make this be the feature branch, make PRs against this, and either gradually backport it, or only merge to master once it is done. I am personally in favor of option 1. If we stick to option 2, we need a better way to deploy a staging version of this to gh-pages. ### Issue Tracking The main issues related to the future of `developer_hub` are: - https://github.com/paritytech/polkadot-sdk-docs/issues/31 - https://github.com/paritytech/polkadot-sdk-docs/issues/4 - https://github.com/paritytech/polkadot-sdk-docs/issues/26 - https://github.com/paritytech/polkadot-sdk-docs/issues/32 - https://github.com/paritytech/polkadot-sdk-docs/issues/36 ### After This Pull Request - [ ] create a redirect for https://paritytech.github.io/polkadot-sdk/master/substrate/ - [x] analytics - [ ] link checker - [ ] the matter of publishing, and how all of these relative links for when we do, that is still an open question. There is section on this in the landing page. 
- [ ] updated https://paritytech.github.io/ --------- Co-authored-by: Liam Aharon Co-authored-by: Juan Girini Co-authored-by: bader y Co-authored-by: Sebastian Kunert Co-authored-by: James Wilson Co-authored-by: Michal Kucharczyk <1728078+michalkucharczyk@users.noreply.github.com> --- .gitlab/pipeline/build.yml | 2 +- Cargo.lock | 76 +- Cargo.toml | 5 +- cumulus/parachain-template/runtime/src/lib.rs | 3 +- developer-hub/Cargo.toml | 66 ++ developer-hub/headers/toc.html | 54 ++ .../src/guides/changing_consensus.rs | 1 + .../src/guides/cumulus_enabled_parachain.rs | 1 + developer-hub/src/guides/mod.rs | 25 + .../src/guides/xcm_enabled_parachain.rs | 1 + developer-hub/src/guides/your_first_node.rs | 1 + .../src/guides/your_first_pallet/mod.rs | 738 ++++++++++++++++++ .../guides/your_first_pallet/with_event.rs | 101 +++ .../src/guides/your_first_runtime.rs | 1 + developer-hub/src/lib.rs | 43 + developer-hub/src/meta_contributing.rs | 146 ++++ developer-hub/src/polkadot_sdk/cumulus.rs | 130 +++ .../src/polkadot_sdk/frame_runtime.rs | 179 +++++ developer-hub/src/polkadot_sdk/mod.rs | 134 ++++ developer-hub/src/polkadot_sdk/polkadot.rs | 87 +++ .../src/polkadot_sdk/smart_contracts.rs | 9 + developer-hub/src/polkadot_sdk/substrate.rs | 151 ++++ developer-hub/src/polkadot_sdk/templates.rs | 45 ++ developer-hub/src/polkadot_sdk/xcm.rs | 5 + .../reference_docs/blockchain_scalibility.rs | 0 .../blockchain_state_machines.rs | 29 + .../src/reference_docs/chain_spec_genesis.rs | 4 + developer-hub/src/reference_docs/cli.rs | 7 + .../src/reference_docs/consensus_swapping.rs | 6 + .../src/reference_docs/extrinsic_encoding.rs | 277 +++++++ .../src/reference_docs/fee_less_runtime.rs | 12 + .../frame_benchmarking_weight.rs | 23 + .../reference_docs/frame_composite_enums.rs | 1 + .../src/reference_docs/frame_currency.rs | 8 + .../src/reference_docs/frame_origin.rs | 14 + .../reference_docs/frame_runtime_migration.rs | 9 + .../reference_docs/frame_system_accounts.rs | 8 + developer-hub/src/reference_docs/glossary.rs | 62 ++ .../src/reference_docs/light_nodes.rs | 7 + developer-hub/src/reference_docs/metadata.rs | 1 + developer-hub/src/reference_docs/mod.rs | 99 +++ .../runtime_vs_smart_contract.rs | 6 + .../safe_defensive_programming.rs | 1 + .../src/reference_docs/signed_extensions.rs | 79 ++ .../reference_docs/trait_based_programming.rs | 229 ++++++ .../src/reference_docs/wasm_memory.rs | 7 + .../src/reference_docs/wasm_meta_protocol.rs | 113 +++ docs/mermaid/IA.mmd | 14 + docs/mermaid/extrinsics.mmd | 5 + docs/mermaid/polkadot_sdk_parachain.mmd | 11 + docs/mermaid/polkadot_sdk_polkadot.mmd | 10 + docs/mermaid/polkadot_sdk_substrate.mmd | 8 + docs/mermaid/state.mmd | 16 + docs/mermaid/stf.mmd | 21 + docs/mermaid/stf_simple.mmd | 4 + docs/mermaid/substrate_client_runtime.mmd | 8 +- docs/mermaid/substrate_dev.mmd | 2 + docs/mermaid/substrate_simple.mmd | 2 +- docs/mermaid/substrate_with_frame.mmd | 6 +- .../protocol/src/request_response/mod.rs | 4 +- polkadot/node/primitives/src/lib.rs | 2 +- substrate/Cargo.toml | 30 - substrate/client/allocator/src/lib.rs | 2 +- substrate/client/executor/src/lib.rs | 2 +- substrate/frame/balances/src/lib.rs | 48 +- substrate/frame/bounties/src/migrations/v4.rs | 4 +- .../frame/collective/src/migrations/v4.rs | 4 +- .../election-provider-multi-phase/src/mock.rs | 6 +- .../elections-phragmen/src/migrations/v4.rs | 4 +- .../elections-phragmen/src/migrations/v5.rs | 2 +- .../frame/examples/kitchensink/src/lib.rs | 5 +- substrate/frame/fast-unstake/src/types.rs | 1 + 
substrate/frame/grandpa/src/migrations/v4.rs | 4 +- .../frame/membership/src/migrations/v4.rs | 4 +- .../frame/session/src/historical/offchain.rs | 10 +- substrate/frame/src/lib.rs | 36 +- substrate/frame/support/procedural/src/lib.rs | 46 +- .../procedural/src/pallet/parse/call.rs | 34 +- substrate/frame/support/src/dispatch.rs | 3 +- substrate/frame/support/src/lib.rs | 152 +++- substrate/frame/support/src/migrations.rs | 3 +- substrate/frame/support/src/storage/child.rs | 6 +- substrate/frame/support/src/storage/mod.rs | 6 +- .../deprecated_where_block.stderr | 8 +- .../pallet_ui/call_invalid_origin_type.stderr | 2 +- substrate/frame/system/Cargo.toml | 2 +- substrate/frame/system/src/lib.rs | 24 +- substrate/frame/tips/src/migrations/v4.rs | 4 +- substrate/primitives/api/src/lib.rs | 16 +- .../primitives/npos-elections/src/lib.rs | 5 +- .../runtime-interface/src/pass_by.rs | 2 +- substrate/primitives/runtime/Cargo.toml | 3 + .../runtime/src/generic/checked_extrinsic.rs | 8 +- .../src/generic/unchecked_extrinsic.rs | 32 +- .../runtime/src/offchain/storage_lock.rs | 2 +- substrate/src/lib.rs | 242 ------ 96 files changed, 3410 insertions(+), 471 deletions(-) create mode 100644 developer-hub/Cargo.toml create mode 100644 developer-hub/headers/toc.html create mode 100644 developer-hub/src/guides/changing_consensus.rs create mode 100644 developer-hub/src/guides/cumulus_enabled_parachain.rs create mode 100644 developer-hub/src/guides/mod.rs create mode 100644 developer-hub/src/guides/xcm_enabled_parachain.rs create mode 100644 developer-hub/src/guides/your_first_node.rs create mode 100644 developer-hub/src/guides/your_first_pallet/mod.rs create mode 100644 developer-hub/src/guides/your_first_pallet/with_event.rs create mode 100644 developer-hub/src/guides/your_first_runtime.rs create mode 100644 developer-hub/src/lib.rs create mode 100644 developer-hub/src/meta_contributing.rs create mode 100644 developer-hub/src/polkadot_sdk/cumulus.rs create mode 100644 developer-hub/src/polkadot_sdk/frame_runtime.rs create mode 100644 developer-hub/src/polkadot_sdk/mod.rs create mode 100644 developer-hub/src/polkadot_sdk/polkadot.rs create mode 100644 developer-hub/src/polkadot_sdk/smart_contracts.rs create mode 100644 developer-hub/src/polkadot_sdk/substrate.rs create mode 100644 developer-hub/src/polkadot_sdk/templates.rs create mode 100644 developer-hub/src/polkadot_sdk/xcm.rs create mode 100644 developer-hub/src/reference_docs/blockchain_scalibility.rs create mode 100644 developer-hub/src/reference_docs/blockchain_state_machines.rs create mode 100644 developer-hub/src/reference_docs/chain_spec_genesis.rs create mode 100644 developer-hub/src/reference_docs/cli.rs create mode 100644 developer-hub/src/reference_docs/consensus_swapping.rs create mode 100644 developer-hub/src/reference_docs/extrinsic_encoding.rs create mode 100644 developer-hub/src/reference_docs/fee_less_runtime.rs create mode 100644 developer-hub/src/reference_docs/frame_benchmarking_weight.rs create mode 100644 developer-hub/src/reference_docs/frame_composite_enums.rs create mode 100644 developer-hub/src/reference_docs/frame_currency.rs create mode 100644 developer-hub/src/reference_docs/frame_origin.rs create mode 100644 developer-hub/src/reference_docs/frame_runtime_migration.rs create mode 100644 developer-hub/src/reference_docs/frame_system_accounts.rs create mode 100644 developer-hub/src/reference_docs/glossary.rs create mode 100644 developer-hub/src/reference_docs/light_nodes.rs create mode 100644 
developer-hub/src/reference_docs/metadata.rs create mode 100644 developer-hub/src/reference_docs/mod.rs create mode 100644 developer-hub/src/reference_docs/runtime_vs_smart_contract.rs create mode 100644 developer-hub/src/reference_docs/safe_defensive_programming.rs create mode 100644 developer-hub/src/reference_docs/signed_extensions.rs create mode 100644 developer-hub/src/reference_docs/trait_based_programming.rs create mode 100644 developer-hub/src/reference_docs/wasm_memory.rs create mode 100644 developer-hub/src/reference_docs/wasm_meta_protocol.rs create mode 100644 docs/mermaid/IA.mmd create mode 100644 docs/mermaid/extrinsics.mmd create mode 100644 docs/mermaid/polkadot_sdk_parachain.mmd create mode 100644 docs/mermaid/polkadot_sdk_polkadot.mmd create mode 100644 docs/mermaid/polkadot_sdk_substrate.mmd create mode 100644 docs/mermaid/state.mmd create mode 100644 docs/mermaid/stf.mmd create mode 100644 docs/mermaid/stf_simple.mmd create mode 100644 docs/mermaid/substrate_dev.mmd delete mode 100644 substrate/Cargo.toml delete mode 100644 substrate/src/lib.rs diff --git a/.gitlab/pipeline/build.yml b/.gitlab/pipeline/build.yml index 5c13045706c..d6918173d49 100644 --- a/.gitlab/pipeline/build.yml +++ b/.gitlab/pipeline/build.yml @@ -125,7 +125,7 @@ build-rustdoc: find "$path" -name '*.html' | xargs -I {} -P "$(nproc)" bash -c 'process_file "$@"' _ {} } inject_simple_analytics "./crate-docs" - - echo "" > ./crate-docs/index.html + - echo "" > ./crate-docs/index.html build-implementers-guide: stage: build diff --git a/Cargo.lock b/Cargo.lock index 346ce2f933f..de4471783ab 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4366,6 +4366,45 @@ dependencies = [ "syn 1.0.109", ] +[[package]] +name = "developer-hub" +version = "0.0.1" +dependencies = [ + "cumulus-pallet-aura-ext", + "cumulus-pallet-parachain-system", + "docify", + "frame", + "kitchensink-runtime", + "pallet-aura", + "pallet-default-config-example", + "pallet-examples", + "pallet-timestamp", + "parity-scale-codec", + "sc-cli", + "sc-client-db", + "sc-consensus-aura", + "sc-consensus-babe", + "sc-consensus-beefy", + "sc-consensus-grandpa", + "sc-consensus-manual-seal", + "sc-consensus-pow", + "sc-network", + "sc-rpc", + "sc-rpc-api", + "scale-info", + "simple-mermaid 0.1.0 (git+https://github.com/kianenigma/simple-mermaid.git?branch=main)", + "sp-api", + "sp-core", + "sp-io", + "sp-keyring", + "sp-runtime", + "staging-chain-spec-builder", + "staging-node-cli", + "staging-parachain-info", + "subkey", + "substrate-wasm-builder", +] + [[package]] name = "diff" version = "0.1.13" @@ -5201,7 +5240,7 @@ dependencies = [ "pallet-examples", "parity-scale-codec", "scale-info", - "simple-mermaid", + "simple-mermaid 0.1.0 (git+https://github.com/kianenigma/simple-mermaid.git?rev=e48b187bcfd5cc75111acd9d241f1bd36604344b)", "sp-api", "sp-arithmetic", "sp-block-builder", @@ -11175,9 +11214,9 @@ dependencies = [ [[package]] name = "parity-scale-codec" -version = "3.6.4" +version = "3.6.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dd8e946cc0cc711189c0b0249fb8b599cbeeab9784d83c415719368bb8d4ac64" +checksum = "0dec8a8073036902368c2cdc0387e85ff9a37054d7e7c98e592145e0c92cd4fb" dependencies = [ "arrayvec 0.7.4", "bitvec", @@ -11190,9 +11229,9 @@ dependencies = [ [[package]] name = "parity-scale-codec-derive" -version = "3.6.4" +version = "3.6.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2a296c3079b5fefbc499e1de58dc26c09b1b9a5952d26694ee89f04a43ebbb3e" +checksum = 
"312270ee71e1cd70289dacf597cab7b207aa107d2f28191c2ae45b2ece18a260" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -16644,6 +16683,11 @@ dependencies = [ "wide", ] +[[package]] +name = "simple-mermaid" +version = "0.1.0" +source = "git+https://github.com/kianenigma/simple-mermaid.git?branch=main#e48b187bcfd5cc75111acd9d241f1bd36604344b" + [[package]] name = "simple-mermaid" version = "0.1.0" @@ -17497,6 +17541,7 @@ dependencies = [ name = "sp-runtime" version = "24.0.0" dependencies = [ + "docify", "either", "hash256-std-hasher", "impl-trait-for-tuples", @@ -17507,6 +17552,7 @@ dependencies = [ "scale-info", "serde", "serde_json", + "simple-mermaid 0.1.0 (git+https://github.com/kianenigma/simple-mermaid.git?branch=main)", "sp-api", "sp-application-crypto", "sp-arithmetic", @@ -18293,26 +18339,6 @@ dependencies = [ "sc-cli", ] -[[package]] -name = "substrate" -version = "1.0.0" -dependencies = [ - "frame-support", - "sc-chain-spec", - "sc-cli", - "sc-consensus-aura", - "sc-consensus-babe", - "sc-consensus-beefy", - "sc-consensus-grandpa", - "sc-consensus-manual-seal", - "sc-consensus-pow", - "sc-service", - "simple-mermaid", - "sp-runtime", - "staging-chain-spec-builder", - "subkey", -] - [[package]] name = "substrate-bip39" version = "0.4.4" diff --git a/Cargo.toml b/Cargo.toml index ac474ae5686..a295aca819c 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -8,6 +8,7 @@ license = "GPL-3.0-only" resolver = "2" members = [ + "developer-hub", "bridges/bin/runtime-common", "bridges/modules/grandpa", "bridges/modules/messages", @@ -186,7 +187,6 @@ members = [ "polkadot/xcm/xcm-simulator", "polkadot/xcm/xcm-simulator/example", "polkadot/xcm/xcm-simulator/fuzzer", - "substrate", "substrate/bin/minimal/node", "substrate/bin/minimal/runtime", "substrate/bin/node-template/node", @@ -476,8 +476,7 @@ default-members = [ "polkadot", "substrate/bin/node/cli" ] panic = "unwind" opt-level = 3 -# make sure dev builds with backtrace do -# not slow us down +# make sure dev builds with backtrace do not slow us down [profile.dev.package.backtrace] inherits = "release" diff --git a/cumulus/parachain-template/runtime/src/lib.rs b/cumulus/parachain-template/runtime/src/lib.rs index 7a064e227d4..be76855c05b 100644 --- a/cumulus/parachain-template/runtime/src/lib.rs +++ b/cumulus/parachain-template/runtime/src/lib.rs @@ -513,8 +513,7 @@ impl pallet_parachain_template::Config for Runtime { // Create the runtime by composing the FRAME pallets that were previously configured. construct_runtime!( - pub enum Runtime - { + pub struct Runtime { // System support stuff. System: frame_system = 0, ParachainSystem: cumulus_pallet_parachain_system = 1, diff --git a/developer-hub/Cargo.toml b/developer-hub/Cargo.toml new file mode 100644 index 00000000000..c19d5480ec4 --- /dev/null +++ b/developer-hub/Cargo.toml @@ -0,0 +1,66 @@ +[package] +name = "developer-hub" +description = "The one stop shop for developers of the polakdot-sdk" +license = "GPL-3.0-or-later WITH Classpath-exception-2.0" +homepage = "paritytech.github.io" +repository.workspace = true +authors.workspace = true +edition.workspace = true +# This crate is not publish-able to crates.io for now because of docify. 
+publish = false +version = "0.0.1" + +[dependencies] +# Needed for all FRAME-based code +parity-scale-codec = { version = "3.0.0", default-features = false } +scale-info = { version = "2.6.0", default-features = false } +frame = { path = "../substrate/frame", features = ["runtime", "experimental"] } +pallet-examples = { path = "../substrate/frame/examples" } +pallet-default-config-example = { path = "../substrate/frame/examples/default-config" } + +# How we build docs in rust-docs +simple-mermaid = { git = "https://github.com/kianenigma/simple-mermaid.git", branch = "main" } +docify = "0.2.6" + +# Polkadot SDK deps, typically all should only be scope such that we can link to their doc item. +node-cli = { package = "staging-node-cli", path = "../substrate/bin/node/cli" } +kitchensink-runtime = { path = "../substrate/bin/node/runtime" } +chain-spec-builder = { package = "staging-chain-spec-builder", path = "../substrate/bin/utils/chain-spec-builder" } +subkey = { path = "../substrate/bin/utils/subkey" } + +# Substrate +sc-network = { path = "../substrate/client/network" } +sc-rpc-api = { path = "../substrate/client/rpc-api" } +sc-rpc = { path = "../substrate/client/rpc" } +sc-client-db = { path = "../substrate/client/db" } +sc-cli = { path = "../substrate/client/cli" } +sc-consensus-aura = { path = "../substrate/client/consensus/aura" } +sc-consensus-babe = { path = "../substrate/client/consensus/babe" } +sc-consensus-grandpa = { path = "../substrate/client/consensus/grandpa" } +sc-consensus-beefy = { path = "../substrate/client/consensus/beefy" } +sc-consensus-manual-seal = { path = "../substrate/client/consensus/manual-seal" } +sc-consensus-pow = { path = "../substrate/client/consensus/pow" } +substrate-wasm-builder = { path = "../substrate/utils/wasm-builder" } + +# Cumulus +cumulus-pallet-aura-ext = { path = "../cumulus/pallets/aura-ext" } +cumulus-pallet-parachain-system = { path = "../cumulus/pallets/parachain-system", features = [ + "parameterized-consensus-hook", +] } +parachain-info = { package = "staging-parachain-info", path = "../cumulus/parachains/pallets/parachain-info" } +pallet-aura = { path = "../substrate/frame/aura", default-features = false } +pallet-timestamp = { path = "../substrate/frame/timestamp" } + +# Primitives +sp-io = { path = "../substrate/primitives/io" } +sp-api = { path = "../substrate/primitives/api" } +sp-core = { path = "../substrate/primitives/core" } +sp-keyring = { path = "../substrate/primitives/keyring" } +sp-runtime = { path = "../substrate/primitives/runtime" } + +[dev-dependencies] +parity-scale-codec = "3.6.5" +scale-info = "2.9.0" + +[features] +experimental = [ "pallet-aura/experimental" ] diff --git a/developer-hub/headers/toc.html b/developer-hub/headers/toc.html new file mode 100644 index 00000000000..d4d017eb207 --- /dev/null +++ b/developer-hub/headers/toc.html @@ -0,0 +1,54 @@ + + diff --git a/developer-hub/src/guides/changing_consensus.rs b/developer-hub/src/guides/changing_consensus.rs new file mode 100644 index 00000000000..7ba742f1072 --- /dev/null +++ b/developer-hub/src/guides/changing_consensus.rs @@ -0,0 +1 @@ +//! # Changing Consensus diff --git a/developer-hub/src/guides/cumulus_enabled_parachain.rs b/developer-hub/src/guides/cumulus_enabled_parachain.rs new file mode 100644 index 00000000000..fafd97feb82 --- /dev/null +++ b/developer-hub/src/guides/cumulus_enabled_parachain.rs @@ -0,0 +1 @@ +//! 
# Cumulus Enabled Parachain diff --git a/developer-hub/src/guides/mod.rs b/developer-hub/src/guides/mod.rs new file mode 100644 index 00000000000..21de220a3b8 --- /dev/null +++ b/developer-hub/src/guides/mod.rs @@ -0,0 +1,25 @@ +//! # Polkadot Developer Hub Guides +//! +//! This crate contains a collection of guides that are foundational to the developers of +//! Polkadot SDK. They cover common user-journeys that are traversed in the Polkadot ecosystem. + +/// Write your first simple pallet, learning the most basic features of FRAME along the way. +pub mod your_first_pallet; + +/// Writing your first real [runtime](`crate::reference_docs::wasm_meta_protocol`), and successfully +/// compiling it to [WASM](crate::polkadot_sdk::substrate#wasm-build). +pub mod your_first_runtime; + +/// Running the given runtime with a node. No specific consensus mechanism is used at this stage. +pub mod your_first_node; + +/// How to change the consensus engine of both the node and the runtime. +pub mod changing_consensus; + +/// How to enhance a given runtime and node to be cumulus-enabled, run it as a parachain and connect +/// it to a relay-chain. +pub mod cumulus_enabled_parachain; + +/// How to make a given runtime XCM-enabled, capable of sending messages (`Transact`) between itself +/// and the relay chain to which it is connected. +pub mod xcm_enabled_parachain; diff --git a/developer-hub/src/guides/xcm_enabled_parachain.rs b/developer-hub/src/guides/xcm_enabled_parachain.rs new file mode 100644 index 00000000000..4518cab9342 --- /dev/null +++ b/developer-hub/src/guides/xcm_enabled_parachain.rs @@ -0,0 +1 @@ +//! # XCM Enabled Parachain diff --git a/developer-hub/src/guides/your_first_node.rs b/developer-hub/src/guides/your_first_node.rs new file mode 100644 index 00000000000..d12349c9906 --- /dev/null +++ b/developer-hub/src/guides/your_first_node.rs @@ -0,0 +1 @@ +//! # Your first Node diff --git a/developer-hub/src/guides/your_first_pallet/mod.rs b/developer-hub/src/guides/your_first_pallet/mod.rs new file mode 100644 index 00000000000..0d2cc26d757 --- /dev/null +++ b/developer-hub/src/guides/your_first_pallet/mod.rs @@ -0,0 +1,738 @@ +//! # Currency Pallet +//! +//! By the end of this guide, you will write a small FRAME pallet (see +//! [`crate::polkadot_sdk::frame_runtime`]) that is capable of handling a simple crypto-currency. +//! This pallet will: +//! +//! 1. Allow anyone to mint new tokens into accounts (which is obviously not a great idea for a real +//! system). +//! 2. Allow any user that owns tokens to transfer them to others. +//! 3. Track the total issuance of all tokens at all times. +//! +//! > This guide will build a currency pallet from scratch using only the lowest primitives of +//! > FRAME, and is mainly intended for education, not *applicability*. For example, almost all +//! > FRAME-based runtimes use various techniques to re-use a currency pallet instead of writing +//! > one. Further advanced FRAME-related topics are discussed in [`crate::reference_docs`]. +//! +//! ## Topics Covered +//! +//! The following FRAME topics are covered in this guide. See the documentation of the +//! associated items to know more. +//! +//! - [Storage](frame::pallet_macros::storage) +//! - [Call](frame::pallet_macros::call) +//! - [Event](frame::pallet_macros::event) +//! - [Error](frame::pallet_macros::error) +//! - Basics of testing a pallet +//! - [Constructing a runtime](frame::runtime::prelude::construct_runtime) +//! +//! ## Writing Your First Pallet +//! +//!
You should have studied the following modules as a prelude to this guide: +//! +//! - [`crate::reference_docs::blockchain_state_machines`] +//! - [`crate::reference_docs::trait_based_programming`] +//! - [`crate::polkadot_sdk::frame_runtime`] +//! +//! ### Shell Pallet +//! +//! Consider the following as a "shell pallet". We continue building the rest of this pallet based +//! on this template. +//! +//! [`pallet::config`](frame::pallet_macros::config) and +//! [`pallet::pallet`](frame::pallet_macros::pallet) are both mandatory parts of any pallet. Refer +//! to the documentation of each to get an overview of what they do. +#![doc = docify::embed!("./src/guides/your_first_pallet/mod.rs", shell_pallet)] +//! +//! ### Storage +//! +//! First, we will need to create two onchain storage declarations. +//! +//! One should be a mapping from account-ids to a balance type, and one value that is the total +//! issuance. +// +// For the rest of this guide, we will opt for a balance type of u128. +#![doc = docify::embed!("./src/guides/your_first_pallet/mod.rs", Balance)] +//! +//! The definition of these two storage items, based on [`frame::pallet_macros::storage`] details, +//! is as follows: +#![doc = docify::embed!("./src/guides/your_first_pallet/mod.rs", TotalIssuance)] +#![doc = docify::embed!("./src/guides/your_first_pallet/mod.rs", Balances)] +//! +//! ### Dispatchables +//! +//! Next, we will define the dispatchable functions. As per [`frame::pallet_macros::call`], these +//! will be defined as normal `fn`s attached to `struct Pallet`. +#![doc = docify::embed!("./src/guides/your_first_pallet/mod.rs", impl_pallet)] +//! +//! The logic of the functions is self-explanatory. Instead, we will focus on the FRAME-related +//! details: +//! +//! - Where do `T::AccountId` and `T::RuntimeOrigin` come from? These are both defined in +//! [`frame::prelude::frame_system::Config`], therefore we can access them in `T`. +//! - What is `ensure_signed`, and what does it do with the aforementioned `T::RuntimeOrigin`? This +//! is outside the scope of this guide, and you can learn more about it in the origin reference +//! document ([`crate::reference_docs::frame_origin`]). For now, you should only know the +//! signature of the function: it takes a generic `T::RuntimeOrigin` and returns a +//! `Result`. So by the end of this function call, we know that this dispatchable +//! was signed by `who`. +#![doc = docify::embed!("../substrate/frame/system/src/lib.rs", ensure_signed)] +//! +//! +//! - Where does `mutate`, `get` and `insert` and other storage APIs come from? All of them are +//! explained in the corresponding `type`, for example, for `Balances::::insert`, you can look +//! into [`frame::prelude::StorageMap::insert`]. +//! +//! - The return type of all dispatchable functions is [`frame::prelude::DispatchResult`]: +#![doc = docify::embed!("../substrate/frame/support/src/dispatch.rs", DispatchResult)] +//! +//! Which is more or less a normal Rust `Result`, with a custom [`frame::prelude::DispatchError`] as +//! the `Err` variant. We won't cover this error in detail here, but importantly you should know +//! that there is an `impl From<&'static string> for DispatchError` provided (see +//! [here](`frame::prelude::DispatchError#impl-From<%26'static+str>-for-DispatchError`)). Therefore, +//! we can use basic string literals as our error type and `.into()` them into `DispatchError`. +//! +//! - Why are all `get` and `mutate` functions returning an `Option`? This is the default behavior +//! 
of FRAME storage APIs. You can learn more about how to override this by looking into +//! [`frame::pallet_macros::storage`], and +//! [`frame::prelude::ValueQuery`]/[`frame::prelude::OptionQuery`]. +//! +//! ### Improving Errors +//! +//! How we handle errors in the above snippets is fairly rudimentary. Let's look at how this can be +//! improved. First, we can use [`frame::prelude::ensure`] to express the error slightly better. +//! This macro will call `.into()` under the hood. +#![doc = docify::embed!("./src/guides/your_first_pallet/mod.rs", transfer_better)] +//! +//! Moreover, you will learn in the [Safe Defensive Programming +//! section](crate::reference_docs::safe_defensive_programming) that it is always recommended to use +//! safe arithmetic operations in your runtime. By using [`frame::traits::CheckedSub`], we can not +//! only take a step in that direction, but also improve the error handling and make it slightly more +//! ergonomic. +#![doc = docify::embed!("./src/guides/your_first_pallet/mod.rs", transfer_better_checked)] +//! +//! This is more or less all the logic that there is to this basic currency pallet! +//! +//! ### Your First (Test) Runtime +//! +//! Next, we create a "test runtime" in order to test our pallet. Recall from +//! [`crate::polkadot_sdk::frame_runtime`] that a runtime is a collection of pallets, expressed +//! through [`frame::runtime::prelude::construct_runtime`]. All runtimes also have to include +//! [`frame::prelude::frame_system`]. So we expect to see a runtime with two pallets, `frame_system` +//! and the one we just wrote. +#![doc = docify::embed!("./src/guides/your_first_pallet/mod.rs", runtime)] +//! +//! > [`frame::pallet_macros::derive_impl`] is a FRAME feature that enables developers to have +//! > defaults for associated types. +//! +//! Recall that within our pallet, (almost) all blocks of code are generic over `<T: Config>`. And, +//! because `trait Config: frame_system::Config`, we can get access to all items in `Config` (or +//! `frame_system::Config`) using `T::NameOfItem`. This is all within the boundaries of how Rust +//! traits and generics work. If unfamiliar with this pattern, read +//! [`crate::reference_docs::trait_based_programming`] before going further. +//! +//! Crucially, a typical FRAME runtime contains a `struct Runtime`. The main role of this `struct` +//! is to implement the `trait Config` of all pallets. That is, anywhere within your pallet code +//! where you see `<T: Config>` (read: *"some type `T` that implements `Config`"*), in the runtime, +//! it can be replaced with `<Runtime>`, because `Runtime` implements `Config` of all pallets, as we +//! see above. +//! +//! Another way to think about this is that within a pallet, a lot of types are "unknown" and we +//! only know that they will be provided at some later point. For example, when you write +//! `T::AccountId` (which is short for `<T as frame_system::Config>::AccountId`) in your pallet, +//! you are in fact saying "*Some type `AccountId` that will be known later*". That "later" is in +//! fact when you specify these types when you implement all `Config` traits for `Runtime`. +//! +//! As you see above, `frame_system::Config` is setting the `AccountId` to `u64`. Of course, a real +//! runtime will not use this type, and instead use a proper type like a 32-byte standard +//! public key. This is a HUGE benefit that FRAME developers can tap into: through the framework +//! being so generic, different types can always be customized to simple things when needed. +//! +//!
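+//! > As a quick, illustrative sketch of this substitution (the helper function below is
+//! > hypothetical and not part of the pallet we are building):
+//! >
+//! > ```ignore
+//! > // Inside the pallet, code is written once, generic over some `T: Config`:
+//! > fn free_balance_of<T: Config>(who: &T::AccountId) -> Option<Balance> {
+//! > 	Balances::<T>::get(who)
+//! > }
+//! >
+//! > // In the test runtime above, `Runtime` implements `Config` and sets `AccountId = u64`,
+//! > // so the very same function can be invoked as `free_balance_of::<Runtime>(&1u64)`.
+//! > ```
+//!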
> Imagine how hard it would have been if all tests had to use a real 32-byte account id, as +//! > opposed to just a u64 number 🙈. +//! +//! ### Your First Test +//! +//! The above is all you need to execute the dispatchables of your pallet. The last thing you need +//! to learn is that all of your pallet testing code should be wrapped in +//! [`frame::testing_prelude::TestState`]. This is a type that provides access to an in-memory state +//! to be used in our tests. +#![doc = docify::embed!("./src/guides/your_first_pallet/mod.rs", first_test)] +//! +//! In the first test, we simply assert that there is no total issuance, and no balance associated +//! with account `1`. Then, we mint some balance into `1`, and re-check. +//! +//! As noted above, the `T::AccountId` is now `u64`. Moreover, `Runtime` is replacing `<T: Config>`. +//! This is why for example you see `Balances::<Runtime>::get(..)`. Finally, notice that the +//! dispatchables are simply functions that can be called on top of the `Pallet` struct. +//! +//! TODO: hard to explain exactly `RuntimeOrigin::signed(1)` at this point. +//! +//! Congratulations! You have written your first pallet and tested it! Next, we learn a few optional +//! steps to improve our pallet. +//! +//! ## Improving the Currency Pallet +//! +//! ### Better Test Setup +//! +//! Idiomatic FRAME pallets often use the builder pattern to define their initial state. +//! +//! > The Polkadot Blockchain Academy's Rust entrance exam has a +//! > [section](https://github.com/Polkadot-Blockchain-Academy/pba-qualifier-exam/blob/main/src/m_builder.rs) +//! > on this that you can use to learn the Builder Pattern. +//! +//! Let's see how we can implement a better test setup using this pattern. First, we define a +//! `struct StateBuilder`. +#![doc = docify::embed!("./src/guides/your_first_pallet/mod.rs", StateBuilder)] +//! +//! This struct is meant to contain the same list of accounts and balances that we want to have at +//! the beginning of each block. We hardcoded this to `let accounts = vec![(1, 100), (2, 100)];` so +//! far. Then, if desired, we attach a default value for this struct. +#![doc = docify::embed!("./src/guides/your_first_pallet/mod.rs", default_state_builder)] +//! +//! Like any other builder pattern, we attach functions to the type to mutate its internal +//! properties. +#![doc = docify::embed!("./src/guides/your_first_pallet/mod.rs", impl_state_builder_add)] +//! +//! Finally --the useful part-- we write our own custom `build_and_execute` function on +//! this type. This function will do multiple things: +//! +//! 1. It would consume `self` to produce our `TestState` based on the properties that we attached +//! to `self`. +//! 2. It would execute any test function that we pass in as a closure. +//! 3. A nifty trick, this allows our test setup to have some code that is executed both before and +//! after each test. For example, in this test, we do some additional checking about the +//! correctness of the `TotalIssuance`. We leave it up to you as an exercise to learn why the +//! assertion should always hold, and how it is checked. +#![doc = docify::embed!("./src/guides/your_first_pallet/mod.rs", impl_state_builder_build)] +//! +//! We can write tests that specifically check the initial state, making sure our `StateBuilder` +//! is working exactly as intended. +#![doc = docify::embed!("./src/guides/your_first_pallet/mod.rs", state_builder_works)] +#![doc = docify::embed!("./src/guides/your_first_pallet/mod.rs", state_builder_add_balance)] +//! +//! ### More Tests +//! +//!
Now that we have a more ergonomic test setup, let's see what a well-written test for transfer and +//! mint would look like. +#![doc = docify::embed!("./src/guides/your_first_pallet/mod.rs", transfer_works)] +#![doc = docify::embed!("./src/guides/your_first_pallet/mod.rs", mint_works)] +//! +//! It is always a good idea to build a mental model where you write *at least* one test for each +//! "success path" of a dispatchable, and one test for each "failure path", such as: +#![doc = docify::embed!("./src/guides/your_first_pallet/mod.rs", transfer_from_non_existent_fails)] +//! +//! We leave it up to you to write a test that triggers the `InsufficientBalance` error. +//! +//! ### Event and Error +//! +//! Our pallet is mainly missing two parts that are common in most FRAME pallets: Events and +//! Errors. First, let's understand what each is. +//! +//! - **Error**: The static string-based error scheme we used so far is good for readability, but it +//! has a few drawbacks. These string literals will bloat the final wasm blob, and are relatively +//! heavy to transmit and encode/decode. Moreover, it is easy to mistype them by one character. +//! FRAME errors are exactly a solution to maintain readability, whilst fixing the drawbacks +//! mentioned. In short, we use an enum to represent different variants of our error. These +//! variants are then mapped in an efficient way (using only `u8` indices) to +//! [`sp_runtime::DispatchError::Module`]. Read more about this in [`frame::pallet_macros::error`]. +//! +//! - **Event**: Events are akin to the return type of dispatchables. They should represent what +//! happened at the end of a dispatch operation. Therefore, the convention is to use past tense +//! for event names (eg. `SomethingHappened`). This allows other sub-systems or external parties +//! (eg. a light-node, a DApp) to listen to particular events happening, without needing to +//! re-execute the whole state transition function. +//! +//! TODO: both need to be improved a lot at the pallet-macro rust-doc level. Also my explanation +//! of event is probably not the best. +//! +//! With the explanation out of the way, let's see how these components can be added. Both follow a +//! fairly familiar syntax: normal Rust enums, with an extra `#[frame::event/error]` attribute +//! attached. +#![doc = docify::embed!("./src/guides/your_first_pallet/mod.rs", Event)] +#![doc = docify::embed!("./src/guides/your_first_pallet/mod.rs", Error)] +//! +//! One slightly custom part of this is the `#[pallet::generate_deposit(pub(super) fn +//! deposit_event)]` part. Without going into too much detail, in order for a pallet to emit events +//! to the rest of the system, it needs to do two things: +//! +//! 1. Declare a type in its `Config` that refers to the overarching event type of the runtime. In +//! short, by doing this, the pallet is expressing an important bound: `type RuntimeEvent: +//! From<Event<Self>>`. Read: a `RuntimeEvent` exists, and it can be created from the local `enum +//! Event` of this pallet. This enables the pallet to convert its `Event` into `RuntimeEvent`, and +//! store it where needed. +//! +//! 2. But, doing this conversion and storing is too much to expect each pallet to define. FRAME +//! provides a default way of storing events, and this is what `pallet::generate_deposit` is doing. +#![doc = docify::embed!("./src/guides/your_first_pallet/mod.rs", config_v2)] +//! +//! > These `Runtime*` types are better explained in +//! > [`crate::reference_docs::frame_composite_enums`]. +//! +//!
Then, we can rewrite the `transfer` dispatchable as such: +#![doc = docify::embed!("./src/guides/your_first_pallet/mod.rs", transfer_v2)] +//! +//! Then, notice how now we would need to provide this `type RuntimeEvent` in our test runtime +//! setup. +#![doc = docify::embed!("./src/guides/your_first_pallet/mod.rs", runtime_v2)] +//! +//! In this snippet, the actual `RuntimeEvent` type (right hand side of `type RuntimeEvent = +//! RuntimeEvent`) is generated by `construct_runtime`. An interesting way to inspect this type is +//! to see its definition in rust-docs: +//! [`crate::guides::your_first_pallet::pallet_v2::tests::runtime_v2::RuntimeEvent`]. +//! +//! +//! +//! ## What Next? +//! +//! The following topics where used in this guide, but not covered in depth. It is suggested to +//! study them subsequently: +//! +//! - [`crate::reference_docs::safe_defensive_programming`]. +//! - [`crate::reference_docs::frame_origin`]. +//! - [`crate::reference_docs::frame_composite_enums`]. +//! - The pallet we wrote in this guide was using `dev_mode`, learn more in +//! [`frame::pallet_macros::config`]. +//! - Learn more about the individual pallet items/macros, such as event and errors and call, in +//! [`frame::pallet_macros`]. + +#[docify::export] +#[frame::pallet(dev_mode)] +pub mod shell_pallet { + use frame::prelude::*; + + #[pallet::config] + pub trait Config: frame_system::Config {} + + #[pallet::pallet] + pub struct Pallet(_); +} + +#[frame::pallet(dev_mode)] +pub mod pallet { + use frame::prelude::*; + + #[docify::export] + pub type Balance = u128; + + #[pallet::config] + pub trait Config: frame_system::Config {} + + #[pallet::pallet] + pub struct Pallet(_); + + #[docify::export] + /// Single storage item, of type `Balance`. + #[pallet::storage] + pub type TotalIssuance = StorageValue<_, Balance>; + + #[docify::export] + /// A mapping from `T::AccountId` to `Balance` + #[pallet::storage] + pub type Balances = StorageMap<_, _, T::AccountId, Balance>; + + #[docify::export(impl_pallet)] + #[pallet::call] + impl Pallet { + /// An unsafe mint that can be called by anyone. Not a great idea. + pub fn mint_unsafe( + origin: T::RuntimeOrigin, + dest: T::AccountId, + amount: Balance, + ) -> DispatchResult { + // ensure that this is a signed account, but we don't really check `_anyone`. + let _anyone = ensure_signed(origin)?; + + // update the balances map. Notice how all `` remains as ``. + Balances::::mutate(dest, |b| *b = Some(b.unwrap_or(0) + amount)); + // update total issuance. + TotalIssuance::::mutate(|t| *t = Some(t.unwrap_or(0) + amount)); + + Ok(()) + } + + /// Transfer `amount` from `origin` to `dest`. + pub fn transfer( + origin: T::RuntimeOrigin, + dest: T::AccountId, + amount: Balance, + ) -> DispatchResult { + let sender = ensure_signed(origin)?; + + // ensure sender has enough balance, and if so, calculate what is left after `amount`. + let sender_balance = Balances::::get(&sender).ok_or("NonExistentAccount")?; + if sender_balance < amount { + return Err("InsufficientBalance".into()) + } + let reminder = sender_balance - amount; + + // update sender and dest balances. 
+ Balances::::mutate(dest, |b| *b = Some(b.unwrap_or(0) + amount)); + Balances::::insert(&sender, reminder); + + Ok(()) + } + } + + #[allow(unused)] + impl Pallet { + #[docify::export] + pub fn transfer_better( + origin: T::RuntimeOrigin, + dest: T::AccountId, + amount: Balance, + ) -> DispatchResult { + let sender = ensure_signed(origin)?; + + let sender_balance = Balances::::get(&sender).ok_or("NonExistentAccount")?; + ensure!(sender_balance >= amount, "InsufficientBalance"); + let reminder = sender_balance - amount; + + // .. snip + Ok(()) + } + + #[docify::export] + /// Transfer `amount` from `origin` to `dest`. + pub fn transfer_better_checked( + origin: T::RuntimeOrigin, + dest: T::AccountId, + amount: Balance, + ) -> DispatchResult { + let sender = ensure_signed(origin)?; + + let sender_balance = Balances::::get(&sender).ok_or("NonExistentAccount")?; + let reminder = sender_balance.checked_sub(amount).ok_or("InsufficientBalance")?; + + // .. snip + Ok(()) + } + } + + #[cfg(any(test, doc))] + pub(crate) mod tests { + use crate::guides::your_first_pallet::pallet::*; + use frame::testing_prelude::*; + + #[docify::export] + mod runtime { + use super::*; + // we need to reference our `mod pallet` as an identifier to pass to + // `construct_runtime`. + use crate::guides::your_first_pallet::pallet as pallet_currency; + + construct_runtime!( + pub struct Runtime { + // ---^^^^^^ This is where `struct Runtime` is defined. + System: frame_system, + Currency: pallet_currency, + } + ); + + #[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] + impl frame_system::Config for Runtime { + type Block = MockBlock; + // within pallet we just said `::AccountId`, now we + // finally specified it. + type AccountId = u64; + } + + // our simple pallet has nothing to be configured. + impl pallet_currency::Config for Runtime {} + } + + pub(crate) use runtime::*; + + #[allow(unused)] + #[docify::export] + fn new_test_state_basic() -> TestState { + let mut state = TestState::new_empty(); + let accounts = vec![(1, 100), (2, 100)]; + state.execute_with(|| { + for (who, amount) in &accounts { + Balances::::insert(who, amount); + TotalIssuance::::mutate(|b| *b = Some(b.unwrap_or(0) + amount)); + } + }); + + state + } + + #[docify::export] + pub(crate) struct StateBuilder { + balances: Vec<(::AccountId, Balance)>, + } + + #[docify::export(default_state_builder)] + impl Default for StateBuilder { + fn default() -> Self { + Self { balances: vec![(1, 100), (2, 100)] } + } + } + + #[docify::export(impl_state_builder_add)] + impl StateBuilder { + fn add_balance( + mut self, + who: ::AccountId, + amount: Balance, + ) -> Self { + self.balances.push((who, amount)); + self + } + } + + #[docify::export(impl_state_builder_build)] + impl StateBuilder { + pub(crate) fn build_and_execute(self, test: impl FnOnce() -> ()) { + let mut ext = TestState::new_empty(); + ext.execute_with(|| { + for (who, amount) in &self.balances { + Balances::::insert(who, amount); + TotalIssuance::::mutate(|b| *b = Some(b.unwrap_or(0) + amount)); + } + }); + + ext.execute_with(test); + + // assertions that must always hold + ext.execute_with(|| { + assert_eq!( + Balances::::iter().map(|(_, x)| x).sum::(), + TotalIssuance::::get().unwrap_or_default() + ); + }) + } + } + + #[docify::export] + #[test] + fn first_test() { + TestState::new_empty().execute_with(|| { + // We expect account 1 to have no funds. 
+ assert_eq!(Balances::::get(&1), None); + assert_eq!(TotalIssuance::::get(), None); + + // mint some funds into 1 + assert_ok!(Pallet::::mint_unsafe(RuntimeOrigin::signed(1), 1, 100)); + + // re-check the above + assert_eq!(Balances::::get(&1), Some(100)); + assert_eq!(TotalIssuance::::get(), Some(100)); + }) + } + + #[docify::export] + #[test] + fn state_builder_works() { + StateBuilder::default().build_and_execute(|| { + assert_eq!(Balances::::get(&1), Some(100)); + assert_eq!(Balances::::get(&2), Some(100)); + assert_eq!(Balances::::get(&3), None); + assert_eq!(TotalIssuance::::get(), Some(200)); + }); + } + + #[docify::export] + #[test] + fn state_builder_add_balance() { + StateBuilder::default().add_balance(3, 42).build_and_execute(|| { + assert_eq!(Balances::::get(&3), Some(42)); + assert_eq!(TotalIssuance::::get(), Some(242)); + }) + } + + #[test] + #[should_panic] + fn state_builder_duplicate_genesis_fails() { + StateBuilder::default() + .add_balance(3, 42) + .add_balance(3, 43) + .build_and_execute(|| { + assert_eq!(Balances::::get(&3), None); + assert_eq!(TotalIssuance::::get(), Some(242)); + }) + } + + #[docify::export] + #[test] + fn mint_works() { + StateBuilder::default().build_and_execute(|| { + // given the initial state, when: + assert_ok!(Pallet::::mint_unsafe(RuntimeOrigin::signed(1), 2, 100)); + + // then: + assert_eq!(Balances::::get(&2), Some(200)); + assert_eq!(TotalIssuance::::get(), Some(300)); + + // given: + assert_ok!(Pallet::::mint_unsafe(RuntimeOrigin::signed(1), 3, 100)); + + // then: + assert_eq!(Balances::::get(&3), Some(100)); + assert_eq!(TotalIssuance::::get(), Some(400)); + }); + } + + #[docify::export] + #[test] + fn transfer_works() { + StateBuilder::default().build_and_execute(|| { + // given the the initial state, when: + assert_ok!(Pallet::::transfer(RuntimeOrigin::signed(1), 2, 50)); + + // then: + assert_eq!(Balances::::get(&1), Some(50)); + assert_eq!(Balances::::get(&2), Some(150)); + assert_eq!(TotalIssuance::::get(), Some(200)); + + // when: + assert_ok!(Pallet::::transfer(RuntimeOrigin::signed(2), 1, 50)); + + // then: + assert_eq!(Balances::::get(&1), Some(100)); + assert_eq!(Balances::::get(&2), Some(100)); + assert_eq!(TotalIssuance::::get(), Some(200)); + }); + } + + #[docify::export] + #[test] + fn transfer_from_non_existent_fails() { + StateBuilder::default().build_and_execute(|| { + // given the the initial state, when: + assert_err!( + Pallet::::transfer(RuntimeOrigin::signed(3), 1, 10), + "NonExistentAccount" + ); + + // then nothing has changed. + assert_eq!(Balances::::get(&1), Some(100)); + assert_eq!(Balances::::get(&2), Some(100)); + assert_eq!(Balances::::get(&3), None); + assert_eq!(TotalIssuance::::get(), Some(200)); + }); + } + } +} + +#[frame::pallet(dev_mode)] +pub mod pallet_v2 { + use super::pallet::Balance; + use frame::prelude::*; + + #[docify::export(config_v2)] + #[pallet::config] + pub trait Config: frame_system::Config { + /// The overarching event type of the runtime. + type RuntimeEvent: From> + + IsType<::RuntimeEvent> + + TryInto>; + } + + #[pallet::pallet] + pub struct Pallet(_); + + #[pallet::storage] + pub type Balances = StorageMap<_, _, T::AccountId, Balance>; + + #[pallet::storage] + pub type TotalIssuance = StorageValue<_, Balance>; + + #[docify::export] + #[pallet::error] + pub enum Error { + /// Account does not exist. + NonExistentAccount, + /// Account does not have enough balance. 
+ InsufficientBalance, + } + + #[docify::export] + #[pallet::event] + #[pallet::generate_deposit(pub(super) fn deposit_event)] + pub enum Event { + /// A transfer succeeded. + Transferred { from: T::AccountId, to: T::AccountId, amount: Balance }, + } + + #[pallet::call] + impl Pallet { + #[docify::export(transfer_v2)] + pub fn transfer( + origin: T::RuntimeOrigin, + dest: T::AccountId, + amount: Balance, + ) -> DispatchResult { + let sender = ensure_signed(origin)?; + + // ensure sender has enough balance, and if so, calculate what is left after `amount`. + let sender_balance = + Balances::::get(&sender).ok_or(Error::::NonExistentAccount)?; + let reminder = + sender_balance.checked_sub(amount).ok_or(Error::::InsufficientBalance)?; + + Balances::::mutate(&dest, |b| *b = Some(b.unwrap_or(0) + amount)); + Balances::::insert(&sender, reminder); + + Self::deposit_event(Event::::Transferred { from: sender, to: dest, amount }); + + Ok(()) + } + } + + #[cfg(any(test, doc))] + pub mod tests { + use super::{super::pallet::tests::StateBuilder, *}; + use frame::testing_prelude::*; + + #[docify::export] + pub mod runtime_v2 { + use super::*; + use crate::guides::your_first_pallet::pallet_v2 as pallet_currency; + + construct_runtime!( + pub struct Runtime { + System: frame_system, + Currency: pallet_currency, + } + ); + + #[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] + impl frame_system::Config for Runtime { + type Block = MockBlock; + type AccountId = u64; + } + + impl pallet_currency::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + } + } + + pub(crate) use runtime_v2::*; + + #[docify::export(transfer_works_v2)] + #[test] + fn transfer_works() { + StateBuilder::default().build_and_execute(|| { + // skip the genesis block, as events are not deposited there and we need them for + // the final assertion. + System::set_block_number(1); + + // given the the initial state, when: + assert_ok!(Pallet::::transfer(RuntimeOrigin::signed(1), 2, 50)); + + // then: + assert_eq!(Balances::::get(&1), Some(50)); + assert_eq!(Balances::::get(&2), Some(150)); + assert_eq!(TotalIssuance::::get(), Some(200)); + + // now we can also check that an event has been deposited: + assert_eq!( + System::read_events_for_pallet::>(), + vec![Event::Transferred { from: 1, to: 2, amount: 50 }] + ); + }); + } + } +} diff --git a/developer-hub/src/guides/your_first_pallet/with_event.rs b/developer-hub/src/guides/your_first_pallet/with_event.rs new file mode 100644 index 00000000000..a65aac324f0 --- /dev/null +++ b/developer-hub/src/guides/your_first_pallet/with_event.rs @@ -0,0 +1,101 @@ +#[frame::pallet(dev_mode)] +pub mod pallet { + use frame::prelude::*; + + #[docify::export] + pub type Balance = u128; + + #[pallet::config] + pub trait Config: frame_system::Config {} + + #[pallet::pallet] + pub struct Pallet(_); + + #[docify::export] + /// Single storage item, of type `Balance`. + #[pallet::storage] + pub type TotalIssuance = StorageValue<_, Balance>; + + #[docify::export] + /// A mapping from `T::AccountId` to `Balance` + #[pallet::storage] + pub type Balances = StorageMap<_, _, T::AccountId, Balance>; + + #[docify::export(impl_pallet)] + #[pallet::call] + impl Pallet { + /// An unsafe mint that can be called by anyone. Not a great idea. + pub fn mint_unsafe( + origin: T::RuntimeOrigin, + dest: T::AccountId, + amount: Balance, + ) -> DispatchResult { + // ensure that this is a signed account, but we don't really check `_anyone`. 
+ let _anyone = ensure_signed(origin)?; + + // update the balances map. Notice how all `` remains as ``. + Balances::::mutate(dest, |b| *b = Some(b.unwrap_or(0) + amount)); + // update total issuance. + TotalIssuance::::mutate(|t| *t = Some(t.unwrap_or(0) + amount)); + + Ok(()) + } + + /// Transfer `amount` from `origin` to `dest`. + pub fn transfer( + origin: T::RuntimeOrigin, + dest: T::AccountId, + amount: Balance, + ) -> DispatchResult { + let sender = ensure_signed(origin)?; + + // ensure sender has enough balance, and if so, calculate what is left after `amount`. + let sender_balance = Balances::::get(&sender).ok_or("NonExistentAccount")?; + if sender_balance < amount { + return Err("NotEnoughBalance".into()) + } + let reminder = sender_balance - amount; + + // update sender and dest balances. + Balances::::mutate(dest, |b| *b = Some(b.unwrap_or(0) + amount)); + Balances::::insert(&sender, reminder); + + Ok(()) + } + } + + #[allow(unused)] + impl Pallet { + #[docify::export] + pub fn transfer_better( + origin: T::RuntimeOrigin, + dest: T::AccountId, + amount: Balance, + ) -> DispatchResult { + let sender = ensure_signed(origin)?; + + let sender_balance = Balances::::get(&sender).ok_or("NonExistentAccount")?; + ensure!(sender_balance >= amount, "NotEnoughBalance"); + let reminder = sender_balance - amount; + + // .. snip + Ok(()) + } + + #[docify::export] + /// Transfer `amount` from `origin` to `dest`. + pub fn transfer_better_checked( + origin: T::RuntimeOrigin, + dest: T::AccountId, + amount: Balance, + ) -> DispatchResult { + let sender = ensure_signed(origin)?; + + let sender_balance = Balances::::get(&sender).ok_or("NonExistentAccount")?; + let reminder = sender_balance.checked_sub(amount).ok_or("NotEnoughBalance")?; + + // .. snip + Ok(()) + } + } +} diff --git a/developer-hub/src/guides/your_first_runtime.rs b/developer-hub/src/guides/your_first_runtime.rs new file mode 100644 index 00000000000..3e02ef1b1b2 --- /dev/null +++ b/developer-hub/src/guides/your_first_runtime.rs @@ -0,0 +1 @@ +//! # Your first Runtime diff --git a/developer-hub/src/lib.rs b/developer-hub/src/lib.rs new file mode 100644 index 00000000000..eb71865ca44 --- /dev/null +++ b/developer-hub/src/lib.rs @@ -0,0 +1,43 @@ +//! # Developer Hub +//! +//! The Polkadot SDK Developer Hub. +//! +//! This crate is a *minimal*, but *always-accurate* source of information for those wishing to +//! build on the Polkadot SDK. +//! +//! > **Work in Progress**: This crate is under heavy development. Expect content to be moved and +//! > changed. Do not use links to this crate yet. See [`meta_contributing`] for more information. +//! +//! ## Getting Started +//! +//! We suggest the following reading sequence: +//! +//! - Start by learning about the the [`polkadot_sdk`], its structure and context. +//! - Then, head over the [`guides`]. This modules contains in-depth guides about the most important +//! user-journeys of the Polkadot SDK. +//! - Whilst reading the guides, you might find back-links to [`crate::reference_docs`]. +//! - Finally, is the parent website of this crate that contains the +//! list of further tools related to the Polkadot SDK. +//! +//! ## Information Architecture +//! +//! This section paints a picture over the high-level information architecture of this crate. +#![doc = simple_mermaid::mermaid!("../../docs/mermaid/IA.mmd")] +#![allow(rustdoc::invalid_html_tags)] // TODO: remove later. https://github.com/paritytech/polkadot-sdk-docs/issues/65 +#![allow(rustdoc::bare_urls)] // TODO: remove later. 
https://github.com/paritytech/polkadot-sdk-docs/issues/65
+#![warn(rustdoc::broken_intra_doc_links)]
+#![warn(rustdoc::private_intra_doc_links)]
+
+/// Meta information about this crate, how it is built, what principles dictate its evolution and
+/// how one can contribute to it.
+pub mod meta_contributing;
+
+/// In-depth guides about the most common components of the Polkadot SDK. They are slightly more
+/// high level and broad than reference docs.
+pub mod guides;
+/// An introduction to the Polkadot SDK. Read this module to learn about the structure of the SDK,
+/// the tools that are provided as a part of it, and to gain a high level understanding of each.
+pub mod polkadot_sdk;
+/// Reference documents covering in-depth topics across the Polkadot SDK. It is suggested to read
+/// these on-demand, while you are going through the [`guides`] or other content.
+pub mod reference_docs;
diff --git a/developer-hub/src/meta_contributing.rs b/developer-hub/src/meta_contributing.rs
new file mode 100644
index 00000000000..70a23f82583
--- /dev/null
+++ b/developer-hub/src/meta_contributing.rs
@@ -0,0 +1,146 @@
+//! # Contribution
+//!
+//! The following sections cover more detailed information about this crate and how it should be
+//! maintained.
+//!
+//! ## Why Rust Docs?
+//!
+//! We acknowledge that a blockchain-based system, particularly a cutting-edge one like the Polkadot
+//! SDK, is a complex and rapidly evolving software artifact. This makes the task of documenting
+//! it externally extremely difficult, especially with regards to making sure it is up-to-date.
+//!
+//! Consequently, we argue that the best hedge against this is to move as much of the documentation
+//! near the source code as possible. This would further incentivize developers to keep the
+//! documentation up-to-date, as the overhead is reduced by making sure everything is in one
+//! repository, and everything being in `.rs` files.
+//!
+//! > This is not to say that a more visually appealing version of this crate (for example as an
+//! > `md-book`) cannot exist, but it would be outside the scope of this crate.
+//!
+//! Moreover, we acknowledge that a major pain point has been not only outdated *concepts*, but also
+//! *outdated code*. For this, we commit to making sure no code-snippet in this crate is left as
+//! `///ignore` or `///no_compile`, making sure all code snippets are self-contained, compile-able,
+//! and correct at every single revision of the entire repository.
+//!
+//! > This also allows us to have a clear versioning on the entire content of this crate. For every
+//! commit of the Polkadot SDK, there would be one version of this crate that is guaranteed to be
+//! correct.
+//!
+//! > To achieve this, we often use [`docify`](https://github.com/sam0x17/docify), a nifty invention
+//! > of `@sam0x17`.
+//!
+//! Also see: .
+//!
+//! ## Scope
+//!
+//! The above would NOT be attainable if we don't acknowledge that the scope of this crate MUST be
+//! limited, or else its maintenance burden would be infeasible or not worthwhile. In short, future
+//! maintainers should always strive to keep the content of this repository as minimal as possible.
+//! Some of the following principles are specifically there to be the guidance for this.
+//!
+//! ## Principles
+//!
+//! The following guidelines are meant to be the guiding torch of those who contribute to this
+//! crate.
+//!
+//! 1. 🔺 Ground Up: Information should be laid out in the most ground-up fashion. The lowest level
+//! (i.e.
"ground") is Rust-docs. The highest level (i.e. "up") is "outside of this crate". In +//! between lies [`reference_docs`] and [`guides`], from low to high. The point of this principle +//! is to document as much of the information as possible in the lower level media, as it is +//! easier to maintain and more reachable. Then, use excessive linking to back-link when writing +//! in a more high level. +//! +//! > A prime example of this, the details of the FRAME storage APIs should NOT be explained in a +//! > high level tutorial. They should be explained in the rust-doc of the corresponding type or +//! > macro. +//! +//! 2. 🧘 Less is More: For reasons mentioned [above](#crate::why-rust-docs), the more concise this +//! crate is, the better. +//! 3. √ Don’t Repeat Yourself – DRY: A summary of the above two points. Authors should always +//! strive to avoid any duplicate information. Every concept should ideally be documented in +//! *ONE* place and one place only. This makes the task of maintaining topics significantly +//! easier. +//! +//! > A prime example of this, the list of CLI arguments of a particular binary should not be +//! > documented in multiple places across this crate. It should be only be documented in the +//! > corresponding crate (e.g. `sc_cli`). +//! +//! > Moreover, this means that as a contributor, **it is your responsibility to have a grasp over +//! > what topics are already covered in this crate, and how you can build on top of the information +//! > that they already pose, rather than repeating yourself**. +//! +//! For more details about documenting guidelines, see: +//! +//! +//! #### Example: Explaining `#[pallet::call]` +//! +//!

+//!
+//! Let's consider the seemingly simple example of explaining to someone dead-simple code of a FRAME
+//! call and see how we can use the above principles.
+//!
+//!
+//! ```
+//! #[frame::pallet(dev_mode)]
+//! pub mod pallet {
+//! #   use frame::prelude::*;
+//! #   #[pallet::config]
+//! #   pub trait Config: frame_system::Config {}
+//! #   #[pallet::pallet]
+//! #   pub struct Pallet<T>(_);
+//!     #[pallet::call]
+//!     impl<T: Config> Pallet<T> {
+//!         pub fn a_simple_call(origin: OriginFor<T>, data: u32) -> DispatchResult {
+//!             ensure!(data > 10, "SomeStaticString");
+//!             todo!();
+//!         }
+//!     }
+//! }
+//! ```
+//!
+//! * Before even getting started, what is with all of this `<T: Config>`? We link to
+//! [`crate::reference_docs::trait_based_programming`].
+//! * First, the name. Why is this called `pallet::call`? This goes back to `enum Call`, which is
+//! explained in [`crate::reference_docs::frame_composite_enums`]. Build on top of this!
+//! * Then, what is `origin`? Just an account id? [`crate::reference_docs::frame_origin`].
+//! * Then, what is `DispatchResult`? Why is this called *dispatch*? Probably something that can be
+//! explained in the documentation of [`frame::prelude::DispatchResult`].
+//! * Why is `"SomeStaticString"` a valid error? Because there is an implementation for it that you
+//! can see [here](frame::prelude::DispatchError#impl-From<%26'static+str>-for-DispatchError).
+//!
+//!
+//! All of these are examples of underlying information that a contributor should:
+//!
+//! 1. Try and create them as they are going along.
+//! 2. Back-link to if they already exist.
+//!
+//! Of course, all of this is not set in stone as an either/or rule. Sometimes, it is necessary to
+//! rephrase a concept in a new context.
+//!
+//! +//! ## `docs.substrate.io` +//! +//! This crate is meant to gradually replace `docs.substrate.io`. As any content is added here, the +//! corresponding counter-part should be marked as deprecated, as described +//! [here](https://github.com/paritytech/polkadot-sdk-docs/issues/26). +//! +//! ## `crates.io` and Publishing +//! +//! As it stands now, this crate cannot be published to crates.io because of its use of +//! [workspace-level `docify`](https://github.com/sam0x17/docify/issues/22). For now, we accept this +//! compromise, but in the long term, we should work towards finding a way to maintain different +//! revisions of this crate. +//! +//! ## How to Build +//! +//! To build this crate properly, with with right HTML headers injected, run: +//! +//! ```no_compile +//! RUSTDOCFLAGS="--html-in-header $(pwd)/developer-hub/headers/toc.html" cargo doc -p developer-hub +//! ``` +//! +//! adding `--no-deps` would speed up the process while development. If even faster build time for +//! docs is needed, you can temporarily remove most of the substrate/cumulus dependencies that are +//! only used for linking purposes. diff --git a/developer-hub/src/polkadot_sdk/cumulus.rs b/developer-hub/src/polkadot_sdk/cumulus.rs new file mode 100644 index 00000000000..07a48c92d80 --- /dev/null +++ b/developer-hub/src/polkadot_sdk/cumulus.rs @@ -0,0 +1,130 @@ +//! # Cumulus +//! +//! Substrate provides a framework ([FRAME]) through which a blockchain node and runtime can easily +//! be created. Cumulus aims to extend the same approach to creation of Polkadot parachains. +//! +//! > Cumulus clouds are shaped sort of like dots; together they form a system that is intricate, +//! > beautiful and functional. +//! +//! ## Example: Runtime +//! +//! A Cumulus-based runtime is fairly similar to other [FRAME]-based runtimes. Most notably, the +//! following changes are applied to a normal FRAME-based runtime to make it a Cumulus-based +//! runtime: +//! +//! #### Cumulus Pallets +//! +//! A parachain runtime should use a number of pallets that are provided by Cumulus and Substrate. +//! Notably: +//! +//! - [`frame-system`](frame::prelude::frame_system), like all FRAME-based runtimes. +//! - [`cumulus_pallet_parachain_system`] +//! - [`parachain_info`] +#![doc = docify::embed!("./src/polkadot_sdk/cumulus.rs", system_pallets)] +//! +//! Given that all Cumulus-based runtimes use a simple Aura-based consensus mechanism, the following +//! pallets also need to be added: +//! +//! - [`pallet_timestamp`] +//! - [`pallet_aura`] +//! - [`cumulus_pallet_aura_ext`] +#![doc = docify::embed!("./src/polkadot_sdk/cumulus.rs", consensus_pallets)] +//! +//! +//! Finally, a separate macro, similar to +//! [`impl_runtime_api`](frame::runtime::prelude::impl_runtime_apis), which creates the default set +//! of runtime APIs, will generate the parachain runtime's validation runtime API, also known as +//! parachain validation function (PVF). Without this API, the relay chain is unable to validate +//! blocks produced by our parachain. +#![doc = docify::embed!("./src/polkadot_sdk/cumulus.rs", validate_block)] +//! +//! --- +//! +//! 
[FRAME]: crate::polkadot_sdk::frame_runtime + +#![deny(rustdoc::broken_intra_doc_links)] +#![deny(rustdoc::private_intra_doc_links)] + +#[cfg(test)] +mod tests { + mod runtime { + pub use frame::{ + deps::sp_consensus_aura::sr25519::AuthorityId as AuraId, prelude::*, + runtime::prelude::*, testing_prelude::*, + }; + + #[docify::export(CR)] + construct_runtime!( + pub struct Runtime { + // system-level pallets. + System: frame_system, + Timestamp: pallet_timestamp, + ParachainSystem: cumulus_pallet_parachain_system, + ParachainInfo: parachain_info, + + // parachain consensus support -- mandatory. + Aura: pallet_aura, + AuraExt: cumulus_pallet_aura_ext, + } + ); + + #[docify::export] + mod system_pallets { + use super::*; + + #[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] + impl frame_system::Config for Runtime { + type Block = MockBlock; + type OnSetCode = cumulus_pallet_parachain_system::ParachainSetCode; + } + + impl cumulus_pallet_parachain_system::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + type OnSystemEvent = (); + type SelfParaId = parachain_info::Pallet; + type OutboundXcmpMessageSource = (); + type XcmpMessageHandler = (); + type ReservedDmpWeight = (); + type ReservedXcmpWeight = (); + type CheckAssociatedRelayNumber = + cumulus_pallet_parachain_system::RelayNumberMonotonicallyIncreases; + type ConsensusHook = cumulus_pallet_aura_ext::FixedVelocityConsensusHook< + Runtime, + 6000, // relay chain block time + 1, + 1, + >; + type WeightInfo = (); + type DmpQueue = frame::traits::EnqueueWithOrigin<(), sp_core::ConstU8<0>>; + } + + impl parachain_info::Config for Runtime {} + } + + #[docify::export] + mod consensus_pallets { + use super::*; + + impl pallet_aura::Config for Runtime { + type AuthorityId = AuraId; + type DisabledValidators = (); + type MaxAuthorities = ConstU32<100_000>; + type AllowMultipleBlocksPerSlot = ConstBool; + #[cfg(feature = "experimental")] + type SlotDuration = pallet_aura::MinimumPeriodTimesTwo; + } + + #[docify::export(timestamp)] + #[derive_impl(pallet_timestamp::config_preludes::TestDefaultConfig as pallet_timestamp::DefaultConfig)] + impl pallet_timestamp::Config for Runtime {} + + impl cumulus_pallet_aura_ext::Config for Runtime {} + } + + #[docify::export(validate_block)] + cumulus_pallet_parachain_system::register_validate_block! { + Runtime = Runtime, + BlockExecutor = cumulus_pallet_aura_ext::BlockExecutor::, + } + } +} diff --git a/developer-hub/src/polkadot_sdk/frame_runtime.rs b/developer-hub/src/polkadot_sdk/frame_runtime.rs new file mode 100644 index 00000000000..d9cb1fd531b --- /dev/null +++ b/developer-hub/src/polkadot_sdk/frame_runtime.rs @@ -0,0 +1,179 @@ +//! # FRAME +//! +//! ```no_compile +//! ______ ______ ________ ___ __ __ ______ +//! /_____/\ /_____/\ /_______/\ /__//_//_/\ /_____/\ +//! \::::_\/_\:::_ \ \ \::: _ \ \\::\| \| \ \\::::_\/_ +//! \:\/___/\\:(_) ) )_\::(_) \ \\:. \ \\:\/___/\ +//! \:::._\/ \: __ `\ \\:: __ \ \\:.\-/\ \ \\::___\/_ +//! \:\ \ \ \ `\ \ \\:.\ \ \ \\. \ \ \ \\:\____/\ +//! \_\/ \_\/ \_\/ \__\/\__\/ \__\/ \__\/ \_____\/ +//! ``` +//! +//! > **F**ramework for **R**untime **A**ggregation of **M**odularized **E**ntities: Substrate's +//! > State Transition Function (Runtime) Framework. +//! +//! ## Introduction +//! +//! As described in [`crate::reference_docs::wasm_meta_protocol`], at a high-level Substrate-based +//! blockchains are composed of two parts: +//! +//! 1. A *runtime* which represents the state transition function (i.e. 
"Business Logic") of a +//! blockchain, and is encoded as a WASM blob. +//! 2. A node whose primary purpose is to execute the given runtime. +#![doc = simple_mermaid::mermaid!("../../../docs/mermaid/substrate_simple.mmd")] +//! +//! *FRAME is the Substrate's framework of choice to build a runtime.* +//! +//! FRAME is composed of two major components, **pallets** and a **runtime**. +//! +//! ## Pallets +//! +//! A pallet is a unit of encapsulated logic. It has a clearly defined responsibility and can be +//! linked to other pallets. In order to be reusable, pallets shipped with FRAME strive to only care +//! about its own responsibilities and make as few assumptions about the general runtime as +//! possible. A pallet is analogous to a _module_ in the runtime. +//! +//! A pallet is defined as a `mod pallet` wrapped by the [`frame::pallet`] macro. Within this macro, +//! pallet components/parts can be defined. Most notable of these parts are: +//! +//! - [Config](frame::pallet_macros::config), allowing a pallet to make itself configurable and +//! generic over types, values and such. +//! - [Storage](frame::pallet_macros::storage), allowing a pallet to define onchain storage. +//! - [Dispatchable function](frame::pallet_macros::call), allowing a pallet to define extrinsics +//! that are callable by end users, from the outer world. +//! - [Events](frame::pallet_macros::event), allowing a pallet to emit events. +//! - [Errors](frame::pallet_macros::error), allowing a pallet to emit well-formed errors. +//! +//! Some of these pallet components resemble the building blocks of a smart contract. While both +//! models are programming state transition functions of blockchains, there are crucial differences +//! between the two. See [`crate::reference_docs::runtime_vs_smart_contract`] for more. +//! +//! Most of these components are defined using macros, the full list of which can be found in +//! [`frame::pallet_macros`]. +//! +//! ### Example +//! +//! The following examples showcases a minimal pallet. +#![doc = docify::embed!("src/polkadot_sdk/frame_runtime.rs", pallet)] +//! +//! +//! A runtime is a collection of pallets that are amalgamated together. Each pallet typically has +//! some configurations (exposed as a `trait Config`) that needs to be *specified* in the runtime. +//! This is done with [`frame::runtime::prelude::construct_runtime`]. +//! +//! A (real) runtime that actually wishes to compile to WASM needs to also implement a set of +//! runtime-apis. These implementation can be specified using the +//! [`frame::runtime::prelude::impl_runtime_apis`] macro. +//! +//! ### Example +//! +//! The following example shows a (test) runtime that is composing the pallet demonstrated above, +//! next to the [`frame::prelude::frame_system`] pallet, into a runtime. +#![doc = docify::embed!("src/polkadot_sdk/frame_runtime.rs", runtime)] +//! +//! ## More Examples +//! +//! You can find more FRAME examples that revolve around specific features at [`pallet_examples`]. +//! +//! ## Alternatives 🌈 +//! +//! There is nothing in the Substrate's node side code-base that mandates the use of FRAME. While +//! FRAME makes it very simple to write Substrate-based runtimes, it is by no means intended to be +//! the only one. At the end of the day, any WASM blob that exposes the right set of runtime APIs is +//! a valid Runtime form the point of view of a Substrate client (see +//! [`crate::reference_docs::wasm_meta_protocol`]). Notable examples are: +//! +//! 
* writing a runtime in pure Rust, as done in [this template](https://github.com/JoshOrndorff/frameless-node-template). +//! * writing a runtime in AssemblyScript,as explored in [this project](https://github.com/LimeChain/subsembly). + +#[cfg(test)] +mod tests { + use frame::prelude::*; + + /// A FRAME based pallet. This `mod` is the entry point for everything else. All + /// `#[pallet::xxx]` macros must be defined in this `mod`. Although, frame also provides an + /// experimental feature to break these parts into different `mod`s. See [`pallet_examples`] for + /// more. + #[docify::export] + #[frame::pallet(dev_mode)] + pub mod pallet { + use super::*; + + /// The configuration trait of a pallet. Mandatory. Allows a pallet to receive types at a + /// later point from the runtime that wishes to contain it. It allows the pallet to be + /// parameterized over both types and values. + #[pallet::config] + pub trait Config: frame_system::Config { + /// A type that is not known now, but the runtime that will contain this pallet will + /// know it later, therefore we define it here as an associated type. + type RuntimeEvent: IsType<::RuntimeEvent> + + From>; + + /// A parameterize-able value that we receive later via the `Get<_>` trait. + type ValueParameter: Get; + + /// Similar to [`Config::ValueParameter`], but using `const`. Both are functionally + /// equal, but offer different tradeoffs. + const ANOTHER_VALUE_PARAMETER: u32; + } + + /// A mandatory struct in each pallet. All functions callable by external users (aka. + /// transactions) must be attached to this type (see [`frame::pallet_macros::call`]). For + /// convenience, internal (private) functions can also be attached to this type. + #[pallet::pallet] + pub struct Pallet(PhantomData); + + /// The events tha this pallet can emit. + #[pallet::event] + pub enum Event {} + + /// A storage item that this pallet contains. This will be part of the state root trie/root + /// of the blockchain. + #[pallet::storage] + pub type Value = StorageValue; + + /// All *dispatchable* call functions (aka. transactions) are attached to `Pallet` in a + /// `impl` block. + #[pallet::call] + impl Pallet { + /// This will be callable by external users, and has two u32s as a parameter. + pub fn some_dispatchable( + _origin: OriginFor, + _param: u32, + _other_para: u32, + ) -> DispatchResult { + Ok(()) + } + } + } + + /// A simple runtime that contains the above pallet and `frame_system`, the mandatory pallet of + /// all runtimes. This runtime is for testing, but it shares a lot of similarities with a *real* + /// runtime. + #[docify::export] + pub mod runtime { + use super::pallet as pallet_example; + use frame::{prelude::*, testing_prelude::*}; + + // The major macro that amalgamates pallets into `struct Runtime` + construct_runtime!( + pub struct Runtime { + System: frame_system, + Example: pallet_example, + } + ); + + // These `impl` blocks specify the parameters of each pallet's `trait Config`. + #[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] + impl frame_system::Config for Runtime { + type Block = MockBlock; + } + + impl pallet_example::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + type ValueParameter = ConstU32<42>; + const ANOTHER_VALUE_PARAMETER: u32 = 42; + } + } +} diff --git a/developer-hub/src/polkadot_sdk/mod.rs b/developer-hub/src/polkadot_sdk/mod.rs new file mode 100644 index 00000000000..38ce5c404ff --- /dev/null +++ b/developer-hub/src/polkadot_sdk/mod.rs @@ -0,0 +1,134 @@ +//! 
# Polkadot SDK +//! +//! [Polkadot SDK](https://github.com/paritytech/polkadot-sdk) provides the main resources needed to +//! start building on the [Polkadot network](https://polkadot.network), a scalable, multi-chain +//! blockchain platform that enables different blockchains to securely interoperate. +//! +//! [![StackExchange](https://img.shields.io/badge/StackExchange-Polkadot%20and%20Substrate-222222?logo=stackexchange)](https://substrate.stackexchange.com/) +//! +//! [![awesomeDot](https://img.shields.io/badge/polkadot-awesome-e6007a?logo=polkadot)](https://github.com/Awsmdot/awesome-dot) +//! [![wiki](https://img.shields.io/badge/polkadot-wiki-e6007a?logo=polkadot)](https://wiki.polkadot.network/) +//! [![forum](https://img.shields.io/badge/polkadot-forum-e6007a?logo=polkadot)](https://forum.polkadot.network/) +//! +//! [![RFCs](https://img.shields.io/badge/fellowship-RFCs-e6007a?logo=polkadot)](https://github.com/polkadot-fellows/rfcs) +//! [![Runtime](https://img.shields.io/badge/fellowship-runtimes-e6007a?logo=polkadot)](https://github.com/polkadot-fellows/runtimes) +//! [![Manifesto](https://img.shields.io/badge/fellowship-manifesto-e6007a?logo=polkadot)](https://github.com/polkadot-fellows/manifesto) +//! +//! ## Getting Started +//! +//! The primary way to get started with the Polkadot SDK is to start writing a FRAME-based runtime. +//! See: +//! +//! * [`polkadot`], to understand what is Polkadot as a development platform. +//! * [`substrate`], for an overview of what Substrate as the main blockchain framework of Polkadot +//! SDK. +//! * [`frame`], to learn about how to write blockchain applications aka. "App Chains". +//! * Continue with the [`developer_hub`'s "getting started"](crate#getting-started). +//! +//! ## Components +//! +//! #### Substrate +//! +//! [![Substrate-license](https://img.shields.io/badge/License-GPL3%2FApache2.0-blue)](https://github.com/paritytech/polkadot-sdk/blob/master/substrate/LICENSE-APACHE2) +//! [![GitHub +//! Repo](https://img.shields.io/badge/github-substrate-2324CC85)](https://github.com/paritytech/polkadot-sdk/blob/master/substrate) +//! +//! [`substrate`] is the base blockchain framework used to power the Polkadot SDK. It is a full +//! toolkit to create sovereign blockchains, including but not limited to those who connect to +//! Polkadot as parachains. +//! +//! #### FRAME +//! +//! [![Substrate-license](https://img.shields.io/badge/License-Apache2.0-blue)](https://github.com/paritytech/polkadot-sdk/blob/master/substrate/LICENSE-APACHE2) +//! [![GitHub +//! Repo](https://img.shields.io/badge/github-frame-2324CC85)](https://github.com/paritytech/polkadot-sdk/blob/master/substrate/frame) +//! +//! [`frame`] is the framework used to create Substrate-based application logic, aka. runtimes. +//! Learn more about the distinction of a runtime and node in +//! [`reference_docs::wasm_meta_protocol`]. +//! +//! #### Cumulus +//! +//! [![Cumulus-license](https://img.shields.io/badge/License-GPL3-blue)](https://github.com/paritytech/polkadot-sdk/blob/master/cumulus/LICENSE) +//! [![GitHub +//! Repo](https://img.shields.io/badge/github-cumulus-white)](https://github.com/paritytech/polkadot-sdk/blob/master/cumulus) +//! +//! [`cumulus`] transforms FRAME-based runtimes into Polkadot-compatible parachain runtimes, and +//! Substrate-based nodes into Polkadot/Parachain-compatible nodes. +//! +//! #### XCM +//! +//! 
[![XCM-license](https://img.shields.io/badge/License-GPL3-blue)](https://github.com/paritytech/polkadot-sdk/blob/master/polkadot/LICENSE) +//! [![GitHub +//! Repo](https://img.shields.io/badge/github-XCM-e6007a?logo=polkadot)](https://github.com/paritytech/polkadot-sdk/blob/master/polkadot/xcm) +//! +//! [`xcm`], short for "cross consensus message", is the primary format that is used for +//! communication between parachains, but is intended to be extensible to other use cases as well. +//! +//! #### Polkadot +//! +//! [![Polkadot-license](https://img.shields.io/badge/License-GPL3-blue)](https://github.com/paritytech/polkadot-sdk/blob/master/polkadot/LICENSE) +//! [![GitHub +//! Repo](https://img.shields.io/badge/github-polkadot-e6007a?logo=polkadot)](https://github.com/paritytech/polkadot-sdk/blob/master/polkadot) +//! +//! [`polkadot`] is an implementation of a Polkadot node in Rust, by `@paritytech`. The Polkadot +//! runtimes are located under the +//! [`polkadot-fellows/runtimes`](https://github.com/polkadot-fellows/runtimes) repository. +//! +//! ### Summary +//! +//! The following diagram summarizes how some of the components of Polkadot SDK work together: +#![doc = simple_mermaid::mermaid!("../../../docs/mermaid/polkadot_sdk_substrate.mmd")] +//! +//! A Substrate-based chain is a blockchain composed of a runtime and a node. As noted above, the +//! runtime is the application logic of the blockchain, and the node is everything else. +//! See [`crate::reference_docs::wasm_meta_protocol`] for an in-depth explanation of this. The +//! former is built with [`frame`], and the latter is built with rest of Substrate. +//! +//! > You can think of a Substrate-based chain as a white-labeled blockchain. +#![doc = simple_mermaid::mermaid!("../../../docs/mermaid/polkadot_sdk_polkadot.mmd")] +//! Polkadot is itself a Substrate-based chain, composed of the exact same two components. It has +//! specialized logic in both the node and the runtime side, but it is not "special" in any way. +//! +//! A parachain is a "special" Substrate-based chain, whereby both the node and the runtime +//! components have became "Polkadot-aware" using Cumulus. +#![doc = simple_mermaid::mermaid!("../../../docs/mermaid/polkadot_sdk_parachain.mmd")] +//! +//! ## Notable Upstream Crates +//! +//! - [`parity-scale-codec`](https://github.com/paritytech/parity-scale-codec) +//! - [`parity-db`](https://github.com/paritytech/parity-db) +//! - [`trie`](https://github.com/paritytech/trie) +//! - [`parity-common`](https://github.com/paritytech/parity-common) +//! +//! ## Trophy Section: Notable Downstream Projects +//! +//! A list of projects and tools in the blockchain ecosystem that one way or another parts of the +//! Polkadot SDK: +//! +//! * [Polygon's spin-off, Avail](https://github.com/availproject/avail) +//! * [Cardano Partner Chains](https://iohk.io/en/blog/posts/2023/11/03/partner-chains-are-coming-to-cardano/) +//! * [Starknet's Madara Sequencer](https://github.com/keep-starknet-strange/madara) +//! +//! [`substrate`]: crate::polkadot_sdk::substrate +//! [`frame`]: crate::polkadot_sdk::frame_runtime +//! [`cumulus`]: crate::polkadot_sdk::cumulus +//! [`polkadot`]: crate::polkadot_sdk::polkadot +//! [`xcm`]: crate::polkadot_sdk::xcm + +/// Lean about Cumulus, the framework that transforms [`substrate`]-based chains into +/// [`polkadot`]-enabled parachains. +pub mod cumulus; +/// Learn about FRAME, the framework used to build Substrate runtimes. +pub mod frame_runtime; +/// Learn about Polkadot as a platform. 
+pub mod polkadot;
+/// Learn about different ways through which smart contracts can be utilized on top of Substrate,
+/// and in the Polkadot ecosystem.
+pub mod smart_contracts;
+/// Learn about Substrate, the main blockchain framework used in the Polkadot ecosystem.
+pub mod substrate;
+/// Index of all the templates that can act as a first scaffold for a new project.
+pub mod templates;
+/// Learn about XCM, the de-facto communication language between different consensus systems.
+pub mod xcm;
diff --git a/developer-hub/src/polkadot_sdk/polkadot.rs b/developer-hub/src/polkadot_sdk/polkadot.rs
new file mode 100644
index 00000000000..d157a660e56
--- /dev/null
+++ b/developer-hub/src/polkadot_sdk/polkadot.rs
@@ -0,0 +1,87 @@
+//! # Polkadot
+//!
+//! Implementation of the Polkadot node/host in Rust.
+//!
+//! ## Learn More and Get Involved
+//!
+//! - [Polkadot Forum](https://forum.polkadot.network/)
+//! - [Polkadot Parachains](https://parachains.info/)
+//! - [Polkadot (multi-chain) Explorer](https://subscan.io/)
+//! - Polkadot Fellowship
+//!   - [Manifesto](https://github.com/polkadot-fellows/manifesto)
+//!   - [Runtimes](https://github.com/polkadot-fellows/runtimes)
+//!   - [RFCs](https://github.com/polkadot-fellows/rfcs)
+//! - [Polkadot Specs](spec.polkadot.network)
+//! - [The Polkadot Parachain Host Implementers' Guide](https://paritytech.github.io/polkadot-sdk/book/)
+//! - [Whitepaper](https://www.polkadot.network/whitepaper/)
+//!
+//! ## Alternative Node Implementations 🌈
+//!
+//! - [Smoldot](https://crates.io/crates/smoldot-light). Polkadot light node/client.
+//! - [KAGOME](https://github.com/qdrvm/kagome). C++ implementation of the Polkadot host.
+//! - [Gossamer](https://github.com/ChainSafe/gossamer). Golang implementation of the Polkadot host.
+//!
+//! ## Platform
+//!
+//! In this section, we examine what platform Polkadot exactly provides to developers.
+//!
+//! ### Polkadot White Paper
+//!
+//! The original vision of Polkadot (everything in the whitepaper, which was eventually called
+//! **Polkadot 1.0**) revolves around the following arguments:
+//!
+//! * The future is multi-chain, because we need different chains with different specializations to
+//! achieve widespread goals.
+//! * In other words, no single chain is good enough to achieve all goals.
+//! * A multi-chain future will inadvertently suffer from fragmentation of economic security.
+//! * This stake fragmentation will make communication over consensus systems with varying security
+//! levels inherently unsafe.
+//!
+//! Polkadot's answer to the above is:
+//!
+//! > The chains of the future must have a way to share their economic security, whilst maintaining
+//! > their execution and governance sovereignty. These chains are called "Parachains".
+//!
+//! * Shared Security: The idea of shared economic security sits at the core of Polkadot. Polkadot
+//! enables different parachains* to pool their economic security from Polkadot (i.e. "*Relay
+//! Chain*").
+//! * (heterogeneous) Sharded Execution: Yet, each parachain is free to have its own execution logic
+//! (runtime), which also encompasses governance and sovereignty. Moreover, Polkadot ensures the
+//! correct execution of all parachains, without having all of its validators re-execute all
+//! parachain blocks. When seen from this perspective, the fact that Polkadot executes different
+//! parachains means it is a platform that has fully delivered (the holy grail of) "Full Execution
+//! Sharding". TODO: link to approval checking article.
https://github.com/paritytech/polkadot-sdk-docs/issues/66 +//! * A framework to build blockchains: In order to materialize the ecosystem of parachains, an easy +//! blockchain framework must exist. This is [Substrate](crate::polkadot_sdk::substrate), +//! [FRAME](crate::polkadot_sdk::frame_runtime) and [Cumulus](crate::polkadot_sdk::cumulus). +//! * A communication language between blockchains: In order for these blockchains to communicate, +//! they need a shared language. [XCM](crate::polkadot_sdk::xcm) is one such language, and the one +//! that is most endorsed in the Polkadot ecosystem. +//! +//! > Note that the interoperability promised by Polkadot is unparalleled in that any two parachains +//! > connected to Polkadot have the same security and can have much better guarantees about the +//! > security of the recipient of any message. TODO: weakest link in bridges systems. https://github.com/paritytech/polkadot-sdk-docs/issues/66 +//! +//! Polkadot delivers the above vision, alongside a flexible means for parachains to schedule +//! themselves with the Relay Chain. To achieve this, Polkadot has been developed with an +//! architecture similar to that of a computer. Polkadot Relay Chain has a number of "cores". Each +//! core is (in simple terms) capable of progressing 1 parachain at a time. For example, a parachain +//! can schedule itself on a single core for 5 relay chain blocks. +//! +//! Within the scope of Polkadot 1.x, two main scheduling ways have been considered: +//! +//! * Long term Parachains, obtained through locking a sum of DOT in an auction system. +//! * on-demand Parachains, purchased through paying DOT to the relay-chain whenever needed. +//! +//! ### The Future +//! +//! After delivering Polkadot 1.x, the future of Polkadot as a protocol and platform is in the hands +//! of the community and the fellowship. This is happening most notable through the RFC process. +//! Some of the RFCs that do alter Polkadot as a platform and have already passed are as follows: +//! +//! - RFC#1: [Agile-coretime](https://github.com/polkadot-fellows/RFCs/blob/main/text/0001-agile-coretime.md): +//! Agile periodic-sale-based model for assigning Coretime on the Polkadot Ubiquitous Computer. +//! - RFC#5: [Coretime-interface](https://github.com/polkadot-fellows/RFCs/blob/main/text/0005-coretime-interface.md): +//! Interface for manipulating the usage of cores on the Polkadot Ubiquitous Computer. +// TODO: add more context and explanations about Polkadot as the Ubiquitous Computer and related +// tech. https://github.com/paritytech/polkadot-sdk-docs/issues/66 diff --git a/developer-hub/src/polkadot_sdk/smart_contracts.rs b/developer-hub/src/polkadot_sdk/smart_contracts.rs new file mode 100644 index 00000000000..a4916f9c921 --- /dev/null +++ b/developer-hub/src/polkadot_sdk/smart_contracts.rs @@ -0,0 +1,9 @@ +//! # Smart Contracts +//! +//! TODO: @cmichi https://github.com/paritytech/polkadot-sdk-docs/issues/56 +//! +//! - WASM and EVM based, pallet-contracts and pallet-evm. +//! - single-daap-chain, transition from ink! to FRAME. +//! - Link to `use.ink` +//! - Link to [`crate::reference_docs::runtime_vs_smart_contract`]. +//! - https://use.ink/migrate-ink-contracts-to-polkadot-frame-parachain/ diff --git a/developer-hub/src/polkadot_sdk/substrate.rs b/developer-hub/src/polkadot_sdk/substrate.rs new file mode 100644 index 00000000000..399f18d03be --- /dev/null +++ b/developer-hub/src/polkadot_sdk/substrate.rs @@ -0,0 +1,151 @@ +//! # Substrate +//! +//! 
Substrate is a Rust framework for building blockchains in a modular and extensible way. While in
+//! itself un-opinionated, it is the main engine behind the Polkadot ecosystem.
+//!
+//! ## Overview, Philosophy
+//!
+//! Substrate approaches blockchain development with an acknowledgement of a few self-evident
+//! truths:
+//!
+//! 1. Society and technology evolve.
+//! 2. Humans are fallible.
+//!
+//! This makes the task of designing a correct, safe and long-lasting blockchain system hard.
+//!
+//! Nonetheless, in striving to achieve this goal, Substrate embraces the following:
+//!
+//! 1. Use of **Rust** as a modern and safe programming language, which limits human error through
+//! various means, most notably memory and type safety.
+//! 2. Substrate is written from the ground-up with a *generic, modular and extensible* design. This
+//! ensures that software components can be easily swapped and upgraded. Examples of this are the
+//! multiple consensus mechanisms provided by Substrate, as listed below.
+//! 3. Lastly, the final blockchain system created with the above properties needs to be
+//! upgradeable. In order to achieve this, Substrate is designed as a meta-protocol, whereby the
+//! application logic of the blockchain (called "Runtime") is encoded as a WASM blob, and is
+//! stored in the state. The rest of the system (called "node") acts as the executor of the WASM
+//! blob.
+//!
+//! In essence, the meta-protocol of all Substrate-based chains is the "Runtime as WASM blob"
+//! accord. This enables the Runtime to become inherently upgradeable, crucially without forks. The
+//! upgrade is merely a matter of the WASM blob being changed in the state, which is, in principle,
+//! the same as updating an account's balance. Learn more about this in detail in
+//! [`crate::reference_docs::wasm_meta_protocol`].
+//!
+//! > A great analogy for Substrate is the following: a Substrate node is a gaming console, and a
+//! > WASM runtime, possibly created with FRAME, is the game being inserted into the console.
+//!
+//! [`frame`], Substrate's default runtime development library, takes the above safety practices
+//! even further by embracing a declarative programming model whereby correctness is enhanced and
+//! the system is highly configurable through parameterization. Learn more about this in
+//! [`crate::reference_docs::trait_based_programming`].
+//!
+//! ## How to Get Started
+//!
+//! Substrate offers different options across the spectrum of technical freedom <-> development ease.
+//!
+//! * The easiest way to use Substrate is to use one of the templates (some of which are listed at
+//! [`crate::polkadot_sdk::templates`]) and only tweak the parameters of the runtime or node. This
+//! allows you to launch a blockchain in minutes, but is limited in technical freedom.
+//! * Next, most developers wish to develop their custom runtime modules, for which the de-facto way
+//! is [`frame`](crate::polkadot_sdk::frame_runtime).
+//! * Finally, Substrate is highly configurable at the node side as well, but this is the most
+//! technically demanding.
+//!
+//! > A notable Substrate-based blockchain that has built both custom FRAME pallets and custom
+//! > node-side components is .
+#![doc = simple_mermaid::mermaid!("../../../docs/mermaid/substrate_dev.mmd")]
+//!
+//! ## Structure
+//!
+//! Substrate contains a large number of crates, therefore it is useful to have an overview of what
+//! they are, and how they are organized. In broad terms, these crates are divided into three
+//!
categories: +//! +//! * `sc-*` (short for *Substrate-client*) crates, located under `./client` folder. These are all +//! the crates that lead to the node software. Notable examples [`sc_network`], various consensus +//! crates, RPC ([`sc_rpc_api`]) and database ([`sc_client_db`]), all of which are expected to +//! reside in the node side. +//! * `sp-*` (short for *substrate-primitives*) crates, located under `./primitives` folder. These +//! are crates that facilitate both the node and the runtime, but are not opinionated about what +//! framework is using for building the runtime. Notable examples are [`sp_api`] and [`sp_io`], +//! which form the communication bridge between the node and runtime. +//! * `pallet-*` and `frame-*` crates, located under `./frame` folder. These are the crates related +//! to FRAME. See [`frame`] for more information. +//! +//! ### WASM Build +//! +//! Many of the Substrate crates, such as entire `sp-*`, need to compile to both WASM (when a WASM +//! runtime is being generated) and native (for example, when testing). To achieve this, Substrate +//! follows the convention of the Rust community, and uses a `feature = "std"` to signify that a +//! crate is being built with the standard library, and is built for native. Otherwise, it is built +//! for `no_std`. +//! +//! This can be summarized in `#![cfg_attr(not(feature = "std"), no_std)]`, which you can often find +//! in any Substrate-based runtime. +//! +//! Substrate-based runtimes use [`substrate_wasm_builder`] in their `build.rs` to automatically +//! build their WASM files as a part of normal build command (e.g. `cargo build`). Once built, the +//! wasm file is placed in `./target/{debug|release}/wbuild/{runtime_name}.wasm`. +//! +//! ### Binaries +//! +//! Multiple binaries are shipped with substrate, the most important of which are located in the +//! [`./bin`](https://github.com/paritytech/polkadot-sdk/tree/master/substrate/bin) folder. +//! +//! * [`node_cli`] is an extensive substrate node that contains the superset of all runtime and node +//! side features. The corresponding runtime, called [`kitchensink_runtime`] contains all of the +//! modules that are provided with `FRAME`. This node and runtime is only used for testing and +//! demonstration. +//! * [`chain_spec_builder`]: Utility to build more detailed chain-specs for the aforementioned +//! node. Other projects typically contain a `build-spec` subcommand that does the same. +//! * [`node_template`](https://github.com/paritytech/polkadot-sdk/tree/master/substrate/bin/node-template): +//! a template node that contains a minimal set of features and can act as a starting point of a +//! project. +//! * [`subkey`]: Substrate's key management utility. +//! +//! ### Anatomy of a Binary Crate +//! +//! From the above, [`node_cli`]/[`kitchensink_runtime`] and `node-template` are essentially +//! blueprints of a Substrate-based project, as the name of the latter is implying. Each +//! Substrate-based project typically contains the following: +//! +//! * Under `./runtime`, a `./runtime/src/lib.rs` which is the top level runtime amalgamator file. +//! This file typically contains the [`frame::runtime::prelude::construct_runtime`] and +//! [`frame::runtime::prelude::impl_runtime_apis`] macro calls, which is the final definition of a +//! runtime. +//! +//! * Under `./node`, a `main.rs`, which is the starting point, and a `./service.rs`, which contains +//! all the node side components. Skimming this file yields an overview of the networking, +//! 
database, consensus and similar node side components. +//! +//! > The above two are conventions, not rules. +//! +//! > See for an update on how the node side +//! > components are being amalgamated. +//! +//! ## Parachain? +//! +//! As noted above, Substrate is the main engine behind the Polkadot ecosystem. One of the ways +//! through which Polkadot can be utilized is by building "parachains", blockchains that are +//! connected to Polkadot's shared security. +//! +//! To build a parachain, one could use [Cumulus](crate::polkadot_sdk::cumulus), the library on +//! top of Substrate, empowering any substrate-based chain to be a Polkadot parachain. +//! +//! ## Where To Go Next? +//! +//! Additional noteworthy crates within substrate: +//! +//! - RPC APIs of a Substrate node: [`sc_rpc_api`]/[`sc_rpc`] +//! - CLI Options of a Substrate node: [`sc_cli`] +//! - All of the consensus related crates provided by Substrate: +//! - [`sc_consensus_aura`] +//! - [`sc_consensus_babe`] +//! - [`sc_consensus_grandpa`] +//! - [`sc_consensus_beefy`] (TODO: @adrian, add some high level docs https://github.com/paritytech/polkadot-sdk-docs/issues/57) +//! - [`sc_consensus_manual_seal`] +//! - [`sc_consensus_pow`] + +#[doc(hidden)] +pub use crate::polkadot_sdk; diff --git a/developer-hub/src/polkadot_sdk/templates.rs b/developer-hub/src/polkadot_sdk/templates.rs new file mode 100644 index 00000000000..f60c75b8f21 --- /dev/null +++ b/developer-hub/src/polkadot_sdk/templates.rs @@ -0,0 +1,45 @@ +//! # Templates +//! +//! ### Internal +//! +//! The following templates are maintained as a part of the `polkadot-sdk` repository: +//! +//! - classic [`substrate-node-template`]: is a white-labeled substrate-based blockchain with a +//! moderate amount of features. It can act as a great starting point for those who want to learn +//! Substrate/FRAME and want to have a template that is already doing something. +//! - [`substrate-minimal-template`]: Same as the above, but it contains the least amount of code in +//! both the node and runtime. It is a great starting point for those who want to deeply learn +//! Substrate and FRAME. +//! - classic [`cumulus-parachain-template`], which is the de-facto parachain template shipped with +//! Cumulus. It is the parachain-enabled version of [`substrate-node-template`]. +//! +//! ### External Templates +//! +//! Noteworthy templates outside of this repository. +//! +//! - [`extended-parachain-template`](https://github.com/paritytech/extended-parachain-template): A +//! parachain template that contains more built-in functionality such as assets and NFTs. +//! - [`frontier-parachain-template`](https://github.com/paritytech/frontier-parachain-template): A +//! parachain template for launching EVM-compatible parachains. +//! +//! [`substrate-node-template`]: https://github.com/paritytech/polkadot-sdk/blob/master/substrate/bin/node-template/ +//! [`substrate-minimal-template`]: https://github.com/paritytech/polkadot-sdk/blob/master/substrate/bin/minimal/ +//! [`cumulus-parachain-template`]: https://github.com/paritytech/polkadot-sdk/blob/master/cumulus/parachain-template/ + +// TODO: in general, we need to make a deliberate choice here of moving a few key templates to this +// repo (nothing stays in `substrate-developer-hub`) and the everything else should be community +// maintained. https://github.com/paritytech/polkadot-sdk-docs/issues/67 + +// TODO: we should rename `substrate-node-template` to `substrate-basic-template`, +// `substrate-blockchain-template`. 
`node` is confusing in the name. +// `substrate-blockchain-template` and `cumulus-parachain-template` go well together 🤝. https://github.com/paritytech/polkadot-sdk-docs/issues/67 + +// NOTE: a super important detail that I am looking forward to here is +// and +// . Meaning that I would not spend time on +// teaching someone too much detail about the ugly thing we call "node" nowadays. In the future, I +// am sure we will either have a better "node-builder" code that can actually be tested, or an +// "omni-node" that can run (almost) any wasm file. We should already build tutorials in this +// direction IMO. This also affects all the templates. If we have a good neat runtime file, which we +// are moving toward, and a good node-builder, we don't need all of these damn templates. These +// templates are only there because the boilerplate is super horrible atm. diff --git a/developer-hub/src/polkadot_sdk/xcm.rs b/developer-hub/src/polkadot_sdk/xcm.rs new file mode 100644 index 00000000000..0d600f751c8 --- /dev/null +++ b/developer-hub/src/polkadot_sdk/xcm.rs @@ -0,0 +1,5 @@ +//! # XCM +//! +//! @KiChjang @franciscoaguirre +//! TODO: RFCs, xcm-spec, the future of the repo, minimal example perhaps, forward to where actual +//! docs are hosted. https://github.com/paritytech/polkadot-sdk-docs/issues/58 diff --git a/developer-hub/src/reference_docs/blockchain_scalibility.rs b/developer-hub/src/reference_docs/blockchain_scalibility.rs new file mode 100644 index 00000000000..e69de29bb2d diff --git a/developer-hub/src/reference_docs/blockchain_state_machines.rs b/developer-hub/src/reference_docs/blockchain_state_machines.rs new file mode 100644 index 00000000000..611ca2b778a --- /dev/null +++ b/developer-hub/src/reference_docs/blockchain_state_machines.rs @@ -0,0 +1,29 @@ +//! # State Transition Function +//! +//! This document briefly explains how in the context of Substrate-based blockchains, we view the +//! blockchain as a **decentralized state transition function**. +//! +//! Recall that a blockchain's main purpose is to help a permissionless set of entities to agree on +//! a shared data-set, and how it evolves. This is called the **State**, also referred to as +//! "onchain" data, or *Storage* in the context of FRAME. The state is where the account balance of +//! each user is, for example, stored, and there is a canonical version of it that everyone agrees +//! upon. +//! +//! Then, recall that a typical blockchain system will alter its state through execution of blocks. +//! *The component that dictates how this state alteration can happen is called the state transition +//! function*. +#![doc = simple_mermaid::mermaid!("../../../docs/mermaid/stf_simple.mmd")] +//! +//! In Substrate-based blockchains, the state transition function is called the *Runtime*. This is +//! explained further in [`crate::reference_docs::wasm_meta_protocol`]. +//! +//! With this in mind, we can paint a complete picture of a blockchain as a state machine: +#![doc = simple_mermaid::mermaid!("../../../docs/mermaid/stf.mmd")] +//! +//! In essence, the state of the blockchain at block N is the outcome of applying the state +//! transition function to the the previous state, and the current block as input. This can be +//! mathematically represented as: +//! +//! ```math +//! STF = F(State_N, Block_N) -> State_{N+1} +//! 
``` diff --git a/developer-hub/src/reference_docs/chain_spec_genesis.rs b/developer-hub/src/reference_docs/chain_spec_genesis.rs new file mode 100644 index 00000000000..2ac51a91f2d --- /dev/null +++ b/developer-hub/src/reference_docs/chain_spec_genesis.rs @@ -0,0 +1,4 @@ +//! Chain spec and genesis build. +//! +//! What is chain-spec. +//! What is genesis state and how to build it. diff --git a/developer-hub/src/reference_docs/cli.rs b/developer-hub/src/reference_docs/cli.rs new file mode 100644 index 00000000000..9274e86b04e --- /dev/null +++ b/developer-hub/src/reference_docs/cli.rs @@ -0,0 +1,7 @@ +//! # Command Line Arguments +//! +//! +//! Notes: +//! +//! - Command line arguments of a typical substrate based chain, and how to find and learn them. +//! - How to extend them with your custom stuff. diff --git a/developer-hub/src/reference_docs/consensus_swapping.rs b/developer-hub/src/reference_docs/consensus_swapping.rs new file mode 100644 index 00000000000..e639761ee97 --- /dev/null +++ b/developer-hub/src/reference_docs/consensus_swapping.rs @@ -0,0 +1,6 @@ +//! Consensus Swapping +//! +//! Notes: +//! +//! - The typical workshop done by Joshy in some places where he swaps out the consensus to be PoW. +//! - This could also be a tutorial rather than a ref doc, depending on the size. diff --git a/developer-hub/src/reference_docs/extrinsic_encoding.rs b/developer-hub/src/reference_docs/extrinsic_encoding.rs new file mode 100644 index 00000000000..b10b100bfbb --- /dev/null +++ b/developer-hub/src/reference_docs/extrinsic_encoding.rs @@ -0,0 +1,277 @@ +//! # Constructing and Signing Extrinsics +//! +//! Extrinsics are payloads that are stored in blocks which are responsible for altering the state +//! of a blockchain via the [_state transition +//! function_][crate::reference_docs::blockchain_state_machines]. +//! +//! Substrate is configurable enough that extrinsics can take any format. In practice, runtimes +//! tend to use our [`sp_runtime::generic::UncheckedExtrinsic`] type to represent extrinsics, +//! because it's generic enough to cater for most (if not all) use cases. In Polkadot, this is +//! configured [here](https://github.com/polkadot-fellows/runtimes/blob/94b2798b69ba6779764e20a50f056e48db78ebef/relay/polkadot/src/lib.rs#L1478) +//! at the time of writing. +//! +//! What follows is a description of how extrinsics based on this +//! [`sp_runtime::generic::UncheckedExtrinsic`] type are encoded into bytes. Specifically, we are +//! looking at how extrinsics with a format version of 4 are encoded. This version is itself a part +//! of the payload, and if it changes, it indicates that something about the encoding may have +//! changed. +//! +//! # Encoding an Extrinsic +//! +//! At a high level, all extrinsics compatible with [`sp_runtime::generic::UncheckedExtrinsic`] +//! are formed from concatenating some details together, as in the following pseudo-code: +//! +//! ```text +//! extrinsic_bytes = concat( +//! compact_encoded_length, +//! version_and_maybe_signature, +//! call_data +//! ) +//! ``` +//! +//! For clarity, the actual implementation in Substrate looks like this: +#![doc = docify::embed!("../substrate/primitives/runtime/src/generic/unchecked_extrinsic.rs", unchecked_extrinsic_encode_impl)] +//! +//! Let's look at how each of these details is constructed: +//! +//! ## compact_encoded_length +//! +//! This is a [SCALE compact encoded][frame::deps::codec::Compact] integer which is equal to the +//! length, in bytes, of the rest of the extrinsic details. +//! +//! 
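+//! As a rough, illustrative sketch (not part of the actual encoding pipeline), the length-prefixing
+//! can be done with the `parity-scale-codec` crate; `rest` below is a made-up stand-in for the
+//! already-encoded `version_and_maybe_signature ++ call_data` bytes:
+//!
+//! ```rust
+//! use parity_scale_codec::{Compact, Encode};
+//!
+//! // Stand-in bytes for the already-encoded remainder of the extrinsic details.
+//! let rest: Vec<u8> = vec![0x04, 0x00];
+//! // Compact-encode the byte length and prepend it to form the final extrinsic bytes.
+//! let mut extrinsic_bytes = Compact(rest.len() as u32).encode();
+//! extrinsic_bytes.extend(rest);
+//! assert_eq!(extrinsic_bytes[0], 0x08); // the length 2, SCALE compact encoded
+//! ```
+//!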
To obtain this value, we must encode and concatenate together the rest of the extrinsic details +//! first, and then obtain the byte length of these. We can then compact encode that length, and +//! prepend it to the rest of the details. +//! +//! ## version_and_maybe_signature +//! +//! If the extrinsic is _unsigned_, then `version_and_maybe_signature` will be just one byte +//! denoting the _transaction protocol version_, which is 4 (or `0b0000_0100`). +//! +//! If the extrinsic is _signed_ (all extrinsics submitted from users must be signed), then +//! `version_and_maybe_signature` is obtained by concatenating some details together, ie: +//! +//! ```text +//! version_and_maybe_signature = concat( +//! version_and_signed, +//! from_address, +//! signature, +//! signed_extensions_extra, +//! ) +//! ``` +//! +//! Each of the details to be concatenated together is explained below: +//! +//! ### version_and_signed +//! +//! This is one byte, equal to `0x84` or `0b1000_0100` (i.e. an upper 1 bit to denote that it is +//! signed, and then the transaction version, 4, in the lower bits). +//! +//! ### from_address +//! +//! This is the [SCALE encoded][frame::deps::codec] address of the sender of the extrinsic. The +//! address is the first generic parameter of [`sp_runtime::generic::UncheckedExtrinsic`], and so +//! can vary from chain to chain. +//! +//! The address type used on the Polkadot relay chain is [`sp_runtime::MultiAddress`], +//! where `AccountId32` is defined [here][`sp_core::crypto::AccountId32`]. When constructing a +//! signed extrinsic to be submitted to a Polkadot node, you'll always use the +//! [`sp_runtime::MultiAddress::Id`] variant to wrap your `AccountId32`. +//! +//! ### signature +//! +//! This is the [SCALE encoded][frame::deps::codec] signature. The signature type is configured via +//! the third generic parameter of [`sp_runtime::generic::UncheckedExtrinsic`], which determines the +//! shape of the signature and signing algorithm that should be used. +//! +//! The signature is obtained by signing the _signed payload_ bytes (see below on how this is +//! constructed) using the private key associated with the address and correct algorithm. +//! +//! The signature type used on the Polkadot relay chain is [`sp_runtime::MultiSignature`]; the +//! variants there are the types of signature that can be provided. +//! +//! ### signed_extensions_extra +//! +//! This is the concatenation of the [SCALE encoded][frame::deps::codec] bytes representing each of +//! the [_signed extensions_][sp_runtime::traits::SignedExtension], and are configured by the +//! fourth generic parameter of [`sp_runtime::generic::UncheckedExtrinsic`]. Learn more about +//! signed extensions [here][crate::reference_docs::signed_extensions]. +//! +//! When it comes to constructing an extrinsic, each signed extension has two things that we are +//! interested in here: +//! +//! - The actual SCALE encoding of the signed extension type itself; this is what will form our +//! `signed_extensions_extra` bytes. +//! - An `AdditionalSigned` type. This is SCALE encoded into the `signed_extensions_additional` data +//! of the _signed payload_ (see below). +//! +//! Either (or both) of these can encode to zero bytes. +//! +//! Each chain configures the set of signed extensions that it uses in its runtime configuration. +//! At the time of writing, Polkadot configures them +//! 
[here](https://github.com/polkadot-fellows/runtimes/blob/1dc04eb954eadf8aadb5d83990b89662dbb5a074/relay/polkadot/src/lib.rs#L1432C25-L1432C25). +//! Some of the common signed extensions are defined +//! [here][frame::deps::frame_system#signed-extensions]. +//! +//! Information about exactly which signed extensions are present on a chain and in what order is +//! also a part of the metadata for the chain. For V15 metadata, it can be +//! [found here][frame::deps::frame_support::__private::metadata::v15::ExtrinsicMetadata]. +//! +//! ## call_data +//! +//! This is the main payload of the extrinsic, which is used to determine how the chain's state is +//! altered. This is defined by the second generic parameter of +//! [`sp_runtime::generic::UncheckedExtrinsic`]. +//! +//! A call can be anything that implements [`Encode`][frame::deps::codec::Encode]. In FRAME-based +//! runtimes, a call is represented as an enum of enums, where the outer enum represents the FRAME +//! pallet being called, and the inner enum represents the call being made within that pallet, and +//! any arguments to it. Read more about the call enum +//! [here][crate::reference_docs::frame_composite_enums]. +//! +//! FRAME `Call` enums are automatically generated, and end up looking something like this: +#![doc = docify::embed!("./src/reference_docs/extrinsic_encoding.rs", call_data)] +//! +//! In pseudo-code, this `Call` enum encodes equivalently to: +//! +//! ```text +//! call_data = concat( +//! pallet_index, +//! call_index, +//! call_args +//! ) +//! ``` +//! +//! - `pallet_index` is a single byte denoting the index of the pallet that we are calling into, and +//! is what the tag of the outermost enum will encode to. +//! - `call_index` is a single byte denoting the index of the call that we are making the pallet, +//! and is what the tag of the inner enum will encode to. +//! - `call_args` are the SCALE encoded bytes for each of the arguments that the call expects, and +//! are typically provided as values to the inner enum. +//! +//! Information about the pallets that exist for a chain (including their indexes), the calls +//! available in each pallet (including their indexes), and the arguments required for each call +//! can be found in the metadata for the chain. For V15 metadata, this information +//! [is here][frame::deps::frame_support::__private::metadata::v15::PalletMetadata]. +//! +//! # The Signed Payload Format +//! +//! All extrinsics submitted to a node from the outside world (also known as _transactions_) need to +//! be _signed_. The data that needs to be signed for some extrinsic is called the _signed payload_, +//! and its shape is described by the following pseudo-code: +//! +//! ```text +//! signed_payload = concat( +//! call_data, +//! signed_extensions_extra, +//! signed_extensions_additional, +//! ) +//! +//! if length(signed_payload) > 256 { +//! signed_payload = blake2_256(signed_payload) +//! } +//! ``` +//! +//! The bytes representing `call_data` and `signed_extensions_extra` can be obtained as descibed +//! above. `signed_extensions_additional` is constructed by SCALE encoding the +//! ["additional signed" data][sp_runtime::traits::SignedExtension::AdditionalSigned] for each +//! signed extension that the chain is using, in order. +//! +//! Once we've concatenated those together, we hash the result if it's greater than 256 bytes in +//! length using a Blake2 256bit hasher. +//! +//! The [`sp_runtime::generic::SignedPayload`] type takes care of assembling the correct payload +//! 
for us, given `call_data` and a tuple of signed extensions. +//! +//! # Example Encoding +//! +//! Using [`sp_runtime::generic::UncheckedExtrinsic`], we can construct and encode an extrinsic +//! as follows: +#![doc = docify::embed!("./src/reference_docs/extrinsic_encoding.rs", encoding_example)] + +#[docify::export] +pub mod call_data { + use parity_scale_codec::{Decode, Encode}; + + // The outer enum composes calls within + // different pallets together. We have two + // pallets, "PalletA" and "PalletB". + #[derive(Encode, Decode)] + pub enum Call { + #[codec(index = 0)] + PalletA(PalletACall), + #[codec(index = 7)] + PalletB(PalletBCall), + } + + // An inner enum represents the calls within + // a specific pallet. "PalletA" has one call, + // "Foo". + #[derive(Encode, Decode)] + pub enum PalletACall { + #[codec(index = 0)] + Foo(String), + } + + #[derive(Encode, Decode)] + pub enum PalletBCall { + #[codec(index = 0)] + Bar(String), + } +} + +#[docify::export] +pub mod encoding_example { + use super::call_data::{Call, PalletACall}; + use crate::reference_docs::signed_extensions::signed_extensions_example; + use parity_scale_codec::Encode; + use sp_core::crypto::AccountId32; + use sp_keyring::sr25519::Keyring; + use sp_runtime::{ + generic::{SignedPayload, UncheckedExtrinsic}, + MultiAddress, MultiSignature, + }; + + // Define some signed extensions to use. We'll use a couple of examples + // from the signed extensions reference doc. + type SignedExtensions = + (signed_extensions_example::AddToPayload, signed_extensions_example::AddToSignaturePayload); + + // We'll use `UncheckedExtrinsic` to encode our extrinsic for us. We set + // the address and signature type to those used on Polkadot, use our custom + // `Call` type, and use our custom set of `SignedExtensions`. + type Extrinsic = + UncheckedExtrinsic, Call, MultiSignature, SignedExtensions>; + + pub fn encode_demo_extrinsic() -> Vec { + // The "from" address will be our Alice dev account. + let from_address = MultiAddress::::Id(Keyring::Alice.to_account_id()); + + // We provide some values for our expected signed extensions. + let signed_extensions = ( + signed_extensions_example::AddToPayload(1), + signed_extensions_example::AddToSignaturePayload, + ); + + // Construct our call data: + let call_data = Call::PalletA(PalletACall::Foo("Hello".to_string())); + + // The signed payload. This takes care of encoding the call_data, + // signed_extensions_extra and signed_extensions_additional, and hashing + // the result if it's > 256 bytes: + let signed_payload = SignedPayload::new(&call_data, signed_extensions.clone()); + + // Sign the signed payload with our Alice dev account's private key, + // and wrap the signature into the expected type: + let signature = { + let sig = Keyring::Alice.sign(&signed_payload.encode()); + MultiSignature::Sr25519(sig) + }; + + // Now, we can build and encode our extrinsic: + let ext = Extrinsic::new_signed(call_data, from_address, signature, signed_extensions); + + let encoded_ext = ext.encode(); + encoded_ext + } +} diff --git a/developer-hub/src/reference_docs/fee_less_runtime.rs b/developer-hub/src/reference_docs/fee_less_runtime.rs new file mode 100644 index 00000000000..43a761a6c52 --- /dev/null +++ b/developer-hub/src/reference_docs/fee_less_runtime.rs @@ -0,0 +1,12 @@ +//! # Fee-Less Runtime +//! +//! +//! Notes: +//! +//! - An extension of [`runtime_vs_smart_contract`], showcasing the tools needed to build a safe +//! runtime that is fee-less. +//! 
- Would need to use unsigned origins, custom validate_unsigned, check the existence of some NFT +//! and some kind of rate limiting (eg. any account gets 5 free tx per day). +//! - The rule of thumb is that as long as the unsigned validate does one storage read, similar to +//! nonce, it is fine. +//! - This could possibly be a good tutorial/template, rather than a reference doc. diff --git a/developer-hub/src/reference_docs/frame_benchmarking_weight.rs b/developer-hub/src/reference_docs/frame_benchmarking_weight.rs new file mode 100644 index 00000000000..f65f4174ec6 --- /dev/null +++ b/developer-hub/src/reference_docs/frame_benchmarking_weight.rs @@ -0,0 +1,23 @@ +//! # FRAME Benchmarking and Weights. +//! +//! Notes: +//! +//! On Weight as a concept. +//! +//! - Why we need it. Super important. People hate this. We need to argue why it is worth it. +//! - Axis of weight: PoV + Time. +//! - pre dispatch weight vs. metering and post dispatch correction. +//! - mention that we will do this for PoV +//! - you can manually refund using `DispatchResultWithPostInfo`. +//! - Technically you can have weights with any benchmarking framework. You just need one number to +//! be computed pre-dispatch. But FRAME gives you a framework for this. +//! - improve documentation of `#[weight = ..]` and `#[pallet::weight(..)]`. All syntax variation +//! should be covered. +//! +//! on FRAME benchmarking machinery: +//! +//! - component analysis, why everything must be linear. +//! - how to write benchmarks, how you must think of worst case. +//! - how to run benchmarks. +//! +//! - https://www.shawntabrizi.com/substrate/substrate-storage-deep-dive/ diff --git a/developer-hub/src/reference_docs/frame_composite_enums.rs b/developer-hub/src/reference_docs/frame_composite_enums.rs new file mode 100644 index 00000000000..6051cd53446 --- /dev/null +++ b/developer-hub/src/reference_docs/frame_composite_enums.rs @@ -0,0 +1 @@ +//! # FRAME Composite Enums diff --git a/developer-hub/src/reference_docs/frame_currency.rs b/developer-hub/src/reference_docs/frame_currency.rs new file mode 100644 index 00000000000..ba181373062 --- /dev/null +++ b/developer-hub/src/reference_docs/frame_currency.rs @@ -0,0 +1,8 @@ +//! FRAME Currency Abstractions and Traits +//! +//! Notes: +//! +//! - History, `Currency` trait. +//! - `Hold` and `Freeze` with diagram. +//! - `HoldReason` and `FreezeReason` +//! - This footgun: https://github.com/paritytech/polkadot-sdk/pull/1900#discussion_r1363783609 diff --git a/developer-hub/src/reference_docs/frame_origin.rs b/developer-hub/src/reference_docs/frame_origin.rs new file mode 100644 index 00000000000..a4078377cd7 --- /dev/null +++ b/developer-hub/src/reference_docs/frame_origin.rs @@ -0,0 +1,14 @@ +//! # FRAME Origin +//! +//! Notes: +//! +//! - Def talk about account abstraction and how it is a solved issue in frame. See Gav's talk in +//! Protocol Berg 2023 +//! - system's raw origin, how it is amalgamated with other origins into one type +//! [`frame_composite_enums`] +//! - signed origin +//! - unsigned origin, link to [`fee_less_runtime`] +//! - Root origin, how no one can obtain it. +//! - Abstract origin: how FRAME allows you to express "origin is 2/3 of the this body or 1/2 of +//! that body or half of the token holders". +//! - `type CustomOrigin: EnsureOrigin<_>` in pallets. 
diff --git a/developer-hub/src/reference_docs/frame_runtime_migration.rs b/developer-hub/src/reference_docs/frame_runtime_migration.rs new file mode 100644 index 00000000000..0616ccbb6f5 --- /dev/null +++ b/developer-hub/src/reference_docs/frame_runtime_migration.rs @@ -0,0 +1,9 @@ +//! # Runtime Upgrade and Testing +//! +//! +//! Notes: +//! +//! - Flow of things, when does `on_runtime_upgrade` get called. Link to `Hooks` and its diagram +//! as source of truth. +//! - Data migration and when it is needed. +//! - Look into the pba-lecture. diff --git a/developer-hub/src/reference_docs/frame_system_accounts.rs b/developer-hub/src/reference_docs/frame_system_accounts.rs new file mode 100644 index 00000000000..ae9d2c9e0cb --- /dev/null +++ b/developer-hub/src/reference_docs/frame_system_accounts.rs @@ -0,0 +1,8 @@ +//! # FRAME Accounts +//! +//! How `frame_system` handles account IDs. Nonce. Consumers and Providers, reference counting. + +// - poorly understood topics, needs one great article to rule them all. +// - https://github.com/paritytech/substrate/issues/14425 +// - https://github.com/paritytech/substrate/pull/12951 +// - https://substrate.stackexchange.com/questions/263/what-is-the-meaning-of-the-account-provider-sufficients-and-consumer diff --git a/developer-hub/src/reference_docs/glossary.rs b/developer-hub/src/reference_docs/glossary.rs new file mode 100644 index 00000000000..d0bd6accce6 --- /dev/null +++ b/developer-hub/src/reference_docs/glossary.rs @@ -0,0 +1,62 @@ +//! # Glossary +//! +//! #### State +//! +//! The data around which the blockchain network wishes to come to consensus. Also +//! referred to as "onchain data", "onchain storage" or sometimes just "storage". In UTXO-based +//! blockchains, it is referred to as the "ledger". +//! +//! **Synonyms**: Onchain data, Onchain storage, Storage, Ledger +//! +//! #### State Transition Function +//! +//! The WASM Blob that dictates how the blockchain should transition its state upon encountering new +//! blocks. +//! +//! #### Host +//! +//! The environment that hosts and executes the [state transition function's WASM +//! blob](#state-transition-function). +//! +//! #### Node +//! +//! The full software artifact that contains the [host](#host), but importantly also all the other +//! modules needed to be part of a blockchain network, such as peer-to-peer networking, database and +//! such. +//! +//! **Synonyms**: Client +//! +//! #### Light Node +//! +//! Same as a [node](#node), but only capable of following the network through listening to +//! block headers. Usually capable of running in more constrained environments, such as an embedded +//! device, phone, or a web browser. +//! +//! **Synonyms**: Light Client +//! +//! #### Offchain +//! +//! #### Host Function: +//! +//! #### Runtime API: +//! +//! #### Dispatchable: +//! +//! Callable +//! +//! #### Extrinsic +//! +//! +//! #### Pallet +//! +//! #### Full Node +//! +//! #### Archive Node +//! +//! #### Validator +//! +//! #### Collator +//! +//! #### Parachain +//! +//! aka. AppChain. diff --git a/developer-hub/src/reference_docs/light_nodes.rs b/developer-hub/src/reference_docs/light_nodes.rs new file mode 100644 index 00000000000..a6a0a828ef5 --- /dev/null +++ b/developer-hub/src/reference_docs/light_nodes.rs @@ -0,0 +1,7 @@ +//! # Light Clients +//! +//! +//! Notes: should contain only high level information about light clients, then link to how to set +//! it up in PAPI and SubXT +//! https://docs.substrate.io/learn/light-clients-in-substrate-connect/ +//!
https://github.com/substrate-developer-hub/substrate-front-end-template/pull/277 diff --git a/developer-hub/src/reference_docs/metadata.rs b/developer-hub/src/reference_docs/metadata.rs new file mode 100644 index 00000000000..702c1c30fd9 --- /dev/null +++ b/developer-hub/src/reference_docs/metadata.rs @@ -0,0 +1 @@ +//! # Metadata diff --git a/developer-hub/src/reference_docs/mod.rs b/developer-hub/src/reference_docs/mod.rs new file mode 100644 index 00000000000..44284394000 --- /dev/null +++ b/developer-hub/src/reference_docs/mod.rs @@ -0,0 +1,99 @@ +//! # Polkadot SDK Reference Docs. +//! +//! This is the entry point for all reference documents that enhance one's learning experience in +//! the Polkadot SDK. +//! +//! Note that this module also contains the [glossary](crate::reference_docs::glossary). +//! +//! ## What is a "reference document"? +//! +//! First, see [why we use rust-docs for everything](crate#why-rust-docs) and our documentation +//! [principles](crate#principles). We acknowledge that as much of the crucial information should be +//! embedded in the low level rust-docs. Then, high level scenarios should be covered in +//! [`crate::guides`]. Finally, we acknowledge that there is a category of information that is: +//! +//! 1. crucial to know. +//! 2. is too high level to be in the rust-doc of any one `type`, `trait` or `fn`. +//! 3. is too low level to be encompassed in a [`crate::guides`]. +//! +//! We call this class of documents "reference documents". Our goal should be to minimize the number +//! of "reference" docs, as they incur maintenance burden. + +/// Learn how Substrate and FRAME use traits and associated types to make modules generic in a +/// type-safe manner. +pub mod trait_based_programming; + +/// Learn about the way Substrate and FRAME view their blockchains as state machines. +pub mod blockchain_state_machines; + +/// The glossary. +pub mod glossary; + +/// Learn about the WASM meta-protocol of all Substrate-based chains. +pub mod wasm_meta_protocol; + +/// Learn about the differences between smart contracts and a FRAME-based runtime. They are both +/// "code stored onchain", but how do they differ? +pub mod runtime_vs_smart_contract; + +/// Learn about how extrinsics are encoded to be transmitted to a node and stored in blocks. +pub mod extrinsic_encoding; + +/// Learn about the signed extensions that form a part of extrinsics. +// TODO: @jsdw https://github.com/paritytech/polkadot-sdk-docs/issues/42 +pub mod signed_extensions; + +/// Learn about *"Origin"* A topic in FRAME that enables complex account abstractions to be built. +// TODO: @shawntabrizi https://github.com/paritytech/polkadot-sdk-docs/issues/43 +pub mod frame_origin; + +/// Learn about how to write safe and defensive code in your FRAME runtime. +// TODO: @CrackTheCode016 https://github.com/paritytech/polkadot-sdk-docs/issues/44 +pub mod safe_defensive_programming; + +/// Learn about composite enums in FRAME-based runtimes, such as "RuntimeEvent" and "RuntimeCall". +pub mod frame_composite_enums; + +/// Learn about how to make a pallet/runtime that is fee-less and instead uses another mechanism to +/// control usage and sybil attacks. +pub mod fee_less_runtime; + +/// Learn about metadata, the main means through which an upgradeable runtime communicates its +/// properties to the outside world. +// TODO: @jsdw https://github.com/paritytech/polkadot-sdk-docs/issues/47 +pub mod metadata; + +/// Learn about how frame-system handles `account-ids`, nonces, consumers and providers. 
+pub mod frame_system_accounts; + +/// Learn about the currency-related abstractions provided in FRAME. +pub mod frame_currency; + +/// Learn about benchmarking and weight. +// TODO: @shawntabrizi @ggwpez https://github.com/paritytech/polkadot-sdk-docs/issues/50 +pub mod frame_benchmarking_weight; + +/// Learn about the chain specification file and the genesis state of the blockchain. +// TODO: @michalkucharczyk https://github.com/paritytech/polkadot-sdk-docs/issues/51 +pub mod chain_spec_genesis; + +/// Learn about the memory limitations of the WASM runtime. +// TODO: @kianenigma https://github.com/paritytech/polkadot-sdk-docs/issues/52 +pub mod wasm_memory; + +/// Learn about Substrate's CLI, and how it can be extended. +// TODO: @kianenigma https://github.com/paritytech/polkadot-sdk-docs/issues/53 +pub mod cli; + +/// Learn about Substrate's consensus algorithms, and how you can swap one for another. +// TODO: @JoshOrndorff @kianenigma https://github.com/paritytech/polkadot-sdk-docs/issues/54 +pub mod consensus_swapping; + +/// Learn about the advanced ways to test and coordinate a runtime upgrade and data migration. +// TODO: @liamaharon https://github.com/paritytech/polkadot-sdk-docs/issues/55 +pub mod frame_runtime_migration; + +/// Learn about light nodes, how they function, and how Substrate-based chains come +/// light-node-first out of the box. +// TODO: @jsdw @josepot https://github.com/paritytech/polkadot-sdk-docs/issues/68 +pub mod light_nodes; diff --git a/developer-hub/src/reference_docs/runtime_vs_smart_contract.rs b/developer-hub/src/reference_docs/runtime_vs_smart_contract.rs new file mode 100644 index 00000000000..7f96fa1800a --- /dev/null +++ b/developer-hub/src/reference_docs/runtime_vs_smart_contract.rs @@ -0,0 +1,6 @@ +//! Runtime vs. Smart Contracts +//! +//! Notes: +//! +//! Why one can be weighed, and one MUST be metered. +//! https://forum.polkadot.network/t/where-contracts-fail-and-runtimes-chains-are-needed/4464/3 diff --git a/developer-hub/src/reference_docs/safe_defensive_programming.rs b/developer-hub/src/reference_docs/safe_defensive_programming.rs new file mode 100644 index 00000000000..9d0f028e570 --- /dev/null +++ b/developer-hub/src/reference_docs/safe_defensive_programming.rs @@ -0,0 +1 @@ +//! diff --git a/developer-hub/src/reference_docs/signed_extensions.rs b/developer-hub/src/reference_docs/signed_extensions.rs new file mode 100644 index 00000000000..28b1426536b --- /dev/null +++ b/developer-hub/src/reference_docs/signed_extensions.rs @@ -0,0 +1,79 @@ +//! Signed extensions are, briefly, a means for different chains to extend the "basic" extrinsic +//! format with custom data that can be checked by the runtime. +//! +//! # Example +//! +//!
Defining a couple of very simple signed extensions looks like the following: +#![doc = docify::embed!("./src/reference_docs/signed_extensions.rs", signed_extensions_example)] + +#[docify::export] +pub mod signed_extensions_example { + use parity_scale_codec::{Decode, Encode}; + use scale_info::TypeInfo; + use sp_runtime::traits::SignedExtension; + + // This doesn't actually check anything, but simply allows + // some arbitrary `u32` to be added to the extrinsic payload + #[derive(Debug, Encode, Decode, Clone, Eq, PartialEq, TypeInfo)] + pub struct AddToPayload(pub u32); + + impl SignedExtension for AddToPayload { + const IDENTIFIER: &'static str = "AddToPayload"; + type AccountId = (); + type Call = (); + type AdditionalSigned = (); + type Pre = (); + + fn additional_signed( + &self, + ) -> Result< + Self::AdditionalSigned, + sp_runtime::transaction_validity::TransactionValidityError, + > { + Ok(()) + } + + fn pre_dispatch( + self, + _who: &Self::AccountId, + _call: &Self::Call, + _info: &sp_runtime::traits::DispatchInfoOf, + _len: usize, + ) -> Result { + Ok(()) + } + } + + // This is the opposite; nothing will be added to the extrinsic payload, + // but the AdditionalSigned type (`1234u32`) will be added to the + // payload to be signed. + #[derive(Debug, Encode, Decode, Clone, Eq, PartialEq, TypeInfo)] + pub struct AddToSignaturePayload; + + impl SignedExtension for AddToSignaturePayload { + const IDENTIFIER: &'static str = "AddToSignaturePayload"; + type AccountId = (); + type Call = (); + type AdditionalSigned = u32; + type Pre = (); + + fn additional_signed( + &self, + ) -> Result< + Self::AdditionalSigned, + sp_runtime::transaction_validity::TransactionValidityError, + > { + Ok(1234) + } + + fn pre_dispatch( + self, + _who: &Self::AccountId, + _call: &Self::Call, + _info: &sp_runtime::traits::DispatchInfoOf, + _len: usize, + ) -> Result { + Ok(()) + } + } +} diff --git a/developer-hub/src/reference_docs/trait_based_programming.rs b/developer-hub/src/reference_docs/trait_based_programming.rs new file mode 100644 index 00000000000..249f4bcbce0 --- /dev/null +++ b/developer-hub/src/reference_docs/trait_based_programming.rs @@ -0,0 +1,229 @@ +//! # Trait-based Programming +//! +//! This document walks you over a peculiar way of using Rust's `trait` items. This pattern is +//! abundantly used within [`frame`] and is therefore paramount important for a smooth transition +//! into it. +//! +//! The rest of this document assumes familiarity with the +//! [Rust book's Advanced Traits](https://doc.rust-lang.org/book/ch19-03-advanced-traits.html) +//! section. +//! Moreover, we use the [`frame::traits::Get`]. +//! +//! First, imagine we are writing a FRAME pallet. We represent this pallet with a `struct Pallet`, +//! and this pallet wants to implement the functionalities of that pallet, for example a simple +//! `transfer` function. For the sake of education, we are interested in having a `MinTransfer` +//! amount, expressed as a [`frame::traits::Get`], which will dictate what is the minimum amount +//! that can be transferred. +//! +//! We can foremost write this as simple as the following snippet: +#![doc = docify::embed!("./src/reference_docs/trait_based_programming.rs", basic)] +//! +//! +//! In this example, we use arbitrary choices for `AccountId`, `Balance` and the `MinTransfer` type. +//! This works great for **one team's purposes** but we have to remember that Substrate and FRAME +//! are written as generic frameworks, intended to be highly configurable. +//! +//! 
In a broad sense, there are two avenues in exposing configurability: +//! +//! 1. For *values* that need to be generic, for example `MinTransfer`, we attach them to the +//! `Pallet` struct as fields: +//! +//! ``` +//! struct Pallet { +//! min_transfer: u128, +//! } +//! ``` +//! +//! 2. For *types* that need to be generic, we would have to use generic or associated types, such +//! as: +//! +//! ``` +//! struct Pallet { +//! min_transfer: u128, +//! _marker: std::marker::PhantomData, +//! } +//! ``` +//! +//! Substrate and FRAME, for various reasons (performance, correctness, type safety) has opted to +//! use *types* to declare both *values* and *types* as generic. This is the essence of why the +//! `Get` trait exists. +//! +//! This would bring us to the second iteration of the pallet, which would look like: +#![doc = docify::embed!("./src/reference_docs/trait_based_programming.rs", generic)] +//! +//! In this example, we managed to make all 3 of our types generic. Taking the example of the +//! `AccountId`, one should read the above as following: +//! +//! > The `Pallet` does not know what type `AccountId` concretely is, but it knows that it is +//! > something that adheres to being `From<[u8; 32]>`. +//! +//! This method would work, but it suffers from two downsides: +//! +//! 1. It is verbose, each `impl` block would have to reiterate all of the trait bounds. +//! 2. It cannot easily share/inherit generic types. Imagine multiple pallets wanting to be generic +//! over a single `AccountId`. There is no easy way to express that in this model. +//! +//! Finally, this brings us to using traits and associated types on traits to express the above. +//! Trait associated types have the benefit of: +//! +//! 1. Being less verbose, as in effect they can *group multiple `type`s together*. +//! 2. Can inherit from one another by declaring +//! [supertraits](https://doc.rust-lang.org/rust-by-example/trait/supertraits.html). +//! +//! > Interestingly, one downside of associated types is that declaring defaults on them is not +//! > stable yet. In the meantime, we have built our own custom mechanics around declaring defaults +//! for associated types, see [`pallet_default_config_example`]. +//! +//! The last iteration of our code would look like this: +#![doc = docify::embed!("./src/reference_docs/trait_based_programming.rs", trait_based)] +//! +//! Notice how instead of having multiple generics, everything is generic over a single ``, and all types are fetched through `T`, for example `T::AccountId`, `T::MinTransfer`. +//! +//! Finally, imagine all pallets wanting to be generic over `AccountId`. This can be achieved by +//! having individual `trait Configs` declare a shared `trait SystemConfig` as their +//! [supertrait](https://doc.rust-lang.org/rust-by-example/trait/supertraits.html). +#![doc = docify::embed!("./src/reference_docs/trait_based_programming.rs", with_system)] +//! In FRAME, this shared supertrait is [`frame::prelude::frame_system`]. +//! +//! Notice how this made no difference in the syntax of the rest of the code. `T::AccountId` is +//! still a valid type, since `T` implements `Config` and `Config` implies `SystemConfig`, which +//! has a `type AccountId`. +//! +//! Note, in some instances one would need to use what is known as the fully-qualified-syntax to +//! access a type to help the Rust compiler disambiguate. +#![doc = docify::embed!("./src/reference_docs/trait_based_programming.rs", fully_qualified)] +//! +//! 
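For readers who just want the shape of that syntax without opening the embedded file, a minimal, self-contained sketch (using an illustrative `SystemConfig` trait, not the real `frame_system` one) looks like this:

```rust
trait SystemConfig {
    type AccountId;
}

// Fully qualified syntax: read `AccountId` "through" the `SystemConfig` trait,
// leaving the compiler no ambiguity about where the type comes from.
type AccountIdOf<T> = <T as SystemConfig>::AccountId;
```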
This syntax can sometimes become more complicated when you are dealing with nested traits. +//! Consider the following example, in which we fetch the `type Balance` from another trait +//! `CurrencyTrait`. +#![doc = docify::embed!("./src/reference_docs/trait_based_programming.rs", fully_qualified_complicated)] +//! +//! Notice the final `type BalanceOf` and how it is defined. Using such aliases to shorten the +//! length of fully qualified syntax is a common pattern in FRAME. +//! +//! The above example is almost identical to the well-known (and somewhat notorious) `type +//! BalanceOf` that is often used in the context of [`frame::traits::fungible`]. +#![doc = docify::embed!("../substrate/frame/fast-unstake/src/types.rs", BalanceOf)] +//! +//! ## Additional Resources +//! +//! - +//! - [Substrate Seminar - Traits and Generic Types](https://www.youtube.com/watch?v=6cp10jVWNl4) +//! - +#![allow(unused)] + +use frame::traits::Get; + +#[docify::export] +mod basic { + struct Pallet; + + type AccountId = frame::deps::sp_runtime::AccountId32; + type Balance = u128; + type MinTransfer = frame::traits::ConstU128<10>; + + impl Pallet { + fn transfer(_from: AccountId, _to: AccountId, _amount: Balance) { + todo!() + } + } +} + +#[docify::export] +mod generic { + use super::*; + + struct Pallet { + _marker: std::marker::PhantomData<(AccountId, Balance, MinTransfer)>, + } + + impl Pallet + where + Balance: frame::traits::AtLeast32BitUnsigned, + MinTransfer: frame::traits::Get, + AccountId: From<[u8; 32]>, + { + fn transfer(_from: AccountId, _to: AccountId, amount: Balance) { + assert!(amount >= MinTransfer::get()); + unimplemented!(); + } + } +} + +#[docify::export] +mod trait_based { + use super::*; + + trait Config { + type AccountId: From<[u8; 32]>; + type Balance: frame::traits::AtLeast32BitUnsigned; + type MinTransfer: frame::traits::Get; + } + + struct Pallet(std::marker::PhantomData); + impl Pallet { + fn transfer(_from: T::AccountId, _to: T::AccountId, amount: T::Balance) { + assert!(amount >= T::MinTransfer::get()); + unimplemented!(); + } + } +} + +#[docify::export] +mod with_system { + use super::*; + + pub trait SystemConfig { + type AccountId: From<[u8; 32]>; + } + + pub trait Config: SystemConfig { + type Balance: frame::traits::AtLeast32BitUnsigned; + type MinTransfer: frame::traits::Get; + } + + pub struct Pallet(std::marker::PhantomData); + impl Pallet { + fn transfer(_from: T::AccountId, _to: T::AccountId, amount: T::Balance) { + assert!(amount >= T::MinTransfer::get()); + unimplemented!(); + } + } +} + +#[docify::export] +mod fully_qualified { + use super::with_system::*; + + // Simple of using fully qualified syntax. + type AccountIdOf = ::AccountId; +} + +#[docify::export] +mod fully_qualified_complicated { + use super::with_system::*; + + trait CurrencyTrait { + type Balance: frame::traits::AtLeast32BitUnsigned; + fn more_stuff() {} + } + + trait Config: SystemConfig { + type Currency: CurrencyTrait; + } + + struct Pallet(std::marker::PhantomData); + impl Pallet { + fn transfer( + _from: T::AccountId, + _to: T::AccountId, + _amount: <::Currency as CurrencyTrait>::Balance, + ) { + unimplemented!(); + } + } + + /// A common pattern in FRAME. + type BalanceOf = <::Currency as CurrencyTrait>::Balance; +} diff --git a/developer-hub/src/reference_docs/wasm_memory.rs b/developer-hub/src/reference_docs/wasm_memory.rs new file mode 100644 index 00000000000..4f4cda31094 --- /dev/null +++ b/developer-hub/src/reference_docs/wasm_memory.rs @@ -0,0 +1,7 @@ +//! # WASM Memory Limitations. +//! 
+//! Notes: +//! +//! - Stack: Need to use `Box<_>` +//! - Heap: Substrate imposes a limit. PvF execution has its own limits +//! - Heap: There is also a maximum amount that a single allocation can have. diff --git a/developer-hub/src/reference_docs/wasm_meta_protocol.rs b/developer-hub/src/reference_docs/wasm_meta_protocol.rs new file mode 100644 index 00000000000..f45f4c8fa3c --- /dev/null +++ b/developer-hub/src/reference_docs/wasm_meta_protocol.rs @@ -0,0 +1,113 @@ +//! # WASM Meta Protocol +//! +//! All Substrate-based chains adhere to a unique architectural design novel to the Polkadot +//! ecosystem. We refer to this design as the "WASM Meta Protocol". +//! +//! Consider the fact that traditional blockchain software is usually a monolithic artifact. +//! Upgrading any part of the system implies upgrading the entire system. This has historically led +//! to cumbersome, forkful upgrades becoming the status quo in the blockchain ecosystem. +//! +//! Moreover, the idea of "storing code in the state" is explored in the context of smart contract +//! platforms, but has not been expanded further. +//! +//! Substrate mixes these two ideas together, and takes the novel approach of storing the +//! blockchain's main "state transition function" in the main blockchain state, in the same fashion +//! that a smart contract platform stores the code of individual contracts in its state. As noted in +//! [`crate::reference_docs::blockchain_state_machines`], this state transition function is called +//! the **Runtime**, and WASM is chosen as the bytecode. The Runtime is stored under a special key +//! in the state (see +//! [`sp_core::storage::well_known_keys`](../../../sp_core/index.html)) and can be +//! updated as a part of the state transition function's execution, just like a user's account +//! balance can be updated. +//! +//! > Note that while we drew an analogy between smart contracts and runtimes in the above, there +//! > are fundamental differences between the two, explained in +//! > [`crate::reference_docs::runtime_vs_smart_contract`]. +//! +//! The rest of the system that is NOT the state transition function is called the **node**, and +//! is a normal binary that is compiled from Rust to different hardware targets. +//! +//! This design enables all Substrate-based chains to be fork-less-ly upgradeable, because the +//! Runtime can be updated on the fly, within the execution of a block, and the node is (for the +//! most part) oblivious to the change that is happening. +//! +//! Therefore, the high-level architecture of any Substrate-based chain can be demonstrated as +//! follows: +#![doc = simple_mermaid::mermaid!("../../../docs/mermaid/substrate_simple.mmd")] +//! +//! The node and the runtime need to communicate. This is done through two concepts: +//! +//! 1. **Host functions**: a way for the (WASM) runtime to talk to the node. All host functions are +//! defined in [`sp_io`]. For example, [`sp_io::storage`] is the set of host functions that +//! allows the runtime to read and write data to the on-chain state. +//! 2. **Runtime APIs**: a way for the node to talk to the WASM runtime. Runtime APIs are defined +//! using macros and utilities in [`sp_api`]. For example, [`sp_api::Core`] is the most +//! fundamental runtime API that any blockchain must implement in order to be able to (re) +//! execute blocks. +#![doc = simple_mermaid::mermaid!("../../../docs/mermaid/substrate_client_runtime.mmd")] +//! +//!
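As a loose sketch of these two directions of communication (the `ExampleApi` name is made up for illustration; `sp_io::storage` and `sp_api::decl_runtime_apis!` are the real building blocks):

```rust
// Direction 1 - host functions: the runtime calling into the node, e.g. to
// read a raw value from the on-chain state.
fn read_raw_state(key: &[u8]) -> Option<Vec<u8>> {
    sp_io::storage::get(key).map(|bytes| bytes.to_vec())
}

// Direction 2 - runtime APIs: the node calling into the runtime. The API is
// declared once, and later implemented by a runtime via `impl_runtime_apis!`.
sp_api::decl_runtime_apis! {
    /// A hypothetical API exposing a single value computed by the runtime.
    pub trait ExampleApi {
        fn answer() -> u32;
    }
}
```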
A runtime must have a set of runtime APIs in order to have any meaningful blockchain +//! functionality, but it can also expose more APIs. See TODO as an example of how to add custom +//! runtime APIs to your FRAME-based runtime. +//! +//! Similarly, for a runtime to be "compatible" with a node, the node must implement the full set of +//! host functions that the runtime at any point in time requires. Given the fact that a runtime can +//! evolve over time, and a blockchain node (typically) wishes to be capable of re-executing all the +//! previous blocks, this means that a node must always maintain support for the old host functions. +//! This also implies that adding a new host function is a big commitment and should be done with +//! care. This is why, for example, adding a new host function to Polkadot always requires an RFC. +//! +//! ## Node vs. Runtime +//! +//! A common question is: which components of the system end up being part of the node, and which +//! ones part of the runtime? +//! +//! Recall from [`crate::reference_docs::blockchain_state_machines`] that the runtime is the state +//! transition function. Anything that needs to influence how your blockchain's state is updated +//! should be a part of the runtime. For example, the logic around currency, governance, identity or +//! any other application-specific logic that has to do with the state is part of the runtime. +//! +//! Anything that does not have to do with the state-transition function and will only +//! facilitate/enable it is part of the node. For example, the database, networking, and even the +//! consensus algorithm are all node-side components. +//! +//! > The consensus is to your runtime what HTTP is to a web application. It is the underlying +//! > engine that enables trustless execution of the runtime in a distributed manner whilst +//! > maintaining a canonical outcome of that execution. +#![doc = simple_mermaid::mermaid!("../../../docs/mermaid/substrate_with_frame.mmd")] +//! +//! ## State +//! +//! From the previous sections, we know that the database component is part of the node, not the +//! runtime. We also hinted that a set of host functions ([`sp_io::storage`]) is how the runtime +//! issues commands to the node to read/write to the state. Let's dive deeper into this. +//! +//! The state of the blockchain, what we seek to come to consensus about, is indeed *kept* on the +//! node side. Nonetheless, the runtime is the only component that: +//! +//! 1. Can update the state. +//! 2. Can fully interpret the state. +//! +//! In fact, [`sp_core::storage::well_known_keys`] are the only state keys that the node side is +//! aware of. The rest of the state, including what logic the runtime has, what balance each user +//! has and such, are all only comprehensible to the runtime. +#![doc = simple_mermaid::mermaid!("../../../docs/mermaid/state.mmd")] +//! +//! In the above diagram, all of the state keys and values are opaque bytes to the node. The node +//! does not know what they mean, and it does not know the type of the corresponding value +//! (e.g. whether it is a number or a vector). In contrast, the runtime knows both the meaning of the +//! keys and the type of the values. +//! +//! This opaqueness is the fundamental reason why Substrate-based chains can fork-less-ly upgrade: +//! because the node-side code is kept oblivious to all of the details of the state transition +//! function. Therefore, the state transition function can freely upgrade without the node needing +//! to know. +//! +//!
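To make the fork-less upgrade point concrete, here is a deliberately naive sketch: at its core, a runtime upgrade is a write to the well-known `:code` key, which is roughly what `frame_system`'s `set_code` ends up doing after its additional checks (this simplified function is illustrative only):

```rust
use sp_core::storage::well_known_keys;

// Naive sketch only: overwrite the runtime code stored in state. From the
// node's perspective this is just another opaque key/value write.
fn naive_runtime_upgrade(new_wasm: &[u8]) {
    sp_io::storage::set(well_known_keys::CODE, new_wasm);
}
```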
## Native Runtime +//! +//! TODO +//! +//! +//! ## Example: Block Execution. +//! +//! TODO diff --git a/docs/mermaid/IA.mmd b/docs/mermaid/IA.mmd new file mode 100644 index 00000000000..8fcb74fa0a9 --- /dev/null +++ b/docs/mermaid/IA.mmd @@ -0,0 +1,14 @@ +flowchart + parity[paritytech.github.io] --> devhub[developer_hub] + + devhub --> polkadot_sdk + devhub --> reference_docs + devhub --> tutorial + + polkadot_sdk --> substrate + polkadot_sdk --> frame + polkadot_sdk --> cumulus + polkadot_sdk --> polkadot + polkadot_sdk --> xcm + + diff --git a/docs/mermaid/extrinsics.mmd b/docs/mermaid/extrinsics.mmd new file mode 100644 index 00000000000..4afd4ab8f75 --- /dev/null +++ b/docs/mermaid/extrinsics.mmd @@ -0,0 +1,5 @@ +flowchart TD + E(Extrinsic) ---> I(Inherent); + E --> T(Transaction) + T --> ST("Signed (aka. Transaction)") + T --> UT(Unsigned) diff --git a/docs/mermaid/polkadot_sdk_parachain.mmd b/docs/mermaid/polkadot_sdk_parachain.mmd new file mode 100644 index 00000000000..3f38fce046c --- /dev/null +++ b/docs/mermaid/polkadot_sdk_parachain.mmd @@ -0,0 +1,11 @@ +flowchart LR + subgraph Parachain[A Polkadot Parachain] + ParachainNode[Parachain Node] + ParachainRuntime[Parachain Runtime] + end + + FRAME -.-> ParachainRuntime + Substrate[Substrate Node Libraries] -.-> ParachainNoe + + CumulusC[Cumulus Node Libraries] -.-> ParachainNode + CumulusR[Cumulus Runtime Libraries] -.-> ParachainRuntime diff --git a/docs/mermaid/polkadot_sdk_polkadot.mmd b/docs/mermaid/polkadot_sdk_polkadot.mmd new file mode 100644 index 00000000000..3326cc59383 --- /dev/null +++ b/docs/mermaid/polkadot_sdk_polkadot.mmd @@ -0,0 +1,10 @@ +flowchart LR + + subgraph Polkadot[The Polkadot Relay Chain] + PolkadotNode[Polkadot Node] + PolkadotRuntime[Polkadot Runtime] + end + + FRAME -.-> PolkadotRuntime + Substrate[Substrate Node Libraries] -.-> PolkadotNode + diff --git a/docs/mermaid/polkadot_sdk_substrate.mmd b/docs/mermaid/polkadot_sdk_substrate.mmd new file mode 100644 index 00000000000..dfaf20d241f --- /dev/null +++ b/docs/mermaid/polkadot_sdk_substrate.mmd @@ -0,0 +1,8 @@ +flowchart LR + subgraph SubstrateChain[A Substrate-based blockchain] + Node + Runtime + end + + FRAME -.-> Runtime + Substrate[Substrate Node Libraries] -.-> Node diff --git a/docs/mermaid/state.mmd b/docs/mermaid/state.mmd new file mode 100644 index 00000000000..c72ecbfd156 --- /dev/null +++ b/docs/mermaid/state.mmd @@ -0,0 +1,16 @@ +flowchart TB + subgraph Node[Node's View Of The State 🙈] + direction LR + 0x1234 --> 0x2345 + 0x3456 --> 0x4567 + 0x5678 --> 0x6789 + :code --> code[wasm code] + end + + subgraph Runtime[Runtime's View Of The State 🙉] + direction LR + ab[alice's balance] --> abv[known value] + bb[bob's balance] --> bbv[known value] + cb[charlie's balance] --> cbv[known value] + c2[:code] --> c22[wasm code] + end diff --git a/docs/mermaid/stf.mmd b/docs/mermaid/stf.mmd new file mode 100644 index 00000000000..dd6c7c36de6 --- /dev/null +++ b/docs/mermaid/stf.mmd @@ -0,0 +1,21 @@ +flowchart LR + %%{init: {'flowchart' : {'curve' : 'linear'}}}%% + subgraph BData[Blockchain Database] + direction LR + BN[Block N] -.-> BN1[Block N+1] + end + + subgraph SData[State Database] + direction LR + SN[State N] -.-> SN1[State N+1] -.-> SN2[State N+2] + end + + BN --> STFN[STF] + SN --> STFN[STF] + STFN[STF] --> SN1 + + BN1 --> STFN1[STF] + SN1 --> STFN1[STF] + STFN1[STF] --> SN2 + + diff --git a/docs/mermaid/stf_simple.mmd b/docs/mermaid/stf_simple.mmd new file mode 100644 index 00000000000..5db20cf6156 --- /dev/null +++ 
b/docs/mermaid/stf_simple.mmd @@ -0,0 +1,4 @@ +flowchart LR + B[Block] --> STF + S[State] --> STF + STF --> NS[New State] diff --git a/docs/mermaid/substrate_client_runtime.mmd b/docs/mermaid/substrate_client_runtime.mmd index 23c3f849224..caab2b62302 100644 --- a/docs/mermaid/substrate_client_runtime.mmd +++ b/docs/mermaid/substrate_client_runtime.mmd @@ -1,10 +1,12 @@ graph TB subgraph Substrate direction LR - subgraph Client + subgraph Node end + subgraph Runtime end - Client --runtime-api--> Runtime - Runtime --host-functions--> Client + + Node --runtime-api--> Runtime + Runtime --host-functions--> Node end diff --git a/docs/mermaid/substrate_dev.mmd b/docs/mermaid/substrate_dev.mmd new file mode 100644 index 00000000000..fc331ce311f --- /dev/null +++ b/docs/mermaid/substrate_dev.mmd @@ -0,0 +1,2 @@ +flowchart LR + T[Using a Template] --> P[Writing Your Own FRAME-Based Pallet] --> C[Custom Node] diff --git a/docs/mermaid/substrate_simple.mmd b/docs/mermaid/substrate_simple.mmd index 475d8be5ef8..a752eaba625 100644 --- a/docs/mermaid/substrate_simple.mmd +++ b/docs/mermaid/substrate_simple.mmd @@ -1,7 +1,7 @@ graph TB subgraph Substrate direction LR - subgraph Client + subgraph Node end subgraph Runtime end diff --git a/docs/mermaid/substrate_with_frame.mmd b/docs/mermaid/substrate_with_frame.mmd index 12d072a3360..173c1757b95 100644 --- a/docs/mermaid/substrate_with_frame.mmd +++ b/docs/mermaid/substrate_with_frame.mmd @@ -1,7 +1,7 @@ graph TB subgraph Substrate direction LR - subgraph Client + subgraph Node Database Networking Consensus @@ -15,6 +15,6 @@ subgraph Substrate Identity end end - Client --runtime-api--> Runtime - Runtime --host-functions--> Client + Node --runtime-api--> Runtime + Runtime --host-functions--> Node end diff --git a/polkadot/node/network/protocol/src/request_response/mod.rs b/polkadot/node/network/protocol/src/request_response/mod.rs index 96f7adeb29b..2df3021343d 100644 --- a/polkadot/node/network/protocol/src/request_response/mod.rs +++ b/polkadot/node/network/protocol/src/request_response/mod.rs @@ -248,8 +248,8 @@ impl Protocol { name, fallback_names, max_request_size: 1_000, - /// Responses are just confirmation, in essence not even a bit. So 100 seems - /// plenty. + // Responses are just confirmation, in essence not even a bit. So 100 seems + // plenty. max_response_size: 100, request_timeout: DISPUTE_REQUEST_TIMEOUT, inbound_queue: tx, diff --git a/polkadot/node/primitives/src/lib.rs b/polkadot/node/primitives/src/lib.rs index dab72bb2a5e..be62145b999 100644 --- a/polkadot/node/primitives/src/lib.rs +++ b/polkadot/node/primitives/src/lib.rs @@ -442,7 +442,7 @@ pub struct CollationSecondedSignal { pub relay_parent: Hash, /// The statement about seconding the collation. /// - /// Anything else than [`Statement::Seconded`](Statement::Seconded) is forbidden here. + /// Anything else than [`Statement::Seconded`] is forbidden here. pub statement: SignedFullStatement, } diff --git a/substrate/Cargo.toml b/substrate/Cargo.toml deleted file mode 100644 index 8fb1be5821b..00000000000 --- a/substrate/Cargo.toml +++ /dev/null @@ -1,30 +0,0 @@ -[package] -name = "substrate" -description = "Next-generation framework for blockchain innovation" -license = "GPL-3.0-or-later WITH Classpath-exception-2.0" -homepage = "https://substrate.io" -repository.workspace = true -authors.workspace = true -edition.workspace = true -version = "1.0.0" -publish = false - -# The dependencies are only needed for docs. 
-[dependencies] -simple-mermaid = { git = "https://github.com/kianenigma/simple-mermaid.git", rev = "e48b187bcfd5cc75111acd9d241f1bd36604344b" } - -subkey = { path = "bin/utils/subkey" } -chain-spec-builder = { package = "staging-chain-spec-builder", path = "bin/utils/chain-spec-builder" } - -sc-service = { path = "client/service" } -sc-chain-spec = { path = "client/chain-spec" } -sc-cli = { path = "client/cli" } -sc-consensus-aura = { path = "client/consensus/aura" } -sc-consensus-babe = { path = "client/consensus/babe" } -sc-consensus-grandpa = { path = "client/consensus/grandpa" } -sc-consensus-beefy = { path = "client/consensus/beefy" } -sc-consensus-manual-seal = { path = "client/consensus/manual-seal" } -sc-consensus-pow = { path = "client/consensus/pow" } - -sp-runtime = { path = "primitives/runtime" } -frame-support = { path = "frame/support" } diff --git a/substrate/client/allocator/src/lib.rs b/substrate/client/allocator/src/lib.rs index e50d7d54c8e..70ed764bef8 100644 --- a/substrate/client/allocator/src/lib.rs +++ b/substrate/client/allocator/src/lib.rs @@ -18,7 +18,7 @@ //! Collection of allocator implementations. //! //! This crate provides the following allocator implementations: -//! - A freeing-bump allocator: [`FreeingBumpHeapAllocator`](freeing_bump::FreeingBumpHeapAllocator) +//! - A freeing-bump allocator: [`FreeingBumpHeapAllocator`] #![warn(missing_docs)] diff --git a/substrate/client/executor/src/lib.rs b/substrate/client/executor/src/lib.rs index 6ee0ab3512a..25bad81938f 100644 --- a/substrate/client/executor/src/lib.rs +++ b/substrate/client/executor/src/lib.rs @@ -58,7 +58,7 @@ pub use sc_executor_wasmtime::InstantiationStrategy as WasmtimeInstantiationStra /// Extracts the runtime version of a given runtime code. pub trait RuntimeVersionOf { - /// Extract [`RuntimeVersion`](sp_version::RuntimeVersion) of the given `runtime_code`. + /// Extract [`RuntimeVersion`] of the given `runtime_code`. fn runtime_version( &self, ext: &mut dyn Externalities, diff --git a/substrate/frame/balances/src/lib.rs b/substrate/frame/balances/src/lib.rs index d518f933df8..843bc351494 100644 --- a/substrate/frame/balances/src/lib.rs +++ b/substrate/frame/balances/src/lib.rs @@ -49,8 +49,7 @@ //! - **Total Issuance:** The total number of units in existence in a system. //! //! - **Reaping an account:** The act of removing an account by resetting its nonce. Happens after -//! its -//! total balance has become zero (or, strictly speaking, less than the Existential Deposit). +//! its total balance has become zero (or, strictly speaking, less than the Existential Deposit). //! //! - **Free Balance:** The portion of a balance that is not reserved. The free balance is the only //! balance that matters for most operations. @@ -59,24 +58,23 @@ //! Reserved balance can still be slashed, but only after all the free balance has been slashed. //! //! - **Imbalance:** A condition when some funds were credited or debited without equal and opposite -//! accounting -//! (i.e. a difference between total issuance and account balances). Functions that result in an -//! imbalance will return an object of the `Imbalance` trait that can be managed within your runtime -//! logic. (If an imbalance is simply dropped, it should automatically maintain any book-keeping -//! such as total issuance.) +//! accounting (i.e. a difference between total issuance and account balances). Functions that +//! result in an imbalance will return an object of the `Imbalance` trait that can be managed within +//! 
your runtime logic. (If an imbalance is simply dropped, it should automatically maintain any +//! book-keeping such as total issuance.) //! //! - **Lock:** A freeze on a specified amount of an account's free balance until a specified block -//! number. Multiple -//! locks always operate over the same funds, so they "overlay" rather than "stack". +//! number. Multiple locks always operate over the same funds, so they "overlay" rather than +//! "stack". //! //! ### Implementations //! //! The Balances pallet provides implementations for the following traits. If these traits provide //! the functionality that you need, then you can avoid coupling with the Balances pallet. //! -//! - [`Currency`](frame_support::traits::Currency): Functions for dealing with a +//! - [`Currency`]: Functions for dealing with a //! fungible assets system. -//! - [`ReservableCurrency`](frame_support::traits::ReservableCurrency): +//! - [`ReservableCurrency`] //! - [`NamedReservableCurrency`](frame_support::traits::NamedReservableCurrency): //! Functions for dealing with assets that can be reserved from an account. //! - [`LockableCurrency`](frame_support::traits::LockableCurrency): Functions for @@ -105,7 +103,7 @@ //! ``` //! use frame_support::traits::Currency; //! # pub trait Config: frame_system::Config { -//! # type Currency: Currency; +//! # type Currency: Currency; //! # } //! //! pub type BalanceOf = <::Currency as Currency<::AccountId>>::Balance; @@ -120,26 +118,26 @@ //! use frame_support::traits::{WithdrawReasons, LockableCurrency}; //! use sp_runtime::traits::Bounded; //! pub trait Config: frame_system::Config { -//! type Currency: LockableCurrency>; +//! type Currency: LockableCurrency>; //! } //! # struct StakingLedger { -//! # stash: ::AccountId, -//! # total: <::Currency as frame_support::traits::Currency<::AccountId>>::Balance, -//! # phantom: std::marker::PhantomData, +//! # stash: ::AccountId, +//! # total: <::Currency as frame_support::traits::Currency<::AccountId>>::Balance, +//! # phantom: std::marker::PhantomData, //! # } //! # const STAKING_ID: [u8; 8] = *b"staking "; //! //! fn update_ledger( -//! controller: &T::AccountId, -//! ledger: &StakingLedger +//! controller: &T::AccountId, +//! ledger: &StakingLedger //! ) { -//! T::Currency::set_lock( -//! STAKING_ID, -//! &ledger.stash, -//! ledger.total, -//! WithdrawReasons::all() -//! ); -//! // >::insert(controller, ledger); // Commented out as we don't have access to Staking's storage here. +//! T::Currency::set_lock( +//! STAKING_ID, +//! &ledger.stash, +//! ledger.total, +//! WithdrawReasons::all() +//! ); +//! // >::insert(controller, ledger); // Commented out as we don't have access to Staking's storage here. //! } //! # fn main() {} //! ``` diff --git a/substrate/frame/bounties/src/migrations/v4.rs b/substrate/frame/bounties/src/migrations/v4.rs index 936bac11700..4e6ba934481 100644 --- a/substrate/frame/bounties/src/migrations/v4.rs +++ b/substrate/frame/bounties/src/migrations/v4.rs @@ -110,7 +110,7 @@ pub fn migrate< } /// Some checks prior to migration. This can be linked to -/// [`frame_support::traits::OnRuntimeUpgrade::pre_upgrade`] for further testing. +/// `frame_support::traits::OnRuntimeUpgrade::pre_upgrade` for further testing. /// /// Panics if anything goes wrong. 
pub fn pre_migration>( @@ -164,7 +164,7 @@ pub fn pre_migration>( diff --git a/substrate/frame/collective/src/migrations/v4.rs b/substrate/frame/collective/src/migrations/v4.rs index b3326b4251c..300dff23d8e 100644 --- a/substrate/frame/collective/src/migrations/v4.rs +++ b/substrate/frame/collective/src/migrations/v4.rs @@ -76,7 +76,7 @@ pub fn migrate>(old_pallet_name: N) { @@ -104,7 +104,7 @@ pub fn pre_migrate>(old_p } /// Some checks for after migration. This can be linked to -/// [`frame_support::traits::OnRuntimeUpgrade::post_upgrade`] for further testing. +/// `frame_support::traits::OnRuntimeUpgrade::post_upgrade` for further testing. /// /// Panics if anything goes wrong. pub fn post_migrate>(old_pallet_name: N) { diff --git a/substrate/frame/election-provider-multi-phase/src/mock.rs b/substrate/frame/election-provider-multi-phase/src/mock.rs index 15a50165ff3..7bdd329d9b0 100644 --- a/substrate/frame/election-provider-multi-phase/src/mock.rs +++ b/substrate/frame/election-provider-multi-phase/src/mock.rs @@ -80,11 +80,7 @@ frame_election_provider_support::generate_solution_type!( /// All events of this pallet. pub(crate) fn multi_phase_events() -> Vec> { - System::events() - .into_iter() - .map(|r| r.event) - .filter_map(|e| if let RuntimeEvent::MultiPhase(inner) = e { Some(inner) } else { None }) - .collect::>() + System::read_events_for_pallet::>() } /// To from `now` to block `n`. diff --git a/substrate/frame/elections-phragmen/src/migrations/v4.rs b/substrate/frame/elections-phragmen/src/migrations/v4.rs index 7e946371f5c..e78465cd618 100644 --- a/substrate/frame/elections-phragmen/src/migrations/v4.rs +++ b/substrate/frame/elections-phragmen/src/migrations/v4.rs @@ -69,7 +69,7 @@ pub fn migrate>(new_pallet_name: N) -> Weight { } /// Some checks prior to migration. This can be linked to -/// [`frame_support::traits::OnRuntimeUpgrade::pre_upgrade`] for further testing. +/// `frame_support::traits::OnRuntimeUpgrade::pre_upgrade` for further testing. /// /// Panics if anything goes wrong. pub fn pre_migration>(new: N) { @@ -97,7 +97,7 @@ pub fn pre_migration>(new: N) { } /// Some checks for after migration. This can be linked to -/// [`frame_support::traits::OnRuntimeUpgrade::post_upgrade`] for further testing. +/// `frame_support::traits::OnRuntimeUpgrade::post_upgrade` for further testing. /// /// Panics if anything goes wrong. pub fn post_migration() { diff --git a/substrate/frame/elections-phragmen/src/migrations/v5.rs b/substrate/frame/elections-phragmen/src/migrations/v5.rs index 6fac923703f..6e360aa8b8c 100644 --- a/substrate/frame/elections-phragmen/src/migrations/v5.rs +++ b/substrate/frame/elections-phragmen/src/migrations/v5.rs @@ -71,7 +71,7 @@ pub fn pre_migrate_fn(to_migrate: Vec) -> Box() { diff --git a/substrate/frame/examples/kitchensink/src/lib.rs b/substrate/frame/examples/kitchensink/src/lib.rs index 89759dd0bf6..18429bc967d 100644 --- a/substrate/frame/examples/kitchensink/src/lib.rs +++ b/substrate/frame/examples/kitchensink/src/lib.rs @@ -292,9 +292,8 @@ pub mod pallet { } } - /// Allows you to define an enum on the pallet which will then instruct - /// `construct_runtime` to amalgamate all similarly-named enums from other - /// pallets into an aggregate enum. + /// Allows you to define an enum on the pallet which will then instruct `construct_runtime` to + /// amalgamate all similarly-named enums from other pallets into an aggregate enum. 
#[pallet::composite_enum] pub enum HoldReason { Staking, diff --git a/substrate/frame/fast-unstake/src/types.rs b/substrate/frame/fast-unstake/src/types.rs index 15d0a327e91..3fb5720861f 100644 --- a/substrate/frame/fast-unstake/src/types.rs +++ b/substrate/frame/fast-unstake/src/types.rs @@ -39,6 +39,7 @@ impl frame_support::traits::Get for MaxChecking { } } +#[docify::export] pub(crate) type BalanceOf = <::Currency as Currency<::AccountId>>::Balance; /// An unstake request. diff --git a/substrate/frame/grandpa/src/migrations/v4.rs b/substrate/frame/grandpa/src/migrations/v4.rs index 8604296b6e5..9daa818071f 100644 --- a/substrate/frame/grandpa/src/migrations/v4.rs +++ b/substrate/frame/grandpa/src/migrations/v4.rs @@ -63,7 +63,7 @@ pub fn migrate>(new_pallet_name: N) -> Weight { } /// Some checks prior to migration. This can be linked to -/// [`frame_support::traits::OnRuntimeUpgrade::pre_upgrade`] for further testing. +/// `frame_support::traits::OnRuntimeUpgrade::pre_upgrade` for further testing. /// /// Panics if anything goes wrong. pub fn pre_migration>(new: N) { @@ -99,7 +99,7 @@ pub fn pre_migration>(new: N) { } /// Some checks for after migration. This can be linked to -/// [`frame_support::traits::OnRuntimeUpgrade::post_upgrade`] for further testing. +/// `frame_support::traits::OnRuntimeUpgrade::post_upgrade` for further testing. /// /// Panics if anything goes wrong. pub fn post_migration() { diff --git a/substrate/frame/membership/src/migrations/v4.rs b/substrate/frame/membership/src/migrations/v4.rs index 38e97af51a0..9b80aca8684 100644 --- a/substrate/frame/membership/src/migrations/v4.rs +++ b/substrate/frame/membership/src/migrations/v4.rs @@ -77,7 +77,7 @@ pub fn migrate>(old_pallet_name: N, new_pallet_name: N) { @@ -105,7 +105,7 @@ pub fn pre_migrate>(old_pallet_name: N, new_ } /// Some checks for after migration. This can be linked to -/// [`frame_support::traits::OnRuntimeUpgrade::post_upgrade`] for further testing. +/// `frame_support::traits::OnRuntimeUpgrade::post_upgrade` for further testing. /// /// Panics if anything goes wrong. pub fn post_migrate>(old_pallet_name: N, new_pallet_name: N) { diff --git a/substrate/frame/session/src/historical/offchain.rs b/substrate/frame/session/src/historical/offchain.rs index 1b4d53b74b4..95f4d762949 100644 --- a/substrate/frame/session/src/historical/offchain.rs +++ b/substrate/frame/session/src/historical/offchain.rs @@ -17,13 +17,11 @@ //! Off-chain logic for creating a proof based data provided by on-chain logic. //! -//! Validator-set extracting an iterator from an off-chain worker stored list containing -//! historical validator-sets. -//! Based on the logic of historical slashing, but the validation is done off-chain. +//! Validator-set extracting an iterator from an off-chain worker stored list containing historical +//! validator-sets. Based on the logic of historical slashing, but the validation is done off-chain. //! Use [`fn store_current_session_validator_set_to_offchain()`](super::onchain) to store the -//! required data to the offchain validator set. -//! This is used in conjunction with [`ProvingTrie`](super::ProvingTrie) and -//! the off-chain indexing API. +//! required data to the offchain validator set. This is used in conjunction with [`ProvingTrie`] +//! and the off-chain indexing API. 
use sp_runtime::{ offchain::storage::{MutateStorageError, StorageRetrievalError, StorageValueRef}, diff --git a/substrate/frame/src/lib.rs b/substrate/frame/src/lib.rs index 1a8350405a8..59cd9237888 100644 --- a/substrate/frame/src/lib.rs +++ b/substrate/frame/src/lib.rs @@ -15,10 +15,6 @@ // See the License for the specific language governing permissions and // limitations under the License. -//! > Made for [![polkadot]](https://polkadot.network) -//! -//! [polkadot]: https://img.shields.io/badge/polkadot-E6007A?style=for-the-badge&logo=polkadot&logoColor=white -//! //! # FRAME //! //! ```no_compile @@ -34,14 +30,21 @@ //! > **F**ramework for **R**untime **A**ggregation of **M**odularized **E**ntities: Substrate's //! > State Transition Function (Runtime) Framework. //! +//! ## Documentation +//! +//! See [`polkadot_sdk::frame`](../developer_hub/polkadot_sdk/frame_runtime/index.html). +//! //! ## Warning: Experimental //! //! This crate and all of its content is experimental, and should not yet be used in production. //! -//! ## Getting Started +//! ## Underlying dependencies //! -//! TODO: link to `developer_hub::polkadot_sdk::frame`. The `developer_hub` hasn't been published -//! yet, this can be updated once it is linkable. +//! This crate is an amalgamation of multiple other crates that are often used together to compose a +//! pallet. It is not necessary to use it, and it may fall short for certain purposes. +//! +//! In short, this crate only re-exports types and traits from multiple sources. All of these +//! sources are listed (and re-exported again) in [`deps`]. #![cfg_attr(not(feature = "std"), no_std)] #![cfg(feature = "experimental")] @@ -54,9 +57,19 @@ /// `#[pallet::bar]` inside the mod. pub use frame_support::pallet; +pub use frame_support::pallet_macros::{import_section, pallet_section}; + /// The logging library of the runtime. Can normally be the classic `log` crate. pub use log; +/// A list of all macros used within the main [`pallet`] macro. +/// +/// Note: All of these macros are "stubs" and not really usable outside `#[pallet] mod pallet { .. +/// }`. They are mainly provided for documentation and IDE support. +pub mod pallet_macros { + pub use frame_support::{derive_impl, pallet, pallet_macros::*}; +} + /// The main prelude of FRAME. /// /// This prelude should almost always be the first line of code in any pallet or runtime. @@ -78,9 +91,6 @@ pub mod prelude { /// Pallet prelude of `frame-support`. /// /// Note: this needs to revised once `frame-support` evolves. - // `frame-support` will be break down https://github.com/paritytech/polkadot-sdk/issues/127 and its reexports will - // most likely change. These wildcard reexportings can be optimized once `frame-support` has - // changed. #[doc(no_inline)] pub use frame_support::pallet_prelude::*; @@ -156,6 +166,9 @@ pub mod runtime { /// Types to define your runtime version. pub use sp_version::{create_runtime_str, runtime_version, RuntimeVersion}; + /// Macro to implement runtime APIs. + pub use sp_api::impl_runtime_apis; + #[cfg(feature = "std")] pub use sp_version::NativeVersion; } @@ -180,9 +193,6 @@ pub mod runtime { pub use sp_inherents::{CheckInherentsResult, InherentData}; pub use sp_runtime::ApplyExtrinsicResult; - /// Macro to implement runtime APIs. 
- pub use sp_api::impl_runtime_apis; - pub use frame_system_rpc_runtime_api::*; pub use sp_api::{self, *}; pub use sp_block_builder::*; diff --git a/substrate/frame/support/procedural/src/lib.rs b/substrate/frame/support/procedural/src/lib.rs index ec411891885..682b135fa14 100644 --- a/substrate/frame/support/procedural/src/lib.rs +++ b/substrate/frame/support/procedural/src/lib.rs @@ -1097,8 +1097,11 @@ pub fn weight(_: TokenStream, _: TokenStream) -> TokenStream { pallet_macro_stub() } -/// Compact encoding for arguments can be achieved via `#[pallet::compact]`. The function must -/// return a `DispatchResultWithPostInfo` or `DispatchResult`. +/// +/// --- +/// +/// **Rust-Analyzer users**: See the documentation of the Rust item in +/// [`frame_support::pallet_macros::call`](../../frame_support/pallet_macros/attr.call.html). #[proc_macro_attribute] pub fn compact(_: TokenStream, _: TokenStream) -> TokenStream { pallet_macro_stub() @@ -1108,7 +1111,7 @@ pub fn compact(_: TokenStream, _: TokenStream) -> TokenStream { /// --- /// /// **Rust-Analyzer users**: See the documentation of the Rust item in -/// `frame_support::pallet_macros::call`. +/// [`frame_support::pallet_macros::call`](../../frame_support/pallet_macros/attr.call.html). #[proc_macro_attribute] pub fn call(_: TokenStream, _: TokenStream) -> TokenStream { pallet_macro_stub() @@ -1117,41 +1120,10 @@ pub fn call(_: TokenStream, _: TokenStream) -> TokenStream { /// Each dispatchable may also be annotated with the `#[pallet::call_index($idx)]` attribute, /// which explicitly defines the codec index for the dispatchable function in the `Call` enum. /// -/// All call indexes start from 0, until it encounters a dispatchable function with a defined -/// call index. The dispatchable function that lexically follows the function with a defined -/// call index will have that call index, but incremented by 1, e.g. if there are 3 -/// dispatchable functions `fn foo`, `fn bar` and `fn qux` in that order, and only `fn bar` -/// has a call index of 10, then `fn qux` will have an index of 11, instead of 1. -/// -/// All arguments must implement [`Debug`], [`PartialEq`], [`Eq`], `Decode`, `Encode`, and -/// [`Clone`]. For ease of use, bound by the trait `frame_support::pallet_prelude::Member`. -/// -/// If no `#[pallet::call]` exists, then a default implementation corresponding to the -/// following code is automatically generated: -/// -/// ```ignore -/// #[pallet::call] -/// impl Pallet {} -/// ``` -/// -/// **WARNING**: modifying dispatchables, changing their order, removing some, etc., must be -/// done with care. Indeed this will change the outer runtime call type (which is an enum with -/// one variant per pallet), this outer runtime call can be stored on-chain (e.g. in -/// `pallet-scheduler`). Thus migration might be needed. To mitigate against some of this, the -/// `#[pallet::call_index($idx)]` attribute can be used to fix the order of the dispatchable so -/// that the `Call` enum encoding does not change after modification. As a general rule of -/// thumb, it is therefore adventageous to always add new calls to the end so you can maintain -/// the existing order of calls. -/// -/// ### Macro expansion -/// -/// The macro creates an enum `Call` with one variant per dispatchable. This enum implements: -/// [`Clone`], [`Eq`], [`PartialEq`], [`Debug`] (with stripped implementation in `not("std")`), -/// `Encode`, `Decode`, `GetDispatchInfo`, `GetCallName`, `GetCallIndex` and -/// `UnfilteredDispatchable`. 
+/// --- /// -/// The macro implements the `Callable` trait on `Pallet` and a function `call_functions` -/// which returns the dispatchable metadata. +/// **Rust-Analyzer users**: See the documentation of the Rust item in +/// [`frame_support::pallet_macros::call`](../../frame_support/pallet_macros/attr.call.html). #[proc_macro_attribute] pub fn call_index(_: TokenStream, _: TokenStream) -> TokenStream { pallet_macro_stub() diff --git a/substrate/frame/support/procedural/src/pallet/parse/call.rs b/substrate/frame/support/procedural/src/pallet/parse/call.rs index 519e1e61895..f78f2baa9d1 100644 --- a/substrate/frame/support/procedural/src/pallet/parse/call.rs +++ b/substrate/frame/support/procedural/src/pallet/parse/call.rs @@ -26,6 +26,7 @@ use syn::{spanned::Spanned, ExprClosure}; mod keyword { syn::custom_keyword!(Call); syn::custom_keyword!(OriginFor); + syn::custom_keyword!(RuntimeOrigin); syn::custom_keyword!(weight); syn::custom_keyword!(call_index); syn::custom_keyword!(compact); @@ -158,10 +159,10 @@ impl syn::parse::Parse for ArgAttrIsCompact { } } -/// Check the syntax is `OriginFor` or `&OriginFor`. +/// Check the syntax is `OriginFor`, `&OriginFor` or `T::RuntimeOrigin`. pub fn check_dispatchable_first_arg_type(ty: &syn::Type, is_ref: bool) -> syn::Result<()> { - pub struct CheckDispatchableFirstArg(bool); - impl syn::parse::Parse for CheckDispatchableFirstArg { + pub struct CheckOriginFor(bool); + impl syn::parse::Parse for CheckOriginFor { fn parse(input: syn::parse::ParseStream) -> syn::Result { let is_ref = input.parse::().is_ok(); input.parse::()?; @@ -173,14 +174,27 @@ pub fn check_dispatchable_first_arg_type(ty: &syn::Type, is_ref: bool) -> syn::R } } - let result = syn::parse2::(ty.to_token_stream()); - return match result { - Ok(CheckDispatchableFirstArg(has_ref)) if is_ref == has_ref => Ok(()), - _ => { + pub struct CheckRuntimeOrigin; + impl syn::parse::Parse for CheckRuntimeOrigin { + fn parse(input: syn::parse::ParseStream) -> syn::Result { + input.parse::()?; + input.parse::()?; + input.parse::()?; + + Ok(Self) + } + } + + let result_origin_for = syn::parse2::(ty.to_token_stream()); + let result_runtime_origin = syn::parse2::(ty.to_token_stream()); + return match (result_origin_for, result_runtime_origin) { + (Ok(CheckOriginFor(has_ref)), _) if is_ref == has_ref => Ok(()), + (_, Ok(_)) => Ok(()), + (_, _) => { let msg = if is_ref { "Invalid type: expected `&OriginFor`" } else { - "Invalid type: expected `OriginFor`" + "Invalid type: expected `OriginFor` or `T::RuntimeOrigin`" }; return Err(syn::Error::new(ty.span(), msg)) }, @@ -282,8 +296,8 @@ impl CallDef { 0 if dev_mode => CallWeightDef::DevModeDefault, 0 => return Err(syn::Error::new( method.sig.span(), - "A pallet::call requires either a concrete `#[pallet::weight($expr)]` or an - inherited weight from the `#[pallet:call(weight($type))]` attribute, but + "A pallet::call requires either a concrete `#[pallet::weight($expr)]` or an + inherited weight from the `#[pallet:call(weight($type))]` attribute, but none were given.", )), 1 => match weight_attrs.pop().unwrap() { diff --git a/substrate/frame/support/src/dispatch.rs b/substrate/frame/support/src/dispatch.rs index e57227f9b40..c81dba12766 100644 --- a/substrate/frame/support/src/dispatch.rs +++ b/substrate/frame/support/src/dispatch.rs @@ -36,7 +36,8 @@ use sp_weights::Weight; /// returned from a dispatch. 
pub type DispatchResultWithPostInfo = sp_runtime::DispatchResultWithInfo; -/// Unaugmented version of `DispatchResultWithPostInfo` that can be returned from +#[docify::export] +/// Un-augmented version of `DispatchResultWithPostInfo` that can be returned from /// dispatchable functions and is automatically converted to the augmented type. Should be /// used whenever the `PostDispatchInfo` does not need to be overwritten. As this should /// be the common case it is the implicit return type when none is specified. diff --git a/substrate/frame/support/src/lib.rs b/substrate/frame/support/src/lib.rs index 2ec3b24db0c..fd348f62b4f 100644 --- a/substrate/frame/support/src/lib.rs +++ b/substrate/frame/support/src/lib.rs @@ -47,6 +47,8 @@ pub mod __private { pub use sp_core::{OpaqueMetadata, Void}; pub use sp_core_hashing_proc_macro; pub use sp_inherents; + #[cfg(feature = "std")] + pub use sp_io::TestExternalities; pub use sp_io::{self, hashing, storage::root as storage_root}; pub use sp_metadata_ir as metadata_ir; #[cfg(feature = "std")] @@ -2226,13 +2228,159 @@ pub use frame_support_procedural::pallet; /// Contains macro stubs for all of the pallet:: macros pub mod pallet_macros { pub use frame_support_procedural::{ - call_index, compact, composite_enum, config, disable_frame_system_supertrait_check, error, - event, extra_constants, feeless_if, generate_deposit, generate_store, getter, hooks, + composite_enum, config, disable_frame_system_supertrait_check, error, event, + extra_constants, feeless_if, generate_deposit, generate_store, getter, hooks, import_section, inherent, no_default, no_default_bounds, origin, pallet_section, storage_prefix, storage_version, type_value, unbounded, validate_unsigned, weight, whitelist_storage, }; + /// Allows a pallet to declare a set of functions as a *dispatchable extrinsic*. In + /// slightly simplified terms, this macro declares the set of "transactions" of a pallet. + /// + /// > The exact definition of **extrinsic** can be found in + /// > [`sp_runtime::generic::UncheckedExtrinsic`]. + /// + /// A **dispatchable** is a common term in FRAME, referring to process of constructing a + /// function, and dispatching it with the correct inputs. This is commonly used with + /// extrinsics, for example "an extrinsic has been dispatched". See + /// [`sp_runtime::traits::Dispatchable`] and [`crate::traits::UnfilteredDispatchable`]. + /// + /// ## Call Enum + /// + /// The macro is called `call` (rather than `#[pallet::extrinsics]`) because of the + /// generation of a `enum Call`. This enum contains only the encoding of the function + /// arguments of the dispatchable, alongside the information needed to route it to the + /// correct function. 
+ /// + /// ``` + /// #[frame_support::pallet(dev_mode)] + /// pub mod custom_pallet { + /// # use frame_support::pallet_prelude::*; + /// # use frame_system::pallet_prelude::*; + /// # #[pallet::config] + /// # pub trait Config: frame_system::Config {} + /// # #[pallet::pallet] + /// # pub struct Pallet(_); + /// # use frame_support::traits::BuildGenesisConfig; + /// #[pallet::call] + /// impl Pallet { + /// pub fn some_dispatchable(_origin: OriginFor, _input: u32) -> DispatchResult { + /// Ok(()) + /// } + /// pub fn other(_origin: OriginFor, _input: u64) -> DispatchResult { + /// Ok(()) + /// } + /// } + /// + /// // generates something like: + /// // enum Call { + /// // some_dispatchable { input: u32 } + /// // other { input: u64 } + /// // } + /// } + /// + /// fn main() { + /// # use frame_support::{derive_impl, construct_runtime}; + /// # use frame_support::__private::codec::Encode; + /// # use frame_support::__private::TestExternalities; + /// # use frame_support::traits::UnfilteredDispatchable; + /// # impl custom_pallet::Config for Runtime {} + /// # #[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] + /// # impl frame_system::Config for Runtime { + /// # type Block = frame_system::mocking::MockBlock; + /// # } + /// construct_runtime! { + /// pub struct Runtime { + /// System: frame_system, + /// Custom: custom_pallet + /// } + /// } + /// + /// # TestExternalities::new_empty().execute_with(|| { + /// let origin: RuntimeOrigin = frame_system::RawOrigin::Signed(10).into(); + /// // calling into a dispatchable from within the runtime is simply a function call. + /// let _ = custom_pallet::Pallet::::some_dispatchable(origin.clone(), 10); + /// + /// // calling into a dispatchable from the outer world involves constructing the bytes of + /// let call = custom_pallet::Call::::some_dispatchable { input: 10 }; + /// let _ = call.clone().dispatch_bypass_filter(origin); + /// + /// // the routing of a dispatchable is simply done through encoding of the `Call` enum, + /// // which is the index of the variant, followed by the arguments. + /// assert_eq!(call.encode(), vec![0u8, 10, 0, 0, 0]); + /// + /// // notice how in the encoding of the second function, the first byte is different and + /// // referring to the second variant of `enum Call`. + /// let call = custom_pallet::Call::::other { input: 10 }; + /// assert_eq!(call.encode(), vec![1u8, 10, 0, 0, 0, 0, 0, 0, 0]); + /// # }); + /// } + /// ``` + /// + /// Further properties of dispatchable functions are as follows: + /// + /// - Unless if annotated by `dev_mode`, it must contain [`weight`] to denote the + /// pre-dispatch weight consumed. + /// - The dispatchable must declare its index via [`call_index`], which can override the + /// position of a function in `enum Call`. + /// - The first argument is always an `OriginFor` (or `T::RuntimeOrigin`). + /// - The return type is always [`crate::dispatch::DispatchResult`] (or + /// [`crate::dispatch::DispatchResultWithPostInfo`]). + /// + /// **WARNING**: modifying dispatchables, changing their order (i.e. using [`call_index`]), + /// removing some, etc., must be done with care. This will change the encoding of the , and + /// the call can be stored on-chain (e.g. in `pallet-scheduler`). Thus, migration might be + /// needed. This is why the use of `call_index` is mandatory by default in FRAME. 
+ /// + /// ## Default Behavior + /// + /// If no `#[pallet::call]` exists, then a default implementation corresponding to the + /// following code is automatically generated: + /// + /// ```ignore + /// #[pallet::call] + /// impl Pallet {} + /// ``` + pub use frame_support_procedural::call; + + /// Enforce the index of a variant in the generated `enum Call`. See [`call`] for more + /// information. + /// + /// All call indexes start from 0, until it encounters a dispatchable function with a + /// defined call index. The dispatchable function that lexically follows the function with + /// a defined call index will have that call index, but incremented by 1, e.g. if there are + /// 3 dispatchable functions `fn foo`, `fn bar` and `fn qux` in that order, and only `fn + /// bar` has a call index of 10, then `fn qux` will have an index of 11, instead of 1. + pub use frame_support_procedural::call_index; + + /// Declares the arguments of a [`call`] function to be encoded using + /// [`codec::Compact`]. This will results in smaller extrinsic encoding. + /// + /// A common example of `compact` is for numeric values that are often times far far away + /// from their theoretical maximum. For example, in the context of a crypto-currency, the + /// balance of an individual account is oftentimes way less than what the numeric type + /// allows. In all such cases, using `compact` is sensible. + /// + /// ``` + /// #[frame_support::pallet(dev_mode)] + /// pub mod custom_pallet { + /// # use frame_support::pallet_prelude::*; + /// # use frame_system::pallet_prelude::*; + /// # #[pallet::config] + /// # pub trait Config: frame_system::Config {} + /// # #[pallet::pallet] + /// # pub struct Pallet(_); + /// # use frame_support::traits::BuildGenesisConfig; + /// #[pallet::call] + /// impl Pallet { + /// pub fn some_dispatchable(_origin: OriginFor, #[pallet::compact] _input: u32) -> DispatchResult { + /// Ok(()) + /// } + /// } + /// } + pub use frame_support_procedural::compact; + /// Allows you to define the genesis configuration for the pallet. /// /// Item is defined as either an enum or a struct. It needs to be public and implement the diff --git a/substrate/frame/support/src/migrations.rs b/substrate/frame/support/src/migrations.rs index a9eb460421f..bfd62c8611c 100644 --- a/substrate/frame/support/src/migrations.rs +++ b/substrate/frame/support/src/migrations.rs @@ -224,8 +224,7 @@ impl PalletVersionToStorageVersionHelper for T { } } -/// Migrate from the `PalletVersion` struct to the new -/// [`StorageVersion`](crate::traits::StorageVersion) struct. +/// Migrate from the `PalletVersion` struct to the new [`StorageVersion`] struct. /// /// This will remove all `PalletVersion's` from the state and insert the current storage version. pub fn migrate_from_pallet_version_to_storage_version< diff --git a/substrate/frame/support/src/storage/child.rs b/substrate/frame/support/src/storage/child.rs index e54002d18db..76e6f4ee402 100644 --- a/substrate/frame/support/src/storage/child.rs +++ b/substrate/frame/support/src/storage/child.rs @@ -165,9 +165,9 @@ pub fn kill_storage(child_info: &ChildInfo, limit: Option) -> KillStorageRe /// guarantee that the subsequent call is in a new block; in this case the previous call's result /// cursor need not be passed in an a `None` may be passed instead. This exception may be useful /// then making this call solely from a block-hook such as `on_initialize`. -/// -/// Returns [`MultiRemovalResults`](sp_io::MultiRemovalResults) to inform about the result. 
Once the -/// resultant `maybe_cursor` field is `None`, then no further items remain to be deleted. + +/// Returns [`MultiRemovalResults`] to inform about the result. Once the resultant `maybe_cursor` +/// field is `None`, then no further items remain to be deleted. /// /// NOTE: After the initial call for any given child storage, it is important that no keys further /// keys are inserted. If so, then they may or may not be deleted by subsequent calls. diff --git a/substrate/frame/support/src/storage/mod.rs b/substrate/frame/support/src/storage/mod.rs index 7f39a3fdad8..c77de1f976f 100644 --- a/substrate/frame/support/src/storage/mod.rs +++ b/substrate/frame/support/src/storage/mod.rs @@ -1583,7 +1583,7 @@ pub trait StorageTryAppend: StorageDecodeLength + private::Sealed { fn bound() -> usize; } -/// Storage value that is capable of [`StorageTryAppend`](crate::storage::StorageTryAppend). +/// Storage value that is capable of [`StorageTryAppend`]. pub trait TryAppendValue, I: Encode> { /// Try and append the `item` into the storage item. /// @@ -1612,7 +1612,7 @@ where } } -/// Storage map that is capable of [`StorageTryAppend`](crate::storage::StorageTryAppend). +/// Storage map that is capable of [`StorageTryAppend`]. pub trait TryAppendMap, I: Encode> { /// Try and append the `item` into the storage map at the given `key`. /// @@ -1646,7 +1646,7 @@ where } } -/// Storage double map that is capable of [`StorageTryAppend`](crate::storage::StorageTryAppend). +/// Storage double map that is capable of [`StorageTryAppend`]. pub trait TryAppendDoubleMap, I: Encode> { /// Try and append the `item` into the storage double map at the given `key`. /// diff --git a/substrate/frame/support/test/tests/construct_runtime_ui/deprecated_where_block.stderr b/substrate/frame/support/test/tests/construct_runtime_ui/deprecated_where_block.stderr index 08954bb6ab5..a791c313b4a 100644 --- a/substrate/frame/support/test/tests/construct_runtime_ui/deprecated_where_block.stderr +++ b/substrate/frame/support/test/tests/construct_runtime_ui/deprecated_where_block.stderr @@ -97,7 +97,7 @@ note: required because it appears within the type `RuntimeEvent` | ||_- in this macro invocation ... | note: required by a bound in `EncodeLike` - --> $CARGO/parity-scale-codec-3.6.4/src/encode_like.rs + --> $CARGO/parity-scale-codec-3.6.5/src/encode_like.rs | | pub trait EncodeLike: Sized + Encode {} | ^^^^^ required by this bound in `EncodeLike` @@ -129,7 +129,7 @@ note: required because it appears within the type `RuntimeEvent` | ||_- in this macro invocation ... | note: required by a bound in `Decode` - --> $CARGO/parity-scale-codec-3.6.4/src/codec.rs + --> $CARGO/parity-scale-codec-3.6.5/src/codec.rs | | pub trait Decode: Sized { | ^^^^^ required by this bound in `Decode` @@ -301,7 +301,7 @@ note: required because it appears within the type `RuntimeCall` | ||_- in this macro invocation ... | note: required by a bound in `EncodeLike` - --> $CARGO/parity-scale-codec-3.6.4/src/encode_like.rs + --> $CARGO/parity-scale-codec-3.6.5/src/encode_like.rs | | pub trait EncodeLike: Sized + Encode {} | ^^^^^ required by this bound in `EncodeLike` @@ -334,7 +334,7 @@ note: required because it appears within the type `RuntimeCall` | ||_- in this macro invocation ... 
| note: required by a bound in `Decode` - --> $CARGO/parity-scale-codec-3.6.4/src/codec.rs + --> $CARGO/parity-scale-codec-3.6.5/src/codec.rs | | pub trait Decode: Sized { | ^^^^^ required by this bound in `Decode` diff --git a/substrate/frame/support/test/tests/pallet_ui/call_invalid_origin_type.stderr b/substrate/frame/support/test/tests/pallet_ui/call_invalid_origin_type.stderr index c04729a2438..1f814eaa407 100644 --- a/substrate/frame/support/test/tests/pallet_ui/call_invalid_origin_type.stderr +++ b/substrate/frame/support/test/tests/pallet_ui/call_invalid_origin_type.stderr @@ -1,4 +1,4 @@ -error: Invalid type: expected `OriginFor` +error: Invalid type: expected `OriginFor` or `T::RuntimeOrigin` --> tests/pallet_ui/call_invalid_origin_type.rs:34:22 | 34 | pub fn foo(origin: u8) {} diff --git a/substrate/frame/system/Cargo.toml b/substrate/frame/system/Cargo.toml index b61b4d531e2..8f7e0052fe0 100644 --- a/substrate/frame/system/Cargo.toml +++ b/substrate/frame/system/Cargo.toml @@ -25,7 +25,7 @@ sp-runtime = { path = "../../primitives/runtime", default-features = false, feat sp-std = { path = "../../primitives/std", default-features = false} sp-version = { path = "../../primitives/version", default-features = false, features = ["serde"] } sp-weights = { path = "../../primitives/weights", default-features = false, features = ["serde"] } -docify = "0.2.0" +docify = "0.2.6" [dev-dependencies] criterion = "0.4.0" diff --git a/substrate/frame/system/src/lib.rs b/substrate/frame/system/src/lib.rs index 1b8dd6a9367..15dd047fdb0 100644 --- a/substrate/frame/system/src/lib.rs +++ b/substrate/frame/system/src/lib.rs @@ -1018,6 +1018,7 @@ impl_ensure_origin_with_arg_ignoring_arg! { {} } +#[docify::export] /// Ensure that the origin `o` represents a signed extrinsic (i.e. transaction). /// Returns `Ok` with the account that signed the extrinsic or an `Err` otherwise. pub fn ensure_signed(o: OuterOrigin) -> Result @@ -1372,6 +1373,7 @@ impl Pallet { /// NOTE: Events not registered at the genesis block and quietly omitted. pub fn deposit_event_indexed(topics: &[T::Hash], event: T::RuntimeEvent) { let block_number = Self::block_number(); + // Don't populate events on genesis. if block_number.is_zero() { return @@ -1555,12 +1557,7 @@ impl Pallet { /// NOTE: Events not registered at the genesis block and quietly omitted. #[cfg(any(feature = "std", feature = "runtime-benchmarks", test))] pub fn events() -> Vec> { - debug_assert!( - !Self::block_number().is_zero(), - "events not registered at the genesis block" - ); - // Dereferencing the events here is fine since we are not in the - // memory-restricted runtime. + // Dereferencing the events here is fine since we are not in the memory-restricted runtime. Self::read_events_no_consensus().map(|e| *e).collect() } @@ -1581,6 +1578,21 @@ impl Pallet { Events::::stream_iter() } + /// Read and return the events of a specific pallet, as denoted by `E`. + /// + /// This is useful for a pallet that wishes to read only the events it has deposited into + /// `frame_system` using the standard `fn deposit_event`. + pub fn read_events_for_pallet() -> Vec + where + T::RuntimeEvent: TryInto, + { + Events::::get() + .into_iter() + .map(|er| er.event) + .filter_map(|e| e.try_into().ok()) + .collect::<_>() + } + /// Set the block number to something in particular. Can be used as an alternative to /// `initialize` for tests that don't need to bother with the other environment entries. 
#[cfg(any(feature = "std", feature = "runtime-benchmarks", test))] diff --git a/substrate/frame/tips/src/migrations/v4.rs b/substrate/frame/tips/src/migrations/v4.rs index 35569633d1b..2404c6de1a1 100644 --- a/substrate/frame/tips/src/migrations/v4.rs +++ b/substrate/frame/tips/src/migrations/v4.rs @@ -90,7 +90,7 @@ pub fn migrate { /// Who this purports to be from and the number of extrinsics have come before diff --git a/substrate/primitives/runtime/src/generic/unchecked_extrinsic.rs b/substrate/primitives/runtime/src/generic/unchecked_extrinsic.rs index 1cdc0b8e405..6ac381babee 100644 --- a/substrate/primitives/runtime/src/generic/unchecked_extrinsic.rs +++ b/substrate/primitives/runtime/src/generic/unchecked_extrinsic.rs @@ -43,16 +43,37 @@ const EXTRINSIC_FORMAT_VERSION: u8 = 4; /// The `SingaturePayload` of `UncheckedExtrinsic`. type UncheckedSignaturePayload = (Address, Signature, Extra); -/// A extrinsic right from the external world. This is unchecked and so -/// can contain a signature. +/// An extrinsic right from the external world. This is unchecked and so can contain a signature. +/// +/// An extrinsic is formally described as any external data that is originating from the outside of +/// the runtime and fed into the runtime as a part of the block-body. +/// +/// Inherents are special types of extrinsics that are placed into the block by the block-builder. +/// They are unsigned because the assertion is that they are "inherently true" by virtue of getting +/// past all validators. +/// +/// Transactions are all other statements provided by external entities that the chain deems values +/// and decided to include in the block. This value is typically in the form of fee payment, but it +/// could in principle be any other interaction. Transactions are either signed or unsigned. A +/// sensible transaction pool should ensure that only transactions that are worthwhile are +/// considered for block-building. +#[doc = simple_mermaid::mermaid!("../../../../../docs/mermaid/extrinsics.mmd")] +/// This type is by no means enforced within Substrate, but given its genericness, it is highly +/// likely that for most use-cases it will suffice. Thus, the encoding of this type will dictate +/// exactly what bytes should be sent to a runtime to transact with it. +/// +/// This can be checked using [`Checkable`], yielding a [`CheckedExtrinsic`], which is the +/// counterpart of this type after its signature (and other non-negotiable validity checks) have +/// passed. #[derive(PartialEq, Eq, Clone)] pub struct UncheckedExtrinsic where Extra: SignedExtension, { - /// The signature, address, number of extrinsics have come before from - /// the same signer and an era describing the longevity of this transaction, - /// if this is a signed extrinsic. + /// The signature, address, number of extrinsics have come before from the same signer and an + /// era describing the longevity of this transaction, if this is a signed extrinsic. + /// + /// `None` if it is unsigned or an inherent. pub signature: Option>, /// The function that should be called. 
pub function: Call, @@ -286,6 +307,7 @@ where } } +#[docify::export(unchecked_extrinsic_encode_impl)] impl Encode for UncheckedExtrinsic where Address: Encode, diff --git a/substrate/primitives/runtime/src/offchain/storage_lock.rs b/substrate/primitives/runtime/src/offchain/storage_lock.rs index 1b795978447..116e1578815 100644 --- a/substrate/primitives/runtime/src/offchain/storage_lock.rs +++ b/substrate/primitives/runtime/src/offchain/storage_lock.rs @@ -250,7 +250,7 @@ impl Lockable for BlockAndTime { /// /// A lock that is persisted in the DB and provides the ability to guard against /// concurrent access in an off-chain worker, with a defined expiry deadline -/// based on the concrete [`Lockable`](Lockable) implementation. +/// based on the concrete [`Lockable`] implementation. pub struct StorageLock<'a, L = Time> { // A storage value ref which defines the DB entry representing the lock. value_ref: StorageValueRef<'a>, diff --git a/substrate/src/lib.rs b/substrate/src/lib.rs deleted file mode 100644 index b5a583fcfcf..00000000000 --- a/substrate/src/lib.rs +++ /dev/null @@ -1,242 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 - -// This program is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// This program is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with this program. If not, see . - -//! # Substrate -//! -//! Substrate is a Rust framework for building blockchains in a modular and extensible way. While in -//! itself un-opinionated, it is the main engine behind the Polkadot ecosystem. -//! -//! [![github]](https://github.com/paritytech/polkadot-sdk/tree/master/substrate/) - [![polkadot]](https://polkadot.network) -//! -//! This crate in itself does not contain any code and is just meant ot be a documentation hub for -//! substrate-based crates. -//! -//! ## Overview -//! -//! Substrate approaches blockchain development with an acknowledgement of a few self-evident -//! truths: -//! -//! 1. Society and technology evolves. -//! 2. Humans are fallible. -//! -//! This, specifically, makes the task of designing a correct, safe and long-lasting blockchain -//! system hard. -//! -//! Nonetheless, in order to achieve this goal, substrate embraces the following: -//! -//! 1. Use of **Rust** as a modern, and safe programming language, which limits human error through -//! various means, most notably memory safety. -//! 2. Substrate is written from the ground-up with a generic, modular and extensible design. This -//! ensures that software components can be easily swapped and upgraded. Examples of this is -//! multiple consensus mechanisms provided by Substrate, as listed below. -//! 3. Lastly, the final blockchain system created with the above properties needs to be -//! upgradeable. In order to achieve this, Substrate is designed as a meta-protocol, whereby the -//! application logic of the blockchain (called "Runtime") is encoded as a Wasm blob, and is -//! stored onchain. 
The rest of the system (called "Client") acts as the executor of the Wasm -//! blob. -//! -//! In essence, the meta-protocol of all Substrate based chains is the "Runtime as Wasm blob" -//! accord. This enables the Runtime to become inherently upgradeable (without forks). The upgrade -//! is merely a matter of the Wasm blob being changed in the chain state, which is, in principle, -//! same as updating an account's balance. -//! -//! ### Architecture -//! -//! Therefore, Substrate can be visualized as follows: -#![doc = simple_mermaid::mermaid!("../../docs/mermaid/substrate_simple.mmd")] -//! -//! The client and the runtime of course need to communicate. This is done through two concepts: -//! -//! 1. Host functions: a way for the (Wasm) runtime to talk to the client. All host functions are -//! defined in [`sp-io`]. For example, [`sp-io::storage`] are the set of host functions that -//! allow the runtime to read and write data to the on-chain state. -//! 2. Runtime APIs: a way for the client to talk to the Wasm runtime. Runtime APIs are defined -//! using macros and utilities in [`sp-api`]. For example, [`sp-api::Core`] is the most basic -//! runtime API that any blockchain must implement in order to be able to (re) execute blocks. -#![doc = simple_mermaid::mermaid!("../../docs/mermaid/substrate_client_runtime.mmd")] -//! -//! [`FRAME`], Substrate's default runtime development library takes the above even further by -//! embracing a declarative programming model whereby correctness is enhanced and the system is -//! highly configurable through parameterization. -//! -//! All in all, this design enables all substrate-based chains to achieve forkless, self-enacting -//! upgrades out of the box. Combined with governance abilities that are shipped with `FRAME`, this -//! enables a chain to survive the test of time. -//! -//! ## How to Get Stared -//! -//! Most developers want to leave the client side code as-is, and focus on the runtime. To do so, -//! look into the [`frame`] crate, which is the entry point crate into runtime development with -//! FRAME. -//! -//! > Side note, it is entirely possible to craft a substrate-based runtime without FRAME, an -//! > example of which can be found [here](https://github.com/JoshOrndorff/frameless-node-template). -//! -//! In more broad terms, the following avenues exist into developing with substrate: -//! -//! * **Templates**: A number of substrate-based templates exist and they can be used for various -//! purposes, with zero to little additional code needed. All of these templates contain runtimes -//! that are highly configurable and are likely suitable for basic needs. -//! * [`FRAME`]: If need, one can customize that runtime even further, by using `FRAME` and -//! developing custom modules. -//! * **Core**: To the contrary, some developers may want to customize the client side software to -//! achieve novel goals such as a new consensus engine, or a new database backend. While -//! Substrate's main configurability is in the runtime, the client is also highly generic and can -//! be customized to a great extent. -//! -//! ## Structure -//! -//! Substrate is a massive cargo workspace with hundreds of crates, therefore it is useful to know -//! how to navigate its crates. -//! -//! In broad terms, it is divided into three categories: -//! -//! * `sc-*` (short for *substrate-client*) crates, located under `./client` folder. These are all -//! the client crates. Notable examples are crates such as [`sc-network`], various consensus -//! 
crates, [`sc-rpc-api`] and [`sc-client-db`], all of which are expected to reside in the client -//! side. -//! * `sp-*` (short for *substrate-primitives*) crates, located under `./primitives` folder. These -//! are the traits that glue the client and runtime together, but are not opinionated about what -//! framework is using for building the runtime. Notable examples are [`sp-api`] and [`sp-io`], -//! which form the communication bridge between the client and runtime. -//! * `pallet-*` and `frame-*` crates, located under `./frame` folder. These are the crates related -//! to FRAME. See [`frame`] for more information. -//! -//! ### Wasm Build -//! -//! Many of the Substrate crates, such as entire `sp-*`, need to compile to both Wasm (when a Wasm -//! runtime is being generated) and native (for example, when testing). To achieve this, Substrate -//! follows the convention of the Rust community, and uses a `feature = "std"` to signify that a -//! crate is being built with the standard library, and is built for native. Otherwise, it is built -//! for `no_std`. -//! -//! This can be summarized in `#![cfg_attr(not(feature = "std"), no_std)]`, which you can often find -//! in any Substrate-based runtime. -//! -//! Substrate-based runtimes use [`substrate-wasm-builder`] in their `build.rs` to automatically -//! build their Wasm files as a part of normal build commandsOnce built, the wasm file is placed in -//! `./target/{debug|release}/wbuild/{runtime_name}.wasm`. -//! -//! ### Binaries -//! -//! Multiple binaries are shipped with substrate, the most important of which are located in the -//! `./bin` folder. -//! -//! * [`node`] is an extensive substrate node that contains the superset of all runtime and client -//! side features. The corresponding runtime, called [`kitchensink_runtime`] contains all of the -//! modules that are provided with `FRAME`. This node and runtime is only used for testing and -//! demonstration. -//! * [`chain-spec-builder`]: Utility to build more detailed [chain-spec][`sc-chain-spec`] for the -//! aforementioned node. Other projects typically contain a `build-spec` subcommand that does the -//! same. -//! * [`node-template`]: a template node that contains a minimal set of features and can act as a -//! starting point of a project. -//! * [`subkey`]: Substrate's key management utility. -//! -//! ### Anatomy of a Binary Crate -//! -//! From the above, [`node`] and [`node-template`] are essentially blueprints of a substrate-based -//! project, as the name of the latter is implying. Each substrate-based project typically contains -//! the following: -//! -//! * Under `./runtime`, a `./runtime/src/lib.rs` which is the top level runtime amalgamator file. -//! This file typically contains the [`frame_support::construct_runtime`] macro, which is the -//! final definition of a runtime. -//! -//! * Under `./node`, a `main.rs`, which is the point, and a `./service.rs`, which contains all the -//! client side components. Skimming this file yields an overview of the networking, database, -//! consensus and similar client side components. -//! -//! > The above two are conventions, not rules. -//! -//! ## Parachain? -//! -//! As noted above, Substrate is the main engine behind the Polkadot ecosystem. One of the ways -//! through which Polkadot can be utilized is by building "parachains", blockchains that are -//! connected to Polkadot's shared security. -//! -//! To build a parachain, one could use -//! 
[`Cumulus`](https://github.com/paritytech/polkadot-sdk/tree/master/cumulus), the library on top -//! of Substrate, empowering any substrate-based chain to be a Polkadot parachain. -//! -//! ## Where To Go Next? -//! -//! Additional noteworthy crates within substrate: -//! -//! - Chain specification of a Substrate node: -//! - [`sc-chain-spec`] -//! - RPC APIs of a Substrate node: [`sc-rpc-api`]/[`sc-rpc`] -//! - CLI Options of a Substrate node: [`sc-cli`] -//! - All of the consensus related crates provided by Substrate: -//! - [`sc-consensus-aura`] -//! - [`sc-consensus-babe`] -//! - [`sc-consensus-grandpa`] -//! - [`sc-consensus-beefy`] -//! - [`sc-consensus-manual-seal`] -//! - [`sc-consensus-pow`] -//! -//! Additional noteworthy external resources: -//! -//! - [Substrate Developer Hub](https://substrate.dev) -//! - [Parity Tech's Documentation Hub](https://paritytech.github.io/) -//! - [Frontier: Substrate's Ethereum Compatibility Library](https://paritytech.github.io/frontier/) -//! - [Polkadot Wiki](https://wiki.polkadot.network/en/) -//! -//! Notable upstream crates: -//! -//! - [`parity-scale-codec`](https://github.com/paritytech/parity-scale-codec) -//! - [`parity-db`](https://github.com/paritytech/parity-db) -//! - [`trie`](https://github.com/paritytech/trie) -//! - [`parity-common`](https://github.com/paritytech/parity-common) -//! -//! Templates: -//! -//! - classic [`substrate-node-template`](https://github.com/substrate-developer-hub/substrate-node-template) -//! - classic [cumulus-parachain-template](https://github.com/substrate-developer-hub/substrate-parachain-template) -//! - [`extended-parachain-template`](https://github.com/paritytech/extended-parachain-template) -//! - [`frontier-parachain-template`](https://github.com/paritytech/frontier-parachain-template) -//! -//! [polkadot]: -//! https://img.shields.io/badge/polkadot-E6007A?style=for-the-badge&logo=polkadot&logoColor=white -//! [github]: -//! https://img.shields.io/badge/github-8da0cb?style=for-the-badge&labelColor=555555&logo=github -//! [`FRAME`]: ../frame/index.html -//! [`sp-io`]: ../sp_io/index.html -//! [`sp-api`]: ../sp_api/index.html -//! [`sp-api`]: ../sp_api/index.html -//! [`sc-client-db`]: ../sc_client_db/index.html -//! [`sc-chain-spec`]: ../sc_chain_spec/index.html -//! [`sc-network`]: ../sc_network/index.html -//! [`sc-rpc-api`]: ../sc_rpc_api/index.html -//! [`sc-rpc`]: ../sc_rpc/index.html -//! [`sc-cli`]: ../sc_cli/index.html -//! [`sc-consensus-aura`]: ../sc_consensus_aura/index.html -//! [`sc-consensus-babe`]: ../sc_consensus_babe/index.html -//! [`sc-consensus-grandpa`]: ../sc_consensus_grandpa/index.html -//! [`sc-consensus-beefy`]: ../sc_consensus_beefy/index.html -//! [`sc-consensus-manual-seal`]: ../sc_consensus_manual_seal/index.html -//! [`sc-consensus-pow`]: ../sc_consensus_pow/index.html -//! [`node`]: ../node_cli/index.html -//! [`node-template`]: ../node_template/index.html -//! [`kitchensink_runtime`]: ../kitchensink_runtime/index.html -//! [`subkey`]: ../subkey/index.html -//! [`chain-spec-builder`]: ../chain_spec_builder/index.html -//! 
[`substrate-wasm-builder`]: https://crates.io/crates/substrate-wasm-builder - -#![deny(rustdoc::broken_intra_doc_links)] -#![deny(rustdoc::private_intra_doc_links)] -- GitLab From 64361ac19a3c74ec50329403e45a3a98fc40ac5b Mon Sep 17 00:00:00 2001 From: joe petrowski <25483142+joepetrowski@users.noreply.github.com> Date: Thu, 30 Nov 2023 12:58:39 +0100 Subject: [PATCH 168/199] Withdraw Assets Before Checking Out in `OnReapIdentity` impl (#2552) Follow up to fix a bug from #1814 discovered in XCM emulator testing. I mistakenly thought that checking out an asset would withdraw it from the sender. This actually withdraws the asset before checking out. --------- Co-authored-by: Adrian Catangiu Co-authored-by: Liam Aharon --- polkadot/runtime/rococo/src/impls.rs | 24 +++++++++++++++++++++++- polkadot/runtime/westend/src/impls.rs | 24 +++++++++++++++++++++++- 2 files changed, 46 insertions(+), 2 deletions(-) diff --git a/polkadot/runtime/rococo/src/impls.rs b/polkadot/runtime/rococo/src/impls.rs index 71b1091eeb6..eddbfacc3b1 100644 --- a/polkadot/runtime/rococo/src/impls.rs +++ b/polkadot/runtime/rococo/src/impls.rs @@ -79,6 +79,8 @@ impl ToParachainIdentityReaper { } } +// Note / Warning: This implementation should only be used in a transactional context. If not, then +// an error could result in assets being burned. impl OnReapIdentity for ToParachainIdentityReaper where Runtime: frame_system::Config + pallet_xcm::Config, @@ -100,6 +102,19 @@ where // Do `check_out` accounting since the XCM Executor's `InitiateTeleport` doesn't support // unpaid teleports. + // withdraw the asset from `who` + let who_origin = + Junction::AccountId32 { network: None, id: who.clone().into() }.into_location(); + let _withdrawn = xcm_config::LocalAssetTransactor::withdraw_asset(&roc, &who_origin, None) + .map_err(|err| { + log::error!( + target: "runtime::on_reap_identity", + "withdraw_asset(what: {:?}, who_origin: {:?}) error: {:?}", + roc, who_origin, err + ); + pallet_xcm::Error::::LowBalance + })?; + // check out xcm_config::LocalAssetTransactor::can_check_out( &destination, @@ -107,7 +122,14 @@ where // not used in AssetTransactor &XcmContext { origin: None, message_id: [0; 32], topic: None }, ) - .map_err(|_| pallet_xcm::Error::::CannotCheckOutTeleport)?; + .map_err(|err| { + log::error!( + target: "runtime::on_reap_identity", + "can_check_out(destination: {:?}, asset: {:?}, _) error: {:?}", + destination, roc, err + ); + pallet_xcm::Error::::CannotCheckOutTeleport + })?; xcm_config::LocalAssetTransactor::check_out( &destination, &roc, diff --git a/polkadot/runtime/westend/src/impls.rs b/polkadot/runtime/westend/src/impls.rs index 80105594965..5f23bd373b1 100644 --- a/polkadot/runtime/westend/src/impls.rs +++ b/polkadot/runtime/westend/src/impls.rs @@ -79,6 +79,8 @@ impl ToParachainIdentityReaper { } } +// Note / Warning: This implementation should only be used in a transactional context. If not, then +// an error could result in assets being burned. impl OnReapIdentity for ToParachainIdentityReaper where Runtime: frame_system::Config + pallet_xcm::Config, @@ -100,6 +102,19 @@ where // Do `check_out` accounting since the XCM Executor's `InitiateTeleport` doesn't support // unpaid teleports. 
+ // withdraw the asset from `who` + let who_origin = + Junction::AccountId32 { network: None, id: who.clone().into() }.into_location(); + let _withdrawn = xcm_config::LocalAssetTransactor::withdraw_asset(&wnd, &who_origin, None) + .map_err(|err| { + log::error!( + target: "runtime::on_reap_identity", + "withdraw_asset(what: {:?}, who_origin: {:?}) error: {:?}", + wnd, who_origin, err + ); + pallet_xcm::Error::::LowBalance + })?; + // check out xcm_config::LocalAssetTransactor::can_check_out( &destination, @@ -107,7 +122,14 @@ where // not used in AssetTransactor &XcmContext { origin: None, message_id: [0; 32], topic: None }, ) - .map_err(|_| pallet_xcm::Error::::CannotCheckOutTeleport)?; + .map_err(|err| { + log::error!( + target: "runtime::on_reap_identity", + "can_check_out(destination: {:?}, asset: {:?}, _) error: {:?}", + destination, wnd, err + ); + pallet_xcm::Error::::CannotCheckOutTeleport + })?; xcm_config::LocalAssetTransactor::check_out( &destination, &wnd, -- GitLab From 9a650c46fdf52809bd9b5d687a4e1f77748521d4 Mon Sep 17 00:00:00 2001 From: Sebastian Kunert Date: Thu, 30 Nov 2023 15:56:34 +0100 Subject: [PATCH 169/199] PoV Reclaim (Clawback) Node Side (#1462) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This PR provides the infrastructure for the pov-reclaim mechanism discussed in #209. The goal is to provide the current proof size to the runtime so it can be used to reclaim storage weight. ## New Host Function - A new host function is provided [here](https://github.com/skunert/polkadot-sdk/blob/5b317fda3be205f4136f10d4490387ccd4f9765d/cumulus/primitives/pov-reclaim/src/lib.rs#L23). It returns the size of the current proof size to the runtime. If recording is not enabled, it returns 0. ## Implementation Overview - Implement option to enable proof recording during import in the client. This is currently enabled for `polkadot-parachain`, `parachain-template` and the cumulus test node. - Make the proof recorder ready for no-std. It was previously only enabled for std environments, but we need to record the proof size in `validate_block` too. - Provide a recorder implementation that only the records the size of incoming nodes and does not store the nodes itself. - Fix benchmarks that were broken by async backing changes - Provide new externalities extension that is registered by default if proof recording is enabled. - I think we should discuss the naming, pov-reclaim was more intuitive to me, but we could also go with clawback like in the issue. ## Impact of proof recording during import With proof recording: 6.3058 Kelem/s Without proof recording: 6.3427 Kelem/s The measured impact on the importing performance is quite low on my machine using the block import benchmark. With proof recording I am seeing a performance hit of 0.585%. 
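
For illustration only (not part of this patch): the sketch below mirrors the new unit tests for the size-only recorder added under `validate_block/trie_recorder.rs`. Note that `SizeOnlyRecorderProvider` is `pub(crate)` inside the pallet's `validate_block` module, so it cannot be used externally as written; the hasher (`sp_core::Blake2Hasher`) and the `sp_trie`/`trie_db` builder calls are assumed to be the same ones the test module uses.

```rust
use sp_trie::{LayoutV1, MemoryDB, ProofSizeProvider, TrieRecorderProvider};
use trie_db::{Trie, TrieDBBuilder, TrieDBMutBuilder, TrieMut};

type Hasher = sp_core::Blake2Hasher;

fn main() {
    // Build a small in-memory trie to read from.
    let mut db = MemoryDB::<Hasher>::default();
    let mut root = Default::default();
    {
        let mut trie =
            TrieDBMutBuilder::<LayoutV1<Hasher>>::new(&mut db, &mut root).build();
        trie.insert(b"key", b"value").expect("insert succeeds");
    }

    // The size-only recorder tracks the encoded size of every node it sees,
    // but never stores the nodes themselves.
    let recorder = SizeOnlyRecorderProvider::<Hasher>::new();
    {
        let mut trie_recorder = recorder.as_trie_recorder(root);
        let trie = TrieDBBuilder::<LayoutV1<Hasher>>::new(&db, &root)
            .with_recorder(&mut trie_recorder)
            .build();
        assert_eq!(trie.get(b"key").unwrap().unwrap(), b"value".to_vec());
    }

    // After reading, only the accumulated proof size is available; there is no
    // storage proof to drain (`drain_storage_proof` returns `None`), which is
    // all that `validate_block` needs for reclaim accounting.
    println!("estimated proof size: {}", recorder.estimate_encoded_size());
}
```

Recording only sizes keeps the `validate_block` path cheap: the accessed nodes are already part of the PoV, so storing them again in a recorder inside the WASM environment would only waste memory.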
--------- Co-authored-by: command-bot <> Co-authored-by: Davide Galassi Co-authored-by: Bastian Köcher --- Cargo.lock | 19 ++ Cargo.toml | 1 + cumulus/client/service/Cargo.toml | 1 + cumulus/client/service/src/lib.rs | 2 + cumulus/pallets/parachain-system/Cargo.toml | 3 + .../src/validate_block/mod.rs | 4 + .../src/validate_block/trie_recorder.rs | 286 ++++++++++++++++++ .../proof-size-hostfunction/Cargo.toml | 21 ++ .../proof-size-hostfunction/src/lib.rs | 107 +++++++ cumulus/test/client/Cargo.toml | 1 + cumulus/test/client/src/lib.rs | 3 +- cumulus/test/service/benches/block_import.rs | 92 +++--- .../service/benches/block_import_glutton.rs | 37 ++- .../test/service/benches/validate_block.rs | 15 +- cumulus/test/service/src/bench_utils.rs | 7 +- cumulus/test/service/src/lib.rs | 54 ++-- cumulus/test/service/src/main.rs | 3 +- .../client/api/src/execution_extensions.rs | 5 +- substrate/client/block-builder/Cargo.toml | 1 + substrate/client/block-builder/src/lib.rs | 5 + substrate/client/service/src/builder.rs | 26 +- substrate/client/service/src/client/client.rs | 15 +- substrate/client/service/src/lib.rs | 7 +- .../procedural/src/construct_runtime/mod.rs | 2 +- .../api/proc-macro/src/decl_runtime_apis.rs | 2 +- .../api/proc-macro/src/impl_runtime_apis.rs | 2 +- .../runtime-interface/proc-macro/Cargo.toml | 1 + .../proc-macro/src/runtime_interface/mod.rs | 6 + .../state-machine/src/trie_backend.rs | 150 +++++---- .../state-machine/src/trie_backend_essence.rs | 154 +++++----- substrate/primitives/trie/Cargo.toml | 2 + substrate/primitives/trie/src/lib.rs | 26 ++ .../trie/src/proof_size_extension.rs | 39 +++ substrate/primitives/trie/src/recorder.rs | 41 ++- 34 files changed, 899 insertions(+), 241 deletions(-) create mode 100644 cumulus/pallets/parachain-system/src/validate_block/trie_recorder.rs create mode 100644 cumulus/primitives/proof-size-hostfunction/Cargo.toml create mode 100644 cumulus/primitives/proof-size-hostfunction/src/lib.rs create mode 100644 substrate/primitives/trie/src/proof_size_extension.rs diff --git a/Cargo.lock b/Cargo.lock index de4471783ab..e7caa46cad8 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3495,6 +3495,7 @@ dependencies = [ "cumulus-client-network", "cumulus-client-pov-recovery", "cumulus-primitives-core", + "cumulus-primitives-proof-size-hostfunction", "cumulus-relay-chain-inprocess-interface", "cumulus-relay-chain-interface", "cumulus-relay-chain-minimal-node", @@ -3564,6 +3565,7 @@ dependencies = [ "cumulus-pallet-parachain-system-proc-macro", "cumulus-primitives-core", "cumulus-primitives-parachain-inherent", + "cumulus-primitives-proof-size-hostfunction", "cumulus-test-client", "cumulus-test-relay-sproof-builder", "environmental", @@ -3595,6 +3597,7 @@ dependencies = [ "sp-version", "staging-xcm", "trie-db", + "trie-standardmap", ] [[package]] @@ -3743,6 +3746,18 @@ dependencies = [ "tracing", ] +[[package]] +name = "cumulus-primitives-proof-size-hostfunction" +version = "0.1.0" +dependencies = [ + "sp-core", + "sp-externalities 0.19.0", + "sp-io", + "sp-runtime-interface 17.0.0", + "sp-state-machine", + "sp-trie", +] + [[package]] name = "cumulus-primitives-timestamp" version = "0.1.0" @@ -3903,6 +3918,7 @@ version = "0.1.0" dependencies = [ "cumulus-primitives-core", "cumulus-primitives-parachain-inherent", + "cumulus-primitives-proof-size-hostfunction", "cumulus-test-relay-sproof-builder", "cumulus-test-runtime", "cumulus-test-service", @@ -14766,6 +14782,7 @@ dependencies = [ "sp-inherents", "sp-runtime", "sp-state-machine", + "sp-trie", 
"substrate-test-runtime-client", ] @@ -17612,6 +17629,7 @@ name = "sp-runtime-interface-proc-macro" version = "11.0.0" dependencies = [ "Inflector", + "expander 2.0.0", "proc-macro-crate", "proc-macro2", "quote", @@ -17864,6 +17882,7 @@ dependencies = [ "scale-info", "schnellru", "sp-core", + "sp-externalities 0.19.0", "sp-runtime", "sp-std 8.0.0", "thiserror", diff --git a/Cargo.toml b/Cargo.toml index a295aca819c..b585ceb5b44 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -98,6 +98,7 @@ members = [ "cumulus/primitives/aura", "cumulus/primitives/core", "cumulus/primitives/parachain-inherent", + "cumulus/primitives/proof-size-hostfunction", "cumulus/primitives/timestamp", "cumulus/primitives/utility", "cumulus/test/client", diff --git a/cumulus/client/service/Cargo.toml b/cumulus/client/service/Cargo.toml index f80c65128d5..cc2f22e6565 100644 --- a/cumulus/client/service/Cargo.toml +++ b/cumulus/client/service/Cargo.toml @@ -38,6 +38,7 @@ cumulus-client-consensus-common = { path = "../consensus/common" } cumulus-client-pov-recovery = { path = "../pov-recovery" } cumulus-client-network = { path = "../network" } cumulus-primitives-core = { path = "../../primitives/core" } +cumulus-primitives-proof-size-hostfunction = { path = "../../primitives/proof-size-hostfunction" } cumulus-relay-chain-interface = { path = "../relay-chain-interface" } cumulus-relay-chain-inprocess-interface = { path = "../relay-chain-inprocess-interface" } cumulus-relay-chain-minimal-node = { path = "../relay-chain-minimal-node" } diff --git a/cumulus/client/service/src/lib.rs b/cumulus/client/service/src/lib.rs index f8ebca11c8c..950e59aff24 100644 --- a/cumulus/client/service/src/lib.rs +++ b/cumulus/client/service/src/lib.rs @@ -52,6 +52,8 @@ use sp_core::{traits::SpawnNamed, Decode}; use sp_runtime::traits::{Block as BlockT, BlockIdTo, Header}; use std::{sync::Arc, time::Duration}; +pub use cumulus_primitives_proof_size_hostfunction::storage_proof_size; + // Given the sporadic nature of the explicit recovery operation and the // possibility to retry infinite times this value is more than enough. // In practice here we expect no more than one queued messages. 
diff --git a/cumulus/pallets/parachain-system/Cargo.toml b/cumulus/pallets/parachain-system/Cargo.toml index 5600c95a2a6..cf6af4b4786 100644 --- a/cumulus/pallets/parachain-system/Cargo.toml +++ b/cumulus/pallets/parachain-system/Cargo.toml @@ -39,11 +39,13 @@ xcm = { package = "staging-xcm", path = "../../../polkadot/xcm", default-feature cumulus-pallet-parachain-system-proc-macro = { path = "proc-macro", default-features = false } cumulus-primitives-core = { path = "../../primitives/core", default-features = false } cumulus-primitives-parachain-inherent = { path = "../../primitives/parachain-inherent", default-features = false } +cumulus-primitives-proof-size-hostfunction = { path = "../../primitives/proof-size-hostfunction", default-features = false } [dev-dependencies] assert_matches = "1.5" hex-literal = "0.4.1" lazy_static = "1.4" +trie-standardmap = "0.16.0" rand = "0.8.5" futures = "0.3.28" @@ -65,6 +67,7 @@ std = [ "cumulus-pallet-parachain-system-proc-macro/std", "cumulus-primitives-core/std", "cumulus-primitives-parachain-inherent/std", + "cumulus-primitives-proof-size-hostfunction/std", "environmental/std", "frame-benchmarking/std", "frame-support/std", diff --git a/cumulus/pallets/parachain-system/src/validate_block/mod.rs b/cumulus/pallets/parachain-system/src/validate_block/mod.rs index db149401638..763a4cffd77 100644 --- a/cumulus/pallets/parachain-system/src/validate_block/mod.rs +++ b/cumulus/pallets/parachain-system/src/validate_block/mod.rs @@ -26,6 +26,10 @@ mod tests; #[doc(hidden)] mod trie_cache; +#[cfg(any(test, not(feature = "std")))] +#[doc(hidden)] +mod trie_recorder; + #[cfg(not(feature = "std"))] #[doc(hidden)] pub use bytes; diff --git a/cumulus/pallets/parachain-system/src/validate_block/trie_recorder.rs b/cumulus/pallets/parachain-system/src/validate_block/trie_recorder.rs new file mode 100644 index 00000000000..e73aef70aa4 --- /dev/null +++ b/cumulus/pallets/parachain-system/src/validate_block/trie_recorder.rs @@ -0,0 +1,286 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Substrate. + +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . + +//! Provide a specialized trie-recorder and provider for use in validate-block. +//! +//! This file defines two main structs, [`SizeOnlyRecorder`] and +//! [`SizeOnlyRecorderProvider`]. They are used to track the current +//! proof-size without actually recording the accessed nodes themselves. + +use codec::Encode; + +use sp_std::{ + cell::{RefCell, RefMut}, + collections::{btree_map::BTreeMap, btree_set::BTreeSet}, + rc::Rc, +}; +use sp_trie::{NodeCodec, ProofSizeProvider, StorageProof}; +use trie_db::{Hasher, RecordedForKey, TrieAccess}; + +/// A trie recorder that only keeps track of the proof size. +/// +/// The internal size counting logic should align +/// with ['sp_trie::recorder::Recorder']. 
+pub(crate) struct SizeOnlyRecorder<'a, H: Hasher> { + seen_nodes: RefMut<'a, BTreeSet>, + encoded_size: RefMut<'a, usize>, + recorded_keys: RefMut<'a, BTreeMap, RecordedForKey>>, +} + +impl<'a, H: trie_db::Hasher> trie_db::TrieRecorder for SizeOnlyRecorder<'a, H> { + fn record(&mut self, access: TrieAccess<'_, H::Out>) { + let mut encoded_size_update = 0; + match access { + TrieAccess::NodeOwned { hash, node_owned } => + if self.seen_nodes.insert(hash) { + let node = node_owned.to_encoded::>(); + encoded_size_update += node.encoded_size(); + }, + TrieAccess::EncodedNode { hash, encoded_node } => + if self.seen_nodes.insert(hash) { + encoded_size_update += encoded_node.encoded_size(); + }, + TrieAccess::Value { hash, value, full_key } => { + if self.seen_nodes.insert(hash) { + encoded_size_update += value.encoded_size(); + } + self.recorded_keys + .entry(full_key.into()) + .and_modify(|e| *e = RecordedForKey::Value) + .or_insert_with(|| RecordedForKey::Value); + }, + TrieAccess::Hash { full_key } => { + self.recorded_keys + .entry(full_key.into()) + .or_insert_with(|| RecordedForKey::Hash); + }, + TrieAccess::NonExisting { full_key } => { + self.recorded_keys + .entry(full_key.into()) + .and_modify(|e| *e = RecordedForKey::Value) + .or_insert_with(|| RecordedForKey::Value); + }, + TrieAccess::InlineValue { full_key } => { + self.recorded_keys + .entry(full_key.into()) + .and_modify(|e| *e = RecordedForKey::Value) + .or_insert_with(|| RecordedForKey::Value); + }, + }; + + *self.encoded_size += encoded_size_update; + } + + fn trie_nodes_recorded_for_key(&self, key: &[u8]) -> RecordedForKey { + self.recorded_keys.get(key).copied().unwrap_or(RecordedForKey::None) + } +} + +#[derive(Clone)] +pub(crate) struct SizeOnlyRecorderProvider { + seen_nodes: Rc>>, + encoded_size: Rc>, + recorded_keys: Rc, RecordedForKey>>>, +} + +impl SizeOnlyRecorderProvider { + pub fn new() -> Self { + Self { + seen_nodes: Default::default(), + encoded_size: Default::default(), + recorded_keys: Default::default(), + } + } +} + +impl sp_trie::TrieRecorderProvider for SizeOnlyRecorderProvider { + type Recorder<'a> = SizeOnlyRecorder<'a, H> where H: 'a; + + fn drain_storage_proof(self) -> Option { + None + } + + fn as_trie_recorder(&self, _storage_root: H::Out) -> Self::Recorder<'_> { + SizeOnlyRecorder { + encoded_size: self.encoded_size.borrow_mut(), + seen_nodes: self.seen_nodes.borrow_mut(), + recorded_keys: self.recorded_keys.borrow_mut(), + } + } +} + +impl ProofSizeProvider for SizeOnlyRecorderProvider { + fn estimate_encoded_size(&self) -> usize { + *self.encoded_size.borrow() + } +} + +// This is safe here since we are single-threaded in WASM +unsafe impl Send for SizeOnlyRecorderProvider {} +unsafe impl Sync for SizeOnlyRecorderProvider {} + +#[cfg(test)] +mod tests { + use rand::Rng; + use sp_trie::{ + cache::{CacheSize, SharedTrieCache}, + MemoryDB, ProofSizeProvider, TrieRecorderProvider, + }; + use trie_db::{Trie, TrieDBBuilder, TrieDBMutBuilder, TrieHash, TrieMut, TrieRecorder}; + use trie_standardmap::{Alphabet, StandardMap, ValueMode}; + + use super::*; + + type Recorder = sp_trie::recorder::Recorder; + + fn create_trie() -> ( + sp_trie::MemoryDB, + TrieHash>, + Vec<(Vec, Vec)>, + ) { + let mut db = MemoryDB::default(); + let mut root = Default::default(); + + let mut seed = Default::default(); + let test_data: Vec<(Vec, Vec)> = StandardMap { + alphabet: Alphabet::Low, + min_key: 16, + journal_key: 0, + value_mode: ValueMode::Random, + count: 1000, + } + .make_with(&mut seed) + .into_iter() + .map(|(k, 
v)| { + // Double the length so we end up with some values of 2 bytes and some of 64 + let v = [v.clone(), v].concat(); + (k, v) + }) + .collect(); + + // Fill database with values + { + let mut trie = TrieDBMutBuilder::>::new( + &mut db, &mut root, + ) + .build(); + for (k, v) in &test_data { + trie.insert(k, v).expect("Inserts data"); + } + } + + (db, root, test_data) + } + + #[test] + fn recorder_equivalence_cache() { + let (db, root, test_data) = create_trie(); + + let mut rng = rand::thread_rng(); + for _ in 1..10 { + let reference_recorder = Recorder::default(); + let recorder_for_test: SizeOnlyRecorderProvider = + SizeOnlyRecorderProvider::new(); + let reference_cache: SharedTrieCache = + SharedTrieCache::new(CacheSize::new(1024 * 5)); + let cache_for_test: SharedTrieCache = + SharedTrieCache::new(CacheSize::new(1024 * 5)); + { + let local_cache = cache_for_test.local_cache(); + let mut trie_cache_for_reference = local_cache.as_trie_db_cache(root); + let mut reference_trie_recorder = reference_recorder.as_trie_recorder(root); + let reference_trie = + TrieDBBuilder::>::new(&db, &root) + .with_recorder(&mut reference_trie_recorder) + .with_cache(&mut trie_cache_for_reference) + .build(); + + let local_cache_for_test = reference_cache.local_cache(); + let mut trie_cache_for_test = local_cache_for_test.as_trie_db_cache(root); + let mut trie_recorder_under_test = recorder_for_test.as_trie_recorder(root); + let test_trie = + TrieDBBuilder::>::new(&db, &root) + .with_recorder(&mut trie_recorder_under_test) + .with_cache(&mut trie_cache_for_test) + .build(); + + // Access random values from the test data + for _ in 0..100 { + let index: usize = rng.gen_range(0..test_data.len()); + test_trie.get(&test_data[index].0).unwrap().unwrap(); + reference_trie.get(&test_data[index].0).unwrap().unwrap(); + } + + // Check that we have the same nodes recorded for both recorders + for (key, _) in test_data.iter() { + let reference = reference_trie_recorder.trie_nodes_recorded_for_key(key); + let test_value = trie_recorder_under_test.trie_nodes_recorded_for_key(key); + assert_eq!(format!("{:?}", reference), format!("{:?}", test_value)); + } + } + + // Check that we have the same size recorded for both recorders + assert_eq!( + reference_recorder.estimate_encoded_size(), + recorder_for_test.estimate_encoded_size() + ); + } + } + + #[test] + fn recorder_equivalence_no_cache() { + let (db, root, test_data) = create_trie(); + + let mut rng = rand::thread_rng(); + for _ in 1..10 { + let reference_recorder = Recorder::default(); + let recorder_for_test: SizeOnlyRecorderProvider = + SizeOnlyRecorderProvider::new(); + { + let mut reference_trie_recorder = reference_recorder.as_trie_recorder(root); + let reference_trie = + TrieDBBuilder::>::new(&db, &root) + .with_recorder(&mut reference_trie_recorder) + .build(); + + let mut trie_recorder_under_test = recorder_for_test.as_trie_recorder(root); + let test_trie = + TrieDBBuilder::>::new(&db, &root) + .with_recorder(&mut trie_recorder_under_test) + .build(); + + for _ in 0..200 { + let index: usize = rng.gen_range(0..test_data.len()); + test_trie.get(&test_data[index].0).unwrap().unwrap(); + reference_trie.get(&test_data[index].0).unwrap().unwrap(); + } + + // Check that we have the same nodes recorded for both recorders + for (key, _) in test_data.iter() { + let reference = reference_trie_recorder.trie_nodes_recorded_for_key(key); + let test_value = trie_recorder_under_test.trie_nodes_recorded_for_key(key); + assert_eq!(format!("{:?}", reference), format!("{:?}", 
test_value)); + } + } + + // Check that we have the same size recorded for both recorders + assert_eq!( + reference_recorder.estimate_encoded_size(), + recorder_for_test.estimate_encoded_size() + ); + } + } +} diff --git a/cumulus/primitives/proof-size-hostfunction/Cargo.toml b/cumulus/primitives/proof-size-hostfunction/Cargo.toml new file mode 100644 index 00000000000..83dad428d00 --- /dev/null +++ b/cumulus/primitives/proof-size-hostfunction/Cargo.toml @@ -0,0 +1,21 @@ +[package] +name = "cumulus-primitives-proof-size-hostfunction" +version = "0.1.0" +authors.workspace = true +edition.workspace = true +description = "Hostfunction exposing storage proof size to the runtime." +license = "Apache-2.0" + +[dependencies] +sp-runtime-interface = { path = "../../../substrate/primitives/runtime-interface", default-features = false } +sp-externalities = { path = "../../../substrate/primitives/externalities", default-features = false } +sp-trie = { path = "../../../substrate/primitives/trie", default-features = false } + +[dev-dependencies] +sp-state-machine = { path = "../../../substrate/primitives/state-machine" } +sp-core = { path = "../../../substrate/primitives/core" } +sp-io = { path = "../../../substrate/primitives/io" } + +[features] +default = [ "std" ] +std = [ "sp-externalities/std", "sp-runtime-interface/std", "sp-trie/std" ] diff --git a/cumulus/primitives/proof-size-hostfunction/src/lib.rs b/cumulus/primitives/proof-size-hostfunction/src/lib.rs new file mode 100644 index 00000000000..6da6235e585 --- /dev/null +++ b/cumulus/primitives/proof-size-hostfunction/src/lib.rs @@ -0,0 +1,107 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Cumulus. + +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . + +//! Tools for reclaiming PoV weight in parachain runtimes. + +#![cfg_attr(not(feature = "std"), no_std)] + +use sp_externalities::ExternalitiesExt; + +use sp_runtime_interface::runtime_interface; + +#[cfg(feature = "std")] +use sp_trie::proof_size_extension::ProofSizeExt; + +pub const PROOF_RECORDING_DISABLED: u64 = u64::MAX; + +/// Interface that provides access to the current storage proof size. +/// +/// Should return the current storage proof size if [`ProofSizeExt`] is registered. Otherwise, needs +/// to return u64::MAX. +#[runtime_interface] +pub trait StorageProofSize { + /// Returns the current storage proof size. 
+ fn storage_proof_size(&mut self) -> u64 { + self.extension::().map_or(u64::MAX, |e| e.storage_proof_size()) + } +} + +#[cfg(test)] +mod tests { + use sp_core::Blake2Hasher; + use sp_state_machine::TestExternalities; + use sp_trie::{ + proof_size_extension::ProofSizeExt, recorder::Recorder, LayoutV1, PrefixedMemoryDB, + TrieDBMutBuilder, TrieMut, + }; + + use crate::{storage_proof_size, PROOF_RECORDING_DISABLED}; + + const TEST_DATA: &[(&[u8], &[u8])] = &[(b"key1", &[1; 64]), (b"key2", &[2; 64])]; + + type TestLayout = LayoutV1; + + fn get_prepared_test_externalities() -> (TestExternalities, Recorder) + { + let mut db = PrefixedMemoryDB::default(); + let mut root = Default::default(); + + { + let mut trie = TrieDBMutBuilder::::new(&mut db, &mut root).build(); + for (k, v) in TEST_DATA { + trie.insert(k, v).expect("Inserts data"); + } + } + + let recorder: sp_trie::recorder::Recorder = Default::default(); + let trie_backend = sp_state_machine::TrieBackendBuilder::new(db, root) + .with_recorder(recorder.clone()) + .build(); + + let mut ext: TestExternalities = TestExternalities::default(); + ext.backend = trie_backend; + (ext, recorder) + } + + #[test] + fn host_function_returns_size_from_recorder() { + let (mut ext, recorder) = get_prepared_test_externalities(); + ext.register_extension(ProofSizeExt::new(recorder)); + + ext.execute_with(|| { + assert_eq!(storage_proof_size::storage_proof_size(), 0); + sp_io::storage::get(b"key1"); + assert_eq!(storage_proof_size::storage_proof_size(), 175); + sp_io::storage::get(b"key2"); + assert_eq!(storage_proof_size::storage_proof_size(), 275); + sp_io::storage::get(b"key2"); + assert_eq!(storage_proof_size::storage_proof_size(), 275); + }); + } + + #[test] + fn host_function_returns_max_without_extension() { + let (mut ext, _) = get_prepared_test_externalities(); + + ext.execute_with(|| { + assert_eq!(storage_proof_size::storage_proof_size(), PROOF_RECORDING_DISABLED); + sp_io::storage::get(b"key1"); + assert_eq!(storage_proof_size::storage_proof_size(), PROOF_RECORDING_DISABLED); + sp_io::storage::get(b"key2"); + assert_eq!(storage_proof_size::storage_proof_size(), PROOF_RECORDING_DISABLED); + }); + } +} diff --git a/cumulus/test/client/Cargo.toml b/cumulus/test/client/Cargo.toml index b760b796ec9..6ab873d320c 100644 --- a/cumulus/test/client/Cargo.toml +++ b/cumulus/test/client/Cargo.toml @@ -36,6 +36,7 @@ cumulus-test-runtime = { path = "../runtime" } cumulus-test-service = { path = "../service" } cumulus-test-relay-sproof-builder = { path = "../relay-sproof-builder" } cumulus-primitives-core = { path = "../../primitives/core" } +cumulus-primitives-proof-size-hostfunction = { path = "../../primitives/proof-size-hostfunction" } cumulus-primitives-parachain-inherent = { path = "../../primitives/parachain-inherent" } [features] diff --git a/cumulus/test/client/src/lib.rs b/cumulus/test/client/src/lib.rs index a3c79158f49..df63f683de6 100644 --- a/cumulus/test/client/src/lib.rs +++ b/cumulus/test/client/src/lib.rs @@ -44,7 +44,8 @@ mod local_executor { pub struct LocalExecutor; impl sc_executor::NativeExecutionDispatch for LocalExecutor { - type ExtendHostFunctions = (); + type ExtendHostFunctions = + cumulus_primitives_proof_size_hostfunction::storage_proof_size::HostFunctions; fn dispatch(method: &str, data: &[u8]) -> Option> { cumulus_test_runtime::api::dispatch(method, data) diff --git a/cumulus/test/service/benches/block_import.rs b/cumulus/test/service/benches/block_import.rs index 254e03b9263..9d6485d74c5 100644 --- 
a/cumulus/test/service/benches/block_import.rs +++ b/cumulus/test/service/benches/block_import.rs @@ -24,7 +24,7 @@ use core::time::Duration; use cumulus_primitives_core::ParaId; use sp_api::{Core, ProvideRuntimeApi}; -use sp_keyring::Sr25519Keyring::Alice; +use sp_keyring::Sr25519Keyring::{Alice, Bob}; use cumulus_test_service::bench_utils as utils; @@ -32,51 +32,69 @@ fn benchmark_block_import(c: &mut Criterion) { sp_tracing::try_init_simple(); let runtime = tokio::runtime::Runtime::new().expect("creating tokio runtime doesn't fail; qed"); - let para_id = ParaId::from(100); + + let para_id = ParaId::from(cumulus_test_runtime::PARACHAIN_ID); let tokio_handle = runtime.handle(); // Create enough accounts to fill the block with transactions. // Each account should only be included in one transfer. let (src_accounts, dst_accounts, account_ids) = utils::create_benchmark_accounts(); - let alice = runtime.block_on( - cumulus_test_service::TestNodeBuilder::new(para_id, tokio_handle.clone(), Alice) + for bench_parameters in &[(true, Alice), (false, Bob)] { + let node = runtime.block_on( + cumulus_test_service::TestNodeBuilder::new( + para_id, + tokio_handle.clone(), + bench_parameters.1, + ) // Preload all accounts with funds for the transfers - .endowed_accounts(account_ids) + .endowed_accounts(account_ids.clone()) + .import_proof_recording(bench_parameters.0) .build(), - ); - - let client = alice.client; - - let (max_transfer_count, extrinsics) = - utils::create_benchmarking_transfer_extrinsics(&client, &src_accounts, &dst_accounts); - - let parent_hash = client.usage_info().chain.best_hash; - let mut block_builder = BlockBuilderBuilder::new(&*client) - .on_parent_block(parent_hash) - .fetch_parent_block_number(&*client) - .unwrap() - .build() - .unwrap(); - for extrinsic in extrinsics { - block_builder.push(extrinsic).unwrap(); - } - let benchmark_block = block_builder.build().unwrap(); - - let mut group = c.benchmark_group("Block import"); - group.sample_size(20); - group.measurement_time(Duration::from_secs(120)); - group.throughput(Throughput::Elements(max_transfer_count as u64)); - - group.bench_function(format!("(transfers = {}) block import", max_transfer_count), |b| { - b.iter_batched( - || benchmark_block.block.clone(), - |block| { - client.runtime_api().execute_block(parent_hash, block).unwrap(); + ); + + let client = node.client; + let backend = node.backend; + + let (max_transfer_count, extrinsics) = + utils::create_benchmarking_transfer_extrinsics(&client, &src_accounts, &dst_accounts); + + let parent_hash = client.usage_info().chain.best_hash; + let mut block_builder = BlockBuilderBuilder::new(&*client) + .on_parent_block(parent_hash) + .fetch_parent_block_number(&*client) + .unwrap() + .build() + .unwrap(); + for extrinsic in extrinsics { + block_builder.push(extrinsic).unwrap(); + } + let benchmark_block = block_builder.build().unwrap(); + + let mut group = c.benchmark_group("Block import"); + group.sample_size(20); + group.measurement_time(Duration::from_secs(120)); + group.throughput(Throughput::Elements(max_transfer_count as u64)); + + group.bench_function( + format!( + "(transfers = {max_transfer_count}, proof_recording = {}) block import", + bench_parameters.0 + ), + |b| { + b.iter_batched( + || { + backend.reset_trie_cache(); + benchmark_block.block.clone() + }, + |block| { + client.runtime_api().execute_block(parent_hash, block).unwrap(); + }, + BatchSize::SmallInput, + ) }, - BatchSize::SmallInput, - ) - }); + ); + } } criterion_group!(benches, benchmark_block_import); 
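To make the intent of the pieces above concrete: the `storage_proof_size` host function introduced in this patch is what a runtime can poll to learn how much PoV the node-side recorder has accumulated so far, with `u64::MAX` (`PROOF_RECORDING_DISABLED`) signalling that the block is being executed without proof recording. The following is only a rough illustrative sketch of runtime-side usage, not part of this patch set; `measure_proof_size` is a hypothetical helper name:

```rust
use cumulus_primitives_proof_size_hostfunction::{storage_proof_size, PROOF_RECORDING_DISABLED};

/// Run `f` and return its result together with the proof size it consumed,
/// or `None` if the node executed the block without proof recording enabled.
fn measure_proof_size<R>(f: impl FnOnce() -> R) -> (R, Option<u64>) {
	// Reported size before the operation; u64::MAX means "recording disabled".
	let before = storage_proof_size::storage_proof_size();
	let result = f();
	let after = storage_proof_size::storage_proof_size();

	let consumed =
		(before != PROOF_RECORDING_DISABLED).then(|| after.saturating_sub(before));
	(result, consumed)
}
```

The benchmarks in this patch exercise the underlying path by importing the same blocks with and without proof recording enabled.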
diff --git a/cumulus/test/service/benches/block_import_glutton.rs b/cumulus/test/service/benches/block_import_glutton.rs index aeaf0722e72..6295fd68286 100644 --- a/cumulus/test/service/benches/block_import_glutton.rs +++ b/cumulus/test/service/benches/block_import_glutton.rs @@ -27,7 +27,7 @@ use core::time::Duration; use cumulus_primitives_core::ParaId; use sc_block_builder::BlockBuilderBuilder; -use sp_keyring::Sr25519Keyring::Alice; +use sp_keyring::Sr25519Keyring::{Alice, Bob, Charlie, Ferdie}; use cumulus_test_service::bench_utils as utils; @@ -38,17 +38,29 @@ fn benchmark_block_import(c: &mut Criterion) { let para_id = ParaId::from(100); let tokio_handle = runtime.handle(); - let alice = runtime.block_on( - cumulus_test_service::TestNodeBuilder::new(para_id, tokio_handle.clone(), Alice).build(), - ); - let client = alice.client; + let mut initialize_glutton_pallet = true; + for (compute_ratio, storage_ratio, proof_on_import, keyring_identity) in &[ + (One::one(), Zero::zero(), true, Alice), + (One::one(), One::one(), true, Bob), + (One::one(), Zero::zero(), false, Charlie), + (One::one(), One::one(), false, Ferdie), + ] { + let node = runtime.block_on( + cumulus_test_service::TestNodeBuilder::new( + para_id, + tokio_handle.clone(), + *keyring_identity, + ) + .import_proof_recording(*proof_on_import) + .build(), + ); + let client = node.client; + let backend = node.backend; - let mut group = c.benchmark_group("Block import"); - group.sample_size(20); - group.measurement_time(Duration::from_secs(120)); + let mut group = c.benchmark_group("Block import"); + group.sample_size(20); + group.measurement_time(Duration::from_secs(120)); - let mut initialize_glutton_pallet = true; - for (compute_ratio, storage_ratio) in &[(One::one(), Zero::zero()), (One::one(), One::one())] { let block = utils::set_glutton_parameters( &client, initialize_glutton_pallet, @@ -82,7 +94,10 @@ fn benchmark_block_import(c: &mut Criterion) { ), |b| { b.iter_batched( - || benchmark_block.block.clone(), + || { + backend.reset_trie_cache(); + benchmark_block.block.clone() + }, |block| { client.runtime_api().execute_block(parent_hash, block).unwrap(); }, diff --git a/cumulus/test/service/benches/validate_block.rs b/cumulus/test/service/benches/validate_block.rs index 11a7c4376d4..a614863803e 100644 --- a/cumulus/test/service/benches/validate_block.rs +++ b/cumulus/test/service/benches/validate_block.rs @@ -18,7 +18,9 @@ use codec::{Decode, Encode}; use core::time::Duration; use criterion::{criterion_group, criterion_main, BatchSize, Criterion, Throughput}; -use cumulus_primitives_core::{relay_chain::AccountId, PersistedValidationData, ValidationParams}; +use cumulus_primitives_core::{ + relay_chain::AccountId, ParaId, PersistedValidationData, ValidationParams, +}; use cumulus_test_client::{ generate_extrinsic_with_pair, BuildParachainBlockData, InitBlockBuilder, TestClientBuilder, ValidationResult, @@ -83,6 +85,7 @@ fn benchmark_block_validation(c: &mut Criterion) { // Each account should only be included in one transfer. 
let (src_accounts, dst_accounts, account_ids) = utils::create_benchmark_accounts(); + let para_id = ParaId::from(cumulus_test_runtime::PARACHAIN_ID); let mut test_client_builder = TestClientBuilder::with_default_backend(); let genesis_init = test_client_builder.genesis_init_mut(); *genesis_init = cumulus_test_client::GenesisParameters { endowed_accounts: account_ids }; @@ -98,7 +101,14 @@ fn benchmark_block_validation(c: &mut Criterion) { ..Default::default() }; - let mut block_builder = client.init_block_builder(Some(validation_data), Default::default()); + let sproof_builder = RelayStateSproofBuilder { + included_para_head: Some(parent_header.clone().encode().into()), + para_id, + ..Default::default() + }; + + let mut block_builder = + client.init_block_builder(Some(validation_data), sproof_builder.clone()); for extrinsic in extrinsics { block_builder.push(extrinsic).unwrap(); } @@ -108,7 +118,6 @@ fn benchmark_block_validation(c: &mut Criterion) { let proof_size_in_kb = parachain_block.storage_proof().encode().len() as f64 / 1024f64; let runtime = utils::get_wasm_module(); - let sproof_builder: RelayStateSproofBuilder = Default::default(); let (relay_parent_storage_root, _) = sproof_builder.into_state_root_and_proof(); let encoded_params = ValidationParams { block_data: cumulus_test_client::BlockData(parachain_block.encode()), diff --git a/cumulus/test/service/src/bench_utils.rs b/cumulus/test/service/src/bench_utils.rs index 82142f21695..1894835caec 100644 --- a/cumulus/test/service/src/bench_utils.rs +++ b/cumulus/test/service/src/bench_utils.rs @@ -81,8 +81,13 @@ pub fn extrinsic_set_time(client: &TestClient) -> OpaqueExtrinsic { pub fn extrinsic_set_validation_data( parent_header: cumulus_test_runtime::Header, ) -> OpaqueExtrinsic { - let sproof_builder = RelayStateSproofBuilder { para_id: 100.into(), ..Default::default() }; let parent_head = HeadData(parent_header.encode()); + let sproof_builder = RelayStateSproofBuilder { + para_id: cumulus_test_runtime::PARACHAIN_ID.into(), + included_para_head: parent_head.clone().into(), + ..Default::default() + }; + let (relay_parent_storage_root, relay_chain_state) = sproof_builder.into_state_root_and_proof(); let data = ParachainInherentData { validation_data: PersistedValidationData { diff --git a/cumulus/test/service/src/lib.rs b/cumulus/test/service/src/lib.rs index fb858ce0b71..627d060d8a0 100644 --- a/cumulus/test/service/src/lib.rs +++ b/cumulus/test/service/src/lib.rs @@ -187,6 +187,7 @@ impl RecoveryHandle for FailingRecoveryHandle { /// be able to perform chain operations. 
pub fn new_partial( config: &mut Configuration, + enable_import_proof_record: bool, ) -> Result< PartialComponents< Client, @@ -214,7 +215,12 @@ pub fn new_partial( sc_executor::NativeElseWasmExecutor::::new_with_wasm_executor(wasm); let (client, backend, keystore_container, task_manager) = - sc_service::new_full_parts::(config, None, executor)?; + sc_service::new_full_parts_record_import::( + config, + None, + executor, + enable_import_proof_record, + )?; let client = Arc::new(client); let block_import = @@ -309,19 +315,21 @@ pub async fn start_node_impl( rpc_ext_builder: RB, consensus: Consensus, collator_options: CollatorOptions, + proof_recording_during_import: bool, ) -> sc_service::error::Result<( TaskManager, Arc, Arc>, RpcHandlers, TransactionPool, + Arc, )> where RB: Fn(Arc) -> Result, sc_service::Error> + Send + 'static, { let mut parachain_config = prepare_node_config(parachain_config); - let params = new_partial(&mut parachain_config)?; + let params = new_partial(&mut parachain_config, proof_recording_during_import)?; let transaction_pool = params.transaction_pool.clone(); let mut task_manager = params.task_manager; @@ -477,7 +485,7 @@ where start_network.start_network(); - Ok((task_manager, client, network, rpc_handlers, transaction_pool)) + Ok((task_manager, client, network, rpc_handlers, transaction_pool, backend)) } /// A Cumulus test node instance used for testing. @@ -495,6 +503,8 @@ pub struct TestNode { pub rpc_handlers: RpcHandlers, /// Node's transaction pool pub transaction_pool: TransactionPool, + /// Node's backend + pub backend: Arc, } #[allow(missing_docs)] @@ -520,6 +530,7 @@ pub struct TestNodeBuilder { consensus: Consensus, relay_chain_mode: RelayChainMode, endowed_accounts: Vec, + record_proof_during_import: bool, } impl TestNodeBuilder { @@ -544,6 +555,7 @@ impl TestNodeBuilder { consensus: Consensus::RelayChain, endowed_accounts: Default::default(), relay_chain_mode: RelayChainMode::Embedded, + record_proof_during_import: true, } } @@ -656,6 +668,12 @@ impl TestNodeBuilder { self } + /// Record proofs during import. + pub fn import_proof_recording(mut self, should_record_proof: bool) -> TestNodeBuilder { + self.record_proof_during_import = should_record_proof; + self + } + /// Build the [`TestNode`]. 
pub async fn build(self) -> TestNode { let parachain_config = node_config( @@ -684,24 +702,26 @@ impl TestNodeBuilder { format!("{} (relay chain)", relay_chain_config.network.node_name); let multiaddr = parachain_config.network.listen_addresses[0].clone(); - let (task_manager, client, network, rpc_handlers, transaction_pool) = start_node_impl( - parachain_config, - self.collator_key, - relay_chain_config, - self.para_id, - self.wrap_announce_block, - false, - |_| Ok(jsonrpsee::RpcModule::new(())), - self.consensus, - collator_options, - ) - .await - .expect("could not create Cumulus test service"); + let (task_manager, client, network, rpc_handlers, transaction_pool, backend) = + start_node_impl( + parachain_config, + self.collator_key, + relay_chain_config, + self.para_id, + self.wrap_announce_block, + false, + |_| Ok(jsonrpsee::RpcModule::new(())), + self.consensus, + collator_options, + self.record_proof_during_import, + ) + .await + .expect("could not create Cumulus test service"); let peer_id = network.local_peer_id(); let addr = MultiaddrWithPeerId { multiaddr, peer_id }; - TestNode { task_manager, client, network, addr, rpc_handlers, transaction_pool } + TestNode { task_manager, client, network, addr, rpc_handlers, transaction_pool, backend } } } diff --git a/cumulus/test/service/src/main.rs b/cumulus/test/service/src/main.rs index 16b68796bd3..55a0f12d671 100644 --- a/cumulus/test/service/src/main.rs +++ b/cumulus/test/service/src/main.rs @@ -128,7 +128,7 @@ fn main() -> Result<(), sc_cli::Error> { }) .unwrap_or(cumulus_test_service::Consensus::RelayChain); - let (mut task_manager, _, _, _, _) = tokio_runtime + let (mut task_manager, _, _, _, _, _) = tokio_runtime .block_on(cumulus_test_service::start_node_impl( config, collator_key, @@ -139,6 +139,7 @@ fn main() -> Result<(), sc_cli::Error> { |_| Ok(jsonrpsee::RpcModule::new(())), consensus, collator_options, + true, )) .expect("could not create Cumulus test service"); diff --git a/substrate/client/api/src/execution_extensions.rs b/substrate/client/api/src/execution_extensions.rs index 6f927105df0..26d3ae73f69 100644 --- a/substrate/client/api/src/execution_extensions.rs +++ b/substrate/client/api/src/execution_extensions.rs @@ -91,7 +91,6 @@ impl ExtensionsFactory /// /// This crate aggregates extensions available for the offchain calls /// and is responsible for producing a correct `Extensions` object. -/// for each call, based on required `Capabilities`. pub struct ExecutionExtensions { extensions_factory: RwLock>>, read_runtime_version: Arc, @@ -116,8 +115,7 @@ impl ExecutionExtensions { *self.extensions_factory.write() = Box::new(maker); } - /// Based on the execution context and capabilities it produces - /// the extensions object to support desired set of APIs. + /// Produces default extensions based on the input parameters. 
pub fn extensions( &self, block_hash: Block::Hash, @@ -127,7 +125,6 @@ impl ExecutionExtensions { self.extensions_factory.read().extensions_for(block_hash, block_number); extensions.register(ReadRuntimeVersionExt::new(self.read_runtime_version.clone())); - extensions } } diff --git a/substrate/client/block-builder/Cargo.toml b/substrate/client/block-builder/Cargo.toml index 2492c4101b2..852ee84f89b 100644 --- a/substrate/client/block-builder/Cargo.toml +++ b/substrate/client/block-builder/Cargo.toml @@ -20,6 +20,7 @@ sp-api = { path = "../../primitives/api" } sp-block-builder = { path = "../../primitives/block-builder" } sp-blockchain = { path = "../../primitives/blockchain" } sp-core = { path = "../../primitives/core" } +sp-trie = { path = "../../primitives/trie" } sp-inherents = { path = "../../primitives/inherents" } sp-runtime = { path = "../../primitives/runtime" } diff --git a/substrate/client/block-builder/src/lib.rs b/substrate/client/block-builder/src/lib.rs index f62b941fdb1..258e39d962b 100644 --- a/substrate/client/block-builder/src/lib.rs +++ b/substrate/client/block-builder/src/lib.rs @@ -42,6 +42,7 @@ use sp_runtime::{ use std::marker::PhantomData; pub use sp_block_builder::BlockBuilder as BlockBuilderApi; +use sp_trie::proof_size_extension::ProofSizeExt; /// A builder for creating an instance of [`BlockBuilder`]. pub struct BlockBuilderBuilder<'a, B, C> { @@ -235,6 +236,10 @@ where if record_proof { api.record_proof(); + let recorder = api + .proof_recorder() + .expect("Proof recording is enabled in the line above; qed."); + api.register_extension(ProofSizeExt::new(recorder)); } api.set_call_context(CallContext::Onchain); diff --git a/substrate/client/service/src/builder.rs b/substrate/client/service/src/builder.rs index d078f44f198..1a3a679c519 100644 --- a/substrate/client/service/src/builder.rs +++ b/substrate/client/service/src/builder.rs @@ -130,10 +130,11 @@ where } /// Create the initial parts of a full node with the default genesis block builder. -pub fn new_full_parts( +pub fn new_full_parts_record_import( config: &Configuration, telemetry: Option, executor: TExec, + enable_import_proof_recording: bool, ) -> Result, Error> where TBl: BlockT, @@ -148,7 +149,26 @@ where executor.clone(), )?; - new_full_parts_with_genesis_builder(config, telemetry, executor, backend, genesis_block_builder) + new_full_parts_with_genesis_builder( + config, + telemetry, + executor, + backend, + genesis_block_builder, + enable_import_proof_recording, + ) +} +/// Create the initial parts of a full node with the default genesis block builder. +pub fn new_full_parts( + config: &Configuration, + telemetry: Option, + executor: TExec, +) -> Result, Error> +where + TBl: BlockT, + TExec: CodeExecutor + RuntimeVersionOf + Clone, +{ + new_full_parts_record_import(config, telemetry, executor, false) } /// Create the initial parts of a full node. @@ -158,6 +178,7 @@ pub fn new_full_parts_with_genesis_builder>, genesis_block_builder: TBuildGenesisBlock, + enable_import_proof_recording: bool, ) -> Result, Error> where TBl: BlockT, @@ -225,6 +246,7 @@ where SyncMode::LightState { .. } | SyncMode::Warp { .. 
} ), wasm_runtime_substitutes, + enable_import_proof_recording, }, )?; diff --git a/substrate/client/service/src/client/client.rs b/substrate/client/service/src/client/client.rs index 9d51aae55b2..aa9c1b80a29 100644 --- a/substrate/client/service/src/client/client.rs +++ b/substrate/client/service/src/client/client.rs @@ -77,7 +77,7 @@ use sp_state_machine::{ ChildStorageCollection, KeyValueStates, KeyValueStorageLevel, StorageCollection, MAX_NESTED_TRIE_DEPTH, }; -use sp_trie::{CompactProof, MerkleValue, StorageProof}; +use sp_trie::{proof_size_extension::ProofSizeExt, CompactProof, MerkleValue, StorageProof}; use std::{ collections::{HashMap, HashSet}, marker::PhantomData, @@ -184,7 +184,7 @@ where ) } -/// Relevant client configuration items relevant for the client. +/// Client configuration items. #[derive(Debug, Clone)] pub struct ClientConfig { /// Enable the offchain worker db. @@ -198,6 +198,8 @@ pub struct ClientConfig { /// Map of WASM runtime substitute starting at the child of the given block until the runtime /// version doesn't match anymore. pub wasm_runtime_substitutes: HashMap, Vec>, + /// Enable recording of storage proofs during block import + pub enable_import_proof_recording: bool, } impl Default for ClientConfig { @@ -208,6 +210,7 @@ impl Default for ClientConfig { wasm_runtime_overrides: None, no_genesis: false, wasm_runtime_substitutes: HashMap::new(), + enable_import_proof_recording: false, } } } @@ -858,6 +861,14 @@ where runtime_api.set_call_context(CallContext::Onchain); + if self.config.enable_import_proof_recording { + runtime_api.record_proof(); + let recorder = runtime_api + .proof_recorder() + .expect("Proof recording is enabled in the line above; qed."); + runtime_api.register_extension(ProofSizeExt::new(recorder)); + } + runtime_api.execute_block( *parent_hash, Block::new(import_block.header.clone(), body.clone()), diff --git a/substrate/client/service/src/lib.rs b/substrate/client/service/src/lib.rs index ff9eb982b86..0c7e138ce90 100644 --- a/substrate/client/service/src/lib.rs +++ b/substrate/client/service/src/lib.rs @@ -53,9 +53,10 @@ use sp_runtime::traits::{Block as BlockT, Header as HeaderT}; pub use self::{ builder::{ build_network, new_client, new_db_backend, new_full_client, new_full_parts, - new_full_parts_with_genesis_builder, new_native_or_wasm_executor, new_wasm_executor, - spawn_tasks, BuildNetworkParams, KeystoreContainer, NetworkStarter, SpawnTasksParams, - TFullBackend, TFullCallExecutor, TFullClient, + new_full_parts_record_import, new_full_parts_with_genesis_builder, + new_native_or_wasm_executor, new_wasm_executor, spawn_tasks, BuildNetworkParams, + KeystoreContainer, NetworkStarter, SpawnTasksParams, TFullBackend, TFullCallExecutor, + TFullClient, }, client::{ClientConfig, LocalCallExecutor}, error::Error, diff --git a/substrate/frame/support/procedural/src/construct_runtime/mod.rs b/substrate/frame/support/procedural/src/construct_runtime/mod.rs index d1d6040d33a..010143574ed 100644 --- a/substrate/frame/support/procedural/src/construct_runtime/mod.rs +++ b/substrate/frame/support/procedural/src/construct_runtime/mod.rs @@ -253,7 +253,7 @@ pub fn construct_runtime(input: TokenStream) -> TokenStream { let res = res.unwrap_or_else(|e| e.to_compile_error()); let res = expander::Expander::new("construct_runtime") - .dry(std::env::var("FRAME_EXPAND").is_err()) + .dry(std::env::var("EXPAND_MACROS").is_err()) .verbose(true) .write_to_out_dir(res) .expect("Does not fail because of IO in OUT_DIR; qed"); diff --git 
a/substrate/primitives/api/proc-macro/src/decl_runtime_apis.rs b/substrate/primitives/api/proc-macro/src/decl_runtime_apis.rs index 370735819f9..2b1e65ec885 100644 --- a/substrate/primitives/api/proc-macro/src/decl_runtime_apis.rs +++ b/substrate/primitives/api/proc-macro/src/decl_runtime_apis.rs @@ -729,7 +729,7 @@ fn decl_runtime_apis_impl_inner(api_decls: &[ItemTrait]) -> Result }; let decl = expander::Expander::new("decl_runtime_apis") - .dry(std::env::var("SP_API_EXPAND").is_err()) + .dry(std::env::var("EXPAND_MACROS").is_err()) .verbose(true) .write_to_out_dir(decl) .expect("Does not fail because of IO in OUT_DIR; qed"); diff --git a/substrate/primitives/api/proc-macro/src/impl_runtime_apis.rs b/substrate/primitives/api/proc-macro/src/impl_runtime_apis.rs index e97291bc58a..fd81fdb624c 100644 --- a/substrate/primitives/api/proc-macro/src/impl_runtime_apis.rs +++ b/substrate/primitives/api/proc-macro/src/impl_runtime_apis.rs @@ -846,7 +846,7 @@ fn impl_runtime_apis_impl_inner(api_impls: &[ItemImpl]) -> Result { ); let impl_ = expander::Expander::new("impl_runtime_apis") - .dry(std::env::var("SP_API_EXPAND").is_err()) + .dry(std::env::var("EXPAND_MACROS").is_err()) .verbose(true) .write_to_out_dir(impl_) .expect("Does not fail because of IO in OUT_DIR; qed"); diff --git a/substrate/primitives/runtime-interface/proc-macro/Cargo.toml b/substrate/primitives/runtime-interface/proc-macro/Cargo.toml index fbc49785ae9..11236de91ce 100644 --- a/substrate/primitives/runtime-interface/proc-macro/Cargo.toml +++ b/substrate/primitives/runtime-interface/proc-macro/Cargo.toml @@ -20,4 +20,5 @@ Inflector = "0.11.4" proc-macro-crate = "1.1.3" proc-macro2 = "1.0.56" quote = "1.0.28" +expander = "2.0.0" syn = { version = "2.0.38", features = ["full", "visit", "fold", "extra-traits"] } diff --git a/substrate/primitives/runtime-interface/proc-macro/src/runtime_interface/mod.rs b/substrate/primitives/runtime-interface/proc-macro/src/runtime_interface/mod.rs index 008d69b3210..d0cc9e7b96b 100644 --- a/substrate/primitives/runtime-interface/proc-macro/src/runtime_interface/mod.rs +++ b/substrate/primitives/runtime-interface/proc-macro/src/runtime_interface/mod.rs @@ -68,5 +68,11 @@ pub fn runtime_interface_impl( } }; + let res = expander::Expander::new("runtime_interface") + .dry(std::env::var("EXPAND_MACROS").is_err()) + .verbose(true) + .write_to_out_dir(res) + .expect("Does not fail because of IO in OUT_DIR; qed"); + Ok(res) } diff --git a/substrate/primitives/state-machine/src/trie_backend.rs b/substrate/primitives/state-machine/src/trie_backend.rs index 7b337b5fd54..7496463e642 100644 --- a/substrate/primitives/state-machine/src/trie_backend.rs +++ b/substrate/primitives/state-machine/src/trie_backend.rs @@ -33,12 +33,12 @@ use sp_core::storage::{ChildInfo, StateVersion}; #[cfg(feature = "std")] use sp_trie::{ cache::{LocalTrieCache, TrieCache}, - recorder::Recorder, - MemoryDB, StorageProof, + MemoryDB, }; #[cfg(not(feature = "std"))] use sp_trie::{Error, NodeCodec}; -use sp_trie::{MerkleValue, PrefixedMemoryDB}; +use sp_trie::{MerkleValue, PrefixedMemoryDB, StorageProof, TrieRecorderProvider}; + use trie_db::TrieCache as TrieCacheT; #[cfg(not(feature = "std"))] use trie_db::{node::NodeOwned, CachedValue}; @@ -112,8 +112,6 @@ pub struct UnimplementedCacheProvider { // Not strictly necessary, but the H bound allows to use this as a drop-in // replacement for the `LocalTrieCache` in no-std contexts. _phantom: core::marker::PhantomData, - // Statically prevents construction. 
- _infallible: core::convert::Infallible, } #[cfg(not(feature = "std"))] @@ -156,52 +154,83 @@ impl TrieCacheProvider for UnimplementedCacheProvider { } } +/// Recorder provider that allows construction of a [`TrieBackend`] and satisfies the requirements, +/// but can never be instantiated. +#[cfg(not(feature = "std"))] +pub struct UnimplementedRecorderProvider { + // Not strictly necessary, but the H bound allows to use this as a drop-in + // replacement for the [`sp_trie::recorder::Recorder`] in no-std contexts. + _phantom: core::marker::PhantomData, +} + +#[cfg(not(feature = "std"))] +impl trie_db::TrieRecorder for UnimplementedRecorderProvider { + fn record<'a>(&mut self, _access: trie_db::TrieAccess<'a, H::Out>) { + unimplemented!() + } + + fn trie_nodes_recorded_for_key(&self, _key: &[u8]) -> trie_db::RecordedForKey { + unimplemented!() + } +} + +#[cfg(not(feature = "std"))] +impl TrieRecorderProvider for UnimplementedRecorderProvider { + type Recorder<'a> = UnimplementedRecorderProvider where H: 'a; + + fn drain_storage_proof(self) -> Option { + unimplemented!() + } + + fn as_trie_recorder(&self, _storage_root: H::Out) -> Self::Recorder<'_> { + unimplemented!() + } +} + #[cfg(feature = "std")] type DefaultCache = LocalTrieCache; #[cfg(not(feature = "std"))] type DefaultCache = UnimplementedCacheProvider; +#[cfg(feature = "std")] +type DefaultRecorder = sp_trie::recorder::Recorder; + +#[cfg(not(feature = "std"))] +type DefaultRecorder = UnimplementedRecorderProvider; + /// Builder for creating a [`TrieBackend`]. -pub struct TrieBackendBuilder, H: Hasher, C = DefaultCache> { +pub struct TrieBackendBuilder< + S: TrieBackendStorage, + H: Hasher, + C = DefaultCache, + R = DefaultRecorder, +> { storage: S, root: H::Out, - #[cfg(feature = "std")] - recorder: Option>, + recorder: Option, cache: Option, } -impl TrieBackendBuilder> +impl TrieBackendBuilder where S: TrieBackendStorage, H: Hasher, { /// Create a new builder instance. pub fn new(storage: S, root: H::Out) -> Self { - Self { - storage, - root, - #[cfg(feature = "std")] - recorder: None, - cache: None, - } + Self { storage, root, recorder: None, cache: None } } } -impl TrieBackendBuilder +impl TrieBackendBuilder where S: TrieBackendStorage, H: Hasher, { /// Create a new builder instance. pub fn new_with_cache(storage: S, root: H::Out, cache: C) -> Self { - Self { - storage, - root, - #[cfg(feature = "std")] - recorder: None, - cache: Some(cache), - } + Self { storage, root, recorder: None, cache: Some(cache) } } /// Wrap the given [`TrieBackend`]. /// @@ -210,53 +239,47 @@ where /// backend. /// /// The backend storage and the cache will be taken from `other`. - pub fn wrap(other: &TrieBackend) -> TrieBackendBuilder<&S, H, &C> { + pub fn wrap(other: &TrieBackend) -> TrieBackendBuilder<&S, H, &C, R> { TrieBackendBuilder { storage: other.essence.backend_storage(), root: *other.essence.root(), - #[cfg(feature = "std")] recorder: None, cache: other.essence.trie_node_cache.as_ref(), } } /// Use the given optional `recorder` for the to be configured [`TrieBackend`]. - #[cfg(feature = "std")] - pub fn with_optional_recorder(self, recorder: Option>) -> Self { + pub fn with_optional_recorder(self, recorder: Option) -> Self { Self { recorder, ..self } } /// Use the given `recorder` for the to be configured [`TrieBackend`]. 
- #[cfg(feature = "std")] - pub fn with_recorder(self, recorder: Recorder) -> Self { + pub fn with_recorder(self, recorder: R) -> Self { Self { recorder: Some(recorder), ..self } } /// Use the given optional `cache` for the to be configured [`TrieBackend`]. - pub fn with_optional_cache(self, cache: Option) -> TrieBackendBuilder { + pub fn with_optional_cache(self, cache: Option) -> TrieBackendBuilder { TrieBackendBuilder { cache, root: self.root, storage: self.storage, - #[cfg(feature = "std")] recorder: self.recorder, } } /// Use the given `cache` for the to be configured [`TrieBackend`]. - pub fn with_cache(self, cache: LC) -> TrieBackendBuilder { + pub fn with_cache(self, cache: LC) -> TrieBackendBuilder { TrieBackendBuilder { cache: Some(cache), root: self.root, storage: self.storage, - #[cfg(feature = "std")] recorder: self.recorder, } } /// Build the configured [`TrieBackend`]. - #[cfg(feature = "std")] - pub fn build(self) -> TrieBackend { + pub fn build(self) -> TrieBackend { TrieBackend { essence: TrieBackendEssence::new_with_cache_and_recorder( self.storage, @@ -267,27 +290,18 @@ where next_storage_key_cache: Default::default(), } } - - /// Build the configured [`TrieBackend`]. - #[cfg(not(feature = "std"))] - pub fn build(self) -> TrieBackend { - TrieBackend { - essence: TrieBackendEssence::new_with_cache(self.storage, self.root, self.cache), - next_storage_key_cache: Default::default(), - } - } } /// A cached iterator. -struct CachedIter +struct CachedIter where H: Hasher, { last_key: sp_std::vec::Vec, - iter: RawIter, + iter: RawIter, } -impl Default for CachedIter +impl Default for CachedIter where H: Hasher, { @@ -313,23 +327,32 @@ fn access_cache(cell: &CacheCell, callback: impl FnOnce(&mut T) -> R) - } /// Patricia trie-based backend. Transaction type is an overlay of changes to commit. -pub struct TrieBackend, H: Hasher, C = DefaultCache> { - pub(crate) essence: TrieBackendEssence, - next_storage_key_cache: CacheCell>>, +pub struct TrieBackend< + S: TrieBackendStorage, + H: Hasher, + C = DefaultCache, + R = DefaultRecorder, +> { + pub(crate) essence: TrieBackendEssence, + next_storage_key_cache: CacheCell>>, } -impl, H: Hasher, C: TrieCacheProvider + Send + Sync> - TrieBackend +impl< + S: TrieBackendStorage, + H: Hasher, + C: TrieCacheProvider + Send + Sync, + R: TrieRecorderProvider + Send + Sync, + > TrieBackend where H::Out: Codec, { #[cfg(test)] - pub(crate) fn from_essence(essence: TrieBackendEssence) -> Self { + pub(crate) fn from_essence(essence: TrieBackendEssence) -> Self { Self { essence, next_storage_key_cache: Default::default() } } /// Get backend essence reference. - pub fn essence(&self) -> &TrieBackendEssence { + pub fn essence(&self) -> &TrieBackendEssence { &self.essence } @@ -361,28 +384,31 @@ where /// Extract the [`StorageProof`]. /// /// This only returns `Some` when there was a recorder set. 
- #[cfg(feature = "std")] pub fn extract_proof(mut self) -> Option { - self.essence.recorder.take().map(|r| r.drain_storage_proof()) + self.essence.recorder.take().and_then(|r| r.drain_storage_proof()) } } -impl, H: Hasher, C: TrieCacheProvider> sp_std::fmt::Debug - for TrieBackend +impl, H: Hasher, C: TrieCacheProvider, R: TrieRecorderProvider> + sp_std::fmt::Debug for TrieBackend { fn fmt(&self, f: &mut sp_std::fmt::Formatter<'_>) -> sp_std::fmt::Result { write!(f, "TrieBackend") } } -impl, H: Hasher, C: TrieCacheProvider + Send + Sync> Backend - for TrieBackend +impl< + S: TrieBackendStorage, + H: Hasher, + C: TrieCacheProvider + Send + Sync, + R: TrieRecorderProvider + Send + Sync, + > Backend for TrieBackend where H::Out: Ord + Codec, { type Error = crate::DefaultError; type TrieBackendStorage = S; - type RawIter = crate::trie_backend_essence::RawIter; + type RawIter = crate::trie_backend_essence::RawIter; fn storage_hash(&self, key: &[u8]) -> Result, Self::Error> { self.essence.storage_hash(key) diff --git a/substrate/primitives/state-machine/src/trie_backend_essence.rs b/substrate/primitives/state-machine/src/trie_backend_essence.rs index ad7aeab899c..3f789111dee 100644 --- a/substrate/primitives/state-machine/src/trie_backend_essence.rs +++ b/substrate/primitives/state-machine/src/trie_backend_essence.rs @@ -28,19 +28,19 @@ use hash_db::{self, AsHashDB, HashDB, HashDBRef, Hasher, Prefix}; #[cfg(feature = "std")] use parking_lot::RwLock; use sp_core::storage::{ChildInfo, ChildType, StateVersion}; -use sp_std::{boxed::Box, marker::PhantomData, vec::Vec}; #[cfg(feature = "std")] -use sp_trie::recorder::Recorder; +use sp_std::sync::Arc; +use sp_std::{boxed::Box, marker::PhantomData, vec::Vec}; use sp_trie::{ child_delta_trie_root, delta_trie_root, empty_child_trie_root, read_child_trie_first_descedant_value, read_child_trie_hash, read_child_trie_value, read_trie_first_descedant_value, read_trie_value, trie_types::{TrieDBBuilder, TrieError}, DBValue, KeySpacedDB, MerkleValue, NodeCodec, PrefixedMemoryDB, Trie, TrieCache, - TrieDBRawIterator, TrieRecorder, + TrieDBRawIterator, TrieRecorder, TrieRecorderProvider, }; #[cfg(feature = "std")] -use std::{collections::HashMap, sync::Arc}; +use std::collections::HashMap; // In this module, we only use layout for read operation and empty root, // where V1 and V0 are equivalent. use sp_trie::LayoutV1 as Layout; @@ -83,7 +83,7 @@ enum IterState { } /// A raw iterator over the storage. 
-pub struct RawIter +pub struct RawIter where H: Hasher, { @@ -93,25 +93,26 @@ where child_info: Option, trie_iter: TrieDBRawIterator>, state: IterState, - _phantom: PhantomData<(S, C)>, + _phantom: PhantomData<(S, C, R)>, } -impl RawIter +impl RawIter where H: Hasher, S: TrieBackendStorage, H::Out: Codec + Ord, C: TrieCacheProvider + Send + Sync, + R: TrieRecorderProvider + Send + Sync, { #[inline] - fn prepare( + fn prepare( &mut self, - backend: &TrieBackendEssence, + backend: &TrieBackendEssence, callback: impl FnOnce( &sp_trie::TrieDB>, &mut TrieDBRawIterator>, - ) -> Option::Out>>>>, - ) -> Option> { + ) -> Option::Out>>>>, + ) -> Option> { if !matches!(self.state, IterState::Pending) { return None } @@ -139,7 +140,7 @@ where } } -impl Default for RawIter +impl Default for RawIter where H: Hasher, { @@ -156,14 +157,15 @@ where } } -impl StorageIterator for RawIter +impl StorageIterator for RawIter where H: Hasher, S: TrieBackendStorage, H::Out: Codec + Ord, C: TrieCacheProvider + Send + Sync, + R: TrieRecorderProvider + Send + Sync, { - type Backend = crate::TrieBackend; + type Backend = crate::TrieBackend; type Error = crate::DefaultError; #[inline] @@ -204,18 +206,17 @@ where } /// Patricia trie-based pairs storage essence. -pub struct TrieBackendEssence, H: Hasher, C> { +pub struct TrieBackendEssence, H: Hasher, C, R> { storage: S, root: H::Out, empty: H::Out, #[cfg(feature = "std")] pub(crate) cache: Arc>>, pub(crate) trie_node_cache: Option, - #[cfg(feature = "std")] - pub(crate) recorder: Option>, + pub(crate) recorder: Option, } -impl, H: Hasher, C> TrieBackendEssence { +impl, H: Hasher, C, R> TrieBackendEssence { /// Create new trie-based backend. pub fn new(storage: S, root: H::Out) -> Self { Self::new_with_cache(storage, root, None) @@ -230,23 +231,22 @@ impl, H: Hasher, C> TrieBackendEssence { #[cfg(feature = "std")] cache: Arc::new(RwLock::new(Cache::new())), trie_node_cache: cache, - #[cfg(feature = "std")] recorder: None, } } /// Create new trie-based backend. - #[cfg(feature = "std")] pub fn new_with_cache_and_recorder( storage: S, root: H::Out, cache: Option, - recorder: Option>, + recorder: Option, ) -> Self { TrieBackendEssence { storage, root, empty: H::hash(&[0u8]), + #[cfg(feature = "std")] cache: Arc::new(RwLock::new(Cache::new())), trie_node_cache: cache, recorder, @@ -289,37 +289,31 @@ impl, H: Hasher, C> TrieBackendEssence { } } -impl, H: Hasher, C: TrieCacheProvider> TrieBackendEssence { +impl, H: Hasher, C: TrieCacheProvider, R: TrieRecorderProvider> + TrieBackendEssence +{ /// Call the given closure passing it the recorder and the cache. /// /// If the given `storage_root` is `None`, `self.root` will be used. 
#[inline] - fn with_recorder_and_cache( + fn with_recorder_and_cache( &self, storage_root: Option, callback: impl FnOnce( Option<&mut dyn TrieRecorder>, Option<&mut dyn TrieCache>>, - ) -> R, - ) -> R { + ) -> RE, + ) -> RE { let storage_root = storage_root.unwrap_or_else(|| self.root); let mut cache = self.trie_node_cache.as_ref().map(|c| c.as_trie_db_cache(storage_root)); let cache = cache.as_mut().map(|c| c as _); - #[cfg(feature = "std")] - { - let mut recorder = self.recorder.as_ref().map(|r| r.as_trie_recorder(storage_root)); - let recorder = match recorder.as_mut() { - Some(recorder) => Some(recorder as &mut dyn TrieRecorder), - None => None, - }; - callback(recorder, cache) - } - - #[cfg(not(feature = "std"))] - { - callback(None, cache) - } + let mut recorder = self.recorder.as_ref().map(|r| r.as_trie_recorder(storage_root)); + let recorder = match recorder.as_mut() { + Some(recorder) => Some(recorder as &mut dyn TrieRecorder), + None => None, + }; + callback(recorder, cache) } /// Call the given closure passing it the recorder and the cache. @@ -329,15 +323,14 @@ impl, H: Hasher, C: TrieCacheProvider> TrieBackendEs /// the new storage root. This is required to register the changes in the cache /// for the correct storage root. The given `storage_root` corresponds to the root of the "old" /// trie. If the value is not given, `self.root` is used. - #[cfg(feature = "std")] - fn with_recorder_and_cache_for_storage_root( + fn with_recorder_and_cache_for_storage_root( &self, storage_root: Option, callback: impl FnOnce( Option<&mut dyn TrieRecorder>, Option<&mut dyn TrieCache>>, - ) -> (Option, R), - ) -> R { + ) -> (Option, RE), + ) -> RE { let storage_root = storage_root.unwrap_or_else(|| self.root); let mut recorder = self.recorder.as_ref().map(|r| r.as_trie_recorder(storage_root)); let recorder = match recorder.as_mut() { @@ -361,46 +354,26 @@ impl, H: Hasher, C: TrieCacheProvider> TrieBackendEs result } - - #[cfg(not(feature = "std"))] - fn with_recorder_and_cache_for_storage_root( - &self, - _storage_root: Option, - callback: impl FnOnce( - Option<&mut dyn TrieRecorder>, - Option<&mut dyn TrieCache>>, - ) -> (Option, R), - ) -> R { - if let Some(local_cache) = self.trie_node_cache.as_ref() { - let mut cache = local_cache.as_trie_db_mut_cache(); - - let (new_root, r) = callback(None, Some(&mut cache)); - - if let Some(new_root) = new_root { - local_cache.merge(cache, new_root); - } - - r - } else { - callback(None, None).1 - } - } } -impl, H: Hasher, C: TrieCacheProvider + Send + Sync> - TrieBackendEssence +impl< + S: TrieBackendStorage, + H: Hasher, + C: TrieCacheProvider + Send + Sync, + R: TrieRecorderProvider + Send + Sync, + > TrieBackendEssence where H::Out: Codec + Ord, { /// Calls the given closure with a [`TrieDb`] constructed for the given /// storage root and (optionally) child trie. #[inline] - fn with_trie_db( + fn with_trie_db( &self, root: H::Out, child_info: Option<&ChildInfo>, - callback: impl FnOnce(&sp_trie::TrieDB>) -> R, - ) -> R { + callback: impl FnOnce(&sp_trie::TrieDB>) -> RE, + ) -> RE { let backend = self as &dyn HashDBRef>; let db = child_info .as_ref() @@ -609,7 +582,7 @@ where } /// Create a raw iterator over the storage. - pub fn raw_iter(&self, args: IterArgs) -> Result> { + pub fn raw_iter(&self, args: IterArgs) -> Result> { let root = if let Some(child_info) = args.child_info.as_ref() { let root = match self.child_root(&child_info)? 
{ Some(root) => root, @@ -831,19 +804,28 @@ where } } -impl, H: Hasher, C: TrieCacheProvider + Send + Sync> - AsHashDB for TrieBackendEssence +impl< + S: TrieBackendStorage, + H: Hasher, + C: TrieCacheProvider + Send + Sync, + R: TrieRecorderProvider + Send + Sync, + > AsHashDB for TrieBackendEssence { fn as_hash_db<'b>(&'b self) -> &'b (dyn HashDB + 'b) { self } + fn as_hash_db_mut<'b>(&'b mut self) -> &'b mut (dyn HashDB + 'b) { self } } -impl, H: Hasher, C: TrieCacheProvider + Send + Sync> HashDB - for TrieBackendEssence +impl< + S: TrieBackendStorage, + H: Hasher, + C: TrieCacheProvider + Send + Sync, + R: TrieRecorderProvider + Send + Sync, + > HashDB for TrieBackendEssence { fn get(&self, key: &H::Out, prefix: Prefix) -> Option { if *key == self.empty { @@ -875,8 +857,12 @@ impl, H: Hasher, C: TrieCacheProvider + Send + Sync> } } -impl, H: Hasher, C: TrieCacheProvider + Send + Sync> - HashDBRef for TrieBackendEssence +impl< + S: TrieBackendStorage, + H: Hasher, + C: TrieCacheProvider + Send + Sync, + R: TrieRecorderProvider + Send + Sync, + > HashDBRef for TrieBackendEssence { fn get(&self, key: &H::Out, prefix: Prefix) -> Option { HashDB::get(self, key, prefix) @@ -928,7 +914,10 @@ mod test { .expect("insert failed"); }; - let essence_1 = TrieBackendEssence::<_, _, LocalTrieCache<_>>::new(mdb, root_1); + let essence_1 = + TrieBackendEssence::<_, _, LocalTrieCache<_>, sp_trie::recorder::Recorder<_>>::new( + mdb, root_1, + ); let mdb = essence_1.backend_storage().clone(); let essence_1 = TrieBackend::from_essence(essence_1); @@ -938,7 +927,10 @@ mod test { assert_eq!(essence_1.next_storage_key(b"5"), Ok(Some(b"6".to_vec()))); assert_eq!(essence_1.next_storage_key(b"6"), Ok(None)); - let essence_2 = TrieBackendEssence::<_, _, LocalTrieCache<_>>::new(mdb, root_2); + let essence_2 = + TrieBackendEssence::<_, _, LocalTrieCache<_>, sp_trie::recorder::Recorder<_>>::new( + mdb, root_2, + ); assert_eq!(essence_2.next_child_storage_key(child_info, b"2"), Ok(Some(b"3".to_vec()))); assert_eq!(essence_2.next_child_storage_key(child_info, b"3"), Ok(Some(b"4".to_vec()))); diff --git a/substrate/primitives/trie/Cargo.toml b/substrate/primitives/trie/Cargo.toml index 0822d84a76e..931ccce2044 100644 --- a/substrate/primitives/trie/Cargo.toml +++ b/substrate/primitives/trie/Cargo.toml @@ -34,6 +34,7 @@ trie-db = { version = "0.28.0", default-features = false } trie-root = { version = "0.18.0", default-features = false } sp-core = { path = "../core", default-features = false} sp-std = { path = "../std", default-features = false} +sp-externalities = { path = "../externalities", default-features = false } schnellru = { version = "0.2.1", optional = true } [dev-dependencies] @@ -58,6 +59,7 @@ std = [ "scale-info/std", "schnellru", "sp-core/std", + "sp-externalities/std", "sp-runtime/std", "sp-std/std", "thiserror", diff --git a/substrate/primitives/trie/src/lib.rs b/substrate/primitives/trie/src/lib.rs index 1a1ed670454..fd1320b3fbc 100644 --- a/substrate/primitives/trie/src/lib.rs +++ b/substrate/primitives/trie/src/lib.rs @@ -30,6 +30,9 @@ mod storage_proof; mod trie_codec; mod trie_stream; +#[cfg(feature = "std")] +pub mod proof_size_extension; + /// Our `NodeCodec`-specific error. pub use error::Error; /// Various re-exports from the `hash-db` crate. @@ -146,6 +149,29 @@ where } } +/// Type that is able to provide a [`trie_db::TrieRecorder`]. +/// +/// Types implementing this trait can be used to maintain recorded state +/// across operations on different [`trie_db::TrieDB`] instances. 
+pub trait TrieRecorderProvider { + /// Recorder type that is going to be returned by implementors of this trait. + type Recorder<'a>: trie_db::TrieRecorder + 'a + where + Self: 'a; + + /// Create a [`StorageProof`] derived from the internal state. + fn drain_storage_proof(self) -> Option; + + /// Provide a recorder implementing [`trie_db::TrieRecorder`]. + fn as_trie_recorder(&self, storage_root: H::Out) -> Self::Recorder<'_>; +} + +/// Type that is able to provide a proof size estimation. +pub trait ProofSizeProvider { + /// Returns the storage proof size. + fn estimate_encoded_size(&self) -> usize; +} + /// TrieDB error over `TrieConfiguration` trait. pub type TrieError = trie_db::TrieError, CError>; /// Reexport from `hash_db`, with genericity set for `Hasher` trait. diff --git a/substrate/primitives/trie/src/proof_size_extension.rs b/substrate/primitives/trie/src/proof_size_extension.rs new file mode 100644 index 00000000000..c97f334494a --- /dev/null +++ b/substrate/primitives/trie/src/proof_size_extension.rs @@ -0,0 +1,39 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Externalities extension that provides access to the current proof size +//! of the underlying recorder. + +use crate::ProofSizeProvider; + +sp_externalities::decl_extension! { + /// The proof size extension to fetch the current storage proof size + /// in externalities. + pub struct ProofSizeExt(Box); +} + +impl ProofSizeExt { + /// Creates a new instance of [`ProofSizeExt`]. + pub fn new(recorder: T) -> Self { + ProofSizeExt(Box::new(recorder)) + } + + /// Returns the storage proof size. + pub fn storage_proof_size(&self) -> u64 { + self.0.estimate_encoded_size() as _ + } +} diff --git a/substrate/primitives/trie/src/recorder.rs b/substrate/primitives/trie/src/recorder.rs index 154cee3f37d..b236f281bb1 100644 --- a/substrate/primitives/trie/src/recorder.rs +++ b/substrate/primitives/trie/src/recorder.rs @@ -23,7 +23,7 @@ use crate::{NodeCodec, StorageProof}; use codec::Encode; use hash_db::Hasher; -use parking_lot::Mutex; +use parking_lot::{Mutex, MutexGuard}; use std::{ collections::{HashMap, HashSet}, marker::PhantomData, @@ -80,7 +80,9 @@ impl Default for RecorderInner { /// The trie recorder. /// -/// It can be used to record accesses to the trie and then to convert them into a [`StorageProof`]. +/// Owns the recorded data. Is used to transform data into a storage +/// proof and to provide transaction support. The `as_trie_recorder` method provides a +/// [`trie_db::TrieDB`] compatible recorder that implements the actual recording logic. pub struct Recorder { inner: Arc>>, /// The estimated encoded size of the storage proof this recorder will produce. @@ -112,11 +114,8 @@ impl Recorder { /// /// NOTE: This locks a mutex that stays locked until the return value is dropped. 
#[inline] - pub fn as_trie_recorder( - &self, - storage_root: H::Out, - ) -> impl trie_db::TrieRecorder + '_ { - TrieRecorder:: { + pub fn as_trie_recorder(&self, storage_root: H::Out) -> TrieRecorder<'_, H> { + TrieRecorder:: { inner: self.inner.lock(), storage_root, encoded_size_estimation: self.encoded_size_estimation.clone(), @@ -231,15 +230,33 @@ impl Recorder { } } +impl crate::ProofSizeProvider for Recorder { + fn estimate_encoded_size(&self) -> usize { + Recorder::estimate_encoded_size(self) + } +} + /// The [`TrieRecorder`](trie_db::TrieRecorder) implementation. -struct TrieRecorder { - inner: I, +pub struct TrieRecorder<'a, H: Hasher> { + inner: MutexGuard<'a, RecorderInner>, storage_root: H::Out, encoded_size_estimation: Arc, _phantom: PhantomData, } -impl>> TrieRecorder { +impl crate::TrieRecorderProvider for Recorder { + type Recorder<'a> = TrieRecorder<'a, H> where H: 'a; + + fn drain_storage_proof(self) -> Option { + Some(Recorder::drain_storage_proof(self)) + } + + fn as_trie_recorder(&self, storage_root: H::Out) -> Self::Recorder<'_> { + Recorder::as_trie_recorder(&self, storage_root) + } +} + +impl<'a, H: Hasher> TrieRecorder<'a, H> { /// Update the recorded keys entry for the given `full_key`. fn update_recorded_keys(&mut self, full_key: &[u8], access: RecordedForKey) { let inner = self.inner.deref_mut(); @@ -283,9 +300,7 @@ impl>> TrieRecorder } } -impl>> trie_db::TrieRecorder - for TrieRecorder -{ +impl<'a, H: Hasher> trie_db::TrieRecorder for TrieRecorder<'a, H> { fn record(&mut self, access: TrieAccess) { let mut encoded_size_update = 0; -- GitLab From c30ed6f2ff0b88c37b690884d9049ce05ebd34a4 Mon Sep 17 00:00:00 2001 From: Juan Girini Date: Thu, 30 Nov 2023 17:36:14 +0100 Subject: [PATCH 170/199] Add missing glossary to ref docs (#2572) This PR is the same as https://github.com/paritytech/polkadot-sdk/pull/2273, but that one had too many conflicts after a base change. As it is a just one file PR I decided to start over with a clean slate. --- This PR adds some missing glossary definitions to the Developer Hub ref docs FIxes https://github.com/paritytech/polkadot-sdk-docs/issues/40 --- developer-hub/src/reference_docs/glossary.rs | 64 +++++++++++++++++++- 1 file changed, 61 insertions(+), 3 deletions(-) diff --git a/developer-hub/src/reference_docs/glossary.rs b/developer-hub/src/reference_docs/glossary.rs index d0bd6accce6..56f5ef5aeb5 100644 --- a/developer-hub/src/reference_docs/glossary.rs +++ b/developer-hub/src/reference_docs/glossary.rs @@ -36,27 +36,85 @@ //! //! #### Offchain //! -//! #### Host Function: +//! Refers to operations conducted outside the blockchain's consensus mechanism. They are essential +//! for enhancing scalability and efficiency, enabling activities like data fetching and computation +//! without bloating the blockchain state. +//! +//! #### Host Functions: +//! +//! Host functions are the node's API, these are functions provided by the runtime environment (the +//! [host](#host)) to the Wasm runtime. These functions allow the Wasm code to interact with and +//! perform operations on the [node](#node), like accessing the blockchain state. //! //! #### Runtime API: //! +//! This is the API of the runtime, it acts as a communication bridge between the runtime and the +//! node, serving as the exposed interface that facilitates their interactions. +//! //! #### Dispatchable: //! -//! Callable +//! Dispatchables are [function objects](https://en.wikipedia.org/wiki/Function_object) that act as +//! the entry points in [FRAME](frame) pallets. 
They can be called by internal or external entities +//! to interact with the blockchain's state. They are a core aspect of the runtime logic, handling +//! transactions and other state-changing operations. +//! +//! **Synonyms**: Callable //! //! #### Extrinsic //! +//! An extrinsic is a general term for a piece of data that is originated outside of the runtime, +//! included into a block and leads to some action. This includes user-initiated transactions as +//! well as inherents which are placed into the block by the block-builder. //! //! #### Pallet //! +//! Similar to software modules in traditional programming, [FRAME](frame) pallets in Substrate are +//! modular components that encapsulate distinct functionalities or business logic. Just as +//! libraries or modules are used to build and extend the capabilities of a software application, +//! pallets are the foundational building blocks for constructing a blockchain's runtime with frame. +//! They enable the creation of customizable and upgradeable networks, offering a composable +//! framework for a Substrate-based blockchain. Each pallet can be thought of as a plug-and-play +//! module, enhancing the blockchain's functionality in a cohesive and integrated manner. +//! //! #### Full Node //! +//! It is a node that prunes historical states, keeping only recent finalized block states to reduce +//! storage needs. Full nodes provide current chain state access and allow direct submission and +//! validation of extrinsics, maintaining network decentralization. +//! //! #### Archive Node //! +//! An archive node is a specialized node that maintains a complete history of all block states and +//! transactions. Unlike a full node, it does not prune historical data, ensuring full access to the +//! entire blockchain history. This makes it essential for detailed blockchain analysis and +//! historical queries, but requires significantly more storage capacity. +//! //! #### Validator //! +//! A validator is a node that participates in the consensus mechanism of the network. +//! Its role includes block production, transaction validation, network integrity and security +//! maintenance. +//! //! #### Collator //! +//! A collator is a node that is responsible for producing candidate blocks for the validators. +//! Collators are similar to validators on any other blockchain but, they do not need to provide +//! security guarantees as the Relay Chain handles this. +//! //! #### Parachain //! -//! aka. AppChain. +//! Short for "parallelized chain" a parachain is a specialized blockchain that runs in parallel to +//! the Relay Chain (Polkadot, Kusama, etc.), benefiting from the shared security and +//! interoperability features of it. +//! +//! **Synonyms**: AppChain +//! +//! #### PVF +//! The Parachain Validation Function (PVF) is the current runtime Wasm for a parachain that is +//! stored on the Relay chain. It is an essential component in the Polkadot ecosystem, encapsulating +//! the validation logic for each parachain. The PVF is executed by validators to verify the +//! correctness of parachain blocks. This is critical for ensuring that each block follows the logic +//! set by its respective parachain, thus maintaining the integrity and security of the entire +//! network. +//! +//! 
**Synonyms**: Parachain Validation Function -- GitLab From 6742aba05f21da1c890450a666d0fa27c87ca7c2 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 30 Nov 2023 18:04:12 +0100 Subject: [PATCH 171/199] Bump the known_good_semver group with 2 updates (#2570) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Bumps the known_good_semver group with 2 updates: [serde](https://github.com/serde-rs/serde) and [clap](https://github.com/clap-rs/clap). Updates `serde` from 1.0.188 to 1.0.193
Release notes

Sourced from serde's releases.

v1.0.193

v1.0.192

v1.0.191

  • Documentation improvements

v1.0.190

  • Preserve NaN sign when deserializing f32 from f64 or vice versa (#2637)

v1.0.189

  • Fix "cannot infer type" error when internally tagged enum contains untagged variant (#2613, thanks @​ahl)
Commits
  • 44613c7 Release 1.0.193
  • c706281 Merge pull request #2655 from dtolnay/rangestartend
  • 65d75b8 Add RangeFrom and RangeTo tests
  • 332b0cb Merge pull request #2654 from dtolnay/rangestartend
  • 8c4af41 Fix more RangeFrom / RangeEnd mixups
  • 24a78f0 Merge pull request #2653 from emilbonnek/fix/range-to-from-de-mixup
  • c91c334 Fix Range{From,To} deserialize mixup
  • 2083f43 Update ui test suite to nightly-2023-11-19
  • 4676abd Release 1.0.192
  • 35700eb Merge pull request #2646 from robsdedude/fix/2643/allow-tag-field-in-untagged
  • Additional commits viewable in compare view

Updates `clap` from 4.4.6 to 4.4.10
Release notes

Sourced from clap's releases.

v4.4.10

[4.4.10] - 2023-11-28

Documentation

  • Link out to changelog
  • Cross link derive's attribute reference to derive tutorial

v4.4.9

[4.4.9] - 2023-11-27

Fixes

  • (help) Show correct Command::about under flattened headings
  • (help) Respect hide when flattening subcommands

v4.4.8

[4.4.8] - 2023-11-10

Features

  • Add Command::flatten_help to allow git stash -h like help for subcommands
Changelog

Sourced from clap's changelog.

[4.4.10] - 2023-11-28

Documentation

  • Link out to changelog
  • Cross link derive's attribute reference to derive tutorial

[4.4.9] - 2023-11-27

Fixes

  • (help) Show correct Command::about under flattened headings
  • (help) Respect hide when flattening subcommands

[4.4.8] - 2023-11-10

Features

  • Add Command::flatten_help to allow git stash -h like help for subcommands

[4.4.7] - 2023-10-24

Performance

  • Reduced code size
Commits
  • c0a1814 chore: Release
  • c83e681 docs: Update changelog
  • 91bcac4 Merge pull request #5230 from epage/migrate
  • 030d875 docs: Link out to the changelog at the relevant tag
  • b661a9d Merge pull request #5229 from epage/derive
  • a08587b docs(derive): Link to tutorial sections for attributes
  • 21b671f chore: Release
  • 93ba76d docs: Update changelog
  • c1c55b3 Merge pull request #5228 from epage/flat
  • b13f6d9 fix(help): Hide 'help' if only flattened subcommand
  • Additional commits viewable in compare view

Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`. ---
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 84 +++++++++---------- cumulus/client/cli/Cargo.toml | 2 +- .../relay-chain-rpc-interface/Cargo.toml | 2 +- cumulus/parachain-template/node/Cargo.toml | 4 +- .../pallets/template/Cargo.toml | 2 +- .../bridge-hubs/bridge-hub-rococo/Cargo.toml | 2 +- .../bridge-hubs/bridge-hub-westend/Cargo.toml | 2 +- cumulus/polkadot-parachain/Cargo.toml | 4 +- cumulus/test/service/Cargo.toml | 4 +- polkadot/cli/Cargo.toml | 2 +- polkadot/node/malus/Cargo.toml | 2 +- polkadot/node/primitives/Cargo.toml | 2 +- polkadot/node/service/Cargo.toml | 2 +- polkadot/parachain/Cargo.toml | 2 +- .../test-parachains/adder/collator/Cargo.toml | 2 +- .../undying/collator/Cargo.toml | 2 +- polkadot/primitives/Cargo.toml | 2 +- polkadot/runtime/common/Cargo.toml | 2 +- polkadot/runtime/parachains/Cargo.toml | 2 +- polkadot/runtime/rococo/Cargo.toml | 2 +- polkadot/runtime/test-runtime/Cargo.toml | 2 +- polkadot/runtime/westend/Cargo.toml | 2 +- polkadot/utils/generate-bags/Cargo.toml | 2 +- .../remote-ext-tests/bags-list/Cargo.toml | 2 +- polkadot/xcm/Cargo.toml | 2 +- polkadot/xcm/pallet-xcm/Cargo.toml | 2 +- substrate/bin/minimal/node/Cargo.toml | 2 +- substrate/bin/node-template/node/Cargo.toml | 2 +- substrate/bin/node/bench/Cargo.toml | 4 +- substrate/bin/node/cli/Cargo.toml | 6 +- substrate/bin/node/inspect/Cargo.toml | 2 +- .../bin/utils/chain-spec-builder/Cargo.toml | 2 +- substrate/bin/utils/subkey/Cargo.toml | 2 +- substrate/client/chain-spec/Cargo.toml | 2 +- substrate/client/cli/Cargo.toml | 4 +- .../client/consensus/babe/rpc/Cargo.toml | 2 +- substrate/client/consensus/beefy/Cargo.toml | 2 +- .../client/consensus/beefy/rpc/Cargo.toml | 2 +- substrate/client/consensus/grandpa/Cargo.toml | 2 +- .../client/consensus/grandpa/rpc/Cargo.toml | 2 +- .../merkle-mountain-range/rpc/Cargo.toml | 2 +- substrate/client/network/Cargo.toml | 2 +- substrate/client/rpc-api/Cargo.toml | 2 +- substrate/client/service/Cargo.toml | 2 +- substrate/client/storage-monitor/Cargo.toml | 2 +- substrate/client/sync-state-rpc/Cargo.toml | 2 +- substrate/client/sysinfo/Cargo.toml | 2 +- substrate/client/telemetry/Cargo.toml | 2 +- substrate/client/tracing/Cargo.toml | 2 +- substrate/client/transaction-pool/Cargo.toml | 2 +- .../client/transaction-pool/api/Cargo.toml | 2 +- substrate/frame/beefy-mmr/Cargo.toml | 2 +- substrate/frame/beefy/Cargo.toml | 2 +- substrate/frame/benchmarking/Cargo.toml | 2 +- substrate/frame/conviction-voting/Cargo.toml | 2 +- substrate/frame/democracy/Cargo.toml | 2 +- .../solution-type/fuzzer/Cargo.toml | 2 +- substrate/frame/message-queue/Cargo.toml | 2 +- substrate/frame/mixnet/Cargo.toml | 2 +- substrate/frame/offences/Cargo.toml | 2 +- substrate/frame/referenda/Cargo.toml | 2 +- substrate/frame/remark/Cargo.toml | 2 +- substrate/frame/staking/Cargo.toml | 2 +- .../frame/state-trie-migration/Cargo.toml | 2 +- substrate/frame/support/Cargo.toml | 2 +- substrate/frame/support/test/Cargo.toml | 2 +- .../frame/support/test/pallet/Cargo.toml | 2 +- substrate/frame/system/Cargo.toml | 2 +- substrate/frame/tips/Cargo.toml | 2 +- .../frame/transaction-payment/Cargo.toml | 2 +- .../asset-tx-payment/Cargo.toml | 2 +- .../frame/transaction-storage/Cargo.toml | 2 +- substrate/frame/treasury/Cargo.toml | 2 +- .../primitives/application-crypto/Cargo.toml | 2 +- substrate/primitives/arithmetic/Cargo.toml | 2 +- .../primitives/consensus/babe/Cargo.toml | 2 +- 
.../primitives/consensus/beefy/Cargo.toml | 2 +- .../primitives/consensus/grandpa/Cargo.toml | 2 +- .../primitives/consensus/sassafras/Cargo.toml | 2 +- substrate/primitives/core/Cargo.toml | 2 +- .../merkle-mountain-range/Cargo.toml | 2 +- .../primitives/npos-elections/Cargo.toml | 2 +- .../npos-elections/fuzzer/Cargo.toml | 2 +- substrate/primitives/rpc/Cargo.toml | 2 +- substrate/primitives/runtime/Cargo.toml | 2 +- substrate/primitives/staking/Cargo.toml | 2 +- substrate/primitives/storage/Cargo.toml | 2 +- .../primitives/test-primitives/Cargo.toml | 2 +- substrate/primitives/version/Cargo.toml | 2 +- substrate/primitives/weights/Cargo.toml | 2 +- .../ci/node-template-release/Cargo.toml | 2 +- substrate/test-utils/client/Cargo.toml | 2 +- substrate/test-utils/runtime/Cargo.toml | 2 +- .../utils/frame/benchmarking-cli/Cargo.toml | 4 +- .../frame/frame-utilities-cli/Cargo.toml | 2 +- .../generate-bags/node-runtime/Cargo.toml | 2 +- .../frame/remote-externalities/Cargo.toml | 2 +- .../utils/frame/try-runtime/cli/Cargo.toml | 4 +- 98 files changed, 148 insertions(+), 148 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index e7caa46cad8..05715ad8dfb 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2497,23 +2497,23 @@ dependencies = [ [[package]] name = "clap" -version = "4.4.6" +version = "4.4.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d04704f56c2cde07f43e8e2c154b43f216dc5c92fc98ada720177362f953b956" +checksum = "41fffed7514f420abec6d183b1d3acfd9099c79c3a10a06ade4f8203f1411272" dependencies = [ "clap_builder", - "clap_derive 4.4.2", + "clap_derive 4.4.7", ] [[package]] name = "clap_builder" -version = "4.4.6" +version = "4.4.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0e231faeaca65ebd1ea3c737966bf858971cd38c3849107aa3ea7de90a804e45" +checksum = "63361bae7eef3771745f02d8d892bec2fee5f6e34af316ba556e7f97a7069ff1" dependencies = [ "anstream", "anstyle", - "clap_lex 0.5.1", + "clap_lex 0.6.0", "strsim", "terminal_size", ] @@ -2524,7 +2524,7 @@ version = "4.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "586a385f7ef2f8b4d86bddaa0c094794e7ccbfe5ffef1f434fe928143fc783a5" dependencies = [ - "clap 4.4.6", + "clap 4.4.10", ] [[package]] @@ -2542,9 +2542,9 @@ dependencies = [ [[package]] name = "clap_derive" -version = "4.4.2" +version = "4.4.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0862016ff20d69b84ef8247369fabf5c008a7417002411897d40ee1f4532b873" +checksum = "cf9804afaaf59a91e75b022a30fb7229a7901f60c755489cc61c9b423b836442" dependencies = [ "heck", "proc-macro2", @@ -2563,9 +2563,9 @@ dependencies = [ [[package]] name = "clap_lex" -version = "0.5.1" +version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cd7cc57abe963c6d3b9d8be5b06ba7c8957a930305ca90304f24ef040aa6f961" +checksum = "702fc72eb24e5a1e48ce58027a675bc24edd52096d5397d4aea7c6dd9eca0bd1" [[package]] name = "coarsetime" @@ -3099,7 +3099,7 @@ dependencies = [ "anes", "cast", "ciborium", - "clap 4.4.6", + "clap 4.4.10", "criterion-plot", "futures", "is-terminal", @@ -3274,7 +3274,7 @@ dependencies = [ name = "cumulus-client-cli" version = "0.1.0" dependencies = [ - "clap 4.4.6", + "clap 4.4.10", "parity-scale-codec", "sc-chain-spec", "sc-cli", @@ -3995,7 +3995,7 @@ name = "cumulus-test-service" version = "0.1.0" dependencies = [ "async-trait", - "clap 4.4.6", + "clap 4.4.10", "criterion 0.5.1", "cumulus-client-cli", "cumulus-client-consensus-common", @@ 
-5307,7 +5307,7 @@ dependencies = [ "Inflector", "array-bytes 6.1.0", "chrono", - "clap 4.4.6", + "clap 4.4.10", "comfy-table", "frame-benchmarking", "frame-support", @@ -5399,7 +5399,7 @@ dependencies = [ name = "frame-election-solution-type-fuzzer" version = "2.0.0-alpha.5" dependencies = [ - "clap 4.4.6", + "clap 4.4.10", "frame-election-provider-solution-type", "frame-election-provider-support", "frame-support", @@ -7977,7 +7977,7 @@ checksum = "68354c5c6bd36d73ff3feceb05efa59b6acb7626617f4962be322a825e61f79a" name = "minimal-node" version = "4.0.0-dev" dependencies = [ - "clap 4.4.6", + "clap 4.4.10", "frame", "futures", "futures-timer", @@ -8383,7 +8383,7 @@ name = "node-bench" version = "0.9.0-dev" dependencies = [ "array-bytes 6.1.0", - "clap 4.4.6", + "clap 4.4.10", "derive_more", "fs_extra", "futures", @@ -8458,7 +8458,7 @@ dependencies = [ name = "node-runtime-generate-bags" version = "3.0.0" dependencies = [ - "clap 4.4.6", + "clap 4.4.10", "generate-bags", "kitchensink-runtime", ] @@ -8467,7 +8467,7 @@ dependencies = [ name = "node-template" version = "4.0.0-dev" dependencies = [ - "clap 4.4.6", + "clap 4.4.10", "frame-benchmarking", "frame-benchmarking-cli", "frame-system", @@ -8511,7 +8511,7 @@ dependencies = [ name = "node-template-release" version = "3.0.0" dependencies = [ - "clap 4.4.6", + "clap 4.4.10", "flate2", "fs_extra", "glob", @@ -11029,7 +11029,7 @@ dependencies = [ name = "parachain-template-node" version = "0.1.0" dependencies = [ - "clap 4.4.6", + "clap 4.4.10", "color-print", "cumulus-client-cli", "cumulus-client-collator", @@ -11779,7 +11779,7 @@ dependencies = [ name = "polkadot-cli" version = "1.1.0" dependencies = [ - "clap 4.4.6", + "clap 4.4.10", "frame-benchmarking-cli", "futures", "log", @@ -12616,7 +12616,7 @@ dependencies = [ "async-trait", "bridge-hub-rococo-runtime", "bridge-hub-westend-runtime", - "clap 4.4.6", + "clap 4.4.10", "collectives-westend-runtime", "color-print", "contracts-rococo-runtime", @@ -13110,7 +13110,7 @@ version = "1.0.0" dependencies = [ "assert_matches", "async-trait", - "clap 4.4.6", + "clap 4.4.10", "color-eyre", "futures", "futures-timer", @@ -13257,7 +13257,7 @@ dependencies = [ name = "polkadot-voter-bags" version = "1.0.0" dependencies = [ - "clap 4.4.6", + "clap 4.4.10", "generate-bags", "sp-io", "westend-runtime", @@ -14058,7 +14058,7 @@ checksum = "c08c74e62047bb2de4ff487b251e4a92e24f48745648451635cec7d591162d9f" name = "remote-ext-tests-bags-list" version = "1.0.0" dependencies = [ - "clap 4.4.6", + "clap 4.4.10", "frame-system", "log", "pallet-bags-list-remote-tests", @@ -14831,7 +14831,7 @@ dependencies = [ "array-bytes 6.1.0", "bip39", "chrono", - "clap 4.4.6", + "clap 4.4.10", "fdlimit", "futures", "futures-timer", @@ -15975,7 +15975,7 @@ dependencies = [ name = "sc-storage-monitor" version = "0.1.0" dependencies = [ - "clap 4.4.6", + "clap 4.4.10", "fs4", "log", "sc-client-db", @@ -16431,18 +16431,18 @@ checksum = "f97841a747eef040fcd2e7b3b9a220a7205926e60488e673d9e4926d27772ce5" [[package]] name = "serde" -version = "1.0.188" +version = "1.0.193" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cf9e0fcba69a370eed61bcf2b728575f726b50b55cba78064753d708ddc7549e" +checksum = "25dd9975e68d0cb5aa1120c288333fc98731bd1dd12f561e468ea4728c042b89" dependencies = [ "serde_derive", ] [[package]] name = "serde_derive" -version = "1.0.188" +version = "1.0.193" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"4eca7ac642d82aa35b60049a6eccb4be6be75e599bd2e9adb5f875a737654af2" +checksum = "43576ca501357b9b071ac53cdc7da8ef0cbd9493d8df094cd821777ea6e894d3" dependencies = [ "proc-macro2", "quote", @@ -17519,7 +17519,7 @@ dependencies = [ name = "sp-npos-elections-fuzzer" version = "2.0.0-alpha.5" dependencies = [ - "clap 4.4.6", + "clap 4.4.10", "honggfuzz", "rand 0.8.5", "sp-npos-elections", @@ -18027,7 +18027,7 @@ checksum = "a8f112729512f8e442d81f95a8a7ddf2b7c6b8a1a6f509a95864142b30cab2d3" name = "staging-chain-spec-builder" version = "2.0.0" dependencies = [ - "clap 4.4.6", + "clap 4.4.10", "log", "sc-chain-spec", "serde_json", @@ -18040,7 +18040,7 @@ version = "3.0.0-dev" dependencies = [ "array-bytes 6.1.0", "assert_cmd", - "clap 4.4.6", + "clap 4.4.10", "clap_complete", "criterion 0.4.0", "frame-benchmarking", @@ -18145,7 +18145,7 @@ dependencies = [ name = "staging-node-inspect" version = "0.9.0-dev" dependencies = [ - "clap 4.4.6", + "clap 4.4.10", "parity-scale-codec", "sc-cli", "sc-client-api", @@ -18354,7 +18354,7 @@ dependencies = [ name = "subkey" version = "3.0.0" dependencies = [ - "clap 4.4.6", + "clap 4.4.10", "sc-cli", ] @@ -18396,7 +18396,7 @@ dependencies = [ name = "substrate-frame-cli" version = "4.0.0-dev" dependencies = [ - "clap 4.4.6", + "clap 4.4.10", "frame-support", "frame-system", "sc-cli", @@ -18871,7 +18871,7 @@ dependencies = [ name = "test-parachain-adder-collator" version = "1.0.0" dependencies = [ - "clap 4.4.6", + "clap 4.4.10", "futures", "futures-timer", "log", @@ -18919,7 +18919,7 @@ dependencies = [ name = "test-parachain-undying-collator" version = "1.0.0" dependencies = [ - "clap 4.4.6", + "clap 4.4.10", "futures", "futures-timer", "log", @@ -19594,7 +19594,7 @@ version = "0.10.0-dev" dependencies = [ "assert_cmd", "async-trait", - "clap 4.4.6", + "clap 4.4.10", "frame-remote-externalities", "frame-try-runtime", "hex", diff --git a/cumulus/client/cli/Cargo.toml b/cumulus/client/cli/Cargo.toml index 0f942feb595..35945bf4052 100644 --- a/cumulus/client/cli/Cargo.toml +++ b/cumulus/client/cli/Cargo.toml @@ -7,7 +7,7 @@ description = "Parachain node CLI utilities." 
license = "GPL-3.0-or-later WITH Classpath-exception-2.0" [dependencies] -clap = { version = "4.4.6", features = ["derive"] } +clap = { version = "4.4.10", features = ["derive"] } codec = { package = "parity-scale-codec", version = "3.0.0" } url = "2.4.0" diff --git a/cumulus/client/relay-chain-rpc-interface/Cargo.toml b/cumulus/client/relay-chain-rpc-interface/Cargo.toml index 0159cade23f..67ee3571c04 100644 --- a/cumulus/client/relay-chain-rpc-interface/Cargo.toml +++ b/cumulus/client/relay-chain-rpc-interface/Cargo.toml @@ -36,7 +36,7 @@ tracing = "0.1.37" async-trait = "0.1.73" url = "2.4.0" serde_json = "1.0.108" -serde = "1.0.188" +serde = "1.0.193" schnellru = "0.2.1" smoldot = { version = "0.11.0", default_features = false, features = ["std"]} smoldot-light = { version = "0.9.0", default_features = false, features = ["std"] } diff --git a/cumulus/parachain-template/node/Cargo.toml b/cumulus/parachain-template/node/Cargo.toml index 73bbbb6d771..ba4060f54f9 100644 --- a/cumulus/parachain-template/node/Cargo.toml +++ b/cumulus/parachain-template/node/Cargo.toml @@ -11,10 +11,10 @@ build = "build.rs" publish = false [dependencies] -clap = { version = "4.4.6", features = ["derive"] } +clap = { version = "4.4.10", features = ["derive"] } log = "0.4.20" codec = { package = "parity-scale-codec", version = "3.0.0" } -serde = { version = "1.0.188", features = ["derive"] } +serde = { version = "1.0.193", features = ["derive"] } jsonrpsee = { version = "0.16.2", features = ["server"] } futures = "0.3.28" serde_json = "1.0.108" diff --git a/cumulus/parachain-template/pallets/template/Cargo.toml b/cumulus/parachain-template/pallets/template/Cargo.toml index 92545783934..4267341a2d7 100644 --- a/cumulus/parachain-template/pallets/template/Cargo.toml +++ b/cumulus/parachain-template/pallets/template/Cargo.toml @@ -21,7 +21,7 @@ frame-support = { path = "../../../../substrate/frame/support", default-features frame-system = { path = "../../../../substrate/frame/system", default-features = false} [dev-dependencies] -serde = { version = "1.0.188" } +serde = { version = "1.0.193" } # Substrate sp-core = { path = "../../../../substrate/primitives/core", default-features = false} diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/Cargo.toml b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/Cargo.toml index 42ea63bfea5..809e952ccb9 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/Cargo.toml +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/Cargo.toml @@ -18,7 +18,7 @@ log = { version = "0.4.20", default-features = false } scale-info = { version = "2.10.0", default-features = false, features = [ "derive", ] } -serde = { version = "1.0.188", optional = true, features = ["derive"] } +serde = { version = "1.0.193", optional = true, features = ["derive"] } smallvec = "1.11.0" # Substrate diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/Cargo.toml b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/Cargo.toml index 4d2e60d971d..90d72ea3edc 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/Cargo.toml +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/Cargo.toml @@ -14,7 +14,7 @@ codec = { package = "parity-scale-codec", version = "3.0.0", default-features = hex-literal = { version = "0.4.1" } log = { version = "0.4.20", default-features = false } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -serde = { version = "1.0.188", optional = true, 
features = ["derive"] } +serde = { version = "1.0.193", optional = true, features = ["derive"] } smallvec = "1.11.0" # Substrate diff --git a/cumulus/polkadot-parachain/Cargo.toml b/cumulus/polkadot-parachain/Cargo.toml index a7fdfb5d6c9..cae9b3b6889 100644 --- a/cumulus/polkadot-parachain/Cargo.toml +++ b/cumulus/polkadot-parachain/Cargo.toml @@ -13,12 +13,12 @@ path = "src/main.rs" [dependencies] async-trait = "0.1.73" -clap = { version = "4.4.6", features = ["derive"] } +clap = { version = "4.4.10", features = ["derive"] } codec = { package = "parity-scale-codec", version = "3.0.0" } futures = "0.3.28" hex-literal = "0.4.1" log = "0.4.20" -serde = { version = "1.0.188", features = ["derive"] } +serde = { version = "1.0.193", features = ["derive"] } serde_json = "1.0.108" # Local diff --git a/cumulus/test/service/Cargo.toml b/cumulus/test/service/Cargo.toml index ed8c8748cc8..e35173ecf16 100644 --- a/cumulus/test/service/Cargo.toml +++ b/cumulus/test/service/Cargo.toml @@ -11,12 +11,12 @@ path = "src/main.rs" [dependencies] async-trait = "0.1.73" -clap = { version = "4.4.6", features = ["derive"] } +clap = { version = "4.4.10", features = ["derive"] } codec = { package = "parity-scale-codec", version = "3.0.0" } criterion = { version = "0.5.1", features = [ "async_tokio" ] } jsonrpsee = { version = "0.16.2", features = ["server"] } rand = "0.8.5" -serde = { version = "1.0.188", features = ["derive"] } +serde = { version = "1.0.193", features = ["derive"] } serde_json = "1.0.108" tokio = { version = "1.32.0", features = ["macros"] } tracing = "0.1.37" diff --git a/polkadot/cli/Cargo.toml b/polkadot/cli/Cargo.toml index 0c2925c76e8..768077b8bf2 100644 --- a/polkadot/cli/Cargo.toml +++ b/polkadot/cli/Cargo.toml @@ -15,7 +15,7 @@ wasm-opt = false crate-type = ["cdylib", "rlib"] [dependencies] -clap = { version = "4.4.6", features = ["derive"], optional = true } +clap = { version = "4.4.10", features = ["derive"], optional = true } log = "0.4.17" thiserror = "1.0.48" futures = "0.3.21" diff --git a/polkadot/node/malus/Cargo.toml b/polkadot/node/malus/Cargo.toml index f52f0cc0282..cd0a25d7593 100644 --- a/polkadot/node/malus/Cargo.toml +++ b/polkadot/node/malus/Cargo.toml @@ -40,7 +40,7 @@ assert_matches = "1.5" async-trait = "0.1.57" sp-keystore = { path = "../../../substrate/primitives/keystore" } sp-core = { path = "../../../substrate/primitives/core" } -clap = { version = "4.4.6", features = ["derive"] } +clap = { version = "4.4.10", features = ["derive"] } futures = "0.3.21" futures-timer = "3.0.2" gum = { package = "tracing-gum", path = "../gum" } diff --git a/polkadot/node/primitives/Cargo.toml b/polkadot/node/primitives/Cargo.toml index c39fd5947b0..6c37ebb986f 100644 --- a/polkadot/node/primitives/Cargo.toml +++ b/polkadot/node/primitives/Cargo.toml @@ -21,7 +21,7 @@ polkadot-parachain-primitives = { path = "../../parachain", default-features = f schnorrkel = "0.9.1" thiserror = "1.0.48" bitvec = { version = "1.0.0", default-features = false, features = ["alloc"] } -serde = { version = "1.0.188", features = ["derive"] } +serde = { version = "1.0.193", features = ["derive"] } [target.'cfg(not(target_os = "unknown"))'.dependencies] zstd = { version = "0.12.4", default-features = false } diff --git a/polkadot/node/service/Cargo.toml b/polkadot/node/service/Cargo.toml index 19efd1b66c4..b5aa46a41dc 100644 --- a/polkadot/node/service/Cargo.toml +++ b/polkadot/node/service/Cargo.toml @@ -82,7 +82,7 @@ is_executable = "1.0.1" gum = { package = "tracing-gum", path = "../gum" } log = "0.4.17" 
schnellru = "0.2.1" -serde = { version = "1.0.188", features = ["derive"] } +serde = { version = "1.0.193", features = ["derive"] } serde_json = "1.0.108" thiserror = "1.0.48" kvdb = "0.13.0" diff --git a/polkadot/parachain/Cargo.toml b/polkadot/parachain/Cargo.toml index 27aa117a87f..9aa4dc10a5f 100644 --- a/polkadot/parachain/Cargo.toml +++ b/polkadot/parachain/Cargo.toml @@ -21,7 +21,7 @@ derive_more = "0.99.11" bounded-collections = { version = "0.1.8", default-features = false, features = ["serde"] } # all optional crates. -serde = { version = "1.0.188", default-features = false, features = ["derive", "alloc"] } +serde = { version = "1.0.193", default-features = false, features = ["derive", "alloc"] } [features] default = [ "std" ] diff --git a/polkadot/parachain/test-parachains/adder/collator/Cargo.toml b/polkadot/parachain/test-parachains/adder/collator/Cargo.toml index 70f2ae769a8..eeb367f8aee 100644 --- a/polkadot/parachain/test-parachains/adder/collator/Cargo.toml +++ b/polkadot/parachain/test-parachains/adder/collator/Cargo.toml @@ -13,7 +13,7 @@ path = "src/main.rs" [dependencies] parity-scale-codec = { version = "3.6.1", default-features = false, features = ["derive"] } -clap = { version = "4.4.6", features = ["derive"] } +clap = { version = "4.4.10", features = ["derive"] } futures = "0.3.21" futures-timer = "3.0.2" log = "0.4.17" diff --git a/polkadot/parachain/test-parachains/undying/collator/Cargo.toml b/polkadot/parachain/test-parachains/undying/collator/Cargo.toml index 578c3d6715d..b7eab13bb9c 100644 --- a/polkadot/parachain/test-parachains/undying/collator/Cargo.toml +++ b/polkadot/parachain/test-parachains/undying/collator/Cargo.toml @@ -13,7 +13,7 @@ path = "src/main.rs" [dependencies] parity-scale-codec = { version = "3.6.1", default-features = false, features = ["derive"] } -clap = { version = "4.4.6", features = ["derive"] } +clap = { version = "4.4.10", features = ["derive"] } futures = "0.3.21" futures-timer = "3.0.2" log = "0.4.17" diff --git a/polkadot/primitives/Cargo.toml b/polkadot/primitives/Cargo.toml index 316644a372d..78a25d67081 100644 --- a/polkadot/primitives/Cargo.toml +++ b/polkadot/primitives/Cargo.toml @@ -11,7 +11,7 @@ bitvec = { version = "1.0.0", default-features = false, features = ["alloc", "se hex-literal = "0.4.1" parity-scale-codec = { version = "3.6.1", default-features = false, features = ["bit-vec", "derive"] } scale-info = { version = "2.10.0", default-features = false, features = ["bit-vec", "derive", "serde"] } -serde = { version = "1.0.188", default-features = false, features = ["derive", "alloc"] } +serde = { version = "1.0.193", default-features = false, features = ["derive", "alloc"] } application-crypto = { package = "sp-application-crypto", path = "../../substrate/primitives/application-crypto", default-features = false, features = ["serde"] } inherents = { package = "sp-inherents", path = "../../substrate/primitives/inherents", default-features = false } diff --git a/polkadot/runtime/common/Cargo.toml b/polkadot/runtime/common/Cargo.toml index 4391b6d81eb..053eb88967f 100644 --- a/polkadot/runtime/common/Cargo.toml +++ b/polkadot/runtime/common/Cargo.toml @@ -13,7 +13,7 @@ parity-scale-codec = { version = "3.6.1", default-features = false, features = [ log = { version = "0.4.17", default-features = false } rustc-hex = { version = "2.1.0", default-features = false } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -serde = { version = "1.0.188", default-features = false, features = ["alloc"] 
} +serde = { version = "1.0.193", default-features = false, features = ["alloc"] } serde_derive = { version = "1.0.117" } static_assertions = "1.1.0" diff --git a/polkadot/runtime/parachains/Cargo.toml b/polkadot/runtime/parachains/Cargo.toml index b6800fc0844..9e8c9a5c759 100644 --- a/polkadot/runtime/parachains/Cargo.toml +++ b/polkadot/runtime/parachains/Cargo.toml @@ -13,7 +13,7 @@ parity-scale-codec = { version = "3.6.1", default-features = false, features = [ log = { version = "0.4.17", default-features = false } rustc-hex = { version = "2.1.0", default-features = false } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -serde = { version = "1.0.188", default-features = false, features = ["derive", "alloc"] } +serde = { version = "1.0.193", default-features = false, features = ["derive", "alloc"] } derive_more = "0.99.17" bitflags = "1.3.2" diff --git a/polkadot/runtime/rococo/Cargo.toml b/polkadot/runtime/rococo/Cargo.toml index 9693d351cf4..926768e9748 100644 --- a/polkadot/runtime/rococo/Cargo.toml +++ b/polkadot/runtime/rococo/Cargo.toml @@ -11,7 +11,7 @@ license.workspace = true parity-scale-codec = { version = "3.6.1", default-features = false, features = ["derive", "max-encoded-len"] } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } log = { version = "0.4.17", default-features = false } -serde = { version = "1.0.188", default-features = false } +serde = { version = "1.0.193", default-features = false } serde_derive = { version = "1.0.117", optional = true } static_assertions = "1.1.0" smallvec = "1.8.0" diff --git a/polkadot/runtime/test-runtime/Cargo.toml b/polkadot/runtime/test-runtime/Cargo.toml index 29debad7b53..85e452d1bd4 100644 --- a/polkadot/runtime/test-runtime/Cargo.toml +++ b/polkadot/runtime/test-runtime/Cargo.toml @@ -13,7 +13,7 @@ parity-scale-codec = { version = "3.6.1", default-features = false, features = [ log = { version = "0.4.17", default-features = false } rustc-hex = { version = "2.1.0", default-features = false } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -serde = { version = "1.0.188", default-features = false } +serde = { version = "1.0.193", default-features = false } serde_derive = { version = "1.0.117", optional = true } smallvec = "1.8.0" diff --git a/polkadot/runtime/westend/Cargo.toml b/polkadot/runtime/westend/Cargo.toml index eaebf01e3a7..c31db2703c1 100644 --- a/polkadot/runtime/westend/Cargo.toml +++ b/polkadot/runtime/westend/Cargo.toml @@ -13,7 +13,7 @@ parity-scale-codec = { version = "3.6.1", default-features = false, features = [ scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } log = { version = "0.4.17", default-features = false } rustc-hex = { version = "2.1.0", default-features = false } -serde = { version = "1.0.188", default-features = false } +serde = { version = "1.0.193", default-features = false } serde_derive = { version = "1.0.117", optional = true } smallvec = "1.8.0" diff --git a/polkadot/utils/generate-bags/Cargo.toml b/polkadot/utils/generate-bags/Cargo.toml index ed29001aa4f..1cd7b057c87 100644 --- a/polkadot/utils/generate-bags/Cargo.toml +++ b/polkadot/utils/generate-bags/Cargo.toml @@ -7,7 +7,7 @@ license.workspace = true description = "CLI to generate voter bags for Polkadot runtimes" [dependencies] -clap = { version = "4.4.6", features = ["derive"] } +clap = { version = "4.4.10", features = ["derive"] } generate-bags = { path = 
"../../../substrate/utils/frame/generate-bags" } sp-io = { path = "../../../substrate/primitives/io" } diff --git a/polkadot/utils/remote-ext-tests/bags-list/Cargo.toml b/polkadot/utils/remote-ext-tests/bags-list/Cargo.toml index e305edc039b..7f0c49f0c26 100644 --- a/polkadot/utils/remote-ext-tests/bags-list/Cargo.toml +++ b/polkadot/utils/remote-ext-tests/bags-list/Cargo.toml @@ -15,6 +15,6 @@ sp-tracing = { path = "../../../../substrate/primitives/tracing" } frame-system = { path = "../../../../substrate/frame/system" } sp-core = { path = "../../../../substrate/primitives/core" } -clap = { version = "4.4.6", features = ["derive"] } +clap = { version = "4.4.10", features = ["derive"] } log = "0.4.17" tokio = { version = "1.24.2", features = ["macros"] } diff --git a/polkadot/xcm/Cargo.toml b/polkadot/xcm/Cargo.toml index 60c27f7fcfc..8190b812bf6 100644 --- a/polkadot/xcm/Cargo.toml +++ b/polkadot/xcm/Cargo.toml @@ -14,7 +14,7 @@ log = { version = "0.4.17", default-features = false } parity-scale-codec = { version = "3.6.1", default-features = false, features = [ "derive", "max-encoded-len" ] } scale-info = { version = "2.10.0", default-features = false, features = ["derive", "serde"] } sp-weights = { path = "../../substrate/primitives/weights", default-features = false, features = ["serde"] } -serde = { version = "1.0.188", default-features = false, features = ["alloc", "derive"] } +serde = { version = "1.0.193", default-features = false, features = ["alloc", "derive"] } xcm-procedural = { path = "procedural" } environmental = { version = "1.1.4", default-features = false } diff --git a/polkadot/xcm/pallet-xcm/Cargo.toml b/polkadot/xcm/pallet-xcm/Cargo.toml index cc5d7d97c45..209c826b8f8 100644 --- a/polkadot/xcm/pallet-xcm/Cargo.toml +++ b/polkadot/xcm/pallet-xcm/Cargo.toml @@ -10,7 +10,7 @@ license.workspace = true bounded-collections = { version = "0.1.8", default-features = false } codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -serde = { version = "1.0.188", optional = true, features = ["derive"] } +serde = { version = "1.0.193", optional = true, features = ["derive"] } log = { version = "0.4.17", default-features = false } frame-support = { path = "../../../substrate/frame/support", default-features = false} diff --git a/substrate/bin/minimal/node/Cargo.toml b/substrate/bin/minimal/node/Cargo.toml index 0506d0838f1..56f123a4719 100644 --- a/substrate/bin/minimal/node/Cargo.toml +++ b/substrate/bin/minimal/node/Cargo.toml @@ -17,7 +17,7 @@ targets = ["x86_64-unknown-linux-gnu"] name = "minimal-node" [dependencies] -clap = { version = "4.0.9", features = ["derive"] } +clap = { version = "4.4.10", features = ["derive"] } futures = { version = "0.3.21", features = ["thread-pool"] } futures-timer = "3.0.1" jsonrpsee = { version = "0.16.2", features = ["server"] } diff --git a/substrate/bin/node-template/node/Cargo.toml b/substrate/bin/node-template/node/Cargo.toml index 61953631d79..ed1980fbb82 100644 --- a/substrate/bin/node-template/node/Cargo.toml +++ b/substrate/bin/node-template/node/Cargo.toml @@ -17,7 +17,7 @@ targets = ["x86_64-unknown-linux-gnu"] name = "node-template" [dependencies] -clap = { version = "4.4.6", features = ["derive"] } +clap = { version = "4.4.10", features = ["derive"] } futures = { version = "0.3.21", features = ["thread-pool"]} serde_json = "1.0.108" diff --git a/substrate/bin/node/bench/Cargo.toml 
b/substrate/bin/node/bench/Cargo.toml index ee429ee8c0c..a3fb8b43030 100644 --- a/substrate/bin/node/bench/Cargo.toml +++ b/substrate/bin/node/bench/Cargo.toml @@ -13,7 +13,7 @@ publish = false [dependencies] array-bytes = "6.1" -clap = { version = "4.4.6", features = ["derive"] } +clap = { version = "4.4.10", features = ["derive"] } log = "0.4.17" node-primitives = { path = "../primitives" } node-testing = { path = "../testing" } @@ -21,7 +21,7 @@ kitchensink-runtime = { path = "../runtime" } sc-client-api = { path = "../../../client/api" } sp-runtime = { path = "../../../primitives/runtime" } sp-state-machine = { path = "../../../primitives/state-machine" } -serde = "1.0.188" +serde = "1.0.193" serde_json = "1.0.108" derive_more = { version = "0.99.17", default-features = false, features = ["display"] } kvdb = "0.13.0" diff --git a/substrate/bin/node/cli/Cargo.toml b/substrate/bin/node/cli/Cargo.toml index 656d6f86505..3e8ffdf83d0 100644 --- a/substrate/bin/node/cli/Cargo.toml +++ b/substrate/bin/node/cli/Cargo.toml @@ -38,9 +38,9 @@ crate-type = ["cdylib", "rlib"] [dependencies] # third-party dependencies array-bytes = "6.1" -clap = { version = "4.4.6", features = ["derive"], optional = true } +clap = { version = "4.4.10", features = ["derive"], optional = true } codec = { package = "parity-scale-codec", version = "3.6.1" } -serde = { version = "1.0.188", features = ["derive"] } +serde = { version = "1.0.193", features = ["derive"] } jsonrpsee = { version = "0.16.2", features = ["server"] } futures = "0.3.21" log = "0.4.17" @@ -157,7 +157,7 @@ sp-trie = { path = "../../../primitives/trie" } sp-state-machine = { path = "../../../primitives/state-machine" } [build-dependencies] -clap = { version = "4.4.6", optional = true } +clap = { version = "4.4.10", optional = true } clap_complete = { version = "4.0.2", optional = true } node-inspect = { package = "staging-node-inspect", path = "../inspect", optional = true} frame-benchmarking-cli = { path = "../../../utils/frame/benchmarking-cli", optional = true} diff --git a/substrate/bin/node/inspect/Cargo.toml b/substrate/bin/node/inspect/Cargo.toml index cfdec6af5de..a5187d8f265 100644 --- a/substrate/bin/node/inspect/Cargo.toml +++ b/substrate/bin/node/inspect/Cargo.toml @@ -12,7 +12,7 @@ repository.workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -clap = { version = "4.4.6", features = ["derive"] } +clap = { version = "4.4.10", features = ["derive"] } codec = { package = "parity-scale-codec", version = "3.6.1" } thiserror = "1.0" sc-cli = { path = "../../../client/cli" } diff --git a/substrate/bin/utils/chain-spec-builder/Cargo.toml b/substrate/bin/utils/chain-spec-builder/Cargo.toml index f587989e003..bfa2951cf00 100644 --- a/substrate/bin/utils/chain-spec-builder/Cargo.toml +++ b/substrate/bin/utils/chain-spec-builder/Cargo.toml @@ -20,7 +20,7 @@ name = "chain-spec-builder" crate-type = ["rlib"] [dependencies] -clap = { version = "4.4.6", features = ["derive"] } +clap = { version = "4.4.10", features = ["derive"] } log = "0.4.17" sc-chain-spec = { path = "../../../client/chain-spec" } serde_json = "1.0.108" diff --git a/substrate/bin/utils/subkey/Cargo.toml b/substrate/bin/utils/subkey/Cargo.toml index 6606d8ac365..1769afd865b 100644 --- a/substrate/bin/utils/subkey/Cargo.toml +++ b/substrate/bin/utils/subkey/Cargo.toml @@ -17,5 +17,5 @@ path = "src/main.rs" name = "subkey" [dependencies] -clap = { version = "4.4.6", features = ["derive"] } +clap = { version = "4.4.10", features = ["derive"] } sc-cli = { path = 
"../../../client/cli" } diff --git a/substrate/client/chain-spec/Cargo.toml b/substrate/client/chain-spec/Cargo.toml index 5b7cdda8ebe..009f8cef957 100644 --- a/substrate/client/chain-spec/Cargo.toml +++ b/substrate/client/chain-spec/Cargo.toml @@ -15,7 +15,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } memmap2 = "0.5.0" -serde = { version = "1.0.188", features = ["derive"] } +serde = { version = "1.0.193", features = ["derive"] } serde_json = "1.0.108" sc-client-api = { path = "../api" } sc-chain-spec-derive = { path = "derive" } diff --git a/substrate/client/cli/Cargo.toml b/substrate/client/cli/Cargo.toml index c4464c5f787..54280d4bf15 100644 --- a/substrate/client/cli/Cargo.toml +++ b/substrate/client/cli/Cargo.toml @@ -15,7 +15,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] array-bytes = "6.1" chrono = "0.4.27" -clap = { version = "4.4.6", features = ["derive", "string", "wrap_help"] } +clap = { version = "4.4.10", features = ["derive", "string", "wrap_help"] } fdlimit = "0.3.0" futures = "0.3.21" itertools = "0.10.3" @@ -26,7 +26,7 @@ parity-scale-codec = "3.6.1" rand = "0.8.5" regex = "1.6.0" rpassword = "7.0.0" -serde = "1.0.188" +serde = "1.0.193" serde_json = "1.0.108" thiserror = "1.0.48" bip39 = "2.0.0" diff --git a/substrate/client/consensus/babe/rpc/Cargo.toml b/substrate/client/consensus/babe/rpc/Cargo.toml index c95d95ae427..f6551b2ed54 100644 --- a/substrate/client/consensus/babe/rpc/Cargo.toml +++ b/substrate/client/consensus/babe/rpc/Cargo.toml @@ -15,7 +15,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] jsonrpsee = { version = "0.16.2", features = ["client-core", "server", "macros"] } futures = "0.3.21" -serde = { version = "1.0.188", features = ["derive"] } +serde = { version = "1.0.193", features = ["derive"] } thiserror = "1.0" sc-consensus-babe = { path = ".." } sc-consensus-epochs = { path = "../../epochs" } diff --git a/substrate/client/consensus/beefy/Cargo.toml b/substrate/client/consensus/beefy/Cargo.toml index aae5a44d7fa..6ee70b523bc 100644 --- a/substrate/client/consensus/beefy/Cargo.toml +++ b/substrate/client/consensus/beefy/Cargo.toml @@ -38,7 +38,7 @@ sp-mmr-primitives = { path = "../../../primitives/merkle-mountain-range" } sp-runtime = { path = "../../../primitives/runtime" } [dev-dependencies] -serde = "1.0.188" +serde = "1.0.193" tempfile = "3.1.0" tokio = "1.22.0" sc-block-builder = { path = "../../block-builder" } diff --git a/substrate/client/consensus/beefy/rpc/Cargo.toml b/substrate/client/consensus/beefy/rpc/Cargo.toml index c7464fdc653..c077908c93c 100644 --- a/substrate/client/consensus/beefy/rpc/Cargo.toml +++ b/substrate/client/consensus/beefy/rpc/Cargo.toml @@ -14,7 +14,7 @@ futures = "0.3.21" jsonrpsee = { version = "0.16.2", features = ["client-core", "server", "macros"] } log = "0.4" parking_lot = "0.12.1" -serde = { version = "1.0.188", features = ["derive"] } +serde = { version = "1.0.193", features = ["derive"] } thiserror = "1.0" sc-consensus-beefy = { path = ".." 
} sp-consensus-beefy = { path = "../../../../primitives/consensus/beefy" } diff --git a/substrate/client/consensus/grandpa/Cargo.toml b/substrate/client/consensus/grandpa/Cargo.toml index 85f98e7546e..e1baff3bbf2 100644 --- a/substrate/client/consensus/grandpa/Cargo.toml +++ b/substrate/client/consensus/grandpa/Cargo.toml @@ -53,7 +53,7 @@ sp-runtime = { path = "../../../primitives/runtime" } [dev-dependencies] assert_matches = "1.3.0" finality-grandpa = { version = "0.16.2", features = ["derive-codec", "test-helpers"] } -serde = "1.0.188" +serde = "1.0.193" tokio = "1.22.0" sc-network = { path = "../../network" } sc-network-test = { path = "../../network/test" } diff --git a/substrate/client/consensus/grandpa/rpc/Cargo.toml b/substrate/client/consensus/grandpa/rpc/Cargo.toml index e2f9e40afb2..2e808ac0bdd 100644 --- a/substrate/client/consensus/grandpa/rpc/Cargo.toml +++ b/substrate/client/consensus/grandpa/rpc/Cargo.toml @@ -15,7 +15,7 @@ futures = "0.3.16" jsonrpsee = { version = "0.16.2", features = ["client-core", "server", "macros"] } log = "0.4.8" parity-scale-codec = { version = "3.6.1", features = ["derive"] } -serde = { version = "1.0.188", features = ["derive"] } +serde = { version = "1.0.193", features = ["derive"] } thiserror = "1.0" sc-client-api = { path = "../../../api" } sc-consensus-grandpa = { path = ".." } diff --git a/substrate/client/merkle-mountain-range/rpc/Cargo.toml b/substrate/client/merkle-mountain-range/rpc/Cargo.toml index e75c5f1baa8..c64354abaaf 100644 --- a/substrate/client/merkle-mountain-range/rpc/Cargo.toml +++ b/substrate/client/merkle-mountain-range/rpc/Cargo.toml @@ -14,7 +14,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1" } jsonrpsee = { version = "0.16.2", features = ["client-core", "server", "macros"] } -serde = { version = "1.0.188", features = ["derive"] } +serde = { version = "1.0.193", features = ["derive"] } sp-api = { path = "../../../primitives/api" } sp-blockchain = { path = "../../../primitives/blockchain" } sp-core = { path = "../../../primitives/core" } diff --git a/substrate/client/network/Cargo.toml b/substrate/client/network/Cargo.toml index 8b599f058f7..2a14ef2dd84 100644 --- a/substrate/client/network/Cargo.toml +++ b/substrate/client/network/Cargo.toml @@ -33,7 +33,7 @@ parking_lot = "0.12.1" partial_sort = "0.2.0" pin-project = "1.0.12" rand = "0.8.5" -serde = { version = "1.0.188", features = ["derive"] } +serde = { version = "1.0.193", features = ["derive"] } serde_json = "1.0.108" smallvec = "1.11.0" thiserror = "1.0" diff --git a/substrate/client/rpc-api/Cargo.toml b/substrate/client/rpc-api/Cargo.toml index e72bbe48ee3..baf94f342ef 100644 --- a/substrate/client/rpc-api/Cargo.toml +++ b/substrate/client/rpc-api/Cargo.toml @@ -15,7 +15,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1" } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -serde = { version = "1.0.188", features = ["derive"] } +serde = { version = "1.0.193", features = ["derive"] } serde_json = "1.0.108" thiserror = "1.0" sc-chain-spec = { path = "../chain-spec" } diff --git a/substrate/client/service/Cargo.toml b/substrate/client/service/Cargo.toml index de69c50702a..63c498964fe 100644 --- a/substrate/client/service/Cargo.toml +++ b/substrate/client/service/Cargo.toml @@ -34,7 +34,7 @@ log = "0.4.17" futures-timer = "3.0.1" exit-future = "0.2.0" pin-project = "1.0.12" -serde = "1.0.188" 
+serde = "1.0.193" serde_json = "1.0.108" sc-keystore = { path = "../keystore" } sp-runtime = { path = "../../primitives/runtime" } diff --git a/substrate/client/storage-monitor/Cargo.toml b/substrate/client/storage-monitor/Cargo.toml index 4c1f2e46504..66302982bee 100644 --- a/substrate/client/storage-monitor/Cargo.toml +++ b/substrate/client/storage-monitor/Cargo.toml @@ -9,7 +9,7 @@ description = "Storage monitor service for substrate" homepage = "https://substrate.io" [dependencies] -clap = { version = "4.4.6", features = ["derive", "string"] } +clap = { version = "4.4.10", features = ["derive", "string"] } log = "0.4.17" fs4 = "0.7.0" sc-client-db = { path = "../db", default-features = false} diff --git a/substrate/client/sync-state-rpc/Cargo.toml b/substrate/client/sync-state-rpc/Cargo.toml index 569cd067f27..da5d22c2b9a 100644 --- a/substrate/client/sync-state-rpc/Cargo.toml +++ b/substrate/client/sync-state-rpc/Cargo.toml @@ -14,7 +14,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1" } jsonrpsee = { version = "0.16.2", features = ["client-core", "server", "macros"] } -serde = { version = "1.0.188", features = ["derive"] } +serde = { version = "1.0.193", features = ["derive"] } serde_json = "1.0.108" thiserror = "1.0.48" sc-chain-spec = { path = "../chain-spec" } diff --git a/substrate/client/sysinfo/Cargo.toml b/substrate/client/sysinfo/Cargo.toml index 86f03050c44..4cd1b222bc6 100644 --- a/substrate/client/sysinfo/Cargo.toml +++ b/substrate/client/sysinfo/Cargo.toml @@ -21,7 +21,7 @@ rand = "0.8.5" rand_pcg = "0.3.1" derive_more = "0.99" regex = "1" -serde = { version = "1.0.188", features = ["derive"] } +serde = { version = "1.0.193", features = ["derive"] } serde_json = "1.0.108" sc-telemetry = { path = "../telemetry" } sp-core = { path = "../../primitives/core" } diff --git a/substrate/client/telemetry/Cargo.toml b/substrate/client/telemetry/Cargo.toml index a693a2884b5..71119df1153 100644 --- a/substrate/client/telemetry/Cargo.toml +++ b/substrate/client/telemetry/Cargo.toml @@ -22,7 +22,7 @@ parking_lot = "0.12.1" pin-project = "1.0.12" sc-utils = { path = "../utils" } rand = "0.8.5" -serde = { version = "1.0.188", features = ["derive"] } +serde = { version = "1.0.193", features = ["derive"] } serde_json = "1.0.108" thiserror = "1.0.48" wasm-timer = "0.2.5" diff --git a/substrate/client/tracing/Cargo.toml b/substrate/client/tracing/Cargo.toml index 796d4f1d826..844969c5ce2 100644 --- a/substrate/client/tracing/Cargo.toml +++ b/substrate/client/tracing/Cargo.toml @@ -23,7 +23,7 @@ log = { version = "0.4.17" } parking_lot = "0.12.1" regex = "1.6.0" rustc-hash = "1.1.0" -serde = "1.0.188" +serde = "1.0.193" thiserror = "1.0.48" tracing = "0.1.29" tracing-log = "0.1.3" diff --git a/substrate/client/transaction-pool/Cargo.toml b/substrate/client/transaction-pool/Cargo.toml index b893dc839ed..3e90304497f 100644 --- a/substrate/client/transaction-pool/Cargo.toml +++ b/substrate/client/transaction-pool/Cargo.toml @@ -20,7 +20,7 @@ futures-timer = "3.0.2" linked-hash-map = "0.5.4" log = "0.4.17" parking_lot = "0.12.1" -serde = { version = "1.0.188", features = ["derive"] } +serde = { version = "1.0.193", features = ["derive"] } thiserror = "1.0.48" prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../utils/prometheus" } sc-client-api = { path = "../api" } diff --git a/substrate/client/transaction-pool/api/Cargo.toml b/substrate/client/transaction-pool/api/Cargo.toml index 
f5760c271ad..dad1e52bb54 100644 --- a/substrate/client/transaction-pool/api/Cargo.toml +++ b/substrate/client/transaction-pool/api/Cargo.toml @@ -13,7 +13,7 @@ async-trait = "0.1.57" codec = { package = "parity-scale-codec", version = "3.6.1" } futures = "0.3.21" log = "0.4.17" -serde = { version = "1.0.188", features = ["derive"] } +serde = { version = "1.0.193", features = ["derive"] } thiserror = "1.0.48" sp-blockchain = { path = "../../../primitives/blockchain" } sp-core = { path = "../../../primitives/core", default-features = false} diff --git a/substrate/frame/beefy-mmr/Cargo.toml b/substrate/frame/beefy-mmr/Cargo.toml index fe0321bea51..ee2c13e7ffd 100644 --- a/substrate/frame/beefy-mmr/Cargo.toml +++ b/substrate/frame/beefy-mmr/Cargo.toml @@ -13,7 +13,7 @@ array-bytes = { version = "6.1", optional = true } codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } log = { version = "0.4.17", default-features = false } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -serde = { version = "1.0.188", optional = true } +serde = { version = "1.0.193", optional = true } binary-merkle-tree = { path = "../../utils/binary-merkle-tree", default-features = false} frame-support = { path = "../support", default-features = false} frame-system = { path = "../system", default-features = false} diff --git a/substrate/frame/beefy/Cargo.toml b/substrate/frame/beefy/Cargo.toml index 1da09321342..9a88de1df15 100644 --- a/substrate/frame/beefy/Cargo.toml +++ b/substrate/frame/beefy/Cargo.toml @@ -12,7 +12,7 @@ homepage = "https://substrate.io" codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } log = { version = "0.4.17", default-features = false } scale-info = { version = "2.10.0", default-features = false, features = ["derive", "serde"] } -serde = { version = "1.0.188", optional = true } +serde = { version = "1.0.193", optional = true } frame-support = { path = "../support", default-features = false} frame-system = { path = "../system", default-features = false} pallet-authorship = { path = "../authorship", default-features = false} diff --git a/substrate/frame/benchmarking/Cargo.toml b/substrate/frame/benchmarking/Cargo.toml index 79f35f62625..c3a17bc82b3 100644 --- a/substrate/frame/benchmarking/Cargo.toml +++ b/substrate/frame/benchmarking/Cargo.toml @@ -18,7 +18,7 @@ linregress = { version = "0.5.1", optional = true } log = { version = "0.4.17", default-features = false } paste = "1.0" scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -serde = { version = "1.0.188", optional = true } +serde = { version = "1.0.193", optional = true } frame-support = { path = "../support", default-features = false} frame-support-procedural = { path = "../support/procedural", default-features = false} frame-system = { path = "../system", default-features = false} diff --git a/substrate/frame/conviction-voting/Cargo.toml b/substrate/frame/conviction-voting/Cargo.toml index 1dc723576dc..bc0ff0f9dc5 100644 --- a/substrate/frame/conviction-voting/Cargo.toml +++ b/substrate/frame/conviction-voting/Cargo.toml @@ -19,7 +19,7 @@ codec = { package = "parity-scale-codec", version = "3.6.1", default-features = "max-encoded-len", ] } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -serde = { version = "1.0.188", features = ["derive"], optional = true } +serde = { version = "1.0.193", features = ["derive"], 
optional = true } frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true} frame-support = { path = "../support", default-features = false} frame-system = { path = "../system", default-features = false} diff --git a/substrate/frame/democracy/Cargo.toml b/substrate/frame/democracy/Cargo.toml index 870bfaa9b89..5be38214cf8 100644 --- a/substrate/frame/democracy/Cargo.toml +++ b/substrate/frame/democracy/Cargo.toml @@ -17,7 +17,7 @@ codec = { package = "parity-scale-codec", version = "3.6.1", default-features = "derive", ] } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -serde = { version = "1.0.188", features = ["derive"], optional = true } +serde = { version = "1.0.193", features = ["derive"], optional = true } frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true} frame-support = { path = "../support", default-features = false} frame-system = { path = "../system", default-features = false} diff --git a/substrate/frame/election-provider-support/solution-type/fuzzer/Cargo.toml b/substrate/frame/election-provider-support/solution-type/fuzzer/Cargo.toml index cc90ed119ad..50f5bd908c7 100644 --- a/substrate/frame/election-provider-support/solution-type/fuzzer/Cargo.toml +++ b/substrate/frame/election-provider-support/solution-type/fuzzer/Cargo.toml @@ -13,7 +13,7 @@ publish = false targets = ["x86_64-unknown-linux-gnu"] [dependencies] -clap = { version = "4.4.6", features = ["derive"] } +clap = { version = "4.4.10", features = ["derive"] } honggfuzz = "0.5" rand = { version = "0.8", features = ["std", "small_rng"] } diff --git a/substrate/frame/message-queue/Cargo.toml b/substrate/frame/message-queue/Cargo.toml index 48304cd882c..98ca3f60cc8 100644 --- a/substrate/frame/message-queue/Cargo.toml +++ b/substrate/frame/message-queue/Cargo.toml @@ -11,7 +11,7 @@ description = "FRAME pallet to queue and process messages" [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -serde = { version = "1.0.188", optional = true, features = ["derive"] } +serde = { version = "1.0.193", optional = true, features = ["derive"] } log = { version = "0.4.17", default-features = false } sp-core = { path = "../../primitives/core", default-features = false} diff --git a/substrate/frame/mixnet/Cargo.toml b/substrate/frame/mixnet/Cargo.toml index 665c606fc37..12597b8b5d5 100644 --- a/substrate/frame/mixnet/Cargo.toml +++ b/substrate/frame/mixnet/Cargo.toml @@ -19,7 +19,7 @@ frame-support = { default-features = false, path = "../support" } frame-system = { default-features = false, path = "../system" } log = { version = "0.4.17", default-features = false } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -serde = { version = "1.0.188", default-features = false, features = ["derive"] } +serde = { version = "1.0.193", default-features = false, features = ["derive"] } sp-application-crypto = { default-features = false, path = "../../primitives/application-crypto" } sp-arithmetic = { default-features = false, path = "../../primitives/arithmetic" } sp-io = { default-features = false, path = "../../primitives/io" } diff --git a/substrate/frame/offences/Cargo.toml b/substrate/frame/offences/Cargo.toml index ac204a7813a..c1cb950069a 100644 --- a/substrate/frame/offences/Cargo.toml +++ b/substrate/frame/offences/Cargo.toml @@ 
-16,7 +16,7 @@ targets = ["x86_64-unknown-linux-gnu"] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } log = { version = "0.4.17", default-features = false } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -serde = { version = "1.0.188", optional = true } +serde = { version = "1.0.193", optional = true } frame-support = { path = "../support", default-features = false} frame-system = { path = "../system", default-features = false} pallet-balances = { path = "../balances", default-features = false} diff --git a/substrate/frame/referenda/Cargo.toml b/substrate/frame/referenda/Cargo.toml index 4f53e2bc002..d892fa32a6a 100644 --- a/substrate/frame/referenda/Cargo.toml +++ b/substrate/frame/referenda/Cargo.toml @@ -18,7 +18,7 @@ codec = { package = "parity-scale-codec", version = "3.6.1", default-features = "derive", ] } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -serde = { version = "1.0.188", features = ["derive"], optional = true } +serde = { version = "1.0.193", features = ["derive"], optional = true } sp-arithmetic = { path = "../../primitives/arithmetic", default-features = false} frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true} frame-support = { path = "../support", default-features = false} diff --git a/substrate/frame/remark/Cargo.toml b/substrate/frame/remark/Cargo.toml index ad04140ae9f..fe80d03c8d2 100644 --- a/substrate/frame/remark/Cargo.toml +++ b/substrate/frame/remark/Cargo.toml @@ -15,7 +15,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -serde = { version = "1.0.188", optional = true } +serde = { version = "1.0.193", optional = true } frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true} frame-support = { path = "../support", default-features = false} frame-system = { path = "../system", default-features = false} diff --git a/substrate/frame/staking/Cargo.toml b/substrate/frame/staking/Cargo.toml index c5cac9fefa7..8fb0025e58f 100644 --- a/substrate/frame/staking/Cargo.toml +++ b/substrate/frame/staking/Cargo.toml @@ -13,7 +13,7 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -serde = { version = "1.0.188", default-features = false, features = ["alloc", "derive"]} +serde = { version = "1.0.193", default-features = false, features = ["alloc", "derive"]} codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = [ "derive", ] } diff --git a/substrate/frame/state-trie-migration/Cargo.toml b/substrate/frame/state-trie-migration/Cargo.toml index 9e81397fadd..93e4d80b9ac 100644 --- a/substrate/frame/state-trie-migration/Cargo.toml +++ b/substrate/frame/state-trie-migration/Cargo.toml @@ -15,7 +15,7 @@ targets = ["x86_64-unknown-linux-gnu"] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } log = { version = "0.4.17", default-features = false } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -serde = { version = "1.0.188", optional = true } +serde = { version = "1.0.193", optional = true } thousands = { version = "0.2.0", optional = true } zstd = { version = "0.12.4", default-features = false, optional = true } frame-benchmarking = { path = 
"../benchmarking", default-features = false, optional = true} diff --git a/substrate/frame/support/Cargo.toml b/substrate/frame/support/Cargo.toml index b8e21e60761..887128e9561 100644 --- a/substrate/frame/support/Cargo.toml +++ b/substrate/frame/support/Cargo.toml @@ -14,7 +14,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] array-bytes = { version = "6.1", default-features = false } -serde = { version = "1.0.188", default-features = false, features = ["alloc", "derive"] } +serde = { version = "1.0.193", default-features = false, features = ["alloc", "derive"] } codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive", "max-encoded-len"] } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } frame-metadata = { version = "16.0.0", default-features = false, features = ["current"] } diff --git a/substrate/frame/support/test/Cargo.toml b/substrate/frame/support/test/Cargo.toml index fc10725e814..54f14df75bc 100644 --- a/substrate/frame/support/test/Cargo.toml +++ b/substrate/frame/support/test/Cargo.toml @@ -13,7 +13,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] static_assertions = "1.1.0" -serde = { version = "1.0.188", default-features = false, features = ["derive"] } +serde = { version = "1.0.193", default-features = false, features = ["derive"] } codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } frame-metadata = { version = "16.0.0", default-features = false, features = ["current"] } diff --git a/substrate/frame/support/test/pallet/Cargo.toml b/substrate/frame/support/test/pallet/Cargo.toml index c96e22ff1ab..8d0f252326d 100644 --- a/substrate/frame/support/test/pallet/Cargo.toml +++ b/substrate/frame/support/test/pallet/Cargo.toml @@ -14,7 +14,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -serde = { version = "1.0.188", default-features = false, features = ["derive"] } +serde = { version = "1.0.193", default-features = false, features = ["derive"] } frame-support = { path = "../..", default-features = false} frame-system = { path = "../../../system", default-features = false} sp-runtime = { path = "../../../../primitives/runtime", default-features = false} diff --git a/substrate/frame/system/Cargo.toml b/substrate/frame/system/Cargo.toml index 8f7e0052fe0..8de44947dc0 100644 --- a/substrate/frame/system/Cargo.toml +++ b/substrate/frame/system/Cargo.toml @@ -17,7 +17,7 @@ cfg-if = "1.0" codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } log = { version = "0.4.17", default-features = false } scale-info = { version = "2.10.0", default-features = false, features = ["derive", "serde"] } -serde = { version = "1.0.188", default-features = false, features = ["derive", "alloc"] } +serde = { version = "1.0.193", default-features = false, features = ["derive", "alloc"] } frame-support = { path = "../support", default-features = false} sp-core = { path = "../../primitives/core", default-features = false, features = ["serde"] } sp-io = { path = "../../primitives/io", default-features = false} diff --git a/substrate/frame/tips/Cargo.toml b/substrate/frame/tips/Cargo.toml index 
6df886b93d7..a189c6691af 100644 --- a/substrate/frame/tips/Cargo.toml +++ b/substrate/frame/tips/Cargo.toml @@ -16,7 +16,7 @@ targets = ["x86_64-unknown-linux-gnu"] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } log = { version = "0.4.17", default-features = false } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -serde = { version = "1.0.188", features = ["derive"], optional = true } +serde = { version = "1.0.193", features = ["derive"], optional = true } frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true} frame-support = { path = "../support", default-features = false} frame-system = { path = "../system", default-features = false} diff --git a/substrate/frame/transaction-payment/Cargo.toml b/substrate/frame/transaction-payment/Cargo.toml index ad384755614..407740cfc0f 100644 --- a/substrate/frame/transaction-payment/Cargo.toml +++ b/substrate/frame/transaction-payment/Cargo.toml @@ -17,7 +17,7 @@ codec = { package = "parity-scale-codec", version = "3.6.1", default-features = "derive", ] } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -serde = { version = "1.0.188", optional = true } +serde = { version = "1.0.193", optional = true } frame-support = { path = "../support", default-features = false} frame-system = { path = "../system", default-features = false} sp-core = { path = "../../primitives/core", default-features = false} diff --git a/substrate/frame/transaction-payment/asset-tx-payment/Cargo.toml b/substrate/frame/transaction-payment/asset-tx-payment/Cargo.toml index ae236728cd5..59232a72b99 100644 --- a/substrate/frame/transaction-payment/asset-tx-payment/Cargo.toml +++ b/substrate/frame/transaction-payment/asset-tx-payment/Cargo.toml @@ -27,7 +27,7 @@ frame-benchmarking = { path = "../../benchmarking", default-features = false, op # Other dependencies codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -serde = { version = "1.0.188", optional = true } +serde = { version = "1.0.193", optional = true } [dev-dependencies] serde_json = "1.0.108" diff --git a/substrate/frame/transaction-storage/Cargo.toml b/substrate/frame/transaction-storage/Cargo.toml index e90f063427b..a847ca6a790 100644 --- a/substrate/frame/transaction-storage/Cargo.toml +++ b/substrate/frame/transaction-storage/Cargo.toml @@ -16,7 +16,7 @@ targets = ["x86_64-unknown-linux-gnu"] array-bytes = { version = "6.1", optional = true } codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -serde = { version = "1.0.188", optional = true } +serde = { version = "1.0.193", optional = true } frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true} frame-support = { path = "../support", default-features = false} frame-system = { path = "../system", default-features = false} diff --git a/substrate/frame/treasury/Cargo.toml b/substrate/frame/treasury/Cargo.toml index 6fb23380f82..c9fdb3d5b90 100644 --- a/substrate/frame/treasury/Cargo.toml +++ b/substrate/frame/treasury/Cargo.toml @@ -20,7 +20,7 @@ codec = { package = "parity-scale-codec", version = "3.6.1", default-features = docify = "0.2.0" impl-trait-for-tuples = "0.2.2" scale-info = { version = "2.10.0", 
default-features = false, features = ["derive"] } -serde = { version = "1.0.188", features = ["derive"], optional = true } +serde = { version = "1.0.193", features = ["derive"], optional = true } frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true} frame-support = { path = "../support", default-features = false} frame-system = { path = "../system", default-features = false} diff --git a/substrate/primitives/application-crypto/Cargo.toml b/substrate/primitives/application-crypto/Cargo.toml index a4a1bc44a69..7896df94913 100644 --- a/substrate/primitives/application-crypto/Cargo.toml +++ b/substrate/primitives/application-crypto/Cargo.toml @@ -18,7 +18,7 @@ targets = ["x86_64-unknown-linux-gnu"] sp-core = { path = "../core", default-features = false} codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -serde = { version = "1.0.188", default-features = false, optional = true, features = ["derive", "alloc"] } +serde = { version = "1.0.193", default-features = false, optional = true, features = ["derive", "alloc"] } sp-std = { path = "../std", default-features = false} sp-io = { path = "../io", default-features = false} diff --git a/substrate/primitives/arithmetic/Cargo.toml b/substrate/primitives/arithmetic/Cargo.toml index 249aebec68f..ff938795aa3 100644 --- a/substrate/primitives/arithmetic/Cargo.toml +++ b/substrate/primitives/arithmetic/Cargo.toml @@ -21,7 +21,7 @@ codec = { package = "parity-scale-codec", version = "3.6.1", default-features = integer-sqrt = "0.1.2" num-traits = { version = "0.2.8", default-features = false } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -serde = { version = "1.0.188", default-features = false, features = ["derive", "alloc"], optional = true } +serde = { version = "1.0.193", default-features = false, features = ["derive", "alloc"], optional = true } static_assertions = "1.1.0" sp-std = { path = "../std", default-features = false} diff --git a/substrate/primitives/consensus/babe/Cargo.toml b/substrate/primitives/consensus/babe/Cargo.toml index db8bb8cb154..f952a8ab4ee 100644 --- a/substrate/primitives/consensus/babe/Cargo.toml +++ b/substrate/primitives/consensus/babe/Cargo.toml @@ -16,7 +16,7 @@ targets = ["x86_64-unknown-linux-gnu"] async-trait = { version = "0.1.57", optional = true } codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -serde = { version = "1.0.188", default-features = false, features = ["derive", "alloc"], optional = true } +serde = { version = "1.0.193", default-features = false, features = ["derive", "alloc"], optional = true } sp-api = { path = "../../api", default-features = false} sp-application-crypto = { path = "../../application-crypto", default-features = false} sp-consensus-slots = { path = "../slots", default-features = false} diff --git a/substrate/primitives/consensus/beefy/Cargo.toml b/substrate/primitives/consensus/beefy/Cargo.toml index e78323c8980..67c6ad57e2d 100644 --- a/substrate/primitives/consensus/beefy/Cargo.toml +++ b/substrate/primitives/consensus/beefy/Cargo.toml @@ -14,7 +14,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } scale-info = { version = 
"2.10.0", default-features = false, features = ["derive"] } -serde = { version = "1.0.188", default-features = false, optional = true, features = ["derive", "alloc"] } +serde = { version = "1.0.193", default-features = false, optional = true, features = ["derive", "alloc"] } sp-api = { path = "../../api", default-features = false } sp-application-crypto = { path = "../../application-crypto", default-features = false } sp-core = { path = "../../core", default-features = false } diff --git a/substrate/primitives/consensus/grandpa/Cargo.toml b/substrate/primitives/consensus/grandpa/Cargo.toml index 8757869995d..ffba6490823 100644 --- a/substrate/primitives/consensus/grandpa/Cargo.toml +++ b/substrate/primitives/consensus/grandpa/Cargo.toml @@ -18,7 +18,7 @@ codec = { package = "parity-scale-codec", version = "3.6.1", default-features = grandpa = { package = "finality-grandpa", version = "0.16.2", default-features = false, features = ["derive-codec"] } log = { version = "0.4.17", default-features = false } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -serde = { version = "1.0.188", features = ["derive", "alloc"], default-features = false, optional = true } +serde = { version = "1.0.193", features = ["derive", "alloc"], default-features = false, optional = true } sp-api = { path = "../../api", default-features = false} sp-application-crypto = { path = "../../application-crypto", default-features = false} sp-core = { path = "../../core", default-features = false} diff --git a/substrate/primitives/consensus/sassafras/Cargo.toml b/substrate/primitives/consensus/sassafras/Cargo.toml index 696e0a64596..2c5971e919e 100644 --- a/substrate/primitives/consensus/sassafras/Cargo.toml +++ b/substrate/primitives/consensus/sassafras/Cargo.toml @@ -17,7 +17,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] scale-codec = { package = "parity-scale-codec", version = "3.2.2", default-features = false } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -serde = { version = "1.0.188", default-features = false, features = ["derive"], optional = true } +serde = { version = "1.0.193", default-features = false, features = ["derive"], optional = true } sp-api = { default-features = false, path = "../../api" } sp-application-crypto = { default-features = false, path = "../../application-crypto", features = ["bandersnatch-experimental"] } sp-consensus-slots = { default-features = false, path = "../slots" } diff --git a/substrate/primitives/core/Cargo.toml b/substrate/primitives/core/Cargo.toml index 34485c72ab0..fe82a2cd62e 100644 --- a/substrate/primitives/core/Cargo.toml +++ b/substrate/primitives/core/Cargo.toml @@ -16,7 +16,7 @@ targets = ["x86_64-unknown-linux-gnu"] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive","max-encoded-len"] } scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } log = { version = "0.4.17", default-features = false } -serde = { version = "1.0.188", optional = true, default-features = false, features = ["derive", "alloc"] } +serde = { version = "1.0.193", optional = true, default-features = false, features = ["derive", "alloc"] } bounded-collections = { version = "0.1.8", default-features = false } primitive-types = { version = "0.12.0", default-features = false, features = ["codec", "scale-info"] } impl-serde = { version = "0.4.0", default-features = false, optional = true } diff --git 
a/substrate/primitives/merkle-mountain-range/Cargo.toml b/substrate/primitives/merkle-mountain-range/Cargo.toml index 5216765825f..89e03f1cabf 100644 --- a/substrate/primitives/merkle-mountain-range/Cargo.toml +++ b/substrate/primitives/merkle-mountain-range/Cargo.toml @@ -16,7 +16,7 @@ codec = { package = "parity-scale-codec", version = "3.6.1", default-features = scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } log = { version = "0.4.17", default-features = false } mmr-lib = { package = "ckb-merkle-mountain-range", version = "0.5.2", default-features = false } -serde = { version = "1.0.188", features = ["derive", "alloc"], default-features = false, optional = true } +serde = { version = "1.0.193", features = ["derive", "alloc"], default-features = false, optional = true } sp-api = { path = "../api", default-features = false} sp-core = { path = "../core", default-features = false} sp-debug-derive = { path = "../debug-derive", default-features = false} diff --git a/substrate/primitives/npos-elections/Cargo.toml b/substrate/primitives/npos-elections/Cargo.toml index 90418e561f2..44a53e856ba 100644 --- a/substrate/primitives/npos-elections/Cargo.toml +++ b/substrate/primitives/npos-elections/Cargo.toml @@ -15,7 +15,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -serde = { version = "1.0.188", default-features = false, features = ["derive", "alloc"], optional = true } +serde = { version = "1.0.193", default-features = false, features = ["derive", "alloc"], optional = true } sp-arithmetic = { path = "../arithmetic", default-features = false} sp-core = { path = "../core", default-features = false} sp-runtime = { path = "../runtime", default-features = false} diff --git a/substrate/primitives/npos-elections/fuzzer/Cargo.toml b/substrate/primitives/npos-elections/fuzzer/Cargo.toml index 5e75f926f87..8bba69a7543 100644 --- a/substrate/primitives/npos-elections/fuzzer/Cargo.toml +++ b/substrate/primitives/npos-elections/fuzzer/Cargo.toml @@ -14,7 +14,7 @@ publish = false targets = ["x86_64-unknown-linux-gnu"] [dependencies] -clap = { version = "4.4.6", features = ["derive"] } +clap = { version = "4.4.10", features = ["derive"] } honggfuzz = "0.5" rand = { version = "0.8", features = ["std", "small_rng"] } sp-npos-elections = { path = ".." 
} diff --git a/substrate/primitives/rpc/Cargo.toml b/substrate/primitives/rpc/Cargo.toml index 77bdcc4f89a..cf10af31977 100644 --- a/substrate/primitives/rpc/Cargo.toml +++ b/substrate/primitives/rpc/Cargo.toml @@ -14,7 +14,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] rustc-hash = "1.1.0" -serde = { version = "1.0.188", features = ["derive"] } +serde = { version = "1.0.193", features = ["derive"] } sp-core = { path = "../core" } [dev-dependencies] diff --git a/substrate/primitives/runtime/Cargo.toml b/substrate/primitives/runtime/Cargo.toml index 40a53d8d9e3..9e2dc8b0265 100644 --- a/substrate/primitives/runtime/Cargo.toml +++ b/substrate/primitives/runtime/Cargo.toml @@ -22,7 +22,7 @@ log = { version = "0.4.17", default-features = false } paste = "1.0" rand = { version = "0.8.5", optional = true } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -serde = { version = "1.0.188", default-features = false, features = ["derive", "alloc"], optional = true } +serde = { version = "1.0.193", default-features = false, features = ["derive", "alloc"], optional = true } sp-application-crypto = { path = "../application-crypto", default-features = false} sp-arithmetic = { path = "../arithmetic", default-features = false} sp-core = { path = "../core", default-features = false} diff --git a/substrate/primitives/staking/Cargo.toml b/substrate/primitives/staking/Cargo.toml index ef96276a003..bc69e2f61c1 100644 --- a/substrate/primitives/staking/Cargo.toml +++ b/substrate/primitives/staking/Cargo.toml @@ -13,7 +13,7 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -serde = { version = "1.0.188", default-features = false, features = ["derive", "alloc"], optional = true } +serde = { version = "1.0.193", default-features = false, features = ["derive", "alloc"], optional = true } codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } impl-trait-for-tuples = "0.2.2" diff --git a/substrate/primitives/storage/Cargo.toml b/substrate/primitives/storage/Cargo.toml index 11e574f1c4c..95fa27e9cfb 100644 --- a/substrate/primitives/storage/Cargo.toml +++ b/substrate/primitives/storage/Cargo.toml @@ -17,7 +17,7 @@ targets = ["x86_64-unknown-linux-gnu"] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } impl-serde = { version = "0.4.0", optional = true, default-features = false } ref-cast = "1.0.0" -serde = { version = "1.0.188", default-features = false, features = ["derive", "alloc"], optional = true } +serde = { version = "1.0.193", default-features = false, features = ["derive", "alloc"], optional = true } sp-debug-derive = { path = "../debug-derive", default-features = false} sp-std = { path = "../std", default-features = false} diff --git a/substrate/primitives/test-primitives/Cargo.toml b/substrate/primitives/test-primitives/Cargo.toml index a3775d7f61f..cacc3d86d17 100644 --- a/substrate/primitives/test-primitives/Cargo.toml +++ b/substrate/primitives/test-primitives/Cargo.toml @@ -14,7 +14,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -serde = { version = "1.0.188", default-features = false, features = ["derive"], optional = true } 
+serde = { version = "1.0.193", default-features = false, features = ["derive"], optional = true } sp-application-crypto = { path = "../application-crypto", default-features = false} sp-core = { path = "../core", default-features = false} sp-runtime = { path = "../runtime", default-features = false} diff --git a/substrate/primitives/version/Cargo.toml b/substrate/primitives/version/Cargo.toml index 41a83f01f66..7f272ead8d9 100644 --- a/substrate/primitives/version/Cargo.toml +++ b/substrate/primitives/version/Cargo.toml @@ -18,7 +18,7 @@ codec = { package = "parity-scale-codec", version = "3.6.1", default-features = impl-serde = { version = "0.4.0", default-features = false, optional = true } parity-wasm = { version = "0.45", optional = true } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -serde = { version = "1.0.188", default-features = false, features = ["derive", "alloc"], optional = true } +serde = { version = "1.0.193", default-features = false, features = ["derive", "alloc"], optional = true } thiserror = { version = "1.0.48", optional = true } sp-core-hashing-proc-macro = { path = "../core/hashing/proc-macro" } sp-runtime = { path = "../runtime", default-features = false} diff --git a/substrate/primitives/weights/Cargo.toml b/substrate/primitives/weights/Cargo.toml index 6642f97029f..c36714a76a3 100644 --- a/substrate/primitives/weights/Cargo.toml +++ b/substrate/primitives/weights/Cargo.toml @@ -15,7 +15,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -serde = { version = "1.0.188", default-features = false, optional = true, features = ["derive", "alloc"] } +serde = { version = "1.0.193", default-features = false, optional = true, features = ["derive", "alloc"] } smallvec = "1.11.0" sp-arithmetic = { path = "../arithmetic", default-features = false} sp-core = { path = "../core", default-features = false} diff --git a/substrate/scripts/ci/node-template-release/Cargo.toml b/substrate/scripts/ci/node-template-release/Cargo.toml index 73ffce8645b..59c53e952b9 100644 --- a/substrate/scripts/ci/node-template-release/Cargo.toml +++ b/substrate/scripts/ci/node-template-release/Cargo.toml @@ -11,7 +11,7 @@ publish = false targets = ["x86_64-unknown-linux-gnu"] [dependencies] -clap = { version = "4.4.6", features = ["derive"] } +clap = { version = "4.4.10", features = ["derive"] } flate2 = "1.0" fs_extra = "1.3" glob = "0.3" diff --git a/substrate/test-utils/client/Cargo.toml b/substrate/test-utils/client/Cargo.toml index 032fbaf4e65..dc034de876a 100644 --- a/substrate/test-utils/client/Cargo.toml +++ b/substrate/test-utils/client/Cargo.toml @@ -17,7 +17,7 @@ array-bytes = "6.1" async-trait = "0.1.57" codec = { package = "parity-scale-codec", version = "3.6.1" } futures = "0.3.21" -serde = "1.0.188" +serde = "1.0.193" serde_json = "1.0.108" sc-client-api = { path = "../../client/api" } sc-client-db = { path = "../../client/db", default-features = false, features = [ diff --git a/substrate/test-utils/runtime/Cargo.toml b/substrate/test-utils/runtime/Cargo.toml index 2f1e192eded..88f8a1b572d 100644 --- a/substrate/test-utils/runtime/Cargo.toml +++ b/substrate/test-utils/runtime/Cargo.toml @@ -58,7 +58,7 @@ sp-consensus = { path = "../../primitives/consensus/common" } substrate-test-runtime-client = { path = "client" } sp-tracing = { path = 
"../../primitives/tracing" } json-patch = { version = "1.0.0", default-features = false } -serde = { version = "1.0.188", features = ["alloc", "derive"], default-features = false } +serde = { version = "1.0.193", features = ["alloc", "derive"], default-features = false } serde_json = { version = "1.0.108", default-features = false, features = ["alloc"] } [build-dependencies] diff --git a/substrate/utils/frame/benchmarking-cli/Cargo.toml b/substrate/utils/frame/benchmarking-cli/Cargo.toml index b67d08a85c2..a6f6264b65c 100644 --- a/substrate/utils/frame/benchmarking-cli/Cargo.toml +++ b/substrate/utils/frame/benchmarking-cli/Cargo.toml @@ -15,7 +15,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] array-bytes = "6.1" chrono = "0.4" -clap = { version = "4.4.6", features = ["derive"] } +clap = { version = "4.4.10", features = ["derive"] } codec = { package = "parity-scale-codec", version = "3.6.1" } comfy-table = { version = "7.0.1", default-features = false } handlebars = "4.2.2" @@ -26,7 +26,7 @@ linked-hash-map = "0.5.4" log = "0.4.17" rand = { version = "0.8.4", features = ["small_rng"] } rand_pcg = "0.3.1" -serde = "1.0.188" +serde = "1.0.193" serde_json = "1.0.108" thiserror = "1.0.48" thousands = "0.2.0" diff --git a/substrate/utils/frame/frame-utilities-cli/Cargo.toml b/substrate/utils/frame/frame-utilities-cli/Cargo.toml index 24c04f47391..6e33ed88e0a 100644 --- a/substrate/utils/frame/frame-utilities-cli/Cargo.toml +++ b/substrate/utils/frame/frame-utilities-cli/Cargo.toml @@ -11,7 +11,7 @@ documentation = "https://docs.rs/substrate-frame-cli" readme = "README.md" [dependencies] -clap = { version = "4.4.6", features = ["derive"] } +clap = { version = "4.4.10", features = ["derive"] } frame-support = { path = "../../../frame/support" } frame-system = { path = "../../../frame/system" } sc-cli = { path = "../../../client/cli" } diff --git a/substrate/utils/frame/generate-bags/node-runtime/Cargo.toml b/substrate/utils/frame/generate-bags/node-runtime/Cargo.toml index 13e61138356..a2ee3883786 100644 --- a/substrate/utils/frame/generate-bags/node-runtime/Cargo.toml +++ b/substrate/utils/frame/generate-bags/node-runtime/Cargo.toml @@ -14,4 +14,4 @@ kitchensink-runtime = { path = "../../../../bin/node/runtime" } generate-bags = { path = ".." 
} # third-party -clap = { version = "4.4.6", features = ["derive"] } +clap = { version = "4.4.10", features = ["derive"] } diff --git a/substrate/utils/frame/remote-externalities/Cargo.toml b/substrate/utils/frame/remote-externalities/Cargo.toml index 7067aed238a..88071f7d634 100644 --- a/substrate/utils/frame/remote-externalities/Cargo.toml +++ b/substrate/utils/frame/remote-externalities/Cargo.toml @@ -15,7 +15,7 @@ targets = ["x86_64-unknown-linux-gnu"] jsonrpsee = { version = "0.16.2", features = ["http-client"] } codec = { package = "parity-scale-codec", version = "3.6.1" } log = "0.4.17" -serde = "1.0.188" +serde = "1.0.193" sp-core = { path = "../../../primitives/core" } sp-state-machine = { path = "../../../primitives/state-machine" } sp-io = { path = "../../../primitives/io" } diff --git a/substrate/utils/frame/try-runtime/cli/Cargo.toml b/substrate/utils/frame/try-runtime/cli/Cargo.toml index 6be4306193c..97efa0e6624 100644 --- a/substrate/utils/frame/try-runtime/cli/Cargo.toml +++ b/substrate/utils/frame/try-runtime/cli/Cargo.toml @@ -35,11 +35,11 @@ frame-try-runtime = { path = "../../../../frame/try-runtime", optional = true} substrate-rpc-client = { path = "../../rpc/client" } async-trait = "0.1.57" -clap = { version = "4.4.6", features = ["derive"] } +clap = { version = "4.4.10", features = ["derive"] } hex = { version = "0.4.3", default-features = false } log = "0.4.17" parity-scale-codec = "3.6.1" -serde = "1.0.188" +serde = "1.0.193" serde_json = "1.0.108" zstd = { version = "0.12.4", default-features = false } -- GitLab From 1f2ccaea05c8508f5cc916f8ae0ba60c102c5cf3 Mon Sep 17 00:00:00 2001 From: asynchronous rob Date: Thu, 30 Nov 2023 11:55:33 -0600 Subject: [PATCH 172/199] Remove dependency on rand's SliceRandom shuffle implementation in gossip-support (#2555) In gossip-support, we shuffled the list of authorities using the `rand::seq::SliceRandom::shuffle`. This function's behavior is unspecified beyond being `O(n)` and could change in the future, leading to network issues between nodes using different shuffling algorithms. In practice, the implementation was a Fisher-Yates shuffle. This PR replaces the call with a re-implementation of Fisher-Yates and adds a test to ensure the behavior is the same between the two at the moment. 
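For illustration, here is a minimal, self-contained sketch of the property this change relies on: once the shuffle is an explicitly written Durstenfeld/Fisher-Yates loop, any two nodes that derive the same seed compute the same ordering. The loop body mirrors the implementation added in the diff below, but the generic signature, seed, sample data and `main` harness are illustrative only, not part of this patch.

```rust
use rand::{Rng, SeedableRng};
use rand_chacha::ChaCha20Rng;

// Durstenfeld variant of Fisher-Yates: walk the slice from the back and swap
// each element with one drawn uniformly from the positions at or before it.
// Sampling the index as `u32` pins down exactly which values are pulled from
// the RNG, so the outcome no longer depends on unspecified library internals.
fn fisher_yates_shuffle<R: Rng, T>(rng: &mut R, items: &mut [T]) {
    for i in (1..items.len()).rev() {
        let index = rng.gen_range(0u32..(i as u32 + 1));
        items.swap(i, index as usize);
    }
}

fn main() {
    // Two "nodes" seeded with the same 32 bytes derive the same ordering.
    let mut a: Vec<u32> = (0..10).collect();
    let mut b = a.clone();
    fisher_yates_shuffle(&mut ChaCha20Rng::from_seed([7; 32]), &mut a);
    fisher_yates_shuffle(&mut ChaCha20Rng::from_seed([7; 32]), &mut b);
    assert_eq!(a, b);
}
```

The quickcheck test added in the diff then checks that this explicit loop and the current `rand::seq::SliceRandom::shuffle` produce identical output for the same seed, so a silent divergence on either side would be caught.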
--- Cargo.lock | 13 +++++++++++ .../node/network/gossip-support/Cargo.toml | 1 + .../node/network/gossip-support/src/lib.rs | 14 ++++++++++-- .../node/network/gossip-support/src/tests.rs | 22 +++++++++++++++++++ 4 files changed, 48 insertions(+), 2 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 05715ad8dfb..c83eeb050df 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4844,6 +4844,16 @@ dependencies = [ "syn 2.0.38", ] +[[package]] +name = "env_logger" +version = "0.8.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a19187fea3ac7e84da7dacf48de0c45d63c6a76f9490dae389aead16c243fce3" +dependencies = [ + "log", + "regex", +] + [[package]] name = "env_logger" version = "0.9.3" @@ -11905,6 +11915,7 @@ dependencies = [ "polkadot-node-subsystem-test-helpers", "polkadot-node-subsystem-util", "polkadot-primitives", + "quickcheck", "rand 0.8.5", "rand_chacha 0.3.1", "sc-network", @@ -13731,6 +13742,8 @@ version = "1.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "588f6378e4dd99458b60ec275b4477add41ce4fa9f64dcba6f15adccb19b50d6" dependencies = [ + "env_logger 0.8.4", + "log", "rand 0.8.5", ] diff --git a/polkadot/node/network/gossip-support/Cargo.toml b/polkadot/node/network/gossip-support/Cargo.toml index a9f68261add..64a9743d1db 100644 --- a/polkadot/node/network/gossip-support/Cargo.toml +++ b/polkadot/node/network/gossip-support/Cargo.toml @@ -35,3 +35,4 @@ polkadot-node-subsystem-test-helpers = { path = "../../subsystem-test-helpers" } assert_matches = "1.4.0" async-trait = "0.1.57" lazy_static = "1.4.0" +quickcheck = "1.0.3" diff --git a/polkadot/node/network/gossip-support/src/lib.rs b/polkadot/node/network/gossip-support/src/lib.rs index 674c86e5ce2..0d1b04f2ba2 100644 --- a/polkadot/node/network/gossip-support/src/lib.rs +++ b/polkadot/node/network/gossip-support/src/lib.rs @@ -32,7 +32,7 @@ use std::{ use futures::{channel::oneshot, select, FutureExt as _}; use futures_timer::Delay; -use rand::{seq::SliceRandom as _, SeedableRng}; +use rand::{Rng, SeedableRng}; use rand_chacha::ChaCha20Rng; use sc_network::{config::parse_addr, Multiaddr}; @@ -607,7 +607,7 @@ async fn update_gossip_topology( .map(|(i, a)| (a.clone(), ValidatorIndex(i as _))) .collect(); - canonical_shuffling.shuffle(&mut rng); + fisher_yates_shuffle(&mut rng, &mut canonical_shuffling[..]); for (i, (_, validator_index)) in canonical_shuffling.iter().enumerate() { shuffled_indices[validator_index.0 as usize] = i; } @@ -627,6 +627,16 @@ async fn update_gossip_topology( Ok(()) } +// Durstenfeld algorithm for the Fisher-Yates shuffle +// https://en.wikipedia.org/wiki/Fisher%E2%80%93Yates_shuffle#The_modern_algorithm +fn fisher_yates_shuffle(rng: &mut R, items: &mut [T]) { + for i in (1..items.len()).rev() { + // invariant: elements with index > i have been locked in place. 
+ let index = rng.gen_range(0u32..(i as u32 + 1)); + items.swap(i, index as usize); + } +} + #[overseer::subsystem(GossipSupport, error = SubsystemError, prefix = self::overseer)] impl GossipSupport where diff --git a/polkadot/node/network/gossip-support/src/tests.rs b/polkadot/node/network/gossip-support/src/tests.rs index 2e909bb0a67..e5ee101c31d 100644 --- a/polkadot/node/network/gossip-support/src/tests.rs +++ b/polkadot/node/network/gossip-support/src/tests.rs @@ -22,6 +22,8 @@ use assert_matches::assert_matches; use async_trait::async_trait; use futures::{executor, future, Future}; use lazy_static::lazy_static; +use quickcheck::quickcheck; +use rand::seq::SliceRandom as _; use sc_network::multiaddr::Protocol; use sp_authority_discovery::AuthorityPair as AuthorityDiscoveryPair; @@ -710,3 +712,23 @@ fn issues_a_connection_request_when_last_request_was_mostly_unresolved() { assert_eq!(state.last_session_index, Some(1)); assert!(state.last_failure.is_none()); } + +// note: this test was added at a time where the default `rand::SliceRandom::shuffle` +// function was used to shuffle authorities for the topology and ensures backwards compatibility. +// +// in the same commit, an explicit fisher-yates implementation was added in place of the unspecified +// behavior of that function. If this test begins to fail at some point in the future, it can simply +// be removed as the desired behavior has been preserved. +quickcheck! { + fn rng_shuffle_equals_fisher_yates(x: Vec, seed_base: u8) -> bool { + let mut rng1: ChaCha20Rng = SeedableRng::from_seed([seed_base; 32]); + let mut rng2: ChaCha20Rng = SeedableRng::from_seed([seed_base; 32]); + + let mut data1 = x.clone(); + let mut data2 = x; + + data1.shuffle(&mut rng1); + crate::fisher_yates_shuffle(&mut rng2, &mut data2[..]); + data1 == data2 + } +} -- GitLab From 52132636d99e61b9bd9718f6afb28972b7cd3cde Mon Sep 17 00:00:00 2001 From: Parth Date: Fri, 1 Dec 2023 04:31:02 +0400 Subject: [PATCH 173/199] Add `recorded_keys` function to get recorded keys from the proof recorder (#2561) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit # Description - What does this PR do? This PR adds a function to get the recorded keys from a proof recorder instance. - Why are these changes needed? This change is required to get the keys accessed by the trie backend during runtime execution. The keys are already tracked by the proof recorder; they just aren't exposed publicly. - How were these changes implemented and what do they affect? The changes are implemented by adding a public function to the proof recorder that simply clones the `recorded_keys` field. It is a pure addition of a function and AFAIK does not affect anything.
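As a rough usage sketch (not part of this PR): a caller that attached the recorder to its trie backend can afterwards ask which keys were touched, per trie. The hasher choice, the helper name and the assumption that the involved types implement `Debug` are illustrative; only `recorded_keys()` itself comes from this change.

```rust
use sp_core::Blake2Hasher;
use sp_trie::recorder::Recorder;

// Assumed setup: `recorder` was plugged into the trie backend while the
// runtime (or any other trie user) executed, so it has seen every key access.
fn log_accessed_keys(recorder: &Recorder<Blake2Hasher>) {
    // One inner map per trie (e.g. the main trie and each child trie),
    // keyed by that trie's storage root.
    for (storage_root, keys) in recorder.recorded_keys() {
        for (key, recorded_for) in keys {
            // `recorded_for` says whether only the hash or the full value
            // of this key ended up being recorded.
            println!("trie {:?}: key {:?} -> {:?}", storage_root, key, recorded_for);
        }
    }
}
```

Because the accessor clones the underlying map, inspecting the result does not disturb any recording still in progress.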
# Checklist - [x] My PR includes a detailed description as outlined in the "Description" section above - [ ] My PR follows the [labeling requirements](CONTRIBUTING.md#Process) of this project (at minimum one label for `T` required) - [ ] I have made corresponding changes to the documentation (if applicable) - [ ] I have added tests that prove my fix is effective or that my feature works (if applicable) --------- Co-authored-by: Bastian Köcher --- substrate/primitives/trie/src/recorder.rs | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/substrate/primitives/trie/src/recorder.rs b/substrate/primitives/trie/src/recorder.rs index b236f281bb1..22a22b33b37 100644 --- a/substrate/primitives/trie/src/recorder.rs +++ b/substrate/primitives/trie/src/recorder.rs @@ -107,6 +107,13 @@ impl<H: Hasher> Clone for Recorder<H> { } impl<H: Hasher> Recorder<H> { + /// Returns [`RecordedForKey`] per recorded key per trie. + /// + /// There are multiple tries when working with e.g. child tries. + pub fn recorded_keys(&self) -> HashMap<<H as Hasher>::Out, HashMap<Arc<[u8]>, RecordedForKey>> { + self.inner.lock().recorded_keys.clone() + } + /// Returns the recorder as [`TrieRecorder`](trie_db::TrieRecorder) compatible type. /// /// - `storage_root`: The storage root of the trie for which accesses are recorded. This is -- GitLab From 4a293bc5a25be637c06ce950a34490706597615b Mon Sep 17 00:00:00 2001 From: Liam Aharon Date: Fri, 1 Dec 2023 11:38:02 +0400 Subject: [PATCH 174/199] Enforce consistent and correct toml formatting (#2518) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Using taplo, this PR fixes all our broken and inconsistent toml formatting and adds CI to keep the files tidy. If people want, we can customise the format rules as described here: https://taplo.tamasfe.dev/configuration/formatter-options.html @ggwpez, I suggest zepter be used only for checking that features are propagated, leaving formatting to taplo to avoid duplicate work and conflicts.
TODO - [x] Use `exclude = [...]` syntax in taplo file to ignore zombienet tests instead of deleting the dir --------- Signed-off-by: Oliver Tale-Yazdi Co-authored-by: Oliver Tale-Yazdi Co-authored-by: Bastian Köcher --- .cargo/config.toml | 52 ++--- .config/lychee.toml | 56 ++--- .config/taplo.toml | 33 +++ .config/zepter.yaml | 5 +- .gitlab/pipeline/check.yml | 10 + .gitlab/spellcheck.toml | 18 +- Cargo.toml | 28 +-- bridges/bin/runtime-common/Cargo.toml | 4 +- bridges/modules/grandpa/Cargo.toml | 2 +- bridges/modules/messages/Cargo.toml | 2 +- bridges/modules/parachains/Cargo.toml | 2 +- bridges/modules/relayers/Cargo.toml | 2 +- .../modules/xcm-bridge-hub-router/Cargo.toml | 2 +- .../chain-asset-hub-rococo/Cargo.toml | 2 +- .../chain-asset-hub-westend/Cargo.toml | 2 +- .../chain-bridge-hub-cumulus/Cargo.toml | 2 +- .../chain-bridge-hub-kusama/Cargo.toml | 2 +- .../chain-bridge-hub-polkadot/Cargo.toml | 2 +- .../chain-bridge-hub-rococo/Cargo.toml | 2 +- .../chain-bridge-hub-westend/Cargo.toml | 2 +- bridges/primitives/chain-kusama/Cargo.toml | 2 +- .../chain-polkadot-bulletin/Cargo.toml | 2 +- bridges/primitives/chain-polkadot/Cargo.toml | 2 +- bridges/primitives/chain-rococo/Cargo.toml | 2 +- bridges/primitives/chain-westend/Cargo.toml | 2 +- bridges/primitives/header-chain/Cargo.toml | 2 +- bridges/primitives/messages/Cargo.toml | 4 +- bridges/primitives/parachains/Cargo.toml | 2 +- bridges/primitives/polkadot-core/Cargo.toml | 2 +- bridges/primitives/relayers/Cargo.toml | 4 +- bridges/primitives/runtime/Cargo.toml | 2 +- bridges/primitives/test-utils/Cargo.toml | 6 +- .../xcm-bridge-hub-router/Cargo.toml | 6 +- cumulus/client/collator/Cargo.toml | 2 +- cumulus/client/consensus/aura/Cargo.toml | 2 +- cumulus/client/consensus/common/Cargo.toml | 2 +- cumulus/client/network/Cargo.toml | 2 +- cumulus/client/pov-recovery/Cargo.toml | 2 +- .../Cargo.toml | 2 +- .../relay-chain-minimal-node/Cargo.toml | 1 - .../relay-chain-rpc-interface/Cargo.toml | 2 +- cumulus/client/service/Cargo.toml | 1 - cumulus/pallets/aura-ext/Cargo.toml | 18 +- cumulus/pallets/collator-selection/Cargo.toml | 20 +- cumulus/pallets/dmp-queue/Cargo.toml | 4 +- cumulus/pallets/parachain-system/Cargo.toml | 30 +-- .../parachain-system/proc-macro/Cargo.toml | 2 +- .../pallets/session-benchmarking/Cargo.toml | 14 +- cumulus/pallets/solo-to-para/Cargo.toml | 16 +- cumulus/pallets/xcm/Cargo.toml | 14 +- cumulus/pallets/xcmp-queue/Cargo.toml | 18 +- cumulus/parachain-template/node/Cargo.toml | 3 +- .../pallets/template/Cargo.toml | 14 +- cumulus/parachain-template/runtime/Cargo.toml | 72 +++---- cumulus/parachains/common/Cargo.toml | 20 +- .../emulated/common/Cargo.toml | 28 +-- .../tests/assets/asset-hub-rococo/Cargo.toml | 18 +- .../tests/assets/asset-hub-westend/Cargo.toml | 28 +-- .../bridges/bridge-hub-rococo/Cargo.toml | 16 +- .../bridges/bridge-hub-westend/Cargo.toml | 16 +- .../pallets/collective-content/Cargo.toml | 2 +- .../pallets/parachain-info/Cargo.toml | 10 +- cumulus/parachains/pallets/ping/Cargo.toml | 12 +- .../assets/asset-hub-rococo/Cargo.toml | 31 +-- .../assets/asset-hub-westend/Cargo.toml | 8 +- .../runtimes/assets/common/Cargo.toml | 18 +- .../runtimes/assets/test-utils/Cargo.toml | 24 +-- .../bridge-hubs/bridge-hub-rococo/Cargo.toml | 6 +- .../bridge-hubs/bridge-hub-westend/Cargo.toml | 6 +- .../bridge-hubs/test-utils/Cargo.toml | 30 +-- .../collectives-westend/Cargo.toml | 15 +- .../contracts/contracts-rococo/Cargo.toml | 70 +++---- .../glutton/glutton-westend/Cargo.toml | 56 ++--- 
.../runtimes/starters/seedling/Cargo.toml | 38 ++-- .../runtimes/starters/shell/Cargo.toml | 40 ++-- .../parachains/runtimes/test-utils/Cargo.toml | 34 +-- .../runtimes/testing/penpal/Cargo.toml | 80 +++---- .../testing/rococo-parachain/Cargo.toml | 62 +++--- cumulus/parachains/testnets-common/Cargo.toml | 8 +- cumulus/polkadot-parachain/Cargo.toml | 4 +- cumulus/primitives/aura/Cargo.toml | 16 +- cumulus/primitives/core/Cargo.toml | 20 +- .../primitives/parachain-inherent/Cargo.toml | 22 +- .../proof-size-hostfunction/Cargo.toml | 4 +- cumulus/primitives/timestamp/Cargo.toml | 10 +- cumulus/primitives/utility/Cargo.toml | 16 +- cumulus/test/client/Cargo.toml | 2 +- cumulus/test/relay-sproof-builder/Cargo.toml | 14 +- cumulus/test/runtime/Cargo.toml | 46 ++-- cumulus/test/service/Cargo.toml | 6 +- developer-hub/Cargo.toml | 4 +- docs/CONTRIBUTING.md | 4 + docs/STYLE_GUIDE.md | 34 +-- polkadot/Cargo.toml | 24 +-- polkadot/cli/Cargo.toml | 20 +- polkadot/core-primitives/Cargo.toml | 4 +- polkadot/erasure-coding/Cargo.toml | 2 +- polkadot/node/collation-generation/Cargo.toml | 2 +- polkadot/node/core/approval-voting/Cargo.toml | 3 +- polkadot/node/core/provisioner/Cargo.toml | 2 +- polkadot/node/core/pvf/Cargo.toml | 4 +- .../node/core/pvf/execute-worker/Cargo.toml | 2 +- .../node/core/pvf/prepare-worker/Cargo.toml | 2 +- polkadot/node/core/runtime-api/Cargo.toml | 2 +- polkadot/node/gum/proc-macro/Cargo.toml | 2 +- polkadot/node/malus/Cargo.toml | 4 +- polkadot/node/metrics/Cargo.toml | 6 +- .../network/approval-distribution/Cargo.toml | 3 +- polkadot/node/overseer/Cargo.toml | 14 +- polkadot/node/service/Cargo.toml | 10 +- polkadot/node/subsystem-types/Cargo.toml | 2 +- polkadot/node/subsystem-util/Cargo.toml | 4 +- polkadot/node/test/client/Cargo.toml | 2 +- polkadot/node/test/service/Cargo.toml | 4 +- polkadot/parachain/Cargo.toml | 8 +- polkadot/parachain/test-parachains/Cargo.toml | 4 +- .../test-parachains/adder/Cargo.toml | 10 +- .../parachain/test-parachains/halt/Cargo.toml | 2 +- .../test-parachains/undying/Cargo.toml | 8 +- .../undying/collator/Cargo.toml | 2 +- polkadot/primitives/Cargo.toml | 4 +- polkadot/roadmap/implementers-guide/book.toml | 2 +- polkadot/roadmap/phase-1.toml | 14 +- polkadot/runtime/common/Cargo.toml | 10 +- .../common/slot_range_helper/Cargo.toml | 4 +- polkadot/runtime/metrics/Cargo.toml | 6 +- polkadot/runtime/parachains/Cargo.toml | 12 +- polkadot/runtime/rococo/Cargo.toml | 12 +- polkadot/runtime/rococo/constants/Cargo.toml | 2 +- polkadot/runtime/test-runtime/Cargo.toml | 2 +- .../runtime/test-runtime/constants/Cargo.toml | 2 +- polkadot/runtime/westend/Cargo.toml | 10 +- polkadot/runtime/westend/constants/Cargo.toml | 2 +- polkadot/xcm/Cargo.toml | 6 +- polkadot/xcm/pallet-xcm-benchmarks/Cargo.toml | 14 +- polkadot/xcm/pallet-xcm/Cargo.toml | 14 +- polkadot/xcm/xcm-builder/Cargo.toml | 2 +- polkadot/xcm/xcm-executor/Cargo.toml | 2 +- .../xcm-executor/integration-tests/Cargo.toml | 4 +- substrate/bin/minimal/node/Cargo.toml | 8 +- substrate/bin/minimal/runtime/Cargo.toml | 8 +- substrate/bin/node-template/node/Cargo.toml | 6 +- .../node-template/pallets/template/Cargo.toml | 10 +- .../bin/node-template/runtime/Cargo.toml | 46 ++-- .../bin/node-template/rust-toolchain.toml | 4 +- substrate/bin/node/bench/Cargo.toml | 2 +- substrate/bin/node/cli/Cargo.toml | 28 +-- substrate/bin/node/inspect/Cargo.toml | 2 +- substrate/bin/node/primitives/Cargo.toml | 8 +- substrate/bin/node/runtime/Cargo.toml | 196 +++++++++--------- substrate/bin/node/testing/Cargo.toml 
| 8 +- substrate/client/api/Cargo.toml | 4 +- .../client/authority-discovery/Cargo.toml | 4 +- substrate/client/chain-spec/Cargo.toml | 2 +- substrate/client/cli/Cargo.toml | 12 +- .../client/consensus/babe/rpc/Cargo.toml | 2 +- .../client/consensus/beefy/rpc/Cargo.toml | 4 +- substrate/client/consensus/common/Cargo.toml | 2 +- .../client/consensus/grandpa/rpc/Cargo.toml | 4 +- .../client/consensus/manual-seal/Cargo.toml | 4 +- substrate/client/db/Cargo.toml | 2 +- substrate/client/executor/Cargo.toml | 2 +- .../client/executor/runtime-test/Cargo.toml | 12 +- substrate/client/executor/wasmtime/Cargo.toml | 4 +- .../merkle-mountain-range/rpc/Cargo.toml | 2 +- substrate/client/network-gossip/Cargo.toml | 2 +- substrate/client/network/Cargo.toml | 4 +- substrate/client/network/bitswap/Cargo.toml | 2 +- substrate/client/network/light/Cargo.toml | 2 +- substrate/client/network/sync/Cargo.toml | 2 +- substrate/client/network/test/Cargo.toml | 2 +- substrate/client/offchain/Cargo.toml | 4 +- substrate/client/rpc-api/Cargo.toml | 2 +- substrate/client/rpc-spec-v2/Cargo.toml | 4 +- substrate/client/service/Cargo.toml | 8 +- substrate/client/service/test/Cargo.toml | 4 +- substrate/client/storage-monitor/Cargo.toml | 2 +- substrate/client/sync-state-rpc/Cargo.toml | 2 +- .../client/tracing/proc-macro/Cargo.toml | 2 +- .../client/transaction-pool/api/Cargo.toml | 4 +- substrate/client/utils/Cargo.toml | 4 +- substrate/frame/Cargo.toml | 10 +- substrate/frame/alliance/Cargo.toml | 16 +- substrate/frame/asset-conversion/Cargo.toml | 20 +- substrate/frame/asset-rate/Cargo.toml | 16 +- substrate/frame/assets/Cargo.toml | 12 +- substrate/frame/atomic-swap/Cargo.toml | 14 +- substrate/frame/aura/Cargo.toml | 18 +- .../frame/authority-discovery/Cargo.toml | 16 +- substrate/frame/authorship/Cargo.toml | 10 +- substrate/frame/babe/Cargo.toml | 20 +- substrate/frame/bags-list/Cargo.toml | 2 +- substrate/frame/bags-list/fuzzer/Cargo.toml | 6 +- .../frame/bags-list/remote-tests/Cargo.toml | 2 +- substrate/frame/balances/Cargo.toml | 12 +- substrate/frame/beefy-mmr/Cargo.toml | 28 +-- substrate/frame/beefy/Cargo.toml | 16 +- substrate/frame/benchmarking/Cargo.toml | 24 +-- substrate/frame/benchmarking/pov/Cargo.toml | 14 +- substrate/frame/bounties/Cargo.toml | 18 +- substrate/frame/broker/Cargo.toml | 18 +- substrate/frame/child-bounties/Cargo.toml | 20 +- substrate/frame/collective/Cargo.toml | 16 +- substrate/frame/contracts/Cargo.toml | 26 +-- substrate/frame/contracts/fixtures/Cargo.toml | 2 - .../fixtures/contracts/common/Cargo.toml | 1 - .../frame/contracts/mock-network/Cargo.toml | 36 ++-- .../frame/contracts/primitives/Cargo.toml | 33 +++ substrate/frame/contracts/uapi/Cargo.toml | 7 +- substrate/frame/conviction-voting/Cargo.toml | 14 +- substrate/frame/core-fellowship/Cargo.toml | 18 +- substrate/frame/democracy/Cargo.toml | 16 +- .../election-provider-multi-phase/Cargo.toml | 26 +-- .../test-staking-e2e/Cargo.toml | 2 +- .../election-provider-support/Cargo.toml | 18 +- .../benchmarking/Cargo.toml | 14 +- .../solution-type/fuzzer/Cargo.toml | 4 +- substrate/frame/elections-phragmen/Cargo.toml | 20 +- substrate/frame/examples/Cargo.toml | 14 +- substrate/frame/examples/basic/Cargo.toml | 18 +- .../frame/examples/default-config/Cargo.toml | 12 +- substrate/frame/examples/dev-mode/Cargo.toml | 16 +- .../frame/examples/frame-crate/Cargo.toml | 6 +- .../frame/examples/kitchensink/Cargo.toml | 18 +- .../frame/examples/offchain-worker/Cargo.toml | 16 +- substrate/frame/examples/split/Cargo.toml | 16 +- 
substrate/frame/executive/Cargo.toml | 18 +- substrate/frame/fast-unstake/Cargo.toml | 20 +- substrate/frame/glutton/Cargo.toml | 16 +- substrate/frame/grandpa/Cargo.toml | 18 +- substrate/frame/identity/Cargo.toml | 14 +- substrate/frame/im-online/Cargo.toml | 14 +- substrate/frame/indices/Cargo.toml | 18 +- .../Cargo.toml | 10 +- substrate/frame/lottery/Cargo.toml | 12 +- substrate/frame/membership/Cargo.toml | 12 +- .../frame/merkle-mountain-range/Cargo.toml | 18 +- substrate/frame/message-queue/Cargo.toml | 20 +- substrate/frame/mixnet/Cargo.toml | 2 +- substrate/frame/multisig/Cargo.toml | 14 +- .../frame/nft-fractionalization/Cargo.toml | 16 +- substrate/frame/nfts/Cargo.toml | 16 +- substrate/frame/nfts/runtime-api/Cargo.toml | 4 +- substrate/frame/nicks/Cargo.toml | 12 +- substrate/frame/nis/Cargo.toml | 16 +- substrate/frame/node-authorization/Cargo.toml | 14 +- substrate/frame/nomination-pools/Cargo.toml | 18 +- .../nomination-pools/benchmarking/Cargo.toml | 26 +-- .../nomination-pools/runtime-api/Cargo.toml | 10 +- substrate/frame/offences/Cargo.toml | 14 +- .../frame/offences/benchmarking/Cargo.toml | 30 +-- substrate/frame/paged-list/Cargo.toml | 18 +- substrate/frame/paged-list/fuzzer/Cargo.toml | 6 +- substrate/frame/preimage/Cargo.toml | 18 +- substrate/frame/proxy/Cargo.toml | 14 +- substrate/frame/ranked-collective/Cargo.toml | 18 +- substrate/frame/recovery/Cargo.toml | 16 +- substrate/frame/referenda/Cargo.toml | 16 +- substrate/frame/remark/Cargo.toml | 18 +- substrate/frame/root-offences/Cargo.toml | 16 +- substrate/frame/root-testing/Cargo.toml | 14 +- substrate/frame/safe-mode/Cargo.toml | 18 +- substrate/frame/salary/Cargo.toml | 18 +- substrate/frame/scheduler/Cargo.toml | 18 +- substrate/frame/scored-pool/Cargo.toml | 12 +- substrate/frame/session/Cargo.toml | 20 +- .../frame/session/benchmarking/Cargo.toml | 18 +- substrate/frame/society/Cargo.toml | 16 +- substrate/frame/staking/Cargo.toml | 18 +- substrate/frame/staking/reward-fn/Cargo.toml | 6 +- .../frame/staking/runtime-api/Cargo.toml | 4 +- .../frame/state-trie-migration/Cargo.toml | 20 +- substrate/frame/statement/Cargo.toml | 20 +- substrate/frame/sudo/Cargo.toml | 14 +- substrate/frame/support/Cargo.toml | 38 ++-- substrate/frame/support/procedural/Cargo.toml | 2 +- .../frame/support/procedural/tools/Cargo.toml | 2 +- .../procedural/tools/derive/Cargo.toml | 2 +- substrate/frame/support/test/Cargo.toml | 34 +-- .../support/test/compile_pass/Cargo.toml | 12 +- .../frame/support/test/pallet/Cargo.toml | 8 +- .../support/test/stg_frame_crate/Cargo.toml | 6 +- substrate/frame/system/Cargo.toml | 12 +- .../frame/system/benchmarking/Cargo.toml | 14 +- .../frame/system/rpc/runtime-api/Cargo.toml | 6 +- substrate/frame/timestamp/Cargo.toml | 20 +- substrate/frame/tips/Cargo.toml | 18 +- .../frame/transaction-payment/Cargo.toml | 14 +- .../asset-conversion-tx-payment/Cargo.toml | 20 +- .../asset-tx-payment/Cargo.toml | 18 +- .../frame/transaction-payment/rpc/Cargo.toml | 2 +- .../rpc/runtime-api/Cargo.toml | 10 +- .../skip-feeless-payment/Cargo.toml | 10 +- .../frame/transaction-storage/Cargo.toml | 24 +-- substrate/frame/treasury/Cargo.toml | 16 +- substrate/frame/try-runtime/Cargo.toml | 14 +- substrate/frame/tx-pause/Cargo.toml | 16 +- substrate/frame/uniques/Cargo.toml | 12 +- substrate/frame/utility/Cargo.toml | 16 +- substrate/frame/vesting/Cargo.toml | 14 +- substrate/frame/whitelist/Cargo.toml | 14 +- substrate/primitives/api/Cargo.toml | 8 +- .../primitives/api/proc-macro/Cargo.toml | 6 +- 
.../primitives/application-crypto/Cargo.toml | 14 +- .../application-crypto/test/Cargo.toml | 4 +- substrate/primitives/arithmetic/Cargo.toml | 10 +- .../primitives/authority-discovery/Cargo.toml | 10 +- substrate/primitives/block-builder/Cargo.toml | 12 +- .../primitives/consensus/aura/Cargo.toml | 16 +- .../primitives/consensus/babe/Cargo.toml | 20 +- .../primitives/consensus/beefy/Cargo.toml | 4 +- .../primitives/consensus/grandpa/Cargo.toml | 16 +- substrate/primitives/consensus/pow/Cargo.toml | 10 +- .../primitives/consensus/sassafras/Cargo.toml | 2 +- .../primitives/consensus/slots/Cargo.toml | 10 +- substrate/primitives/core/Cargo.toml | 26 +-- substrate/primitives/core/hashing/Cargo.toml | 2 +- .../core/hashing/proc-macro/Cargo.toml | 2 +- .../primitives/crypto/ec-utils/Cargo.toml | 12 +- substrate/primitives/debug-derive/Cargo.toml | 2 +- substrate/primitives/externalities/Cargo.toml | 8 +- .../primitives/genesis-builder/Cargo.toml | 10 +- substrate/primitives/inherents/Cargo.toml | 6 +- substrate/primitives/io/Cargo.toml | 28 +-- substrate/primitives/keyring/Cargo.toml | 2 +- substrate/primitives/keystore/Cargo.toml | 12 +- .../merkle-mountain-range/Cargo.toml | 16 +- substrate/primitives/metadata-ir/Cargo.toml | 6 +- substrate/primitives/mixnet/Cargo.toml | 2 +- .../primitives/npos-elections/Cargo.toml | 12 +- .../npos-elections/fuzzer/Cargo.toml | 2 +- substrate/primitives/offchain/Cargo.toml | 10 +- .../primitives/runtime-interface/Cargo.toml | 10 +- .../runtime-interface/proc-macro/Cargo.toml | 2 +- .../test-wasm-deprecated/Cargo.toml | 8 +- .../runtime-interface/test-wasm/Cargo.toml | 10 +- substrate/primitives/runtime/Cargo.toml | 16 +- substrate/primitives/session/Cargo.toml | 12 +- substrate/primitives/staking/Cargo.toml | 12 +- substrate/primitives/state-machine/Cargo.toml | 12 +- .../primitives/statement-store/Cargo.toml | 18 +- substrate/primitives/std/Cargo.toml | 2 +- substrate/primitives/storage/Cargo.toml | 10 +- .../primitives/test-primitives/Cargo.toml | 10 +- substrate/primitives/timestamp/Cargo.toml | 8 +- substrate/primitives/tracing/Cargo.toml | 6 +- .../primitives/transaction-pool/Cargo.toml | 8 +- .../transaction-storage-proof/Cargo.toml | 12 +- substrate/primitives/trie/Cargo.toml | 6 +- substrate/primitives/version/Cargo.toml | 12 +- .../primitives/version/proc-macro/Cargo.toml | 4 +- .../primitives/wasm-interface/Cargo.toml | 8 +- substrate/primitives/weights/Cargo.toml | 14 +- substrate/scripts/ci/deny.toml | 134 ++++++------ substrate/test-utils/Cargo.toml | 2 +- substrate/test-utils/cli/Cargo.toml | 2 +- substrate/test-utils/client/Cargo.toml | 4 +- substrate/test-utils/runtime/Cargo.toml | 52 ++--- substrate/utils/binary-merkle-tree/Cargo.toml | 6 +- .../utils/frame/benchmarking-cli/Cargo.toml | 10 +- .../rpc/state-trie-migration-rpc/Cargo.toml | 2 +- substrate/utils/frame/rpc/support/Cargo.toml | 2 +- substrate/utils/frame/rpc/system/Cargo.toml | 2 +- .../utils/frame/try-runtime/cli/Cargo.toml | 4 +- 364 files changed, 2325 insertions(+), 2287 deletions(-) create mode 100644 .config/taplo.toml create mode 100644 substrate/frame/contracts/primitives/Cargo.toml diff --git a/.cargo/config.toml b/.cargo/config.toml index bd46659f799..8bbf7cdb4d9 100644 --- a/.cargo/config.toml +++ b/.cargo/config.toml @@ -1,7 +1,7 @@ [build] rustdocflags = [ - "-Dwarnings", - "-Arustdoc::redundant_explicit_links", # stylistic + "-Dwarnings", + "-Arustdoc::redundant_explicit_links", # stylistic ] # An auto defined `clippy` feature was introduced, @@ -12,30 +12,30 @@ 
rustdocflags = [ # RUSTFLAGS= cargo clippy [target.'cfg(feature = "cargo-clippy")'] rustflags = [ - "-Aclippy::all", - "-Dclippy::correctness", - "-Aclippy::if-same-then-else", - "-Asuspicious_double_ref_op", - "-Dclippy::complexity", - "-Aclippy::zero-prefixed-literal", # 00_1000_000 - "-Aclippy::type_complexity", # raison d'etre - "-Aclippy::nonminimal-bool", # maybe - "-Aclippy::borrowed-box", # Reasonable to fix this one - "-Aclippy::too-many-arguments", # (Turning this on would lead to) - "-Aclippy::unnecessary_cast", # Types may change - "-Aclippy::identity-op", # One case where we do 0 + - "-Aclippy::useless_conversion", # Types may change - "-Aclippy::unit_arg", # styalistic. - "-Aclippy::option-map-unit-fn", # styalistic - "-Aclippy::bind_instead_of_map", # styalistic - "-Aclippy::erasing_op", # E.g. 0 * DOLLARS - "-Aclippy::eq_op", # In tests we test equality. - "-Aclippy::while_immutable_condition", # false positives - "-Aclippy::needless_option_as_deref", # false positives - "-Aclippy::derivable_impls", # false positives - "-Aclippy::stable_sort_primitive", # prefer stable sort - "-Aclippy::extra-unused-type-parameters", # stylistic - "-Aclippy::default_constructed_unit_structs", # stylistic + "-Aclippy::all", + "-Dclippy::correctness", + "-Aclippy::if-same-then-else", + "-Asuspicious_double_ref_op", + "-Dclippy::complexity", + "-Aclippy::zero-prefixed-literal", # 00_1000_000 + "-Aclippy::type_complexity", # raison d'etre + "-Aclippy::nonminimal-bool", # maybe + "-Aclippy::borrowed-box", # Reasonable to fix this one + "-Aclippy::too-many-arguments", # (Turning this on would lead to) + "-Aclippy::unnecessary_cast", # Types may change + "-Aclippy::identity-op", # One case where we do 0 + + "-Aclippy::useless_conversion", # Types may change + "-Aclippy::unit_arg", # styalistic. + "-Aclippy::option-map-unit-fn", # styalistic + "-Aclippy::bind_instead_of_map", # styalistic + "-Aclippy::erasing_op", # E.g. 0 * DOLLARS + "-Aclippy::eq_op", # In tests we test equality. + "-Aclippy::while_immutable_condition", # false positives + "-Aclippy::needless_option_as_deref", # false positives + "-Aclippy::derivable_impls", # false positives + "-Aclippy::stable_sort_primitive", # prefer stable sort + "-Aclippy::extra-unused-type-parameters", # stylistic + "-Aclippy::default_constructed_unit_structs", # stylistic ] [env] diff --git a/.config/lychee.toml b/.config/lychee.toml index 9b2ae069931..72c1e66a4df 100644 --- a/.config/lychee.toml +++ b/.config/lychee.toml @@ -15,36 +15,36 @@ accept = [ 200, # Rate limited - GitHub likes to throw this. 
- 429 + 429, ] -exclude_path = [ "./target" ] +exclude_path = ["./target"] exclude = [ - # Place holders (no need to fix these): - "http://visitme/", - "https://visitme/", - - # TODO - "https://docs.substrate.io/main-docs/build/custom-rpc/#public-rpcs", - "https://docs.substrate.io/rustdocs/latest/sp_api/macro.decl_runtime_apis.html", - "https://github.com/ipfs/js-ipfs-bitswap/blob/", - "https://github.com/paritytech/polkadot-sdk/substrate/frame/timestamp", - "https://github.com/paritytech/substrate/frame/fast-unstake", - "https://github.com/zkcrypto/bls12_381/blob/e224ad4ea1babfc582ccd751c2bf128611d10936/src/test-data/mod.rs", - "https://polkadot.network/the-path-of-a-parachain-block/", - "https://research.web3.foundation/en/latest/polkadot/BABE/Babe/#6-practical-results", - "https://research.web3.foundation/en/latest/polkadot/block-production/Babe.html", - "https://research.web3.foundation/en/latest/polkadot/block-production/Babe.html#-6.-practical-results", - "https://research.web3.foundation/en/latest/polkadot/networking/3-avail-valid.html#topology", - "https://research.web3.foundation/en/latest/polkadot/NPoS/3.%20Balancing.html", - "https://research.web3.foundation/en/latest/polkadot/overview/2-token-economics.html", - "https://research.web3.foundation/en/latest/polkadot/overview/2-token-economics.html#inflation-model", - "https://research.web3.foundation/en/latest/polkadot/slashing/npos.html", - "https://research.web3.foundation/en/latest/polkadot/Token%20Economics.html#inflation-model", - "https://rpc.polkadot.io/", - "https://w3f.github.io/parachain-implementers-guide/node/approval/approval-distribution.html", - "https://w3f.github.io/parachain-implementers-guide/node/index.html", - "https://w3f.github.io/parachain-implementers-guide/protocol-chain-selection.html", - "https://w3f.github.io/parachain-implementers-guide/runtime/session_info.html", + # Place holders (no need to fix these): + "http://visitme/", + "https://visitme/", + + # TODO + "https://docs.substrate.io/main-docs/build/custom-rpc/#public-rpcs", + "https://docs.substrate.io/rustdocs/latest/sp_api/macro.decl_runtime_apis.html", + "https://github.com/ipfs/js-ipfs-bitswap/blob/", + "https://github.com/paritytech/polkadot-sdk/substrate/frame/timestamp", + "https://github.com/paritytech/substrate/frame/fast-unstake", + "https://github.com/zkcrypto/bls12_381/blob/e224ad4ea1babfc582ccd751c2bf128611d10936/src/test-data/mod.rs", + "https://polkadot.network/the-path-of-a-parachain-block/", + "https://research.web3.foundation/en/latest/polkadot/BABE/Babe/#6-practical-results", + "https://research.web3.foundation/en/latest/polkadot/NPoS/3.%20Balancing.html", + "https://research.web3.foundation/en/latest/polkadot/Token%20Economics.html#inflation-model", + "https://research.web3.foundation/en/latest/polkadot/block-production/Babe.html", + "https://research.web3.foundation/en/latest/polkadot/block-production/Babe.html#-6.-practical-results", + "https://research.web3.foundation/en/latest/polkadot/networking/3-avail-valid.html#topology", + "https://research.web3.foundation/en/latest/polkadot/overview/2-token-economics.html", + "https://research.web3.foundation/en/latest/polkadot/overview/2-token-economics.html#inflation-model", + "https://research.web3.foundation/en/latest/polkadot/slashing/npos.html", + "https://rpc.polkadot.io/", + "https://w3f.github.io/parachain-implementers-guide/node/approval/approval-distribution.html", + "https://w3f.github.io/parachain-implementers-guide/node/index.html", + 
"https://w3f.github.io/parachain-implementers-guide/protocol-chain-selection.html", + "https://w3f.github.io/parachain-implementers-guide/runtime/session_info.html", ] diff --git a/.config/taplo.toml b/.config/taplo.toml new file mode 100644 index 00000000000..ffe0417e42b --- /dev/null +++ b/.config/taplo.toml @@ -0,0 +1,33 @@ +# all options https://taplo.tamasfe.dev/configuration/formatter-options.html + +# ignore zombienet as they do some deliberate custom toml stuff +exclude = [ + "cumulus/zombienet/**", + "polkadot/node/malus/integrationtests/**", + "polkadot/zombienet_tests/**", + "substrate/zombienet/**", +] + +# global rules +[formatting] +reorder_arrays = true +inline_table_expand = false +array_auto_expand = false +array_auto_collapse = false +indent_string = " " # tab + +# don't re-order order-dependent deb package metadata +[[rule]] +include = ["polkadot/Cargo.toml"] +keys = ["package.metadata.deb"] + +[rule.formatting] +reorder_arrays = false + +# don't re-order order-dependent rustflags +[[rule]] +include = [".cargo/config.toml"] +keys = ["build", "target.'cfg(feature = \"cargo-clippy\")'"] + +[rule.formatting] +reorder_arrays = false diff --git a/.config/zepter.yaml b/.config/zepter.yaml index 33bf3a044cf..f701392d16b 100644 --- a/.config/zepter.yaml +++ b/.config/zepter.yaml @@ -19,18 +19,15 @@ workflows: '--left-side-outside-workspace=ignore', # Some features imply that they activate a specific dependency as non-optional. Otherwise the default behaviour with a `?` is used. '--feature-enables-dep=try-runtime:frame-try-runtime,runtime-benchmarks:frame-benchmarking', - # Actually modify the files and not just report the issues: + # Auxillary flags: '--offline', '--locked', '--show-path', '--quiet', ] - # Format the features into canonical format: - - ['format', 'features', '--offline', '--locked', '--quiet'] # Same as `check`, but with the `--fix` flag. default: - [ $check.0, '--fix' ] - - [ $check.1, '--fix' ] # Will be displayed when any workflow fails: help: diff --git a/.gitlab/pipeline/check.yml b/.gitlab/pipeline/check.yml index 3ef3865aa43..98435248eb4 100644 --- a/.gitlab/pipeline/check.yml +++ b/.gitlab/pipeline/check.yml @@ -90,6 +90,16 @@ check-rust-feature-propagation: - cargo install --locked --version 0.13.3 -q -f zepter && zepter --version - zepter run check +check-toml-format: + stage: check + extends: + - .kubernetes-env + - .common-refs + script: + - cargo install taplo-cli --locked --version 0.8.1 + - taplo format --check --config .config/taplo.toml + - echo "Please run `taplo format --config .config/taplo.toml` to fix any toml formatting issues" + # More info can be found here: https://github.com/paritytech/polkadot/pull/5865 .check-runtime-migration: stage: check diff --git a/.gitlab/spellcheck.toml b/.gitlab/spellcheck.toml index 025c7a0a461..8c60bf6915d 100644 --- a/.gitlab/spellcheck.toml +++ b/.gitlab/spellcheck.toml @@ -8,20 +8,20 @@ use_builtin = true [hunspell.quirks] # He tagged it as 'TheGreatestOfAllTimes' transform_regex = [ -# `Type`'s + # `Type`'s "^'([^\\s])'$", -# 5x -# 10.7% + # 5x + # 10.7% "^[0-9_]+(?:\\.[0-9]*)?(x|%)$", -# Transforms' + # Transforms' "^(.*)'$", -# backslashes - "^\\+$", + # backslashes "^[0-9]*+k|MB|Mb|ms|Mbit|nd|th|rd$", -# single char `=` `>` `%` .. + "^\\+$", + # single char `=` `>` `%` .. 
"^=|>|<|%$", -# 22_100 - "^(?:[0-9]+_)+[0-9]+$" + # 22_100 + "^(?:[0-9]+_)+[0-9]+$", ] allow_concatenation = true allow_dashes = true diff --git a/Cargo.toml b/Cargo.toml index b585ceb5b44..0a7bf912e48 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -8,7 +8,6 @@ license = "GPL-3.0-only" resolver = "2" members = [ - "developer-hub", "bridges/bin/runtime-common", "bridges/modules/grandpa", "bridges/modules/messages", @@ -23,8 +22,8 @@ members = [ "bridges/primitives/chain-bridge-hub-rococo", "bridges/primitives/chain-bridge-hub-westend", "bridges/primitives/chain-kusama", - "bridges/primitives/chain-polkadot-bulletin", "bridges/primitives/chain-polkadot", + "bridges/primitives/chain-polkadot-bulletin", "bridges/primitives/chain-rococo", "bridges/primitives/chain-westend", "bridges/primitives/header-chain", @@ -70,6 +69,7 @@ members = [ "cumulus/parachains/integration-tests/emulated/chains/relays/westend", "cumulus/parachains/integration-tests/emulated/common", "cumulus/parachains/integration-tests/emulated/networks/rococo-system", + "cumulus/parachains/integration-tests/emulated/networks/rococo-westend-system", "cumulus/parachains/integration-tests/emulated/networks/westend-system", "cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo", "cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend", @@ -106,6 +106,7 @@ members = [ "cumulus/test/runtime", "cumulus/test/service", "cumulus/xcm/xcm-emulator", + "developer-hub", "polkadot", "polkadot/cli", "polkadot/core-primitives", @@ -123,8 +124,8 @@ members = [ "polkadot/node/core/parachains-inherent", "polkadot/node/core/prospective-parachains", "polkadot/node/core/provisioner", - "polkadot/node/core/pvf-checker", "polkadot/node/core/pvf", + "polkadot/node/core/pvf-checker", "polkadot/node/core/pvf/common", "polkadot/node/core/pvf/execute-worker", "polkadot/node/core/pvf/prepare-worker", @@ -147,10 +148,10 @@ members = [ "polkadot/node/overseer", "polkadot/node/primitives", "polkadot/node/service", + "polkadot/node/subsystem", "polkadot/node/subsystem-test-helpers", "polkadot/node/subsystem-types", "polkadot/node/subsystem-util", - "polkadot/node/subsystem", "polkadot/node/test/client", "polkadot/node/test/service", "polkadot/node/tracking-allocator", @@ -179,8 +180,8 @@ members = [ "polkadot/utils/generate-bags", "polkadot/utils/remote-ext-tests/bags-list", "polkadot/xcm", - "polkadot/xcm/pallet-xcm-benchmarks", "polkadot/xcm/pallet-xcm", + "polkadot/xcm/pallet-xcm-benchmarks", "polkadot/xcm/procedural", "polkadot/xcm/xcm-builder", "polkadot/xcm/xcm-executor", @@ -232,8 +233,8 @@ members = [ "substrate/client/merkle-mountain-range", "substrate/client/merkle-mountain-range/rpc", "substrate/client/mixnet", - "substrate/client/network-gossip", "substrate/client/network", + "substrate/client/network-gossip", "substrate/client/network/bitswap", "substrate/client/network/common", "substrate/client/network/light", @@ -243,10 +244,10 @@ members = [ "substrate/client/network/transactions", "substrate/client/offchain", "substrate/client/proposer-metrics", + "substrate/client/rpc", "substrate/client/rpc-api", "substrate/client/rpc-servers", "substrate/client/rpc-spec-v2", - "substrate/client/rpc", "substrate/client/service", "substrate/client/service/test", "substrate/client/state-db", @@ -274,8 +275,8 @@ members = [ "substrate/frame/bags-list/fuzzer", "substrate/frame/bags-list/remote-tests", "substrate/frame/balances", - "substrate/frame/beefy-mmr", "substrate/frame/beefy", + "substrate/frame/beefy-mmr", 
"substrate/frame/benchmarking", "substrate/frame/benchmarking/pov", "substrate/frame/bounties", @@ -285,9 +286,9 @@ members = [ "substrate/frame/contracts", "substrate/frame/contracts/fixtures", "substrate/frame/contracts/fixtures/contracts/common", - "substrate/frame/contracts/uapi", "substrate/frame/contracts/mock-network", "substrate/frame/contracts/proc-macro", + "substrate/frame/contracts/uapi", "substrate/frame/conviction-voting", "substrate/frame/core-fellowship", "substrate/frame/democracy", @@ -424,12 +425,12 @@ members = [ "substrate/primitives/offchain", "substrate/primitives/panic-handler", "substrate/primitives/rpc", + "substrate/primitives/runtime", "substrate/primitives/runtime-interface", "substrate/primitives/runtime-interface/proc-macro", - "substrate/primitives/runtime-interface/test-wasm-deprecated", - "substrate/primitives/runtime-interface/test-wasm", "substrate/primitives/runtime-interface/test", - "substrate/primitives/runtime", + "substrate/primitives/runtime-interface/test-wasm", + "substrate/primitives/runtime-interface/test-wasm-deprecated", "substrate/primitives/session", "substrate/primitives/staking", "substrate/primitives/state-machine", @@ -468,9 +469,8 @@ members = [ "substrate/utils/frame/try-runtime/cli", "substrate/utils/prometheus", "substrate/utils/wasm-builder", - "cumulus/parachains/integration-tests/emulated/networks/rococo-westend-system", ] -default-members = [ "polkadot", "substrate/bin/node/cli" ] +default-members = ["polkadot", "substrate/bin/node/cli"] [profile.release] # Polkadot runtime requires unwinding. diff --git a/bridges/bin/runtime-common/Cargo.toml b/bridges/bin/runtime-common/Cargo.toml index 0ccf30987e8..a5aa53f2633 100644 --- a/bridges/bin/runtime-common/Cargo.toml +++ b/bridges/bin/runtime-common/Cargo.toml @@ -50,7 +50,7 @@ bp-test-utils = { path = "../../primitives/test-utils" } pallet-balances = { path = "../../../substrate/frame/balances" } [features] -default = [ "std" ] +default = ["std"] std = [ "bp-header-chain/std", "bp-messages/std", @@ -92,4 +92,4 @@ runtime-benchmarks = [ "sp-runtime/runtime-benchmarks", "xcm-builder/runtime-benchmarks", ] -integrity-test = [ "static_assertions" ] +integrity-test = ["static_assertions"] diff --git a/bridges/modules/grandpa/Cargo.toml b/bridges/modules/grandpa/Cargo.toml index dbbe18febc6..573edbf5a65 100644 --- a/bridges/modules/grandpa/Cargo.toml +++ b/bridges/modules/grandpa/Cargo.toml @@ -37,7 +37,7 @@ sp-core = { path = "../../../substrate/primitives/core" } sp-io = { path = "../../../substrate/primitives/io" } [features] -default = [ "std" ] +default = ["std"] std = [ "bp-header-chain/std", "bp-runtime/std", diff --git a/bridges/modules/messages/Cargo.toml b/bridges/modules/messages/Cargo.toml index a5c86693309..751ef45168d 100644 --- a/bridges/modules/messages/Cargo.toml +++ b/bridges/modules/messages/Cargo.toml @@ -31,7 +31,7 @@ pallet-balances = { path = "../../../substrate/frame/balances" } sp-io = { path = "../../../substrate/primitives/io" } [features] -default = [ "std" ] +default = ["std"] std = [ "bp-messages/std", "bp-runtime/std", diff --git a/bridges/modules/parachains/Cargo.toml b/bridges/modules/parachains/Cargo.toml index 0d1b61ddea8..4af8997c5f3 100644 --- a/bridges/modules/parachains/Cargo.toml +++ b/bridges/modules/parachains/Cargo.toml @@ -35,7 +35,7 @@ sp-core = { path = "../../../substrate/primitives/core" } sp-io = { path = "../../../substrate/primitives/io" } [features] -default = [ "std" ] +default = ["std"] std = [ "bp-header-chain/std", 
"bp-parachains/std", diff --git a/bridges/modules/relayers/Cargo.toml b/bridges/modules/relayers/Cargo.toml index 6ec1971e3f6..3011a11db5c 100644 --- a/bridges/modules/relayers/Cargo.toml +++ b/bridges/modules/relayers/Cargo.toml @@ -34,7 +34,7 @@ sp-io = { path = "../../../substrate/primitives/io" } sp-runtime = { path = "../../../substrate/primitives/runtime" } [features] -default = [ "std" ] +default = ["std"] std = [ "bp-messages/std", "bp-relayers/std", diff --git a/bridges/modules/xcm-bridge-hub-router/Cargo.toml b/bridges/modules/xcm-bridge-hub-router/Cargo.toml index 56b9139d7d5..e4d25fae9d3 100644 --- a/bridges/modules/xcm-bridge-hub-router/Cargo.toml +++ b/bridges/modules/xcm-bridge-hub-router/Cargo.toml @@ -34,7 +34,7 @@ sp-io = { path = "../../../substrate/primitives/io" } sp-std = { path = "../../../substrate/primitives/std" } [features] -default = [ "std" ] +default = ["std"] std = [ "bp-xcm-bridge-hub-router/std", "codec/std", diff --git a/bridges/primitives/chain-asset-hub-rococo/Cargo.toml b/bridges/primitives/chain-asset-hub-rococo/Cargo.toml index 088510adcec..889475840b3 100644 --- a/bridges/primitives/chain-asset-hub-rococo/Cargo.toml +++ b/bridges/primitives/chain-asset-hub-rococo/Cargo.toml @@ -17,7 +17,7 @@ frame-support = { path = "../../../substrate/frame/support", default-features = bp-xcm-bridge-hub-router = { path = "../xcm-bridge-hub-router", default-features = false } [features] -default = [ "std" ] +default = ["std"] std = [ "bp-xcm-bridge-hub-router/std", "codec/std", diff --git a/bridges/primitives/chain-asset-hub-westend/Cargo.toml b/bridges/primitives/chain-asset-hub-westend/Cargo.toml index c880f159ac1..84b9604a61b 100644 --- a/bridges/primitives/chain-asset-hub-westend/Cargo.toml +++ b/bridges/primitives/chain-asset-hub-westend/Cargo.toml @@ -17,7 +17,7 @@ frame-support = { path = "../../../substrate/frame/support", default-features = bp-xcm-bridge-hub-router = { path = "../xcm-bridge-hub-router", default-features = false } [features] -default = [ "std" ] +default = ["std"] std = [ "bp-xcm-bridge-hub-router/std", "codec/std", diff --git a/bridges/primitives/chain-bridge-hub-cumulus/Cargo.toml b/bridges/primitives/chain-bridge-hub-cumulus/Cargo.toml index 46697913674..dab1b065f6f 100644 --- a/bridges/primitives/chain-bridge-hub-cumulus/Cargo.toml +++ b/bridges/primitives/chain-bridge-hub-cumulus/Cargo.toml @@ -24,7 +24,7 @@ sp-std = { path = "../../../substrate/primitives/std", default-features = false polkadot-primitives = { path = "../../../polkadot/primitives", default-features = false } [features] -default = [ "std" ] +default = ["std"] std = [ "bp-messages/std", "bp-polkadot-core/std", diff --git a/bridges/primitives/chain-bridge-hub-kusama/Cargo.toml b/bridges/primitives/chain-bridge-hub-kusama/Cargo.toml index c4cd229ef43..8e6364101f2 100644 --- a/bridges/primitives/chain-bridge-hub-kusama/Cargo.toml +++ b/bridges/primitives/chain-bridge-hub-kusama/Cargo.toml @@ -21,7 +21,7 @@ sp-runtime = { path = "../../../substrate/primitives/runtime", default-features sp-std = { path = "../../../substrate/primitives/std", default-features = false } [features] -default = [ "std" ] +default = ["std"] std = [ "bp-bridge-hub-cumulus/std", "bp-messages/std", diff --git a/bridges/primitives/chain-bridge-hub-polkadot/Cargo.toml b/bridges/primitives/chain-bridge-hub-polkadot/Cargo.toml index 4913d87e5fb..961d4aeb2e2 100644 --- a/bridges/primitives/chain-bridge-hub-polkadot/Cargo.toml +++ b/bridges/primitives/chain-bridge-hub-polkadot/Cargo.toml @@ -22,7 +22,7 @@ 
sp-runtime = { path = "../../../substrate/primitives/runtime", default-features sp-std = { path = "../../../substrate/primitives/std", default-features = false } [features] -default = [ "std" ] +default = ["std"] std = [ "bp-bridge-hub-cumulus/std", "bp-messages/std", diff --git a/bridges/primitives/chain-bridge-hub-rococo/Cargo.toml b/bridges/primitives/chain-bridge-hub-rococo/Cargo.toml index 05b8163e9fc..28fe3c283bc 100644 --- a/bridges/primitives/chain-bridge-hub-rococo/Cargo.toml +++ b/bridges/primitives/chain-bridge-hub-rococo/Cargo.toml @@ -21,7 +21,7 @@ sp-runtime = { path = "../../../substrate/primitives/runtime", default-features sp-std = { path = "../../../substrate/primitives/std", default-features = false } [features] -default = [ "std" ] +default = ["std"] std = [ "bp-bridge-hub-cumulus/std", "bp-messages/std", diff --git a/bridges/primitives/chain-bridge-hub-westend/Cargo.toml b/bridges/primitives/chain-bridge-hub-westend/Cargo.toml index 22daf280868..409f84840a8 100644 --- a/bridges/primitives/chain-bridge-hub-westend/Cargo.toml +++ b/bridges/primitives/chain-bridge-hub-westend/Cargo.toml @@ -22,7 +22,7 @@ sp-runtime = { path = "../../../substrate/primitives/runtime", default-features sp-std = { path = "../../../substrate/primitives/std", default-features = false } [features] -default = [ "std" ] +default = ["std"] std = [ "bp-bridge-hub-cumulus/std", "bp-messages/std", diff --git a/bridges/primitives/chain-kusama/Cargo.toml b/bridges/primitives/chain-kusama/Cargo.toml index 2d63c3f374f..41570d4f9bc 100644 --- a/bridges/primitives/chain-kusama/Cargo.toml +++ b/bridges/primitives/chain-kusama/Cargo.toml @@ -21,7 +21,7 @@ sp-api = { path = "../../../substrate/primitives/api", default-features = false sp-std = { path = "../../../substrate/primitives/std", default-features = false } [features] -default = [ "std" ] +default = ["std"] std = [ "bp-header-chain/std", "bp-polkadot-core/std", diff --git a/bridges/primitives/chain-polkadot-bulletin/Cargo.toml b/bridges/primitives/chain-polkadot-bulletin/Cargo.toml index 1dd45ba95fd..3be056dd0a7 100644 --- a/bridges/primitives/chain-polkadot-bulletin/Cargo.toml +++ b/bridges/primitives/chain-polkadot-bulletin/Cargo.toml @@ -26,7 +26,7 @@ sp-runtime = { path = "../../../substrate/primitives/runtime", default-features sp-std = { path = "../../../substrate/primitives/std", default-features = false } [features] -default = [ "std" ] +default = ["std"] std = [ "bp-header-chain/std", "bp-messages/std", diff --git a/bridges/primitives/chain-polkadot/Cargo.toml b/bridges/primitives/chain-polkadot/Cargo.toml index 539b10ef9c6..579e997e0ab 100644 --- a/bridges/primitives/chain-polkadot/Cargo.toml +++ b/bridges/primitives/chain-polkadot/Cargo.toml @@ -21,7 +21,7 @@ sp-api = { path = "../../../substrate/primitives/api", default-features = false sp-std = { path = "../../../substrate/primitives/std", default-features = false } [features] -default = [ "std" ] +default = ["std"] std = [ "bp-header-chain/std", "bp-polkadot-core/std", diff --git a/bridges/primitives/chain-rococo/Cargo.toml b/bridges/primitives/chain-rococo/Cargo.toml index 469be1dbd33..dc7c482a4cc 100644 --- a/bridges/primitives/chain-rococo/Cargo.toml +++ b/bridges/primitives/chain-rococo/Cargo.toml @@ -21,7 +21,7 @@ sp-api = { path = "../../../substrate/primitives/api", default-features = false sp-std = { path = "../../../substrate/primitives/std", default-features = false } [features] -default = [ "std" ] +default = ["std"] std = [ "bp-header-chain/std", "bp-polkadot-core/std", diff 
--git a/bridges/primitives/chain-westend/Cargo.toml b/bridges/primitives/chain-westend/Cargo.toml index 797621bbce2..7c74cb1361b 100644 --- a/bridges/primitives/chain-westend/Cargo.toml +++ b/bridges/primitives/chain-westend/Cargo.toml @@ -21,7 +21,7 @@ sp-api = { path = "../../../substrate/primitives/api", default-features = false sp-std = { path = "../../../substrate/primitives/std", default-features = false } [features] -default = [ "std" ] +default = ["std"] std = [ "bp-header-chain/std", "bp-polkadot-core/std", diff --git a/bridges/primitives/header-chain/Cargo.toml b/bridges/primitives/header-chain/Cargo.toml index 19b2819bddc..bc92054e5dc 100644 --- a/bridges/primitives/header-chain/Cargo.toml +++ b/bridges/primitives/header-chain/Cargo.toml @@ -30,7 +30,7 @@ hex = "0.4" hex-literal = "0.4" [features] -default = [ "std" ] +default = ["std"] std = [ "bp-runtime/std", "codec/std", diff --git a/bridges/primitives/messages/Cargo.toml b/bridges/primitives/messages/Cargo.toml index 7a61643a0bc..c2f43523aaf 100644 --- a/bridges/primitives/messages/Cargo.toml +++ b/bridges/primitives/messages/Cargo.toml @@ -7,7 +7,7 @@ edition.workspace = true license = "GPL-3.0-or-later WITH Classpath-exception-2.0" [dependencies] -codec = { package = "parity-scale-codec", version = "3.1.5", default-features = false, features = ["derive", "bit-vec"] } +codec = { package = "parity-scale-codec", version = "3.1.5", default-features = false, features = ["bit-vec", "derive"] } scale-info = { version = "2.10.0", default-features = false, features = ["bit-vec", "derive"] } serde = { version = "1.0", default-features = false, features = ["alloc", "derive"] } @@ -27,7 +27,7 @@ hex = "0.4" hex-literal = "0.4" [features] -default = [ "std" ] +default = ["std"] std = [ "bp-header-chain/std", "bp-runtime/std", diff --git a/bridges/primitives/parachains/Cargo.toml b/bridges/primitives/parachains/Cargo.toml index 11e9336f66a..a339203fd66 100644 --- a/bridges/primitives/parachains/Cargo.toml +++ b/bridges/primitives/parachains/Cargo.toml @@ -25,7 +25,7 @@ sp-runtime = { path = "../../../substrate/primitives/runtime", default-features sp-std = { path = "../../../substrate/primitives/std", default-features = false } [features] -default = [ "std" ] +default = ["std"] std = [ "bp-header-chain/std", "bp-polkadot-core/std", diff --git a/bridges/primitives/polkadot-core/Cargo.toml b/bridges/primitives/polkadot-core/Cargo.toml index e2bd4c29522..67fb9af8b21 100644 --- a/bridges/primitives/polkadot-core/Cargo.toml +++ b/bridges/primitives/polkadot-core/Cargo.toml @@ -29,7 +29,7 @@ sp-std = { path = "../../../substrate/primitives/std", default-features = false hex = "0.4" [features] -default = [ "std" ] +default = ["std"] std = [ "bp-messages/std", "bp-runtime/std", diff --git a/bridges/primitives/relayers/Cargo.toml b/bridges/primitives/relayers/Cargo.toml index ffed2debbe6..cf94ca44d00 100644 --- a/bridges/primitives/relayers/Cargo.toml +++ b/bridges/primitives/relayers/Cargo.toml @@ -7,7 +7,7 @@ edition.workspace = true license = "GPL-3.0-or-later WITH Classpath-exception-2.0" [dependencies] -codec = { package = "parity-scale-codec", version = "3.1.5", default-features = false, features = ["derive", "bit-vec"] } +codec = { package = "parity-scale-codec", version = "3.1.5", default-features = false, features = ["bit-vec", "derive"] } scale-info = { version = "2.10.0", default-features = false, features = ["bit-vec", "derive"] } # Bridge Dependencies @@ -26,7 +26,7 @@ hex = "0.4" hex-literal = "0.4" [features] -default = [ "std" ] 
+default = ["std"] std = [ "bp-messages/std", "bp-runtime/std", diff --git a/bridges/primitives/runtime/Cargo.toml b/bridges/primitives/runtime/Cargo.toml index 48f6722c982..a713f636bb8 100644 --- a/bridges/primitives/runtime/Cargo.toml +++ b/bridges/primitives/runtime/Cargo.toml @@ -31,7 +31,7 @@ trie-db = { version = "0.28.0", default-features = false } hex-literal = "0.4" [features] -default = [ "std" ] +default = ["std"] std = [ "codec/std", "frame-support/std", diff --git a/bridges/primitives/test-utils/Cargo.toml b/bridges/primitives/test-utils/Cargo.toml index 9836c1877f0..fac5c5b5896 100644 --- a/bridges/primitives/test-utils/Cargo.toml +++ b/bridges/primitives/test-utils/Cargo.toml @@ -7,9 +7,9 @@ edition.workspace = true license = "GPL-3.0-or-later WITH Classpath-exception-2.0" [dependencies] -bp-header-chain = { path = "../header-chain", default-features = false } +bp-header-chain = { path = "../header-chain", default-features = false } bp-parachains = { path = "../parachains", default-features = false } -bp-polkadot-core = { path = "../polkadot-core", default-features = false } +bp-polkadot-core = { path = "../polkadot-core", default-features = false } bp-runtime = { path = "../runtime", default-features = false } codec = { package = "parity-scale-codec", version = "3.1.5", default-features = false } ed25519-dalek = { version = "2.0", default-features = false } @@ -22,7 +22,7 @@ sp-std = { path = "../../../substrate/primitives/std", default-features = false sp-trie = { path = "../../../substrate/primitives/trie", default-features = false } [features] -default = [ "std" ] +default = ["std"] std = [ "bp-header-chain/std", "bp-parachains/std", diff --git a/bridges/primitives/xcm-bridge-hub-router/Cargo.toml b/bridges/primitives/xcm-bridge-hub-router/Cargo.toml index fb079b48e42..5a49db62fec 100644 --- a/bridges/primitives/xcm-bridge-hub-router/Cargo.toml +++ b/bridges/primitives/xcm-bridge-hub-router/Cargo.toml @@ -7,7 +7,7 @@ edition.workspace = true license = "GPL-3.0-or-later WITH Classpath-exception-2.0" [dependencies] -codec = { package = "parity-scale-codec", version = "3.1.5", default-features = false, features = ["derive", "bit-vec"] } +codec = { package = "parity-scale-codec", version = "3.1.5", default-features = false, features = ["bit-vec", "derive"] } scale-info = { version = "2.10.0", default-features = false, features = ["bit-vec", "derive"] } # Substrate Dependencies @@ -15,5 +15,5 @@ sp-runtime = { path = "../../../substrate/primitives/runtime", default-features sp-core = { path = "../../../substrate/primitives/core", default-features = false } [features] -default = [ "std" ] -std = [ "codec/std", "scale-info/std", "sp-core/std", "sp-runtime/std" ] +default = ["std"] +std = ["codec/std", "scale-info/std", "sp-core/std", "sp-runtime/std"] diff --git a/cumulus/client/collator/Cargo.toml b/cumulus/client/collator/Cargo.toml index ad9f01ed083..7ac0bbfe6f1 100644 --- a/cumulus/client/collator/Cargo.toml +++ b/cumulus/client/collator/Cargo.toml @@ -8,7 +8,7 @@ license = "GPL-3.0-or-later WITH Classpath-exception-2.0" [dependencies] parking_lot = "0.12.1" -codec = { package = "parity-scale-codec", version = "3.0.0", features = [ "derive" ] } +codec = { package = "parity-scale-codec", version = "3.0.0", features = ["derive"] } futures = "0.3.21" tracing = "0.1.25" diff --git a/cumulus/client/consensus/aura/Cargo.toml b/cumulus/client/consensus/aura/Cargo.toml index f440270c982..e07f10d6090 100644 --- a/cumulus/client/consensus/aura/Cargo.toml +++ 
b/cumulus/client/consensus/aura/Cargo.toml @@ -8,7 +8,7 @@ license = "GPL-3.0-or-later WITH Classpath-exception-2.0" [dependencies] async-trait = "0.1.73" -codec = { package = "parity-scale-codec", version = "3.0.0", features = [ "derive" ] } +codec = { package = "parity-scale-codec", version = "3.0.0", features = ["derive"] } futures = "0.3.28" tracing = "0.1.37" schnellru = "0.2.1" diff --git a/cumulus/client/consensus/common/Cargo.toml b/cumulus/client/consensus/common/Cargo.toml index 9dfd14b1cf5..92918cd7b5b 100644 --- a/cumulus/client/consensus/common/Cargo.toml +++ b/cumulus/client/consensus/common/Cargo.toml @@ -8,7 +8,7 @@ license = "GPL-3.0-or-later WITH Classpath-exception-2.0" [dependencies] async-trait = "0.1.73" -codec = { package = "parity-scale-codec", version = "3.0.0", features = [ "derive" ] } +codec = { package = "parity-scale-codec", version = "3.0.0", features = ["derive"] } dyn-clone = "1.0.12" futures = "0.3.28" log = "0.4.20" diff --git a/cumulus/client/network/Cargo.toml b/cumulus/client/network/Cargo.toml index 08956f9f6c6..3893647e7c5 100644 --- a/cumulus/client/network/Cargo.toml +++ b/cumulus/client/network/Cargo.toml @@ -8,7 +8,7 @@ license = "GPL-3.0-or-later WITH Classpath-exception-2.0" [dependencies] async-trait = "0.1.73" -codec = { package = "parity-scale-codec", version = "3.0.0", features = [ "derive" ] } +codec = { package = "parity-scale-codec", version = "3.0.0", features = ["derive"] } futures = "0.3.28" futures-timer = "3.0.2" parking_lot = "0.12.1" diff --git a/cumulus/client/pov-recovery/Cargo.toml b/cumulus/client/pov-recovery/Cargo.toml index e407b33e0e2..29f793c7328 100644 --- a/cumulus/client/pov-recovery/Cargo.toml +++ b/cumulus/client/pov-recovery/Cargo.toml @@ -7,7 +7,7 @@ edition.workspace = true license = "GPL-3.0-or-later WITH Classpath-exception-2.0" [dependencies] -codec = { package = "parity-scale-codec", version = "3.0.0", features = [ "derive" ] } +codec = { package = "parity-scale-codec", version = "3.0.0", features = ["derive"] } futures = "0.3.28" futures-timer = "3.0.2" rand = "0.8.5" diff --git a/cumulus/client/relay-chain-inprocess-interface/Cargo.toml b/cumulus/client/relay-chain-inprocess-interface/Cargo.toml index 87f0eabd9b5..1d414736503 100644 --- a/cumulus/client/relay-chain-inprocess-interface/Cargo.toml +++ b/cumulus/client/relay-chain-inprocess-interface/Cargo.toml @@ -39,7 +39,7 @@ sp-keyring = { path = "../../../substrate/primitives/keyring" } # Polkadot polkadot-primitives = { path = "../../../polkadot/primitives" } polkadot-test-client = { path = "../../../polkadot/node/test/client" } -metered = { package = "prioritized-metered-channel", version = "0.5.1", default-features = false, features=["futures_channel"] } +metered = { package = "prioritized-metered-channel", version = "0.5.1", default-features = false, features = ["futures_channel"] } # Cumulus cumulus-test-service = { path = "../../test/service" } diff --git a/cumulus/client/relay-chain-minimal-node/Cargo.toml b/cumulus/client/relay-chain-minimal-node/Cargo.toml index ee93df09ce1..acaed5a4f6c 100644 --- a/cumulus/client/relay-chain-minimal-node/Cargo.toml +++ b/cumulus/client/relay-chain-minimal-node/Cargo.toml @@ -48,4 +48,3 @@ tracing = "0.1.37" async-trait = "0.1.73" futures = "0.3.28" parking_lot = "0.12.1" - diff --git a/cumulus/client/relay-chain-rpc-interface/Cargo.toml b/cumulus/client/relay-chain-rpc-interface/Cargo.toml index 67ee3571c04..11d8bc9b4df 100644 --- a/cumulus/client/relay-chain-rpc-interface/Cargo.toml +++ 
b/cumulus/client/relay-chain-rpc-interface/Cargo.toml @@ -38,7 +38,7 @@ url = "2.4.0" serde_json = "1.0.108" serde = "1.0.193" schnellru = "0.2.1" -smoldot = { version = "0.11.0", default_features = false, features = ["std"]} +smoldot = { version = "0.11.0", default_features = false, features = ["std"] } smoldot-light = { version = "0.9.0", default_features = false, features = ["std"] } either = "1.8.1" thiserror = "1.0.48" diff --git a/cumulus/client/service/Cargo.toml b/cumulus/client/service/Cargo.toml index cc2f22e6565..55623276eaf 100644 --- a/cumulus/client/service/Cargo.toml +++ b/cumulus/client/service/Cargo.toml @@ -42,4 +42,3 @@ cumulus-primitives-proof-size-hostfunction = { path = "../../primitives/proof-si cumulus-relay-chain-interface = { path = "../relay-chain-interface" } cumulus-relay-chain-inprocess-interface = { path = "../relay-chain-inprocess-interface" } cumulus-relay-chain-minimal-node = { path = "../relay-chain-minimal-node" } - diff --git a/cumulus/pallets/aura-ext/Cargo.toml b/cumulus/pallets/aura-ext/Cargo.toml index c9d82ead1eb..16f73aa540e 100644 --- a/cumulus/pallets/aura-ext/Cargo.toml +++ b/cumulus/pallets/aura-ext/Cargo.toml @@ -11,14 +11,14 @@ codec = { package = "parity-scale-codec", version = "3.0.0", default-features = scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } # Substrate -frame-support = { path = "../../../substrate/frame/support", default-features = false} -frame-system = { path = "../../../substrate/frame/system", default-features = false} -pallet-aura = { path = "../../../substrate/frame/aura", default-features = false} -pallet-timestamp= { path = "../../../substrate/frame/timestamp", default-features = false} -sp-application-crypto = { path = "../../../substrate/primitives/application-crypto", default-features = false} -sp-consensus-aura = { path = "../../../substrate/primitives/consensus/aura", default-features = false} -sp-runtime = { path = "../../../substrate/primitives/runtime", default-features = false} -sp-std = { path = "../../../substrate/primitives/std", default-features = false} +frame-support = { path = "../../../substrate/frame/support", default-features = false } +frame-system = { path = "../../../substrate/frame/system", default-features = false } +pallet-aura = { path = "../../../substrate/frame/aura", default-features = false } +pallet-timestamp = { path = "../../../substrate/frame/timestamp", default-features = false } +sp-application-crypto = { path = "../../../substrate/primitives/application-crypto", default-features = false } +sp-consensus-aura = { path = "../../../substrate/primitives/consensus/aura", default-features = false } +sp-runtime = { path = "../../../substrate/primitives/runtime", default-features = false } +sp-std = { path = "../../../substrate/primitives/std", default-features = false } # Cumulus cumulus-pallet-parachain-system = { path = "../parachain-system", default-features = false } @@ -29,7 +29,7 @@ cumulus-pallet-parachain-system = { path = "../parachain-system", default-featur cumulus-pallet-parachain-system = { path = "../parachain-system" } [features] -default = [ "std" ] +default = ["std"] std = [ "codec/std", "cumulus-pallet-parachain-system/std", diff --git a/cumulus/pallets/collator-selection/Cargo.toml b/cumulus/pallets/collator-selection/Cargo.toml index 68e4a681c2b..76efbf1caf6 100644 --- a/cumulus/pallets/collator-selection/Cargo.toml +++ b/cumulus/pallets/collator-selection/Cargo.toml @@ -18,15 +18,15 @@ codec = { default-features = false, features = 
["derive"], package = "parity-sca rand = { version = "0.8.5", features = ["std_rng"], default-features = false } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -sp-std = { path = "../../../substrate/primitives/std", default-features = false} -sp-runtime = { path = "../../../substrate/primitives/runtime", default-features = false} -sp-staking = { path = "../../../substrate/primitives/staking", default-features = false} -frame-support = { path = "../../../substrate/frame/support", default-features = false} -frame-system = { path = "../../../substrate/frame/system", default-features = false} -pallet-authorship = { path = "../../../substrate/frame/authorship", default-features = false} -pallet-session = { path = "../../../substrate/frame/session", default-features = false} +sp-std = { path = "../../../substrate/primitives/std", default-features = false } +sp-runtime = { path = "../../../substrate/primitives/runtime", default-features = false } +sp-staking = { path = "../../../substrate/primitives/staking", default-features = false } +frame-support = { path = "../../../substrate/frame/support", default-features = false } +frame-system = { path = "../../../substrate/frame/system", default-features = false } +pallet-authorship = { path = "../../../substrate/frame/authorship", default-features = false } +pallet-session = { path = "../../../substrate/frame/session", default-features = false } -frame-benchmarking = { path = "../../../substrate/frame/benchmarking", default-features = false, optional = true} +frame-benchmarking = { path = "../../../substrate/frame/benchmarking", default-features = false, optional = true } [dev-dependencies] sp-core = { path = "../../../substrate/primitives/core" } @@ -39,7 +39,7 @@ pallet-balances = { path = "../../../substrate/frame/balances" } pallet-aura = { path = "../../../substrate/frame/aura" } [features] -default = [ "std" ] +default = ["std"] runtime-benchmarks = [ "frame-benchmarking/runtime-benchmarks", "frame-support/runtime-benchmarks", @@ -75,4 +75,4 @@ try-runtime = [ "sp-runtime/try-runtime", ] -experimental = [ "pallet-aura/experimental" ] +experimental = ["pallet-aura/experimental"] diff --git a/cumulus/pallets/dmp-queue/Cargo.toml b/cumulus/pallets/dmp-queue/Cargo.toml index 43fb131aec2..0b64410433f 100644 --- a/cumulus/pallets/dmp-queue/Cargo.toml +++ b/cumulus/pallets/dmp-queue/Cargo.toml @@ -23,7 +23,7 @@ sp-runtime = { path = "../../../substrate/primitives/runtime", default-features sp-io = { path = "../../../substrate/primitives/io", default-features = false } # Polkadot -xcm = { package = "staging-xcm", path = "../../../polkadot/xcm", default-features = false} +xcm = { package = "staging-xcm", path = "../../../polkadot/xcm", default-features = false } # Cumulus cumulus-primitives-core = { path = "../../primitives/core", default-features = false } @@ -33,7 +33,7 @@ sp-core = { path = "../../../substrate/primitives/core" } sp-tracing = { path = "../../../substrate/primitives/tracing" } [features] -default = [ "std" ] +default = ["std"] std = [ "codec/std", diff --git a/cumulus/pallets/parachain-system/Cargo.toml b/cumulus/pallets/parachain-system/Cargo.toml index cf6af4b4786..187cf21cea6 100644 --- a/cumulus/pallets/parachain-system/Cargo.toml +++ b/cumulus/pallets/parachain-system/Cargo.toml @@ -16,24 +16,24 @@ trie-db = { version = "0.28.0", default-features = false } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } # Substrate -frame-benchmarking = { path = 
"../../../substrate/frame/benchmarking", default-features = false, optional = true} -frame-support = { path = "../../../substrate/frame/support", default-features = false} -frame-system = { path = "../../../substrate/frame/system", default-features = false} +frame-benchmarking = { path = "../../../substrate/frame/benchmarking", default-features = false, optional = true } +frame-support = { path = "../../../substrate/frame/support", default-features = false } +frame-system = { path = "../../../substrate/frame/system", default-features = false } pallet-message-queue = { path = "../../../substrate/frame/message-queue", default-features = false } -sp-core = { path = "../../../substrate/primitives/core", default-features = false} -sp-externalities = { path = "../../../substrate/primitives/externalities", default-features = false} -sp-inherents = { path = "../../../substrate/primitives/inherents", default-features = false} -sp-io = { path = "../../../substrate/primitives/io", default-features = false} -sp-runtime = { path = "../../../substrate/primitives/runtime", default-features = false} -sp-state-machine = { path = "../../../substrate/primitives/state-machine", default-features = false} -sp-std = { path = "../../../substrate/primitives/std", default-features = false} -sp-trie = { path = "../../../substrate/primitives/trie", default-features = false} -sp-version = { path = "../../../substrate/primitives/version", default-features = false} +sp-core = { path = "../../../substrate/primitives/core", default-features = false } +sp-externalities = { path = "../../../substrate/primitives/externalities", default-features = false } +sp-inherents = { path = "../../../substrate/primitives/inherents", default-features = false } +sp-io = { path = "../../../substrate/primitives/io", default-features = false } +sp-runtime = { path = "../../../substrate/primitives/runtime", default-features = false } +sp-state-machine = { path = "../../../substrate/primitives/state-machine", default-features = false } +sp-std = { path = "../../../substrate/primitives/std", default-features = false } +sp-trie = { path = "../../../substrate/primitives/trie", default-features = false } +sp-version = { path = "../../../substrate/primitives/version", default-features = false } # Polkadot -polkadot-parachain-primitives = { path = "../../../polkadot/parachain", default-features = false, features = [ "wasm-api" ]} +polkadot-parachain-primitives = { path = "../../../polkadot/parachain", default-features = false, features = ["wasm-api"] } polkadot-runtime-parachains = { path = "../../../polkadot/runtime/parachains", default-features = false } -xcm = { package = "staging-xcm", path = "../../../polkadot/xcm", default-features = false} +xcm = { package = "staging-xcm", path = "../../../polkadot/xcm", default-features = false } # Cumulus cumulus-pallet-parachain-system-proc-macro = { path = "proc-macro", default-features = false } @@ -60,7 +60,7 @@ cumulus-test-client = { path = "../../test/client" } cumulus-test-relay-sproof-builder = { path = "../../test/relay-sproof-builder" } [features] -default = [ "std" ] +default = ["std"] std = [ "bytes/std", "codec/std", diff --git a/cumulus/pallets/parachain-system/proc-macro/Cargo.toml b/cumulus/pallets/parachain-system/proc-macro/Cargo.toml index 6accfa92c57..f4bfdf27062 100644 --- a/cumulus/pallets/parachain-system/proc-macro/Cargo.toml +++ b/cumulus/pallets/parachain-system/proc-macro/Cargo.toml @@ -16,5 +16,5 @@ quote = "1.0.33" proc-macro-crate = "1.3.1" [features] -default = [ "std" ] 
+default = ["std"] std = [] diff --git a/cumulus/pallets/session-benchmarking/Cargo.toml b/cumulus/pallets/session-benchmarking/Cargo.toml index a28971d66d3..4c85b3d7171 100644 --- a/cumulus/pallets/session-benchmarking/Cargo.toml +++ b/cumulus/pallets/session-benchmarking/Cargo.toml @@ -14,15 +14,15 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] parity-scale-codec = { version = "3.6.4", default-features = false } -sp-std = { path = "../../../substrate/primitives/std", default-features = false} -sp-runtime = { path = "../../../substrate/primitives/runtime", default-features = false} -frame-support = { path = "../../../substrate/frame/support", default-features = false} -frame-system = { path = "../../../substrate/frame/system", default-features = false} -frame-benchmarking = { path = "../../../substrate/frame/benchmarking", default-features = false, optional = true} -pallet-session = { path = "../../../substrate/frame/session", default-features = false} +sp-std = { path = "../../../substrate/primitives/std", default-features = false } +sp-runtime = { path = "../../../substrate/primitives/runtime", default-features = false } +frame-support = { path = "../../../substrate/frame/support", default-features = false } +frame-system = { path = "../../../substrate/frame/system", default-features = false } +frame-benchmarking = { path = "../../../substrate/frame/benchmarking", default-features = false, optional = true } +pallet-session = { path = "../../../substrate/frame/session", default-features = false } [features] -default = [ "std" ] +default = ["std"] runtime-benchmarks = [ "frame-benchmarking/runtime-benchmarks", "frame-support/runtime-benchmarks", diff --git a/cumulus/pallets/solo-to-para/Cargo.toml b/cumulus/pallets/solo-to-para/Cargo.toml index e4ef72965c7..dc79d287d4d 100644 --- a/cumulus/pallets/solo-to-para/Cargo.toml +++ b/cumulus/pallets/solo-to-para/Cargo.toml @@ -11,20 +11,20 @@ codec = { package = "parity-scale-codec", version = "3.0.0", default-features = scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } # Substrate -frame-support = { path = "../../../substrate/frame/support", default-features = false} -frame-system = { path = "../../../substrate/frame/system", default-features = false} -pallet-sudo = { path = "../../../substrate/frame/sudo", default-features = false} -sp-runtime = { path = "../../../substrate/primitives/runtime", default-features = false} -sp-std = { path = "../../../substrate/primitives/std", default-features = false} +frame-support = { path = "../../../substrate/frame/support", default-features = false } +frame-system = { path = "../../../substrate/frame/system", default-features = false } +pallet-sudo = { path = "../../../substrate/frame/sudo", default-features = false } +sp-runtime = { path = "../../../substrate/primitives/runtime", default-features = false } +sp-std = { path = "../../../substrate/primitives/std", default-features = false } # Polkadot -polkadot-primitives = { path = "../../../polkadot/primitives", default-features = false} +polkadot-primitives = { path = "../../../polkadot/primitives", default-features = false } # Cumulus -cumulus-pallet-parachain-system = { path = "../parachain-system", default-features = false} +cumulus-pallet-parachain-system = { path = "../parachain-system", default-features = false } [features] -default = [ "std" ] +default = ["std"] std = [ "codec/std", "cumulus-pallet-parachain-system/std", diff --git a/cumulus/pallets/xcm/Cargo.toml b/cumulus/pallets/xcm/Cargo.toml index 
c8e819979bd..f36d0aa52de 100644 --- a/cumulus/pallets/xcm/Cargo.toml +++ b/cumulus/pallets/xcm/Cargo.toml @@ -10,18 +10,18 @@ description = "Pallet for stuff specific to parachains' usage of XCM" codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -sp-std = { path = "../../../substrate/primitives/std", default-features = false} -sp-io = { path = "../../../substrate/primitives/io", default-features = false} -sp-runtime = { path = "../../../substrate/primitives/runtime", default-features = false} -frame-support = { path = "../../../substrate/frame/support", default-features = false} -frame-system = { path = "../../../substrate/frame/system", default-features = false} +sp-std = { path = "../../../substrate/primitives/std", default-features = false } +sp-io = { path = "../../../substrate/primitives/io", default-features = false } +sp-runtime = { path = "../../../substrate/primitives/runtime", default-features = false } +frame-support = { path = "../../../substrate/frame/support", default-features = false } +frame-system = { path = "../../../substrate/frame/system", default-features = false } -xcm = { package = "staging-xcm", path = "../../../polkadot/xcm", default-features = false} +xcm = { package = "staging-xcm", path = "../../../polkadot/xcm", default-features = false } cumulus-primitives-core = { path = "../../primitives/core", default-features = false } [features] -default = [ "std" ] +default = ["std"] std = [ "codec/std", "cumulus-primitives-core/std", diff --git a/cumulus/pallets/xcmp-queue/Cargo.toml b/cumulus/pallets/xcmp-queue/Cargo.toml index fba006e7986..1bc21bbbb58 100644 --- a/cumulus/pallets/xcmp-queue/Cargo.toml +++ b/cumulus/pallets/xcmp-queue/Cargo.toml @@ -7,17 +7,17 @@ description = "Pallet to queue outbound and inbound XCMP messages." 
license = "Apache-2.0" [dependencies] -codec = { package = "parity-scale-codec", version = "3.0.0", features = [ "derive" ], default-features = false } +codec = { package = "parity-scale-codec", version = "3.0.0", features = ["derive"], default-features = false } log = { version = "0.4.20", default-features = false } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } # Substrate -frame-support = { path = "../../../substrate/frame/support", default-features = false} -frame-system = { path = "../../../substrate/frame/system", default-features = false} -sp-io = { path = "../../../substrate/primitives/io", default-features = false} +frame-support = { path = "../../../substrate/frame/support", default-features = false } +frame-system = { path = "../../../substrate/frame/system", default-features = false } +sp-io = { path = "../../../substrate/primitives/io", default-features = false } sp-core = { path = "../../../substrate/primitives/core", default-features = false } -sp-runtime = { path = "../../../substrate/primitives/runtime", default-features = false} -sp-std = { path = "../../../substrate/primitives/std", default-features = false} +sp-runtime = { path = "../../../substrate/primitives/runtime", default-features = false } +sp-std = { path = "../../../substrate/primitives/std", default-features = false } pallet-message-queue = { path = "../../../substrate/frame/message-queue", default-features = false } # Polkadot @@ -30,7 +30,7 @@ xcm-executor = { package = "staging-xcm-executor", path = "../../../polkadot/xcm cumulus-primitives-core = { path = "../../primitives/core", default-features = false } # Optional import for benchmarking -frame-benchmarking = { path = "../../../substrate/frame/benchmarking", default-features = false, optional = true} +frame-benchmarking = { path = "../../../substrate/frame/benchmarking", default-features = false, optional = true } bounded-collections = { version = "0.1.4", default-features = false } # Bridges @@ -50,7 +50,7 @@ xcm-builder = { package = "staging-xcm-builder", path = "../../../polkadot/xcm/x cumulus-pallet-parachain-system = { path = "../parachain-system", features = ["parameterized-consensus-hook"] } [features] -default = [ "std" ] +default = ["std"] std = [ "bounded-collections/std", "bp-xcm-bridge-hub-router?/std", @@ -96,4 +96,4 @@ try-runtime = [ "polkadot-runtime-parachains/try-runtime", "sp-runtime/try-runtime", ] -bridging = [ "bp-xcm-bridge-hub-router" ] +bridging = ["bp-xcm-bridge-hub-router"] diff --git a/cumulus/parachain-template/node/Cargo.toml b/cumulus/parachain-template/node/Cargo.toml index ba4060f54f9..19ee334cf95 100644 --- a/cumulus/parachain-template/node/Cargo.toml +++ b/cumulus/parachain-template/node/Cargo.toml @@ -57,7 +57,7 @@ substrate-prometheus-endpoint = { path = "../../../substrate/utils/prometheus" } # Polkadot polkadot-cli = { path = "../../../polkadot/cli", features = ["rococo-native"] } polkadot-primitives = { path = "../../../polkadot/primitives" } -xcm = { package = "staging-xcm", path = "../../../polkadot/xcm", default-features = false} +xcm = { package = "staging-xcm", path = "../../../polkadot/xcm", default-features = false } # Cumulus cumulus-client-cli = { path = "../../client/cli" } @@ -91,4 +91,3 @@ try-runtime = [ "polkadot-cli/try-runtime", "sp-runtime/try-runtime", ] - diff --git a/cumulus/parachain-template/pallets/template/Cargo.toml b/cumulus/parachain-template/pallets/template/Cargo.toml index 4267341a2d7..71b78a7175c 100644 --- 
a/cumulus/parachain-template/pallets/template/Cargo.toml +++ b/cumulus/parachain-template/pallets/template/Cargo.toml @@ -16,20 +16,20 @@ codec = { package = "parity-scale-codec", version = "3.0.0", features = ["derive scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } # Substrate -frame-benchmarking = { path = "../../../../substrate/frame/benchmarking", default-features = false, optional = true} -frame-support = { path = "../../../../substrate/frame/support", default-features = false} -frame-system = { path = "../../../../substrate/frame/system", default-features = false} +frame-benchmarking = { path = "../../../../substrate/frame/benchmarking", default-features = false, optional = true } +frame-support = { path = "../../../../substrate/frame/support", default-features = false } +frame-system = { path = "../../../../substrate/frame/system", default-features = false } [dev-dependencies] serde = { version = "1.0.193" } # Substrate -sp-core = { path = "../../../../substrate/primitives/core", default-features = false} -sp-io = { path = "../../../../substrate/primitives/io", default-features = false} -sp-runtime = { path = "../../../../substrate/primitives/runtime", default-features = false} +sp-core = { path = "../../../../substrate/primitives/core", default-features = false } +sp-io = { path = "../../../../substrate/primitives/io", default-features = false } +sp-runtime = { path = "../../../../substrate/primitives/runtime", default-features = false } [features] -default = [ "std" ] +default = ["std"] runtime-benchmarks = [ "frame-benchmarking/runtime-benchmarks", "frame-support/runtime-benchmarks", diff --git a/cumulus/parachain-template/runtime/Cargo.toml b/cumulus/parachain-template/runtime/Cargo.toml index 01e25007873..d83867a9c7c 100644 --- a/cumulus/parachain-template/runtime/Cargo.toml +++ b/cumulus/parachain-template/runtime/Cargo.toml @@ -25,48 +25,48 @@ smallvec = "1.11.0" pallet-parachain-template = { path = "../pallets/template", default-features = false } # Substrate -frame-benchmarking = { path = "../../../substrate/frame/benchmarking", default-features = false, optional = true} -frame-executive = { path = "../../../substrate/frame/executive", default-features = false} -frame-support = { path = "../../../substrate/frame/support", default-features = false} -frame-system = { path = "../../../substrate/frame/system", default-features = false} -frame-system-benchmarking = { path = "../../../substrate/frame/system/benchmarking", default-features = false, optional = true} -frame-system-rpc-runtime-api = { path = "../../../substrate/frame/system/rpc/runtime-api", default-features = false} -frame-try-runtime = { path = "../../../substrate/frame/try-runtime", default-features = false, optional = true} -pallet-aura = { path = "../../../substrate/frame/aura", default-features = false} -pallet-authorship = { path = "../../../substrate/frame/authorship", default-features = false} -pallet-balances = { path = "../../../substrate/frame/balances", default-features = false} +frame-benchmarking = { path = "../../../substrate/frame/benchmarking", default-features = false, optional = true } +frame-executive = { path = "../../../substrate/frame/executive", default-features = false } +frame-support = { path = "../../../substrate/frame/support", default-features = false } +frame-system = { path = "../../../substrate/frame/system", default-features = false } +frame-system-benchmarking = { path = "../../../substrate/frame/system/benchmarking", default-features = false, 
optional = true } +frame-system-rpc-runtime-api = { path = "../../../substrate/frame/system/rpc/runtime-api", default-features = false } +frame-try-runtime = { path = "../../../substrate/frame/try-runtime", default-features = false, optional = true } +pallet-aura = { path = "../../../substrate/frame/aura", default-features = false } +pallet-authorship = { path = "../../../substrate/frame/authorship", default-features = false } +pallet-balances = { path = "../../../substrate/frame/balances", default-features = false } pallet-message-queue = { path = "../../../substrate/frame/message-queue", default-features = false } -pallet-session = { path = "../../../substrate/frame/session", default-features = false} -pallet-sudo = { path = "../../../substrate/frame/sudo", default-features = false} -pallet-timestamp = { path = "../../../substrate/frame/timestamp", default-features = false} -pallet-transaction-payment = { path = "../../../substrate/frame/transaction-payment", default-features = false} -pallet-transaction-payment-rpc-runtime-api = { path = "../../../substrate/frame/transaction-payment/rpc/runtime-api", default-features = false} -sp-api = { path = "../../../substrate/primitives/api", default-features = false} -sp-block-builder = { path = "../../../substrate/primitives/block-builder", default-features = false} -sp-consensus-aura = { path = "../../../substrate/primitives/consensus/aura", default-features = false} -sp-core = { path = "../../../substrate/primitives/core", default-features = false} +pallet-session = { path = "../../../substrate/frame/session", default-features = false } +pallet-sudo = { path = "../../../substrate/frame/sudo", default-features = false } +pallet-timestamp = { path = "../../../substrate/frame/timestamp", default-features = false } +pallet-transaction-payment = { path = "../../../substrate/frame/transaction-payment", default-features = false } +pallet-transaction-payment-rpc-runtime-api = { path = "../../../substrate/frame/transaction-payment/rpc/runtime-api", default-features = false } +sp-api = { path = "../../../substrate/primitives/api", default-features = false } +sp-block-builder = { path = "../../../substrate/primitives/block-builder", default-features = false } +sp-consensus-aura = { path = "../../../substrate/primitives/consensus/aura", default-features = false } +sp-core = { path = "../../../substrate/primitives/core", default-features = false } sp-genesis-builder = { path = "../../../substrate/primitives/genesis-builder", default-features = false } -sp-inherents = { path = "../../../substrate/primitives/inherents", default-features = false} -sp-offchain = { path = "../../../substrate/primitives/offchain", default-features = false} -sp-runtime = { path = "../../../substrate/primitives/runtime", default-features = false} -sp-session = { path = "../../../substrate/primitives/session", default-features = false} -sp-std = { path = "../../../substrate/primitives/std", default-features = false} -sp-transaction-pool = { path = "../../../substrate/primitives/transaction-pool", default-features = false} -sp-version = { path = "../../../substrate/primitives/version", default-features = false} +sp-inherents = { path = "../../../substrate/primitives/inherents", default-features = false } +sp-offchain = { path = "../../../substrate/primitives/offchain", default-features = false } +sp-runtime = { path = "../../../substrate/primitives/runtime", default-features = false } +sp-session = { path = "../../../substrate/primitives/session", default-features = false } +sp-std = 
{ path = "../../../substrate/primitives/std", default-features = false } +sp-transaction-pool = { path = "../../../substrate/primitives/transaction-pool", default-features = false } +sp-version = { path = "../../../substrate/primitives/version", default-features = false } # Polkadot -pallet-xcm = { path = "../../../polkadot/xcm/pallet-xcm", default-features = false} -polkadot-parachain-primitives = { path = "../../../polkadot/parachain", default-features = false} -polkadot-runtime-common = { path = "../../../polkadot/runtime/common", default-features = false} -xcm = { package = "staging-xcm", path = "../../../polkadot/xcm", default-features = false} -xcm-builder = { package = "staging-xcm-builder", path = "../../../polkadot/xcm/xcm-builder", default-features = false} -xcm-executor = { package = "staging-xcm-executor", path = "../../../polkadot/xcm/xcm-executor", default-features = false} +pallet-xcm = { path = "../../../polkadot/xcm/pallet-xcm", default-features = false } +polkadot-parachain-primitives = { path = "../../../polkadot/parachain", default-features = false } +polkadot-runtime-common = { path = "../../../polkadot/runtime/common", default-features = false } +xcm = { package = "staging-xcm", path = "../../../polkadot/xcm", default-features = false } +xcm-builder = { package = "staging-xcm-builder", path = "../../../polkadot/xcm/xcm-builder", default-features = false } +xcm-executor = { package = "staging-xcm-executor", path = "../../../polkadot/xcm/xcm-executor", default-features = false } # Cumulus cumulus-pallet-aura-ext = { path = "../../pallets/aura-ext", default-features = false } cumulus-pallet-dmp-queue = { path = "../../pallets/dmp-queue", default-features = false } -cumulus-pallet-parachain-system = { path = "../../pallets/parachain-system", default-features = false, features = ["parameterized-consensus-hook",] } -cumulus-pallet-session-benchmarking = { path = "../../pallets/session-benchmarking", default-features = false} +cumulus-pallet-parachain-system = { path = "../../pallets/parachain-system", default-features = false, features = ["parameterized-consensus-hook"] } +cumulus-pallet-session-benchmarking = { path = "../../pallets/session-benchmarking", default-features = false } cumulus-pallet-xcm = { path = "../../pallets/xcm", default-features = false } cumulus-pallet-xcmp-queue = { path = "../../pallets/xcmp-queue", default-features = false } cumulus-primitives-core = { path = "../../primitives/core", default-features = false } @@ -76,7 +76,7 @@ parachains-common = { path = "../../parachains/common", default-features = false parachain-info = { package = "staging-parachain-info", path = "../../parachains/pallets/parachain-info", default-features = false } [features] -default = [ "std" ] +default = ["std"] std = [ "codec/std", "cumulus-pallet-aura-ext/std", @@ -183,4 +183,4 @@ try-runtime = [ "sp-runtime/try-runtime", ] -experimental = [ "pallet-aura/experimental" ] +experimental = ["pallet-aura/experimental"] diff --git a/cumulus/parachains/common/Cargo.toml b/cumulus/parachains/common/Cargo.toml index 4aad4dec236..5475fd2aa26 100644 --- a/cumulus/parachains/common/Cargo.toml +++ b/cumulus/parachains/common/Cargo.toml @@ -13,7 +13,7 @@ targets = ["x86_64-unknown-linux-gnu"] codec = { package = "parity-scale-codec", version = "3.0.0", features = ["derive"], default-features = false } log = { version = "0.4.19", default-features = false } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -num-traits = { version = "0.2", 
default-features = false} +num-traits = { version = "0.2", default-features = false } smallvec = "1.11.0" # Substrate @@ -31,12 +31,12 @@ sp-runtime = { path = "../../../substrate/primitives/runtime", default-features sp-std = { path = "../../../substrate/primitives/std", default-features = false } # Polkadot -rococo-runtime-constants = { path = "../../../polkadot/runtime/rococo/constants", default-features = false} -westend-runtime-constants = { path = "../../../polkadot/runtime/westend/constants", default-features = false} -polkadot-core-primitives = { path = "../../../polkadot/core-primitives", default-features = false} -polkadot-primitives = { path = "../../../polkadot/primitives", default-features = false} -xcm = { package = "staging-xcm", path = "../../../polkadot/xcm", default-features = false} -xcm-builder = { package = "staging-xcm-builder", path = "../../../polkadot/xcm/xcm-builder", default-features = false} +rococo-runtime-constants = { path = "../../../polkadot/runtime/rococo/constants", default-features = false } +westend-runtime-constants = { path = "../../../polkadot/runtime/westend/constants", default-features = false } +polkadot-core-primitives = { path = "../../../polkadot/core-primitives", default-features = false } +polkadot-primitives = { path = "../../../polkadot/primitives", default-features = false } +xcm = { package = "staging-xcm", path = "../../../polkadot/xcm", default-features = false } +xcm-builder = { package = "staging-xcm-builder", path = "../../../polkadot/xcm/xcm-builder", default-features = false } # Cumulus pallet-collator-selection = { path = "../../pallets/collator-selection", default-features = false } @@ -45,14 +45,14 @@ cumulus-primitives-utility = { path = "../../primitives/utility", default-featur parachain-info = { package = "staging-parachain-info", path = "../pallets/parachain-info", default-features = false } [dev-dependencies] -pallet-authorship = { path = "../../../substrate/frame/authorship", default-features = false} -sp-io = { path = "../../../substrate/primitives/io", default-features = false} +pallet-authorship = { path = "../../../substrate/frame/authorship", default-features = false } +sp-io = { path = "../../../substrate/primitives/io", default-features = false } [build-dependencies] substrate-wasm-builder = { path = "../../../substrate/utils/wasm-builder" } [features] -default = [ "std" ] +default = ["std"] std = [ "codec/std", "cumulus-primitives-core/std", diff --git a/cumulus/parachains/integration-tests/emulated/common/Cargo.toml b/cumulus/parachains/integration-tests/emulated/common/Cargo.toml index 08bb284cded..92716083d69 100644 --- a/cumulus/parachains/integration-tests/emulated/common/Cargo.toml +++ b/cumulus/parachains/integration-tests/emulated/common/Cargo.toml @@ -13,29 +13,29 @@ serde_json = "1.0.108" # Substrate grandpa = { package = "sc-consensus-grandpa", path = "../../../../../substrate/client/consensus/grandpa" } -sp-authority-discovery = { path = "../../../../../substrate/primitives/authority-discovery", default-features = false} -sp-runtime = { path = "../../../../../substrate/primitives/runtime", default-features = false} -frame-support = { path = "../../../../../substrate/frame/support", default-features = false} -sp-core = { path = "../../../../../substrate/primitives/core", default-features = false} -sp-consensus-babe = { path = "../../../../../substrate/primitives/consensus/babe", default-features = false} -pallet-assets = { path = "../../../../../substrate/frame/assets", default-features = false} 
-pallet-balances = { path = "../../../../../substrate/frame/balances", default-features = false} -pallet-message-queue = { path = "../../../../../substrate/frame/message-queue", default-features = false} -pallet-im-online = { path = "../../../../../substrate/frame/im-online", default-features = false} +sp-authority-discovery = { path = "../../../../../substrate/primitives/authority-discovery", default-features = false } +sp-runtime = { path = "../../../../../substrate/primitives/runtime", default-features = false } +frame-support = { path = "../../../../../substrate/frame/support", default-features = false } +sp-core = { path = "../../../../../substrate/primitives/core", default-features = false } +sp-consensus-babe = { path = "../../../../../substrate/primitives/consensus/babe", default-features = false } +pallet-assets = { path = "../../../../../substrate/frame/assets", default-features = false } +pallet-balances = { path = "../../../../../substrate/frame/balances", default-features = false } +pallet-message-queue = { path = "../../../../../substrate/frame/message-queue", default-features = false } +pallet-im-online = { path = "../../../../../substrate/frame/im-online", default-features = false } beefy-primitives = { package = "sp-consensus-beefy", path = "../../../../../substrate/primitives/consensus/beefy" } # Polkadot polkadot-service = { path = "../../../../../polkadot/node/service", default-features = false, features = ["full-node"] } -polkadot-primitives = { path = "../../../../../polkadot/primitives", default-features = false} +polkadot-primitives = { path = "../../../../../polkadot/primitives", default-features = false } polkadot-runtime-parachains = { path = "../../../../../polkadot/runtime/parachains" } -xcm = { package = "staging-xcm", path = "../../../../../polkadot/xcm", default-features = false} -pallet-xcm = { path = "../../../../../polkadot/xcm/pallet-xcm", default-features = false} +xcm = { package = "staging-xcm", path = "../../../../../polkadot/xcm", default-features = false } +pallet-xcm = { path = "../../../../../polkadot/xcm/pallet-xcm", default-features = false } # Cumulus parachains-common = { path = "../../../common" } cumulus-primitives-core = { path = "../../../../primitives/core" } -xcm-emulator = { path = "../../../../xcm/xcm-emulator", default-features = false} -cumulus-pallet-xcmp-queue = { path = "../../../../pallets/xcmp-queue", default-features = false} +xcm-emulator = { path = "../../../../xcm/xcm-emulator", default-features = false } +cumulus-pallet-xcmp-queue = { path = "../../../../pallets/xcmp-queue", default-features = false } cumulus-pallet-parachain-system = { path = "../../../../pallets/parachain-system" } asset-test-utils = { path = "../../../runtimes/assets/test-utils" } diff --git a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/Cargo.toml b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/Cargo.toml index 23f80f33f78..16e2f736bdf 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/Cargo.toml +++ b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/Cargo.toml @@ -12,17 +12,17 @@ codec = { package = "parity-scale-codec", version = "3.4.0", default-features = assert_matches = "1.5.0" # Substrate -sp-runtime = { path = "../../../../../../../substrate/primitives/runtime", default-features = false} -frame-support = { path = "../../../../../../../substrate/frame/support", default-features = false} -pallet-balances = { path = 
"../../../../../../../substrate/frame/balances", default-features = false} -pallet-assets = { path = "../../../../../../../substrate/frame/assets", default-features = false} -pallet-asset-conversion = { path = "../../../../../../../substrate/frame/asset-conversion", default-features = false} +sp-runtime = { path = "../../../../../../../substrate/primitives/runtime", default-features = false } +frame-support = { path = "../../../../../../../substrate/frame/support", default-features = false } +pallet-balances = { path = "../../../../../../../substrate/frame/balances", default-features = false } +pallet-assets = { path = "../../../../../../../substrate/frame/assets", default-features = false } +pallet-asset-conversion = { path = "../../../../../../../substrate/frame/asset-conversion", default-features = false } pallet-message-queue = { path = "../../../../../../../substrate/frame/message-queue", default-features = false } # Polkadot -xcm = { package = "staging-xcm", path = "../../../../../../../polkadot/xcm", default-features = false} -pallet-xcm = { path = "../../../../../../../polkadot/xcm/pallet-xcm", default-features = false} -xcm-executor = { package = "staging-xcm-executor", path = "../../../../../../../polkadot/xcm/xcm-executor", default-features = false} +xcm = { package = "staging-xcm", path = "../../../../../../../polkadot/xcm", default-features = false } +pallet-xcm = { path = "../../../../../../../polkadot/xcm/pallet-xcm", default-features = false } +xcm-executor = { package = "staging-xcm-executor", path = "../../../../../../../polkadot/xcm/xcm-executor", default-features = false } rococo-runtime = { path = "../../../../../../../polkadot/runtime/rococo" } # Cumulus @@ -31,4 +31,4 @@ parachains-common = { path = "../../../../../../parachains/common" } asset-hub-rococo-runtime = { path = "../../../../../runtimes/assets/asset-hub-rococo" } emulated-integration-tests-common = { path = "../../../common", default-features = false } penpal-runtime = { path = "../../../../../runtimes/testing/penpal" } -rococo-system-emulated-network ={ path = "../../../networks/rococo-system" } +rococo-system-emulated-network = { path = "../../../networks/rococo-system" } diff --git a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/Cargo.toml b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/Cargo.toml index 4c7537bfd53..74e6660d4d4 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/Cargo.toml +++ b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/Cargo.toml @@ -12,22 +12,22 @@ codec = { package = "parity-scale-codec", version = "3.4.0", default-features = assert_matches = "1.5.0" # Substrate -sp-runtime = { path = "../../../../../../../substrate/primitives/runtime", default-features = false} -frame-support = { path = "../../../../../../../substrate/frame/support", default-features = false} -frame-system = { path = "../../../../../../../substrate/frame/system", default-features = false} -pallet-balances = { path = "../../../../../../../substrate/frame/balances", default-features = false} -pallet-assets = { path = "../../../../../../../substrate/frame/assets", default-features = false} -pallet-asset-conversion = { path = "../../../../../../../substrate/frame/asset-conversion", default-features = false} -pallet-treasury = { path = "../../../../../../../substrate/frame/treasury", default-features = false} -pallet-asset-rate = { path = 
"../../../../../../../substrate/frame/asset-rate", default-features = false} +sp-runtime = { path = "../../../../../../../substrate/primitives/runtime", default-features = false } +frame-support = { path = "../../../../../../../substrate/frame/support", default-features = false } +frame-system = { path = "../../../../../../../substrate/frame/system", default-features = false } +pallet-balances = { path = "../../../../../../../substrate/frame/balances", default-features = false } +pallet-assets = { path = "../../../../../../../substrate/frame/assets", default-features = false } +pallet-asset-conversion = { path = "../../../../../../../substrate/frame/asset-conversion", default-features = false } +pallet-treasury = { path = "../../../../../../../substrate/frame/treasury", default-features = false } +pallet-asset-rate = { path = "../../../../../../../substrate/frame/asset-rate", default-features = false } pallet-message-queue = { path = "../../../../../../../substrate/frame/message-queue", default-features = false } # Polkadot polkadot-runtime-common = { path = "../../../../../../../polkadot/runtime/common" } -xcm = { package = "staging-xcm", path = "../../../../../../../polkadot/xcm", default-features = false} -xcm-builder = { package = "staging-xcm-builder", path = "../../../../../../../polkadot/xcm/xcm-builder", default-features = false} -xcm-executor = { package = "staging-xcm-executor", path = "../../../../../../../polkadot/xcm/xcm-executor", default-features = false} -pallet-xcm = { path = "../../../../../../../polkadot/xcm/pallet-xcm", default-features = false} +xcm = { package = "staging-xcm", path = "../../../../../../../polkadot/xcm", default-features = false } +xcm-builder = { package = "staging-xcm-builder", path = "../../../../../../../polkadot/xcm/xcm-builder", default-features = false } +xcm-executor = { package = "staging-xcm-executor", path = "../../../../../../../polkadot/xcm/xcm-executor", default-features = false } +pallet-xcm = { path = "../../../../../../../polkadot/xcm/pallet-xcm", default-features = false } westend-runtime = { path = "../../../../../../../polkadot/runtime/westend" } westend-runtime-constants = { path = "../../../../../../../polkadot/runtime/westend/constants", default-features = false } @@ -37,6 +37,6 @@ asset-hub-westend-runtime = { path = "../../../../../runtimes/assets/asset-hub-w asset-test-utils = { path = "../../../../../runtimes/assets/test-utils" } cumulus-pallet-dmp-queue = { default-features = false, path = "../../../../../../pallets/dmp-queue" } cumulus-pallet-parachain-system = { default-features = false, path = "../../../../../../pallets/parachain-system" } -emulated-integration-tests-common = { path = "../../../common", default-features = false} +emulated-integration-tests-common = { path = "../../../common", default-features = false } penpal-runtime = { path = "../../../../../runtimes/testing/penpal" } -westend-system-emulated-network ={ path = "../../../networks/westend-system" } +westend-system-emulated-network = { path = "../../../networks/westend-system" } diff --git a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/Cargo.toml b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/Cargo.toml index 00e3af2e4ff..826b55507ed 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/Cargo.toml +++ b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/Cargo.toml @@ -17,19 +17,19 @@ pallet-balances = { path = 
"../../../../../../../substrate/frame/balances", defa pallet-message-queue = { path = "../../../../../../../substrate/frame/message-queue" } # Polkadot -xcm = { package = "staging-xcm", path = "../../../../../../../polkadot/xcm", default-features = false} -pallet-xcm = { path = "../../../../../../../polkadot/xcm/pallet-xcm", default-features = false} -xcm-executor = { package = "staging-xcm-executor", path = "../../../../../../../polkadot/xcm/xcm-executor", default-features = false} +xcm = { package = "staging-xcm", path = "../../../../../../../polkadot/xcm", default-features = false } +pallet-xcm = { path = "../../../../../../../polkadot/xcm/pallet-xcm", default-features = false } +xcm-executor = { package = "staging-xcm-executor", path = "../../../../../../../polkadot/xcm/xcm-executor", default-features = false } # Bridges -pallet-bridge-messages = { path = "../../../../../../../bridges/modules/messages", default-features = false} -bp-messages = { path = "../../../../../../../bridges/primitives/messages", default-features = false} +pallet-bridge-messages = { path = "../../../../../../../bridges/modules/messages", default-features = false } +bp-messages = { path = "../../../../../../../bridges/primitives/messages", default-features = false } # Cumulus asset-test-utils = { path = "../../../../../../parachains/runtimes/assets/test-utils" } parachains-common = { path = "../../../../../../parachains/common" } -cumulus-pallet-xcmp-queue = { path = "../../../../../../pallets/xcmp-queue", default-features = false} -cumulus-pallet-dmp-queue = { path = "../../../../../../pallets/dmp-queue", default-features = false} +cumulus-pallet-xcmp-queue = { path = "../../../../../../pallets/xcmp-queue", default-features = false } +cumulus-pallet-dmp-queue = { path = "../../../../../../pallets/dmp-queue", default-features = false } bridge-hub-rococo-runtime = { path = "../../../../../../parachains/runtimes/bridge-hubs/bridge-hub-rococo", default-features = false } -emulated-integration-tests-common = { path = "../../../common", default-features = false} +emulated-integration-tests-common = { path = "../../../common", default-features = false } rococo-westend-system-emulated-network = { path = "../../../networks/rococo-westend-system" } diff --git a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/Cargo.toml b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/Cargo.toml index e5b1fce5f2b..cb53d7fc0e1 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/Cargo.toml +++ b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/Cargo.toml @@ -17,19 +17,19 @@ pallet-balances = { path = "../../../../../../../substrate/frame/balances", defa pallet-message-queue = { path = "../../../../../../../substrate/frame/message-queue" } # Polkadot -xcm = { package = "staging-xcm", path = "../../../../../../../polkadot/xcm", default-features = false} -pallet-xcm = { path = "../../../../../../../polkadot/xcm/pallet-xcm", default-features = false} -xcm-executor = { package = "staging-xcm-executor", path = "../../../../../../../polkadot/xcm/xcm-executor", default-features = false} +xcm = { package = "staging-xcm", path = "../../../../../../../polkadot/xcm", default-features = false } +pallet-xcm = { path = "../../../../../../../polkadot/xcm/pallet-xcm", default-features = false } +xcm-executor = { package = "staging-xcm-executor", path = "../../../../../../../polkadot/xcm/xcm-executor", default-features = false } 
# Bridges -pallet-bridge-messages = { path = "../../../../../../../bridges/modules/messages", default-features = false} -bp-messages = { path = "../../../../../../../bridges/primitives/messages", default-features = false} +pallet-bridge-messages = { path = "../../../../../../../bridges/modules/messages", default-features = false } +bp-messages = { path = "../../../../../../../bridges/primitives/messages", default-features = false } # Cumulus asset-test-utils = { path = "../../../../../../parachains/runtimes/assets/test-utils" } parachains-common = { path = "../../../../../../parachains/common" } -cumulus-pallet-xcmp-queue = { path = "../../../../../../pallets/xcmp-queue", default-features = false} -cumulus-pallet-dmp-queue = { path = "../../../../../../pallets/dmp-queue", default-features = false} +cumulus-pallet-xcmp-queue = { path = "../../../../../../pallets/xcmp-queue", default-features = false } +cumulus-pallet-dmp-queue = { path = "../../../../../../pallets/dmp-queue", default-features = false } bridge-hub-westend-runtime = { path = "../../../../../../parachains/runtimes/bridge-hubs/bridge-hub-westend", default-features = false } -emulated-integration-tests-common = { path = "../../../common", default-features = false} +emulated-integration-tests-common = { path = "../../../common", default-features = false } rococo-westend-system-emulated-network = { path = "../../../networks/rococo-westend-system" } diff --git a/cumulus/parachains/pallets/collective-content/Cargo.toml b/cumulus/parachains/pallets/collective-content/Cargo.toml index e3f8023f419..26899a9e774 100644 --- a/cumulus/parachains/pallets/collective-content/Cargo.toml +++ b/cumulus/parachains/pallets/collective-content/Cargo.toml @@ -22,7 +22,7 @@ sp-std = { path = "../../../../substrate/primitives/std", default-features = fal sp-io = { path = "../../../../substrate/primitives/io", default-features = false } [features] -default = [ "std" ] +default = ["std"] runtime-benchmarks = [ "frame-benchmarking/runtime-benchmarks", "frame-support/runtime-benchmarks", diff --git a/cumulus/parachains/pallets/parachain-info/Cargo.toml b/cumulus/parachains/pallets/parachain-info/Cargo.toml index 727182dfb8e..b5b6ec304df 100644 --- a/cumulus/parachains/pallets/parachain-info/Cargo.toml +++ b/cumulus/parachains/pallets/parachain-info/Cargo.toml @@ -10,16 +10,16 @@ description = "Pallet to store the parachain ID" codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -frame-support = { path = "../../../../substrate/frame/support", default-features = false} -frame-system = { path = "../../../../substrate/frame/system", default-features = false} +frame-support = { path = "../../../../substrate/frame/support", default-features = false } +frame-system = { path = "../../../../substrate/frame/system", default-features = false } -sp-runtime = { path = "../../../../substrate/primitives/runtime", default-features = false} -sp-std = { path = "../../../../substrate/primitives/std", default-features = false} +sp-runtime = { path = "../../../../substrate/primitives/runtime", default-features = false } +sp-std = { path = "../../../../substrate/primitives/std", default-features = false } cumulus-primitives-core = { path = "../../../primitives/core", default-features = false } [features] -default = [ "std" ] +default = ["std"] std = [ "codec/std", "cumulus-primitives-core/std", diff --git 
a/cumulus/parachains/pallets/ping/Cargo.toml b/cumulus/parachains/pallets/ping/Cargo.toml index 0133befa855..c661e4260c6 100644 --- a/cumulus/parachains/pallets/ping/Cargo.toml +++ b/cumulus/parachains/pallets/ping/Cargo.toml @@ -10,18 +10,18 @@ description = "Ping Pallet for Cumulus XCM/UMP testing." codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -sp-std = { path = "../../../../substrate/primitives/std", default-features = false} -sp-runtime = { path = "../../../../substrate/primitives/runtime", default-features = false} -frame-support = { path = "../../../../substrate/frame/support", default-features = false} -frame-system = { path = "../../../../substrate/frame/system", default-features = false} +sp-std = { path = "../../../../substrate/primitives/std", default-features = false } +sp-runtime = { path = "../../../../substrate/primitives/runtime", default-features = false } +frame-support = { path = "../../../../substrate/frame/support", default-features = false } +frame-system = { path = "../../../../substrate/frame/system", default-features = false } -xcm = { package = "staging-xcm", path = "../../../../polkadot/xcm", default-features = false} +xcm = { package = "staging-xcm", path = "../../../../polkadot/xcm", default-features = false } cumulus-primitives-core = { path = "../../../primitives/core", default-features = false } cumulus-pallet-xcm = { path = "../../../pallets/xcm", default-features = false } [features] -default = [ "std" ] +default = ["std"] std = [ "codec/std", "cumulus-pallet-xcm/std", diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/Cargo.toml b/cumulus/parachains/runtimes/assets/asset-hub-rococo/Cargo.toml index f3bdabd9cef..15166de15ae 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-rococo/Cargo.toml +++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/Cargo.toml @@ -7,15 +7,10 @@ description = "Rococo variant of Asset Hub parachain runtime" license = "Apache-2.0" [dependencies] -codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = [ - "derive", - "max-encoded-len", -] } +codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive", "max-encoded-len"] } hex-literal = { version = "0.4.1" } log = { version = "0.4.20", default-features = false } -scale-info = { version = "2.10.0", default-features = false, features = [ - "derive", -] } +scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } smallvec = "1.11.0" # Substrate @@ -60,11 +55,7 @@ sp-transaction-pool = { path = "../../../../../substrate/primitives/transaction- sp-version = { path = "../../../../../substrate/primitives/version", default-features = false } sp-weights = { path = "../../../../../substrate/primitives/weights", default-features = false } # num-traits feature needed for dex integer sq root: -primitive-types = { version = "0.12.1", default-features = false, features = [ - "codec", - "scale-info", - "num-traits", -] } +primitive-types = { version = "0.12.1", default-features = false, features = ["codec", "num-traits", "scale-info"] } # Polkadot rococo-runtime-constants = { path = "../../../../../polkadot/runtime/rococo/constants", default-features = false } @@ -79,14 +70,10 @@ xcm-executor = { package = "staging-xcm-executor", path = "../../../../../polkad # Cumulus cumulus-pallet-aura-ext = { path = 
"../../../../pallets/aura-ext", default-features = false } -cumulus-pallet-parachain-system = { path = "../../../../pallets/parachain-system", default-features = false, features = [ - "parameterized-consensus-hook", -] } +cumulus-pallet-parachain-system = { path = "../../../../pallets/parachain-system", default-features = false, features = ["parameterized-consensus-hook"] } cumulus-pallet-session-benchmarking = { path = "../../../../pallets/session-benchmarking", default-features = false } cumulus-pallet-xcm = { path = "../../../../pallets/xcm", default-features = false } -cumulus-pallet-xcmp-queue = { path = "../../../../pallets/xcmp-queue", default-features = false, features = [ - "bridging", -] } +cumulus-pallet-xcmp-queue = { path = "../../../../pallets/xcmp-queue", default-features = false, features = ["bridging"] } cumulus-primitives-core = { path = "../../../../primitives/core", default-features = false } cumulus-primitives-utility = { path = "../../../../primitives/utility", default-features = false } pallet-collator-selection = { path = "../../../../pallets/collator-selection", default-features = false } @@ -108,7 +95,7 @@ asset-test-utils = { path = "../test-utils" } substrate-wasm-builder = { path = "../../../../../substrate/utils/wasm-builder", optional = true } [features] -default = [ "std" ] +default = ["std"] # When enabled the `state_version` is set to `1`. # This means that the chain will start using the new state format. The migration is lazy, so # it requires to write a storage value to use the new state format. To migrate all the other @@ -116,7 +103,7 @@ default = [ "std" ] # This pallet will migrate the entire state, controlled through some account. # # This feature should be removed when the main-net will be migrated. -state-trie-version-1 = [ "pallet-state-trie-migration" ] +state-trie-version-1 = ["pallet-state-trie-migration"] runtime-benchmarks = [ "assets-common/runtime-benchmarks", "cumulus-pallet-parachain-system/runtime-benchmarks", @@ -257,9 +244,9 @@ std = [ "xcm/std", ] -experimental = [ "pallet-aura/experimental" ] +experimental = ["pallet-aura/experimental"] # A feature that should be enabled when the runtime should be built for on-chain # deployment. This will disable stuff that shouldn't be part of the on-chain wasm # to make it smaller like logging for example. 
-on-chain-release-build = [ "sp-api/disable-logging" ] +on-chain-release-build = ["sp-api/disable-logging"] diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/Cargo.toml b/cumulus/parachains/runtimes/assets/asset-hub-westend/Cargo.toml index bc06c6fe9a1..2eb8e9a55d7 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-westend/Cargo.toml +++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/Cargo.toml @@ -52,7 +52,7 @@ sp-storage = { path = "../../../../../substrate/primitives/storage", default-fea sp-transaction-pool = { path = "../../../../../substrate/primitives/transaction-pool", default-features = false } sp-version = { path = "../../../../../substrate/primitives/version", default-features = false } # num-traits feature needed for dex integer sq root: -primitive-types = { version = "0.12.1", default-features = false, features = ["codec", "scale-info", "num-traits"] } +primitive-types = { version = "0.12.1", default-features = false, features = ["codec", "num-traits", "scale-info"] } # Polkadot pallet-xcm = { path = "../../../../../polkadot/xcm/pallet-xcm", default-features = false } @@ -94,7 +94,7 @@ asset-test-utils = { path = "../test-utils" } substrate-wasm-builder = { path = "../../../../../substrate/utils/wasm-builder", optional = true } [features] -default = [ "std" ] +default = ["std"] runtime-benchmarks = [ "assets-common/runtime-benchmarks", "cumulus-pallet-parachain-system/runtime-benchmarks", @@ -232,9 +232,9 @@ std = [ "xcm/std", ] -experimental = [ "pallet-aura/experimental" ] +experimental = ["pallet-aura/experimental"] # A feature that should be enabled when the runtime should be built for on-chain # deployment. This will disable stuff that shouldn't be part of the on-chain wasm # to make it smaller like logging for example. 
-on-chain-release-build = [ "sp-api/disable-logging" ] +on-chain-release-build = ["sp-api/disable-logging"] diff --git a/cumulus/parachains/runtimes/assets/common/Cargo.toml b/cumulus/parachains/runtimes/assets/common/Cargo.toml index 49fc2a0fa5e..e78d8331039 100644 --- a/cumulus/parachains/runtimes/assets/common/Cargo.toml +++ b/cumulus/parachains/runtimes/assets/common/Cargo.toml @@ -13,18 +13,18 @@ log = { version = "0.4.20", default-features = false } impl-trait-for-tuples = "0.2.2" # Substrate -frame-support = { path = "../../../../../substrate/frame/support", default-features = false} -sp-api = { path = "../../../../../substrate/primitives/api", default-features = false} -sp-std = { path = "../../../../../substrate/primitives/std", default-features = false} -sp-runtime = { path = "../../../../../substrate/primitives/runtime", default-features = false} +frame-support = { path = "../../../../../substrate/frame/support", default-features = false } +sp-api = { path = "../../../../../substrate/primitives/api", default-features = false } +sp-std = { path = "../../../../../substrate/primitives/std", default-features = false } +sp-runtime = { path = "../../../../../substrate/primitives/runtime", default-features = false } pallet-asset-conversion = { path = "../../../../../substrate/frame/asset-conversion", default-features = false } pallet-asset-tx-payment = { path = "../../../../../substrate/frame/transaction-payment/asset-tx-payment", default-features = false } # Polkadot -pallet-xcm = { path = "../../../../../polkadot/xcm/pallet-xcm", default-features = false} -xcm = { package = "staging-xcm", path = "../../../../../polkadot/xcm", default-features = false} -xcm-builder = { package = "staging-xcm-builder", path = "../../../../../polkadot/xcm/xcm-builder", default-features = false} -xcm-executor = { package = "staging-xcm-executor", path = "../../../../../polkadot/xcm/xcm-executor", default-features = false} +pallet-xcm = { path = "../../../../../polkadot/xcm/pallet-xcm", default-features = false } +xcm = { package = "staging-xcm", path = "../../../../../polkadot/xcm", default-features = false } +xcm-builder = { package = "staging-xcm-builder", path = "../../../../../polkadot/xcm/xcm-builder", default-features = false } +xcm-executor = { package = "staging-xcm-executor", path = "../../../../../polkadot/xcm/xcm-executor", default-features = false } # Cumulus parachains-common = { path = "../../../common", default-features = false } @@ -34,7 +34,7 @@ cumulus-primitives-core = { path = "../../../../primitives/core", default-featur substrate-wasm-builder = { path = "../../../../../substrate/utils/wasm-builder" } [features] -default = [ "std" ] +default = ["std"] std = [ "codec/std", "cumulus-primitives-core/std", diff --git a/cumulus/parachains/runtimes/assets/test-utils/Cargo.toml b/cumulus/parachains/runtimes/assets/test-utils/Cargo.toml index 1dc7cecbb62..c8ea4490d10 100644 --- a/cumulus/parachains/runtimes/assets/test-utils/Cargo.toml +++ b/cumulus/parachains/runtimes/assets/test-utils/Cargo.toml @@ -10,19 +10,19 @@ license = "Apache-2.0" codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive", "max-encoded-len"] } # Substrate -frame-support = { path = "../../../../../substrate/frame/support", default-features = false} -frame-system = { path = "../../../../../substrate/frame/system", default-features = false} -pallet-assets = { path = "../../../../../substrate/frame/assets", default-features = false} -pallet-balances = { path = 
"../../../../../substrate/frame/balances", default-features = false} -pallet-session = { path = "../../../../../substrate/frame/session", default-features = false} -sp-consensus-aura = { path = "../../../../../substrate/primitives/consensus/aura", default-features = false} -sp-io = { path = "../../../../../substrate/primitives/io", default-features = false} -sp-runtime = { path = "../../../../../substrate/primitives/runtime", default-features = false} -sp-std = { path = "../../../../../substrate/primitives/std", default-features = false} -sp-core = { path = "../../../../../substrate/primitives/core", default-features = false} +frame-support = { path = "../../../../../substrate/frame/support", default-features = false } +frame-system = { path = "../../../../../substrate/frame/system", default-features = false } +pallet-assets = { path = "../../../../../substrate/frame/assets", default-features = false } +pallet-balances = { path = "../../../../../substrate/frame/balances", default-features = false } +pallet-session = { path = "../../../../../substrate/frame/session", default-features = false } +sp-consensus-aura = { path = "../../../../../substrate/primitives/consensus/aura", default-features = false } +sp-io = { path = "../../../../../substrate/primitives/io", default-features = false } +sp-runtime = { path = "../../../../../substrate/primitives/runtime", default-features = false } +sp-std = { path = "../../../../../substrate/primitives/std", default-features = false } +sp-core = { path = "../../../../../substrate/primitives/core", default-features = false } # Cumulus -cumulus-pallet-parachain-system = { path = "../../../../pallets/parachain-system", default-features = false, features = ["parameterized-consensus-hook",] } +cumulus-pallet-parachain-system = { path = "../../../../pallets/parachain-system", default-features = false, features = ["parameterized-consensus-hook"] } cumulus-pallet-xcmp-queue = { path = "../../../../pallets/xcmp-queue", default-features = false } pallet-collator-selection = { path = "../../../../pallets/collator-selection", default-features = false } parachains-common = { path = "../../../common", default-features = false } @@ -50,7 +50,7 @@ hex-literal = "0.4.1" substrate-wasm-builder = { path = "../../../../../substrate/utils/wasm-builder" } [features] -default = [ "std" ] +default = ["std"] std = [ "assets-common/std", "codec/std", diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/Cargo.toml b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/Cargo.toml index 809e952ccb9..08758aaf7bb 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/Cargo.toml +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/Cargo.toml @@ -109,7 +109,7 @@ bridge-runtime-common = { path = "../../../../../bridges/bin/runtime-common", fe sp-keyring = { path = "../../../../../substrate/primitives/keyring" } [features] -default = [ "std" ] +default = ["std"] std = [ "bp-asset-hub-rococo/std", "bp-asset-hub-westend/std", @@ -245,9 +245,9 @@ try-runtime = [ "sp-runtime/try-runtime", ] -experimental = [ "pallet-aura/experimental" ] +experimental = ["pallet-aura/experimental"] # A feature that should be enabled when the runtime should be built for on-chain # deployment. This will disable stuff that shouldn't be part of the on-chain wasm # to make it smaller like logging for example. 
-on-chain-release-build = [ "sp-api/disable-logging" ] +on-chain-release-build = ["sp-api/disable-logging"] diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/Cargo.toml b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/Cargo.toml index 90d72ea3edc..c6ceb4b4c90 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/Cargo.toml +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/Cargo.toml @@ -98,7 +98,7 @@ bridge-runtime-common = { path = "../../../../../bridges/bin/runtime-common", fe sp-keyring = { path = "../../../../../substrate/primitives/keyring" } [features] -default = [ "std" ] +default = ["std"] std = [ "bp-asset-hub-westend/std", "bp-bridge-hub-rococo/std", @@ -233,9 +233,9 @@ try-runtime = [ "sp-runtime/try-runtime", ] -experimental = [ "pallet-aura/experimental" ] +experimental = ["pallet-aura/experimental"] # A feature that should be enabled when the runtime should be built for on-chain # deployment. This will disable stuff that shouldn't be part of the on-chain wasm # to make it smaller like logging for example. -on-chain-release-build = [ "sp-api/disable-logging" ] +on-chain-release-build = ["sp-api/disable-logging"] diff --git a/cumulus/parachains/runtimes/bridge-hubs/test-utils/Cargo.toml b/cumulus/parachains/runtimes/bridge-hubs/test-utils/Cargo.toml index bd171be53bf..7325a87165c 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/test-utils/Cargo.toml +++ b/cumulus/parachains/runtimes/bridge-hubs/test-utils/Cargo.toml @@ -12,21 +12,21 @@ log = { version = "0.4.20", default-features = false } # Substrate frame-benchmarking = { path = "../../../../../substrate/frame/benchmarking", default-features = false, optional = true } -frame-executive = { path = "../../../../../substrate/frame/executive", default-features = false} -frame-support = { path = "../../../../../substrate/frame/support", default-features = false} -frame-system = { path = "../../../../../substrate/frame/system", default-features = false} -sp-core = { path = "../../../../../substrate/primitives/core", default-features = false} -sp-io = { path = "../../../../../substrate/primitives/io", default-features = false} +frame-executive = { path = "../../../../../substrate/frame/executive", default-features = false } +frame-support = { path = "../../../../../substrate/frame/support", default-features = false } +frame-system = { path = "../../../../../substrate/frame/system", default-features = false } +sp-core = { path = "../../../../../substrate/primitives/core", default-features = false } +sp-io = { path = "../../../../../substrate/primitives/io", default-features = false } sp-keyring = { path = "../../../../../substrate/primitives/keyring" } -sp-runtime = { path = "../../../../../substrate/primitives/runtime", default-features = false} +sp-runtime = { path = "../../../../../substrate/primitives/runtime", default-features = false } sp-tracing = { path = "../../../../../substrate/primitives/tracing" } -pallet-balances = { path = "../../../../../substrate/frame/balances", default-features = false} -pallet-utility = { path = "../../../../../substrate/frame/utility", default-features = false} -pallet-session = { path = "../../../../../substrate/frame/session", default-features = false} +pallet-balances = { path = "../../../../../substrate/frame/balances", default-features = false } +pallet-utility = { path = "../../../../../substrate/frame/utility", default-features = false } +pallet-session = { path = "../../../../../substrate/frame/session", 
default-features = false } # Cumulus asset-test-utils = { path = "../../assets/test-utils" } -cumulus-pallet-parachain-system = { path = "../../../../pallets/parachain-system", default-features = false, features = ["parameterized-consensus-hook",] } +cumulus-pallet-parachain-system = { path = "../../../../pallets/parachain-system", default-features = false, features = ["parameterized-consensus-hook"] } cumulus-pallet-xcmp-queue = { path = "../../../../pallets/xcmp-queue", default-features = false } pallet-collator-selection = { path = "../../../../pallets/collator-selection", default-features = false } parachain-info = { package = "staging-parachain-info", path = "../../../pallets/parachain-info", default-features = false } @@ -34,11 +34,11 @@ parachains-common = { path = "../../../common", default-features = false } parachains-runtimes-test-utils = { path = "../../test-utils", default-features = false } # Polkadot -pallet-xcm = { path = "../../../../../polkadot/xcm/pallet-xcm", default-features = false} +pallet-xcm = { path = "../../../../../polkadot/xcm/pallet-xcm", default-features = false } pallet-xcm-benchmarks = { path = "../../../../../polkadot/xcm/pallet-xcm-benchmarks", default-features = false, optional = true } -xcm = { package = "staging-xcm", path = "../../../../../polkadot/xcm", default-features = false} -xcm-builder = { package = "staging-xcm-builder", path = "../../../../../polkadot/xcm/xcm-builder", default-features = false} -xcm-executor = { package = "staging-xcm-executor", path = "../../../../../polkadot/xcm/xcm-executor", default-features = false} +xcm = { package = "staging-xcm", path = "../../../../../polkadot/xcm", default-features = false } +xcm-builder = { package = "staging-xcm-builder", path = "../../../../../polkadot/xcm/xcm-builder", default-features = false } +xcm-executor = { package = "staging-xcm-executor", path = "../../../../../polkadot/xcm/xcm-executor", default-features = false } # Bridges bp-header-chain = { path = "../../../../../bridges/primitives/header-chain", default-features = false } @@ -55,7 +55,7 @@ pallet-bridge-relayers = { path = "../../../../../bridges/modules/relayers", def bridge-runtime-common = { path = "../../../../../bridges/bin/runtime-common", default-features = false } [features] -default = [ "std" ] +default = ["std"] std = [ "asset-test-utils/std", "bp-header-chain/std", diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/Cargo.toml b/cumulus/parachains/runtimes/collectives/collectives-westend/Cargo.toml index 1801488eeed..06f0a6239e3 100644 --- a/cumulus/parachains/runtimes/collectives/collectives-westend/Cargo.toml +++ b/cumulus/parachains/runtimes/collectives/collectives-westend/Cargo.toml @@ -7,15 +7,10 @@ license = "Apache-2.0" description = "Westend Collectives Parachain Runtime" [dependencies] -codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = [ - "derive", - "max-encoded-len", -] } +codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive", "max-encoded-len"] } hex-literal = { version = "0.4.1" } log = { version = "0.4.20", default-features = false } -scale-info = { version = "2.10.0", default-features = false, features = [ - "derive", -] } +scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } smallvec = "1.11.0" # Substrate @@ -91,7 +86,7 @@ substrate-wasm-builder = { path = "../../../../../substrate/utils/wasm-builder", sp-io = { path = 
"../../../../../substrate/primitives/io", features = ["std"] } [features] -default = [ "std" ] +default = ["std"] runtime-benchmarks = [ "cumulus-pallet-parachain-system/runtime-benchmarks", "cumulus-pallet-session-benchmarking/runtime-benchmarks", @@ -228,9 +223,9 @@ std = [ "xcm/std", ] -experimental = [ "pallet-aura/experimental" ] +experimental = ["pallet-aura/experimental"] # A feature that should be enabled when the runtime should be built for on-chain # deployment. This will disable stuff that shouldn't be part of the on-chain wasm # to make it smaller like logging for example. -on-chain-release-build = [ "sp-api/disable-logging" ] +on-chain-release-build = ["sp-api/disable-logging"] diff --git a/cumulus/parachains/runtimes/contracts/contracts-rococo/Cargo.toml b/cumulus/parachains/runtimes/contracts/contracts-rococo/Cargo.toml index f5626a90f36..ca45f4760d0 100644 --- a/cumulus/parachains/runtimes/contracts/contracts-rococo/Cargo.toml +++ b/cumulus/parachains/runtimes/contracts/contracts-rococo/Cargo.toml @@ -20,38 +20,38 @@ scale-info = { version = "2.10.0", default-features = false, features = ["derive smallvec = "1.11.0" # Substrate -sp-api = { path = "../../../../../substrate/primitives/api", default-features = false} -sp-block-builder = { path = "../../../../../substrate/primitives/block-builder", default-features = false} -sp-consensus-aura = { path = "../../../../../substrate/primitives/consensus/aura", default-features = false} -sp-core = { path = "../../../../../substrate/primitives/core", default-features = false} +sp-api = { path = "../../../../../substrate/primitives/api", default-features = false } +sp-block-builder = { path = "../../../../../substrate/primitives/block-builder", default-features = false } +sp-consensus-aura = { path = "../../../../../substrate/primitives/consensus/aura", default-features = false } +sp-core = { path = "../../../../../substrate/primitives/core", default-features = false } sp-genesis-builder = { path = "../../../../../substrate/primitives/genesis-builder", default-features = false } -sp-inherents = { path = "../../../../../substrate/primitives/inherents", default-features = false} -sp-offchain = { path = "../../../../../substrate/primitives/offchain", default-features = false} -sp-runtime = { path = "../../../../../substrate/primitives/runtime", default-features = false} -sp-session = { path = "../../../../../substrate/primitives/session", default-features = false} -sp-std = { path = "../../../../../substrate/primitives/std", default-features = false} -sp-storage = { path = "../../../../../substrate/primitives/storage", default-features = false} -sp-transaction-pool = { path = "../../../../../substrate/primitives/transaction-pool", default-features = false} -sp-version = { path = "../../../../../substrate/primitives/version", default-features = false} -frame-benchmarking = { path = "../../../../../substrate/frame/benchmarking", default-features = false, optional = true} -frame-try-runtime = { path = "../../../../../substrate/frame/try-runtime", default-features = false, optional = true} -frame-executive = { path = "../../../../../substrate/frame/executive", default-features = false} -frame-support = { path = "../../../../../substrate/frame/support", default-features = false} -frame-system = { path = "../../../../../substrate/frame/system", default-features = false} -frame-system-benchmarking = { path = "../../../../../substrate/frame/system/benchmarking", default-features = false, optional = true} -frame-system-rpc-runtime-api = { path = 
"../../../../../substrate/frame/system/rpc/runtime-api", default-features = false} -pallet-aura = { path = "../../../../../substrate/frame/aura", default-features = false} -pallet-authorship = { path = "../../../../../substrate/frame/authorship", default-features = false} -pallet-insecure-randomness-collective-flip = { path = "../../../../../substrate/frame/insecure-randomness-collective-flip", default-features = false} -pallet-balances = { path = "../../../../../substrate/frame/balances", default-features = false} -pallet-multisig = { path = "../../../../../substrate/frame/multisig", default-features = false} -pallet-session = { path = "../../../../../substrate/frame/session", default-features = false} -pallet-timestamp = { path = "../../../../../substrate/frame/timestamp", default-features = false} -pallet-transaction-payment = { path = "../../../../../substrate/frame/transaction-payment", default-features = false} -pallet-transaction-payment-rpc-runtime-api = { path = "../../../../../substrate/frame/transaction-payment/rpc/runtime-api", default-features = false} -pallet-utility = { path = "../../../../../substrate/frame/utility", default-features = false} -pallet-sudo = { path = "../../../../../substrate/frame/sudo", default-features = false} -pallet-contracts = { path = "../../../../../substrate/frame/contracts", default-features = false} +sp-inherents = { path = "../../../../../substrate/primitives/inherents", default-features = false } +sp-offchain = { path = "../../../../../substrate/primitives/offchain", default-features = false } +sp-runtime = { path = "../../../../../substrate/primitives/runtime", default-features = false } +sp-session = { path = "../../../../../substrate/primitives/session", default-features = false } +sp-std = { path = "../../../../../substrate/primitives/std", default-features = false } +sp-storage = { path = "../../../../../substrate/primitives/storage", default-features = false } +sp-transaction-pool = { path = "../../../../../substrate/primitives/transaction-pool", default-features = false } +sp-version = { path = "../../../../../substrate/primitives/version", default-features = false } +frame-benchmarking = { path = "../../../../../substrate/frame/benchmarking", default-features = false, optional = true } +frame-try-runtime = { path = "../../../../../substrate/frame/try-runtime", default-features = false, optional = true } +frame-executive = { path = "../../../../../substrate/frame/executive", default-features = false } +frame-support = { path = "../../../../../substrate/frame/support", default-features = false } +frame-system = { path = "../../../../../substrate/frame/system", default-features = false } +frame-system-benchmarking = { path = "../../../../../substrate/frame/system/benchmarking", default-features = false, optional = true } +frame-system-rpc-runtime-api = { path = "../../../../../substrate/frame/system/rpc/runtime-api", default-features = false } +pallet-aura = { path = "../../../../../substrate/frame/aura", default-features = false } +pallet-authorship = { path = "../../../../../substrate/frame/authorship", default-features = false } +pallet-insecure-randomness-collective-flip = { path = "../../../../../substrate/frame/insecure-randomness-collective-flip", default-features = false } +pallet-balances = { path = "../../../../../substrate/frame/balances", default-features = false } +pallet-multisig = { path = "../../../../../substrate/frame/multisig", default-features = false } +pallet-session = { path = 
"../../../../../substrate/frame/session", default-features = false } +pallet-timestamp = { path = "../../../../../substrate/frame/timestamp", default-features = false } +pallet-transaction-payment = { path = "../../../../../substrate/frame/transaction-payment", default-features = false } +pallet-transaction-payment-rpc-runtime-api = { path = "../../../../../substrate/frame/transaction-payment/rpc/runtime-api", default-features = false } +pallet-utility = { path = "../../../../../substrate/frame/utility", default-features = false } +pallet-sudo = { path = "../../../../../substrate/frame/sudo", default-features = false } +pallet-contracts = { path = "../../../../../substrate/frame/contracts", default-features = false } # Polkadot pallet-xcm = { path = "../../../../../polkadot/xcm/pallet-xcm", default-features = false } @@ -67,7 +67,7 @@ xcm-executor = { package = "staging-xcm-executor", path = "../../../../../polkad cumulus-pallet-aura-ext = { path = "../../../../pallets/aura-ext", default-features = false } pallet-message-queue = { path = "../../../../../substrate/frame/message-queue", default-features = false } cumulus-pallet-dmp-queue = { path = "../../../../pallets/dmp-queue", default-features = false } -cumulus-pallet-parachain-system = { path = "../../../../pallets/parachain-system", default-features = false, features = ["parameterized-consensus-hook",] } +cumulus-pallet-parachain-system = { path = "../../../../pallets/parachain-system", default-features = false, features = ["parameterized-consensus-hook"] } cumulus-pallet-session-benchmarking = { path = "../../../../pallets/session-benchmarking", default-features = false } cumulus-pallet-xcm = { path = "../../../../pallets/xcm", default-features = false } cumulus-pallet-xcmp-queue = { path = "../../../../pallets/xcmp-queue", default-features = false } @@ -78,7 +78,7 @@ parachain-info = { package = "staging-parachain-info", path = "../../../pallets/ parachains-common = { path = "../../../common", default-features = false } [features] -default = [ "std" ] +default = ["std"] std = [ "codec/std", "cumulus-pallet-aura-ext/std", @@ -196,9 +196,9 @@ try-runtime = [ "sp-runtime/try-runtime", ] -experimental = [ "pallet-aura/experimental" ] +experimental = ["pallet-aura/experimental"] # A feature that should be enabled when the runtime should be built for on-chain # deployment. This will disable stuff that shouldn't be part of the on-chain wasm # to make it smaller like logging for example. 
-on-chain-release-build = [ "sp-api/disable-logging" ] +on-chain-release-build = ["sp-api/disable-logging"] diff --git a/cumulus/parachains/runtimes/glutton/glutton-westend/Cargo.toml b/cumulus/parachains/runtimes/glutton/glutton-westend/Cargo.toml index a30cdf35769..b8efc4fbbcf 100644 --- a/cumulus/parachains/runtimes/glutton/glutton-westend/Cargo.toml +++ b/cumulus/parachains/runtimes/glutton/glutton-westend/Cargo.toml @@ -11,40 +11,40 @@ codec = { package = "parity-scale-codec", version = "3.0.0", default-features = scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } # Substrate -frame-benchmarking = { path = "../../../../../substrate/frame/benchmarking", default-features = false, optional = true} -frame-executive = { path = "../../../../../substrate/frame/executive", default-features = false} -frame-support = { path = "../../../../../substrate/frame/support", default-features = false} -frame-system = { path = "../../../../../substrate/frame/system", default-features = false} -frame-system-rpc-runtime-api = { path = "../../../../../substrate/frame/system/rpc/runtime-api", default-features = false} -frame-system-benchmarking = { path = "../../../../../substrate/frame/system/benchmarking", default-features = false, optional = true} -frame-try-runtime = { path = "../../../../../substrate/frame/try-runtime", default-features = false, optional = true} -pallet-aura = { path = "../../../../../substrate/frame/aura", default-features = false} -pallet-glutton = { path = "../../../../../substrate/frame/glutton", default-features = false, optional = true} -pallet-sudo = { path = "../../../../../substrate/frame/sudo", default-features = false, optional = true} +frame-benchmarking = { path = "../../../../../substrate/frame/benchmarking", default-features = false, optional = true } +frame-executive = { path = "../../../../../substrate/frame/executive", default-features = false } +frame-support = { path = "../../../../../substrate/frame/support", default-features = false } +frame-system = { path = "../../../../../substrate/frame/system", default-features = false } +frame-system-rpc-runtime-api = { path = "../../../../../substrate/frame/system/rpc/runtime-api", default-features = false } +frame-system-benchmarking = { path = "../../../../../substrate/frame/system/benchmarking", default-features = false, optional = true } +frame-try-runtime = { path = "../../../../../substrate/frame/try-runtime", default-features = false, optional = true } +pallet-aura = { path = "../../../../../substrate/frame/aura", default-features = false } +pallet-glutton = { path = "../../../../../substrate/frame/glutton", default-features = false, optional = true } +pallet-sudo = { path = "../../../../../substrate/frame/sudo", default-features = false, optional = true } pallet-timestamp = { path = "../../../../../substrate/frame/timestamp", default-features = false } -sp-api = { path = "../../../../../substrate/primitives/api", default-features = false} -sp-block-builder = { path = "../../../../../substrate/primitives/block-builder", default-features = false} +sp-api = { path = "../../../../../substrate/primitives/api", default-features = false } +sp-block-builder = { path = "../../../../../substrate/primitives/block-builder", default-features = false } sp-consensus-aura = { path = "../../../../../substrate/primitives/consensus/aura", default-features = false } -sp-core = { path = "../../../../../substrate/primitives/core", default-features = false} +sp-core = { path = 
"../../../../../substrate/primitives/core", default-features = false } sp-genesis-builder = { path = "../../../../../substrate/primitives/genesis-builder", default-features = false } -sp-inherents = { path = "../../../../../substrate/primitives/inherents", default-features = false} +sp-inherents = { path = "../../../../../substrate/primitives/inherents", default-features = false } pallet-message-queue = { path = "../../../../../substrate/frame/message-queue", default-features = false } -sp-offchain = { path = "../../../../../substrate/primitives/offchain", default-features = false} -sp-runtime = { path = "../../../../../substrate/primitives/runtime", default-features = false} -sp-session = { path = "../../../../../substrate/primitives/session", default-features = false} -sp-std = { path = "../../../../../substrate/primitives/std", default-features = false} -sp-storage = { path = "../../../../../substrate/primitives/storage", default-features = false} -sp-transaction-pool = { path = "../../../../../substrate/primitives/transaction-pool", default-features = false} -sp-version = { path = "../../../../../substrate/primitives/version", default-features = false} +sp-offchain = { path = "../../../../../substrate/primitives/offchain", default-features = false } +sp-runtime = { path = "../../../../../substrate/primitives/runtime", default-features = false } +sp-session = { path = "../../../../../substrate/primitives/session", default-features = false } +sp-std = { path = "../../../../../substrate/primitives/std", default-features = false } +sp-storage = { path = "../../../../../substrate/primitives/storage", default-features = false } +sp-transaction-pool = { path = "../../../../../substrate/primitives/transaction-pool", default-features = false } +sp-version = { path = "../../../../../substrate/primitives/version", default-features = false } # Polkadot -xcm = { package = "staging-xcm", path = "../../../../../polkadot/xcm", default-features = false} -xcm-builder = { package = "staging-xcm-builder", path = "../../../../../polkadot/xcm/xcm-builder", default-features = false} -xcm-executor = { package = "staging-xcm-executor", path = "../../../../../polkadot/xcm/xcm-executor", default-features = false} +xcm = { package = "staging-xcm", path = "../../../../../polkadot/xcm", default-features = false } +xcm-builder = { package = "staging-xcm-builder", path = "../../../../../polkadot/xcm/xcm-builder", default-features = false } +xcm-executor = { package = "staging-xcm-executor", path = "../../../../../polkadot/xcm/xcm-executor", default-features = false } # Cumulus cumulus-pallet-aura-ext = { path = "../../../../pallets/aura-ext", default-features = false } -cumulus-pallet-parachain-system = { path = "../../../../pallets/parachain-system", default-features = false, features = ["parameterized-consensus-hook",] } +cumulus-pallet-parachain-system = { path = "../../../../pallets/parachain-system", default-features = false, features = ["parameterized-consensus-hook"] } cumulus-pallet-xcm = { path = "../../../../pallets/xcm", default-features = false } cumulus-primitives-aura = { path = "../../../../primitives/aura", default-features = false } cumulus-primitives-core = { path = "../../../../primitives/core", default-features = false } @@ -56,7 +56,7 @@ parachains-common = { path = "../../../common", default-features = false } substrate-wasm-builder = { path = "../../../../../substrate/utils/wasm-builder" } [features] -default = [ "std" ] +default = ["std"] runtime-benchmarks = [ 
"cumulus-pallet-parachain-system/runtime-benchmarks", "cumulus-primitives-core/runtime-benchmarks", @@ -130,9 +130,9 @@ try-runtime = [ "sp-runtime/try-runtime", ] -experimental = [ "pallet-aura/experimental" ] +experimental = ["pallet-aura/experimental"] # A feature that should be enabled when the runtime should be built for on-chain # deployment. This will disable stuff that shouldn't be part of the on-chain wasm # to make it smaller like logging for example. -on-chain-release-build = [ "sp-api/disable-logging" ] +on-chain-release-build = ["sp-api/disable-logging"] diff --git a/cumulus/parachains/runtimes/starters/seedling/Cargo.toml b/cumulus/parachains/runtimes/starters/seedling/Cargo.toml index 65ca58ac8b3..23312172bd7 100644 --- a/cumulus/parachains/runtimes/starters/seedling/Cargo.toml +++ b/cumulus/parachains/runtimes/starters/seedling/Cargo.toml @@ -11,29 +11,29 @@ codec = { package = "parity-scale-codec", version = "3.0.0", default-features = scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } # Substrate -frame-executive = { path = "../../../../../substrate/frame/executive", default-features = false} -frame-support = { path = "../../../../../substrate/frame/support", default-features = false} -frame-system = { path = "../../../../../substrate/frame/system", default-features = false} -pallet-aura = { path = "../../../../../substrate/frame/aura", default-features = false} -pallet-balances = { path = "../../../../../substrate/frame/balances", default-features = false} -pallet-sudo = { path = "../../../../../substrate/frame/sudo", default-features = false} +frame-executive = { path = "../../../../../substrate/frame/executive", default-features = false } +frame-support = { path = "../../../../../substrate/frame/support", default-features = false } +frame-system = { path = "../../../../../substrate/frame/system", default-features = false } +pallet-aura = { path = "../../../../../substrate/frame/aura", default-features = false } +pallet-balances = { path = "../../../../../substrate/frame/balances", default-features = false } +pallet-sudo = { path = "../../../../../substrate/frame/sudo", default-features = false } pallet-timestamp = { path = "../../../../../substrate/frame/timestamp", default-features = false } -sp-api = { path = "../../../../../substrate/primitives/api", default-features = false} -sp-block-builder = { path = "../../../../../substrate/primitives/block-builder", default-features = false} +sp-api = { path = "../../../../../substrate/primitives/api", default-features = false } +sp-block-builder = { path = "../../../../../substrate/primitives/block-builder", default-features = false } sp-consensus-aura = { path = "../../../../../substrate/primitives/consensus/aura", default-features = false } -sp-core = { path = "../../../../../substrate/primitives/core", default-features = false} +sp-core = { path = "../../../../../substrate/primitives/core", default-features = false } sp-genesis-builder = { path = "../../../../../substrate/primitives/genesis-builder", default-features = false } -sp-inherents = { path = "../../../../../substrate/primitives/inherents", default-features = false} -sp-offchain = { path = "../../../../../substrate/primitives/offchain", default-features = false} -sp-runtime = { path = "../../../../../substrate/primitives/runtime", default-features = false} -sp-session = { path = "../../../../../substrate/primitives/session", default-features = false} -sp-std = { path = "../../../../../substrate/primitives/std", default-features = 
false} -sp-transaction-pool = { path = "../../../../../substrate/primitives/transaction-pool", default-features = false} -sp-version = { path = "../../../../../substrate/primitives/version", default-features = false} +sp-inherents = { path = "../../../../../substrate/primitives/inherents", default-features = false } +sp-offchain = { path = "../../../../../substrate/primitives/offchain", default-features = false } +sp-runtime = { path = "../../../../../substrate/primitives/runtime", default-features = false } +sp-session = { path = "../../../../../substrate/primitives/session", default-features = false } +sp-std = { path = "../../../../../substrate/primitives/std", default-features = false } +sp-transaction-pool = { path = "../../../../../substrate/primitives/transaction-pool", default-features = false } +sp-version = { path = "../../../../../substrate/primitives/version", default-features = false } # Cumulus cumulus-pallet-aura-ext = { path = "../../../../pallets/aura-ext", default-features = false } -cumulus-pallet-parachain-system = { path = "../../../../pallets/parachain-system", default-features = false, features = ["parameterized-consensus-hook",] } +cumulus-pallet-parachain-system = { path = "../../../../pallets/parachain-system", default-features = false, features = ["parameterized-consensus-hook"] } cumulus-pallet-solo-to-para = { path = "../../../../pallets/solo-to-para", default-features = false } cumulus-primitives-core = { path = "../../../../primitives/core", default-features = false } cumulus-primitives-timestamp = { path = "../../../../primitives/timestamp", default-features = false } @@ -44,7 +44,7 @@ parachains-common = { path = "../../../common", default-features = false } substrate-wasm-builder = { path = "../../../../../substrate/utils/wasm-builder", optional = true } [features] -default = [ "std" ] +default = ["std"] std = [ "codec/std", "cumulus-pallet-aura-ext/std", @@ -77,4 +77,4 @@ std = [ "substrate-wasm-builder", ] -experimental = [ "pallet-aura/experimental" ] +experimental = ["pallet-aura/experimental"] diff --git a/cumulus/parachains/runtimes/starters/shell/Cargo.toml b/cumulus/parachains/runtimes/starters/shell/Cargo.toml index 77449f977bb..a285d3d977e 100644 --- a/cumulus/parachains/runtimes/starters/shell/Cargo.toml +++ b/cumulus/parachains/runtimes/starters/shell/Cargo.toml @@ -11,34 +11,34 @@ codec = { package = "parity-scale-codec", version = "3.0.0", default-features = scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } # Substrate -frame-executive = { path = "../../../../../substrate/frame/executive", default-features = false} -frame-support = { path = "../../../../../substrate/frame/support", default-features = false} -frame-system = { path = "../../../../../substrate/frame/system", default-features = false} +frame-executive = { path = "../../../../../substrate/frame/executive", default-features = false } +frame-support = { path = "../../../../../substrate/frame/support", default-features = false } +frame-system = { path = "../../../../../substrate/frame/system", default-features = false } frame-try-runtime = { path = "../../../../../substrate/frame/try-runtime", default-features = false, optional = true } -pallet-aura = { path = "../../../../../substrate/frame/aura", default-features = false} +pallet-aura = { path = "../../../../../substrate/frame/aura", default-features = false } pallet-timestamp = { path = "../../../../../substrate/frame/timestamp", default-features = false } -sp-api = { path = 
"../../../../../substrate/primitives/api", default-features = false} -sp-block-builder = { path = "../../../../../substrate/primitives/block-builder", default-features = false} +sp-api = { path = "../../../../../substrate/primitives/api", default-features = false } +sp-block-builder = { path = "../../../../../substrate/primitives/block-builder", default-features = false } sp-consensus-aura = { path = "../../../../../substrate/primitives/consensus/aura", default-features = false } -sp-core = { path = "../../../../../substrate/primitives/core", default-features = false} +sp-core = { path = "../../../../../substrate/primitives/core", default-features = false } sp-genesis-builder = { path = "../../../../../substrate/primitives/genesis-builder", default-features = false } -sp-inherents = { path = "../../../../../substrate/primitives/inherents", default-features = false} -sp-offchain = { path = "../../../../../substrate/primitives/offchain", default-features = false} -sp-runtime = { path = "../../../../../substrate/primitives/runtime", default-features = false} -sp-session = { path = "../../../../../substrate/primitives/session", default-features = false} -sp-std = { path = "../../../../../substrate/primitives/std", default-features = false} -sp-transaction-pool = { path = "../../../../../substrate/primitives/transaction-pool", default-features = false} -sp-version = { path = "../../../../../substrate/primitives/version", default-features = false} +sp-inherents = { path = "../../../../../substrate/primitives/inherents", default-features = false } +sp-offchain = { path = "../../../../../substrate/primitives/offchain", default-features = false } +sp-runtime = { path = "../../../../../substrate/primitives/runtime", default-features = false } +sp-session = { path = "../../../../../substrate/primitives/session", default-features = false } +sp-std = { path = "../../../../../substrate/primitives/std", default-features = false } +sp-transaction-pool = { path = "../../../../../substrate/primitives/transaction-pool", default-features = false } +sp-version = { path = "../../../../../substrate/primitives/version", default-features = false } pallet-message-queue = { path = "../../../../../substrate/frame/message-queue", default-features = false } # Polkadot -xcm = { package = "staging-xcm", path = "../../../../../polkadot/xcm", default-features = false} -xcm-builder = { package = "staging-xcm-builder", path = "../../../../../polkadot/xcm/xcm-builder", default-features = false} -xcm-executor = { package = "staging-xcm-executor", path = "../../../../../polkadot/xcm/xcm-executor", default-features = false} +xcm = { package = "staging-xcm", path = "../../../../../polkadot/xcm", default-features = false } +xcm-builder = { package = "staging-xcm-builder", path = "../../../../../polkadot/xcm/xcm-builder", default-features = false } +xcm-executor = { package = "staging-xcm-executor", path = "../../../../../polkadot/xcm/xcm-executor", default-features = false } # Cumulus cumulus-pallet-aura-ext = { path = "../../../../pallets/aura-ext", default-features = false } -cumulus-pallet-parachain-system = { path = "../../../../pallets/parachain-system", default-features = false, features = ["parameterized-consensus-hook",] } +cumulus-pallet-parachain-system = { path = "../../../../pallets/parachain-system", default-features = false, features = ["parameterized-consensus-hook"] } cumulus-pallet-xcm = { path = "../../../../pallets/xcm", default-features = false } cumulus-primitives-core = { path = "../../../../primitives/core", 
default-features = false } parachain-info = { package = "staging-parachain-info", path = "../../../pallets/parachain-info", default-features = false } @@ -48,7 +48,7 @@ parachains-common = { path = "../../../common", default-features = false } substrate-wasm-builder = { path = "../../../../../substrate/utils/wasm-builder", optional = true } [features] -default = [ "std" ] +default = ["std"] std = [ "codec/std", "cumulus-pallet-aura-ext/std", @@ -97,4 +97,4 @@ try-runtime = [ "sp-runtime/try-runtime", ] -experimental = [ "pallet-aura/experimental" ] +experimental = ["pallet-aura/experimental"] diff --git a/cumulus/parachains/runtimes/test-utils/Cargo.toml b/cumulus/parachains/runtimes/test-utils/Cargo.toml index 62bce02bd35..b4453e7f1a6 100644 --- a/cumulus/parachains/runtimes/test-utils/Cargo.toml +++ b/cumulus/parachains/runtimes/test-utils/Cargo.toml @@ -10,34 +10,34 @@ license = "Apache-2.0" codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive", "max-encoded-len"] } # Substrate -frame-support = { path = "../../../../substrate/frame/support", default-features = false} -frame-system = { path = "../../../../substrate/frame/system", default-features = false} -pallet-assets = { path = "../../../../substrate/frame/assets", default-features = false} -pallet-balances = { path = "../../../../substrate/frame/balances", default-features = false} -pallet-session = { path = "../../../../substrate/frame/session", default-features = false} -sp-consensus-aura = { path = "../../../../substrate/primitives/consensus/aura", default-features = false} -sp-io = { path = "../../../../substrate/primitives/io", default-features = false} -sp-runtime = { path = "../../../../substrate/primitives/runtime", default-features = false} -sp-std = { path = "../../../../substrate/primitives/std", default-features = false} +frame-support = { path = "../../../../substrate/frame/support", default-features = false } +frame-system = { path = "../../../../substrate/frame/system", default-features = false } +pallet-assets = { path = "../../../../substrate/frame/assets", default-features = false } +pallet-balances = { path = "../../../../substrate/frame/balances", default-features = false } +pallet-session = { path = "../../../../substrate/frame/session", default-features = false } +sp-consensus-aura = { path = "../../../../substrate/primitives/consensus/aura", default-features = false } +sp-io = { path = "../../../../substrate/primitives/io", default-features = false } +sp-runtime = { path = "../../../../substrate/primitives/runtime", default-features = false } +sp-std = { path = "../../../../substrate/primitives/std", default-features = false } sp-tracing = { path = "../../../../substrate/primitives/tracing" } -sp-core = { path = "../../../../substrate/primitives/core", default-features = false} +sp-core = { path = "../../../../substrate/primitives/core", default-features = false } # Cumulus -cumulus-pallet-parachain-system = { path = "../../../pallets/parachain-system", default-features = false, features = ["parameterized-consensus-hook",] } +cumulus-pallet-parachain-system = { path = "../../../pallets/parachain-system", default-features = false, features = ["parameterized-consensus-hook"] } cumulus-pallet-xcmp-queue = { path = "../../../pallets/xcmp-queue", default-features = false } pallet-collator-selection = { path = "../../../pallets/collator-selection", default-features = false } parachains-common = { path = "../../common", default-features = false } -parachain-info = 
{package = "staging-parachain-info", path = "../../pallets/parachain-info", default-features = false } +parachain-info = { package = "staging-parachain-info", path = "../../pallets/parachain-info", default-features = false } assets-common = { path = "../assets/common", default-features = false } cumulus-primitives-core = { path = "../../../primitives/core", default-features = false } cumulus-primitives-parachain-inherent = { path = "../../../primitives/parachain-inherent", default-features = false } cumulus-test-relay-sproof-builder = { path = "../../../test/relay-sproof-builder", default-features = false } # Polkadot -xcm = { package = "staging-xcm", path = "../../../../polkadot/xcm", default-features = false} -xcm-executor = { package = "staging-xcm-executor", path = "../../../../polkadot/xcm/xcm-executor", default-features = false} -pallet-xcm = { path = "../../../../polkadot/xcm/pallet-xcm", default-features = false} -polkadot-parachain-primitives = { path = "../../../../polkadot/parachain", default-features = false} +xcm = { package = "staging-xcm", path = "../../../../polkadot/xcm", default-features = false } +xcm-executor = { package = "staging-xcm-executor", path = "../../../../polkadot/xcm/xcm-executor", default-features = false } +pallet-xcm = { path = "../../../../polkadot/xcm/pallet-xcm", default-features = false } +polkadot-parachain-primitives = { path = "../../../../polkadot/parachain", default-features = false } [dev-dependencies] hex-literal = "0.4.1" @@ -46,7 +46,7 @@ hex-literal = "0.4.1" substrate-wasm-builder = { path = "../../../../substrate/utils/wasm-builder" } [features] -default = [ "std" ] +default = ["std"] std = [ "assets-common/std", "codec/std", diff --git a/cumulus/parachains/runtimes/testing/penpal/Cargo.toml b/cumulus/parachains/runtimes/testing/penpal/Cargo.toml index fb66275b025..6e044319d2e 100644 --- a/cumulus/parachains/runtimes/testing/penpal/Cargo.toml +++ b/cumulus/parachains/runtimes/testing/penpal/Cargo.toml @@ -22,52 +22,52 @@ scale-info = { version = "2.10.0", default-features = false, features = ["derive smallvec = "1.11.0" # Substrate -frame-benchmarking = { path = "../../../../../substrate/frame/benchmarking", default-features = false, optional = true} -frame-executive = { path = "../../../../../substrate/frame/executive", default-features = false} -frame-support = { path = "../../../../../substrate/frame/support", default-features = false} -frame-system = { path = "../../../../../substrate/frame/system", default-features = false} -frame-system-benchmarking = { path = "../../../../../substrate/frame/system/benchmarking", default-features = false, optional = true} -frame-system-rpc-runtime-api = { path = "../../../../../substrate/frame/system/rpc/runtime-api", default-features = false} -frame-try-runtime = { path = "../../../../../substrate/frame/try-runtime", default-features = false, optional = true} -pallet-aura = { path = "../../../../../substrate/frame/aura", default-features = false} -pallet-authorship = { path = "../../../../../substrate/frame/authorship", default-features = false} -pallet-balances = { path = "../../../../../substrate/frame/balances", default-features = false} -pallet-session = { path = "../../../../../substrate/frame/session", default-features = false} -pallet-sudo = { path = "../../../../../substrate/frame/sudo", default-features = false} -pallet-timestamp = { path = "../../../../../substrate/frame/timestamp", default-features = false} -pallet-transaction-payment = { path = 
"../../../../../substrate/frame/transaction-payment", default-features = false} -pallet-transaction-payment-rpc-runtime-api = { path = "../../../../../substrate/frame/transaction-payment/rpc/runtime-api", default-features = false} -pallet-asset-tx-payment = { path = "../../../../../substrate/frame/transaction-payment/asset-tx-payment", default-features = false} -pallet-assets = { path = "../../../../../substrate/frame/assets", default-features = false} -sp-api = { path = "../../../../../substrate/primitives/api", default-features = false} -sp-block-builder = { path = "../../../../../substrate/primitives/block-builder", default-features = false} -sp-consensus-aura = { path = "../../../../../substrate/primitives/consensus/aura", default-features = false} -sp-core = { path = "../../../../../substrate/primitives/core", default-features = false} +frame-benchmarking = { path = "../../../../../substrate/frame/benchmarking", default-features = false, optional = true } +frame-executive = { path = "../../../../../substrate/frame/executive", default-features = false } +frame-support = { path = "../../../../../substrate/frame/support", default-features = false } +frame-system = { path = "../../../../../substrate/frame/system", default-features = false } +frame-system-benchmarking = { path = "../../../../../substrate/frame/system/benchmarking", default-features = false, optional = true } +frame-system-rpc-runtime-api = { path = "../../../../../substrate/frame/system/rpc/runtime-api", default-features = false } +frame-try-runtime = { path = "../../../../../substrate/frame/try-runtime", default-features = false, optional = true } +pallet-aura = { path = "../../../../../substrate/frame/aura", default-features = false } +pallet-authorship = { path = "../../../../../substrate/frame/authorship", default-features = false } +pallet-balances = { path = "../../../../../substrate/frame/balances", default-features = false } +pallet-session = { path = "../../../../../substrate/frame/session", default-features = false } +pallet-sudo = { path = "../../../../../substrate/frame/sudo", default-features = false } +pallet-timestamp = { path = "../../../../../substrate/frame/timestamp", default-features = false } +pallet-transaction-payment = { path = "../../../../../substrate/frame/transaction-payment", default-features = false } +pallet-transaction-payment-rpc-runtime-api = { path = "../../../../../substrate/frame/transaction-payment/rpc/runtime-api", default-features = false } +pallet-asset-tx-payment = { path = "../../../../../substrate/frame/transaction-payment/asset-tx-payment", default-features = false } +pallet-assets = { path = "../../../../../substrate/frame/assets", default-features = false } +sp-api = { path = "../../../../../substrate/primitives/api", default-features = false } +sp-block-builder = { path = "../../../../../substrate/primitives/block-builder", default-features = false } +sp-consensus-aura = { path = "../../../../../substrate/primitives/consensus/aura", default-features = false } +sp-core = { path = "../../../../../substrate/primitives/core", default-features = false } sp-genesis-builder = { path = "../../../../../substrate/primitives/genesis-builder", default-features = false } -sp-inherents = { path = "../../../../../substrate/primitives/inherents", default-features = false} -sp-offchain = { path = "../../../../../substrate/primitives/offchain", default-features = false} -sp-runtime = { path = "../../../../../substrate/primitives/runtime", default-features = false} -sp-session = { path = 
"../../../../../substrate/primitives/session", default-features = false} -sp-std = { path = "../../../../../substrate/primitives/std", default-features = false} -sp-storage = { path = "../../../../../substrate/primitives/storage", default-features = false} -sp-transaction-pool = { path = "../../../../../substrate/primitives/transaction-pool", default-features = false} -sp-version = { path = "../../../../../substrate/primitives/version", default-features = false} +sp-inherents = { path = "../../../../../substrate/primitives/inherents", default-features = false } +sp-offchain = { path = "../../../../../substrate/primitives/offchain", default-features = false } +sp-runtime = { path = "../../../../../substrate/primitives/runtime", default-features = false } +sp-session = { path = "../../../../../substrate/primitives/session", default-features = false } +sp-std = { path = "../../../../../substrate/primitives/std", default-features = false } +sp-storage = { path = "../../../../../substrate/primitives/storage", default-features = false } +sp-transaction-pool = { path = "../../../../../substrate/primitives/transaction-pool", default-features = false } +sp-version = { path = "../../../../../substrate/primitives/version", default-features = false } # Polkadot -polkadot-primitives = { path = "../../../../../polkadot/primitives", default-features = false} -pallet-xcm = { path = "../../../../../polkadot/xcm/pallet-xcm", default-features = false} -polkadot-parachain-primitives = { path = "../../../../../polkadot/parachain", default-features = false} -polkadot-runtime-common = { path = "../../../../../polkadot/runtime/common", default-features = false} -xcm = { package = "staging-xcm", path = "../../../../../polkadot/xcm", default-features = false} -xcm-builder = { package = "staging-xcm-builder", path = "../../../../../polkadot/xcm/xcm-builder", default-features = false} -xcm-executor = { package = "staging-xcm-executor", path = "../../../../../polkadot/xcm/xcm-executor", default-features = false} +polkadot-primitives = { path = "../../../../../polkadot/primitives", default-features = false } +pallet-xcm = { path = "../../../../../polkadot/xcm/pallet-xcm", default-features = false } +polkadot-parachain-primitives = { path = "../../../../../polkadot/parachain", default-features = false } +polkadot-runtime-common = { path = "../../../../../polkadot/runtime/common", default-features = false } +xcm = { package = "staging-xcm", path = "../../../../../polkadot/xcm", default-features = false } +xcm-builder = { package = "staging-xcm-builder", path = "../../../../../polkadot/xcm/xcm-builder", default-features = false } +xcm-executor = { package = "staging-xcm-executor", path = "../../../../../polkadot/xcm/xcm-executor", default-features = false } # Cumulus cumulus-pallet-aura-ext = { path = "../../../../pallets/aura-ext", default-features = false } pallet-message-queue = { path = "../../../../../substrate/frame/message-queue", default-features = false } cumulus-pallet-dmp-queue = { path = "../../../../pallets/dmp-queue", default-features = false } -cumulus-pallet-parachain-system = { path = "../../../../pallets/parachain-system", default-features = false, features = ["parameterized-consensus-hook",] } -cumulus-pallet-session-benchmarking = { path = "../../../../pallets/session-benchmarking", default-features = false} +cumulus-pallet-parachain-system = { path = "../../../../pallets/parachain-system", default-features = false, features = ["parameterized-consensus-hook"] } +cumulus-pallet-session-benchmarking = { 
path = "../../../../pallets/session-benchmarking", default-features = false } cumulus-pallet-xcm = { path = "../../../../pallets/xcm", default-features = false } cumulus-pallet-xcmp-queue = { path = "../../../../pallets/xcmp-queue", default-features = false } cumulus-primitives-core = { path = "../../../../primitives/core", default-features = false } @@ -77,7 +77,7 @@ parachain-info = { package = "staging-parachain-info", path = "../../../pallets/ parachains-common = { path = "../../../common", default-features = false } [features] -default = [ "std" ] +default = ["std"] std = [ "codec/std", "cumulus-pallet-aura-ext/std", @@ -190,4 +190,4 @@ try-runtime = [ "sp-runtime/try-runtime", ] -experimental = [ "pallet-aura/experimental" ] +experimental = ["pallet-aura/experimental"] diff --git a/cumulus/parachains/runtimes/testing/rococo-parachain/Cargo.toml b/cumulus/parachains/runtimes/testing/rococo-parachain/Cargo.toml index 08fa8b69127..3903bbbe31e 100644 --- a/cumulus/parachains/runtimes/testing/rococo-parachain/Cargo.toml +++ b/cumulus/parachains/runtimes/testing/rococo-parachain/Cargo.toml @@ -11,44 +11,44 @@ codec = { package = "parity-scale-codec", version = "3.0.0", default-features = scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } # Substrate -frame-benchmarking = { path = "../../../../../substrate/frame/benchmarking", default-features = false, optional = true} -frame-executive = { path = "../../../../../substrate/frame/executive", default-features = false} -frame-support = { path = "../../../../../substrate/frame/support", default-features = false} -frame-system = { path = "../../../../../substrate/frame/system", default-features = false} -frame-system-rpc-runtime-api = { path = "../../../../../substrate/frame/system/rpc/runtime-api", default-features = false} -pallet-assets = { path = "../../../../../substrate/frame/assets", default-features = false} -pallet-aura = { path = "../../../../../substrate/frame/aura", default-features = false} -pallet-balances = { path = "../../../../../substrate/frame/balances", default-features = false} -pallet-sudo = { path = "../../../../../substrate/frame/sudo", default-features = false} -pallet-timestamp = { path = "../../../../../substrate/frame/timestamp", default-features = false} -pallet-transaction-payment = { path = "../../../../../substrate/frame/transaction-payment", default-features = false} -pallet-transaction-payment-rpc-runtime-api = { path = "../../../../../substrate/frame/transaction-payment/rpc/runtime-api", default-features = false} -sp-api = { path = "../../../../../substrate/primitives/api", default-features = false} -sp-block-builder = { path = "../../../../../substrate/primitives/block-builder", default-features = false} -sp-consensus-aura = { path = "../../../../../substrate/primitives/consensus/aura", default-features = false} -sp-core = { path = "../../../../../substrate/primitives/core", default-features = false} +frame-benchmarking = { path = "../../../../../substrate/frame/benchmarking", default-features = false, optional = true } +frame-executive = { path = "../../../../../substrate/frame/executive", default-features = false } +frame-support = { path = "../../../../../substrate/frame/support", default-features = false } +frame-system = { path = "../../../../../substrate/frame/system", default-features = false } +frame-system-rpc-runtime-api = { path = "../../../../../substrate/frame/system/rpc/runtime-api", default-features = false } +pallet-assets = { path = 
"../../../../../substrate/frame/assets", default-features = false } +pallet-aura = { path = "../../../../../substrate/frame/aura", default-features = false } +pallet-balances = { path = "../../../../../substrate/frame/balances", default-features = false } +pallet-sudo = { path = "../../../../../substrate/frame/sudo", default-features = false } +pallet-timestamp = { path = "../../../../../substrate/frame/timestamp", default-features = false } +pallet-transaction-payment = { path = "../../../../../substrate/frame/transaction-payment", default-features = false } +pallet-transaction-payment-rpc-runtime-api = { path = "../../../../../substrate/frame/transaction-payment/rpc/runtime-api", default-features = false } +sp-api = { path = "../../../../../substrate/primitives/api", default-features = false } +sp-block-builder = { path = "../../../../../substrate/primitives/block-builder", default-features = false } +sp-consensus-aura = { path = "../../../../../substrate/primitives/consensus/aura", default-features = false } +sp-core = { path = "../../../../../substrate/primitives/core", default-features = false } sp-genesis-builder = { path = "../../../../../substrate/primitives/genesis-builder", default-features = false } -sp-inherents = { path = "../../../../../substrate/primitives/inherents", default-features = false} -sp-offchain = { path = "../../../../../substrate/primitives/offchain", default-features = false} -sp-runtime = { path = "../../../../../substrate/primitives/runtime", default-features = false} -sp-session = { path = "../../../../../substrate/primitives/session", default-features = false} -sp-std = { path = "../../../../../substrate/primitives/std", default-features = false} -sp-transaction-pool = { path = "../../../../../substrate/primitives/transaction-pool", default-features = false} -sp-version = { path = "../../../../../substrate/primitives/version", default-features = false} +sp-inherents = { path = "../../../../../substrate/primitives/inherents", default-features = false } +sp-offchain = { path = "../../../../../substrate/primitives/offchain", default-features = false } +sp-runtime = { path = "../../../../../substrate/primitives/runtime", default-features = false } +sp-session = { path = "../../../../../substrate/primitives/session", default-features = false } +sp-std = { path = "../../../../../substrate/primitives/std", default-features = false } +sp-transaction-pool = { path = "../../../../../substrate/primitives/transaction-pool", default-features = false } +sp-version = { path = "../../../../../substrate/primitives/version", default-features = false } # Polkadot -pallet-xcm = { path = "../../../../../polkadot/xcm/pallet-xcm", default-features = false} -polkadot-parachain-primitives = { path = "../../../../../polkadot/parachain", default-features = false} -xcm = { package = "staging-xcm", path = "../../../../../polkadot/xcm", default-features = false} -xcm-builder = { package = "staging-xcm-builder", path = "../../../../../polkadot/xcm/xcm-builder", default-features = false} -xcm-executor = { package = "staging-xcm-executor", path = "../../../../../polkadot/xcm/xcm-executor", default-features = false} +pallet-xcm = { path = "../../../../../polkadot/xcm/pallet-xcm", default-features = false } +polkadot-parachain-primitives = { path = "../../../../../polkadot/parachain", default-features = false } +xcm = { package = "staging-xcm", path = "../../../../../polkadot/xcm", default-features = false } +xcm-builder = { package = "staging-xcm-builder", path = 
"../../../../../polkadot/xcm/xcm-builder", default-features = false } +xcm-executor = { package = "staging-xcm-executor", path = "../../../../../polkadot/xcm/xcm-executor", default-features = false } polkadot-runtime-common = { path = "../../../../../polkadot/runtime/common", default-features = false } # Cumulus cumulus-pallet-aura-ext = { path = "../../../../pallets/aura-ext", default-features = false } pallet-message-queue = { path = "../../../../../substrate/frame/message-queue", default-features = false } cumulus-pallet-dmp-queue = { path = "../../../../pallets/dmp-queue", default-features = false } -cumulus-pallet-parachain-system = { path = "../../../../pallets/parachain-system", default-features = false, features = ["parameterized-consensus-hook",] } +cumulus-pallet-parachain-system = { path = "../../../../pallets/parachain-system", default-features = false, features = ["parameterized-consensus-hook"] } cumulus-pallet-xcm = { path = "../../../../pallets/xcm", default-features = false } cumulus-pallet-xcmp-queue = { path = "../../../../pallets/xcmp-queue", default-features = false } cumulus-ping = { path = "../../../pallets/ping", default-features = false } @@ -62,7 +62,7 @@ parachain-info = { package = "staging-parachain-info", path = "../../../pallets/ substrate-wasm-builder = { path = "../../../../../substrate/utils/wasm-builder", optional = true } [features] -default = [ "std" ] +default = ["std"] std = [ "codec/std", "cumulus-pallet-aura-ext/std", @@ -133,4 +133,4 @@ runtime-benchmarks = [ "xcm-executor/runtime-benchmarks", ] -experimental = [ "pallet-aura/experimental" ] +experimental = ["pallet-aura/experimental"] diff --git a/cumulus/parachains/testnets-common/Cargo.toml b/cumulus/parachains/testnets-common/Cargo.toml index e39cf91d3ab..87c570d10ea 100644 --- a/cumulus/parachains/testnets-common/Cargo.toml +++ b/cumulus/parachains/testnets-common/Cargo.toml @@ -17,9 +17,9 @@ frame-support = { path = "../../../substrate/frame/support", default-features = sp-runtime = { path = "../../../substrate/primitives/runtime", default-features = false } # Polkadot -rococo-runtime-constants = { path = "../../../polkadot/runtime/rococo/constants", default-features = false} -westend-runtime-constants = { path = "../../../polkadot/runtime/westend/constants", default-features = false} -polkadot-core-primitives = { path = "../../../polkadot/core-primitives", default-features = false} +rococo-runtime-constants = { path = "../../../polkadot/runtime/rococo/constants", default-features = false } +westend-runtime-constants = { path = "../../../polkadot/runtime/westend/constants", default-features = false } +polkadot-core-primitives = { path = "../../../polkadot/core-primitives", default-features = false } # Cumulus @@ -29,7 +29,7 @@ polkadot-core-primitives = { path = "../../../polkadot/core-primitives", default substrate-wasm-builder = { path = "../../../substrate/utils/wasm-builder" } [features] -default = [ "std" ] +default = ["std"] std = [ "frame-support/std", "polkadot-core-primitives/std", diff --git a/cumulus/polkadot-parachain/Cargo.toml b/cumulus/polkadot-parachain/Cargo.toml index cae9b3b6889..67ebc244adf 100644 --- a/cumulus/polkadot-parachain/Cargo.toml +++ b/cumulus/polkadot-parachain/Cargo.toml @@ -39,7 +39,7 @@ parachains-common = { path = "../parachains/common" } # Substrate frame-benchmarking = { path = "../../substrate/frame/benchmarking" } frame-benchmarking-cli = { path = "../../substrate/utils/frame/benchmarking-cli" } -sp-runtime = { path = 
"../../substrate/primitives/runtime", default-features = false} +sp-runtime = { path = "../../substrate/primitives/runtime", default-features = false } sp-io = { path = "../../substrate/primitives/io" } sp-core = { path = "../../substrate/primitives/core" } sp-session = { path = "../../substrate/primitives/session" } @@ -109,7 +109,7 @@ substrate-build-script-utils = { path = "../../substrate/utils/build-script-util assert_cmd = "2.0" nix = { version = "0.26.1", features = ["signal"] } tempfile = "3.8.0" -tokio = { version = "1.32.0", features = ["macros", "time", "parking_lot"] } +tokio = { version = "1.32.0", features = ["macros", "parking_lot", "time"] } wait-timeout = "0.2" [features] diff --git a/cumulus/primitives/aura/Cargo.toml b/cumulus/primitives/aura/Cargo.toml index 19607eb7c18..096ae0a9620 100644 --- a/cumulus/primitives/aura/Cargo.toml +++ b/cumulus/primitives/aura/Cargo.toml @@ -7,20 +7,20 @@ license = "Apache-2.0" description = "Core primitives for Aura in Cumulus" [dependencies] -codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = [ "derive" ] } +codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] } # Substrate -sp-api = { path = "../../../substrate/primitives/api", default-features = false} -sp-consensus-aura = { path = "../../../substrate/primitives/consensus/aura", default-features = false} -sp-runtime = { path = "../../../substrate/primitives/runtime", default-features = false} -sp-std = { path = "../../../substrate/primitives/std", default-features = false} +sp-api = { path = "../../../substrate/primitives/api", default-features = false } +sp-consensus-aura = { path = "../../../substrate/primitives/consensus/aura", default-features = false } +sp-runtime = { path = "../../../substrate/primitives/runtime", default-features = false } +sp-std = { path = "../../../substrate/primitives/std", default-features = false } # Polkadot -polkadot-core-primitives = { path = "../../../polkadot/core-primitives", default-features = false} -polkadot-primitives = { path = "../../../polkadot/primitives", default-features = false} +polkadot-core-primitives = { path = "../../../polkadot/core-primitives", default-features = false } +polkadot-primitives = { path = "../../../polkadot/primitives", default-features = false } [features] -default = [ "std" ] +default = ["std"] std = [ "codec/std", "polkadot-core-primitives/std", diff --git a/cumulus/primitives/core/Cargo.toml b/cumulus/primitives/core/Cargo.toml index 23839a10e46..5f68a3546e6 100644 --- a/cumulus/primitives/core/Cargo.toml +++ b/cumulus/primitives/core/Cargo.toml @@ -7,23 +7,23 @@ license = "Apache-2.0" description = "Cumulus related core primitive types and traits" [dependencies] -codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = [ "derive" ] } +codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } # Substrate -sp-api = { path = "../../../substrate/primitives/api", default-features = false} -sp-runtime = { path = "../../../substrate/primitives/runtime", default-features = false} -sp-std = { path = "../../../substrate/primitives/std", default-features = false} -sp-trie = { path = "../../../substrate/primitives/trie", default-features = false} +sp-api = { path = "../../../substrate/primitives/api", default-features = false } +sp-runtime = { 
path = "../../../substrate/primitives/runtime", default-features = false } +sp-std = { path = "../../../substrate/primitives/std", default-features = false } +sp-trie = { path = "../../../substrate/primitives/trie", default-features = false } # Polkadot -polkadot-core-primitives = { path = "../../../polkadot/core-primitives", default-features = false} -polkadot-parachain-primitives = { path = "../../../polkadot/parachain", default-features = false} -polkadot-primitives = { path = "../../../polkadot/primitives", default-features = false} -xcm = { package = "staging-xcm", path = "../../../polkadot/xcm", default-features = false} +polkadot-core-primitives = { path = "../../../polkadot/core-primitives", default-features = false } +polkadot-parachain-primitives = { path = "../../../polkadot/parachain", default-features = false } +polkadot-primitives = { path = "../../../polkadot/primitives", default-features = false } +xcm = { package = "staging-xcm", path = "../../../polkadot/xcm", default-features = false } [features] -default = [ "std" ] +default = ["std"] std = [ "codec/std", "polkadot-core-primitives/std", diff --git a/cumulus/primitives/parachain-inherent/Cargo.toml b/cumulus/primitives/parachain-inherent/Cargo.toml index 46b5da57f38..5d3c72a59f9 100644 --- a/cumulus/primitives/parachain-inherent/Cargo.toml +++ b/cumulus/primitives/parachain-inherent/Cargo.toml @@ -8,20 +8,20 @@ license = "Apache-2.0" [dependencies] async-trait = { version = "0.1.73", optional = true } -codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = [ "derive" ] } +codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } tracing = { version = "0.1.37", optional = true } # Substrate -sc-client-api = { path = "../../../substrate/client/api", optional = true} -sp-api = { path = "../../../substrate/primitives/api", optional = true} -sp-core = { path = "../../../substrate/primitives/core", default-features = false} -sp-inherents = { path = "../../../substrate/primitives/inherents", default-features = false} -sp-runtime = { path = "../../../substrate/primitives/runtime", optional = true} -sp-state-machine = { path = "../../../substrate/primitives/state-machine", optional = true} -sp-std = { path = "../../../substrate/primitives/std", default-features = false} -sp-storage = { path = "../../../substrate/primitives/storage", optional = true} -sp-trie = { path = "../../../substrate/primitives/trie", default-features = false} +sc-client-api = { path = "../../../substrate/client/api", optional = true } +sp-api = { path = "../../../substrate/primitives/api", optional = true } +sp-core = { path = "../../../substrate/primitives/core", default-features = false } +sp-inherents = { path = "../../../substrate/primitives/inherents", default-features = false } +sp-runtime = { path = "../../../substrate/primitives/runtime", optional = true } +sp-state-machine = { path = "../../../substrate/primitives/state-machine", optional = true } +sp-std = { path = "../../../substrate/primitives/std", default-features = false } +sp-storage = { path = "../../../substrate/primitives/storage", optional = true } +sp-trie = { path = "../../../substrate/primitives/trie", default-features = false } # Cumulus cumulus-primitives-core = { path = "../core", default-features = false } @@ -29,7 +29,7 @@ cumulus-relay-chain-interface = { path = "../../client/relay-chain-interface", o 
cumulus-test-relay-sproof-builder = { path = "../../test/relay-sproof-builder", optional = true } [features] -default = [ "std" ] +default = ["std"] std = [ "async-trait", "codec/std", diff --git a/cumulus/primitives/proof-size-hostfunction/Cargo.toml b/cumulus/primitives/proof-size-hostfunction/Cargo.toml index 83dad428d00..576f7f5ae99 100644 --- a/cumulus/primitives/proof-size-hostfunction/Cargo.toml +++ b/cumulus/primitives/proof-size-hostfunction/Cargo.toml @@ -17,5 +17,5 @@ sp-core = { path = "../../../substrate/primitives/core" } sp-io = { path = "../../../substrate/primitives/io" } [features] -default = [ "std" ] -std = [ "sp-externalities/std", "sp-runtime-interface/std", "sp-trie/std" ] +default = ["std"] +std = ["sp-externalities/std", "sp-runtime-interface/std", "sp-trie/std"] diff --git a/cumulus/primitives/timestamp/Cargo.toml b/cumulus/primitives/timestamp/Cargo.toml index a0fea51f8db..ec5cb57419a 100644 --- a/cumulus/primitives/timestamp/Cargo.toml +++ b/cumulus/primitives/timestamp/Cargo.toml @@ -7,19 +7,19 @@ description = "Provides timestamp related functionality for parachains." license = "Apache-2.0" [dependencies] -codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = [ "derive" ] } +codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] } futures = "0.3.28" # Substrate -sp-inherents = { path = "../../../substrate/primitives/inherents", default-features = false} -sp-std = { path = "../../../substrate/primitives/std", default-features = false} -sp-timestamp = { path = "../../../substrate/primitives/timestamp", default-features = false} +sp-inherents = { path = "../../../substrate/primitives/inherents", default-features = false } +sp-std = { path = "../../../substrate/primitives/std", default-features = false } +sp-timestamp = { path = "../../../substrate/primitives/timestamp", default-features = false } # Cumulus cumulus-primitives-core = { path = "../core", default-features = false } [features] -default = [ "std" ] +default = ["std"] std = [ "codec/std", "cumulus-primitives-core/std", diff --git a/cumulus/primitives/utility/Cargo.toml b/cumulus/primitives/utility/Cargo.toml index 45ce6701988..5f756c1e361 100644 --- a/cumulus/primitives/utility/Cargo.toml +++ b/cumulus/primitives/utility/Cargo.toml @@ -7,28 +7,28 @@ license = "Apache-2.0" description = "Helper datatypes for Cumulus" [dependencies] -codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = [ "derive" ] } +codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] } log = { version = "0.4.20", default-features = false } # Substrate -frame-support = { path = "../../../substrate/frame/support", default-features = false} -sp-io = { path = "../../../substrate/primitives/io", default-features = false} -sp-runtime = { path = "../../../substrate/primitives/runtime", default-features = false} -sp-std = { path = "../../../substrate/primitives/std", default-features = false} +frame-support = { path = "../../../substrate/frame/support", default-features = false } +sp-io = { path = "../../../substrate/primitives/io", default-features = false } +sp-runtime = { path = "../../../substrate/primitives/runtime", default-features = false } +sp-std = { path = "../../../substrate/primitives/std", default-features = false } # Polkadot polkadot-runtime-common = { path = "../../../polkadot/runtime/common", default-features = false } 
polkadot-runtime-parachains = { path = "../../../polkadot/runtime/parachains", default-features = false } xcm = { package = "staging-xcm", path = "../../../polkadot/xcm", default-features = false } -xcm-executor = { package = "staging-xcm-executor", path = "../../../polkadot/xcm/xcm-executor", default-features = false} -xcm-builder = { package = "staging-xcm-builder", path = "../../../polkadot/xcm/xcm-builder", default-features = false} +xcm-executor = { package = "staging-xcm-executor", path = "../../../polkadot/xcm/xcm-executor", default-features = false } +xcm-builder = { package = "staging-xcm-builder", path = "../../../polkadot/xcm/xcm-builder", default-features = false } pallet-xcm-benchmarks = { path = "../../../polkadot/xcm/pallet-xcm-benchmarks", default-features = false } # Cumulus cumulus-primitives-core = { path = "../core", default-features = false } [features] -default = [ "std" ] +default = ["std"] std = [ "codec/std", "cumulus-primitives-core/std", diff --git a/cumulus/test/client/Cargo.toml b/cumulus/test/client/Cargo.toml index 6ab873d320c..037b8600db6 100644 --- a/cumulus/test/client/Cargo.toml +++ b/cumulus/test/client/Cargo.toml @@ -6,7 +6,7 @@ edition.workspace = true publish = false [dependencies] -codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = [ "derive" ] } +codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] } # Substrate sc-service = { path = "../../../substrate/client/service" } diff --git a/cumulus/test/relay-sproof-builder/Cargo.toml b/cumulus/test/relay-sproof-builder/Cargo.toml index b24ac308495..262a4ff92b5 100644 --- a/cumulus/test/relay-sproof-builder/Cargo.toml +++ b/cumulus/test/relay-sproof-builder/Cargo.toml @@ -7,22 +7,22 @@ license = "Apache-2.0" description = "Mocked relay state proof builder for testing Cumulus." 
[dependencies] -codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = [ "derive" ] } +codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] } # Substrate -sp-runtime = { path = "../../../substrate/primitives/runtime", default-features = false} -sp-state-machine = { path = "../../../substrate/primitives/state-machine", default-features = false} -sp-trie = { path = "../../../substrate/primitives/trie", default-features = false} -sp-std = { path = "../../../substrate/primitives/std", default-features = false} +sp-runtime = { path = "../../../substrate/primitives/runtime", default-features = false } +sp-state-machine = { path = "../../../substrate/primitives/state-machine", default-features = false } +sp-trie = { path = "../../../substrate/primitives/trie", default-features = false } +sp-std = { path = "../../../substrate/primitives/std", default-features = false } # Polkadot -polkadot-primitives = { path = "../../../polkadot/primitives", default-features = false} +polkadot-primitives = { path = "../../../polkadot/primitives", default-features = false } # Cumulus cumulus-primitives-core = { path = "../../primitives/core", default-features = false } [features] -default = [ "std" ] +default = ["std"] std = [ "codec/std", "cumulus-primitives-core/std", diff --git a/cumulus/test/runtime/Cargo.toml b/cumulus/test/runtime/Cargo.toml index 3ea51c1973f..7bdb69df2c2 100644 --- a/cumulus/test/runtime/Cargo.toml +++ b/cumulus/test/runtime/Cargo.toml @@ -10,38 +10,38 @@ codec = { package = "parity-scale-codec", version = "3.0.0", default-features = scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } # Substrate -frame-executive = { path = "../../../substrate/frame/executive", default-features = false} -frame-support = { path = "../../../substrate/frame/support", default-features = false} -frame-system = { path = "../../../substrate/frame/system", default-features = false} -frame-system-rpc-runtime-api = { path = "../../../substrate/frame/system/rpc/runtime-api", default-features = false} -pallet-balances = { path = "../../../substrate/frame/balances", default-features = false} +frame-executive = { path = "../../../substrate/frame/executive", default-features = false } +frame-support = { path = "../../../substrate/frame/support", default-features = false } +frame-system = { path = "../../../substrate/frame/system", default-features = false } +frame-system-rpc-runtime-api = { path = "../../../substrate/frame/system/rpc/runtime-api", default-features = false } +pallet-balances = { path = "../../../substrate/frame/balances", default-features = false } pallet-message-queue = { path = "../../../substrate/frame/message-queue", default-features = false } -pallet-sudo = { path = "../../../substrate/frame/sudo", default-features = false} -pallet-timestamp = { path = "../../../substrate/frame/timestamp", default-features = false} -pallet-glutton = { path = "../../../substrate/frame/glutton", default-features = false} -pallet-transaction-payment = { path = "../../../substrate/frame/transaction-payment", default-features = false} -sp-api = { path = "../../../substrate/primitives/api", default-features = false} -sp-block-builder = { path = "../../../substrate/primitives/block-builder", default-features = false} -sp-core = { path = "../../../substrate/primitives/core", default-features = false} -sp-genesis-builder = { path = "../../../substrate/primitives/genesis-builder", default-features 
= false} -sp-inherents = { path = "../../../substrate/primitives/inherents", default-features = false} -sp-io = { path = "../../../substrate/primitives/io", default-features = false} -sp-offchain = { path = "../../../substrate/primitives/offchain", default-features = false} -sp-runtime = { path = "../../../substrate/primitives/runtime", default-features = false} -sp-session = { path = "../../../substrate/primitives/session", default-features = false} -sp-std = { path = "../../../substrate/primitives/std", default-features = false} -sp-transaction-pool = { path = "../../../substrate/primitives/transaction-pool", default-features = false} -sp-version = { path = "../../../substrate/primitives/version", default-features = false} +pallet-sudo = { path = "../../../substrate/frame/sudo", default-features = false } +pallet-timestamp = { path = "../../../substrate/frame/timestamp", default-features = false } +pallet-glutton = { path = "../../../substrate/frame/glutton", default-features = false } +pallet-transaction-payment = { path = "../../../substrate/frame/transaction-payment", default-features = false } +sp-api = { path = "../../../substrate/primitives/api", default-features = false } +sp-block-builder = { path = "../../../substrate/primitives/block-builder", default-features = false } +sp-core = { path = "../../../substrate/primitives/core", default-features = false } +sp-genesis-builder = { path = "../../../substrate/primitives/genesis-builder", default-features = false } +sp-inherents = { path = "../../../substrate/primitives/inherents", default-features = false } +sp-io = { path = "../../../substrate/primitives/io", default-features = false } +sp-offchain = { path = "../../../substrate/primitives/offchain", default-features = false } +sp-runtime = { path = "../../../substrate/primitives/runtime", default-features = false } +sp-session = { path = "../../../substrate/primitives/session", default-features = false } +sp-std = { path = "../../../substrate/primitives/std", default-features = false } +sp-transaction-pool = { path = "../../../substrate/primitives/transaction-pool", default-features = false } +sp-version = { path = "../../../substrate/primitives/version", default-features = false } # Cumulus -cumulus-pallet-parachain-system = { path = "../../pallets/parachain-system", default-features = false, features = ["parameterized-consensus-hook",] } +cumulus-pallet-parachain-system = { path = "../../pallets/parachain-system", default-features = false, features = ["parameterized-consensus-hook"] } cumulus-primitives-core = { path = "../../primitives/core", default-features = false } [build-dependencies] substrate-wasm-builder = { path = "../../../substrate/utils/wasm-builder", optional = true } [features] -default = [ "std" ] +default = ["std"] std = [ "codec/std", "cumulus-pallet-parachain-system/std", diff --git a/cumulus/test/service/Cargo.toml b/cumulus/test/service/Cargo.toml index e35173ecf16..271c450539b 100644 --- a/cumulus/test/service/Cargo.toml +++ b/cumulus/test/service/Cargo.toml @@ -13,7 +13,7 @@ path = "src/main.rs" async-trait = "0.1.73" clap = { version = "4.4.10", features = ["derive"] } codec = { package = "parity-scale-codec", version = "3.0.0" } -criterion = { version = "0.5.1", features = [ "async_tokio" ] } +criterion = { version = "0.5.1", features = ["async_tokio"] } jsonrpsee = { version = "0.16.2", features = ["server"] } rand = "0.8.5" serde = { version = "1.0.193", features = ["derive"] } @@ -44,7 +44,7 @@ sp-core = { path = "../../../substrate/primitives/core" } 
sp-io = { path = "../../../substrate/primitives/io" } sp-api = { path = "../../../substrate/primitives/api" } sp-keyring = { path = "../../../substrate/primitives/keyring" } -sp-runtime = { path = "../../../substrate/primitives/runtime", default-features = false} +sp-runtime = { path = "../../../substrate/primitives/runtime", default-features = false } sp-state-machine = { path = "../../../substrate/primitives/state-machine" } sp-tracing = { path = "../../../substrate/primitives/tracing" } sp-timestamp = { path = "../../../substrate/primitives/timestamp" } @@ -77,7 +77,7 @@ cumulus-test-runtime = { path = "../runtime" } cumulus-relay-chain-minimal-node = { path = "../../client/relay-chain-minimal-node" } cumulus-client-pov-recovery = { path = "../../client/pov-recovery" } cumulus-test-relay-sproof-builder = { path = "../relay-sproof-builder" } -cumulus-pallet-parachain-system = { path = "../../pallets/parachain-system", default-features = false, features = ["parameterized-consensus-hook",] } +cumulus-pallet-parachain-system = { path = "../../pallets/parachain-system", default-features = false, features = ["parameterized-consensus-hook"] } pallet-timestamp = { path = "../../../substrate/frame/timestamp" } [dev-dependencies] diff --git a/developer-hub/Cargo.toml b/developer-hub/Cargo.toml index c19d5480ec4..56f279c7e10 100644 --- a/developer-hub/Cargo.toml +++ b/developer-hub/Cargo.toml @@ -14,7 +14,7 @@ version = "0.0.1" # Needed for all FRAME-based code parity-scale-codec = { version = "3.0.0", default-features = false } scale-info = { version = "2.6.0", default-features = false } -frame = { path = "../substrate/frame", features = ["runtime", "experimental"] } +frame = { path = "../substrate/frame", features = ["experimental", "runtime"] } pallet-examples = { path = "../substrate/frame/examples" } pallet-default-config-example = { path = "../substrate/frame/examples/default-config" } @@ -63,4 +63,4 @@ parity-scale-codec = "3.6.5" scale-info = "2.9.0" [features] -experimental = [ "pallet-aura/experimental" ] +experimental = ["pallet-aura/experimental"] diff --git a/docs/CONTRIBUTING.md b/docs/CONTRIBUTING.md index 1e05755a9b8..1f68e2979f2 100644 --- a/docs/CONTRIBUTING.md +++ b/docs/CONTRIBUTING.md @@ -153,6 +153,10 @@ Or if you have opened PR and you're member of `paritytech` - you can use command - `bot update-ui latest -v CMD_IMAGE=paritytech/ci-unified:bullseye-1.70.0-2023-05-23 --rust_version=1.70.0` - will run the tests for the specified rust version and specified image +## Feature Propagation + +We use [zepter](https://github.com/ggwpez/zepter) to enforce features are propagated between crates correctly. + ## Command Bot If you're member of **paritytech** org - you can use command-bot to run various of common commands in CI: diff --git a/docs/STYLE_GUIDE.md b/docs/STYLE_GUIDE.md index 1ae9bc5003f..3df65d9699a 100644 --- a/docs/STYLE_GUIDE.md +++ b/docs/STYLE_GUIDE.md @@ -2,9 +2,11 @@ title: Style Guide for Rust in the Polkadot-SDK --- -Where possible these styles are enforced by settings in `rustfmt.toml` so if you run `cargo fmt` +Where possible these styles are enforced by settings in `rustfmt.toml` so if you run `cargo +nightly fmt` then you will adhere to most of these style guidelines automatically. +To see exactly which nightly version is used, check our CI job logs. + # Formatting - Indent using tabs. 
@@ -150,31 +152,13 @@ let mut target_path = # Manifest Formatting -> **TLDR** -> You can use the CLI tool [Zepter](https://crates.io/crates/zepter) to -> format the files: `zepter format features --fix` (or `zepter f f -f`). +We use [taplo](https://taplo.tamasfe.dev/) to enforce consistent TOML formatting. -Rust `Cargo.toml` files need to respect certain formatting rules. All entries -need to be alphabetically sorted. This makes it easier to read them and insert -new entries. The exhaustive list of rules is enforced by the CI. The general -format looks like this: +You can install it with `cargo install taplo-cli` and format your code with `taplo format --config .config/taplo.toml`. -- The feature is written as a single line if it fits within 80 chars: +See the config file for the exact rules. -```toml -[features] -default = [ "std" ] -``` +You may find the following useful: -- Otherwise the feature is broken down into multiple lines with one entry per - line. Each line is padded with one tab and no trailing spaces but a trailing - comma. - -```toml -[features] -default = [ - "loooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooong", - # Comments go here as well ;) - "std", -] -``` +- [Taplo VSCode extension](https://marketplace.visualstudio.com/items?itemName=tamasfe.even-better-toml) +- For NeoVim, [taplo is available with Mason](https://github.com/williamboman/mason-lspconfig.nvim#available-lsp-servers) diff --git a/polkadot/Cargo.toml b/polkadot/Cargo.toml index fb161848fb6..c0227823c6b 100644 --- a/polkadot/Cargo.toml +++ b/polkadot/Cargo.toml @@ -23,10 +23,10 @@ default-run = "polkadot" [dependencies] color-eyre = { version = "0.6.1", default-features = false } -tikv-jemallocator = { version = "0.5.0", optional = true, features = [ "unprefixed_malloc_on_supported_platforms" ] } +tikv-jemallocator = { version = "0.5.0", optional = true, features = ["unprefixed_malloc_on_supported_platforms"] } # Crates in our workspace, defined as dependencies so we can pass them feature flags.
-polkadot-cli = { path = "cli", features = [ "westend-native", "rococo-native" ] } +polkadot-cli = { path = "cli", features = ["rococo-native", "westend-native"] } polkadot-node-core-pvf = { path = "node/core/pvf" } polkadot-node-core-pvf-prepare-worker = { path = "node/core/pvf/prepare-worker" } polkadot-overseer = { path = "node/overseer" } @@ -36,7 +36,7 @@ polkadot-node-core-pvf-common = { path = "node/core/pvf/common" } polkadot-node-core-pvf-execute-worker = { path = "node/core/pvf/execute-worker" } [target.'cfg(target_os = "linux")'.dependencies] -tikv-jemallocator = { version = "0.5.0", features = [ "unprefixed_malloc_on_supported_platforms" ] } +tikv-jemallocator = { version = "0.5.0", features = ["unprefixed_malloc_on_supported_platforms"] } [dev-dependencies] assert_cmd = "2.0.4" @@ -53,23 +53,23 @@ substrate-build-script-utils = { path = "../substrate/utils/build-script-utils" maintenance = { status = "actively-developed" } [features] -runtime-benchmarks = [ "polkadot-cli/runtime-benchmarks" ] -try-runtime = [ "polkadot-cli/try-runtime" ] -fast-runtime = [ "polkadot-cli/fast-runtime" ] -runtime-metrics = [ "polkadot-cli/runtime-metrics" ] -pyroscope = [ "polkadot-cli/pyroscope" ] +runtime-benchmarks = ["polkadot-cli/runtime-benchmarks"] +try-runtime = ["polkadot-cli/try-runtime"] +fast-runtime = ["polkadot-cli/fast-runtime"] +runtime-metrics = ["polkadot-cli/runtime-metrics"] +pyroscope = ["polkadot-cli/pyroscope"] jemalloc-allocator = [ "dep:tikv-jemallocator", "polkadot-node-core-pvf-prepare-worker/jemalloc-allocator", "polkadot-node-core-pvf/jemalloc-allocator", "polkadot-overseer/jemalloc-allocator", ] -network-protocol-staging = [ "polkadot-cli/network-protocol-staging" ] +network-protocol-staging = ["polkadot-cli/network-protocol-staging"] # Enables timeout-based tests supposed to be run only in CI environment as they may be flaky # when run locally depending on system load -ci-only-tests = [ "polkadot-node-core-pvf/ci-only-tests" ] +ci-only-tests = ["polkadot-node-core-pvf/ci-only-tests"] # Configuration for building a .deb package - for use with `cargo-deb` [package.metadata.deb] @@ -89,12 +89,12 @@ assets = [ [ "target/release/polkadot-prepare-worker", "/usr/lib/polkadot/", - "755" + "755", ], [ "target/release/polkadot-execute-worker", "/usr/lib/polkadot/", - "755" + "755", ], [ "scripts/packaging/polkadot.service", diff --git a/polkadot/cli/Cargo.toml b/polkadot/cli/Cargo.toml index 768077b8bf2..0882d20f2b7 100644 --- a/polkadot/cli/Cargo.toml +++ b/polkadot/cli/Cargo.toml @@ -43,8 +43,8 @@ sc-storage-monitor = { path = "../../substrate/client/storage-monitor" } substrate-build-script-utils = { path = "../../substrate/utils/build-script-utils" } [features] -default = [ "cli", "db", "full-node" ] -db = [ "service/db" ] +default = ["cli", "db", "full-node"] +db = ["service/db"] cli = [ "clap", "frame-benchmarking-cli", @@ -60,19 +60,19 @@ runtime-benchmarks = [ "sc-service?/runtime-benchmarks", "service/runtime-benchmarks", ] -full-node = [ "service/full-node" ] -try-runtime = [ "service/try-runtime", "try-runtime-cli/try-runtime" ] -fast-runtime = [ "service/fast-runtime" ] -pyroscope = [ "pyro", "pyroscope_pprofrs" ] +full-node = ["service/full-node"] +try-runtime = ["service/try-runtime", "try-runtime-cli/try-runtime"] +fast-runtime = ["service/fast-runtime"] +pyroscope = ["pyro", "pyroscope_pprofrs"] # Configure the native runtimes to use. 
-westend-native = [ "service/westend-native" ] -rococo-native = [ "service/rococo-native" ] +westend-native = ["service/westend-native"] +rococo-native = ["service/rococo-native"] -malus = [ "full-node", "service/malus" ] +malus = ["full-node", "service/malus"] runtime-metrics = [ "polkadot-node-metrics/runtime-metrics", "service/runtime-metrics", ] -network-protocol-staging = [ "service/network-protocol-staging" ] +network-protocol-staging = ["service/network-protocol-staging"] diff --git a/polkadot/core-primitives/Cargo.toml b/polkadot/core-primitives/Cargo.toml index f843ec17943..1b8da759c15 100644 --- a/polkadot/core-primitives/Cargo.toml +++ b/polkadot/core-primitives/Cargo.toml @@ -11,10 +11,10 @@ sp-core = { path = "../../substrate/primitives/core", default-features = false } sp-std = { path = "../../substrate/primitives/std", default-features = false } sp-runtime = { path = "../../substrate/primitives/runtime", default-features = false } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -parity-scale-codec = { version = "3.6.1", default-features = false, features = [ "derive" ] } +parity-scale-codec = { version = "3.6.1", default-features = false, features = ["derive"] } [features] -default = [ "std" ] +default = ["std"] std = [ "parity-scale-codec/std", "scale-info/std", diff --git a/polkadot/erasure-coding/Cargo.toml b/polkadot/erasure-coding/Cargo.toml index ccfe7f14eb4..c965f5d70aa 100644 --- a/polkadot/erasure-coding/Cargo.toml +++ b/polkadot/erasure-coding/Cargo.toml @@ -10,7 +10,7 @@ license.workspace = true polkadot-primitives = { path = "../primitives" } polkadot-node-primitives = { package = "polkadot-node-primitives", path = "../node/primitives" } novelpoly = { package = "reed-solomon-novelpoly", version = "1.0.0" } -parity-scale-codec = { version = "3.6.1", default-features = false, features = ["std", "derive"] } +parity-scale-codec = { version = "3.6.1", default-features = false, features = ["derive", "std"] } sp-core = { path = "../../substrate/primitives/core" } sp-trie = { path = "../../substrate/primitives/trie" } thiserror = "1.0.48" diff --git a/polkadot/node/collation-generation/Cargo.toml b/polkadot/node/collation-generation/Cargo.toml index c1848f47fc6..e0c86d233f9 100644 --- a/polkadot/node/collation-generation/Cargo.toml +++ b/polkadot/node/collation-generation/Cargo.toml @@ -15,7 +15,7 @@ polkadot-node-subsystem = { path = "../subsystem" } polkadot-node-subsystem-util = { path = "../subsystem-util" } polkadot-primitives = { path = "../../primitives" } sp-core = { path = "../../../substrate/primitives/core" } -sp-maybe-compressed-blob = { path = "../../../substrate/primitives/maybe-compressed-blob" } +sp-maybe-compressed-blob = { path = "../../../substrate/primitives/maybe-compressed-blob" } thiserror = "1.0.48" parity-scale-codec = { version = "3.6.1", default-features = false, features = ["bit-vec", "derive"] } diff --git a/polkadot/node/core/approval-voting/Cargo.toml b/polkadot/node/core/approval-voting/Cargo.toml index 59a6708f17e..9516dc52a30 100644 --- a/polkadot/node/core/approval-voting/Cargo.toml +++ b/polkadot/node/core/approval-voting/Cargo.toml @@ -39,7 +39,8 @@ rand = "0.8.5" [dev-dependencies] async-trait = "0.1.57" parking_lot = "0.12.0" -rand_core = "0.5.1" # should match schnorrkel +# rand_core should match schnorrkel +rand_core = "0.5.1" sp-keyring = { path = "../../../../substrate/primitives/keyring" } sp-keystore = { path = "../../../../substrate/primitives/keystore" } sp-core = { path = 
"../../../../substrate/primitives/core" } diff --git a/polkadot/node/core/provisioner/Cargo.toml b/polkadot/node/core/provisioner/Cargo.toml index 13ecb356f2c..d27e2343925 100644 --- a/polkadot/node/core/provisioner/Cargo.toml +++ b/polkadot/node/core/provisioner/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "polkadot-node-core-provisioner" version = "1.0.0" -description="Responsible for assembling a relay chain block from a set of available parachain candidates" +description = "Responsible for assembling a relay chain block from a set of available parachain candidates" authors.workspace = true edition.workspace = true license.workspace = true diff --git a/polkadot/node/core/pvf/Cargo.toml b/polkadot/node/core/pvf/Cargo.toml index 05509a5f853..2d15a25b887 100644 --- a/polkadot/node/core/pvf/Cargo.toml +++ b/polkadot/node/core/pvf/Cargo.toml @@ -39,7 +39,7 @@ polkadot-node-core-pvf-execute-worker = { path = "execute-worker", optional = tr [dev-dependencies] assert_matches = "1.4.0" -criterion = { version = "0.4.0", default-features = false, features = ["cargo_bench_support", "async_tokio"] } +criterion = { version = "0.4.0", default-features = false, features = ["async_tokio", "cargo_bench_support"] } hex-literal = "0.4.1" polkadot-node-core-pvf-common = { path = "common", features = ["test-utils"] } @@ -62,7 +62,7 @@ harness = false [features] ci-only-tests = [] -jemalloc-allocator = [ "polkadot-node-core-pvf-common/jemalloc-allocator" ] +jemalloc-allocator = ["polkadot-node-core-pvf-common/jemalloc-allocator"] # This feature is used to export test code to other crates without putting it in the production build. test-utils = [ "polkadot-node-core-pvf-execute-worker", diff --git a/polkadot/node/core/pvf/execute-worker/Cargo.toml b/polkadot/node/core/pvf/execute-worker/Cargo.toml index 40e0ff4f0a1..6e6206cf1b9 100644 --- a/polkadot/node/core/pvf/execute-worker/Cargo.toml +++ b/polkadot/node/core/pvf/execute-worker/Cargo.toml @@ -10,7 +10,7 @@ license.workspace = true cpu-time = "1.0.0" gum = { package = "tracing-gum", path = "../../../gum" } os_pipe = "1.1.4" -nix = { version = "0.27.1", features = ["resource", "process"]} +nix = { version = "0.27.1", features = ["process", "resource"] } libc = "0.2.139" parity-scale-codec = { version = "3.6.1", default-features = false, features = ["derive"] } diff --git a/polkadot/node/core/pvf/prepare-worker/Cargo.toml b/polkadot/node/core/pvf/prepare-worker/Cargo.toml index 005f2e93511..4e53f7f46ca 100644 --- a/polkadot/node/core/pvf/prepare-worker/Cargo.toml +++ b/polkadot/node/core/pvf/prepare-worker/Cargo.toml @@ -16,7 +16,7 @@ tracking-allocator = { package = "staging-tracking-allocator", path = "../../../ tikv-jemalloc-ctl = { version = "0.5.0", optional = true } tikv-jemallocator = { version = "0.5.0", optional = true } os_pipe = "1.1.4" -nix = { version = "0.27.1", features = ["resource", "process"]} +nix = { version = "0.27.1", features = ["process", "resource"] } parity-scale-codec = { version = "3.6.1", default-features = false, features = ["derive"] } diff --git a/polkadot/node/core/runtime-api/Cargo.toml b/polkadot/node/core/runtime-api/Cargo.toml index f324f1e79c4..965b280a747 100644 --- a/polkadot/node/core/runtime-api/Cargo.toml +++ b/polkadot/node/core/runtime-api/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "polkadot-node-core-runtime-api" version = "1.0.0" -description="Wrapper around the parachain-related runtime APIs" +description = "Wrapper around the parachain-related runtime APIs" authors.workspace = true edition.workspace = true 
license.workspace = true diff --git a/polkadot/node/gum/proc-macro/Cargo.toml b/polkadot/node/gum/proc-macro/Cargo.toml index 1ffaf6160ba..cd6e19b3dd2 100644 --- a/polkadot/node/gum/proc-macro/Cargo.toml +++ b/polkadot/node/gum/proc-macro/Cargo.toml @@ -13,7 +13,7 @@ targets = ["x86_64-unknown-linux-gnu"] proc-macro = true [dependencies] -syn = { version = "2.0.38", features = ["full", "extra-traits"] } +syn = { version = "2.0.38", features = ["extra-traits", "full"] } quote = "1.0.28" proc-macro2 = "1.0.56" proc-macro-crate = "1.1.3" diff --git a/polkadot/node/malus/Cargo.toml b/polkadot/node/malus/Cargo.toml index cd0a25d7593..1958bcf4620 100644 --- a/polkadot/node/malus/Cargo.toml +++ b/polkadot/node/malus/Cargo.toml @@ -26,7 +26,7 @@ path = "../../src/bin/prepare-worker.rs" doc = false [dependencies] -polkadot-cli = { path = "../../cli", features = [ "malus", "rococo-native", "westend-native" ] } +polkadot-cli = { path = "../../cli", features = ["malus", "rococo-native", "westend-native"] } polkadot-node-subsystem = { path = "../subsystem" } polkadot-node-subsystem-util = { path = "../subsystem-util" } polkadot-node-subsystem-types = { path = "../subsystem-types" } @@ -62,4 +62,4 @@ substrate-build-script-utils = { path = "../../../substrate/utils/build-script-u [features] default = [] -fast-runtime = [ "polkadot-cli/fast-runtime" ] +fast-runtime = ["polkadot-cli/fast-runtime"] diff --git a/polkadot/node/metrics/Cargo.toml b/polkadot/node/metrics/Cargo.toml index e13ae63199f..e8e00a64c05 100644 --- a/polkadot/node/metrics/Cargo.toml +++ b/polkadot/node/metrics/Cargo.toml @@ -11,7 +11,7 @@ futures = "0.3.21" futures-timer = "3.0.2" gum = { package = "tracing-gum", path = "../gum" } -metered = { package = "prioritized-metered-channel", version = "0.5.1", default-features = false, features=["futures_channel"] } +metered = { package = "prioritized-metered-channel", version = "0.5.1", default-features = false, features = ["futures_channel"] } # Both `sc-service` and `sc-cli` are required by runtime metrics `logger_hook()`. 
sc-service = { path = "../../../substrate/client/service" } sc-cli = { path = "../../../substrate/client/cli" } @@ -28,11 +28,11 @@ assert_cmd = "2.0.4" tempfile = "3.2.0" hyper = { version = "0.14.20", default-features = false, features = ["http1", "tcp"] } tokio = "1.24.2" -polkadot-test-service = { path = "../test/service", features=["runtime-metrics"]} +polkadot-test-service = { path = "../test/service", features = ["runtime-metrics"] } substrate-test-utils = { path = "../../../substrate/test-utils" } sc-service = { path = "../../../substrate/client/service" } sp-keyring = { path = "../../../substrate/primitives/keyring" } -prometheus-parse = {version = "0.2.2"} +prometheus-parse = { version = "0.2.2" } [features] default = [] diff --git a/polkadot/node/network/approval-distribution/Cargo.toml b/polkadot/node/network/approval-distribution/Cargo.toml index f8a7cc15f87..7db4aa77b7a 100644 --- a/polkadot/node/network/approval-distribution/Cargo.toml +++ b/polkadot/node/network/approval-distribution/Cargo.toml @@ -31,7 +31,8 @@ polkadot-primitives-test-helpers = { path = "../../../primitives/test-helpers" } assert_matches = "1.4.0" schnorrkel = { version = "0.9.1", default-features = false } -rand_core = "0.5.1" # should match schnorrkel +# rand_core should match schnorrkel +rand_core = "0.5.1" rand_chacha = "0.3.1" env_logger = "0.9.0" log = "0.4.17" diff --git a/polkadot/node/overseer/Cargo.toml b/polkadot/node/overseer/Cargo.toml index ac1e4443f0c..d9266055a39 100644 --- a/polkadot/node/overseer/Cargo.toml +++ b/polkadot/node/overseer/Cargo.toml @@ -17,14 +17,14 @@ polkadot-node-primitives = { path = "../primitives" } polkadot-node-subsystem-types = { path = "../subsystem-types" } polkadot-node-metrics = { path = "../metrics" } polkadot-primitives = { path = "../../primitives" } -orchestra = { version = "0.3.3", default-features = false, features=["futures_channel"] } +orchestra = { version = "0.3.3", default-features = false, features = ["futures_channel"] } gum = { package = "tracing-gum", path = "../gum" } sp-core = { path = "../../../substrate/primitives/core" } async-trait = "0.1.57" tikv-jemalloc-ctl = { version = "0.5.0", optional = true } [dev-dependencies] -metered = { package = "prioritized-metered-channel", version = "0.5.1", default-features = false, features=["futures_channel"] } +metered = { package = "prioritized-metered-channel", version = "0.5.1", default-features = false, features = ["futures_channel"] } sp-core = { path = "../../../substrate/primitives/core" } futures = { version = "0.3.21", features = ["thread-pool"] } femme = "2.2.1" @@ -36,8 +36,8 @@ node-test-helpers = { package = "polkadot-node-subsystem-test-helpers", path = " tikv-jemalloc-ctl = "0.5.0" [features] -default = [ "futures_channel" ] -dotgraph = [ "orchestra/dotgraph" ] -expand = [ "orchestra/expand" ] -futures_channel = [ "metered/futures_channel", "orchestra/futures_channel" ] -jemalloc-allocator = [ "dep:tikv-jemalloc-ctl" ] +default = ["futures_channel"] +dotgraph = ["orchestra/dotgraph"] +expand = ["orchestra/expand"] +futures_channel = ["metered/futures_channel", "orchestra/futures_channel"] +jemalloc-allocator = ["dep:tikv-jemalloc-ctl"] diff --git a/polkadot/node/service/Cargo.toml b/polkadot/node/service/Cargo.toml index b5aa46a41dc..b48302edf52 100644 --- a/polkadot/node/service/Cargo.toml +++ b/polkadot/node/service/Cargo.toml @@ -148,9 +148,9 @@ serial_test = "2.0.0" tempfile = "3.2" [features] -default = [ "db", "full-node" ] +default = ["db", "full-node"] -db = [ "service/rocksdb" ] 
+db = ["service/rocksdb"] full-node = [ "kvdb-rocksdb", @@ -181,8 +181,8 @@ full-node = [ ] # Configure the native runtimes to use. -westend-native = [ "westend-runtime", "westend-runtime-constants" ] -rococo-native = [ "rococo-runtime", "rococo-runtime-constants" ] +westend-native = ["westend-runtime", "westend-runtime-constants"] +rococo-native = ["rococo-runtime", "rococo-runtime-constants"] runtime-benchmarks = [ "frame-benchmarking-cli/runtime-benchmarks", @@ -219,7 +219,7 @@ fast-runtime = [ "westend-runtime?/fast-runtime", ] -malus = [ "full-node" ] +malus = ["full-node"] runtime-metrics = [ "polkadot-runtime-parachains/runtime-metrics", "rococo-runtime?/runtime-metrics", diff --git a/polkadot/node/subsystem-types/Cargo.toml b/polkadot/node/subsystem-types/Cargo.toml index 71e5257cab9..dfda6c1b3c5 100644 --- a/polkadot/node/subsystem-types/Cargo.toml +++ b/polkadot/node/subsystem-types/Cargo.toml @@ -14,7 +14,7 @@ polkadot-node-primitives = { path = "../primitives" } polkadot-node-network-protocol = { path = "../network/protocol" } polkadot-statement-table = { path = "../../statement-table" } polkadot-node-jaeger = { path = "../jaeger" } -orchestra = { version = "0.3.3", default-features = false, features=["futures_channel"] } +orchestra = { version = "0.3.3", default-features = false, features = ["futures_channel"] } sc-network = { path = "../../../substrate/client/network" } sp-api = { path = "../../../substrate/primitives/api" } sp-blockchain = { path = "../../../substrate/primitives/blockchain" } diff --git a/polkadot/node/subsystem-util/Cargo.toml b/polkadot/node/subsystem-util/Cargo.toml index d9364e2c2c0..94e1f5a3f12 100644 --- a/polkadot/node/subsystem-util/Cargo.toml +++ b/polkadot/node/subsystem-util/Cargo.toml @@ -29,7 +29,7 @@ polkadot-node-network-protocol = { path = "../network/protocol" } polkadot-primitives = { path = "../../primitives" } polkadot-node-primitives = { path = "../primitives" } polkadot-overseer = { path = "../overseer" } -metered = { package = "prioritized-metered-channel", version = "0.5.1", default-features = false, features=["futures_channel"] } +metered = { package = "prioritized-metered-channel", version = "0.5.1", default-features = false, features = ["futures_channel"] } sp-core = { path = "../../../substrate/primitives/core" } sp-application-crypto = { path = "../../../substrate/primitives/application-crypto" } @@ -37,7 +37,7 @@ sp-keystore = { path = "../../../substrate/primitives/keystore" } sc-client-api = { path = "../../../substrate/client/api" } kvdb = "0.13.0" -parity-db = { version = "0.4.8"} +parity-db = { version = "0.4.8" } [dev-dependencies] assert_matches = "1.4.0" diff --git a/polkadot/node/test/client/Cargo.toml b/polkadot/node/test/client/Cargo.toml index bc4ff74be4b..646f1ea9732 100644 --- a/polkadot/node/test/client/Cargo.toml +++ b/polkadot/node/test/client/Cargo.toml @@ -38,7 +38,7 @@ sp-keyring = { path = "../../../../substrate/primitives/keyring" } futures = "0.3.21" [features] -runtime-benchmarks= [ +runtime-benchmarks = [ "frame-benchmarking/runtime-benchmarks", "polkadot-primitives/runtime-benchmarks", "polkadot-test-runtime/runtime-benchmarks", diff --git a/polkadot/node/test/service/Cargo.toml b/polkadot/node/test/service/Cargo.toml index 437fa66b75a..aa143f40300 100644 --- a/polkadot/node/test/service/Cargo.toml +++ b/polkadot/node/test/service/Cargo.toml @@ -63,8 +63,8 @@ substrate-test-utils = { path = "../../../../substrate/test-utils" } tokio = { version = "1.24.2", features = ["macros"] } [features] 
-runtime-metrics=[ "polkadot-test-runtime/runtime-metrics" ] -runtime-benchmarks= [ +runtime-metrics = ["polkadot-test-runtime/runtime-metrics"] +runtime-benchmarks = [ "frame-system/runtime-benchmarks", "pallet-balances/runtime-benchmarks", "pallet-staking/runtime-benchmarks", diff --git a/polkadot/parachain/Cargo.toml b/polkadot/parachain/Cargo.toml index 9aa4dc10a5f..7c8935d987e 100644 --- a/polkadot/parachain/Cargo.toml +++ b/polkadot/parachain/Cargo.toml @@ -10,7 +10,7 @@ version = "1.0.0" # note: special care is taken to avoid inclusion of `sp-io` externals when compiling # this crate for WASM. This is critical to avoid forcing all parachain WASM into implementing # various unnecessary Substrate-specific endpoints. -parity-scale-codec = { version = "3.6.1", default-features = false, features = [ "derive" ] } +parity-scale-codec = { version = "3.6.1", default-features = false, features = ["derive"] } scale-info = { version = "2.10.0", default-features = false, features = ["derive", "serde"] } sp-std = { path = "../../substrate/primitives/std", default-features = false } sp-runtime = { path = "../../substrate/primitives/runtime", default-features = false, features = ["serde"] } @@ -21,10 +21,10 @@ derive_more = "0.99.11" bounded-collections = { version = "0.1.8", default-features = false, features = ["serde"] } # all optional crates. -serde = { version = "1.0.193", default-features = false, features = ["derive", "alloc"] } +serde = { version = "1.0.193", default-features = false, features = ["alloc", "derive"] } [features] -default = [ "std" ] +default = ["std"] wasm-api = [] std = [ "bounded-collections/std", @@ -37,4 +37,4 @@ std = [ "sp-std/std", "sp-weights/std", ] -runtime-benchmarks = [ "sp-runtime/runtime-benchmarks" ] +runtime-benchmarks = ["sp-runtime/runtime-benchmarks"] diff --git a/polkadot/parachain/test-parachains/Cargo.toml b/polkadot/parachain/test-parachains/Cargo.toml index 3252d1f83cd..7bbeda4893b 100644 --- a/polkadot/parachain/test-parachains/Cargo.toml +++ b/polkadot/parachain/test-parachains/Cargo.toml @@ -18,5 +18,5 @@ halt = { package = "test-parachain-halt", path = "halt" } sp-core = { path = "../../../substrate/primitives/core" } [features] -default = [ "std" ] -std = [ "adder/std", "halt/std", "parity-scale-codec/std" ] +default = ["std"] +std = ["adder/std", "halt/std", "parity-scale-codec/std"] diff --git a/polkadot/parachain/test-parachains/adder/Cargo.toml b/polkadot/parachain/test-parachains/adder/Cargo.toml index 1a47328b28e..ee0f6f551af 100644 --- a/polkadot/parachain/test-parachains/adder/Cargo.toml +++ b/polkadot/parachain/test-parachains/adder/Cargo.toml @@ -9,18 +9,18 @@ authors.workspace = true publish = false [dependencies] -parachain = { package = "polkadot-parachain-primitives", path = "../..", default-features = false, features = [ "wasm-api" ] } +parachain = { package = "polkadot-parachain-primitives", path = "../..", default-features = false, features = ["wasm-api"] } parity-scale-codec = { version = "3.6.1", default-features = false, features = ["derive"] } sp-std = { path = "../../../../substrate/primitives/std", default-features = false } tiny-keccak = { version = "2.0.2", features = ["keccak"] } -dlmalloc = { version = "0.2.4", features = [ "global" ] } +dlmalloc = { version = "0.2.4", features = ["global"] } # We need to make sure the global allocator is disabled until we have support of full substrate externalities -sp-io = { path = "../../../../substrate/primitives/io", default-features = false, features = [ "disable_allocator" ] } 
+sp-io = { path = "../../../../substrate/primitives/io", default-features = false, features = ["disable_allocator"] } [build-dependencies] substrate-wasm-builder = { path = "../../../../substrate/utils/wasm-builder" } [features] -default = [ "std" ] -std = [ "parachain/std", "parity-scale-codec/std", "sp-io/std", "sp-std/std" ] +default = ["std"] +std = ["parachain/std", "parity-scale-codec/std", "sp-io/std", "sp-std/std"] diff --git a/polkadot/parachain/test-parachains/halt/Cargo.toml b/polkadot/parachain/test-parachains/halt/Cargo.toml index cb2918273eb..428f33f730e 100644 --- a/polkadot/parachain/test-parachains/halt/Cargo.toml +++ b/polkadot/parachain/test-parachains/halt/Cargo.toml @@ -15,5 +15,5 @@ substrate-wasm-builder = { path = "../../../../substrate/utils/wasm-builder" } rustversion = "1.0.6" [features] -default = [ "std" ] +default = ["std"] std = [] diff --git a/polkadot/parachain/test-parachains/undying/Cargo.toml b/polkadot/parachain/test-parachains/undying/Cargo.toml index 273eef4b63a..e763b65cfdd 100644 --- a/polkadot/parachain/test-parachains/undying/Cargo.toml +++ b/polkadot/parachain/test-parachains/undying/Cargo.toml @@ -9,21 +9,21 @@ edition.workspace = true license.workspace = true [dependencies] -parachain = { package = "polkadot-parachain-primitives", path = "../..", default-features = false, features = [ "wasm-api" ] } +parachain = { package = "polkadot-parachain-primitives", path = "../..", default-features = false, features = ["wasm-api"] } parity-scale-codec = { version = "3.6.1", default-features = false, features = ["derive"] } sp-std = { path = "../../../../substrate/primitives/std", default-features = false } tiny-keccak = { version = "2.0.2", features = ["keccak"] } -dlmalloc = { version = "0.2.4", features = [ "global" ] } +dlmalloc = { version = "0.2.4", features = ["global"] } log = { version = "0.4.17", default-features = false } # We need to make sure the global allocator is disabled until we have support of full substrate externalities -sp-io = { path = "../../../../substrate/primitives/io", default-features = false, features = [ "disable_allocator" ] } +sp-io = { path = "../../../../substrate/primitives/io", default-features = false, features = ["disable_allocator"] } [build-dependencies] substrate-wasm-builder = { path = "../../../../substrate/utils/wasm-builder" } [features] -default = [ "std" ] +default = ["std"] std = [ "log/std", "parachain/std", diff --git a/polkadot/parachain/test-parachains/undying/collator/Cargo.toml b/polkadot/parachain/test-parachains/undying/collator/Cargo.toml index b7eab13bb9c..0de349eac01 100644 --- a/polkadot/parachain/test-parachains/undying/collator/Cargo.toml +++ b/polkadot/parachain/test-parachains/undying/collator/Cargo.toml @@ -41,4 +41,4 @@ sp-keyring = { path = "../../../../../substrate/primitives/keyring" } tokio = { version = "1.24.2", features = ["macros"] } [features] -network-protocol-staging = [ "polkadot-cli/network-protocol-staging" ] +network-protocol-staging = ["polkadot-cli/network-protocol-staging"] diff --git a/polkadot/primitives/Cargo.toml b/polkadot/primitives/Cargo.toml index 78a25d67081..5e746c622cf 100644 --- a/polkadot/primitives/Cargo.toml +++ b/polkadot/primitives/Cargo.toml @@ -11,7 +11,7 @@ bitvec = { version = "1.0.0", default-features = false, features = ["alloc", "se hex-literal = "0.4.1" parity-scale-codec = { version = "3.6.1", default-features = false, features = ["bit-vec", "derive"] } scale-info = { version = "2.10.0", default-features = false, features = ["bit-vec", "derive", 
"serde"] } -serde = { version = "1.0.193", default-features = false, features = ["derive", "alloc"] } +serde = { version = "1.0.193", default-features = false, features = ["alloc", "derive"] } application-crypto = { package = "sp-application-crypto", path = "../../substrate/primitives/application-crypto", default-features = false, features = ["serde"] } inherents = { package = "sp-inherents", path = "../../substrate/primitives/inherents", default-features = false } @@ -30,7 +30,7 @@ polkadot-core-primitives = { path = "../core-primitives", default-features = fal polkadot-parachain-primitives = { path = "../parachain", default-features = false } [features] -default = [ "std" ] +default = ["std"] std = [ "application-crypto/std", "bitvec/std", diff --git a/polkadot/roadmap/implementers-guide/book.toml b/polkadot/roadmap/implementers-guide/book.toml index f677c0d59c0..f91591ff170 100644 --- a/polkadot/roadmap/implementers-guide/book.toml +++ b/polkadot/roadmap/implementers-guide/book.toml @@ -15,7 +15,7 @@ renderer = ["html"] [output.html] additional-css = ["last-changed.css"] -additional-js = ["mermaid.min.js", "mermaid-init.js"] +additional-js = ["mermaid-init.js", "mermaid.min.js"] # Repository URL used in the last-changed link. git-repository-url = "https://github.com/paritytech/polkadot-sdk" diff --git a/polkadot/roadmap/phase-1.toml b/polkadot/roadmap/phase-1.toml index 50ef1f741fe..3a5f0d752de 100644 --- a/polkadot/roadmap/phase-1.toml +++ b/polkadot/roadmap/phase-1.toml @@ -14,7 +14,7 @@ requires = ["phase-0"] items = [ { label = "Buffer submitted parachain candidate until considered available." }, { label = "Validators submit signed bitfields re: availability of parachains" }, - { label = "relay chain fully includes candidate once considered available" } + { label = "relay chain fully includes candidate once considered available" }, ] [[group]] @@ -23,8 +23,8 @@ label = "Secondary checks and self-selection by validators" requires = ["two-phase-inclusion"] items = [ { label = "Extract #VCheck for all checkable candidates" }, - { label = "Maintain a frontier of candidates that are likely to be checked soon" }, { label = "Listen for new reports on candidates and new checks to update frontier" }, + { label = "Maintain a frontier of candidates that are likely to be checked soon" }, ] [[group]] @@ -32,8 +32,8 @@ name = "runtime-availability-validity-slashing" label = "Availability and Validity slashing in the runtime" requires = ["two-phase-inclusion"] items = [ + { label = "Submit secondary checks to runtime", port = "submitsecondary", requires = ["secondary-checking"] }, { label = "Track all candidates within the slash period as well as their session" }, - { label = "Submit secondary checks to runtime", port = "submitsecondary", requires = ["secondary-checking"]}, { label = "Track reports and attestatations for candidates" }, ] @@ -41,10 +41,10 @@ items = [ name = "non-direct-ancestor" label = "Allow candidates with non-direct ancestor" items = [ - { label = "Extend GlobalValidationData with random seed and session index"}, { label = "Block author can provide minimally-attested candidate with older relay parent" }, - { label = "Runtime can accept and process candidates with older relay-parent" }, + { label = "Extend GlobalValidationData with random seed and session index" }, { label = "Revise availability-store pruning to ensure only needed data is kept" }, + { label = "Runtime can accept and process candidates with older relay-parent" }, ] [[group]] @@ -52,13 +52,13 @@ name = 
"grandpa-voting-rule" label = "GRANDPA voting rule to follow valid/available chains" requires = ["runtime-availability-validity-slashing"] items = [ - { label = "Add a utility to flag a block and all of its ancestors as abandoned" }, { label = "Accept new blocks on abandoned but mark them abandoned as well." }, + { label = "Add a utility to flag a block and all of its ancestors as abandoned" }, { label = "Do not vote or build on abandoned chains" }, ] [[group]] name = "phase-1" label = "Phase 1: Availability and Validity" -requires = ["non-direct-ancestor", "grandpa-voting-rule", "runtime-availability-validity-slashing"] +requires = ["grandpa-voting-rule", "non-direct-ancestor", "runtime-availability-validity-slashing"] items = [] diff --git a/polkadot/runtime/common/Cargo.toml b/polkadot/runtime/common/Cargo.toml index 053eb88967f..7e8461c73ef 100644 --- a/polkadot/runtime/common/Cargo.toml +++ b/polkadot/runtime/common/Cargo.toml @@ -21,11 +21,11 @@ sp-api = { path = "../../../substrate/primitives/api", default-features = false inherents = { package = "sp-inherents", path = "../../../substrate/primitives/inherents", default-features = false } sp-std = { package = "sp-std", path = "../../../substrate/primitives/std", default-features = false } sp-io = { path = "../../../substrate/primitives/io", default-features = false } -sp-runtime = { path = "../../../substrate/primitives/runtime", default-features = false , features=["serde"]} +sp-runtime = { path = "../../../substrate/primitives/runtime", default-features = false, features = ["serde"] } sp-session = { path = "../../../substrate/primitives/session", default-features = false } -sp-staking = { path = "../../../substrate/primitives/staking", default-features = false, features=["serde"] } -sp-core = { path = "../../../substrate/primitives/core", default-features = false , features=["serde"]} -sp-npos-elections = { path = "../../../substrate/primitives/npos-elections", default-features = false, features=["serde"] } +sp-staking = { path = "../../../substrate/primitives/staking", default-features = false, features = ["serde"] } +sp-core = { path = "../../../substrate/primitives/core", default-features = false, features = ["serde"] } +sp-npos-elections = { path = "../../../substrate/primitives/npos-elections", default-features = false, features = ["serde"] } pallet-authorship = { path = "../../../substrate/frame/authorship", default-features = false } pallet-balances = { path = "../../../substrate/frame/balances", default-features = false } @@ -70,7 +70,7 @@ libsecp256k1 = "0.7.0" test-helpers = { package = "polkadot-primitives-test-helpers", path = "../../primitives/test-helpers" } [features] -default = [ "std" ] +default = ["std"] no_std = [] std = [ "bitvec/std", diff --git a/polkadot/runtime/common/slot_range_helper/Cargo.toml b/polkadot/runtime/common/slot_range_helper/Cargo.toml index 59c76a6cabb..f31811c1272 100644 --- a/polkadot/runtime/common/slot_range_helper/Cargo.toml +++ b/polkadot/runtime/common/slot_range_helper/Cargo.toml @@ -14,5 +14,5 @@ sp-std = { package = "sp-std", path = "../../../../substrate/primitives/std", de sp-runtime = { path = "../../../../substrate/primitives/runtime", default-features = false } [features] -default = [ "std" ] -std = [ "parity-scale-codec/std", "sp-runtime/std", "sp-std/std" ] +default = ["std"] +std = ["parity-scale-codec/std", "sp-runtime/std", "sp-std/std"] diff --git a/polkadot/runtime/metrics/Cargo.toml b/polkadot/runtime/metrics/Cargo.toml index cdfab82d00c..ad4a2fa9207 100644 --- 
a/polkadot/runtime/metrics/Cargo.toml +++ b/polkadot/runtime/metrics/Cargo.toml @@ -7,7 +7,7 @@ license.workspace = true description = "Runtime metric interface for the Polkadot node" [dependencies] -sp-std = { package = "sp-std", path = "../../../substrate/primitives/std", default-features = false} +sp-std = { package = "sp-std", path = "../../../substrate/primitives/std", default-features = false } sp-tracing = { path = "../../../substrate/primitives/tracing", default-features = false } parity-scale-codec = { version = "3.6.1", default-features = false } primitives = { package = "polkadot-primitives", path = "../../primitives", default-features = false } @@ -16,7 +16,7 @@ frame-benchmarking = { path = "../../../substrate/frame/benchmarking", default-f bs58 = { version = "0.5.0", default-features = false, features = ["alloc"] } [features] -default = [ "std" ] +default = ["std"] std = [ "bs58/std", "frame-benchmarking?/std", @@ -25,4 +25,4 @@ std = [ "sp-std/std", "sp-tracing/std", ] -runtime-metrics = [ "frame-benchmarking", "sp-tracing/with-tracing" ] +runtime-metrics = ["frame-benchmarking", "sp-tracing/with-tracing"] diff --git a/polkadot/runtime/parachains/Cargo.toml b/polkadot/runtime/parachains/Cargo.toml index 9e8c9a5c759..2627bc9ef49 100644 --- a/polkadot/runtime/parachains/Cargo.toml +++ b/polkadot/runtime/parachains/Cargo.toml @@ -13,7 +13,7 @@ parity-scale-codec = { version = "3.6.1", default-features = false, features = [ log = { version = "0.4.17", default-features = false } rustc-hex = { version = "2.1.0", default-features = false } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -serde = { version = "1.0.193", default-features = false, features = ["derive", "alloc"] } +serde = { version = "1.0.193", default-features = false, features = ["alloc", "derive"] } derive_more = "0.99.17" bitflags = "1.3.2" @@ -21,10 +21,10 @@ sp-api = { path = "../../../substrate/primitives/api", default-features = false inherents = { package = "sp-inherents", path = "../../../substrate/primitives/inherents", default-features = false } sp-std = { package = "sp-std", path = "../../../substrate/primitives/std", default-features = false } sp-io = { path = "../../../substrate/primitives/io", default-features = false } -sp-runtime = { path = "../../../substrate/primitives/runtime", default-features = false, features=["serde"] } +sp-runtime = { path = "../../../substrate/primitives/runtime", default-features = false, features = ["serde"] } sp-session = { path = "../../../substrate/primitives/session", default-features = false } -sp-staking = { path = "../../../substrate/primitives/staking", default-features = false, features=["serde"] } -sp-core = { path = "../../../substrate/primitives/core", default-features = false, features=["serde"] } +sp-staking = { path = "../../../substrate/primitives/staking", default-features = false, features = ["serde"] } +sp-core = { path = "../../../substrate/primitives/core", default-features = false, features = ["serde"] } sp-keystore = { path = "../../../substrate/primitives/keystore", optional = true } sp-application-crypto = { path = "../../../substrate/primitives/application-crypto", default-features = false, optional = true } sp-tracing = { path = "../../../substrate/primitives/tracing", default-features = false, optional = true } @@ -50,7 +50,7 @@ rand = { version = "0.8.5", default-features = false } rand_chacha = { version = "0.3.1", default-features = false } static_assertions = { version = "1.1.0", optional = true } 
polkadot-parachain-primitives = { path = "../../parachain", default-features = false } -polkadot-runtime-metrics = { path = "../metrics", default-features = false} +polkadot-runtime-metrics = { path = "../metrics", default-features = false } polkadot-core-primitives = { path = "../../core-primitives", default-features = false } [dev-dependencies] @@ -66,7 +66,7 @@ assert_matches = "1" serde_json = "1.0.108" [features] -default = [ "std" ] +default = ["std"] no_std = [] std = [ "bitvec/std", diff --git a/polkadot/runtime/rococo/Cargo.toml b/polkadot/runtime/rococo/Cargo.toml index 926768e9748..1edce2aa44d 100644 --- a/polkadot/runtime/rococo/Cargo.toml +++ b/polkadot/runtime/rococo/Cargo.toml @@ -53,7 +53,7 @@ pallet-collective = { path = "../../../substrate/frame/collective", default-feat pallet-conviction-voting = { path = "../../../substrate/frame/conviction-voting", default-features = false } pallet-democracy = { path = "../../../substrate/frame/democracy", default-features = false } pallet-elections-phragmen = { path = "../../../substrate/frame/elections-phragmen", default-features = false } -pallet-asset-rate = { path = "../../../substrate/frame/asset-rate", default-features = false } +pallet-asset-rate = { path = "../../../substrate/frame/asset-rate", default-features = false } frame-executive = { path = "../../../substrate/frame/executive", default-features = false } pallet-grandpa = { path = "../../../substrate/frame/grandpa", default-features = false } pallet-identity = { path = "../../../substrate/frame/identity", default-features = false } @@ -105,7 +105,7 @@ xcm-builder = { package = "staging-xcm-builder", path = "../../xcm/xcm-builder", [dev-dependencies] tiny-keccak = { version = "2.0.2", features = ["keccak"] } keyring = { package = "sp-keyring", path = "../../../substrate/primitives/keyring" } -remote-externalities = { package = "frame-remote-externalities" , path = "../../../substrate/utils/frame/remote-externalities" } +remote-externalities = { package = "frame-remote-externalities", path = "../../../substrate/utils/frame/remote-externalities" } sp-trie = { path = "../../../substrate/primitives/trie" } separator = "0.4.1" serde_json = "1.0.108" @@ -116,7 +116,7 @@ tokio = { version = "1.24.2", features = ["macros"] } substrate-wasm-builder = { path = "../../../substrate/utils/wasm-builder", optional = true } [features] -default = [ "std" ] +default = ["std"] no_std = [] std = [ "authority-discovery-primitives/std", @@ -313,11 +313,11 @@ try-runtime = [ ] # Set timing constants (e.g. session period) to faster versions to speed up testing. -fast-runtime = [ "rococo-runtime-constants/fast-runtime" ] +fast-runtime = ["rococo-runtime-constants/fast-runtime"] -runtime-metrics = [ "runtime-parachains/runtime-metrics", "sp-io/with-tracing" ] +runtime-metrics = ["runtime-parachains/runtime-metrics", "sp-io/with-tracing"] # A feature that should be enabled when the runtime should be built for on-chain # deployment. This will disable stuff that shouldn't be part of the on-chain wasm # to make it smaller like logging for example. 
-on-chain-release-build = [ "sp-api/disable-logging" ] +on-chain-release-build = ["sp-api/disable-logging"] diff --git a/polkadot/runtime/rococo/constants/Cargo.toml b/polkadot/runtime/rococo/constants/Cargo.toml index 8ff6d57ea5b..e052c89311f 100644 --- a/polkadot/runtime/rococo/constants/Cargo.toml +++ b/polkadot/runtime/rococo/constants/Cargo.toml @@ -19,7 +19,7 @@ sp-core = { path = "../../../../substrate/primitives/core", default-features = f xcm = { package = "staging-xcm", path = "../../../xcm", default-features = false } [features] -default = [ "std" ] +default = ["std"] std = [ "frame-support/std", "primitives/std", diff --git a/polkadot/runtime/test-runtime/Cargo.toml b/polkadot/runtime/test-runtime/Cargo.toml index 85e452d1bd4..850047c83bc 100644 --- a/polkadot/runtime/test-runtime/Cargo.toml +++ b/polkadot/runtime/test-runtime/Cargo.toml @@ -77,7 +77,7 @@ serde_json = "1.0.108" substrate-wasm-builder = { path = "../../../substrate/utils/wasm-builder" } [features] -default = [ "std" ] +default = ["std"] no_std = [] only-staking = [] runtime-metrics = [ diff --git a/polkadot/runtime/test-runtime/constants/Cargo.toml b/polkadot/runtime/test-runtime/constants/Cargo.toml index d83e92a6ce8..88cd441f73b 100644 --- a/polkadot/runtime/test-runtime/constants/Cargo.toml +++ b/polkadot/runtime/test-runtime/constants/Cargo.toml @@ -17,7 +17,7 @@ sp-weights = { path = "../../../../substrate/primitives/weights", default-featur sp-core = { path = "../../../../substrate/primitives/core", default-features = false } [features] -default = [ "std" ] +default = ["std"] std = [ "frame-support/std", "primitives/std", diff --git a/polkadot/runtime/westend/Cargo.toml b/polkadot/runtime/westend/Cargo.toml index c31db2703c1..d8402ff39ee 100644 --- a/polkadot/runtime/westend/Cargo.toml +++ b/polkadot/runtime/westend/Cargo.toml @@ -46,7 +46,7 @@ frame-support = { path = "../../../substrate/frame/support", default-features = frame-system = { path = "../../../substrate/frame/system", default-features = false } frame-system-rpc-runtime-api = { path = "../../../substrate/frame/system/rpc/runtime-api", default-features = false } westend-runtime-constants = { package = "westend-runtime-constants", path = "constants", default-features = false } -pallet-asset-rate = { path = "../../../substrate/frame/asset-rate", default-features = false } +pallet-asset-rate = { path = "../../../substrate/frame/asset-rate", default-features = false } pallet-authority-discovery = { path = "../../../substrate/frame/authority-discovery", default-features = false } pallet-authorship = { path = "../../../substrate/frame/authorship", default-features = false } pallet-babe = { path = "../../../substrate/frame/babe", default-features = false } @@ -117,7 +117,7 @@ hex-literal = "0.4.1" tiny-keccak = { version = "2.0.2", features = ["keccak"] } keyring = { package = "sp-keyring", path = "../../../substrate/primitives/keyring" } serde_json = "1.0.108" -remote-externalities = { package = "frame-remote-externalities" , path = "../../../substrate/utils/frame/remote-externalities" } +remote-externalities = { package = "frame-remote-externalities", path = "../../../substrate/utils/frame/remote-externalities" } tokio = { version = "1.24.2", features = ["macros"] } sp-tracing = { path = "../../../substrate/primitives/tracing", default-features = false } @@ -125,7 +125,7 @@ sp-tracing = { path = "../../../substrate/primitives/tracing", default-features substrate-wasm-builder = { path = "../../../substrate/utils/wasm-builder" } [features] -default = [ 
"std" ] +default = ["std"] no_std = [] only-staking = [] std = [ @@ -339,9 +339,9 @@ try-runtime = [ # Set timing constants (e.g. session period) to faster versions to speed up testing. fast-runtime = [] -runtime-metrics = [ "runtime-parachains/runtime-metrics", "sp-io/with-tracing" ] +runtime-metrics = ["runtime-parachains/runtime-metrics", "sp-io/with-tracing"] # A feature that should be enabled when the runtime should be built for on-chain # deployment. This will disable stuff that shouldn't be part of the on-chain wasm # to make it smaller like logging for example. -on-chain-release-build = [ "sp-api/disable-logging" ] +on-chain-release-build = ["sp-api/disable-logging"] diff --git a/polkadot/runtime/westend/constants/Cargo.toml b/polkadot/runtime/westend/constants/Cargo.toml index 2243210975b..24f0da63b04 100644 --- a/polkadot/runtime/westend/constants/Cargo.toml +++ b/polkadot/runtime/westend/constants/Cargo.toml @@ -19,7 +19,7 @@ sp-core = { path = "../../../../substrate/primitives/core", default-features = f xcm = { package = "staging-xcm", path = "../../../xcm", default-features = false } [features] -default = [ "std" ] +default = ["std"] std = [ "frame-support/std", "primitives/std", diff --git a/polkadot/xcm/Cargo.toml b/polkadot/xcm/Cargo.toml index 8190b812bf6..8fa0d128950 100644 --- a/polkadot/xcm/Cargo.toml +++ b/polkadot/xcm/Cargo.toml @@ -8,10 +8,10 @@ license.workspace = true [dependencies] bounded-collections = { version = "0.1.8", default-features = false, features = ["serde"] } -derivative = { version = "2.2.0", default-features = false, features = [ "use_core" ] } +derivative = { version = "2.2.0", default-features = false, features = ["use_core"] } impl-trait-for-tuples = "0.2.2" log = { version = "0.4.17", default-features = false } -parity-scale-codec = { version = "3.6.1", default-features = false, features = [ "derive", "max-encoded-len" ] } +parity-scale-codec = { version = "3.6.1", default-features = false, features = ["derive", "max-encoded-len"] } scale-info = { version = "2.10.0", default-features = false, features = ["derive", "serde"] } sp-weights = { path = "../../substrate/primitives/weights", default-features = false, features = ["serde"] } serde = { version = "1.0.193", default-features = false, features = ["alloc", "derive"] } @@ -24,7 +24,7 @@ hex = "0.4.3" hex-literal = "0.4.1" [features] -default = [ "std" ] +default = ["std"] wasm-api = [] std = [ "bounded-collections/std", diff --git a/polkadot/xcm/pallet-xcm-benchmarks/Cargo.toml b/polkadot/xcm/pallet-xcm-benchmarks/Cargo.toml index 5be0bbe4ae5..5438279c673 100644 --- a/polkadot/xcm/pallet-xcm-benchmarks/Cargo.toml +++ b/polkadot/xcm/pallet-xcm-benchmarks/Cargo.toml @@ -12,13 +12,13 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -frame-support = { path = "../../../substrate/frame/support", default-features = false} -frame-system = { path = "../../../substrate/frame/system", default-features = false} -sp-runtime = { path = "../../../substrate/primitives/runtime", default-features = false} -sp-std = { path = "../../../substrate/primitives/std", default-features = false} -sp-io = { path = "../../../substrate/primitives/io", default-features = false} +frame-support = { path = "../../../substrate/frame/support", default-features = false } +frame-system = { path = "../../../substrate/frame/system", default-features = 
false } +sp-runtime = { path = "../../../substrate/primitives/runtime", default-features = false } +sp-std = { path = "../../../substrate/primitives/std", default-features = false } +sp-io = { path = "../../../substrate/primitives/io", default-features = false } xcm-executor = { package = "staging-xcm-executor", path = "../xcm-executor", default-features = false } -frame-benchmarking = { path = "../../../substrate/frame/benchmarking", default-features = false} +frame-benchmarking = { path = "../../../substrate/frame/benchmarking", default-features = false } xcm = { package = "staging-xcm", path = "..", default-features = false } xcm-builder = { package = "staging-xcm-builder", path = "../xcm-builder", default-features = false } log = "0.4.17" @@ -36,7 +36,7 @@ polkadot-runtime-common = { path = "../../runtime/common" } polkadot-primitives = { path = "../../primitives" } [features] -default = [ "std" ] +default = ["std"] std = [ "codec/std", "frame-benchmarking/std", diff --git a/polkadot/xcm/pallet-xcm/Cargo.toml b/polkadot/xcm/pallet-xcm/Cargo.toml index 209c826b8f8..645ac8f9941 100644 --- a/polkadot/xcm/pallet-xcm/Cargo.toml +++ b/polkadot/xcm/pallet-xcm/Cargo.toml @@ -13,12 +13,12 @@ scale-info = { version = "2.10.0", default-features = false, features = ["derive serde = { version = "1.0.193", optional = true, features = ["derive"] } log = { version = "0.4.17", default-features = false } -frame-support = { path = "../../../substrate/frame/support", default-features = false} -frame-system = { path = "../../../substrate/frame/system", default-features = false} -sp-core = { path = "../../../substrate/primitives/core", default-features = false} -sp-io = { path = "../../../substrate/primitives/io", default-features = false} -sp-runtime = { path = "../../../substrate/primitives/runtime", default-features = false} -sp-std = { path = "../../../substrate/primitives/std", default-features = false} +frame-support = { path = "../../../substrate/frame/support", default-features = false } +frame-system = { path = "../../../substrate/frame/system", default-features = false } +sp-core = { path = "../../../substrate/primitives/core", default-features = false } +sp-io = { path = "../../../substrate/primitives/io", default-features = false } +sp-runtime = { path = "../../../substrate/primitives/runtime", default-features = false } +sp-std = { path = "../../../substrate/primitives/std", default-features = false } xcm = { package = "staging-xcm", path = "..", default-features = false } xcm-executor = { package = "staging-xcm-executor", path = "../xcm-executor", default-features = false } @@ -34,7 +34,7 @@ polkadot-runtime-parachains = { path = "../../runtime/parachains" } polkadot-parachain-primitives = { path = "../../parachain" } [features] -default = [ "std" ] +default = ["std"] std = [ "bounded-collections/std", "codec/std", diff --git a/polkadot/xcm/xcm-builder/Cargo.toml b/polkadot/xcm/xcm-builder/Cargo.toml index 7d6c40eb841..53743066720 100644 --- a/polkadot/xcm/xcm-builder/Cargo.toml +++ b/polkadot/xcm/xcm-builder/Cargo.toml @@ -37,7 +37,7 @@ assert_matches = "1.5.0" polkadot-test-runtime = { path = "../../runtime/test-runtime" } [features] -default = [ "std" ] +default = ["std"] runtime-benchmarks = [ "frame-support/runtime-benchmarks", "frame-system/runtime-benchmarks", diff --git a/polkadot/xcm/xcm-executor/Cargo.toml b/polkadot/xcm/xcm-executor/Cargo.toml index d5edb1ea0f5..b435c2d510a 100644 --- a/polkadot/xcm/xcm-executor/Cargo.toml +++ b/polkadot/xcm/xcm-executor/Cargo.toml @@ -23,7 +23,7 @@ 
log = { version = "0.4.17", default-features = false } frame-benchmarking = { path = "../../../substrate/frame/benchmarking", default-features = false, optional = true } [features] -default = [ "std" ] +default = ["std"] runtime-benchmarks = [ "frame-benchmarking/runtime-benchmarks", "frame-support/runtime-benchmarks", diff --git a/polkadot/xcm/xcm-executor/integration-tests/Cargo.toml b/polkadot/xcm/xcm-executor/integration-tests/Cargo.toml index ddb45965ee4..0818d16a262 100644 --- a/polkadot/xcm/xcm-executor/integration-tests/Cargo.toml +++ b/polkadot/xcm/xcm-executor/integration-tests/Cargo.toml @@ -26,5 +26,5 @@ xcm-executor = { package = "staging-xcm-executor", path = ".." } sp-tracing = { path = "../../../../substrate/primitives/tracing" } [features] -default = [ "std" ] -std = [ "frame-support/std", "sp-runtime/std", "xcm/std" ] +default = ["std"] +std = ["frame-support/std", "sp-runtime/std", "xcm/std"] diff --git a/substrate/bin/minimal/node/Cargo.toml b/substrate/bin/minimal/node/Cargo.toml index 56f123a4719..d8c8c7740b0 100644 --- a/substrate/bin/minimal/node/Cargo.toml +++ b/substrate/bin/minimal/node/Cargo.toml @@ -24,9 +24,9 @@ jsonrpsee = { version = "0.16.2", features = ["server"] } serde_json = "1.0.108" sc-cli = { path = "../../../client/cli" } -sc-executor = { path = "../../../client/executor" } -sc-network = { path = "../../../client/network" } -sc-service = { path = "../../../client/service" } +sc-executor = { path = "../../../client/executor" } +sc-network = { path = "../../../client/network" } +sc-service = { path = "../../../client/service" } sc-telemetry = { path = "../../../client/telemetry" } sc-transaction-pool = { path = "../../../client/transaction-pool" } sc-transaction-pool-api = { path = "../../../client/transaction-pool/api" } @@ -47,7 +47,7 @@ sp-runtime = { path = "../../../primitives/runtime" } substrate-frame-rpc-system = { path = "../../../utils/frame/rpc/system" } -frame = { path = "../../../frame", features = ["runtime", "experimental"] } +frame = { path = "../../../frame", features = ["experimental", "runtime"] } runtime = { package = "minimal-runtime", path = "../runtime" } [build-dependencies] diff --git a/substrate/bin/minimal/runtime/Cargo.toml b/substrate/bin/minimal/runtime/Cargo.toml index 85d56d0638a..f7685642d27 100644 --- a/substrate/bin/minimal/runtime/Cargo.toml +++ b/substrate/bin/minimal/runtime/Cargo.toml @@ -13,8 +13,8 @@ parity-scale-codec = { version = "3.0.0", default-features = false } scale-info = { version = "2.6.0", default-features = false } # this is a frame-based runtime, thus importing `frame` with runtime feature enabled. 
-frame = { path = "../../../frame", default-features = false, features = ["runtime", "experimental"] } -frame-support = { path = "../../../frame/support", default-features = false} +frame = { path = "../../../frame", default-features = false, features = ["experimental", "runtime"] } +frame-support = { path = "../../../frame/support", default-features = false } # pallets that we want to use pallet-balances = { path = "../../../frame/balances", default-features = false } @@ -24,14 +24,14 @@ pallet-transaction-payment = { path = "../../../frame/transaction-payment", defa pallet-transaction-payment-rpc-runtime-api = { path = "../../../frame/transaction-payment/rpc/runtime-api", default-features = false } # genesis builder that allows us to interacto with runtime genesis config -sp-genesis-builder = { path = "../../../primitives/genesis-builder", default-features = false} +sp-genesis-builder = { path = "../../../primitives/genesis-builder", default-features = false } [build-dependencies] substrate-wasm-builder = { path = "../../../utils/wasm-builder", optional = true } [features] -default = [ "std" ] +default = ["std"] std = [ "frame-support/std", "frame/std", diff --git a/substrate/bin/node-template/node/Cargo.toml b/substrate/bin/node-template/node/Cargo.toml index ed1980fbb82..a76aaf2a631 100644 --- a/substrate/bin/node-template/node/Cargo.toml +++ b/substrate/bin/node-template/node/Cargo.toml @@ -18,7 +18,7 @@ name = "node-template" [dependencies] clap = { version = "4.4.10", features = ["derive"] } -futures = { version = "0.3.21", features = ["thread-pool"]} +futures = { version = "0.3.21", features = ["thread-pool"] } serde_json = "1.0.108" sc-cli = { path = "../../../client/cli" } @@ -42,7 +42,7 @@ sp-timestamp = { path = "../../../primitives/timestamp" } sp-inherents = { path = "../../../primitives/inherents" } sp-keyring = { path = "../../../primitives/keyring" } frame-system = { path = "../../../frame/system" } -pallet-transaction-payment = { path = "../../../frame/transaction-payment", default-features = false} +pallet-transaction-payment = { path = "../../../frame/transaction-payment", default-features = false } # These dependencies are used for the node template's RPCs jsonrpsee = { version = "0.16.2", features = ["server"] } @@ -62,7 +62,7 @@ frame-benchmarking-cli = { path = "../../../utils/frame/benchmarking-cli" } node-template-runtime = { path = "../runtime" } # CLI-specific dependencies -try-runtime-cli = { path = "../../../utils/frame/try-runtime/cli", optional = true} +try-runtime-cli = { path = "../../../utils/frame/try-runtime/cli", optional = true } [build-dependencies] substrate-build-script-utils = { path = "../../../utils/build-script-utils" } diff --git a/substrate/bin/node-template/pallets/template/Cargo.toml b/substrate/bin/node-template/pallets/template/Cargo.toml index 77183c42cd6..405d9c229f8 100644 --- a/substrate/bin/node-template/pallets/template/Cargo.toml +++ b/substrate/bin/node-template/pallets/template/Cargo.toml @@ -17,10 +17,10 @@ codec = { package = "parity-scale-codec", version = "3.6.1", default-features = "derive", ] } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -frame-benchmarking = { path = "../../../../frame/benchmarking", default-features = false, optional = true} -frame-support = { path = "../../../../frame/support", default-features = false} -frame-system = { path = "../../../../frame/system", default-features = false} -sp-std = { path = "../../../../primitives/std", default-features = false} 
+frame-benchmarking = { path = "../../../../frame/benchmarking", default-features = false, optional = true } +frame-support = { path = "../../../../frame/support", default-features = false } +frame-system = { path = "../../../../frame/system", default-features = false } +sp-std = { path = "../../../../primitives/std", default-features = false } [dev-dependencies] sp-core = { path = "../../../../primitives/core" } @@ -28,7 +28,7 @@ sp-io = { path = "../../../../primitives/io" } sp-runtime = { path = "../../../../primitives/runtime" } [features] -default = [ "std" ] +default = ["std"] std = [ "codec/std", "frame-benchmarking?/std", diff --git a/substrate/bin/node-template/runtime/Cargo.toml b/substrate/bin/node-template/runtime/Cargo.toml index 7711ddba34d..55fb03159ab 100644 --- a/substrate/bin/node-template/runtime/Cargo.toml +++ b/substrate/bin/node-template/runtime/Cargo.toml @@ -16,48 +16,48 @@ targets = ["x86_64-unknown-linux-gnu"] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } scale-info = { version = "2.10.0", default-features = false, features = ["derive", "serde"] } -pallet-aura = { path = "../../../frame/aura", default-features = false} -pallet-balances = { path = "../../../frame/balances", default-features = false} -frame-support = { path = "../../../frame/support", default-features = false} -pallet-grandpa = { path = "../../../frame/grandpa", default-features = false} -pallet-sudo = { path = "../../../frame/sudo", default-features = false} -frame-system = { path = "../../../frame/system", default-features = false} +pallet-aura = { path = "../../../frame/aura", default-features = false } +pallet-balances = { path = "../../../frame/balances", default-features = false } +frame-support = { path = "../../../frame/support", default-features = false } +pallet-grandpa = { path = "../../../frame/grandpa", default-features = false } +pallet-sudo = { path = "../../../frame/sudo", default-features = false } +frame-system = { path = "../../../frame/system", default-features = false } frame-try-runtime = { path = "../../../frame/try-runtime", default-features = false, optional = true } -pallet-timestamp = { path = "../../../frame/timestamp", default-features = false} -pallet-transaction-payment = { path = "../../../frame/transaction-payment", default-features = false} -frame-executive = { path = "../../../frame/executive", default-features = false} -sp-api = { path = "../../../primitives/api", default-features = false} -sp-block-builder = { path = "../../../primitives/block-builder", default-features = false} +pallet-timestamp = { path = "../../../frame/timestamp", default-features = false } +pallet-transaction-payment = { path = "../../../frame/transaction-payment", default-features = false } +frame-executive = { path = "../../../frame/executive", default-features = false } +sp-api = { path = "../../../primitives/api", default-features = false } +sp-block-builder = { path = "../../../primitives/block-builder", default-features = false } sp-consensus-aura = { path = "../../../primitives/consensus/aura", default-features = false, features = ["serde"] } sp-consensus-grandpa = { path = "../../../primitives/consensus/grandpa", default-features = false, features = ["serde"] } -sp-core = { path = "../../../primitives/core", default-features = false, features = ["serde"]} -sp-inherents = { path = "../../../primitives/inherents", default-features = false} -sp-offchain = { path = "../../../primitives/offchain", default-features = false} 
+sp-core = { path = "../../../primitives/core", default-features = false, features = ["serde"] } +sp-inherents = { path = "../../../primitives/inherents", default-features = false } +sp-offchain = { path = "../../../primitives/offchain", default-features = false } sp-runtime = { path = "../../../primitives/runtime", default-features = false, features = ["serde"] } -sp-session = { path = "../../../primitives/session", default-features = false} -sp-std = { path = "../../../primitives/std", default-features = false} -sp-storage = { path = "../../../primitives/storage", default-features = false} -sp-transaction-pool = { path = "../../../primitives/transaction-pool", default-features = false} +sp-session = { path = "../../../primitives/session", default-features = false } +sp-std = { path = "../../../primitives/std", default-features = false } +sp-storage = { path = "../../../primitives/storage", default-features = false } +sp-transaction-pool = { path = "../../../primitives/transaction-pool", default-features = false } sp-version = { path = "../../../primitives/version", default-features = false, features = ["serde"] } serde_json = { version = "1.0.108", default-features = false, features = ["alloc"] } sp-genesis-builder = { default-features = false, path = "../../../primitives/genesis-builder" } # Used for the node template's RPCs -frame-system-rpc-runtime-api = { path = "../../../frame/system/rpc/runtime-api", default-features = false} -pallet-transaction-payment-rpc-runtime-api = { path = "../../../frame/transaction-payment/rpc/runtime-api", default-features = false} +frame-system-rpc-runtime-api = { path = "../../../frame/system/rpc/runtime-api", default-features = false } +pallet-transaction-payment-rpc-runtime-api = { path = "../../../frame/transaction-payment/rpc/runtime-api", default-features = false } # Used for runtime benchmarking frame-benchmarking = { path = "../../../frame/benchmarking", default-features = false, optional = true } frame-system-benchmarking = { path = "../../../frame/system/benchmarking", default-features = false, optional = true } # Local Dependencies -pallet-template = { path = "../pallets/template", default-features = false} +pallet-template = { path = "../pallets/template", default-features = false } [build-dependencies] substrate-wasm-builder = { path = "../../../utils/wasm-builder", optional = true } [features] -default = [ "std" ] +default = ["std"] std = [ "codec/std", "frame-benchmarking?/std", @@ -119,4 +119,4 @@ try-runtime = [ "pallet-transaction-payment/try-runtime", "sp-runtime/try-runtime", ] -experimental = [ "pallet-aura/experimental" ] +experimental = ["pallet-aura/experimental"] diff --git a/substrate/bin/node-template/rust-toolchain.toml b/substrate/bin/node-template/rust-toolchain.toml index 64daeff6836..2a35c6ed07c 100644 --- a/substrate/bin/node-template/rust-toolchain.toml +++ b/substrate/bin/node-template/rust-toolchain.toml @@ -6,9 +6,9 @@ components = [ "rust-analyzer", "rust-src", "rust-std", - "rustc-dev", "rustc", + "rustc-dev", "rustfmt", ] -targets = [ "wasm32-unknown-unknown" ] +targets = ["wasm32-unknown-unknown"] profile = "minimal" diff --git a/substrate/bin/node/bench/Cargo.toml b/substrate/bin/node/bench/Cargo.toml index a3fb8b43030..5ce91dc3c44 100644 --- a/substrate/bin/node/bench/Cargo.toml +++ b/substrate/bin/node/bench/Cargo.toml @@ -31,7 +31,7 @@ sp-core = { path = "../../../primitives/core" } sp-consensus = { path = "../../../primitives/consensus/common" } sc-basic-authorship = { path = 
"../../../client/basic-authorship" } sp-inherents = { path = "../../../primitives/inherents" } -sp-timestamp = { path = "../../../primitives/timestamp", default-features = false} +sp-timestamp = { path = "../../../primitives/timestamp", default-features = false } sp-tracing = { path = "../../../primitives/tracing" } hash-db = "0.16.0" tempfile = "3.1.0" diff --git a/substrate/bin/node/cli/Cargo.toml b/substrate/bin/node/cli/Cargo.toml index 3e8ffdf83d0..e511633ff50 100644 --- a/substrate/bin/node/cli/Cargo.toml +++ b/substrate/bin/node/cli/Cargo.toml @@ -79,7 +79,7 @@ sc-consensus-babe = { path = "../../../client/consensus/babe" } grandpa = { package = "sc-consensus-grandpa", path = "../../../client/consensus/grandpa" } sc-rpc = { path = "../../../client/rpc" } sc-basic-authorship = { path = "../../../client/basic-authorship" } -sc-service = { path = "../../../client/service", default-features = false} +sc-service = { path = "../../../client/service", default-features = false } sc-telemetry = { path = "../../../client/telemetry" } sc-executor = { path = "../../../client/executor" } sc-authority-discovery = { path = "../../../client/authority-discovery" } @@ -96,8 +96,8 @@ frame-system-rpc-runtime-api = { path = "../../../frame/system/rpc/runtime-api" pallet-assets = { path = "../../../frame/assets" } pallet-asset-conversion-tx-payment = { path = "../../../frame/transaction-payment/asset-conversion-tx-payment" } pallet-asset-tx-payment = { path = "../../../frame/transaction-payment/asset-tx-payment" } -pallet-im-online = { path = "../../../frame/im-online", default-features = false} -pallet-skip-feeless-payment = { path = "../../../frame/transaction-payment/skip-feeless-payment", default-features = false} +pallet-im-online = { path = "../../../frame/im-online", default-features = false } +pallet-skip-feeless-payment = { path = "../../../frame/transaction-payment/skip-feeless-payment", default-features = false } # node-specific dependencies kitchensink-runtime = { path = "../runtime" } @@ -105,10 +105,10 @@ node-rpc = { path = "../rpc" } node-primitives = { path = "../primitives" } # CLI-specific dependencies -sc-cli = { path = "../../../client/cli", optional = true} -frame-benchmarking-cli = { path = "../../../utils/frame/benchmarking-cli", optional = true} -node-inspect = { package = "staging-node-inspect", path = "../inspect", optional = true} -try-runtime-cli = { path = "../../../utils/frame/try-runtime/cli", optional = true} +sc-cli = { path = "../../../client/cli", optional = true } +frame-benchmarking-cli = { path = "../../../utils/frame/benchmarking-cli", optional = true } +node-inspect = { package = "staging-node-inspect", path = "../inspect", optional = true } +try-runtime-cli = { path = "../../../utils/frame/try-runtime/cli", optional = true } serde_json = "1.0.108" [dev-dependencies] @@ -129,7 +129,7 @@ regex = "1.6.0" platforms = "3.0" soketto = "0.7.1" criterion = { version = "0.4.0", features = ["async_tokio"] } -tokio = { version = "1.22.0", features = ["macros", "time", "parking_lot"] } +tokio = { version = "1.22.0", features = ["macros", "parking_lot", "time"] } tokio-util = { version = "0.7.4", features = ["compat"] } wait-timeout = "0.2" substrate-rpc-client = { path = "../../../utils/frame/rpc/client" } @@ -159,17 +159,17 @@ sp-state-machine = { path = "../../../primitives/state-machine" } [build-dependencies] clap = { version = "4.4.10", optional = true } clap_complete = { version = "4.0.2", optional = true } -node-inspect = { package = "staging-node-inspect", path = 
"../inspect", optional = true} -frame-benchmarking-cli = { path = "../../../utils/frame/benchmarking-cli", optional = true} -substrate-build-script-utils = { path = "../../../utils/build-script-utils", optional = true} -substrate-frame-cli = { path = "../../../utils/frame/frame-utilities-cli", optional = true} -try-runtime-cli = { path = "../../../utils/frame/try-runtime/cli", optional = true} +node-inspect = { package = "staging-node-inspect", path = "../inspect", optional = true } +frame-benchmarking-cli = { path = "../../../utils/frame/benchmarking-cli", optional = true } +substrate-build-script-utils = { path = "../../../utils/build-script-utils", optional = true } +substrate-frame-cli = { path = "../../../utils/frame/frame-utilities-cli", optional = true } +try-runtime-cli = { path = "../../../utils/frame/try-runtime/cli", optional = true } sc-cli = { path = "../../../client/cli", optional = true } pallet-balances = { path = "../../../frame/balances" } sc-storage-monitor = { path = "../../../client/storage-monitor" } [features] -default = [ "cli" ] +default = ["cli"] cli = [ "clap", "clap_complete", diff --git a/substrate/bin/node/inspect/Cargo.toml b/substrate/bin/node/inspect/Cargo.toml index a5187d8f265..cdf4b1ff146 100644 --- a/substrate/bin/node/inspect/Cargo.toml +++ b/substrate/bin/node/inspect/Cargo.toml @@ -17,7 +17,7 @@ codec = { package = "parity-scale-codec", version = "3.6.1" } thiserror = "1.0" sc-cli = { path = "../../../client/cli" } sc-client-api = { path = "../../../client/api" } -sc-service = { path = "../../../client/service", default-features = false} +sc-service = { path = "../../../client/service", default-features = false } sp-blockchain = { path = "../../../primitives/blockchain" } sp-core = { path = "../../../primitives/core" } sp-io = { path = "../../../primitives/io" } diff --git a/substrate/bin/node/primitives/Cargo.toml b/substrate/bin/node/primitives/Cargo.toml index 77bf7ad4676..40735ff21d4 100644 --- a/substrate/bin/node/primitives/Cargo.toml +++ b/substrate/bin/node/primitives/Cargo.toml @@ -13,9 +13,9 @@ publish = false targets = ["x86_64-unknown-linux-gnu"] [dependencies] -sp-core = { path = "../../../primitives/core", default-features = false} -sp-runtime = { path = "../../../primitives/runtime", default-features = false} +sp-core = { path = "../../../primitives/core", default-features = false } +sp-runtime = { path = "../../../primitives/runtime", default-features = false } [features] -default = [ "std" ] -std = [ "sp-core/std", "sp-runtime/std" ] +default = ["std"] +std = ["sp-core/std", "sp-runtime/std"] diff --git a/substrate/bin/node/runtime/Cargo.toml b/substrate/bin/node/runtime/Cargo.toml index cb4970e7c9d..e53646c0ef4 100644 --- a/substrate/bin/node/runtime/Cargo.toml +++ b/substrate/bin/node/runtime/Cargo.toml @@ -26,122 +26,122 @@ log = { version = "0.4.17", default-features = false } serde_json = { version = "1.0.108", default-features = false, features = ["alloc", "arbitrary_precision"] } # pallet-asset-conversion: turn on "num-traits" feature -primitive-types = { version = "0.12.0", default-features = false, features = ["codec", "scale-info", "num-traits"] } +primitive-types = { version = "0.12.0", default-features = false, features = ["codec", "num-traits", "scale-info"] } # primitives -sp-authority-discovery = { path = "../../../primitives/authority-discovery", default-features = false, features=["serde"] } -sp-consensus-babe = { path = "../../../primitives/consensus/babe", default-features = false, features=["serde"] } 
-sp-consensus-grandpa = { path = "../../../primitives/consensus/grandpa", default-features = false, features=["serde"] } -sp-block-builder = { path = "../../../primitives/block-builder", default-features = false} +sp-authority-discovery = { path = "../../../primitives/authority-discovery", default-features = false, features = ["serde"] } +sp-consensus-babe = { path = "../../../primitives/consensus/babe", default-features = false, features = ["serde"] } +sp-consensus-grandpa = { path = "../../../primitives/consensus/grandpa", default-features = false, features = ["serde"] } +sp-block-builder = { path = "../../../primitives/block-builder", default-features = false } sp-genesis-builder = { default-features = false, path = "../../../primitives/genesis-builder" } -sp-inherents = { path = "../../../primitives/inherents", default-features = false} -node-primitives = { path = "../primitives", default-features = false} +sp-inherents = { path = "../../../primitives/inherents", default-features = false } +node-primitives = { path = "../primitives", default-features = false } sp-mixnet = { path = "../../../primitives/mixnet", default-features = false } -sp-offchain = { path = "../../../primitives/offchain", default-features = false} -sp-core = { path = "../../../primitives/core", default-features = false, features=["serde"] } -sp-std = { path = "../../../primitives/std", default-features = false} -sp-api = { path = "../../../primitives/api", default-features = false} -sp-runtime = { path = "../../../primitives/runtime", default-features = false, features=["serde"] } -sp-staking = { path = "../../../primitives/staking", default-features = false, features=["serde"] } -sp-storage = { path = "../../../primitives/storage", default-features = false} -sp-session = { path = "../../../primitives/session", default-features = false} -sp-transaction-pool = { path = "../../../primitives/transaction-pool", default-features = false} -sp-statement-store = { path = "../../../primitives/statement-store", default-features = false, features=["serde"] } -sp-version = { path = "../../../primitives/version", default-features = false, features=["serde"] } -sp-io = { path = "../../../primitives/io", default-features = false} +sp-offchain = { path = "../../../primitives/offchain", default-features = false } +sp-core = { path = "../../../primitives/core", default-features = false, features = ["serde"] } +sp-std = { path = "../../../primitives/std", default-features = false } +sp-api = { path = "../../../primitives/api", default-features = false } +sp-runtime = { path = "../../../primitives/runtime", default-features = false, features = ["serde"] } +sp-staking = { path = "../../../primitives/staking", default-features = false, features = ["serde"] } +sp-storage = { path = "../../../primitives/storage", default-features = false } +sp-session = { path = "../../../primitives/session", default-features = false } +sp-transaction-pool = { path = "../../../primitives/transaction-pool", default-features = false } +sp-statement-store = { path = "../../../primitives/statement-store", default-features = false, features = ["serde"] } +sp-version = { path = "../../../primitives/version", default-features = false, features = ["serde"] } +sp-io = { path = "../../../primitives/io", default-features = false } # frame dependencies -frame-executive = { path = "../../../frame/executive", default-features = false} -frame-benchmarking = { path = "../../../frame/benchmarking", default-features = false} -frame-benchmarking-pallet-pov = { path = 
"../../../frame/benchmarking/pov", default-features = false} +frame-executive = { path = "../../../frame/executive", default-features = false } +frame-benchmarking = { path = "../../../frame/benchmarking", default-features = false } +frame-benchmarking-pallet-pov = { path = "../../../frame/benchmarking/pov", default-features = false } frame-support = { path = "../../../frame/support", default-features = false, features = ["tuples-96"] } -frame-system = { path = "../../../frame/system", default-features = false} +frame-system = { path = "../../../frame/system", default-features = false } frame-system-benchmarking = { path = "../../../frame/system/benchmarking", default-features = false, optional = true } -frame-election-provider-support = { path = "../../../frame/election-provider-support", default-features = false} -frame-system-rpc-runtime-api = { path = "../../../frame/system/rpc/runtime-api", default-features = false} +frame-election-provider-support = { path = "../../../frame/election-provider-support", default-features = false } +frame-system-rpc-runtime-api = { path = "../../../frame/system/rpc/runtime-api", default-features = false } frame-try-runtime = { path = "../../../frame/try-runtime", default-features = false, optional = true } -pallet-alliance = { path = "../../../frame/alliance", default-features = false} -pallet-asset-conversion = { path = "../../../frame/asset-conversion", default-features = false} -pallet-asset-rate = { path = "../../../frame/asset-rate", default-features = false} -pallet-assets = { path = "../../../frame/assets", default-features = false} -pallet-authority-discovery = { path = "../../../frame/authority-discovery", default-features = false} -pallet-authorship = { path = "../../../frame/authorship", default-features = false} -pallet-babe = { path = "../../../frame/babe", default-features = false} -pallet-bags-list = { path = "../../../frame/bags-list", default-features = false} -pallet-balances = { path = "../../../frame/balances", default-features = false} -pallet-bounties = { path = "../../../frame/bounties", default-features = false} -pallet-broker = { path = "../../../frame/broker", default-features = false} -pallet-child-bounties = { path = "../../../frame/child-bounties", default-features = false} -pallet-collective = { path = "../../../frame/collective", default-features = false} -pallet-contracts = { path = "../../../frame/contracts", default-features = false} -pallet-conviction-voting = { path = "../../../frame/conviction-voting", default-features = false} -pallet-core-fellowship = { path = "../../../frame/core-fellowship", default-features = false} -pallet-democracy = { path = "../../../frame/democracy", default-features = false} -pallet-election-provider-multi-phase = { path = "../../../frame/election-provider-multi-phase", default-features = false} +pallet-alliance = { path = "../../../frame/alliance", default-features = false } +pallet-asset-conversion = { path = "../../../frame/asset-conversion", default-features = false } +pallet-asset-rate = { path = "../../../frame/asset-rate", default-features = false } +pallet-assets = { path = "../../../frame/assets", default-features = false } +pallet-authority-discovery = { path = "../../../frame/authority-discovery", default-features = false } +pallet-authorship = { path = "../../../frame/authorship", default-features = false } +pallet-babe = { path = "../../../frame/babe", default-features = false } +pallet-bags-list = { path = "../../../frame/bags-list", default-features = false } +pallet-balances 
= { path = "../../../frame/balances", default-features = false } +pallet-bounties = { path = "../../../frame/bounties", default-features = false } +pallet-broker = { path = "../../../frame/broker", default-features = false } +pallet-child-bounties = { path = "../../../frame/child-bounties", default-features = false } +pallet-collective = { path = "../../../frame/collective", default-features = false } +pallet-contracts = { path = "../../../frame/contracts", default-features = false } +pallet-conviction-voting = { path = "../../../frame/conviction-voting", default-features = false } +pallet-core-fellowship = { path = "../../../frame/core-fellowship", default-features = false } +pallet-democracy = { path = "../../../frame/democracy", default-features = false } +pallet-election-provider-multi-phase = { path = "../../../frame/election-provider-multi-phase", default-features = false } pallet-election-provider-support-benchmarking = { path = "../../../frame/election-provider-support/benchmarking", default-features = false, optional = true } -pallet-elections-phragmen = { path = "../../../frame/elections-phragmen", default-features = false} -pallet-fast-unstake = { path = "../../../frame/fast-unstake", default-features = false} -pallet-nis = { path = "../../../frame/nis", default-features = false} -pallet-grandpa = { path = "../../../frame/grandpa", default-features = false} -pallet-im-online = { path = "../../../frame/im-online", default-features = false} -pallet-indices = { path = "../../../frame/indices", default-features = false} -pallet-identity = { path = "../../../frame/identity", default-features = false} -pallet-lottery = { path = "../../../frame/lottery", default-features = false} -pallet-membership = { path = "../../../frame/membership", default-features = false} -pallet-message-queue = { path = "../../../frame/message-queue", default-features = false} +pallet-elections-phragmen = { path = "../../../frame/elections-phragmen", default-features = false } +pallet-fast-unstake = { path = "../../../frame/fast-unstake", default-features = false } +pallet-nis = { path = "../../../frame/nis", default-features = false } +pallet-grandpa = { path = "../../../frame/grandpa", default-features = false } +pallet-im-online = { path = "../../../frame/im-online", default-features = false } +pallet-indices = { path = "../../../frame/indices", default-features = false } +pallet-identity = { path = "../../../frame/identity", default-features = false } +pallet-lottery = { path = "../../../frame/lottery", default-features = false } +pallet-membership = { path = "../../../frame/membership", default-features = false } +pallet-message-queue = { path = "../../../frame/message-queue", default-features = false } pallet-mixnet = { path = "../../../frame/mixnet", default-features = false } -pallet-mmr = { path = "../../../frame/merkle-mountain-range", default-features = false} -pallet-multisig = { path = "../../../frame/multisig", default-features = false} -pallet-nfts = { path = "../../../frame/nfts", default-features = false} -pallet-nfts-runtime-api = { path = "../../../frame/nfts/runtime-api", default-features = false} -pallet-nft-fractionalization = { path = "../../../frame/nft-fractionalization", default-features = false} -pallet-nomination-pools = { path = "../../../frame/nomination-pools", default-features = false} -pallet-nomination-pools-benchmarking = { path = "../../../frame/nomination-pools/benchmarking", default-features = false, optional = true} -pallet-nomination-pools-runtime-api = { path = 
"../../../frame/nomination-pools/runtime-api", default-features = false} -pallet-offences = { path = "../../../frame/offences", default-features = false} +pallet-mmr = { path = "../../../frame/merkle-mountain-range", default-features = false } +pallet-multisig = { path = "../../../frame/multisig", default-features = false } +pallet-nfts = { path = "../../../frame/nfts", default-features = false } +pallet-nfts-runtime-api = { path = "../../../frame/nfts/runtime-api", default-features = false } +pallet-nft-fractionalization = { path = "../../../frame/nft-fractionalization", default-features = false } +pallet-nomination-pools = { path = "../../../frame/nomination-pools", default-features = false } +pallet-nomination-pools-benchmarking = { path = "../../../frame/nomination-pools/benchmarking", default-features = false, optional = true } +pallet-nomination-pools-runtime-api = { path = "../../../frame/nomination-pools/runtime-api", default-features = false } +pallet-offences = { path = "../../../frame/offences", default-features = false } pallet-offences-benchmarking = { path = "../../../frame/offences/benchmarking", default-features = false, optional = true } -pallet-glutton = { path = "../../../frame/glutton", default-features = false} -pallet-preimage = { path = "../../../frame/preimage", default-features = false} -pallet-proxy = { path = "../../../frame/proxy", default-features = false} -pallet-insecure-randomness-collective-flip = { path = "../../../frame/insecure-randomness-collective-flip", default-features = false} -pallet-ranked-collective = { path = "../../../frame/ranked-collective", default-features = false} -pallet-recovery = { path = "../../../frame/recovery", default-features = false} -pallet-referenda = { path = "../../../frame/referenda", default-features = false} -pallet-remark = { path = "../../../frame/remark", default-features = false} -pallet-root-testing = { path = "../../../frame/root-testing", default-features = false} -pallet-salary = { path = "../../../frame/salary", default-features = false} -pallet-session = { path = "../../../frame/session", default-features = false , features = [ "historical" ]} +pallet-glutton = { path = "../../../frame/glutton", default-features = false } +pallet-preimage = { path = "../../../frame/preimage", default-features = false } +pallet-proxy = { path = "../../../frame/proxy", default-features = false } +pallet-insecure-randomness-collective-flip = { path = "../../../frame/insecure-randomness-collective-flip", default-features = false } +pallet-ranked-collective = { path = "../../../frame/ranked-collective", default-features = false } +pallet-recovery = { path = "../../../frame/recovery", default-features = false } +pallet-referenda = { path = "../../../frame/referenda", default-features = false } +pallet-remark = { path = "../../../frame/remark", default-features = false } +pallet-root-testing = { path = "../../../frame/root-testing", default-features = false } +pallet-salary = { path = "../../../frame/salary", default-features = false } +pallet-session = { path = "../../../frame/session", default-features = false, features = ["historical"] } pallet-session-benchmarking = { path = "../../../frame/session/benchmarking", default-features = false, optional = true } -pallet-staking = { path = "../../../frame/staking", default-features = false} -pallet-staking-reward-curve = { path = "../../../frame/staking/reward-curve", default-features = false} -pallet-staking-runtime-api = { path = "../../../frame/staking/runtime-api", default-features = 
false} -pallet-state-trie-migration = { path = "../../../frame/state-trie-migration", default-features = false} -pallet-statement = { path = "../../../frame/statement", default-features = false} -pallet-scheduler = { path = "../../../frame/scheduler", default-features = false} -pallet-society = { path = "../../../frame/society", default-features = false} -pallet-sudo = { path = "../../../frame/sudo", default-features = false} -pallet-timestamp = { path = "../../../frame/timestamp", default-features = false} -pallet-tips = { path = "../../../frame/tips", default-features = false} -pallet-treasury = { path = "../../../frame/treasury", default-features = false} -pallet-utility = { path = "../../../frame/utility", default-features = false} -pallet-transaction-payment = { path = "../../../frame/transaction-payment", default-features = false} -pallet-transaction-payment-rpc-runtime-api = { path = "../../../frame/transaction-payment/rpc/runtime-api", default-features = false} -pallet-asset-conversion-tx-payment = { path = "../../../frame/transaction-payment/asset-conversion-tx-payment", default-features = false} -pallet-asset-tx-payment = { path = "../../../frame/transaction-payment/asset-tx-payment", default-features = false} -pallet-skip-feeless-payment = { path = "../../../frame/transaction-payment/skip-feeless-payment", default-features = false} -pallet-transaction-storage = { path = "../../../frame/transaction-storage", default-features = false} -pallet-uniques = { path = "../../../frame/uniques", default-features = false} -pallet-vesting = { path = "../../../frame/vesting", default-features = false} -pallet-whitelist = { path = "../../../frame/whitelist", default-features = false} -pallet-tx-pause = { path = "../../../frame/tx-pause", default-features = false} -pallet-safe-mode = { path = "../../../frame/safe-mode", default-features = false} +pallet-staking = { path = "../../../frame/staking", default-features = false } +pallet-staking-reward-curve = { path = "../../../frame/staking/reward-curve", default-features = false } +pallet-staking-runtime-api = { path = "../../../frame/staking/runtime-api", default-features = false } +pallet-state-trie-migration = { path = "../../../frame/state-trie-migration", default-features = false } +pallet-statement = { path = "../../../frame/statement", default-features = false } +pallet-scheduler = { path = "../../../frame/scheduler", default-features = false } +pallet-society = { path = "../../../frame/society", default-features = false } +pallet-sudo = { path = "../../../frame/sudo", default-features = false } +pallet-timestamp = { path = "../../../frame/timestamp", default-features = false } +pallet-tips = { path = "../../../frame/tips", default-features = false } +pallet-treasury = { path = "../../../frame/treasury", default-features = false } +pallet-utility = { path = "../../../frame/utility", default-features = false } +pallet-transaction-payment = { path = "../../../frame/transaction-payment", default-features = false } +pallet-transaction-payment-rpc-runtime-api = { path = "../../../frame/transaction-payment/rpc/runtime-api", default-features = false } +pallet-asset-conversion-tx-payment = { path = "../../../frame/transaction-payment/asset-conversion-tx-payment", default-features = false } +pallet-asset-tx-payment = { path = "../../../frame/transaction-payment/asset-tx-payment", default-features = false } +pallet-skip-feeless-payment = { path = "../../../frame/transaction-payment/skip-feeless-payment", default-features = false } 
+pallet-transaction-storage = { path = "../../../frame/transaction-storage", default-features = false } +pallet-uniques = { path = "../../../frame/uniques", default-features = false } +pallet-vesting = { path = "../../../frame/vesting", default-features = false } +pallet-whitelist = { path = "../../../frame/whitelist", default-features = false } +pallet-tx-pause = { path = "../../../frame/tx-pause", default-features = false } +pallet-safe-mode = { path = "../../../frame/safe-mode", default-features = false } [build-dependencies] substrate-wasm-builder = { path = "../../../utils/wasm-builder", optional = true } [features] -default = [ "std" ] -with-tracing = [ "frame-executive/with-tracing" ] +default = ["std"] +with-tracing = ["frame-executive/with-tracing"] std = [ "codec/std", "frame-benchmarking-pallet-pov/std", diff --git a/substrate/bin/node/testing/Cargo.toml b/substrate/bin/node/testing/Cargo.toml index dba5016fb3e..513cb22b6a2 100644 --- a/substrate/bin/node/testing/Cargo.toml +++ b/substrate/bin/node/testing/Cargo.toml @@ -29,13 +29,13 @@ pallet-asset-tx-payment = { path = "../../../frame/transaction-payment/asset-tx- pallet-skip-feeless-payment = { path = "../../../frame/transaction-payment/skip-feeless-payment" } sc-block-builder = { path = "../../../client/block-builder" } sc-client-api = { path = "../../../client/api" } -sc-client-db = { path = "../../../client/db", features = ["rocksdb"]} +sc-client-db = { path = "../../../client/db", features = ["rocksdb"] } sc-consensus = { path = "../../../client/consensus/common" } sc-executor = { path = "../../../client/executor" } sc-service = { path = "../../../client/service", features = [ - "test-helpers", "rocksdb", -]} + "test-helpers", +] } sp-api = { path = "../../../primitives/api" } sp-block-builder = { path = "../../../primitives/block-builder" } sp-blockchain = { path = "../../../primitives/blockchain" } @@ -45,5 +45,5 @@ sp-inherents = { path = "../../../primitives/inherents" } sp-io = { path = "../../../primitives/io" } sp-keyring = { path = "../../../primitives/keyring" } sp-runtime = { path = "../../../primitives/runtime" } -sp-timestamp = { path = "../../../primitives/timestamp", default-features = false} +sp-timestamp = { path = "../../../primitives/timestamp", default-features = false } substrate-test-client = { path = "../../../test-utils/client" } diff --git a/substrate/client/api/Cargo.toml b/substrate/client/api/Cargo.toml index 2b64c86038d..57a364e791f 100644 --- a/substrate/client/api/Cargo.toml +++ b/substrate/client/api/Cargo.toml @@ -28,10 +28,10 @@ sc-utils = { path = "../utils" } sp-api = { path = "../../primitives/api" } sp-blockchain = { path = "../../primitives/blockchain" } sp-consensus = { path = "../../primitives/consensus/common" } -sp-core = { path = "../../primitives/core", default-features = false} +sp-core = { path = "../../primitives/core", default-features = false } sp-database = { path = "../../primitives/database" } sp-externalities = { path = "../../primitives/externalities" } -sp-runtime = { path = "../../primitives/runtime", default-features = false} +sp-runtime = { path = "../../primitives/runtime", default-features = false } sp-state-machine = { path = "../../primitives/state-machine" } sp-statement-store = { path = "../../primitives/statement-store" } sp-storage = { path = "../../primitives/storage" } diff --git a/substrate/client/authority-discovery/Cargo.toml b/substrate/client/authority-discovery/Cargo.toml index ef2fdcfd485..4013c8951c4 100644 --- 
a/substrate/client/authority-discovery/Cargo.toml +++ b/substrate/client/authority-discovery/Cargo.toml @@ -21,8 +21,8 @@ codec = { package = "parity-scale-codec", version = "3.6.1", default-features = futures = "0.3.21" futures-timer = "3.0.1" ip_network = "0.4.1" -libp2p = { version = "0.51.3", features = ["kad", "ed25519"] } -multihash = { version = "0.17.0", default-features = false, features = ["std", "sha2"] } +libp2p = { version = "0.51.3", features = ["ed25519", "kad"] } +multihash = { version = "0.17.0", default-features = false, features = ["sha2", "std"] } log = "0.4.17" prost = "0.11" rand = "0.8.5" diff --git a/substrate/client/chain-spec/Cargo.toml b/substrate/client/chain-spec/Cargo.toml index 009f8cef957..d041d5bfd2b 100644 --- a/substrate/client/chain-spec/Cargo.toml +++ b/substrate/client/chain-spec/Cargo.toml @@ -35,5 +35,5 @@ docify = "0.2.0" [dev-dependencies] substrate-test-runtime = { path = "../../test-utils/runtime" } sp-keyring = { path = "../../primitives/keyring" } -sp-application-crypto = { default-features = false, path = "../../primitives/application-crypto", features = ["serde"] } +sp-application-crypto = { default-features = false, path = "../../primitives/application-crypto", features = ["serde"] } sp-consensus-babe = { default-features = false, path = "../../primitives/consensus/babe", features = ["serde"] } diff --git a/substrate/client/cli/Cargo.toml b/substrate/client/cli/Cargo.toml index 54280d4bf15..d75eac1ac98 100644 --- a/substrate/client/cli/Cargo.toml +++ b/substrate/client/cli/Cargo.toml @@ -19,7 +19,7 @@ clap = { version = "4.4.10", features = ["derive", "string", "wrap_help"] } fdlimit = "0.3.0" futures = "0.3.21" itertools = "0.10.3" -libp2p-identity = { version = "0.1.3", features = ["peerid", "ed25519"]} +libp2p-identity = { version = "0.1.3", features = ["ed25519", "peerid"] } log = "0.4.17" names = { version = "0.13.0", default-features = false } parity-scale-codec = "3.6.1" @@ -30,13 +30,13 @@ serde = "1.0.193" serde_json = "1.0.108" thiserror = "1.0.48" bip39 = "2.0.0" -tokio = { version = "1.22.0", features = ["signal", "rt-multi-thread", "parking_lot"] } +tokio = { version = "1.22.0", features = ["parking_lot", "rt-multi-thread", "signal"] } sc-client-api = { path = "../api" } -sc-client-db = { path = "../db", default-features = false} +sc-client-db = { path = "../db", default-features = false } sc-keystore = { path = "../keystore" } sc-mixnet = { path = "../mixnet" } sc-network = { path = "../network" } -sc-service = { path = "../service", default-features = false} +sc-service = { path = "../service", default-features = false } sc-telemetry = { path = "../telemetry" } sc-tracing = { path = "../tracing" } sc-utils = { path = "../utils" } @@ -54,5 +54,5 @@ futures-timer = "3.0.1" sp-tracing = { path = "../../primitives/tracing" } [features] -default = [ "rocksdb" ] -rocksdb = [ "sc-client-db/rocksdb" ] +default = ["rocksdb"] +rocksdb = ["sc-client-db/rocksdb"] diff --git a/substrate/client/consensus/babe/rpc/Cargo.toml b/substrate/client/consensus/babe/rpc/Cargo.toml index f6551b2ed54..913dd990fd3 100644 --- a/substrate/client/consensus/babe/rpc/Cargo.toml +++ b/substrate/client/consensus/babe/rpc/Cargo.toml @@ -13,7 +13,7 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -jsonrpsee = { version = "0.16.2", features = ["client-core", "server", "macros"] } +jsonrpsee = { version = "0.16.2", features = ["client-core", "macros", "server"] } futures = "0.3.21" serde = { version = "1.0.193", features = ["derive"] 
} thiserror = "1.0" diff --git a/substrate/client/consensus/beefy/rpc/Cargo.toml b/substrate/client/consensus/beefy/rpc/Cargo.toml index c077908c93c..35041a1208f 100644 --- a/substrate/client/consensus/beefy/rpc/Cargo.toml +++ b/substrate/client/consensus/beefy/rpc/Cargo.toml @@ -11,7 +11,7 @@ homepage = "https://substrate.io" [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", features = ["derive"] } futures = "0.3.21" -jsonrpsee = { version = "0.16.2", features = ["client-core", "server", "macros"] } +jsonrpsee = { version = "0.16.2", features = ["client-core", "macros", "server"] } log = "0.4" parking_lot = "0.12.1" serde = { version = "1.0.193", features = ["derive"] } @@ -24,6 +24,6 @@ sp-runtime = { path = "../../../../primitives/runtime" } [dev-dependencies] serde_json = "1.0.108" -sc-rpc = { path = "../../../rpc", features = ["test-helpers"]} +sc-rpc = { path = "../../../rpc", features = ["test-helpers"] } substrate-test-runtime-client = { path = "../../../../test-utils/runtime/client" } tokio = { version = "1.22.0", features = ["macros"] } diff --git a/substrate/client/consensus/common/Cargo.toml b/substrate/client/consensus/common/Cargo.toml index f269e3752d4..95ee02a9262 100644 --- a/substrate/client/consensus/common/Cargo.toml +++ b/substrate/client/consensus/common/Cargo.toml @@ -16,7 +16,7 @@ targets = ["x86_64-unknown-linux-gnu"] async-trait = "0.1.57" futures = { version = "0.3.21", features = ["thread-pool"] } futures-timer = "3.0.1" -libp2p-identity = { version = "0.1.3", features = ["peerid", "ed25519"] } +libp2p-identity = { version = "0.1.3", features = ["ed25519", "peerid"] } log = "0.4.17" mockall = "0.11.3" parking_lot = "0.12.1" diff --git a/substrate/client/consensus/grandpa/rpc/Cargo.toml b/substrate/client/consensus/grandpa/rpc/Cargo.toml index 2e808ac0bdd..2a0d51dd616 100644 --- a/substrate/client/consensus/grandpa/rpc/Cargo.toml +++ b/substrate/client/consensus/grandpa/rpc/Cargo.toml @@ -12,7 +12,7 @@ homepage = "https://substrate.io" [dependencies] finality-grandpa = { version = "0.16.2", features = ["derive-codec"] } futures = "0.3.16" -jsonrpsee = { version = "0.16.2", features = ["client-core", "server", "macros"] } +jsonrpsee = { version = "0.16.2", features = ["client-core", "macros", "server"] } log = "0.4.8" parity-scale-codec = { version = "3.6.1", features = ["derive"] } serde = { version = "1.0.193", features = ["derive"] } @@ -26,7 +26,7 @@ sp-runtime = { path = "../../../../primitives/runtime" } [dev-dependencies] sc-block-builder = { path = "../../../block-builder" } -sc-rpc = { path = "../../../rpc", features = ["test-helpers"]} +sc-rpc = { path = "../../../rpc", features = ["test-helpers"] } sp-core = { path = "../../../../primitives/core" } sp-consensus-grandpa = { path = "../../../../primitives/consensus/grandpa" } sp-keyring = { path = "../../../../primitives/keyring" } diff --git a/substrate/client/consensus/manual-seal/Cargo.toml b/substrate/client/consensus/manual-seal/Cargo.toml index a6430fdf1de..b0b9c1ee6eb 100644 --- a/substrate/client/consensus/manual-seal/Cargo.toml +++ b/substrate/client/consensus/manual-seal/Cargo.toml @@ -13,7 +13,7 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -jsonrpsee = { version = "0.16.2", features = ["client-core", "server", "macros"] } +jsonrpsee = { version = "0.16.2", features = ["client-core", "macros", "server"] } assert_matches = "1.3.0" async-trait = "0.1.57" codec = { package = "parity-scale-codec", version = "3.6.1" } @@ -43,7 +43,7 @@ 
sp-runtime = { path = "../../../primitives/runtime" } sp-timestamp = { path = "../../../primitives/timestamp" } [dev-dependencies] -tokio = { version = "1.22.0", features = ["rt-multi-thread", "macros"] } +tokio = { version = "1.22.0", features = ["macros", "rt-multi-thread"] } sc-basic-authorship = { path = "../../basic-authorship" } substrate-test-runtime-client = { path = "../../../test-utils/runtime/client" } substrate-test-runtime-transaction-pool = { path = "../../../test-utils/runtime/transaction-pool" } diff --git a/substrate/client/db/Cargo.toml b/substrate/client/db/Cargo.toml index cb9560b6cb6..867e2908d6c 100644 --- a/substrate/client/db/Cargo.toml +++ b/substrate/client/db/Cargo.toml @@ -53,7 +53,7 @@ runtime-benchmarks = [ "kitchensink-runtime/runtime-benchmarks", "sp-runtime/runtime-benchmarks", ] -rocksdb = [ "kvdb-rocksdb" ] +rocksdb = ["kvdb-rocksdb"] [[bench]] name = "state_access" diff --git a/substrate/client/executor/Cargo.toml b/substrate/client/executor/Cargo.toml index 9f41b742373..50aedf8a348 100644 --- a/substrate/client/executor/Cargo.toml +++ b/substrate/client/executor/Cargo.toml @@ -55,7 +55,7 @@ name = "bench" harness = false [features] -default = [ "std" ] +default = ["std"] # This crate does not have `no_std` support, we just require this for tests std = [ "sc-runtime-test/std", diff --git a/substrate/client/executor/runtime-test/Cargo.toml b/substrate/client/executor/runtime-test/Cargo.toml index 046e59c08e0..84ed458fb1c 100644 --- a/substrate/client/executor/runtime-test/Cargo.toml +++ b/substrate/client/executor/runtime-test/Cargo.toml @@ -13,17 +13,17 @@ repository.workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -sp-core = { path = "../../../primitives/core", default-features = false} -sp-io = { path = "../../../primitives/io", default-features = false, features = ["improved_panic_error_reporting"]} -sp-runtime = { path = "../../../primitives/runtime", default-features = false} -sp-runtime-interface = { path = "../../../primitives/runtime-interface", default-features = false} -sp-std = { path = "../../../primitives/std", default-features = false} +sp-core = { path = "../../../primitives/core", default-features = false } +sp-io = { path = "../../../primitives/io", default-features = false, features = ["improved_panic_error_reporting"] } +sp-runtime = { path = "../../../primitives/runtime", default-features = false } +sp-runtime-interface = { path = "../../../primitives/runtime-interface", default-features = false } +sp-std = { path = "../../../primitives/std", default-features = false } [build-dependencies] substrate-wasm-builder = { path = "../../../utils/wasm-builder", optional = true } [features] -default = [ "std" ] +default = ["std"] std = [ "sp-core/std", "sp-io/std", diff --git a/substrate/client/executor/wasmtime/Cargo.toml b/substrate/client/executor/wasmtime/Cargo.toml index 261d52c0ede..b1434ef7c52 100644 --- a/substrate/client/executor/wasmtime/Cargo.toml +++ b/substrate/client/executor/wasmtime/Cargo.toml @@ -25,7 +25,7 @@ wasmtime = { version = "8.0.1", default-features = false, features = [ "cranelift", "jitdump", "parallel-compilation", - "pooling-allocator" + "pooling-allocator", ] } anyhow = "1.0.68" sc-allocator = { path = "../../allocator" } @@ -39,7 +39,7 @@ sp-wasm-interface = { path = "../../../primitives/wasm-interface", features = [" # By default rustix directly calls the appropriate syscalls completely bypassing libc; # this doesn't have any actual benefits for us besides making it harder to debug memory 
# problems (since then `mmap` etc. cannot be easily hooked into). -rustix = { version = "0.36.7", default-features = false, features = ["std", "mm", "fs", "param", "use-libc"] } +rustix = { version = "0.36.7", default-features = false, features = ["fs", "mm", "param", "std", "use-libc"] } [dev-dependencies] wat = "1.0" diff --git a/substrate/client/merkle-mountain-range/rpc/Cargo.toml b/substrate/client/merkle-mountain-range/rpc/Cargo.toml index c64354abaaf..d978d3cd2ed 100644 --- a/substrate/client/merkle-mountain-range/rpc/Cargo.toml +++ b/substrate/client/merkle-mountain-range/rpc/Cargo.toml @@ -13,7 +13,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1" } -jsonrpsee = { version = "0.16.2", features = ["client-core", "server", "macros"] } +jsonrpsee = { version = "0.16.2", features = ["client-core", "macros", "server"] } serde = { version = "1.0.193", features = ["derive"] } sp-api = { path = "../../../primitives/api" } sp-blockchain = { path = "../../../primitives/blockchain" } diff --git a/substrate/client/network-gossip/Cargo.toml b/substrate/client/network-gossip/Cargo.toml index 5006d5d0e3e..0ad9dec4651 100644 --- a/substrate/client/network-gossip/Cargo.toml +++ b/substrate/client/network-gossip/Cargo.toml @@ -30,6 +30,6 @@ sp-runtime = { path = "../../primitives/runtime" } [dev-dependencies] tokio = "1.22.0" async-trait = "0.1.73" -codec = { package = "parity-scale-codec", version = "3.0.0", features = [ "derive" ] } +codec = { package = "parity-scale-codec", version = "3.0.0", features = ["derive"] } quickcheck = { version = "1.0.3", default-features = false } substrate-test-runtime-client = { path = "../../test-utils/runtime/client" } diff --git a/substrate/client/network/Cargo.toml b/substrate/client/network/Cargo.toml index 2a14ef2dd84..ff8046868d5 100644 --- a/substrate/client/network/Cargo.toml +++ b/substrate/client/network/Cargo.toml @@ -25,7 +25,7 @@ fnv = "1.0.6" futures = "0.3.21" futures-timer = "3.0.2" ip_network = "0.4.1" -libp2p = { version = "0.51.3", features = ["dns", "identify", "kad", "macros", "mdns", "noise", "ping", "tcp", "tokio", "yamux", "websocket", "request-response"] } +libp2p = { version = "0.51.3", features = ["dns", "identify", "kad", "macros", "mdns", "noise", "ping", "request-response", "tcp", "tokio", "websocket", "yamux"] } linked_hash_set = "0.1.3" log = "0.4.17" mockall = "0.11.3" @@ -39,7 +39,7 @@ smallvec = "1.11.0" thiserror = "1.0" tokio = { version = "1.22.0", features = ["macros", "sync"] } tokio-stream = "0.1.7" -unsigned-varint = { version = "0.7.1", features = ["futures", "asynchronous_codec"] } +unsigned-varint = { version = "0.7.1", features = ["asynchronous_codec", "futures"] } zeroize = "1.4.3" prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../utils/prometheus" } sc-client-api = { path = "../api" } diff --git a/substrate/client/network/bitswap/Cargo.toml b/substrate/client/network/bitswap/Cargo.toml index 412d603163d..f4ad4b3a0e9 100644 --- a/substrate/client/network/bitswap/Cargo.toml +++ b/substrate/client/network/bitswap/Cargo.toml @@ -23,7 +23,7 @@ libp2p-identity = { version = "0.1.3", features = ["peerid"] } log = "0.4.17" prost = "0.11" thiserror = "1.0" -unsigned-varint = { version = "0.7.1", features = ["futures", "asynchronous_codec"] } +unsigned-varint = { version = "0.7.1", features = ["asynchronous_codec", "futures"] } sc-client-api = { path = "../../api" } sc-network = { path = ".." 
} sp-blockchain = { path = "../../../primitives/blockchain" } diff --git a/substrate/client/network/light/Cargo.toml b/substrate/client/network/light/Cargo.toml index f426cda7fc8..17b21432811 100644 --- a/substrate/client/network/light/Cargo.toml +++ b/substrate/client/network/light/Cargo.toml @@ -19,7 +19,7 @@ prost-build = "0.11" async-channel = "1.8.0" array-bytes = "6.1" codec = { package = "parity-scale-codec", version = "3.6.1", features = [ - "derive", + "derive", ] } futures = "0.3.21" libp2p-identity = { version = "0.1.3", features = ["peerid"] } diff --git a/substrate/client/network/sync/Cargo.toml b/substrate/client/network/sync/Cargo.toml index a1ea39a852f..a9b8ec577e3 100644 --- a/substrate/client/network/sync/Cargo.toml +++ b/substrate/client/network/sync/Cargo.toml @@ -30,7 +30,7 @@ schnellru = "0.2.1" smallvec = "1.11.0" thiserror = "1.0" tokio-stream = "0.1.14" -tokio = { version = "1.32.0", features = ["time", "macros"] } +tokio = { version = "1.32.0", features = ["macros", "time"] } fork-tree = { path = "../../../utils/fork-tree" } prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../../utils/prometheus" } sc-client-api = { path = "../../api" } diff --git a/substrate/client/network/test/Cargo.toml b/substrate/client/network/test/Cargo.toml index 09f8f1fa9ef..a11ed2a3ec8 100644 --- a/substrate/client/network/test/Cargo.toml +++ b/substrate/client/network/test/Cargo.toml @@ -29,7 +29,7 @@ sc-network-common = { path = "../common" } sc-utils = { path = "../../utils" } sc-network-light = { path = "../light" } sc-network-sync = { path = "../sync" } -sc-service = { path = "../../service", default-features = false, features = ["test-helpers"]} +sc-service = { path = "../../service", default-features = false, features = ["test-helpers"] } sp-blockchain = { path = "../../../primitives/blockchain" } sp-consensus = { path = "../../../primitives/consensus/common" } sp-core = { path = "../../../primitives/core" } diff --git a/substrate/client/offchain/Cargo.toml b/substrate/client/offchain/Cargo.toml index 83397f52879..01deb322134 100644 --- a/substrate/client/offchain/Cargo.toml +++ b/substrate/client/offchain/Cargo.toml @@ -19,7 +19,7 @@ codec = { package = "parity-scale-codec", version = "3.6.1", features = ["derive fnv = "1.0.6" futures = "0.3.21" futures-timer = "3.0.2" -hyper = { version = "0.14.16", features = ["stream", "http2"] } +hyper = { version = "0.14.16", features = ["http2", "stream"] } hyper-rustls = { version = "0.24.0", features = ["http2"] } libp2p = "0.51.3" num_cpus = "1.13" @@ -45,7 +45,7 @@ log = "0.4.17" lazy_static = "1.4.0" tokio = "1.22.0" sc-block-builder = { path = "../block-builder" } -sc-client-db = { path = "../db", default-features = true} +sc-client-db = { path = "../db", default-features = true } sc-transaction-pool = { path = "../transaction-pool" } sc-transaction-pool-api = { path = "../transaction-pool/api" } sp-consensus = { path = "../../primitives/consensus/common" } diff --git a/substrate/client/rpc-api/Cargo.toml b/substrate/client/rpc-api/Cargo.toml index baf94f342ef..b5c0f70d94c 100644 --- a/substrate/client/rpc-api/Cargo.toml +++ b/substrate/client/rpc-api/Cargo.toml @@ -25,4 +25,4 @@ sp-core = { path = "../../primitives/core" } sp-rpc = { path = "../../primitives/rpc" } sp-runtime = { path = "../../primitives/runtime" } sp-version = { path = "../../primitives/version" } -jsonrpsee = { version = "0.16.2", features = ["server", "client-core", "macros"] } +jsonrpsee = { version = "0.16.2", features = 
["client-core", "macros", "server"] } diff --git a/substrate/client/rpc-spec-v2/Cargo.toml b/substrate/client/rpc-spec-v2/Cargo.toml index 8ca4f321f71..45a1d862f04 100644 --- a/substrate/client/rpc-spec-v2/Cargo.toml +++ b/substrate/client/rpc-spec-v2/Cargo.toml @@ -13,7 +13,7 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -jsonrpsee = { version = "0.16.2", features = ["client-core", "server", "macros"] } +jsonrpsee = { version = "0.16.2", features = ["client-core", "macros", "server"] } # Internal chain structures for "chain_spec". sc-chain-spec = { path = "../chain-spec" } # Pool for submitting extrinsics required by "transaction" @@ -47,6 +47,6 @@ sp-consensus = { path = "../../primitives/consensus/common" } sp-externalities = { path = "../../primitives/externalities" } sp-maybe-compressed-blob = { path = "../../primitives/maybe-compressed-blob" } sc-block-builder = { path = "../block-builder" } -sc-service = { path = "../service", features = ["test-helpers"]} +sc-service = { path = "../service", features = ["test-helpers"] } assert_matches = "1.3.0" pretty_assertions = "1.2.1" diff --git a/substrate/client/service/Cargo.toml b/substrate/client/service/Cargo.toml index 63c498964fe..ae03a5dab36 100644 --- a/substrate/client/service/Cargo.toml +++ b/substrate/client/service/Cargo.toml @@ -13,10 +13,10 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [features] -default = [ "rocksdb" ] +default = ["rocksdb"] # The RocksDB feature activates the RocksDB database backend. If it is not activated, and you pass # a path to a database, an error will be produced at runtime. -rocksdb = [ "sc-client-db/rocksdb" ] +rocksdb = ["sc-client-db/rocksdb"] # exposes the client type test-helpers = [] runtime-benchmarks = [ @@ -59,7 +59,7 @@ sc-network-transactions = { path = "../network/transactions" } sc-chain-spec = { path = "../chain-spec" } sc-client-api = { path = "../api" } sp-api = { path = "../../primitives/api" } -sc-client-db = { path = "../db", default-features = false} +sc-client-db = { path = "../db", default-features = false } codec = { package = "parity-scale-codec", version = "3.6.1" } sc-executor = { path = "../executor" } sc-transaction-pool = { path = "../transaction-pool" } @@ -77,7 +77,7 @@ sc-sysinfo = { path = "../sysinfo" } tracing = "0.1.29" tracing-futures = { version = "0.2.4" } async-trait = "0.1.57" -tokio = { version = "1.22.0", features = ["time", "rt-multi-thread", "parking_lot"] } +tokio = { version = "1.22.0", features = ["parking_lot", "rt-multi-thread", "time"] } tempfile = "3.1.0" directories = "5.0.1" static_init = "1.0.3" diff --git a/substrate/client/service/test/Cargo.toml b/substrate/client/service/test/Cargo.toml index c6091f97d63..93576be9b59 100644 --- a/substrate/client/service/test/Cargo.toml +++ b/substrate/client/service/test/Cargo.toml @@ -23,12 +23,12 @@ tempfile = "3.1.0" tokio = { version = "1.22.0", features = ["time"] } sc-block-builder = { path = "../../block-builder" } sc-client-api = { path = "../../api" } -sc-client-db = { path = "../../db", default-features = false} +sc-client-db = { path = "../../db", default-features = false } sc-consensus = { path = "../../consensus/common" } sc-executor = { path = "../../executor" } sc-network = { path = "../../network" } sc-network-sync = { path = "../../network/sync" } -sc-service = { path = "..", features = ["test-helpers"]} +sc-service = { path = "..", features = ["test-helpers"] } sc-transaction-pool-api = { path = "../../transaction-pool/api" } sp-api = { path = 
"../../../primitives/api" } sp-blockchain = { path = "../../../primitives/blockchain" } diff --git a/substrate/client/storage-monitor/Cargo.toml b/substrate/client/storage-monitor/Cargo.toml index 66302982bee..c0eb9d94b92 100644 --- a/substrate/client/storage-monitor/Cargo.toml +++ b/substrate/client/storage-monitor/Cargo.toml @@ -12,7 +12,7 @@ homepage = "https://substrate.io" clap = { version = "4.4.10", features = ["derive", "string"] } log = "0.4.17" fs4 = "0.7.0" -sc-client-db = { path = "../db", default-features = false} +sc-client-db = { path = "../db", default-features = false } sp-core = { path = "../../primitives/core" } tokio = "1.22.0" thiserror = "1.0.48" diff --git a/substrate/client/sync-state-rpc/Cargo.toml b/substrate/client/sync-state-rpc/Cargo.toml index da5d22c2b9a..746f1c754f9 100644 --- a/substrate/client/sync-state-rpc/Cargo.toml +++ b/substrate/client/sync-state-rpc/Cargo.toml @@ -13,7 +13,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1" } -jsonrpsee = { version = "0.16.2", features = ["client-core", "server", "macros"] } +jsonrpsee = { version = "0.16.2", features = ["client-core", "macros", "server"] } serde = { version = "1.0.193", features = ["derive"] } serde_json = "1.0.108" thiserror = "1.0.48" diff --git a/substrate/client/tracing/proc-macro/Cargo.toml b/substrate/client/tracing/proc-macro/Cargo.toml index b134cbce3cc..2e4a0c8f3ee 100644 --- a/substrate/client/tracing/proc-macro/Cargo.toml +++ b/substrate/client/tracing/proc-macro/Cargo.toml @@ -18,4 +18,4 @@ proc-macro = true proc-macro-crate = "1.1.3" proc-macro2 = "1.0.56" quote = { version = "1.0.28", features = ["proc-macro"] } -syn = { version = "2.0.38", features = ["proc-macro", "full", "extra-traits", "parsing"] } +syn = { version = "2.0.38", features = ["extra-traits", "full", "parsing", "proc-macro"] } diff --git a/substrate/client/transaction-pool/api/Cargo.toml b/substrate/client/transaction-pool/api/Cargo.toml index dad1e52bb54..89981c27511 100644 --- a/substrate/client/transaction-pool/api/Cargo.toml +++ b/substrate/client/transaction-pool/api/Cargo.toml @@ -16,8 +16,8 @@ log = "0.4.17" serde = { version = "1.0.193", features = ["derive"] } thiserror = "1.0.48" sp-blockchain = { path = "../../../primitives/blockchain" } -sp-core = { path = "../../../primitives/core", default-features = false} -sp-runtime = { path = "../../../primitives/runtime", default-features = false} +sp-core = { path = "../../../primitives/core", default-features = false } +sp-runtime = { path = "../../../primitives/runtime", default-features = false } [dev-dependencies] serde_json = "1.0.108" diff --git a/substrate/client/utils/Cargo.toml b/substrate/client/utils/Cargo.toml index 885b1d26a8e..da618b0259e 100644 --- a/substrate/client/utils/Cargo.toml +++ b/substrate/client/utils/Cargo.toml @@ -17,10 +17,10 @@ lazy_static = "1.4.0" log = "0.4" parking_lot = "0.12.1" prometheus = { version = "0.13.0", default-features = false } -sp-arithmetic = { path = "../../primitives/arithmetic", default-features = false} +sp-arithmetic = { path = "../../primitives/arithmetic", default-features = false } [features] -default = [ "metered" ] +default = ["metered"] metered = [] [dev-dependencies] diff --git a/substrate/frame/Cargo.toml b/substrate/frame/Cargo.toml index 9f2f73ffb15..93306e8af3d 100644 --- a/substrate/frame/Cargo.toml +++ b/substrate/frame/Cargo.toml @@ -17,7 +17,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] # external deps 
parity-scale-codec = { version = "3.2.2", default-features = false, features = ["derive"] } -scale-info = { version = "2.6.0", default-features = false, features = ["derive"] } +scale-info = { version = "2.6.0", default-features = false, features = ["derive"] } # primitive deps, used for developing FRAME pallets. sp-runtime = { default-features = false, path = "../primitives/runtime" } @@ -27,8 +27,8 @@ sp-core = { default-features = false, path = "../primitives/core" } sp-arithmetic = { default-features = false, path = "../primitives/arithmetic" } # frame deps, for developing FRAME pallets. -frame-support = { default-features = false, path = "support" } -frame-system = { default-features = false, path = "system" } +frame-support = { default-features = false, path = "support" } +frame-system = { default-features = false, path = "system" } # primitive types used for developing FRAME runtimes. sp-version = { default-features = false, path = "../primitives/version", optional = true } @@ -52,8 +52,8 @@ log = { version = "0.4.20", default-features = false } pallet-examples = { path = "./examples" } [features] -default = [ "runtime", "std" ] -experimental = [ "frame-support/experimental", "frame-system/experimental" ] +default = ["runtime", "std"] +experimental = ["frame-support/experimental", "frame-system/experimental"] runtime = [ "frame-executive", "frame-system-rpc-runtime-api", diff --git a/substrate/frame/alliance/Cargo.toml b/substrate/frame/alliance/Cargo.toml index d7d7352975a..1afff8ad43e 100644 --- a/substrate/frame/alliance/Cargo.toml +++ b/substrate/frame/alliance/Cargo.toml @@ -19,27 +19,27 @@ log = { version = "0.4.14", default-features = false } codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -sp-std = { path = "../../primitives/std", default-features = false} -sp-core = { path = "../../primitives/core", default-features = false} +sp-std = { path = "../../primitives/std", default-features = false } +sp-core = { path = "../../primitives/core", default-features = false } sp-core-hashing = { path = "../../primitives/core/hashing", default-features = false, optional = true } -sp-io = { path = "../../primitives/io", default-features = false} -sp-runtime = { path = "../../primitives/runtime", default-features = false} +sp-io = { path = "../../primitives/io", default-features = false } +sp-runtime = { path = "../../primitives/runtime", default-features = false } frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } -frame-support = { path = "../support", default-features = false} -frame-system = { path = "../system", default-features = false} +frame-support = { path = "../support", default-features = false } +frame-system = { path = "../system", default-features = false } pallet-identity = { path = "../identity", default-features = false } pallet-collective = { path = "../collective", default-features = false, optional = true } [dev-dependencies] array-bytes = "6.1" -sp-core-hashing = { path = "../../primitives/core/hashing", default-features = false} +sp-core-hashing = { path = "../../primitives/core/hashing", default-features = false } pallet-balances = { path = "../balances" } pallet-collective = { path = "../collective" } [features] -default = [ "std" ] +default = ["std"] std = [ "codec/std", "frame-benchmarking?/std", diff --git a/substrate/frame/asset-conversion/Cargo.toml 
b/substrate/frame/asset-conversion/Cargo.toml index de898d4ccde..5df86d402e0 100644 --- a/substrate/frame/asset-conversion/Cargo.toml +++ b/substrate/frame/asset-conversion/Cargo.toml @@ -14,24 +14,24 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } -frame-support = { path = "../support", default-features = false} -frame-system = { path = "../system", default-features = false} +frame-support = { path = "../support", default-features = false } +frame-system = { path = "../system", default-features = false } frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -sp-api = { path = "../../primitives/api", default-features = false} -sp-core = { path = "../../primitives/core", default-features = false} -sp-io = { path = "../../primitives/io", default-features = false} -sp-std = { path = "../../primitives/std", default-features = false} -sp-runtime = { path = "../../primitives/runtime", default-features = false} -sp-arithmetic = { path = "../../primitives/arithmetic", default-features = false} +sp-api = { path = "../../primitives/api", default-features = false } +sp-core = { path = "../../primitives/core", default-features = false } +sp-io = { path = "../../primitives/io", default-features = false } +sp-std = { path = "../../primitives/std", default-features = false } +sp-runtime = { path = "../../primitives/runtime", default-features = false } +sp-arithmetic = { path = "../../primitives/arithmetic", default-features = false } [dev-dependencies] pallet-balances = { path = "../balances" } pallet-assets = { path = "../assets" } -primitive-types = { version = "0.12.0", default-features = false, features = ["codec", "scale-info", "num-traits"] } +primitive-types = { version = "0.12.0", default-features = false, features = ["codec", "num-traits", "scale-info"] } [features] -default = [ "std" ] +default = ["std"] std = [ "codec/std", "frame-benchmarking?/std", diff --git a/substrate/frame/asset-rate/Cargo.toml b/substrate/frame/asset-rate/Cargo.toml index 734bc5ef43f..af2776bba5f 100644 --- a/substrate/frame/asset-rate/Cargo.toml +++ b/substrate/frame/asset-rate/Cargo.toml @@ -16,20 +16,20 @@ codec = { package = "parity-scale-codec", version = "3.6.1", default-features = "derive", ] } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true} -frame-support = { path = "../support", default-features = false} -frame-system = { path = "../system", default-features = false} -sp-runtime = { path = "../../primitives/runtime", default-features = false} -sp-std = { path = "../../primitives/std", default-features = false} -sp-core = { path = "../../primitives/core", default-features = false, optional = true} +frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } +frame-support = { path = "../support", default-features = false } +frame-system = { path = "../system", default-features = false } +sp-runtime = { path = "../../primitives/runtime", default-features = false } +sp-std = { path = "../../primitives/std", default-features = false } +sp-core = { path = "../../primitives/core", default-features = false, optional = true } [dev-dependencies] pallet-balances = { path = "../balances" } sp-io = { path = "../../primitives/io" } -sp-core = { 
path = "../../primitives/core", default-features = false} +sp-core = { path = "../../primitives/core", default-features = false } [features] -default = [ "std" ] +default = ["std"] std = [ "codec/std", "frame-benchmarking?/std", diff --git a/substrate/frame/assets/Cargo.toml b/substrate/frame/assets/Cargo.toml index a48964f1366..87709af2727 100644 --- a/substrate/frame/assets/Cargo.toml +++ b/substrate/frame/assets/Cargo.toml @@ -16,15 +16,15 @@ targets = ["x86_64-unknown-linux-gnu"] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } log = { version = "0.4.17", default-features = false } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -sp-std = { path = "../../primitives/std", default-features = false} +sp-std = { path = "../../primitives/std", default-features = false } # Needed for various traits. In our case, `OnFinalize`. -sp-runtime = { path = "../../primitives/runtime", default-features = false} +sp-runtime = { path = "../../primitives/runtime", default-features = false } # Needed for type-safe access to storage DB. -frame-support = { path = "../support", default-features = false} +frame-support = { path = "../support", default-features = false } # `system` module provides us with all sorts of useful stuff and macros depend on it being around. -frame-system = { path = "../system", default-features = false} +frame-system = { path = "../system", default-features = false } frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } -sp-core = { path = "../../primitives/core", default-features = false} +sp-core = { path = "../../primitives/core", default-features = false } [dev-dependencies] sp-std = { path = "../../primitives/std" } @@ -32,7 +32,7 @@ sp-io = { path = "../../primitives/io" } pallet-balances = { path = "../balances" } [features] -default = [ "std" ] +default = ["std"] std = [ "codec/std", "frame-benchmarking?/std", diff --git a/substrate/frame/atomic-swap/Cargo.toml b/substrate/frame/atomic-swap/Cargo.toml index 8315330d7fe..0a0f20eb8e8 100644 --- a/substrate/frame/atomic-swap/Cargo.toml +++ b/substrate/frame/atomic-swap/Cargo.toml @@ -15,18 +15,18 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -frame-support = { path = "../support", default-features = false} -frame-system = { path = "../system", default-features = false} -sp-core = { path = "../../primitives/core", default-features = false} -sp-io = { path = "../../primitives/io", default-features = false} -sp-runtime = { path = "../../primitives/runtime", default-features = false} -sp-std = { path = "../../primitives/std", default-features = false} +frame-support = { path = "../support", default-features = false } +frame-system = { path = "../system", default-features = false } +sp-core = { path = "../../primitives/core", default-features = false } +sp-io = { path = "../../primitives/io", default-features = false } +sp-runtime = { path = "../../primitives/runtime", default-features = false } +sp-std = { path = "../../primitives/std", default-features = false } [dev-dependencies] pallet-balances = { path = "../balances" } [features] -default = [ "std" ] +default = ["std"] std = [ "codec/std", "frame-support/std", diff --git a/substrate/frame/aura/Cargo.toml b/substrate/frame/aura/Cargo.toml index bfe9193e9b5..321a19f74f9 100644 --- 
a/substrate/frame/aura/Cargo.toml +++ b/substrate/frame/aura/Cargo.toml @@ -16,20 +16,20 @@ targets = ["x86_64-unknown-linux-gnu"] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive", "max-encoded-len"] } log = { version = "0.4.17", default-features = false } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -frame-support = { path = "../support", default-features = false} -frame-system = { path = "../system", default-features = false} -pallet-timestamp = { path = "../timestamp", default-features = false} -sp-application-crypto = { path = "../../primitives/application-crypto", default-features = false} -sp-consensus-aura = { path = "../../primitives/consensus/aura", default-features = false} -sp-runtime = { path = "../../primitives/runtime", default-features = false} -sp-std = { path = "../../primitives/std", default-features = false} +frame-support = { path = "../support", default-features = false } +frame-system = { path = "../system", default-features = false } +pallet-timestamp = { path = "../timestamp", default-features = false } +sp-application-crypto = { path = "../../primitives/application-crypto", default-features = false } +sp-consensus-aura = { path = "../../primitives/consensus/aura", default-features = false } +sp-runtime = { path = "../../primitives/runtime", default-features = false } +sp-std = { path = "../../primitives/std", default-features = false } [dev-dependencies] -sp-core = { path = "../../primitives/core", default-features = false} +sp-core = { path = "../../primitives/core", default-features = false } sp-io = { path = "../../primitives/io" } [features] -default = [ "std" ] +default = ["std"] std = [ "codec/std", "frame-support/std", diff --git a/substrate/frame/authority-discovery/Cargo.toml b/substrate/frame/authority-discovery/Cargo.toml index eb30ed3007c..7051276ad88 100644 --- a/substrate/frame/authority-discovery/Cargo.toml +++ b/substrate/frame/authority-discovery/Cargo.toml @@ -17,22 +17,22 @@ codec = { package = "parity-scale-codec", version = "3.6.1", default-features = "derive", ] } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -frame-support = { path = "../support", default-features = false} -frame-system = { path = "../system", default-features = false} +frame-support = { path = "../support", default-features = false } +frame-system = { path = "../system", default-features = false } pallet-session = { path = "../session", default-features = false, features = [ "historical", -]} -sp-application-crypto = { path = "../../primitives/application-crypto", default-features = false} -sp-authority-discovery = { path = "../../primitives/authority-discovery", default-features = false} -sp-runtime = { path = "../../primitives/runtime", default-features = false} -sp-std = { path = "../../primitives/std", default-features = false} +] } +sp-application-crypto = { path = "../../primitives/application-crypto", default-features = false } +sp-authority-discovery = { path = "../../primitives/authority-discovery", default-features = false } +sp-runtime = { path = "../../primitives/runtime", default-features = false } +sp-std = { path = "../../primitives/std", default-features = false } [dev-dependencies] sp-core = { path = "../../primitives/core" } sp-io = { path = "../../primitives/io" } [features] -default = [ "std" ] +default = ["std"] std = [ "codec/std", "frame-support/std", diff --git a/substrate/frame/authorship/Cargo.toml 
b/substrate/frame/authorship/Cargo.toml index bc1e6221a45..737c8da1361 100644 --- a/substrate/frame/authorship/Cargo.toml +++ b/substrate/frame/authorship/Cargo.toml @@ -18,17 +18,17 @@ codec = { package = "parity-scale-codec", version = "3.6.1", default-features = ] } impl-trait-for-tuples = "0.2.2" scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -frame-support = { path = "../support", default-features = false} -frame-system = { path = "../system", default-features = false} -sp-runtime = { path = "../../primitives/runtime", default-features = false} -sp-std = { path = "../../primitives/std", default-features = false} +frame-support = { path = "../support", default-features = false } +frame-system = { path = "../system", default-features = false } +sp-runtime = { path = "../../primitives/runtime", default-features = false } +sp-std = { path = "../../primitives/std", default-features = false } [dev-dependencies] sp-core = { path = "../../primitives/core" } sp-io = { path = "../../primitives/io" } [features] -default = [ "std" ] +default = ["std"] std = [ "codec/std", "frame-support/std", diff --git a/substrate/frame/babe/Cargo.toml b/substrate/frame/babe/Cargo.toml index 2dc414a784d..defa89b4f28 100644 --- a/substrate/frame/babe/Cargo.toml +++ b/substrate/frame/babe/Cargo.toml @@ -16,20 +16,20 @@ targets = ["x86_64-unknown-linux-gnu"] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } log = { version = "0.4.17", default-features = false } scale-info = { version = "2.10.0", default-features = false, features = ["derive", "serde"] } -frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true} -frame-support = { path = "../support", default-features = false} -frame-system = { path = "../system", default-features = false} -pallet-authorship = { path = "../authorship", default-features = false} -pallet-session = { path = "../session", default-features = false} -pallet-timestamp = { path = "../timestamp", default-features = false} +frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } +frame-support = { path = "../support", default-features = false } +frame-system = { path = "../system", default-features = false } +pallet-authorship = { path = "../authorship", default-features = false } +pallet-session = { path = "../session", default-features = false } +pallet-timestamp = { path = "../timestamp", default-features = false } sp-application-crypto = { path = "../../primitives/application-crypto", default-features = false, features = ["serde"] } sp-consensus-babe = { path = "../../primitives/consensus/babe", default-features = false, features = ["serde"] } sp-core = { path = "../../primitives/core", default-features = false, features = ["serde"] } -sp-io = { path = "../../primitives/io", default-features = false} +sp-io = { path = "../../primitives/io", default-features = false } sp-runtime = { path = "../../primitives/runtime", default-features = false, features = ["serde"] } -sp-session = { path = "../../primitives/session", default-features = false} +sp-session = { path = "../../primitives/session", default-features = false } sp-staking = { path = "../../primitives/staking", default-features = false, features = ["serde"] } -sp-std = { path = "../../primitives/std", default-features = false} +sp-std = { path = "../../primitives/std", default-features = false } [dev-dependencies] frame-election-provider-support = { path = 
"../election-provider-support" } @@ -40,7 +40,7 @@ pallet-staking-reward-curve = { path = "../staking/reward-curve" } sp-core = { path = "../../primitives/core" } [features] -default = [ "std" ] +default = ["std"] std = [ "codec/std", "frame-benchmarking?/std", diff --git a/substrate/frame/bags-list/Cargo.toml b/substrate/frame/bags-list/Cargo.toml index cb07ef94ff5..b99726ebf2d 100644 --- a/substrate/frame/bags-list/Cargo.toml +++ b/substrate/frame/bags-list/Cargo.toml @@ -50,7 +50,7 @@ frame-election-provider-support = { path = "../election-provider-support" } frame-benchmarking = { path = "../benchmarking" } [features] -default = [ "std" ] +default = ["std"] std = [ "codec/std", "frame-benchmarking?/std", diff --git a/substrate/frame/bags-list/fuzzer/Cargo.toml b/substrate/frame/bags-list/fuzzer/Cargo.toml index 9944c886554..f3785dd1bef 100644 --- a/substrate/frame/bags-list/fuzzer/Cargo.toml +++ b/substrate/frame/bags-list/fuzzer/Cargo.toml @@ -11,9 +11,9 @@ publish = false [dependencies] honggfuzz = "0.5" -rand = { version = "0.8", features = ["std", "small_rng"] } -frame-election-provider-support = { path = "../../election-provider-support", features = ["fuzz"]} -pallet-bags-list = { path = "..", features = ["fuzz"]} +rand = { version = "0.8", features = ["small_rng", "std"] } +frame-election-provider-support = { path = "../../election-provider-support", features = ["fuzz"] } +pallet-bags-list = { path = "..", features = ["fuzz"] } [[bin]] name = "bags-list" diff --git a/substrate/frame/bags-list/remote-tests/Cargo.toml b/substrate/frame/bags-list/remote-tests/Cargo.toml index b7408e08d55..169dd19db9a 100644 --- a/substrate/frame/bags-list/remote-tests/Cargo.toml +++ b/substrate/frame/bags-list/remote-tests/Cargo.toml @@ -28,7 +28,7 @@ sp-runtime = { path = "../../../primitives/runtime" } sp-std = { path = "../../../primitives/std" } # utils -remote-externalities = { package = "frame-remote-externalities" , path = "../../../utils/frame/remote-externalities" } +remote-externalities = { package = "frame-remote-externalities", path = "../../../utils/frame/remote-externalities" } # others log = "0.4.17" diff --git a/substrate/frame/balances/Cargo.toml b/substrate/frame/balances/Cargo.toml index b91257df7b2..a148684e1fb 100644 --- a/substrate/frame/balances/Cargo.toml +++ b/substrate/frame/balances/Cargo.toml @@ -16,11 +16,11 @@ targets = ["x86_64-unknown-linux-gnu"] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive", "max-encoded-len"] } log = { version = "0.4.17", default-features = false } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true} -frame-support = { path = "../support", default-features = false} -frame-system = { path = "../system", default-features = false} -sp-runtime = { path = "../../primitives/runtime", default-features = false} -sp-std = { path = "../../primitives/std", default-features = false} +frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } +frame-support = { path = "../support", default-features = false } +frame-system = { path = "../system", default-features = false } +sp-runtime = { path = "../../primitives/runtime", default-features = false } +sp-std = { path = "../../primitives/std", default-features = false } [dev-dependencies] pallet-transaction-payment = { path = "../transaction-payment" } @@ -29,7 +29,7 @@ sp-io = { path = 
"../../primitives/io" } paste = "1.0.12" [features] -default = [ "std" ] +default = ["std"] std = [ "codec/std", "frame-benchmarking?/std", diff --git a/substrate/frame/beefy-mmr/Cargo.toml b/substrate/frame/beefy-mmr/Cargo.toml index ee2c13e7ffd..ee336def85c 100644 --- a/substrate/frame/beefy-mmr/Cargo.toml +++ b/substrate/frame/beefy-mmr/Cargo.toml @@ -14,26 +14,26 @@ codec = { package = "parity-scale-codec", version = "3.6.1", default-features = log = { version = "0.4.17", default-features = false } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } serde = { version = "1.0.193", optional = true } -binary-merkle-tree = { path = "../../utils/binary-merkle-tree", default-features = false} -frame-support = { path = "../support", default-features = false} -frame-system = { path = "../system", default-features = false} -pallet-beefy = { path = "../beefy", default-features = false} -pallet-mmr = { path = "../merkle-mountain-range", default-features = false} -pallet-session = { path = "../session", default-features = false} -sp-consensus-beefy = { path = "../../primitives/consensus/beefy", default-features = false} -sp-core = { path = "../../primitives/core", default-features = false} -sp-io = { path = "../../primitives/io", default-features = false} -sp-runtime = { path = "../../primitives/runtime", default-features = false} -sp-std = { path = "../../primitives/std", default-features = false} -sp-api = { path = "../../primitives/api", default-features = false} -sp-state-machine = { path = "../../primitives/state-machine", default-features = false} +binary-merkle-tree = { path = "../../utils/binary-merkle-tree", default-features = false } +frame-support = { path = "../support", default-features = false } +frame-system = { path = "../system", default-features = false } +pallet-beefy = { path = "../beefy", default-features = false } +pallet-mmr = { path = "../merkle-mountain-range", default-features = false } +pallet-session = { path = "../session", default-features = false } +sp-consensus-beefy = { path = "../../primitives/consensus/beefy", default-features = false } +sp-core = { path = "../../primitives/core", default-features = false } +sp-io = { path = "../../primitives/io", default-features = false } +sp-runtime = { path = "../../primitives/runtime", default-features = false } +sp-std = { path = "../../primitives/std", default-features = false } +sp-api = { path = "../../primitives/api", default-features = false } +sp-state-machine = { path = "../../primitives/state-machine", default-features = false } [dev-dependencies] array-bytes = "6.1" sp-staking = { path = "../../primitives/staking" } [features] -default = [ "std" ] +default = ["std"] std = [ "array-bytes", "binary-merkle-tree/std", diff --git a/substrate/frame/beefy/Cargo.toml b/substrate/frame/beefy/Cargo.toml index 9a88de1df15..4a77dd0e2ff 100644 --- a/substrate/frame/beefy/Cargo.toml +++ b/substrate/frame/beefy/Cargo.toml @@ -13,15 +13,15 @@ codec = { package = "parity-scale-codec", version = "3.6.1", default-features = log = { version = "0.4.17", default-features = false } scale-info = { version = "2.10.0", default-features = false, features = ["derive", "serde"] } serde = { version = "1.0.193", optional = true } -frame-support = { path = "../support", default-features = false} -frame-system = { path = "../system", default-features = false} -pallet-authorship = { path = "../authorship", default-features = false} -pallet-session = { path = "../session", default-features = false} +frame-support = { 
path = "../support", default-features = false } +frame-system = { path = "../system", default-features = false } +pallet-authorship = { path = "../authorship", default-features = false } +pallet-session = { path = "../session", default-features = false } sp-consensus-beefy = { path = "../../primitives/consensus/beefy", default-features = false, features = ["serde"] } sp-runtime = { path = "../../primitives/runtime", default-features = false, features = ["serde"] } -sp-session = { path = "../../primitives/session", default-features = false} +sp-session = { path = "../../primitives/session", default-features = false } sp-staking = { path = "../../primitives/staking", default-features = false, features = ["serde"] } -sp-std = { path = "../../primitives/std", default-features = false} +sp-std = { path = "../../primitives/std", default-features = false } [dev-dependencies] frame-election-provider-support = { path = "../election-provider-support" } @@ -33,10 +33,10 @@ pallet-timestamp = { path = "../timestamp" } sp-core = { path = "../../primitives/core" } sp-io = { path = "../../primitives/io" } sp-staking = { path = "../../primitives/staking" } -sp-state-machine = { path = "../../primitives/state-machine", default-features = false} +sp-state-machine = { path = "../../primitives/state-machine", default-features = false } [features] -default = [ "std" ] +default = ["std"] std = [ "codec/std", "frame-election-provider-support/std", diff --git a/substrate/frame/benchmarking/Cargo.toml b/substrate/frame/benchmarking/Cargo.toml index c3a17bc82b3..9cfaac1abfd 100644 --- a/substrate/frame/benchmarking/Cargo.toml +++ b/substrate/frame/benchmarking/Cargo.toml @@ -19,17 +19,17 @@ log = { version = "0.4.17", default-features = false } paste = "1.0" scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } serde = { version = "1.0.193", optional = true } -frame-support = { path = "../support", default-features = false} -frame-support-procedural = { path = "../support/procedural", default-features = false} -frame-system = { path = "../system", default-features = false} -sp-api = { path = "../../primitives/api", default-features = false} -sp-application-crypto = { path = "../../primitives/application-crypto", default-features = false} -sp-core = { path = "../../primitives/core", default-features = false} -sp-io = { path = "../../primitives/io", default-features = false} -sp-runtime = { path = "../../primitives/runtime", default-features = false} -sp-runtime-interface = { path = "../../primitives/runtime-interface", default-features = false} -sp-std = { path = "../../primitives/std", default-features = false} -sp-storage = { path = "../../primitives/storage", default-features = false} +frame-support = { path = "../support", default-features = false } +frame-support-procedural = { path = "../support/procedural", default-features = false } +frame-system = { path = "../system", default-features = false } +sp-api = { path = "../../primitives/api", default-features = false } +sp-application-crypto = { path = "../../primitives/application-crypto", default-features = false } +sp-core = { path = "../../primitives/core", default-features = false } +sp-io = { path = "../../primitives/io", default-features = false } +sp-runtime = { path = "../../primitives/runtime", default-features = false } +sp-runtime-interface = { path = "../../primitives/runtime-interface", default-features = false } +sp-std = { path = "../../primitives/std", default-features = false } +sp-storage = { path = 
"../../primitives/storage", default-features = false } static_assertions = "1.1.0" [dev-dependencies] @@ -38,7 +38,7 @@ rusty-fork = { version = "0.3.0", default-features = false } sp-keystore = { path = "../../primitives/keystore" } [features] -default = [ "std" ] +default = ["std"] std = [ "codec/std", "frame-support-procedural/std", diff --git a/substrate/frame/benchmarking/pov/Cargo.toml b/substrate/frame/benchmarking/pov/Cargo.toml index 0d935063e9e..1ec02828558 100644 --- a/substrate/frame/benchmarking/pov/Cargo.toml +++ b/substrate/frame/benchmarking/pov/Cargo.toml @@ -14,15 +14,15 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -frame-benchmarking = { path = "..", default-features = false} -frame-support = { path = "../../support", default-features = false} -frame-system = { path = "../../system", default-features = false} -sp-io = { path = "../../../primitives/io", default-features = false} -sp-runtime = { path = "../../../primitives/runtime", default-features = false} -sp-std = { path = "../../../primitives/std", default-features = false} +frame-benchmarking = { path = "..", default-features = false } +frame-support = { path = "../../support", default-features = false } +frame-system = { path = "../../system", default-features = false } +sp-io = { path = "../../../primitives/io", default-features = false } +sp-runtime = { path = "../../../primitives/runtime", default-features = false } +sp-std = { path = "../../../primitives/std", default-features = false } [features] -default = [ "std" ] +default = ["std"] std = [ "codec/std", "frame-benchmarking/std", diff --git a/substrate/frame/bounties/Cargo.toml b/substrate/frame/bounties/Cargo.toml index 7da21140542..3b77a8448e3 100644 --- a/substrate/frame/bounties/Cargo.toml +++ b/substrate/frame/bounties/Cargo.toml @@ -18,20 +18,20 @@ codec = { package = "parity-scale-codec", version = "3.6.1", default-features = ] } log = { version = "0.4.17", default-features = false } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true} -frame-support = { path = "../support", default-features = false} -frame-system = { path = "../system", default-features = false} -pallet-treasury = { path = "../treasury", default-features = false} -sp-core = { path = "../../primitives/core", default-features = false} -sp-io = { path = "../../primitives/io", default-features = false} -sp-runtime = { path = "../../primitives/runtime", default-features = false} -sp-std = { path = "../../primitives/std", default-features = false} +frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } +frame-support = { path = "../support", default-features = false } +frame-system = { path = "../system", default-features = false } +pallet-treasury = { path = "../treasury", default-features = false } +sp-core = { path = "../../primitives/core", default-features = false } +sp-io = { path = "../../primitives/io", default-features = false } +sp-runtime = { path = "../../primitives/runtime", default-features = false } +sp-std = { path = "../../primitives/std", default-features = false } [dev-dependencies] pallet-balances = { path = "../balances" } [features] -default = [ "std" ] +default = ["std"] std = [ "codec/std", "frame-benchmarking?/std", diff 
--git a/substrate/frame/broker/Cargo.toml b/substrate/frame/broker/Cargo.toml index 142d0a0e35e..3470cf55b78 100644 --- a/substrate/frame/broker/Cargo.toml +++ b/substrate/frame/broker/Cargo.toml @@ -12,22 +12,22 @@ repository.workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = [ "derive"] } +codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } bitvec = { version = "1.0.0", default-features = false } -sp-std = { path = "../../primitives/std", default-features = false} -sp-arithmetic = { path = "../../primitives/arithmetic", default-features = false} -sp-core = { path = "../../primitives/core", default-features = false} -sp-runtime = { path = "../../primitives/runtime", default-features = false} -frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true} -frame-support = { path = "../support", default-features = false} -frame-system = { path = "../system", default-features = false} +sp-std = { path = "../../primitives/std", default-features = false } +sp-arithmetic = { path = "../../primitives/arithmetic", default-features = false } +sp-core = { path = "../../primitives/core", default-features = false } +sp-runtime = { path = "../../primitives/runtime", default-features = false } +frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } +frame-support = { path = "../support", default-features = false } +frame-system = { path = "../system", default-features = false } [dev-dependencies] sp-io = { path = "../../primitives/io" } [features] -default = [ "std" ] +default = ["std"] std = [ "bitvec/std", diff --git a/substrate/frame/child-bounties/Cargo.toml b/substrate/frame/child-bounties/Cargo.toml index ac29bc4997b..8e9d9c17238 100644 --- a/substrate/frame/child-bounties/Cargo.toml +++ b/substrate/frame/child-bounties/Cargo.toml @@ -18,21 +18,21 @@ codec = { package = "parity-scale-codec", version = "3.6.1", default-features = ] } log = { version = "0.4.17", default-features = false } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true} -frame-support = { path = "../support", default-features = false} -frame-system = { path = "../system", default-features = false} -pallet-bounties = { path = "../bounties", default-features = false} -pallet-treasury = { path = "../treasury", default-features = false} -sp-core = { path = "../../primitives/core", default-features = false} -sp-io = { path = "../../primitives/io", default-features = false} -sp-runtime = { path = "../../primitives/runtime", default-features = false} -sp-std = { path = "../../primitives/std", default-features = false} +frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } +frame-support = { path = "../support", default-features = false } +frame-system = { path = "../system", default-features = false } +pallet-bounties = { path = "../bounties", default-features = false } +pallet-treasury = { path = "../treasury", default-features = false } +sp-core = { path = "../../primitives/core", default-features = false } +sp-io = { path = "../../primitives/io", default-features = false } +sp-runtime = { path = "../../primitives/runtime", default-features = false 
} +sp-std = { path = "../../primitives/std", default-features = false } [dev-dependencies] pallet-balances = { path = "../balances" } [features] -default = [ "std" ] +default = ["std"] std = [ "codec/std", "frame-benchmarking?/std", diff --git a/substrate/frame/collective/Cargo.toml b/substrate/frame/collective/Cargo.toml index 7f5e305e4f5..672468450c2 100644 --- a/substrate/frame/collective/Cargo.toml +++ b/substrate/frame/collective/Cargo.toml @@ -16,16 +16,16 @@ targets = ["x86_64-unknown-linux-gnu"] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } log = { version = "0.4.17", default-features = false } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true} -frame-support = { path = "../support", default-features = false} -frame-system = { path = "../system", default-features = false} -sp-core = { path = "../../primitives/core", default-features = false} -sp-io = { path = "../../primitives/io", default-features = false} -sp-runtime = { path = "../../primitives/runtime", default-features = false} -sp-std = { path = "../../primitives/std", default-features = false} +frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } +frame-support = { path = "../support", default-features = false } +frame-system = { path = "../system", default-features = false } +sp-core = { path = "../../primitives/core", default-features = false } +sp-io = { path = "../../primitives/io", default-features = false } +sp-runtime = { path = "../../primitives/runtime", default-features = false } +sp-std = { path = "../../primitives/std", default-features = false } [features] -default = [ "std" ] +default = ["std"] std = [ "codec/std", "frame-benchmarking?/std", diff --git a/substrate/frame/contracts/Cargo.toml b/substrate/frame/contracts/Cargo.toml index 417f719d803..80856bef3fd 100644 --- a/substrate/frame/contracts/Cargo.toml +++ b/substrate/frame/contracts/Cargo.toml @@ -9,7 +9,7 @@ homepage = "https://substrate.io" repository.workspace = true description = "FRAME pallet for WASM contracts" readme = "README.md" -include = ["src/**/*", "benchmarks/**", "build.rs", "README.md", "CHANGELOG.md"] +include = ["CHANGELOG.md", "README.md", "benchmarks/**", "build.rs", "src/**/*"] [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] @@ -37,19 +37,19 @@ rand_pcg = { version = "0.3", optional = true } # Substrate Dependencies environmental = { version = "1.1.4", default-features = false } frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } -frame-support = { path = "../support", default-features = false} -frame-system = { path = "../system", default-features = false} -pallet-balances = { path = "../balances", default-features = false , optional = true} +frame-support = { path = "../support", default-features = false } +frame-system = { path = "../system", default-features = false } +pallet-balances = { path = "../balances", default-features = false, optional = true } pallet-contracts-uapi = { path = "uapi" } pallet-contracts-proc-macro = { path = "proc-macro" } -sp-api = { path = "../../primitives/api", default-features = false} -sp-core = { path = "../../primitives/core", default-features = false} -sp-io = { path = "../../primitives/io", default-features = false} -sp-runtime = { path = "../../primitives/runtime", default-features = false} -sp-std = { path = 
"../../primitives/std", default-features = false} +sp-api = { path = "../../primitives/api", default-features = false } +sp-core = { path = "../../primitives/core", default-features = false } +sp-io = { path = "../../primitives/io", default-features = false } +sp-runtime = { path = "../../primitives/runtime", default-features = false } +sp-std = { path = "../../primitives/std", default-features = false } -xcm = { package = "staging-xcm", path = "../../../polkadot/xcm", default-features = false} -xcm-builder = { package = "staging-xcm-builder", path = "../../../polkadot/xcm/xcm-builder", default-features = false} +xcm = { package = "staging-xcm", path = "../../../polkadot/xcm", default-features = false } +xcm-builder = { package = "staging-xcm-builder", path = "../../../polkadot/xcm/xcm-builder", default-features = false } [dev-dependencies] array-bytes = "6.1" @@ -60,7 +60,7 @@ wat = "1" pallet-contracts-fixtures = { path = "./fixtures" } # Polkadot Dependencies -xcm-builder = {package = "staging-xcm-builder", path = "../../../polkadot/xcm/xcm-builder"} +xcm-builder = { package = "staging-xcm-builder", path = "../../../polkadot/xcm/xcm-builder" } # Substrate Dependencies pallet-balances = { path = "../balances" } @@ -74,7 +74,7 @@ sp-keystore = { path = "../../primitives/keystore" } sp-tracing = { path = "../../primitives/tracing" } [features] -default = [ "std" ] +default = ["std"] std = [ "codec/std", "environmental/std", diff --git a/substrate/frame/contracts/fixtures/Cargo.toml b/substrate/frame/contracts/fixtures/Cargo.toml index 257e01e50fa..2e95354ddc9 100644 --- a/substrate/frame/contracts/fixtures/Cargo.toml +++ b/substrate/frame/contracts/fixtures/Cargo.toml @@ -20,5 +20,3 @@ toml = "0.8.8" twox-hash = "1.6.3" anyhow = "1.0.0" cfg-if = { version = "1.0", default-features = false } - - diff --git a/substrate/frame/contracts/fixtures/contracts/common/Cargo.toml b/substrate/frame/contracts/fixtures/contracts/common/Cargo.toml index b70db7cde45..127bb575088 100644 --- a/substrate/frame/contracts/fixtures/contracts/common/Cargo.toml +++ b/substrate/frame/contracts/fixtures/contracts/common/Cargo.toml @@ -6,4 +6,3 @@ authors.workspace = true edition.workspace = true license.workspace = true description = "Common utilities for pallet-contracts-fixtures." - diff --git a/substrate/frame/contracts/mock-network/Cargo.toml b/substrate/frame/contracts/mock-network/Cargo.toml index 28e24dd15e4..9c6231a783a 100644 --- a/substrate/frame/contracts/mock-network/Cargo.toml +++ b/substrate/frame/contracts/mock-network/Cargo.toml @@ -9,36 +9,36 @@ repository.workspace = true description = "A mock network for testing pallet-contracts" [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = [ "derive", "max-encoded-len"] } +codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive", "max-encoded-len"] } -frame-support = { path = "../../support", default-features = false} -frame-system = { path = "../../system", default-features = false} +frame-support = { path = "../../support", default-features = false } +frame-system = { path = "../../system", default-features = false } pallet-assets = { path = "../../assets" } pallet-balances = { path = "../../balances" } pallet-contracts = { path = ".." 
} -pallet-contracts-uapi = { path = "../uapi", default-features = false} +pallet-contracts-uapi = { path = "../uapi", default-features = false } pallet-contracts-proc-macro = { path = "../proc-macro" } pallet-insecure-randomness-collective-flip = { path = "../../insecure-randomness-collective-flip" } pallet-message-queue = { path = "../../message-queue" } pallet-proxy = { path = "../../proxy" } pallet-timestamp = { path = "../../timestamp" } pallet-utility = { path = "../../utility" } -pallet-xcm = { path = "../../../../polkadot/xcm/pallet-xcm", default-features = false} -polkadot-parachain-primitives = { path = "../../../../polkadot/parachain" } -polkadot-primitives = { path = "../../../../polkadot/primitives" } -polkadot-runtime-parachains = {path = "../../../../polkadot/runtime/parachains"} +pallet-xcm = { path = "../../../../polkadot/xcm/pallet-xcm", default-features = false } +polkadot-parachain-primitives = { path = "../../../../polkadot/parachain" } +polkadot-primitives = { path = "../../../../polkadot/primitives" } +polkadot-runtime-parachains = { path = "../../../../polkadot/runtime/parachains" } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -sp-api = { path = "../../../primitives/api", default-features = false} -sp-core = { path = "../../../primitives/core", default-features = false} -sp-io = { path = "../../../primitives/io", default-features = false} +sp-api = { path = "../../../primitives/api", default-features = false } +sp-core = { path = "../../../primitives/core", default-features = false } +sp-io = { path = "../../../primitives/io", default-features = false } sp-keystore = { path = "../../../primitives/keystore" } -sp-runtime = { path = "../../../primitives/runtime", default-features = false} -sp-std = { path = "../../../primitives/std", default-features = false} +sp-runtime = { path = "../../../primitives/runtime", default-features = false } +sp-std = { path = "../../../primitives/std", default-features = false } sp-tracing = { path = "../../../primitives/tracing" } -xcm = { package = "staging-xcm", path = "../../../../polkadot/xcm", default-features = false} -xcm-builder = {package = "staging-xcm-builder", path = "../../../../polkadot/xcm/xcm-builder"} -xcm-executor = { package = "staging-xcm-executor", path = "../../../../polkadot/xcm/xcm-executor", default-features = false} -xcm-simulator = {path = "../../../../polkadot/xcm/xcm-simulator"} +xcm = { package = "staging-xcm", path = "../../../../polkadot/xcm", default-features = false } +xcm-builder = { package = "staging-xcm-builder", path = "../../../../polkadot/xcm/xcm-builder" } +xcm-executor = { package = "staging-xcm-executor", path = "../../../../polkadot/xcm/xcm-executor", default-features = false } +xcm-simulator = { path = "../../../../polkadot/xcm/xcm-simulator" } [dev-dependencies] assert_matches = "1" @@ -46,7 +46,7 @@ pretty_assertions = "1" pallet-contracts-fixtures = { path = "../fixtures" } [features] -default = [ "std" ] +default = ["std"] std = [ "codec/std", "frame-support/std", diff --git a/substrate/frame/contracts/primitives/Cargo.toml b/substrate/frame/contracts/primitives/Cargo.toml new file mode 100644 index 00000000000..f821797c923 --- /dev/null +++ b/substrate/frame/contracts/primitives/Cargo.toml @@ -0,0 +1,33 @@ +[package] +name = "pallet-contracts-primitives" +version = "24.0.0" +authors.workspace = true +edition.workspace = true +license = "Apache-2.0" +homepage = "https://substrate.io" +repository.workspace = true +description = "A crate that hosts a 
common definitions that are relevant for the pallet-contracts." +readme = "README.md" + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] + +[dependencies] +bitflags = "1.0" +scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } + +# Substrate Dependencies (This crate should not rely on frame) +sp-std = { path = "../../../primitives/std", default-features = false } +sp-runtime = { path = "../../../primitives/runtime", default-features = false } +sp-weights = { path = "../../../primitives/weights", default-features = false } + +[features] +default = ["std"] +std = [ + "codec/std", + "scale-info/std", + "sp-runtime/std", + "sp-std/std", + "sp-weights/std", +] diff --git a/substrate/frame/contracts/uapi/Cargo.toml b/substrate/frame/contracts/uapi/Cargo.toml index 6717d574dfe..df45872349d 100644 --- a/substrate/frame/contracts/uapi/Cargo.toml +++ b/substrate/frame/contracts/uapi/Cargo.toml @@ -11,13 +11,12 @@ description = "Exposes all the host functions that a contract can import." [dependencies] paste = { version = "1.0", default-features = false } bitflags = "1.0" -scale-info = { version = "2.10.0", default-features = false, features = ["derive"], optional = true } +scale-info = { version = "2.10.0", default-features = false, features = ["derive"], optional = true } scale = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = [ "derive", "max-encoded-len", ], optional = true } [features] -default = [ "scale" ] -scale = [ "dep:scale", "scale-info" ] - +default = ["scale"] +scale = ["dep:scale", "scale-info"] diff --git a/substrate/frame/conviction-voting/Cargo.toml b/substrate/frame/conviction-voting/Cargo.toml index bc0ff0f9dc5..19eb6d09fc1 100644 --- a/substrate/frame/conviction-voting/Cargo.toml +++ b/substrate/frame/conviction-voting/Cargo.toml @@ -20,12 +20,12 @@ codec = { package = "parity-scale-codec", version = "3.6.1", default-features = ] } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } serde = { version = "1.0.193", features = ["derive"], optional = true } -frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true} -frame-support = { path = "../support", default-features = false} -frame-system = { path = "../system", default-features = false} -sp-io = { path = "../../primitives/io", default-features = false} -sp-runtime = { path = "../../primitives/runtime", default-features = false} -sp-std = { path = "../../primitives/std", default-features = false} +frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } +frame-support = { path = "../support", default-features = false } +frame-system = { path = "../system", default-features = false } +sp-io = { path = "../../primitives/io", default-features = false } +sp-runtime = { path = "../../primitives/runtime", default-features = false } +sp-std = { path = "../../primitives/std", default-features = false } [dev-dependencies] pallet-balances = { path = "../balances" } @@ -33,7 +33,7 @@ pallet-scheduler = { path = "../scheduler" } sp-core = { path = "../../primitives/core" } [features] -default = [ "std" ] +default = ["std"] std = [ "codec/std", "frame-benchmarking?/std", diff --git a/substrate/frame/core-fellowship/Cargo.toml b/substrate/frame/core-fellowship/Cargo.toml index 523a5bb90a0..c6f99cdaab2 100644 --- 
a/substrate/frame/core-fellowship/Cargo.toml +++ b/substrate/frame/core-fellowship/Cargo.toml @@ -16,17 +16,17 @@ targets = ["x86_64-unknown-linux-gnu"] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } log = { version = "0.4.16", default-features = false } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true} -frame-support = { path = "../support", default-features = false} -frame-system = { path = "../system", default-features = false} -sp-arithmetic = { path = "../../primitives/arithmetic", default-features = false} -sp-core = { path = "../../primitives/core", default-features = false} -sp-io = { path = "../../primitives/io", default-features = false} -sp-runtime = { path = "../../primitives/runtime", default-features = false} -sp-std = { path = "../../primitives/std", default-features = false} +frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } +frame-support = { path = "../support", default-features = false } +frame-system = { path = "../system", default-features = false } +sp-arithmetic = { path = "../../primitives/arithmetic", default-features = false } +sp-core = { path = "../../primitives/core", default-features = false } +sp-io = { path = "../../primitives/io", default-features = false } +sp-runtime = { path = "../../primitives/runtime", default-features = false } +sp-std = { path = "../../primitives/std", default-features = false } [features] -default = [ "std" ] +default = ["std"] std = [ "codec/std", "frame-benchmarking?/std", diff --git a/substrate/frame/democracy/Cargo.toml b/substrate/frame/democracy/Cargo.toml index 5be38214cf8..061eee106d4 100644 --- a/substrate/frame/democracy/Cargo.toml +++ b/substrate/frame/democracy/Cargo.toml @@ -18,13 +18,13 @@ codec = { package = "parity-scale-codec", version = "3.6.1", default-features = ] } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } serde = { version = "1.0.193", features = ["derive"], optional = true } -frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true} -frame-support = { path = "../support", default-features = false} -frame-system = { path = "../system", default-features = false} -sp-io = { path = "../../primitives/io", default-features = false} -sp-runtime = { path = "../../primitives/runtime", default-features = false} -sp-std = { path = "../../primitives/std", default-features = false} -sp-core = { path = "../../primitives/core", default-features = false} +frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } +frame-support = { path = "../support", default-features = false } +frame-system = { path = "../system", default-features = false } +sp-io = { path = "../../primitives/io", default-features = false } +sp-runtime = { path = "../../primitives/runtime", default-features = false } +sp-std = { path = "../../primitives/std", default-features = false } +sp-core = { path = "../../primitives/core", default-features = false } log = { version = "0.4.17", default-features = false } [dev-dependencies] @@ -33,7 +33,7 @@ pallet-scheduler = { path = "../scheduler" } pallet-preimage = { path = "../preimage" } [features] -default = [ "std" ] +default = ["std"] std = [ "codec/std", "frame-benchmarking?/std", diff --git a/substrate/frame/election-provider-multi-phase/Cargo.toml 
b/substrate/frame/election-provider-multi-phase/Cargo.toml index 91be97d3e35..f1640f7cc70 100644 --- a/substrate/frame/election-provider-multi-phase/Cargo.toml +++ b/substrate/frame/election-provider-multi-phase/Cargo.toml @@ -20,35 +20,35 @@ scale-info = { version = "2.10.0", default-features = false, features = [ ] } log = { version = "0.4.17", default-features = false } -frame-support = { path = "../support", default-features = false} -frame-system = { path = "../system", default-features = false} +frame-support = { path = "../support", default-features = false } +frame-system = { path = "../system", default-features = false } -sp-io = { path = "../../primitives/io", default-features = false} -sp-std = { path = "../../primitives/std", default-features = false} -sp-core = { path = "../../primitives/core", default-features = false} -sp-runtime = { path = "../../primitives/runtime", default-features = false} -sp-npos-elections = { path = "../../primitives/npos-elections", default-features = false} -sp-arithmetic = { path = "../../primitives/arithmetic", default-features = false} -frame-election-provider-support = { path = "../election-provider-support", default-features = false} +sp-io = { path = "../../primitives/io", default-features = false } +sp-std = { path = "../../primitives/std", default-features = false } +sp-core = { path = "../../primitives/core", default-features = false } +sp-runtime = { path = "../../primitives/runtime", default-features = false } +sp-npos-elections = { path = "../../primitives/npos-elections", default-features = false } +sp-arithmetic = { path = "../../primitives/arithmetic", default-features = false } +frame-election-provider-support = { path = "../election-provider-support", default-features = false } # Optional imports for benchmarking frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } pallet-election-provider-support-benchmarking = { path = "../election-provider-support/benchmarking", default-features = false, optional = true } rand = { version = "0.8.5", default-features = false, features = ["alloc", "small_rng"], optional = true } -strum = { version = "0.24.1", default-features = false, features = ["derive"], optional = true } +strum = { version = "0.24.1", default-features = false, features = ["derive"], optional = true } [dev-dependencies] parking_lot = "0.12.1" rand = "0.8.5" -sp-core = { path = "../../primitives/core", default-features = false} +sp-core = { path = "../../primitives/core", default-features = false } sp-io = { path = "../../primitives/io" } -sp-npos-elections = { path = "../../primitives/npos-elections", default-features = false} +sp-npos-elections = { path = "../../primitives/npos-elections", default-features = false } sp-tracing = { path = "../../primitives/tracing" } pallet-balances = { path = "../balances" } frame-benchmarking = { path = "../benchmarking" } [features] -default = [ "std" ] +default = ["std"] std = [ "codec/std", "frame-benchmarking?/std", diff --git a/substrate/frame/election-provider-multi-phase/test-staking-e2e/Cargo.toml b/substrate/frame/election-provider-multi-phase/test-staking-e2e/Cargo.toml index f5d1991d199..44295d5e20d 100644 --- a/substrate/frame/election-provider-multi-phase/test-staking-e2e/Cargo.toml +++ b/substrate/frame/election-provider-multi-phase/test-staking-e2e/Cargo.toml @@ -23,7 +23,7 @@ sp-io = { path = "../../../primitives/io" } sp-std = { path = "../../../primitives/std" } sp-staking = { path = "../../../primitives/staking" } sp-core = { path = 
"../../../primitives/core" } -sp-npos-elections = { path = "../../../primitives/npos-elections", default-features = false} +sp-npos-elections = { path = "../../../primitives/npos-elections", default-features = false } sp-tracing = { path = "../../../primitives/tracing" } frame-system = { path = "../../system" } diff --git a/substrate/frame/election-provider-support/Cargo.toml b/substrate/frame/election-provider-support/Cargo.toml index ed36630d0d0..7062e54cdbc 100644 --- a/substrate/frame/election-provider-support/Cargo.toml +++ b/substrate/frame/election-provider-support/Cargo.toml @@ -15,13 +15,13 @@ targets = ["x86_64-unknown-linux-gnu"] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } frame-election-provider-solution-type = { path = "solution-type" } -frame-support = { path = "../support", default-features = false} -frame-system = { path = "../system", default-features = false} -sp-arithmetic = { path = "../../primitives/arithmetic", default-features = false} -sp-npos-elections = { path = "../../primitives/npos-elections", default-features = false} -sp-runtime = { path = "../../primitives/runtime", default-features = false} -sp-std = { path = "../../primitives/std", default-features = false} -sp-core = { path = "../../primitives/core", default-features = false} +frame-support = { path = "../support", default-features = false } +frame-system = { path = "../system", default-features = false } +sp-arithmetic = { path = "../../primitives/arithmetic", default-features = false } +sp-npos-elections = { path = "../../primitives/npos-elections", default-features = false } +sp-runtime = { path = "../../primitives/runtime", default-features = false } +sp-std = { path = "../../primitives/std", default-features = false } +sp-core = { path = "../../primitives/core", default-features = false } [dev-dependencies] rand = { version = "0.8.5", features = ["small_rng"] } @@ -29,8 +29,8 @@ sp-io = { path = "../../primitives/io" } sp-npos-elections = { path = "../../primitives/npos-elections" } [features] -default = [ "std" ] -fuzz = [ "default" ] +default = ["std"] +fuzz = ["default"] std = [ "codec/std", "frame-support/std", diff --git a/substrate/frame/election-provider-support/benchmarking/Cargo.toml b/substrate/frame/election-provider-support/benchmarking/Cargo.toml index a8c56b425fd..b1e3564b4d4 100644 --- a/substrate/frame/election-provider-support/benchmarking/Cargo.toml +++ b/substrate/frame/election-provider-support/benchmarking/Cargo.toml @@ -15,15 +15,15 @@ targets = ["x86_64-unknown-linux-gnu"] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = [ "derive", ] } -frame-benchmarking = { path = "../../benchmarking", default-features = false, optional = true} -frame-election-provider-support = { path = "..", default-features = false} -frame-system = { path = "../../system", default-features = false} -sp-npos-elections = { path = "../../../primitives/npos-elections", default-features = false} -sp-runtime = { path = "../../../primitives/runtime", default-features = false} -sp-std = { path = "../../../primitives/std", default-features = false} +frame-benchmarking = { path = "../../benchmarking", default-features = false, optional = true } +frame-election-provider-support = { path = "..", default-features = false } +frame-system = { path = "../../system", default-features = false } +sp-npos-elections = { path = 
"../../../primitives/npos-elections", default-features = false } +sp-runtime = { path = "../../../primitives/runtime", default-features = false } +sp-std = { path = "../../../primitives/std", default-features = false } [features] -default = [ "std" ] +default = ["std"] std = [ "codec/std", "frame-benchmarking?/std", diff --git a/substrate/frame/election-provider-support/solution-type/fuzzer/Cargo.toml b/substrate/frame/election-provider-support/solution-type/fuzzer/Cargo.toml index 50f5bd908c7..144ed8b18c5 100644 --- a/substrate/frame/election-provider-support/solution-type/fuzzer/Cargo.toml +++ b/substrate/frame/election-provider-support/solution-type/fuzzer/Cargo.toml @@ -15,7 +15,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] clap = { version = "4.4.10", features = ["derive"] } honggfuzz = "0.5" -rand = { version = "0.8", features = ["std", "small_rng"] } +rand = { version = "0.8", features = ["small_rng", "std"] } codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } @@ -24,7 +24,7 @@ frame-election-provider-support = { path = "../.." } sp-arithmetic = { path = "../../../../primitives/arithmetic" } sp-runtime = { path = "../../../../primitives/runtime" } # used by generate_solution_type: -sp-npos-elections = { path = "../../../../primitives/npos-elections", default-features = false} +sp-npos-elections = { path = "../../../../primitives/npos-elections", default-features = false } frame-support = { path = "../../../support" } [[bin]] diff --git a/substrate/frame/elections-phragmen/Cargo.toml b/substrate/frame/elections-phragmen/Cargo.toml index cb8bc1035a5..ffb939fa4d2 100644 --- a/substrate/frame/elections-phragmen/Cargo.toml +++ b/substrate/frame/elections-phragmen/Cargo.toml @@ -18,15 +18,15 @@ codec = { package = "parity-scale-codec", version = "3.6.1", default-features = ] } log = { version = "0.4.14", default-features = false } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true} -frame-support = { path = "../support", default-features = false} -frame-system = { path = "../system", default-features = false} -sp-core = { path = "../../primitives/core", default-features = false} -sp-io = { path = "../../primitives/io", default-features = false} -sp-npos-elections = { path = "../../primitives/npos-elections", default-features = false} -sp-runtime = { path = "../../primitives/runtime", default-features = false} -sp-std = { path = "../../primitives/std", default-features = false} -sp-staking = { path = "../../primitives/staking", default-features = false} +frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } +frame-support = { path = "../support", default-features = false } +frame-system = { path = "../system", default-features = false } +sp-core = { path = "../../primitives/core", default-features = false } +sp-io = { path = "../../primitives/io", default-features = false } +sp-npos-elections = { path = "../../primitives/npos-elections", default-features = false } +sp-runtime = { path = "../../primitives/runtime", default-features = false } +sp-std = { path = "../../primitives/std", default-features = false } +sp-staking = { path = "../../primitives/staking", default-features = false } [dev-dependencies] pallet-balances = { path = "../balances" } @@ -35,7 +35,7 @@ 
sp-tracing = { path = "../../primitives/tracing" } substrate-test-utils = { path = "../../test-utils" } [features] -default = [ "std" ] +default = ["std"] std = [ "codec/std", "frame-benchmarking?/std", diff --git a/substrate/frame/examples/Cargo.toml b/substrate/frame/examples/Cargo.toml index 1b215022715..779baa432fc 100644 --- a/substrate/frame/examples/Cargo.toml +++ b/substrate/frame/examples/Cargo.toml @@ -13,16 +13,16 @@ publish = false targets = ["x86_64-unknown-linux-gnu"] [dependencies] -pallet-default-config-example = { path = "default-config", default-features = false} -pallet-dev-mode = { path = "dev-mode", default-features = false} -pallet-example-basic = { path = "basic", default-features = false} +pallet-default-config-example = { path = "default-config", default-features = false } +pallet-dev-mode = { path = "dev-mode", default-features = false } +pallet-example-basic = { path = "basic", default-features = false } pallet-example-frame-crate = { path = "frame-crate", default-features = false } -pallet-example-kitchensink = { path = "kitchensink", default-features = false} -pallet-example-offchain-worker = { path = "offchain-worker", default-features = false} -pallet-example-split = { path = "split", default-features = false} +pallet-example-kitchensink = { path = "kitchensink", default-features = false } +pallet-example-offchain-worker = { path = "offchain-worker", default-features = false } +pallet-example-split = { path = "split", default-features = false } [features] -default = [ "std" ] +default = ["std"] std = [ "pallet-default-config-example/std", "pallet-dev-mode/std", diff --git a/substrate/frame/examples/basic/Cargo.toml b/substrate/frame/examples/basic/Cargo.toml index d39a93e7abb..53da9ac2eba 100644 --- a/substrate/frame/examples/basic/Cargo.toml +++ b/substrate/frame/examples/basic/Cargo.toml @@ -16,19 +16,19 @@ targets = ["x86_64-unknown-linux-gnu"] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } log = { version = "0.4.17", default-features = false } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -frame-benchmarking = { path = "../../benchmarking", default-features = false, optional = true} -frame-support = { path = "../../support", default-features = false} -frame-system = { path = "../../system", default-features = false} -pallet-balances = { path = "../../balances", default-features = false} -sp-io = { path = "../../../primitives/io", default-features = false} -sp-runtime = { path = "../../../primitives/runtime", default-features = false} -sp-std = { path = "../../../primitives/std", default-features = false} +frame-benchmarking = { path = "../../benchmarking", default-features = false, optional = true } +frame-support = { path = "../../support", default-features = false } +frame-system = { path = "../../system", default-features = false } +pallet-balances = { path = "../../balances", default-features = false } +sp-io = { path = "../../../primitives/io", default-features = false } +sp-runtime = { path = "../../../primitives/runtime", default-features = false } +sp-std = { path = "../../../primitives/std", default-features = false } [dev-dependencies] -sp-core = { path = "../../../primitives/core", default-features = false} +sp-core = { path = "../../../primitives/core", default-features = false } [features] -default = [ "std" ] +default = ["std"] std = [ "codec/std", "frame-benchmarking?/std", diff --git a/substrate/frame/examples/default-config/Cargo.toml 
b/substrate/frame/examples/default-config/Cargo.toml index 13b6ce74543..0d80e6838f0 100644 --- a/substrate/frame/examples/default-config/Cargo.toml +++ b/substrate/frame/examples/default-config/Cargo.toml @@ -16,15 +16,15 @@ targets = ["x86_64-unknown-linux-gnu"] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } log = { version = "0.4.17", default-features = false } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -frame-support = { path = "../../support", default-features = false} -frame-system = { path = "../../system", default-features = false} +frame-support = { path = "../../support", default-features = false } +frame-system = { path = "../../system", default-features = false } -sp-io = { path = "../../../primitives/io", default-features = false} -sp-runtime = { path = "../../../primitives/runtime", default-features = false} -sp-std = { path = "../../../primitives/std", default-features = false} +sp-io = { path = "../../../primitives/io", default-features = false } +sp-runtime = { path = "../../../primitives/runtime", default-features = false } +sp-std = { path = "../../../primitives/std", default-features = false } [features] -default = [ "std" ] +default = ["std"] std = [ "codec/std", "frame-support/std", diff --git a/substrate/frame/examples/dev-mode/Cargo.toml b/substrate/frame/examples/dev-mode/Cargo.toml index 806af334bb0..ce558fe087b 100644 --- a/substrate/frame/examples/dev-mode/Cargo.toml +++ b/substrate/frame/examples/dev-mode/Cargo.toml @@ -16,18 +16,18 @@ targets = ["x86_64-unknown-linux-gnu"] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } log = { version = "0.4.17", default-features = false } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -frame-support = { path = "../../support", default-features = false} -frame-system = { path = "../../system", default-features = false} -pallet-balances = { path = "../../balances", default-features = false} -sp-io = { path = "../../../primitives/io", default-features = false} -sp-runtime = { path = "../../../primitives/runtime", default-features = false} -sp-std = { path = "../../../primitives/std", default-features = false} +frame-support = { path = "../../support", default-features = false } +frame-system = { path = "../../system", default-features = false } +pallet-balances = { path = "../../balances", default-features = false } +sp-io = { path = "../../../primitives/io", default-features = false } +sp-runtime = { path = "../../../primitives/runtime", default-features = false } +sp-std = { path = "../../../primitives/std", default-features = false } [dev-dependencies] -sp-core = { path = "../../../primitives/core", default-features = false} +sp-core = { path = "../../../primitives/core", default-features = false } [features] -default = [ "std" ] +default = ["std"] std = [ "codec/std", "frame-support/std", diff --git a/substrate/frame/examples/frame-crate/Cargo.toml b/substrate/frame/examples/frame-crate/Cargo.toml index ceb8c7bfb81..586b6c0216e 100644 --- a/substrate/frame/examples/frame-crate/Cargo.toml +++ b/substrate/frame/examples/frame-crate/Cargo.toml @@ -16,9 +16,9 @@ targets = ["x86_64-unknown-linux-gnu"] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } -frame = { path = "../..", default-features = false, features = ["runtime", "experimental"] } +frame = 
{ path = "../..", default-features = false, features = ["experimental", "runtime"] } [features] -default = [ "std" ] -std = [ "codec/std", "frame/std", "scale-info/std" ] +default = ["std"] +std = ["codec/std", "frame/std", "scale-info/std"] diff --git a/substrate/frame/examples/kitchensink/Cargo.toml b/substrate/frame/examples/kitchensink/Cargo.toml index 1275ef0b53f..e63e1192458 100644 --- a/substrate/frame/examples/kitchensink/Cargo.toml +++ b/substrate/frame/examples/kitchensink/Cargo.toml @@ -16,22 +16,22 @@ codec = { package = "parity-scale-codec", version = "3.6.1", default-features = log = { version = "0.4.17", default-features = false } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -frame-support = { path = "../../support", default-features = false} -frame-system = { path = "../../system", default-features = false} +frame-support = { path = "../../support", default-features = false } +frame-system = { path = "../../system", default-features = false } -sp-io = { path = "../../../primitives/io", default-features = false} -sp-runtime = { path = "../../../primitives/runtime", default-features = false} -sp-std = { path = "../../../primitives/std", default-features = false} +sp-io = { path = "../../../primitives/io", default-features = false } +sp-runtime = { path = "../../../primitives/runtime", default-features = false } +sp-std = { path = "../../../primitives/std", default-features = false } -frame-benchmarking = { path = "../../benchmarking", default-features = false, optional = true} +frame-benchmarking = { path = "../../benchmarking", default-features = false, optional = true } -pallet-balances = { path = "../../balances", default-features = false} +pallet-balances = { path = "../../balances", default-features = false } [dev-dependencies] -sp-core = { path = "../../../primitives/core", default-features = false} +sp-core = { path = "../../../primitives/core", default-features = false } [features] -default = [ "std" ] +default = ["std"] std = [ "codec/std", "frame-benchmarking?/std", diff --git a/substrate/frame/examples/offchain-worker/Cargo.toml b/substrate/frame/examples/offchain-worker/Cargo.toml index e6b7715655d..9d0b682ee02 100644 --- a/substrate/frame/examples/offchain-worker/Cargo.toml +++ b/substrate/frame/examples/offchain-worker/Cargo.toml @@ -17,16 +17,16 @@ codec = { package = "parity-scale-codec", version = "3.6.1", default-features = lite-json = { version = "0.2.0", default-features = false } log = { version = "0.4.17", default-features = false } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -frame-support = { path = "../../support", default-features = false} -frame-system = { path = "../../system", default-features = false} -sp-core = { path = "../../../primitives/core", default-features = false} -sp-io = { path = "../../../primitives/io", default-features = false} -sp-keystore = { path = "../../../primitives/keystore", optional = true} -sp-runtime = { path = "../../../primitives/runtime", default-features = false} -sp-std = { path = "../../../primitives/std", default-features = false} +frame-support = { path = "../../support", default-features = false } +frame-system = { path = "../../system", default-features = false } +sp-core = { path = "../../../primitives/core", default-features = false } +sp-io = { path = "../../../primitives/io", default-features = false } +sp-keystore = { path = "../../../primitives/keystore", optional = true } +sp-runtime = { path = "../../../primitives/runtime", 
default-features = false } +sp-std = { path = "../../../primitives/std", default-features = false } [features] -default = [ "std" ] +default = ["std"] std = [ "codec/std", "frame-support/std", diff --git a/substrate/frame/examples/split/Cargo.toml b/substrate/frame/examples/split/Cargo.toml index db2a75e388d..b6b9ea86dfe 100644 --- a/substrate/frame/examples/split/Cargo.toml +++ b/substrate/frame/examples/split/Cargo.toml @@ -17,19 +17,19 @@ codec = { package = "parity-scale-codec", version = "3.2.2", default-features = log = { version = "0.4.17", default-features = false } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -frame-support = { path = "../../support", default-features = false} -frame-system = { path = "../../system", default-features = false} +frame-support = { path = "../../support", default-features = false } +frame-system = { path = "../../system", default-features = false } -sp-io = { path = "../../../primitives/io", default-features = false} -sp-std = { path = "../../../primitives/std", default-features = false} +sp-io = { path = "../../../primitives/io", default-features = false } +sp-std = { path = "../../../primitives/std", default-features = false } -frame-benchmarking = { path = "../../benchmarking", default-features = false, optional = true} +frame-benchmarking = { path = "../../benchmarking", default-features = false, optional = true } [dev-dependencies] -sp-core = { path = "../../../primitives/core", default-features = false} +sp-core = { path = "../../../primitives/core", default-features = false } [features] -default = [ "std" ] +default = ["std"] std = [ "codec/std", "frame-benchmarking?/std", @@ -46,4 +46,4 @@ runtime-benchmarks = [ "frame-support/runtime-benchmarks", "frame-system/runtime-benchmarks", ] -try-runtime = [ "frame-support/try-runtime", "frame-system/try-runtime" ] +try-runtime = ["frame-support/try-runtime", "frame-system/try-runtime"] diff --git a/substrate/frame/executive/Cargo.toml b/substrate/frame/executive/Cargo.toml index 32983a32c4f..c2a92ad3d72 100644 --- a/substrate/frame/executive/Cargo.toml +++ b/substrate/frame/executive/Cargo.toml @@ -18,14 +18,14 @@ codec = { package = "parity-scale-codec", version = "3.6.1", default-features = ] } log = { version = "0.4.17", default-features = false } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -frame-support = { path = "../support", default-features = false} -frame-system = { path = "../system", default-features = false} +frame-support = { path = "../support", default-features = false } +frame-system = { path = "../system", default-features = false } frame-try-runtime = { path = "../try-runtime", default-features = false, optional = true } -sp-core = { path = "../../primitives/core", default-features = false} -sp-io = { path = "../../primitives/io", default-features = false} -sp-runtime = { path = "../../primitives/runtime", default-features = false} -sp-std = { path = "../../primitives/std", default-features = false} -sp-tracing = { path = "../../primitives/tracing", default-features = false} +sp-core = { path = "../../primitives/core", default-features = false } +sp-io = { path = "../../primitives/io", default-features = false } +sp-runtime = { path = "../../primitives/runtime", default-features = false } +sp-std = { path = "../../primitives/std", default-features = false } +sp-tracing = { path = "../../primitives/tracing", default-features = false } [dev-dependencies] array-bytes = "6.1" @@ -37,8 +37,8 @@ sp-io = { 
path = "../../primitives/io" } sp-version = { path = "../../primitives/version" } [features] -default = [ "std" ] -with-tracing = [ "sp-tracing/with-tracing" ] +default = ["std"] +with-tracing = ["sp-tracing/with-tracing"] std = [ "codec/std", "frame-support/std", diff --git a/substrate/frame/fast-unstake/Cargo.toml b/substrate/frame/fast-unstake/Cargo.toml index 2aa2e918f3e..4440a0c0f64 100644 --- a/substrate/frame/fast-unstake/Cargo.toml +++ b/substrate/frame/fast-unstake/Cargo.toml @@ -16,22 +16,22 @@ codec = { package = "parity-scale-codec", version = "3.6.1", default-features = log = { version = "0.4.17", default-features = false } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -frame-support = { path = "../support", default-features = false} -frame-system = { path = "../system", default-features = false} +frame-support = { path = "../support", default-features = false } +frame-system = { path = "../system", default-features = false } -sp-io = { path = "../../primitives/io", default-features = false} -sp-runtime = { path = "../../primitives/runtime", default-features = false} -sp-std = { path = "../../primitives/std", default-features = false} -sp-staking = { path = "../../primitives/staking", default-features = false} -frame-election-provider-support = { path = "../election-provider-support", default-features = false} +sp-io = { path = "../../primitives/io", default-features = false } +sp-runtime = { path = "../../primitives/runtime", default-features = false } +sp-std = { path = "../../primitives/std", default-features = false } +sp-staking = { path = "../../primitives/staking", default-features = false } +frame-election-provider-support = { path = "../election-provider-support", default-features = false } -frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true} +frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } docify = "0.2.6" [dev-dependencies] pallet-staking-reward-curve = { path = "../staking/reward-curve" } -sp-core = { path = "../../primitives/core", default-features = false} +sp-core = { path = "../../primitives/core", default-features = false } substrate-test-utils = { path = "../../test-utils" } sp-tracing = { path = "../../primitives/tracing" } pallet-staking = { path = "../staking" } @@ -39,7 +39,7 @@ pallet-balances = { path = "../balances" } pallet-timestamp = { path = "../timestamp" } [features] -default = [ "std" ] +default = ["std"] std = [ "codec/std", "frame-benchmarking/std", diff --git a/substrate/frame/glutton/Cargo.toml b/substrate/frame/glutton/Cargo.toml index 368fcab65cc..81388d0e0f5 100644 --- a/substrate/frame/glutton/Cargo.toml +++ b/substrate/frame/glutton/Cargo.toml @@ -17,19 +17,19 @@ blake2 = { version = "0.10.4", default-features = false } codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } log = { version = "0.4.14", default-features = false } -frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true} -frame-support = { path = "../support", default-features = false} -frame-system = { path = "../system", default-features = false} -sp-core = { path = "../../primitives/core", default-features = false} -sp-io = { path = "../../primitives/io", default-features = false} -sp-runtime = { path = "../../primitives/runtime", default-features = false} -sp-std = { path = 
"../../primitives/std", default-features = false} +frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } +frame-support = { path = "../support", default-features = false } +frame-system = { path = "../system", default-features = false } +sp-core = { path = "../../primitives/core", default-features = false } +sp-io = { path = "../../primitives/io", default-features = false } +sp-runtime = { path = "../../primitives/runtime", default-features = false } +sp-std = { path = "../../primitives/std", default-features = false } [dev-dependencies] pallet-balances = { path = "../balances" } [features] -default = [ "std" ] +default = ["std"] std = [ "blake2/std", "codec/std", diff --git a/substrate/frame/grandpa/Cargo.toml b/substrate/frame/grandpa/Cargo.toml index 5eacc21721b..b4444d4580c 100644 --- a/substrate/frame/grandpa/Cargo.toml +++ b/substrate/frame/grandpa/Cargo.toml @@ -16,19 +16,19 @@ targets = ["x86_64-unknown-linux-gnu"] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } log = { version = "0.4.17", default-features = false } scale-info = { version = "2.10.0", default-features = false, features = ["derive", "serde"] } -frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true} -frame-support = { path = "../support", default-features = false} -frame-system = { path = "../system", default-features = false} -pallet-authorship = { path = "../authorship", default-features = false} -pallet-session = { path = "../session", default-features = false} +frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } +frame-support = { path = "../support", default-features = false } +frame-system = { path = "../system", default-features = false } +pallet-authorship = { path = "../authorship", default-features = false } +pallet-session = { path = "../session", default-features = false } sp-application-crypto = { path = "../../primitives/application-crypto", default-features = false, features = ["serde"] } sp-consensus-grandpa = { path = "../../primitives/consensus/grandpa", default-features = false, features = ["serde"] } sp-core = { path = "../../primitives/core", default-features = false, features = ["serde"] } -sp-io = { path = "../../primitives/io", default-features = false} +sp-io = { path = "../../primitives/io", default-features = false } sp-runtime = { path = "../../primitives/runtime", default-features = false, features = ["serde"] } -sp-session = { path = "../../primitives/session", default-features = false} +sp-session = { path = "../../primitives/session", default-features = false } sp-staking = { path = "../../primitives/staking", default-features = false, features = ["serde"] } -sp-std = { path = "../../primitives/std", default-features = false} +sp-std = { path = "../../primitives/std", default-features = false } [dev-dependencies] grandpa = { package = "finality-grandpa", version = "0.16.2", features = ["derive-codec"] } @@ -42,7 +42,7 @@ pallet-timestamp = { path = "../timestamp" } sp-keyring = { path = "../../primitives/keyring" } [features] -default = [ "std" ] +default = ["std"] std = [ "codec/std", "frame-benchmarking?/std", diff --git a/substrate/frame/identity/Cargo.toml b/substrate/frame/identity/Cargo.toml index 309c0aab550..894365748ce 100644 --- a/substrate/frame/identity/Cargo.toml +++ b/substrate/frame/identity/Cargo.toml @@ -16,19 +16,19 @@ targets = ["x86_64-unknown-linux-gnu"] codec = { package = 
"parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive", "max-encoded-len"] } enumflags2 = { version = "0.7.7" } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true} -frame-support = { path = "../support", default-features = false} -frame-system = { path = "../system", default-features = false} -sp-io = { path = "../../primitives/io", default-features = false} -sp-runtime = { path = "../../primitives/runtime", default-features = false} -sp-std = { path = "../../primitives/std", default-features = false} +frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } +frame-support = { path = "../support", default-features = false } +frame-system = { path = "../system", default-features = false } +sp-io = { path = "../../primitives/io", default-features = false } +sp-runtime = { path = "../../primitives/runtime", default-features = false } +sp-std = { path = "../../primitives/std", default-features = false } [dev-dependencies] pallet-balances = { path = "../balances" } sp-core = { path = "../../primitives/core" } [features] -default = [ "std" ] +default = ["std"] std = [ "codec/std", "enumflags2/std", diff --git a/substrate/frame/im-online/Cargo.toml b/substrate/frame/im-online/Cargo.toml index d83ff540648..5ec260c9b5b 100644 --- a/substrate/frame/im-online/Cargo.toml +++ b/substrate/frame/im-online/Cargo.toml @@ -16,22 +16,22 @@ targets = ["x86_64-unknown-linux-gnu"] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } log = { version = "0.4.17", default-features = false } scale-info = { version = "2.10.0", default-features = false, features = ["derive", "serde"] } -frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true} -frame-support = { path = "../support", default-features = false} -frame-system = { path = "../system", default-features = false} -pallet-authorship = { path = "../authorship", default-features = false} +frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } +frame-support = { path = "../support", default-features = false } +frame-system = { path = "../system", default-features = false } +pallet-authorship = { path = "../authorship", default-features = false } sp-application-crypto = { path = "../../primitives/application-crypto", default-features = false, features = ["serde"] } sp-core = { path = "../../primitives/core", default-features = false, features = ["serde"] } -sp-io = { path = "../../primitives/io", default-features = false} +sp-io = { path = "../../primitives/io", default-features = false } sp-runtime = { path = "../../primitives/runtime", default-features = false, features = ["serde"] } sp-staking = { path = "../../primitives/staking", default-features = false, features = ["serde"] } -sp-std = { path = "../../primitives/std", default-features = false} +sp-std = { path = "../../primitives/std", default-features = false } [dev-dependencies] pallet-session = { path = "../session" } [features] -default = [ "std" ] +default = ["std"] std = [ "codec/std", "frame-benchmarking?/std", diff --git a/substrate/frame/indices/Cargo.toml b/substrate/frame/indices/Cargo.toml index d392522718a..4f12ecfcce3 100644 --- a/substrate/frame/indices/Cargo.toml +++ b/substrate/frame/indices/Cargo.toml @@ -15,20 +15,20 @@ targets = ["x86_64-unknown-linux-gnu"] 
[dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true} -frame-support = { path = "../support", default-features = false} -frame-system = { path = "../system", default-features = false} -sp-core = { path = "../../primitives/core", default-features = false} -sp-io = { path = "../../primitives/io", default-features = false} -sp-keyring = { path = "../../primitives/keyring", optional = true} -sp-runtime = { path = "../../primitives/runtime", default-features = false} -sp-std = { path = "../../primitives/std", default-features = false} +frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } +frame-support = { path = "../support", default-features = false } +frame-system = { path = "../system", default-features = false } +sp-core = { path = "../../primitives/core", default-features = false } +sp-io = { path = "../../primitives/io", default-features = false } +sp-keyring = { path = "../../primitives/keyring", optional = true } +sp-runtime = { path = "../../primitives/runtime", default-features = false } +sp-std = { path = "../../primitives/std", default-features = false } [dev-dependencies] pallet-balances = { path = "../balances" } [features] -default = [ "std" ] +default = ["std"] std = [ "codec/std", "frame-benchmarking?/std", diff --git a/substrate/frame/insecure-randomness-collective-flip/Cargo.toml b/substrate/frame/insecure-randomness-collective-flip/Cargo.toml index 07c5e3997d2..f7afbb95397 100644 --- a/substrate/frame/insecure-randomness-collective-flip/Cargo.toml +++ b/substrate/frame/insecure-randomness-collective-flip/Cargo.toml @@ -16,17 +16,17 @@ targets = ["x86_64-unknown-linux-gnu"] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } safe-mix = { version = "1.0", default-features = false } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -frame-support = { path = "../support", default-features = false} -frame-system = { path = "../system", default-features = false} -sp-runtime = { path = "../../primitives/runtime", default-features = false} -sp-std = { path = "../../primitives/std", default-features = false} +frame-support = { path = "../support", default-features = false } +frame-system = { path = "../system", default-features = false } +sp-runtime = { path = "../../primitives/runtime", default-features = false } +sp-std = { path = "../../primitives/std", default-features = false } [dev-dependencies] sp-core = { path = "../../primitives/core" } sp-io = { path = "../../primitives/io" } [features] -default = [ "std" ] +default = ["std"] std = [ "codec/std", "frame-support/std", diff --git a/substrate/frame/lottery/Cargo.toml b/substrate/frame/lottery/Cargo.toml index a4942abf243..bba17d2718c 100644 --- a/substrate/frame/lottery/Cargo.toml +++ b/substrate/frame/lottery/Cargo.toml @@ -16,11 +16,11 @@ codec = { package = "parity-scale-codec", version = "3.6.1", default-features = "derive", ] } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true} -frame-support = { path = "../support", default-features = false} -frame-system = { path = "../system", default-features = false} -sp-runtime = 
{ path = "../../primitives/runtime", default-features = false} -sp-std = { path = "../../primitives/std", default-features = false} +frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } +frame-support = { path = "../support", default-features = false } +frame-system = { path = "../system", default-features = false } +sp-runtime = { path = "../../primitives/runtime", default-features = false } +sp-std = { path = "../../primitives/std", default-features = false } [dev-dependencies] frame-support-test = { path = "../support/test" } @@ -29,7 +29,7 @@ sp-core = { path = "../../primitives/core" } sp-io = { path = "../../primitives/io" } [features] -default = [ "std" ] +default = ["std"] std = [ "codec/std", "frame-benchmarking?/std", diff --git a/substrate/frame/membership/Cargo.toml b/substrate/frame/membership/Cargo.toml index 18c771bf72c..f90d70e911d 100644 --- a/substrate/frame/membership/Cargo.toml +++ b/substrate/frame/membership/Cargo.toml @@ -16,16 +16,16 @@ targets = ["x86_64-unknown-linux-gnu"] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } log = { version = "0.4.17", default-features = false } scale-info = { version = "2.10.0", default-features = false, features = ["derive", "serde"] } -frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true} -frame-support = { path = "../support", default-features = false} -frame-system = { path = "../system", default-features = false} +frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } +frame-support = { path = "../support", default-features = false } +frame-system = { path = "../system", default-features = false } sp-core = { path = "../../primitives/core", default-features = false, features = ["serde"] } -sp-io = { path = "../../primitives/io", default-features = false} +sp-io = { path = "../../primitives/io", default-features = false } sp-runtime = { path = "../../primitives/runtime", default-features = false, features = ["serde"] } -sp-std = { path = "../../primitives/std", default-features = false} +sp-std = { path = "../../primitives/std", default-features = false } [features] -default = [ "std" ] +default = ["std"] std = [ "codec/std", "frame-benchmarking?/std", diff --git a/substrate/frame/merkle-mountain-range/Cargo.toml b/substrate/frame/merkle-mountain-range/Cargo.toml index 2c30af43b67..1f31e704928 100644 --- a/substrate/frame/merkle-mountain-range/Cargo.toml +++ b/substrate/frame/merkle-mountain-range/Cargo.toml @@ -15,14 +15,14 @@ targets = ["x86_64-unknown-linux-gnu"] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } log = { version = "0.4.17", default-features = false } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true} -frame-support = { path = "../support", default-features = false} -frame-system = { path = "../system", default-features = false} -sp-core = { path = "../../primitives/core", default-features = false} -sp-io = { path = "../../primitives/io", default-features = false} -sp-mmr-primitives = { path = "../../primitives/merkle-mountain-range", default-features = false} -sp-runtime = { path = "../../primitives/runtime", default-features = false} -sp-std = { path = "../../primitives/std", default-features = false} +frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } 
+frame-support = { path = "../support", default-features = false } +frame-system = { path = "../system", default-features = false } +sp-core = { path = "../../primitives/core", default-features = false } +sp-io = { path = "../../primitives/io", default-features = false } +sp-mmr-primitives = { path = "../../primitives/merkle-mountain-range", default-features = false } +sp-runtime = { path = "../../primitives/runtime", default-features = false } +sp-std = { path = "../../primitives/std", default-features = false } [dev-dependencies] array-bytes = "6.1" @@ -30,7 +30,7 @@ env_logger = "0.9" itertools = "0.10.3" [features] -default = [ "std" ] +default = ["std"] std = [ "codec/std", "frame-benchmarking?/std", diff --git a/substrate/frame/message-queue/Cargo.toml b/substrate/frame/message-queue/Cargo.toml index 98ca3f60cc8..148848f2bf0 100644 --- a/substrate/frame/message-queue/Cargo.toml +++ b/substrate/frame/message-queue/Cargo.toml @@ -14,16 +14,16 @@ scale-info = { version = "2.10.0", default-features = false, features = ["derive serde = { version = "1.0.193", optional = true, features = ["derive"] } log = { version = "0.4.17", default-features = false } -sp-core = { path = "../../primitives/core", default-features = false} -sp-io = { path = "../../primitives/io", default-features = false} -sp-runtime = { path = "../../primitives/runtime", default-features = false} -sp-std = { path = "../../primitives/std", default-features = false} -sp-arithmetic = { path = "../../primitives/arithmetic", default-features = false} -sp-weights = { path = "../../primitives/weights", default-features = false} +sp-core = { path = "../../primitives/core", default-features = false } +sp-io = { path = "../../primitives/io", default-features = false } +sp-runtime = { path = "../../primitives/runtime", default-features = false } +sp-std = { path = "../../primitives/std", default-features = false } +sp-arithmetic = { path = "../../primitives/arithmetic", default-features = false } +sp-weights = { path = "../../primitives/weights", default-features = false } -frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true} -frame-support = { path = "../support", default-features = false} -frame-system = { path = "../system", default-features = false} +frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } +frame-support = { path = "../support", default-features = false } +frame-system = { path = "../system", default-features = false } [dev-dependencies] sp-tracing = { path = "../../primitives/tracing" } @@ -31,7 +31,7 @@ rand = "0.8.5" rand_distr = "0.4.3" [features] -default = [ "std" ] +default = ["std"] std = [ "codec/std", "frame-benchmarking?/std", diff --git a/substrate/frame/mixnet/Cargo.toml b/substrate/frame/mixnet/Cargo.toml index 12597b8b5d5..de5f7411015 100644 --- a/substrate/frame/mixnet/Cargo.toml +++ b/substrate/frame/mixnet/Cargo.toml @@ -28,7 +28,7 @@ sp-runtime = { default-features = false, path = "../../primitives/runtime" } sp-std = { default-features = false, path = "../../primitives/std" } [features] -default = [ "std" ] +default = ["std"] std = [ "codec/std", "frame-benchmarking?/std", diff --git a/substrate/frame/multisig/Cargo.toml b/substrate/frame/multisig/Cargo.toml index a2ee608c33c..f1ff19c0e34 100644 --- a/substrate/frame/multisig/Cargo.toml +++ b/substrate/frame/multisig/Cargo.toml @@ -15,12 +15,12 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", 
default-features = false } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true} -frame-support = { path = "../support", default-features = false} -frame-system = { path = "../system", default-features = false} -sp-io = { path = "../../primitives/io", default-features = false} -sp-runtime = { path = "../../primitives/runtime", default-features = false} -sp-std = { path = "../../primitives/std", default-features = false} +frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } +frame-support = { path = "../support", default-features = false } +frame-system = { path = "../system", default-features = false } +sp-io = { path = "../../primitives/io", default-features = false } +sp-runtime = { path = "../../primitives/runtime", default-features = false } +sp-std = { path = "../../primitives/std", default-features = false } # third party log = { version = "0.4.17", default-features = false } @@ -29,7 +29,7 @@ log = { version = "0.4.17", default-features = false } pallet-balances = { path = "../balances" } [features] -default = [ "std" ] +default = ["std"] std = [ "codec/std", "frame-benchmarking?/std", diff --git a/substrate/frame/nft-fractionalization/Cargo.toml b/substrate/frame/nft-fractionalization/Cargo.toml index a2cb9a4aec9..19814d67d79 100644 --- a/substrate/frame/nft-fractionalization/Cargo.toml +++ b/substrate/frame/nft-fractionalization/Cargo.toml @@ -16,13 +16,13 @@ targets = ["x86_64-unknown-linux-gnu"] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } log = { version = "0.4.17", default-features = false } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true} -frame-support = { path = "../support", default-features = false} -frame-system = { path = "../system", default-features = false} -pallet-assets = { path = "../assets", default-features = false} -pallet-nfts = { path = "../nfts", default-features = false} -sp-runtime = { path = "../../primitives/runtime", default-features = false} -sp-std = { path = "../../primitives/std", default-features = false} +frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } +frame-support = { path = "../support", default-features = false } +frame-system = { path = "../system", default-features = false } +pallet-assets = { path = "../assets", default-features = false } +pallet-nfts = { path = "../nfts", default-features = false } +sp-runtime = { path = "../../primitives/runtime", default-features = false } +sp-std = { path = "../../primitives/std", default-features = false } [dev-dependencies] pallet-balances = { path = "../balances" } @@ -31,7 +31,7 @@ sp-io = { path = "../../primitives/io" } sp-std = { path = "../../primitives/std" } [features] -default = [ "std" ] +default = ["std"] std = [ "codec/std", "frame-benchmarking/std", diff --git a/substrate/frame/nfts/Cargo.toml b/substrate/frame/nfts/Cargo.toml index 2a3b2921c75..a70b55bc45e 100644 --- a/substrate/frame/nfts/Cargo.toml +++ b/substrate/frame/nfts/Cargo.toml @@ -17,20 +17,20 @@ codec = { package = "parity-scale-codec", version = "3.6.1", default-features = enumflags2 = { version = "0.7.7" } log = { version = "0.4.17", default-features = false } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } 
-frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true} -frame-support = { path = "../support", default-features = false} -frame-system = { path = "../system", default-features = false} -sp-core = { path = "../../primitives/core", default-features = false} -sp-io = { path = "../../primitives/io", default-features = false} -sp-runtime = { path = "../../primitives/runtime", default-features = false} -sp-std = { path = "../../primitives/std", default-features = false} +frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } +frame-support = { path = "../support", default-features = false } +frame-system = { path = "../system", default-features = false } +sp-core = { path = "../../primitives/core", default-features = false } +sp-io = { path = "../../primitives/io", default-features = false } +sp-runtime = { path = "../../primitives/runtime", default-features = false } +sp-std = { path = "../../primitives/std", default-features = false } [dev-dependencies] pallet-balances = { path = "../balances" } sp-keystore = { path = "../../primitives/keystore" } [features] -default = [ "std" ] +default = ["std"] std = [ "codec/std", "enumflags2/std", diff --git a/substrate/frame/nfts/runtime-api/Cargo.toml b/substrate/frame/nfts/runtime-api/Cargo.toml index 092edaaaa89..8e1424a588a 100644 --- a/substrate/frame/nfts/runtime-api/Cargo.toml +++ b/substrate/frame/nfts/runtime-api/Cargo.toml @@ -19,5 +19,5 @@ sp-api = { path = "../../../primitives/api", default-features = false } sp-std = { path = "../../../primitives/std", default-features = false } [features] -default = [ "std" ] -std = [ "codec/std", "pallet-nfts/std", "sp-api/std", "sp-std/std" ] +default = ["std"] +std = ["codec/std", "pallet-nfts/std", "sp-api/std", "sp-std/std"] diff --git a/substrate/frame/nicks/Cargo.toml b/substrate/frame/nicks/Cargo.toml index b8100d07435..8636ac34a13 100644 --- a/substrate/frame/nicks/Cargo.toml +++ b/substrate/frame/nicks/Cargo.toml @@ -15,18 +15,18 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -frame-support = { path = "../support", default-features = false} -frame-system = { path = "../system", default-features = false} -sp-io = { path = "../../primitives/io", default-features = false} -sp-runtime = { path = "../../primitives/runtime", default-features = false} -sp-std = { path = "../../primitives/std", default-features = false} +frame-support = { path = "../support", default-features = false } +frame-system = { path = "../system", default-features = false } +sp-io = { path = "../../primitives/io", default-features = false } +sp-runtime = { path = "../../primitives/runtime", default-features = false } +sp-std = { path = "../../primitives/std", default-features = false } [dev-dependencies] pallet-balances = { path = "../balances" } sp-core = { path = "../../primitives/core" } [features] -default = [ "std" ] +default = ["std"] std = [ "codec/std", "frame-support/std", diff --git a/substrate/frame/nis/Cargo.toml b/substrate/frame/nis/Cargo.toml index 986568ea722..ec251ef6536 100644 --- a/substrate/frame/nis/Cargo.toml +++ b/substrate/frame/nis/Cargo.toml @@ -15,20 +15,20 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } 
scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true} -frame-support = { path = "../support", default-features = false} -frame-system = { path = "../system", default-features = false} -sp-arithmetic = { path = "../../primitives/arithmetic", default-features = false} -sp-core = { path = "../../primitives/core", default-features = false} -sp-runtime = { path = "../../primitives/runtime", default-features = false} -sp-std = { path = "../../primitives/std", default-features = false} +frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } +frame-support = { path = "../support", default-features = false } +frame-system = { path = "../system", default-features = false } +sp-arithmetic = { path = "../../primitives/arithmetic", default-features = false } +sp-core = { path = "../../primitives/core", default-features = false } +sp-runtime = { path = "../../primitives/runtime", default-features = false } +sp-std = { path = "../../primitives/std", default-features = false } [dev-dependencies] pallet-balances = { path = "../balances" } sp-io = { path = "../../primitives/io" } [features] -default = [ "std" ] +default = ["std"] std = [ "codec/std", "frame-benchmarking?/std", diff --git a/substrate/frame/node-authorization/Cargo.toml b/substrate/frame/node-authorization/Cargo.toml index e5a504e2a0f..abc03a8b0f4 100644 --- a/substrate/frame/node-authorization/Cargo.toml +++ b/substrate/frame/node-authorization/Cargo.toml @@ -15,15 +15,15 @@ targets = ["x86_64-unknown-linux-gnu"] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } log = { version = "0.4.17", default-features = false } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -frame-support = { path = "../support", default-features = false} -frame-system = { path = "../system", default-features = false} -sp-core = { path = "../../primitives/core", default-features = false} -sp-io = { path = "../../primitives/io", default-features = false} -sp-runtime = { path = "../../primitives/runtime", default-features = false} -sp-std = { path = "../../primitives/std", default-features = false} +frame-support = { path = "../support", default-features = false } +frame-system = { path = "../system", default-features = false } +sp-core = { path = "../../primitives/core", default-features = false } +sp-io = { path = "../../primitives/io", default-features = false } +sp-runtime = { path = "../../primitives/runtime", default-features = false } +sp-std = { path = "../../primitives/std", default-features = false } [features] -default = [ "std" ] +default = ["std"] std = [ "codec/std", "frame-support/std", diff --git a/substrate/frame/nomination-pools/Cargo.toml b/substrate/frame/nomination-pools/Cargo.toml index 3c55822b9a5..bc24deb6f15 100644 --- a/substrate/frame/nomination-pools/Cargo.toml +++ b/substrate/frame/nomination-pools/Cargo.toml @@ -17,13 +17,13 @@ codec = { package = "parity-scale-codec", version = "3.6.1", default-features = scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } # FRAME -frame-support = { path = "../support", default-features = false} -frame-system = { path = "../system", default-features = false} -sp-runtime = { path = "../../primitives/runtime", default-features = false} -sp-std = { path = "../../primitives/std", default-features = false} -sp-staking = { 
path = "../../primitives/staking", default-features = false} -sp-core = { path = "../../primitives/core", default-features = false} -sp-io = { path = "../../primitives/io", default-features = false} +frame-support = { path = "../support", default-features = false } +frame-system = { path = "../system", default-features = false } +sp-runtime = { path = "../../primitives/runtime", default-features = false } +sp-std = { path = "../../primitives/std", default-features = false } +sp-staking = { path = "../../primitives/staking", default-features = false } +sp-core = { path = "../../primitives/core", default-features = false } +sp-io = { path = "../../primitives/io", default-features = false } log = { version = "0.4.0", default-features = false } # Optional: use for testing and/or fuzzing @@ -35,8 +35,8 @@ pallet-balances = { path = "../balances" } sp-tracing = { path = "../../primitives/tracing" } [features] -default = [ "std" ] -fuzzing = [ "pallet-balances", "sp-tracing" ] +default = ["std"] +fuzzing = ["pallet-balances", "sp-tracing"] std = [ "codec/std", "frame-support/std", diff --git a/substrate/frame/nomination-pools/benchmarking/Cargo.toml b/substrate/frame/nomination-pools/benchmarking/Cargo.toml index e8b18666815..d522dff82ba 100644 --- a/substrate/frame/nomination-pools/benchmarking/Cargo.toml +++ b/substrate/frame/nomination-pools/benchmarking/Cargo.toml @@ -18,29 +18,29 @@ codec = { package = "parity-scale-codec", version = "3.6.1", default-features = scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } # FRAME -frame-benchmarking = { path = "../../benchmarking", default-features = false} -frame-election-provider-support = { path = "../../election-provider-support", default-features = false} -frame-support = { path = "../../support", default-features = false} -frame-system = { path = "../../system", default-features = false} -pallet-bags-list = { path = "../../bags-list", default-features = false} -pallet-staking = { path = "../../staking", default-features = false} -pallet-nomination-pools = { path = "..", default-features = false} +frame-benchmarking = { path = "../../benchmarking", default-features = false } +frame-election-provider-support = { path = "../../election-provider-support", default-features = false } +frame-support = { path = "../../support", default-features = false } +frame-system = { path = "../../system", default-features = false } +pallet-bags-list = { path = "../../bags-list", default-features = false } +pallet-staking = { path = "../../staking", default-features = false } +pallet-nomination-pools = { path = "..", default-features = false } # Substrate Primitives -sp-runtime = { path = "../../../primitives/runtime", default-features = false} -sp-runtime-interface = { path = "../../../primitives/runtime-interface", default-features = false} -sp-staking = { path = "../../../primitives/staking", default-features = false} -sp-std = { path = "../../../primitives/std", default-features = false} +sp-runtime = { path = "../../../primitives/runtime", default-features = false } +sp-runtime-interface = { path = "../../../primitives/runtime-interface", default-features = false } +sp-staking = { path = "../../../primitives/staking", default-features = false } +sp-std = { path = "../../../primitives/std", default-features = false } [dev-dependencies] -pallet-balances = { path = "../../balances", default-features = false} +pallet-balances = { path = "../../balances", default-features = false } pallet-timestamp = { path = "../../timestamp" } 
pallet-staking-reward-curve = { path = "../../staking/reward-curve" } sp-core = { path = "../../../primitives/core" } sp-io = { path = "../../../primitives/io" } [features] -default = [ "std" ] +default = ["std"] std = [ "codec/std", diff --git a/substrate/frame/nomination-pools/runtime-api/Cargo.toml b/substrate/frame/nomination-pools/runtime-api/Cargo.toml index c3aa8035c95..86d1496a41b 100644 --- a/substrate/frame/nomination-pools/runtime-api/Cargo.toml +++ b/substrate/frame/nomination-pools/runtime-api/Cargo.toml @@ -14,10 +14,10 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } -sp-api = { path = "../../../primitives/api", default-features = false} -sp-std = { path = "../../../primitives/std", default-features = false} -pallet-nomination-pools = { path = "..", default-features = false} +sp-api = { path = "../../../primitives/api", default-features = false } +sp-std = { path = "../../../primitives/std", default-features = false } +pallet-nomination-pools = { path = "..", default-features = false } [features] -default = [ "std" ] -std = [ "codec/std", "pallet-nomination-pools/std", "sp-api/std", "sp-std/std" ] +default = ["std"] +std = ["codec/std", "pallet-nomination-pools/std", "sp-api/std", "sp-std/std"] diff --git a/substrate/frame/offences/Cargo.toml b/substrate/frame/offences/Cargo.toml index c1cb950069a..0f153d9eca6 100644 --- a/substrate/frame/offences/Cargo.toml +++ b/substrate/frame/offences/Cargo.toml @@ -17,19 +17,19 @@ codec = { package = "parity-scale-codec", version = "3.6.1", default-features = log = { version = "0.4.17", default-features = false } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } serde = { version = "1.0.193", optional = true } -frame-support = { path = "../support", default-features = false} -frame-system = { path = "../system", default-features = false} -pallet-balances = { path = "../balances", default-features = false} -sp-runtime = { path = "../../primitives/runtime", default-features = false} -sp-staking = { path = "../../primitives/staking", default-features = false} -sp-std = { path = "../../primitives/std", default-features = false} +frame-support = { path = "../support", default-features = false } +frame-system = { path = "../system", default-features = false } +pallet-balances = { path = "../balances", default-features = false } +sp-runtime = { path = "../../primitives/runtime", default-features = false } +sp-staking = { path = "../../primitives/staking", default-features = false } +sp-std = { path = "../../primitives/std", default-features = false } [dev-dependencies] sp-core = { path = "../../primitives/core" } sp-io = { path = "../../primitives/io" } [features] -default = [ "std" ] +default = ["std"] std = [ "codec/std", "frame-support/std", diff --git a/substrate/frame/offences/benchmarking/Cargo.toml b/substrate/frame/offences/benchmarking/Cargo.toml index acd8447c054..4de239296a9 100644 --- a/substrate/frame/offences/benchmarking/Cargo.toml +++ b/substrate/frame/offences/benchmarking/Cargo.toml @@ -15,20 +15,20 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -frame-benchmarking = { path = "../../benchmarking", default-features = false} -frame-election-provider-support = { path = 
"../../election-provider-support", default-features = false} -frame-support = { path = "../../support", default-features = false} -frame-system = { path = "../../system", default-features = false} -pallet-babe = { path = "../../babe", default-features = false} -pallet-balances = { path = "../../balances", default-features = false} -pallet-grandpa = { path = "../../grandpa", default-features = false} -pallet-im-online = { path = "../../im-online", default-features = false} -pallet-offences = { path = "..", default-features = false} -pallet-session = { path = "../../session", default-features = false} -pallet-staking = { path = "../../staking", default-features = false} -sp-runtime = { path = "../../../primitives/runtime", default-features = false} -sp-staking = { path = "../../../primitives/staking", default-features = false} -sp-std = { path = "../../../primitives/std", default-features = false} +frame-benchmarking = { path = "../../benchmarking", default-features = false } +frame-election-provider-support = { path = "../../election-provider-support", default-features = false } +frame-support = { path = "../../support", default-features = false } +frame-system = { path = "../../system", default-features = false } +pallet-babe = { path = "../../babe", default-features = false } +pallet-balances = { path = "../../balances", default-features = false } +pallet-grandpa = { path = "../../grandpa", default-features = false } +pallet-im-online = { path = "../../im-online", default-features = false } +pallet-offences = { path = "..", default-features = false } +pallet-session = { path = "../../session", default-features = false } +pallet-staking = { path = "../../staking", default-features = false } +sp-runtime = { path = "../../../primitives/runtime", default-features = false } +sp-staking = { path = "../../../primitives/staking", default-features = false } +sp-std = { path = "../../../primitives/std", default-features = false } log = { version = "0.4.17", default-features = false } [dev-dependencies] @@ -38,7 +38,7 @@ sp-core = { path = "../../../primitives/core" } sp-io = { path = "../../../primitives/io" } [features] -default = [ "std" ] +default = ["std"] std = [ "codec/std", "frame-benchmarking/std", diff --git a/substrate/frame/paged-list/Cargo.toml b/substrate/frame/paged-list/Cargo.toml index 4bc3dd6a3c7..676e1866a43 100644 --- a/substrate/frame/paged-list/Cargo.toml +++ b/substrate/frame/paged-list/Cargo.toml @@ -12,22 +12,22 @@ repository.workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = [ "derive"] } +codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] } docify = "0.2.6" scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true} -frame-support = { path = "../support", default-features = false} -frame-system = { path = "../system", default-features = false} +frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } +frame-support = { path = "../support", default-features = false } +frame-system = { path = "../system", default-features = false } -sp-runtime = { path = "../../primitives/runtime", default-features = false} -sp-std = { path = "../../primitives/std", default-features = false} +sp-runtime = { path = "../../primitives/runtime", default-features = false 
} +sp-std = { path = "../../primitives/std", default-features = false } sp-core = { path = "../../primitives/core", default-features = false } sp-io = { path = "../../primitives/io", default-features = false } -sp-metadata-ir = { path = "../../primitives/metadata-ir", default-features = false, optional = true} +sp-metadata-ir = { path = "../../primitives/metadata-ir", default-features = false, optional = true } [features] -default = [ "std" ] +default = ["std"] std = [ "codec/std", @@ -55,4 +55,4 @@ try-runtime = [ "sp-runtime/try-runtime", ] -frame-metadata = [ "sp-metadata-ir" ] +frame-metadata = ["sp-metadata-ir"] diff --git a/substrate/frame/paged-list/fuzzer/Cargo.toml b/substrate/frame/paged-list/fuzzer/Cargo.toml index d96c0348cf4..d6590373813 100644 --- a/substrate/frame/paged-list/fuzzer/Cargo.toml +++ b/substrate/frame/paged-list/fuzzer/Cargo.toml @@ -17,6 +17,6 @@ path = "src/paged_list.rs" arbitrary = "1.3.0" honggfuzz = "0.5.49" -frame-support = { path = "../../support", default-features = false, features = [ "std" ]} -sp-io = { path = "../../../primitives/io", default-features = false, features = [ "std" ] } -pallet-paged-list = { path = "..", default-features = false, features = [ "std" ] } +frame-support = { path = "../../support", default-features = false, features = ["std"] } +sp-io = { path = "../../../primitives/io", default-features = false, features = ["std"] } +pallet-paged-list = { path = "..", default-features = false, features = ["std"] } diff --git a/substrate/frame/preimage/Cargo.toml b/substrate/frame/preimage/Cargo.toml index a80ccd5a40d..1806976ac96 100644 --- a/substrate/frame/preimage/Cargo.toml +++ b/substrate/frame/preimage/Cargo.toml @@ -11,21 +11,21 @@ description = "FRAME pallet for storing preimages of hashes" [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true} -frame-support = { path = "../support", default-features = false} -frame-system = { path = "../system", default-features = false} -sp-core = { path = "../../primitives/core", default-features = false, optional = true} -sp-io = { path = "../../primitives/io", default-features = false} -sp-runtime = { path = "../../primitives/runtime", default-features = false} -sp-std = { path = "../../primitives/std", default-features = false} +frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } +frame-support = { path = "../support", default-features = false } +frame-system = { path = "../system", default-features = false } +sp-core = { path = "../../primitives/core", default-features = false, optional = true } +sp-io = { path = "../../primitives/io", default-features = false } +sp-runtime = { path = "../../primitives/runtime", default-features = false } +sp-std = { path = "../../primitives/std", default-features = false } log = { version = "0.4.17", default-features = false } [dev-dependencies] pallet-balances = { path = "../balances" } -sp-core = { path = "../../primitives/core", default-features = false} +sp-core = { path = "../../primitives/core", default-features = false } [features] -default = [ "std" ] +default = ["std"] runtime-benchmarks = [ "frame-benchmarking", "frame-benchmarking/runtime-benchmarks", diff --git a/substrate/frame/proxy/Cargo.toml b/substrate/frame/proxy/Cargo.toml index 647193fad8a..00a2692a820 
100644 --- a/substrate/frame/proxy/Cargo.toml +++ b/substrate/frame/proxy/Cargo.toml @@ -15,12 +15,12 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["max-encoded-len"] } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true} -frame-support = { path = "../support", default-features = false} -frame-system = { path = "../system", default-features = false} -sp-io = { path = "../../primitives/io", default-features = false} -sp-runtime = { path = "../../primitives/runtime", default-features = false} -sp-std = { path = "../../primitives/std", default-features = false} +frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } +frame-support = { path = "../support", default-features = false } +frame-system = { path = "../system", default-features = false } +sp-io = { path = "../../primitives/io", default-features = false } +sp-runtime = { path = "../../primitives/runtime", default-features = false } +sp-std = { path = "../../primitives/std", default-features = false } [dev-dependencies] pallet-balances = { path = "../balances" } @@ -28,7 +28,7 @@ pallet-utility = { path = "../utility" } sp-core = { path = "../../primitives/core" } [features] -default = [ "std" ] +default = ["std"] std = [ "codec/std", "frame-benchmarking?/std", diff --git a/substrate/frame/ranked-collective/Cargo.toml b/substrate/frame/ranked-collective/Cargo.toml index 236489c54b5..145eff3b0ee 100644 --- a/substrate/frame/ranked-collective/Cargo.toml +++ b/substrate/frame/ranked-collective/Cargo.toml @@ -16,17 +16,17 @@ targets = ["x86_64-unknown-linux-gnu"] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } log = { version = "0.4.16", default-features = false } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true} -frame-support = { path = "../support", default-features = false} -frame-system = { path = "../system", default-features = false} -sp-arithmetic = { path = "../../primitives/arithmetic", default-features = false} -sp-core = { path = "../../primitives/core", default-features = false} -sp-io = { path = "../../primitives/io", default-features = false} -sp-runtime = { path = "../../primitives/runtime", default-features = false} -sp-std = { path = "../../primitives/std", default-features = false} +frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } +frame-support = { path = "../support", default-features = false } +frame-system = { path = "../system", default-features = false } +sp-arithmetic = { path = "../../primitives/arithmetic", default-features = false } +sp-core = { path = "../../primitives/core", default-features = false } +sp-io = { path = "../../primitives/io", default-features = false } +sp-runtime = { path = "../../primitives/runtime", default-features = false } +sp-std = { path = "../../primitives/std", default-features = false } [features] -default = [ "std" ] +default = ["std"] std = [ "codec/std", "frame-benchmarking?/std", diff --git a/substrate/frame/recovery/Cargo.toml b/substrate/frame/recovery/Cargo.toml index 8e240546fdd..d2cd2d1a4ca 100644 --- a/substrate/frame/recovery/Cargo.toml +++ 
b/substrate/frame/recovery/Cargo.toml @@ -15,26 +15,26 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true} -frame-support = { path = "../support", default-features = false} -frame-system = { path = "../system", default-features = false} -sp-io = { path = "../../primitives/io", default-features = false} -sp-runtime = { path = "../../primitives/runtime", default-features = false} -sp-std = { path = "../../primitives/std", default-features = false} +frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } +frame-support = { path = "../support", default-features = false } +frame-system = { path = "../system", default-features = false } +sp-io = { path = "../../primitives/io", default-features = false } +sp-runtime = { path = "../../primitives/runtime", default-features = false } +sp-std = { path = "../../primitives/std", default-features = false } [dev-dependencies] pallet-balances = { path = "../balances" } sp-core = { path = "../../primitives/core" } [features] -default = [ "std" ] +default = ["std"] runtime-benchmarks = [ - 'frame-benchmarking', "frame-benchmarking/runtime-benchmarks", "frame-support/runtime-benchmarks", "frame-system/runtime-benchmarks", "pallet-balances/runtime-benchmarks", "sp-runtime/runtime-benchmarks", + 'frame-benchmarking', ] std = [ "codec/std", diff --git a/substrate/frame/referenda/Cargo.toml b/substrate/frame/referenda/Cargo.toml index d892fa32a6a..1747d0ac22b 100644 --- a/substrate/frame/referenda/Cargo.toml +++ b/substrate/frame/referenda/Cargo.toml @@ -19,13 +19,13 @@ codec = { package = "parity-scale-codec", version = "3.6.1", default-features = ] } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } serde = { version = "1.0.193", features = ["derive"], optional = true } -sp-arithmetic = { path = "../../primitives/arithmetic", default-features = false} -frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true} -frame-support = { path = "../support", default-features = false} -frame-system = { path = "../system", default-features = false} -sp-io = { path = "../../primitives/io", default-features = false} -sp-runtime = { path = "../../primitives/runtime", default-features = false} -sp-std = { path = "../../primitives/std", default-features = false} +sp-arithmetic = { path = "../../primitives/arithmetic", default-features = false } +frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } +frame-support = { path = "../support", default-features = false } +frame-system = { path = "../system", default-features = false } +sp-io = { path = "../../primitives/io", default-features = false } +sp-runtime = { path = "../../primitives/runtime", default-features = false } +sp-std = { path = "../../primitives/std", default-features = false } log = { version = "0.4.17", default-features = false } [dev-dependencies] @@ -36,7 +36,7 @@ pallet-scheduler = { path = "../scheduler" } sp-core = { path = "../../primitives/core" } [features] -default = [ "std" ] +default = ["std"] std = [ "codec/std", "frame-benchmarking?/std", diff --git a/substrate/frame/remark/Cargo.toml b/substrate/frame/remark/Cargo.toml index fe80d03c8d2..9b0c6870d05 100644 --- 
a/substrate/frame/remark/Cargo.toml +++ b/substrate/frame/remark/Cargo.toml @@ -16,19 +16,19 @@ targets = ["x86_64-unknown-linux-gnu"] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } serde = { version = "1.0.193", optional = true } -frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true} -frame-support = { path = "../support", default-features = false} -frame-system = { path = "../system", default-features = false} -sp-core = { path = "../../primitives/core", default-features = false} -sp-io = { path = "../../primitives/io", default-features = false} -sp-runtime = { path = "../../primitives/runtime", default-features = false} -sp-std = { path = "../../primitives/std", default-features = false} +frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } +frame-support = { path = "../support", default-features = false } +frame-system = { path = "../system", default-features = false } +sp-core = { path = "../../primitives/core", default-features = false } +sp-io = { path = "../../primitives/io", default-features = false } +sp-runtime = { path = "../../primitives/runtime", default-features = false } +sp-std = { path = "../../primitives/std", default-features = false } [dev-dependencies] -sp-core = { path = "../../primitives/core", default-features = false} +sp-core = { path = "../../primitives/core", default-features = false } [features] -default = [ "std" ] +default = ["std"] runtime-benchmarks = [ "frame-benchmarking/runtime-benchmarks", "frame-support/runtime-benchmarks", diff --git a/substrate/frame/root-offences/Cargo.toml b/substrate/frame/root-offences/Cargo.toml index 8e6fddb4335..a17bd51684e 100644 --- a/substrate/frame/root-offences/Cargo.toml +++ b/substrate/frame/root-offences/Cargo.toml @@ -16,13 +16,13 @@ targets = ["x86_64-unknown-linux-gnu"] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -pallet-session = { path = "../session", default-features = false , features = [ "historical" ]} -pallet-staking = { path = "../staking", default-features = false} +pallet-session = { path = "../session", default-features = false, features = ["historical"] } +pallet-staking = { path = "../staking", default-features = false } -frame-support = { path = "../support", default-features = false} -frame-system = { path = "../system", default-features = false} +frame-support = { path = "../support", default-features = false } +frame-system = { path = "../system", default-features = false } sp-runtime = { path = "../../primitives/runtime" } -sp-staking = { path = "../../primitives/staking", default-features = false} +sp-staking = { path = "../../primitives/staking", default-features = false } [dev-dependencies] pallet-balances = { path = "../balances" } @@ -30,8 +30,8 @@ pallet-timestamp = { path = "../timestamp" } pallet-staking-reward-curve = { path = "../staking/reward-curve" } sp-core = { path = "../../primitives/core" } -sp-io = { path = "../../primitives/io", default-features = false} -sp-std = { path = "../../primitives/std", default-features = false} +sp-io = { path = "../../primitives/io", default-features = false } +sp-std = { path = "../../primitives/std", default-features = false } frame-election-provider-support = { path = "../election-provider-support" 
} @@ -56,7 +56,7 @@ try-runtime = [ "pallet-timestamp/try-runtime", "sp-runtime/try-runtime", ] -default = [ "std" ] +default = ["std"] std = [ "codec/std", "frame-election-provider-support/std", diff --git a/substrate/frame/root-testing/Cargo.toml b/substrate/frame/root-testing/Cargo.toml index 7837289cec5..f4e914c86b1 100644 --- a/substrate/frame/root-testing/Cargo.toml +++ b/substrate/frame/root-testing/Cargo.toml @@ -15,12 +15,12 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -frame-support = { path = "../support", default-features = false} -frame-system = { path = "../system", default-features = false} -sp-core = { path = "../../primitives/core", default-features = false} -sp-io = { path = "../../primitives/io", default-features = false} -sp-runtime = { path = "../../primitives/runtime", default-features = false} -sp-std = { path = "../../primitives/std", default-features = false} +frame-support = { path = "../support", default-features = false } +frame-system = { path = "../system", default-features = false } +sp-core = { path = "../../primitives/core", default-features = false } +sp-io = { path = "../../primitives/io", default-features = false } +sp-runtime = { path = "../../primitives/runtime", default-features = false } +sp-std = { path = "../../primitives/std", default-features = false } [features] try-runtime = [ @@ -28,7 +28,7 @@ try-runtime = [ "frame-system/try-runtime", "sp-runtime/try-runtime", ] -default = [ "std" ] +default = ["std"] std = [ "codec/std", "frame-support/std", diff --git a/substrate/frame/safe-mode/Cargo.toml b/substrate/frame/safe-mode/Cargo.toml index f7b4ea4dd8c..d33e0b7144a 100644 --- a/substrate/frame/safe-mode/Cargo.toml +++ b/substrate/frame/safe-mode/Cargo.toml @@ -14,16 +14,16 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.2.2", default-features = false, features = ["derive"] } docify = "0.2.6" -frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true} -frame-support = { path = "../support", default-features = false} -frame-system = { path = "../system", default-features = false} +frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } +frame-support = { path = "../support", default-features = false } +frame-system = { path = "../system", default-features = false } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -sp-arithmetic = { path = "../../primitives/arithmetic", default-features = false} -sp-runtime = { path = "../../primitives/runtime", default-features = false} -sp-std = { path = "../../primitives/std", default-features = false} +sp-arithmetic = { path = "../../primitives/arithmetic", default-features = false } +sp-runtime = { path = "../../primitives/runtime", default-features = false } +sp-std = { path = "../../primitives/std", default-features = false } pallet-balances = { path = "../balances", default-features = false, optional = true } -pallet-utility = { path = "../utility", default-features = false, optional = true } -pallet-proxy = { path = "../proxy", default-features = false, optional = true } +pallet-utility = { path = "../utility", default-features = false, optional = true } +pallet-proxy = { path = "../proxy", default-features = false, optional = true } 
[dev-dependencies] sp-core = { path = "../../primitives/core" } @@ -34,7 +34,7 @@ pallet-proxy = { path = "../proxy" } frame-support = { path = "../support", features = ["experimental"] } [features] -default = [ "std" ] +default = ["std"] std = [ "codec/std", "frame-benchmarking/std", diff --git a/substrate/frame/salary/Cargo.toml b/substrate/frame/salary/Cargo.toml index 6c66f01082d..18636a60cdb 100644 --- a/substrate/frame/salary/Cargo.toml +++ b/substrate/frame/salary/Cargo.toml @@ -16,17 +16,17 @@ targets = ["x86_64-unknown-linux-gnu"] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } log = { version = "0.4.16", default-features = false } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true} -frame-support = { path = "../support", default-features = false} -frame-system = { path = "../system", default-features = false} -sp-arithmetic = { path = "../../primitives/arithmetic", default-features = false} -sp-core = { path = "../../primitives/core", default-features = false} -sp-io = { path = "../../primitives/io", default-features = false} -sp-runtime = { path = "../../primitives/runtime", default-features = false} -sp-std = { path = "../../primitives/std", default-features = false} +frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } +frame-support = { path = "../support", default-features = false } +frame-system = { path = "../system", default-features = false } +sp-arithmetic = { path = "../../primitives/arithmetic", default-features = false } +sp-core = { path = "../../primitives/core", default-features = false } +sp-io = { path = "../../primitives/io", default-features = false } +sp-runtime = { path = "../../primitives/runtime", default-features = false } +sp-std = { path = "../../primitives/std", default-features = false } [features] -default = [ "std" ] +default = ["std"] std = [ "codec/std", "frame-benchmarking?/std", diff --git a/substrate/frame/scheduler/Cargo.toml b/substrate/frame/scheduler/Cargo.toml index 6aa81baf7ac..1dd2e53d054 100644 --- a/substrate/frame/scheduler/Cargo.toml +++ b/substrate/frame/scheduler/Cargo.toml @@ -13,22 +13,22 @@ readme = "README.md" codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } log = { version = "0.4.17", default-features = false } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true} -frame-support = { path = "../support", default-features = false} -frame-system = { path = "../system", default-features = false} -sp-io = { path = "../../primitives/io", default-features = false} -sp-runtime = { path = "../../primitives/runtime", default-features = false} -sp-std = { path = "../../primitives/std", default-features = false} -sp-weights = { path = "../../primitives/weights", default-features = false} +frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } +frame-support = { path = "../support", default-features = false } +frame-system = { path = "../system", default-features = false } +sp-io = { path = "../../primitives/io", default-features = false } +sp-runtime = { path = "../../primitives/runtime", default-features = false } +sp-std = { path = "../../primitives/std", default-features = false } 
+sp-weights = { path = "../../primitives/weights", default-features = false } docify = "0.2.6" [dev-dependencies] pallet-preimage = { path = "../preimage" } -sp-core = { path = "../../primitives/core", default-features = false} +sp-core = { path = "../../primitives/core", default-features = false } substrate-test-utils = { path = "../../test-utils" } [features] -default = [ "std" ] +default = ["std"] runtime-benchmarks = [ "frame-benchmarking", "frame-benchmarking/runtime-benchmarks", diff --git a/substrate/frame/scored-pool/Cargo.toml b/substrate/frame/scored-pool/Cargo.toml index 81707382693..8293c81f590 100644 --- a/substrate/frame/scored-pool/Cargo.toml +++ b/substrate/frame/scored-pool/Cargo.toml @@ -15,18 +15,18 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -frame-support = { path = "../support", default-features = false} -frame-system = { path = "../system", default-features = false} -sp-io = { path = "../../primitives/io", default-features = false} -sp-runtime = { path = "../../primitives/runtime", default-features = false} -sp-std = { path = "../../primitives/std", default-features = false} +frame-support = { path = "../support", default-features = false } +frame-system = { path = "../system", default-features = false } +sp-io = { path = "../../primitives/io", default-features = false } +sp-runtime = { path = "../../primitives/runtime", default-features = false } +sp-std = { path = "../../primitives/std", default-features = false } [dev-dependencies] pallet-balances = { path = "../balances" } sp-core = { path = "../../primitives/core" } [features] -default = [ "std" ] +default = ["std"] std = [ "codec/std", "frame-support/std", diff --git a/substrate/frame/session/Cargo.toml b/substrate/frame/session/Cargo.toml index 246dec63bba..0a997f6ddb3 100644 --- a/substrate/frame/session/Cargo.toml +++ b/substrate/frame/session/Cargo.toml @@ -17,21 +17,21 @@ codec = { package = "parity-scale-codec", version = "3.6.1", default-features = impl-trait-for-tuples = "0.2.2" log = { version = "0.4.17", default-features = false } scale-info = { version = "2.10.0", default-features = false, features = ["derive", "serde"] } -frame-support = { path = "../support", default-features = false} -frame-system = { path = "../system", default-features = false} -pallet-timestamp = { path = "../timestamp", default-features = false} +frame-support = { path = "../support", default-features = false } +frame-system = { path = "../system", default-features = false } +pallet-timestamp = { path = "../timestamp", default-features = false } sp-core = { path = "../../primitives/core", default-features = false, features = ["serde"] } -sp-io = { path = "../../primitives/io", default-features = false} +sp-io = { path = "../../primitives/io", default-features = false } sp-runtime = { path = "../../primitives/runtime", default-features = false, features = ["serde"] } -sp-session = { path = "../../primitives/session", default-features = false} +sp-session = { path = "../../primitives/session", default-features = false } sp-staking = { path = "../../primitives/staking", default-features = false, features = ["serde"] } -sp-std = { path = "../../primitives/std", default-features = false} -sp-trie = { path = "../../primitives/trie", default-features = false, optional = true} -sp-state-machine = { path = 
"../../primitives/state-machine", default-features = false} +sp-std = { path = "../../primitives/std", default-features = false } +sp-trie = { path = "../../primitives/trie", default-features = false, optional = true } +sp-state-machine = { path = "../../primitives/state-machine", default-features = false } [features] -default = [ "historical", "std" ] -historical = [ "sp-trie" ] +default = ["historical", "std"] +historical = ["sp-trie"] std = [ "codec/std", "frame-support/std", diff --git a/substrate/frame/session/benchmarking/Cargo.toml b/substrate/frame/session/benchmarking/Cargo.toml index 87f08985138..db2b8b72209 100644 --- a/substrate/frame/session/benchmarking/Cargo.toml +++ b/substrate/frame/session/benchmarking/Cargo.toml @@ -15,14 +15,14 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } rand = { version = "0.8.5", default-features = false, features = ["std_rng"] } -frame-benchmarking = { path = "../../benchmarking", default-features = false} -frame-support = { path = "../../support", default-features = false} -frame-system = { path = "../../system", default-features = false} -pallet-session = { path = "..", default-features = false} -pallet-staking = { path = "../../staking", default-features = false} -sp-runtime = { path = "../../../primitives/runtime", default-features = false} -sp-session = { path = "../../../primitives/session", default-features = false} -sp-std = { path = "../../../primitives/std", default-features = false} +frame-benchmarking = { path = "../../benchmarking", default-features = false } +frame-support = { path = "../../support", default-features = false } +frame-system = { path = "../../system", default-features = false } +pallet-session = { path = "..", default-features = false } +pallet-staking = { path = "../../staking", default-features = false } +sp-runtime = { path = "../../../primitives/runtime", default-features = false } +sp-session = { path = "../../../primitives/session", default-features = false } +sp-std = { path = "../../../primitives/std", default-features = false } [dev-dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", features = ["derive"] } @@ -35,7 +35,7 @@ sp-core = { path = "../../../primitives/core" } sp-io = { path = "../../../primitives/io" } [features] -default = [ "std" ] +default = ["std"] std = [ "frame-benchmarking/std", "frame-election-provider-support/std", diff --git a/substrate/frame/society/Cargo.toml b/substrate/frame/society/Cargo.toml index 654447e6893..045993290af 100644 --- a/substrate/frame/society/Cargo.toml +++ b/substrate/frame/society/Cargo.toml @@ -18,13 +18,13 @@ rand_chacha = { version = "0.2", default-features = false } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } -sp-std = { path = "../../primitives/std", default-features = false} -sp-io = { path = "../../primitives/io", default-features = false} -sp-arithmetic = { path = "../../primitives/arithmetic", default-features = false} -sp-runtime = { path = "../../primitives/runtime", default-features = false} -frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true} -frame-support = { path = "../support", default-features = false} -frame-system = { path = "../system", default-features = false} +sp-std = { path = "../../primitives/std", default-features = false } 
+sp-io = { path = "../../primitives/io", default-features = false } +sp-arithmetic = { path = "../../primitives/arithmetic", default-features = false } +sp-runtime = { path = "../../primitives/runtime", default-features = false } +frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } +frame-support = { path = "../support", default-features = false } +frame-system = { path = "../system", default-features = false } [dev-dependencies] frame-support-test = { path = "../support/test" } @@ -33,7 +33,7 @@ sp-core = { path = "../../primitives/core" } sp-io = { path = "../../primitives/io" } [features] -default = [ "std" ] +default = ["std"] std = [ "codec/std", "frame-benchmarking?/std", diff --git a/substrate/frame/staking/Cargo.toml b/substrate/frame/staking/Cargo.toml index 8fb0025e58f..1510d90ed58 100644 --- a/substrate/frame/staking/Cargo.toml +++ b/substrate/frame/staking/Cargo.toml @@ -13,23 +13,23 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -serde = { version = "1.0.193", default-features = false, features = ["alloc", "derive"]} +serde = { version = "1.0.193", default-features = false, features = ["alloc", "derive"] } codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = [ "derive", ] } scale-info = { version = "2.10.0", default-features = false, features = ["derive", "serde"] } -sp-io = { path = "../../primitives/io", default-features = false} +sp-io = { path = "../../primitives/io", default-features = false } sp-runtime = { path = "../../primitives/runtime", default-features = false, features = ["serde"] } sp-staking = { path = "../../primitives/staking", default-features = false, features = ["serde"] } -sp-std = { path = "../../primitives/std", default-features = false} -frame-support = { path = "../support", default-features = false} -frame-system = { path = "../system", default-features = false} +sp-std = { path = "../../primitives/std", default-features = false } +frame-support = { path = "../support", default-features = false } +frame-system = { path = "../system", default-features = false } pallet-session = { path = "../session", default-features = false, features = [ "historical", -]} -pallet-authorship = { path = "../authorship", default-features = false} +] } +pallet-authorship = { path = "../authorship", default-features = false } sp-application-crypto = { path = "../../primitives/application-crypto", default-features = false, features = ["serde"] } -frame-election-provider-support = { path = "../election-provider-support", default-features = false} +frame-election-provider-support = { path = "../election-provider-support", default-features = false } log = { version = "0.4.17", default-features = false } # Optional imports for benchmarking @@ -50,7 +50,7 @@ frame-election-provider-support = { path = "../election-provider-support" } rand_chacha = { version = "0.2" } [features] -default = [ "std" ] +default = ["std"] std = [ "codec/std", "frame-benchmarking?/std", diff --git a/substrate/frame/staking/reward-fn/Cargo.toml b/substrate/frame/staking/reward-fn/Cargo.toml index 25f4c33dd62..001c2b62656 100644 --- a/substrate/frame/staking/reward-fn/Cargo.toml +++ b/substrate/frame/staking/reward-fn/Cargo.toml @@ -15,8 +15,8 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] log = { version = "0.4.17", default-features = false } -sp-arithmetic = { path = "../../../primitives/arithmetic", default-features = false} +sp-arithmetic = { path = 
"../../../primitives/arithmetic", default-features = false } [features] -default = [ "std" ] -std = [ "log/std", "sp-arithmetic/std" ] +default = ["std"] +std = ["log/std", "sp-arithmetic/std"] diff --git a/substrate/frame/staking/runtime-api/Cargo.toml b/substrate/frame/staking/runtime-api/Cargo.toml index 746b463b8ce..061124fd184 100644 --- a/substrate/frame/staking/runtime-api/Cargo.toml +++ b/substrate/frame/staking/runtime-api/Cargo.toml @@ -18,5 +18,5 @@ sp-api = { version = "4.0.0-dev", default-features = false, path = "../../../pri sp-staking = { version = "4.0.0-dev", default-features = false, path = "../../../primitives/staking" } [features] -default = [ "std" ] -std = [ "codec/std", "sp-api/std", "sp-staking/std" ] +default = ["std"] +std = ["codec/std", "sp-api/std", "sp-staking/std"] diff --git a/substrate/frame/state-trie-migration/Cargo.toml b/substrate/frame/state-trie-migration/Cargo.toml index 93e4d80b9ac..a3e6edd5aee 100644 --- a/substrate/frame/state-trie-migration/Cargo.toml +++ b/substrate/frame/state-trie-migration/Cargo.toml @@ -18,15 +18,15 @@ scale-info = { version = "2.10.0", default-features = false, features = ["derive serde = { version = "1.0.193", optional = true } thousands = { version = "0.2.0", optional = true } zstd = { version = "0.12.4", default-features = false, optional = true } -frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true} -frame-support = { path = "../support", default-features = false} -frame-system = { path = "../system", default-features = false} -remote-externalities = { package = "frame-remote-externalities" , path = "../../utils/frame/remote-externalities", optional = true} -sp-core = { path = "../../primitives/core", default-features = false} -sp-io = { path = "../../primitives/io", default-features = false} -sp-runtime = { path = "../../primitives/runtime", default-features = false} -sp-std = { path = "../../primitives/std", default-features = false} -substrate-state-trie-migration-rpc = { path = "../../utils/frame/rpc/state-trie-migration-rpc", optional = true} +frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } +frame-support = { path = "../support", default-features = false } +frame-system = { path = "../system", default-features = false } +remote-externalities = { package = "frame-remote-externalities", path = "../../utils/frame/remote-externalities", optional = true } +sp-core = { path = "../../primitives/core", default-features = false } +sp-io = { path = "../../primitives/io", default-features = false } +sp-runtime = { path = "../../primitives/runtime", default-features = false } +sp-std = { path = "../../primitives/std", default-features = false } +substrate-state-trie-migration-rpc = { path = "../../utils/frame/rpc/state-trie-migration-rpc", optional = true } [dev-dependencies] parking_lot = "0.12.1" @@ -35,7 +35,7 @@ pallet-balances = { path = "../balances" } sp-tracing = { path = "../../primitives/tracing" } [features] -default = [ "std" ] +default = ["std"] std = [ "codec/std", "frame-benchmarking?/std", diff --git a/substrate/frame/statement/Cargo.toml b/substrate/frame/statement/Cargo.toml index a5c8cf5b8de..e4caf241233 100644 --- a/substrate/frame/statement/Cargo.toml +++ b/substrate/frame/statement/Cargo.toml @@ -12,23 +12,23 @@ description = "FRAME pallet for statement store" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"]} 
+codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -frame-support = { path = "../support", default-features = false} -frame-system = { path = "../system", default-features = false} -sp-statement-store = { path = "../../primitives/statement-store", default-features = false} -sp-api = { path = "../../primitives/api", default-features = false} -sp-runtime = { path = "../../primitives/runtime", default-features = false} -sp-std = { path = "../../primitives/std", default-features = false} -sp-io = { path = "../../primitives/io", default-features = false} -sp-core = { path = "../../primitives/core", default-features = false} +frame-support = { path = "../support", default-features = false } +frame-system = { path = "../system", default-features = false } +sp-statement-store = { path = "../../primitives/statement-store", default-features = false } +sp-api = { path = "../../primitives/api", default-features = false } +sp-runtime = { path = "../../primitives/runtime", default-features = false } +sp-std = { path = "../../primitives/std", default-features = false } +sp-io = { path = "../../primitives/io", default-features = false } +sp-core = { path = "../../primitives/core", default-features = false } log = { version = "0.4.17", default-features = false } [dev-dependencies] pallet-balances = { path = "../balances" } [features] -default = [ "std" ] +default = ["std"] std = [ "codec/std", "frame-support/std", diff --git a/substrate/frame/sudo/Cargo.toml b/substrate/frame/sudo/Cargo.toml index ef507a95316..70323590085 100644 --- a/substrate/frame/sudo/Cargo.toml +++ b/substrate/frame/sudo/Cargo.toml @@ -14,13 +14,13 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } -frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true} +frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -frame-support = { path = "../support", default-features = false} -frame-system = { path = "../system", default-features = false} -sp-io = { path = "../../primitives/io", default-features = false} -sp-runtime = { path = "../../primitives/runtime", default-features = false} -sp-std = { path = "../../primitives/std", default-features = false} +frame-support = { path = "../support", default-features = false } +frame-system = { path = "../system", default-features = false } +sp-io = { path = "../../primitives/io", default-features = false } +sp-runtime = { path = "../../primitives/runtime", default-features = false } +sp-std = { path = "../../primitives/std", default-features = false } docify = "0.2.6" @@ -28,7 +28,7 @@ docify = "0.2.6" sp-core = { path = "../../primitives/core" } [features] -default = [ "std" ] +default = ["std"] std = [ "codec/std", "frame-benchmarking?/std", diff --git a/substrate/frame/support/Cargo.toml b/substrate/frame/support/Cargo.toml index 887128e9561..6d253f29c3f 100644 --- a/substrate/frame/support/Cargo.toml +++ b/substrate/frame/support/Cargo.toml @@ -18,23 +18,23 @@ serde = { version = "1.0.193", default-features = false, features = ["alloc", "d codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive", "max-encoded-len"] } scale-info = { 
version = "2.10.0", default-features = false, features = ["derive"] } frame-metadata = { version = "16.0.0", default-features = false, features = ["current"] } -sp-api = { path = "../../primitives/api", default-features = false, features = [ "frame-metadata" ] } -sp-std = { path = "../../primitives/std", default-features = false} -sp-io = { path = "../../primitives/io", default-features = false} -sp-runtime = { path = "../../primitives/runtime", default-features = false} -sp-tracing = { path = "../../primitives/tracing", default-features = false} -sp-core = { path = "../../primitives/core", default-features = false} -sp-arithmetic = { path = "../../primitives/arithmetic", default-features = false} -sp-inherents = { path = "../../primitives/inherents", default-features = false} -sp-staking = { path = "../../primitives/staking", default-features = false} -sp-weights = { path = "../../primitives/weights", default-features = false} -sp-debug-derive = { path = "../../primitives/debug-derive", default-features = false} -sp-metadata-ir = { path = "../../primitives/metadata-ir", default-features = false} +sp-api = { path = "../../primitives/api", default-features = false, features = ["frame-metadata"] } +sp-std = { path = "../../primitives/std", default-features = false } +sp-io = { path = "../../primitives/io", default-features = false } +sp-runtime = { path = "../../primitives/runtime", default-features = false } +sp-tracing = { path = "../../primitives/tracing", default-features = false } +sp-core = { path = "../../primitives/core", default-features = false } +sp-arithmetic = { path = "../../primitives/arithmetic", default-features = false } +sp-inherents = { path = "../../primitives/inherents", default-features = false } +sp-staking = { path = "../../primitives/staking", default-features = false } +sp-weights = { path = "../../primitives/weights", default-features = false } +sp-debug-derive = { path = "../../primitives/debug-derive", default-features = false } +sp-metadata-ir = { path = "../../primitives/metadata-ir", default-features = false } tt-call = "1.0.8" macro_magic = "0.5.0" -frame-support-procedural = { path = "procedural", default-features = false} +frame-support-procedural = { path = "procedural", default-features = false } paste = "1.0" -sp-state-machine = { path = "../../primitives/state-machine", default-features = false, optional = true} +sp-state-machine = { path = "../../primitives/state-machine", default-features = false, optional = true } bitflags = "1.3" impl-trait-for-tuples = "0.2.2" smallvec = "1.11.0" @@ -42,7 +42,7 @@ log = { version = "0.4.17", default-features = false } sp-core-hashing-proc-macro = { path = "../../primitives/core/hashing/proc-macro" } k256 = { version = "0.13.1", default-features = false, features = ["ecdsa"] } environmental = { version = "1.1.4", default-features = false } -sp-genesis-builder = { path = "../../primitives/genesis-builder", default-features=false} +sp-genesis-builder = { path = "../../primitives/genesis-builder", default-features = false } serde_json = { version = "1.0.108", default-features = false, features = ["alloc"] } docify = "0.2.6" static_assertions = "1.1.0" @@ -55,7 +55,7 @@ pretty_assertions = "1.2.1" frame-system = { path = "../system" } [features] -default = [ "std" ] +default = ["std"] std = [ "codec/std", "environmental/std", @@ -101,8 +101,8 @@ no-metadata-docs = [ ] # By default some types have documentation, `full-metadata-docs` allows to add documentation to # more types in the metadata. 
-full-metadata-docs = [ "scale-info/docs" ] +full-metadata-docs = ["scale-info/docs"] # Generate impl-trait for tuples with the given number of tuples. Will be needed as the number of # pallets in a runtime grows. Does increase the compile time! -tuples-96 = [ "frame-support-procedural/tuples-96" ] -tuples-128 = [ "frame-support-procedural/tuples-128" ] +tuples-96 = ["frame-support-procedural/tuples-96"] +tuples-128 = ["frame-support-procedural/tuples-128"] diff --git a/substrate/frame/support/procedural/Cargo.toml b/substrate/frame/support/procedural/Cargo.toml index 45ed1750a52..5422ca0635c 100644 --- a/substrate/frame/support/procedural/Cargo.toml +++ b/substrate/frame/support/procedural/Cargo.toml @@ -29,7 +29,7 @@ expander = "2.0.0" sp-core-hashing = { path = "../../../primitives/core/hashing" } [features] -default = [ "std" ] +default = ["std"] std = [] no-metadata-docs = [] # Generate impl-trait for tuples with the given number of tuples. Will be needed as the number of diff --git a/substrate/frame/support/procedural/tools/Cargo.toml b/substrate/frame/support/procedural/tools/Cargo.toml index fd42e18180d..836e55b8a63 100644 --- a/substrate/frame/support/procedural/tools/Cargo.toml +++ b/substrate/frame/support/procedural/tools/Cargo.toml @@ -15,5 +15,5 @@ targets = ["x86_64-unknown-linux-gnu"] proc-macro-crate = "1.1.3" proc-macro2 = "1.0.56" quote = "1.0.28" -syn = { version = "2.0.38", features = ["full", "visit", "extra-traits"] } +syn = { version = "2.0.38", features = ["extra-traits", "full", "visit"] } frame-support-procedural-tools-derive = { path = "derive" } diff --git a/substrate/frame/support/procedural/tools/derive/Cargo.toml b/substrate/frame/support/procedural/tools/derive/Cargo.toml index 06f8e0f3d53..c943de998f4 100644 --- a/substrate/frame/support/procedural/tools/derive/Cargo.toml +++ b/substrate/frame/support/procedural/tools/derive/Cargo.toml @@ -17,4 +17,4 @@ proc-macro = true [dependencies] proc-macro2 = "1.0.56" quote = { version = "1.0.28", features = ["proc-macro"] } -syn = { version = "2.0.38", features = ["proc-macro", "full", "extra-traits", "parsing"] } +syn = { version = "2.0.38", features = ["extra-traits", "full", "parsing", "proc-macro"] } diff --git a/substrate/frame/support/test/Cargo.toml b/substrate/frame/support/test/Cargo.toml index 54f14df75bc..b88a459d92f 100644 --- a/substrate/frame/support/test/Cargo.toml +++ b/substrate/frame/support/test/Cargo.toml @@ -17,27 +17,27 @@ serde = { version = "1.0.193", default-features = false, features = ["derive"] } codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } frame-metadata = { version = "16.0.0", default-features = false, features = ["current"] } -sp-api = { path = "../../../primitives/api", default-features = false} -sp-arithmetic = { path = "../../../primitives/arithmetic", default-features = false} +sp-api = { path = "../../../primitives/api", default-features = false } +sp-arithmetic = { path = "../../../primitives/arithmetic", default-features = false } sp-io = { path = "../../../primitives/io", default-features = false } -sp-state-machine = { path = "../../../primitives/state-machine", optional = true} -frame-support = { path = "..", default-features = false} -frame-benchmarking = { path = "../../benchmarking", default-features = false} -sp-runtime = { path = "../../../primitives/runtime", default-features = false} -sp-core = { path = 
"../../../primitives/core", default-features = false} -sp-std = { path = "../../../primitives/std", default-features = false} -sp-version = { path = "../../../primitives/version", default-features = false} -sp-metadata-ir = { path = "../../../primitives/metadata-ir", default-features = false} -trybuild = { version = "1.0.74", features = [ "diff" ] } +sp-state-machine = { path = "../../../primitives/state-machine", optional = true } +frame-support = { path = "..", default-features = false } +frame-benchmarking = { path = "../../benchmarking", default-features = false } +sp-runtime = { path = "../../../primitives/runtime", default-features = false } +sp-core = { path = "../../../primitives/core", default-features = false } +sp-std = { path = "../../../primitives/std", default-features = false } +sp-version = { path = "../../../primitives/version", default-features = false } +sp-metadata-ir = { path = "../../../primitives/metadata-ir", default-features = false } +trybuild = { version = "1.0.74", features = ["diff"] } pretty_assertions = "1.3.0" rustversion = "1.0.6" -frame-system = { path = "../../system", default-features = false} -frame-executive = { path = "../../executive", default-features = false} +frame-system = { path = "../../system", default-features = false } +frame-executive = { path = "../../executive", default-features = false } # The "std" feature for this pallet is never activated on purpose, in order to test construct_runtime error message -test-pallet = { package = "frame-support-test-pallet", path = "pallet", default-features = false} +test-pallet = { package = "frame-support-test-pallet", path = "pallet", default-features = false } [features] -default = [ "std" ] +default = ["std"] std = [ "codec/std", "frame-benchmarking/std", @@ -58,7 +58,7 @@ std = [ "sp-version/std", "test-pallet/std", ] -experimental = [ "frame-support/experimental" ] +experimental = ["frame-support/experimental"] try-runtime = [ "frame-executive/try-runtime", "frame-support/try-runtime", @@ -72,4 +72,4 @@ frame-feature-testing = [] frame-feature-testing-2 = [] # Disable ui tests disable-ui-tests = [] -no-metadata-docs = [ "frame-support/no-metadata-docs" ] +no-metadata-docs = ["frame-support/no-metadata-docs"] diff --git a/substrate/frame/support/test/compile_pass/Cargo.toml b/substrate/frame/support/test/compile_pass/Cargo.toml index 19465d924ec..916771bd471 100644 --- a/substrate/frame/support/test/compile_pass/Cargo.toml +++ b/substrate/frame/support/test/compile_pass/Cargo.toml @@ -14,14 +14,14 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -renamed-frame-support = { package = "frame-support", path = "../..", default-features = false} -renamed-frame-system = { package = "frame-system", path = "../../../system", default-features = false} -sp-core = { path = "../../../../primitives/core", default-features = false} -sp-runtime = { path = "../../../../primitives/runtime", default-features = false} -sp-version = { path = "../../../../primitives/version", default-features = false} +renamed-frame-support = { package = "frame-support", path = "../..", default-features = false } +renamed-frame-system = { package = "frame-system", path = "../../../system", default-features = false } +sp-core = { path = "../../../../primitives/core", default-features = false } +sp-runtime = { path = 
"../../../../primitives/runtime", default-features = false } +sp-version = { path = "../../../../primitives/version", default-features = false } [features] -default = [ "std" ] +default = ["std"] std = [ "codec/std", "renamed-frame-support/std", diff --git a/substrate/frame/support/test/pallet/Cargo.toml b/substrate/frame/support/test/pallet/Cargo.toml index 8d0f252326d..e1136d5bc02 100644 --- a/substrate/frame/support/test/pallet/Cargo.toml +++ b/substrate/frame/support/test/pallet/Cargo.toml @@ -15,12 +15,12 @@ targets = ["x86_64-unknown-linux-gnu"] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } serde = { version = "1.0.193", default-features = false, features = ["derive"] } -frame-support = { path = "../..", default-features = false} -frame-system = { path = "../../../system", default-features = false} -sp-runtime = { path = "../../../../primitives/runtime", default-features = false} +frame-support = { path = "../..", default-features = false } +frame-system = { path = "../../../system", default-features = false } +sp-runtime = { path = "../../../../primitives/runtime", default-features = false } [features] -default = [ "std" ] +default = ["std"] std = [ "codec/std", "frame-support/std", diff --git a/substrate/frame/support/test/stg_frame_crate/Cargo.toml b/substrate/frame/support/test/stg_frame_crate/Cargo.toml index 0f9617c0368..0b3b584910a 100644 --- a/substrate/frame/support/test/stg_frame_crate/Cargo.toml +++ b/substrate/frame/support/test/stg_frame_crate/Cargo.toml @@ -13,9 +13,9 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } -frame = { path = "../../..", default-features = false, features = ["runtime", "experimental"]} +frame = { path = "../../..", default-features = false, features = ["experimental", "runtime"] } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } [features] -default = [ "std" ] -std = [ "codec/std", "frame/std", "scale-info/std" ] +default = ["std"] +std = ["codec/std", "frame/std", "scale-info/std"] diff --git a/substrate/frame/system/Cargo.toml b/substrate/frame/system/Cargo.toml index 8de44947dc0..0ddff8bf41e 100644 --- a/substrate/frame/system/Cargo.toml +++ b/substrate/frame/system/Cargo.toml @@ -17,12 +17,12 @@ cfg-if = "1.0" codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } log = { version = "0.4.17", default-features = false } scale-info = { version = "2.10.0", default-features = false, features = ["derive", "serde"] } -serde = { version = "1.0.193", default-features = false, features = ["derive", "alloc"] } -frame-support = { path = "../support", default-features = false} +serde = { version = "1.0.193", default-features = false, features = ["alloc", "derive"] } +frame-support = { path = "../support", default-features = false } sp-core = { path = "../../primitives/core", default-features = false, features = ["serde"] } -sp-io = { path = "../../primitives/io", default-features = false} +sp-io = { path = "../../primitives/io", default-features = false } sp-runtime = { path = "../../primitives/runtime", default-features = false, features = ["serde"] } -sp-std = { path = "../../primitives/std", default-features = false} +sp-std = { path = "../../primitives/std", default-features = false } sp-version 
= { path = "../../primitives/version", default-features = false, features = ["serde"] } sp-weights = { path = "../../primitives/weights", default-features = false, features = ["serde"] } docify = "0.2.6" @@ -33,7 +33,7 @@ sp-externalities = { path = "../../primitives/externalities" } substrate-test-runtime-client = { path = "../../test-utils/runtime/client" } [features] -default = [ "std" ] +default = ["std"] std = [ "codec/std", "frame-support/std", @@ -52,7 +52,7 @@ runtime-benchmarks = [ "frame-support/runtime-benchmarks", "sp-runtime/runtime-benchmarks", ] -try-runtime = [ "frame-support/try-runtime", "sp-runtime/try-runtime" ] +try-runtime = ["frame-support/try-runtime", "sp-runtime/try-runtime"] experimental = [] [[bench]] diff --git a/substrate/frame/system/benchmarking/Cargo.toml b/substrate/frame/system/benchmarking/Cargo.toml index c1d241f4bec..3e92c56408e 100644 --- a/substrate/frame/system/benchmarking/Cargo.toml +++ b/substrate/frame/system/benchmarking/Cargo.toml @@ -15,12 +15,12 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -frame-benchmarking = { path = "../../benchmarking", default-features = false} -frame-support = { path = "../../support", default-features = false} -frame-system = { path = "..", default-features = false} -sp-core = { path = "../../../primitives/core", default-features = false} -sp-runtime = { path = "../../../primitives/runtime", default-features = false} -sp-std = { path = "../../../primitives/std", default-features = false} +frame-benchmarking = { path = "../../benchmarking", default-features = false } +frame-support = { path = "../../support", default-features = false } +frame-system = { path = "..", default-features = false } +sp-core = { path = "../../../primitives/core", default-features = false } +sp-runtime = { path = "../../../primitives/runtime", default-features = false } +sp-std = { path = "../../../primitives/std", default-features = false } [dev-dependencies] sp-io = { path = "../../../primitives/io" } @@ -28,7 +28,7 @@ sp-externalities = { path = "../../../primitives/externalities" } sp-version = { path = "../../../primitives/version" } [features] -default = [ "std" ] +default = ["std"] std = [ "codec/std", "frame-benchmarking/std", diff --git a/substrate/frame/system/rpc/runtime-api/Cargo.toml b/substrate/frame/system/rpc/runtime-api/Cargo.toml index 81b6d946d46..68dc7fc9905 100644 --- a/substrate/frame/system/rpc/runtime-api/Cargo.toml +++ b/substrate/frame/system/rpc/runtime-api/Cargo.toml @@ -14,8 +14,8 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } -sp-api = { path = "../../../../primitives/api", default-features = false} +sp-api = { path = "../../../../primitives/api", default-features = false } [features] -default = [ "std" ] -std = [ "codec/std", "sp-api/std" ] +default = ["std"] +std = ["codec/std", "sp-api/std"] diff --git a/substrate/frame/timestamp/Cargo.toml b/substrate/frame/timestamp/Cargo.toml index e23ded725d8..fd14216bdb3 100644 --- a/substrate/frame/timestamp/Cargo.toml +++ b/substrate/frame/timestamp/Cargo.toml @@ -17,15 +17,15 @@ targets = ["x86_64-unknown-linux-gnu"] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive", "max-encoded-len"] } log = { version = "0.4.17", default-features = 
false } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true} -frame-support = { path = "../support", default-features = false} -frame-system = { path = "../system", default-features = false} -sp-inherents = { path = "../../primitives/inherents", default-features = false} -sp-io = { path = "../../primitives/io", default-features = false, optional = true} -sp-runtime = { path = "../../primitives/runtime", default-features = false} -sp-std = { path = "../../primitives/std", default-features = false} -sp-storage = { path = "../../primitives/storage", default-features = false} -sp-timestamp = { path = "../../primitives/timestamp", default-features = false} +frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } +frame-support = { path = "../support", default-features = false } +frame-system = { path = "../system", default-features = false } +sp-inherents = { path = "../../primitives/inherents", default-features = false } +sp-io = { path = "../../primitives/io", default-features = false, optional = true } +sp-runtime = { path = "../../primitives/runtime", default-features = false } +sp-std = { path = "../../primitives/std", default-features = false } +sp-storage = { path = "../../primitives/storage", default-features = false } +sp-timestamp = { path = "../../primitives/timestamp", default-features = false } docify = "0.2.6" @@ -34,7 +34,7 @@ sp-core = { path = "../../primitives/core" } sp-io = { path = "../../primitives/io" } [features] -default = [ "std" ] +default = ["std"] std = [ "codec/std", "frame-benchmarking?/std", diff --git a/substrate/frame/tips/Cargo.toml b/substrate/frame/tips/Cargo.toml index a189c6691af..a86034d92f5 100644 --- a/substrate/frame/tips/Cargo.toml +++ b/substrate/frame/tips/Cargo.toml @@ -17,21 +17,21 @@ codec = { package = "parity-scale-codec", version = "3.6.1", default-features = log = { version = "0.4.17", default-features = false } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } serde = { version = "1.0.193", features = ["derive"], optional = true } -frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true} -frame-support = { path = "../support", default-features = false} -frame-system = { path = "../system", default-features = false} -pallet-treasury = { path = "../treasury", default-features = false} -sp-core = { path = "../../primitives/core", default-features = false} -sp-io = { path = "../../primitives/io", default-features = false} -sp-runtime = { path = "../../primitives/runtime", default-features = false} -sp-std = { path = "../../primitives/std", default-features = false} +frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } +frame-support = { path = "../support", default-features = false } +frame-system = { path = "../system", default-features = false } +pallet-treasury = { path = "../treasury", default-features = false } +sp-core = { path = "../../primitives/core", default-features = false } +sp-io = { path = "../../primitives/io", default-features = false } +sp-runtime = { path = "../../primitives/runtime", default-features = false } +sp-std = { path = "../../primitives/std", default-features = false } [dev-dependencies] pallet-balances = { path = "../balances" } sp-storage = { path = "../../primitives/storage" } [features] -default = [ "std" ] +default = ["std"] std = [ 
"codec/std", "frame-benchmarking?/std", diff --git a/substrate/frame/transaction-payment/Cargo.toml b/substrate/frame/transaction-payment/Cargo.toml index 407740cfc0f..c431f7f8243 100644 --- a/substrate/frame/transaction-payment/Cargo.toml +++ b/substrate/frame/transaction-payment/Cargo.toml @@ -18,19 +18,19 @@ codec = { package = "parity-scale-codec", version = "3.6.1", default-features = ] } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } serde = { version = "1.0.193", optional = true } -frame-support = { path = "../support", default-features = false} -frame-system = { path = "../system", default-features = false} -sp-core = { path = "../../primitives/core", default-features = false} -sp-io = { path = "../../primitives/io", default-features = false} -sp-runtime = { path = "../../primitives/runtime", default-features = false} -sp-std = { path = "../../primitives/std", default-features = false} +frame-support = { path = "../support", default-features = false } +frame-system = { path = "../system", default-features = false } +sp-core = { path = "../../primitives/core", default-features = false } +sp-io = { path = "../../primitives/io", default-features = false } +sp-runtime = { path = "../../primitives/runtime", default-features = false } +sp-std = { path = "../../primitives/std", default-features = false } [dev-dependencies] serde_json = "1.0.108" pallet-balances = { path = "../balances" } [features] -default = [ "std" ] +default = ["std"] std = [ "codec/std", "frame-support/std", diff --git a/substrate/frame/transaction-payment/asset-conversion-tx-payment/Cargo.toml b/substrate/frame/transaction-payment/asset-conversion-tx-payment/Cargo.toml index 2cac47fb3b7..7d15740e824 100644 --- a/substrate/frame/transaction-payment/asset-conversion-tx-payment/Cargo.toml +++ b/substrate/frame/transaction-payment/asset-conversion-tx-payment/Cargo.toml @@ -14,24 +14,24 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] # Substrate dependencies -sp-runtime = { path = "../../../primitives/runtime", default-features = false} -sp-std = { path = "../../../primitives/std", default-features = false} -frame-support = { path = "../../support", default-features = false} -frame-system = { path = "../../system", default-features = false} -pallet-asset-conversion = { path = "../../asset-conversion", default-features = false} -pallet-transaction-payment = { path = "..", default-features = false} +sp-runtime = { path = "../../../primitives/runtime", default-features = false } +sp-std = { path = "../../../primitives/std", default-features = false } +frame-support = { path = "../../support", default-features = false } +frame-system = { path = "../../system", default-features = false } +pallet-asset-conversion = { path = "../../asset-conversion", default-features = false } +pallet-transaction-payment = { path = "..", default-features = false } codec = { package = "parity-scale-codec", version = "3.2.2", default-features = false, features = ["derive"] } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } [dev-dependencies] -sp-core = { path = "../../../primitives/core", default-features = false} -sp-io = { path = "../../../primitives/io", default-features = false} -sp-storage = { path = "../../../primitives/storage", default-features = false} +sp-core = { path = "../../../primitives/core", default-features = false } +sp-io = { path = "../../../primitives/io", default-features = false } +sp-storage = { path = "../../../primitives/storage", 
default-features = false } pallet-assets = { path = "../../assets" } pallet-balances = { path = "../../balances" } [features] -default = [ "std" ] +default = ["std"] std = [ "codec/std", "frame-support/std", diff --git a/substrate/frame/transaction-payment/asset-tx-payment/Cargo.toml b/substrate/frame/transaction-payment/asset-tx-payment/Cargo.toml index 59232a72b99..9a614316e53 100644 --- a/substrate/frame/transaction-payment/asset-tx-payment/Cargo.toml +++ b/substrate/frame/transaction-payment/asset-tx-payment/Cargo.toml @@ -14,14 +14,14 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] # Substrate dependencies -sp-core = { path = "../../../primitives/core", default-features = false} -sp-io = { path = "../../../primitives/io", default-features = false} -sp-runtime = { path = "../../../primitives/runtime", default-features = false} -sp-std = { path = "../../../primitives/std", default-features = false} +sp-core = { path = "../../../primitives/core", default-features = false } +sp-io = { path = "../../../primitives/io", default-features = false } +sp-runtime = { path = "../../../primitives/runtime", default-features = false } +sp-std = { path = "../../../primitives/std", default-features = false } -frame-support = { path = "../../support", default-features = false} -frame-system = { path = "../../system", default-features = false} -pallet-transaction-payment = { path = "..", default-features = false} +frame-support = { path = "../../support", default-features = false } +frame-system = { path = "../../system", default-features = false } +pallet-transaction-payment = { path = "..", default-features = false } frame-benchmarking = { path = "../../benchmarking", default-features = false, optional = true } # Other dependencies @@ -32,14 +32,14 @@ serde = { version = "1.0.193", optional = true } [dev-dependencies] serde_json = "1.0.108" -sp-storage = { path = "../../../primitives/storage", default-features = false} +sp-storage = { path = "../../../primitives/storage", default-features = false } pallet-assets = { path = "../../assets" } pallet-authorship = { path = "../../authorship" } pallet-balances = { path = "../../balances" } [features] -default = [ "std" ] +default = ["std"] std = [ "codec/std", "frame-benchmarking?/std", diff --git a/substrate/frame/transaction-payment/rpc/Cargo.toml b/substrate/frame/transaction-payment/rpc/Cargo.toml index 8a0052e0337..048b7da63f6 100644 --- a/substrate/frame/transaction-payment/rpc/Cargo.toml +++ b/substrate/frame/transaction-payment/rpc/Cargo.toml @@ -14,7 +14,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1" } -jsonrpsee = { version = "0.16.2", features = ["client-core", "server", "macros"] } +jsonrpsee = { version = "0.16.2", features = ["client-core", "macros", "server"] } pallet-transaction-payment-rpc-runtime-api = { path = "runtime-api" } sp-api = { path = "../../../primitives/api" } sp-blockchain = { path = "../../../primitives/blockchain" } diff --git a/substrate/frame/transaction-payment/rpc/runtime-api/Cargo.toml b/substrate/frame/transaction-payment/rpc/runtime-api/Cargo.toml index af098fd34ed..17213392e1c 100644 --- a/substrate/frame/transaction-payment/rpc/runtime-api/Cargo.toml +++ b/substrate/frame/transaction-payment/rpc/runtime-api/Cargo.toml @@ -14,13 +14,13 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } -pallet-transaction-payment = { path = 
"../..", default-features = false} -sp-api = { path = "../../../../primitives/api", default-features = false} -sp-runtime = { path = "../../../../primitives/runtime", default-features = false} -sp-weights = { path = "../../../../primitives/weights", default-features = false} +pallet-transaction-payment = { path = "../..", default-features = false } +sp-api = { path = "../../../../primitives/api", default-features = false } +sp-runtime = { path = "../../../../primitives/runtime", default-features = false } +sp-weights = { path = "../../../../primitives/weights", default-features = false } [features] -default = [ "std" ] +default = ["std"] std = [ "codec/std", "pallet-transaction-payment/std", diff --git a/substrate/frame/transaction-payment/skip-feeless-payment/Cargo.toml b/substrate/frame/transaction-payment/skip-feeless-payment/Cargo.toml index cfb814e2e38..25a708d69de 100644 --- a/substrate/frame/transaction-payment/skip-feeless-payment/Cargo.toml +++ b/substrate/frame/transaction-payment/skip-feeless-payment/Cargo.toml @@ -12,18 +12,18 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] # Substrate dependencies -sp-runtime = { path = "../../../primitives/runtime", default-features = false} -sp-std = { path = "../../../primitives/std", default-features = false} +sp-runtime = { path = "../../../primitives/runtime", default-features = false } +sp-std = { path = "../../../primitives/std", default-features = false } -frame-support = { path = "../../support", default-features = false} -frame-system = { path = "../../system", default-features = false} +frame-support = { path = "../../support", default-features = false } +frame-system = { path = "../../system", default-features = false } # Other dependencies codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } [features] -default = [ "std" ] +default = ["std"] std = [ "codec/std", "frame-support/std", diff --git a/substrate/frame/transaction-storage/Cargo.toml b/substrate/frame/transaction-storage/Cargo.toml index a847ca6a790..a2df1a3ce2a 100644 --- a/substrate/frame/transaction-storage/Cargo.toml +++ b/substrate/frame/transaction-storage/Cargo.toml @@ -17,23 +17,23 @@ array-bytes = { version = "6.1", optional = true } codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } serde = { version = "1.0.193", optional = true } -frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true} -frame-support = { path = "../support", default-features = false} -frame-system = { path = "../system", default-features = false} -pallet-balances = { path = "../balances", default-features = false} -sp-inherents = { path = "../../primitives/inherents", default-features = false} -sp-io = { path = "../../primitives/io", default-features = false} -sp-runtime = { path = "../../primitives/runtime", default-features = false} -sp-std = { path = "../../primitives/std", default-features = false} -sp-transaction-storage-proof = { path = "../../primitives/transaction-storage-proof", default-features = false} +frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } +frame-support = { path = "../support", default-features = false } +frame-system = { path = "../system", default-features = false } +pallet-balances = { path = "../balances", 
default-features = false } +sp-inherents = { path = "../../primitives/inherents", default-features = false } +sp-io = { path = "../../primitives/io", default-features = false } +sp-runtime = { path = "../../primitives/runtime", default-features = false } +sp-std = { path = "../../primitives/std", default-features = false } +sp-transaction-storage-proof = { path = "../../primitives/transaction-storage-proof", default-features = false } log = { version = "0.4.17", default-features = false } [dev-dependencies] -sp-core = { path = "../../primitives/core", default-features = false} -sp-transaction-storage-proof = { path = "../../primitives/transaction-storage-proof", default-features = true} +sp-core = { path = "../../primitives/core", default-features = false } +sp-transaction-storage-proof = { path = "../../primitives/transaction-storage-proof", default-features = true } [features] -default = [ "std" ] +default = ["std"] runtime-benchmarks = [ "array-bytes", "frame-benchmarking/runtime-benchmarks", diff --git a/substrate/frame/treasury/Cargo.toml b/substrate/frame/treasury/Cargo.toml index c9fdb3d5b90..298c52d0e48 100644 --- a/substrate/frame/treasury/Cargo.toml +++ b/substrate/frame/treasury/Cargo.toml @@ -21,13 +21,13 @@ docify = "0.2.0" impl-trait-for-tuples = "0.2.2" scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } serde = { version = "1.0.193", features = ["derive"], optional = true } -frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true} -frame-support = { path = "../support", default-features = false} -frame-system = { path = "../system", default-features = false} -pallet-balances = { path = "../balances", default-features = false} -sp-runtime = { path = "../../primitives/runtime", default-features = false} -sp-std = { path = "../../primitives/std", default-features = false} -sp-core = { path = "../../primitives/core", default-features = false, optional = true} +frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } +frame-support = { path = "../support", default-features = false } +frame-system = { path = "../system", default-features = false } +pallet-balances = { path = "../balances", default-features = false } +sp-runtime = { path = "../../primitives/runtime", default-features = false } +sp-std = { path = "../../primitives/std", default-features = false } +sp-core = { path = "../../primitives/core", default-features = false, optional = true } [dev-dependencies] sp-io = { path = "../../primitives/io" } @@ -35,7 +35,7 @@ pallet-utility = { path = "../utility" } sp-core = { path = "../../primitives/core", default-features = false } [features] -default = [ "std" ] +default = ["std"] std = [ "codec/std", "frame-benchmarking?/std", diff --git a/substrate/frame/try-runtime/Cargo.toml b/substrate/frame/try-runtime/Cargo.toml index 1bb3283b1de..4dd51647174 100644 --- a/substrate/frame/try-runtime/Cargo.toml +++ b/substrate/frame/try-runtime/Cargo.toml @@ -12,14 +12,14 @@ description = "FRAME pallet for democracy" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"]} -frame-support = { path = "../support", default-features = false} -sp-api = { path = "../../primitives/api", default-features = false} -sp-runtime = { path = "../../primitives/runtime", default-features = false} -sp-std = { path = "../../primitives/std", default-features = false} +codec = { package = 
"parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } +frame-support = { path = "../support", default-features = false } +sp-api = { path = "../../primitives/api", default-features = false } +sp-runtime = { path = "../../primitives/runtime", default-features = false } +sp-std = { path = "../../primitives/std", default-features = false } [features] -default = [ "std" ] +default = ["std"] std = [ "codec/std", "frame-support/std", @@ -27,4 +27,4 @@ std = [ "sp-runtime/std", "sp-std/std", ] -try-runtime = [ "frame-support/try-runtime", "sp-runtime/try-runtime" ] +try-runtime = ["frame-support/try-runtime", "sp-runtime/try-runtime"] diff --git a/substrate/frame/tx-pause/Cargo.toml b/substrate/frame/tx-pause/Cargo.toml index bf0641f5b6d..60623bb9d38 100644 --- a/substrate/frame/tx-pause/Cargo.toml +++ b/substrate/frame/tx-pause/Cargo.toml @@ -14,15 +14,15 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.2.2", default-features = false, features = ["derive"] } docify = "0.2.6" -frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true} -frame-support = { path = "../support", default-features = false} -frame-system = { path = "../system", default-features = false} +frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } +frame-support = { path = "../support", default-features = false } +frame-system = { path = "../system", default-features = false } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -sp-runtime = { path = "../../primitives/runtime", default-features = false} -sp-std = { path = "../../primitives/std", default-features = false} +sp-runtime = { path = "../../primitives/runtime", default-features = false } +sp-std = { path = "../../primitives/std", default-features = false } pallet-balances = { path = "../balances", default-features = false, optional = true } -pallet-utility = { path = "../utility", default-features = false, optional = true } -pallet-proxy = { path = "../proxy", default-features = false, optional = true } +pallet-utility = { path = "../utility", default-features = false, optional = true } +pallet-proxy = { path = "../proxy", default-features = false, optional = true } [dev-dependencies] sp-core = { path = "../../primitives/core" } @@ -32,7 +32,7 @@ pallet-utility = { path = "../utility" } pallet-proxy = { path = "../proxy" } [features] -default = [ "std" ] +default = ["std"] std = [ "codec/std", "frame-benchmarking/std", diff --git a/substrate/frame/uniques/Cargo.toml b/substrate/frame/uniques/Cargo.toml index 4c1bcca573d..300b319ede0 100644 --- a/substrate/frame/uniques/Cargo.toml +++ b/substrate/frame/uniques/Cargo.toml @@ -16,11 +16,11 @@ targets = ["x86_64-unknown-linux-gnu"] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } log = { version = "0.4.17", default-features = false } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true} -frame-support = { path = "../support", default-features = false} -frame-system = { path = "../system", default-features = false} -sp-runtime = { path = "../../primitives/runtime", default-features = false} -sp-std = { path = "../../primitives/std", default-features = false} +frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } 
+frame-support = { path = "../support", default-features = false } +frame-system = { path = "../system", default-features = false } +sp-runtime = { path = "../../primitives/runtime", default-features = false } +sp-std = { path = "../../primitives/std", default-features = false } [dev-dependencies] pallet-balances = { path = "../balances" } @@ -29,7 +29,7 @@ sp-io = { path = "../../primitives/io" } sp-std = { path = "../../primitives/std" } [features] -default = [ "std" ] +default = ["std"] std = [ "codec/std", "frame-benchmarking?/std", diff --git a/substrate/frame/utility/Cargo.toml b/substrate/frame/utility/Cargo.toml index 8f7a368709b..73c25a60daf 100644 --- a/substrate/frame/utility/Cargo.toml +++ b/substrate/frame/utility/Cargo.toml @@ -15,13 +15,13 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true} -frame-support = { path = "../support", default-features = false} -frame-system = { path = "../system", default-features = false} -sp-core = { path = "../../primitives/core", default-features = false} -sp-io = { path = "../../primitives/io", default-features = false} -sp-runtime = { path = "../../primitives/runtime", default-features = false} -sp-std = { path = "../../primitives/std", default-features = false} +frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } +frame-support = { path = "../support", default-features = false } +frame-system = { path = "../system", default-features = false } +sp-core = { path = "../../primitives/core", default-features = false } +sp-io = { path = "../../primitives/io", default-features = false } +sp-runtime = { path = "../../primitives/runtime", default-features = false } +sp-std = { path = "../../primitives/std", default-features = false } [dev-dependencies] pallet-balances = { path = "../balances" } @@ -31,7 +31,7 @@ pallet-timestamp = { path = "../timestamp" } sp-core = { path = "../../primitives/core" } [features] -default = [ "std" ] +default = ["std"] std = [ "codec/std", "frame-benchmarking?/std", diff --git a/substrate/frame/vesting/Cargo.toml b/substrate/frame/vesting/Cargo.toml index ed13a15bc97..37339d87aea 100644 --- a/substrate/frame/vesting/Cargo.toml +++ b/substrate/frame/vesting/Cargo.toml @@ -18,19 +18,19 @@ codec = { package = "parity-scale-codec", version = "3.6.1", default-features = ] } log = { version = "0.4.17", default-features = false } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true} -frame-support = { path = "../support", default-features = false} -frame-system = { path = "../system", default-features = false} -sp-runtime = { path = "../../primitives/runtime", default-features = false} -sp-std = { path = "../../primitives/std", default-features = false} +frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } +frame-support = { path = "../support", default-features = false } +frame-system = { path = "../system", default-features = false } +sp-runtime = { path = "../../primitives/runtime", default-features = false } +sp-std = { path = "../../primitives/std", default-features = false } [dev-dependencies] pallet-balances = { path = "../balances" } sp-core = { 
path = "../../primitives/core" } -sp-io = { path = "../../primitives/io", default-features = false} +sp-io = { path = "../../primitives/io", default-features = false } [features] -default = [ "std" ] +default = ["std"] std = [ "codec/std", "frame-benchmarking?/std", diff --git a/substrate/frame/whitelist/Cargo.toml b/substrate/frame/whitelist/Cargo.toml index c5246615320..0d7ab6c2967 100644 --- a/substrate/frame/whitelist/Cargo.toml +++ b/substrate/frame/whitelist/Cargo.toml @@ -14,12 +14,12 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive", "max-encoded-len"] } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true} -frame-support = { path = "../support", default-features = false} -frame-system = { path = "../system", default-features = false} -sp-api = { path = "../../primitives/api", default-features = false} -sp-runtime = { path = "../../primitives/runtime", default-features = false} -sp-std = { path = "../../primitives/std", default-features = false} +frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } +frame-support = { path = "../support", default-features = false } +frame-system = { path = "../system", default-features = false } +sp-api = { path = "../../primitives/api", default-features = false } +sp-runtime = { path = "../../primitives/runtime", default-features = false } +sp-std = { path = "../../primitives/std", default-features = false } [dev-dependencies] pallet-balances = { path = "../balances" } @@ -28,7 +28,7 @@ sp-core = { path = "../../primitives/core" } sp-io = { path = "../../primitives/io" } [features] -default = [ "std" ] +default = ["std"] std = [ "codec/std", "frame-benchmarking?/std", diff --git a/substrate/primitives/api/Cargo.toml b/substrate/primitives/api/Cargo.toml index e95333c9eeb..1be131a7b4f 100644 --- a/substrate/primitives/api/Cargo.toml +++ b/substrate/primitives/api/Cargo.toml @@ -32,7 +32,7 @@ log = { version = "0.4.17", default-features = false } sp-test-primitives = { path = "../test-primitives" } [features] -default = [ "std" ] +default = ["std"] std = [ "codec/std", "hash-db", @@ -58,7 +58,7 @@ std = [ # building a runtime for registering it on chain. # # This sets the max logging level to `off` for `log`. -disable-logging = [ "log/max_level_off" ] +disable-logging = ["log/max_level_off"] # Do not report the documentation in the metadata. 
-no-metadata-docs = [ "sp-api-proc-macro/no-metadata-docs" ] -frame-metadata = [ "sp-api-proc-macro/frame-metadata", "sp-metadata-ir" ] +no-metadata-docs = ["sp-api-proc-macro/no-metadata-docs"] +frame-metadata = ["sp-api-proc-macro/frame-metadata", "sp-metadata-ir"] diff --git a/substrate/primitives/api/proc-macro/Cargo.toml b/substrate/primitives/api/proc-macro/Cargo.toml index d60b9f1bb4e..8188299f533 100644 --- a/substrate/primitives/api/proc-macro/Cargo.toml +++ b/substrate/primitives/api/proc-macro/Cargo.toml @@ -17,7 +17,7 @@ proc-macro = true [dependencies] quote = "1.0.28" -syn = { version = "2.0.38", features = ["full", "fold", "extra-traits", "visit"] } +syn = { version = "2.0.38", features = ["extra-traits", "fold", "full", "visit"] } proc-macro2 = "1.0.56" blake2 = { version = "0.10.4", default-features = false } proc-macro-crate = "1.1.3" @@ -29,7 +29,7 @@ assert_matches = "1.3.0" [features] # Required for the doc tests -default = [ "std" ] -std = [ "blake2/std" ] +default = ["std"] +std = ["blake2/std"] no-metadata-docs = [] frame-metadata = [] diff --git a/substrate/primitives/application-crypto/Cargo.toml b/substrate/primitives/application-crypto/Cargo.toml index 7896df94913..a6c937a3469 100644 --- a/substrate/primitives/application-crypto/Cargo.toml +++ b/substrate/primitives/application-crypto/Cargo.toml @@ -15,15 +15,15 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] -sp-core = { path = "../core", default-features = false} +sp-core = { path = "../core", default-features = false } codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -serde = { version = "1.0.193", default-features = false, optional = true, features = ["derive", "alloc"] } -sp-std = { path = "../std", default-features = false} -sp-io = { path = "../io", default-features = false} +serde = { version = "1.0.193", default-features = false, optional = true, features = ["alloc", "derive"] } +sp-std = { path = "../std", default-features = false } +sp-io = { path = "../io", default-features = false } [features] -default = [ "std" ] +default = ["std"] std = [ "codec/std", "full_crypto", @@ -35,7 +35,7 @@ std = [ ] # Serde support without relying on std features. -serde = [ "dep:serde", "scale-info/serde", "sp-core/serde" ] +serde = ["dep:serde", "scale-info/serde", "sp-core/serde"] # This feature enables all crypto primitives for `no_std` builds like microcontrollers # or Intel SGX. @@ -51,7 +51,7 @@ full_crypto = [ # This feature adds BLS crypto primitives. # It should not be used in production since the implementation and interface may still # be subject to significant changes. -bls-experimental = [ "sp-core/bls-experimental", "sp-io/bls-experimental" ] +bls-experimental = ["sp-core/bls-experimental", "sp-io/bls-experimental"] # This feature adds Bandersnatch crypto primitives. # It should not be used in production since the implementation and interface may still diff --git a/substrate/primitives/application-crypto/test/Cargo.toml b/substrate/primitives/application-crypto/test/Cargo.toml index a6f4f108c8f..d9fb743e8cd 100644 --- a/substrate/primitives/application-crypto/test/Cargo.toml +++ b/substrate/primitives/application-crypto/test/Cargo.toml @@ -15,6 +15,6 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] sp-api = { path = "../../api" } sp-application-crypto = { path = ".." 
} -sp-core = { path = "../../core", default-features = false} -sp-keystore = { path = "../../keystore", default-features = false} +sp-core = { path = "../../core", default-features = false } +sp-keystore = { path = "../../keystore", default-features = false } substrate-test-runtime-client = { path = "../../../test-utils/runtime/client" } diff --git a/substrate/primitives/arithmetic/Cargo.toml b/substrate/primitives/arithmetic/Cargo.toml index ff938795aa3..8634dabe854 100644 --- a/substrate/primitives/arithmetic/Cargo.toml +++ b/substrate/primitives/arithmetic/Cargo.toml @@ -21,18 +21,18 @@ codec = { package = "parity-scale-codec", version = "3.6.1", default-features = integer-sqrt = "0.1.2" num-traits = { version = "0.2.8", default-features = false } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -serde = { version = "1.0.193", default-features = false, features = ["derive", "alloc"], optional = true } +serde = { version = "1.0.193", default-features = false, features = ["alloc", "derive"], optional = true } static_assertions = "1.1.0" -sp-std = { path = "../std", default-features = false} +sp-std = { path = "../std", default-features = false } [dev-dependencies] criterion = "0.4.0" primitive-types = "0.12.0" -sp-core = { path = "../core", features = ["full_crypto"]} +sp-core = { path = "../core", features = ["full_crypto"] } rand = "0.8.5" [features] -default = [ "std" ] +default = ["std"] std = [ "codec/std", "num-traits/std", @@ -42,7 +42,7 @@ std = [ "sp-std/std", ] # Serde support without relying on std features. -serde = [ "dep:serde", "scale-info/serde" ] +serde = ["dep:serde", "scale-info/serde"] [[bench]] name = "bench" diff --git a/substrate/primitives/authority-discovery/Cargo.toml b/substrate/primitives/authority-discovery/Cargo.toml index e4f44e9da38..c8a93980be2 100644 --- a/substrate/primitives/authority-discovery/Cargo.toml +++ b/substrate/primitives/authority-discovery/Cargo.toml @@ -15,13 +15,13 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -sp-api = { path = "../api", default-features = false} -sp-application-crypto = { path = "../application-crypto", default-features = false} -sp-runtime = { path = "../runtime", default-features = false} -sp-std = { path = "../std", default-features = false} +sp-api = { path = "../api", default-features = false } +sp-application-crypto = { path = "../application-crypto", default-features = false } +sp-runtime = { path = "../runtime", default-features = false } +sp-std = { path = "../std", default-features = false } [features] -default = [ "std" ] +default = ["std"] std = [ "codec/std", "scale-info/std", diff --git a/substrate/primitives/block-builder/Cargo.toml b/substrate/primitives/block-builder/Cargo.toml index 269eb539532..a574689811b 100644 --- a/substrate/primitives/block-builder/Cargo.toml +++ b/substrate/primitives/block-builder/Cargo.toml @@ -13,11 +13,11 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -sp-api = { path = "../api", default-features = false} -sp-inherents = { path = "../inherents", default-features = false} -sp-runtime = { path = "../runtime", default-features = false} -sp-std = { path = "../std", default-features = false} +sp-api = { path = "../api", default-features = false } +sp-inherents = { path = "../inherents", default-features = false } +sp-runtime = { path 
= "../runtime", default-features = false } +sp-std = { path = "../std", default-features = false } [features] -default = [ "std" ] -std = [ "sp-api/std", "sp-inherents/std", "sp-runtime/std", "sp-std/std" ] +default = ["std"] +std = ["sp-api/std", "sp-inherents/std", "sp-runtime/std", "sp-std/std"] diff --git a/substrate/primitives/consensus/aura/Cargo.toml b/substrate/primitives/consensus/aura/Cargo.toml index 26f02bc3119..4a19999a469 100644 --- a/substrate/primitives/consensus/aura/Cargo.toml +++ b/substrate/primitives/consensus/aura/Cargo.toml @@ -16,16 +16,16 @@ targets = ["x86_64-unknown-linux-gnu"] async-trait = { version = "0.1.57", optional = true } codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -sp-api = { path = "../../api", default-features = false} -sp-application-crypto = { path = "../../application-crypto", default-features = false} -sp-consensus-slots = { path = "../slots", default-features = false} -sp-inherents = { path = "../../inherents", default-features = false} -sp-runtime = { path = "../../runtime", default-features = false} -sp-std = { path = "../../std", default-features = false} -sp-timestamp = { path = "../../timestamp", default-features = false} +sp-api = { path = "../../api", default-features = false } +sp-application-crypto = { path = "../../application-crypto", default-features = false } +sp-consensus-slots = { path = "../slots", default-features = false } +sp-inherents = { path = "../../inherents", default-features = false } +sp-runtime = { path = "../../runtime", default-features = false } +sp-std = { path = "../../std", default-features = false } +sp-timestamp = { path = "../../timestamp", default-features = false } [features] -default = [ "std" ] +default = ["std"] std = [ "async-trait", "codec/std", diff --git a/substrate/primitives/consensus/babe/Cargo.toml b/substrate/primitives/consensus/babe/Cargo.toml index f952a8ab4ee..6ec50ea022b 100644 --- a/substrate/primitives/consensus/babe/Cargo.toml +++ b/substrate/primitives/consensus/babe/Cargo.toml @@ -16,18 +16,18 @@ targets = ["x86_64-unknown-linux-gnu"] async-trait = { version = "0.1.57", optional = true } codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -serde = { version = "1.0.193", default-features = false, features = ["derive", "alloc"], optional = true } -sp-api = { path = "../../api", default-features = false} -sp-application-crypto = { path = "../../application-crypto", default-features = false} -sp-consensus-slots = { path = "../slots", default-features = false} -sp-core = { path = "../../core", default-features = false} -sp-inherents = { path = "../../inherents", default-features = false} -sp-runtime = { path = "../../runtime", default-features = false} -sp-std = { path = "../../std", default-features = false} -sp-timestamp = { path = "../../timestamp", optional = true} +serde = { version = "1.0.193", default-features = false, features = ["alloc", "derive"], optional = true } +sp-api = { path = "../../api", default-features = false } +sp-application-crypto = { path = "../../application-crypto", default-features = false } +sp-consensus-slots = { path = "../slots", default-features = false } +sp-core = { path = "../../core", default-features = false } +sp-inherents = { path = "../../inherents", default-features = false } +sp-runtime = { path = 
"../../runtime", default-features = false } +sp-std = { path = "../../std", default-features = false } +sp-timestamp = { path = "../../timestamp", optional = true } [features] -default = [ "std" ] +default = ["std"] std = [ "async-trait", "codec/std", diff --git a/substrate/primitives/consensus/beefy/Cargo.toml b/substrate/primitives/consensus/beefy/Cargo.toml index 67c6ad57e2d..916125d783d 100644 --- a/substrate/primitives/consensus/beefy/Cargo.toml +++ b/substrate/primitives/consensus/beefy/Cargo.toml @@ -14,7 +14,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -serde = { version = "1.0.193", default-features = false, optional = true, features = ["derive", "alloc"] } +serde = { version = "1.0.193", default-features = false, optional = true, features = ["alloc", "derive"] } sp-api = { path = "../../api", default-features = false } sp-application-crypto = { path = "../../application-crypto", default-features = false } sp-core = { path = "../../core", default-features = false } @@ -30,7 +30,7 @@ array-bytes = "6.1" w3f-bls = { version = "0.1.3", features = ["std"] } [features] -default = [ "std" ] +default = ["std"] std = [ "codec/std", "scale-info/std", diff --git a/substrate/primitives/consensus/grandpa/Cargo.toml b/substrate/primitives/consensus/grandpa/Cargo.toml index ffba6490823..1ddc89df983 100644 --- a/substrate/primitives/consensus/grandpa/Cargo.toml +++ b/substrate/primitives/consensus/grandpa/Cargo.toml @@ -18,16 +18,16 @@ codec = { package = "parity-scale-codec", version = "3.6.1", default-features = grandpa = { package = "finality-grandpa", version = "0.16.2", default-features = false, features = ["derive-codec"] } log = { version = "0.4.17", default-features = false } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -serde = { version = "1.0.193", features = ["derive", "alloc"], default-features = false, optional = true } -sp-api = { path = "../../api", default-features = false} -sp-application-crypto = { path = "../../application-crypto", default-features = false} -sp-core = { path = "../../core", default-features = false} -sp-keystore = { path = "../../keystore", default-features = false, optional = true} -sp-runtime = { path = "../../runtime", default-features = false} -sp-std = { path = "../../std", default-features = false} +serde = { version = "1.0.193", features = ["alloc", "derive"], default-features = false, optional = true } +sp-api = { path = "../../api", default-features = false } +sp-application-crypto = { path = "../../application-crypto", default-features = false } +sp-core = { path = "../../core", default-features = false } +sp-keystore = { path = "../../keystore", default-features = false, optional = true } +sp-runtime = { path = "../../runtime", default-features = false } +sp-std = { path = "../../std", default-features = false } [features] -default = [ "std" ] +default = ["std"] std = [ "codec/std", "grandpa/std", diff --git a/substrate/primitives/consensus/pow/Cargo.toml b/substrate/primitives/consensus/pow/Cargo.toml index cc4e961dbb6..5e134eb2a29 100644 --- a/substrate/primitives/consensus/pow/Cargo.toml +++ b/substrate/primitives/consensus/pow/Cargo.toml @@ -14,13 +14,13 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, 
features = ["derive"] } -sp-api = { path = "../../api", default-features = false} -sp-core = { path = "../../core", default-features = false} -sp-runtime = { path = "../../runtime", default-features = false} -sp-std = { path = "../../std", default-features = false} +sp-api = { path = "../../api", default-features = false } +sp-core = { path = "../../core", default-features = false } +sp-runtime = { path = "../../runtime", default-features = false } +sp-std = { path = "../../std", default-features = false } [features] -default = [ "std" ] +default = ["std"] std = [ "codec/std", "sp-api/std", diff --git a/substrate/primitives/consensus/sassafras/Cargo.toml b/substrate/primitives/consensus/sassafras/Cargo.toml index 2c5971e919e..67f09e2b904 100644 --- a/substrate/primitives/consensus/sassafras/Cargo.toml +++ b/substrate/primitives/consensus/sassafras/Cargo.toml @@ -26,7 +26,7 @@ sp-runtime = { default-features = false, path = "../../runtime" } sp-std = { default-features = false, path = "../../std" } [features] -default = [ "std" ] +default = ["std"] std = [ "scale-codec/std", "scale-info/std", diff --git a/substrate/primitives/consensus/slots/Cargo.toml b/substrate/primitives/consensus/slots/Cargo.toml index aa899d86e72..12940583757 100644 --- a/substrate/primitives/consensus/slots/Cargo.toml +++ b/substrate/primitives/consensus/slots/Cargo.toml @@ -15,12 +15,12 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive", "max-encoded-len"] } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -serde = { version = "1.0", default-features = false, features = ["derive", "alloc"], optional = true } -sp-std = { path = "../../std", default-features = false} -sp-timestamp = { path = "../../timestamp", default-features = false} +serde = { version = "1.0", default-features = false, features = ["alloc", "derive"], optional = true } +sp-std = { path = "../../std", default-features = false } +sp-timestamp = { path = "../../timestamp", default-features = false } [features] -default = [ "std" ] +default = ["std"] std = [ "codec/std", "scale-info/std", @@ -30,4 +30,4 @@ std = [ ] # Serde support without relying on std features. 
-serde = [ "dep:serde", "scale-info/serde" ] +serde = ["dep:serde", "scale-info/serde"] diff --git a/substrate/primitives/core/Cargo.toml b/substrate/primitives/core/Cargo.toml index fe82a2cd62e..9c556c07736 100644 --- a/substrate/primitives/core/Cargo.toml +++ b/substrate/primitives/core/Cargo.toml @@ -13,27 +13,27 @@ documentation = "https://docs.rs/sp-core" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive","max-encoded-len"] } +codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive", "max-encoded-len"] } scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } log = { version = "0.4.17", default-features = false } -serde = { version = "1.0.193", optional = true, default-features = false, features = ["derive", "alloc"] } +serde = { version = "1.0.193", optional = true, default-features = false, features = ["alloc", "derive"] } bounded-collections = { version = "0.1.8", default-features = false } primitive-types = { version = "0.12.0", default-features = false, features = ["codec", "scale-info"] } impl-serde = { version = "0.4.0", default-features = false, optional = true } hash-db = { version = "0.16.0", default-features = false } hash256-std-hasher = { version = "0.15.2", default-features = false } bs58 = { version = "0.5.0", default-features = false, optional = true } -rand = { version = "0.8.5", features = ["small_rng"], optional = true } +rand = { version = "0.8.5", features = ["small_rng"], optional = true } substrate-bip39 = { version = "0.4.4", optional = true } bip39 = { version = "2.0.0", default-features = false } zeroize = { version = "1.4.3", default-features = false } secrecy = { version = "0.8.0", default-features = false } parking_lot = { version = "0.12.1", optional = true } ss58-registry = { version = "1.34.0", default-features = false } -sp-std = { path = "../std", default-features = false} -sp-debug-derive = { path = "../debug-derive", default-features = false} -sp-storage = { path = "../storage", default-features = false} -sp-externalities = { path = "../externalities", optional = true} +sp-std = { path = "../std", default-features = false } +sp-debug-derive = { path = "../debug-derive", default-features = false } +sp-storage = { path = "../storage", default-features = false } +sp-externalities = { path = "../externalities", optional = true } futures = { version = "0.3.21", optional = true } dyn-clonable = { version = "0.9.0", optional = true } thiserror = { version = "1.0.48", optional = true } @@ -49,12 +49,12 @@ blake2 = { version = "0.10.4", default-features = false, optional = true } libsecp256k1 = { version = "0.7", default-features = false, features = ["static-context"], optional = true } schnorrkel = { version = "0.9.1", features = ["preaudit_deprecated", "u64_backend"], default-features = false } merlin = { version = "2.0", default-features = false } -secp256k1 = { version = "0.28.0", default-features = false, features = ["recovery", "alloc"], optional = true } +secp256k1 = { version = "0.28.0", default-features = false, features = ["alloc", "recovery"], optional = true } sp-core-hashing = { path = "hashing", default-features = false, optional = true } -sp-runtime-interface = { path = "../runtime-interface", default-features = false} +sp-runtime-interface = { path = "../runtime-interface", default-features = false } # bls crypto -w3f-bls = { version = "0.1.3", 
default-features = false, optional = true} +w3f-bls = { version = "0.1.3", default-features = false, optional = true } # bandersnatch crypto bandersnatch_vrfs = { git = "https://github.com/w3f/ring-vrf", rev = "3ddc205", default-features = false, optional = true } @@ -73,7 +73,7 @@ harness = false bench = false [features] -default = [ "std" ] +default = ["std"] std = [ "array-bytes", "bandersnatch_vrfs?/std", @@ -153,9 +153,9 @@ full_crypto = [ # This feature adds BLS crypto primitives. # It should not be used in production since the implementation and interface may still # be subject to significant changes. -bls-experimental = [ "w3f-bls" ] +bls-experimental = ["w3f-bls"] # This feature adds Bandersnatch crypto primitives. # It should not be used in production since the implementation and interface may still # be subject to significant changes. -bandersnatch-experimental = [ "bandersnatch_vrfs" ] +bandersnatch-experimental = ["bandersnatch_vrfs"] diff --git a/substrate/primitives/core/hashing/Cargo.toml b/substrate/primitives/core/hashing/Cargo.toml index bd22bd79e7d..7b4f4bc7438 100644 --- a/substrate/primitives/core/hashing/Cargo.toml +++ b/substrate/primitives/core/hashing/Cargo.toml @@ -21,7 +21,7 @@ sha3 = { version = "0.10.0", default-features = false } twox-hash = { version = "1.6.3", default-features = false, features = ["digest_0_10"] } [features] -default = [ "std" ] +default = ["std"] std = [ "blake2b_simd/std", "byteorder/std", diff --git a/substrate/primitives/core/hashing/proc-macro/Cargo.toml b/substrate/primitives/core/hashing/proc-macro/Cargo.toml index 187b5559b93..e34e20472bf 100644 --- a/substrate/primitives/core/hashing/proc-macro/Cargo.toml +++ b/substrate/primitives/core/hashing/proc-macro/Cargo.toml @@ -18,4 +18,4 @@ proc-macro = true [dependencies] quote = "1.0.28" syn = { version = "2.0.38", features = ["full", "parsing"] } -sp-core-hashing = { path = "..", default-features = false} +sp-core-hashing = { path = "..", default-features = false } diff --git a/substrate/primitives/crypto/ec-utils/Cargo.toml b/substrate/primitives/crypto/ec-utils/Cargo.toml index 1484406e7b2..548328fec3c 100644 --- a/substrate/primitives/crypto/ec-utils/Cargo.toml +++ b/substrate/primitives/crypto/ec-utils/Cargo.toml @@ -28,7 +28,7 @@ sp-runtime-interface = { path = "../../runtime-interface", default-features = fa sp-std = { path = "../../std", default-features = false, optional = true } [features] -default = [ "std" ] +default = ["std"] std = [ "ark-bls12-377-ext?/std", "ark-bls12-377?/std", @@ -46,11 +46,11 @@ std = [ "sp-runtime-interface?/std", "sp-std?/std", ] -common = [ "ark-ec", "ark-scale", "sp-runtime-interface", "sp-std" ] -bls12-377 = [ "ark-bls12-377", "ark-bls12-377-ext", "common" ] -bls12-381 = [ "ark-bls12-381", "ark-bls12-381-ext", "common" ] -bw6-761 = [ "ark-bw6-761", "ark-bw6-761-ext", "common" ] -ed-on-bls12-377 = [ "ark-ed-on-bls12-377", "ark-ed-on-bls12-377-ext", "common" ] +common = ["ark-ec", "ark-scale", "sp-runtime-interface", "sp-std"] +bls12-377 = ["ark-bls12-377", "ark-bls12-377-ext", "common"] +bls12-381 = ["ark-bls12-381", "ark-bls12-381-ext", "common"] +bw6-761 = ["ark-bw6-761", "ark-bw6-761-ext", "common"] +ed-on-bls12-377 = ["ark-ed-on-bls12-377", "ark-ed-on-bls12-377-ext", "common"] ed-on-bls12-381-bandersnatch = [ "ark-ed-on-bls12-381-bandersnatch", "ark-ed-on-bls12-381-bandersnatch-ext", diff --git a/substrate/primitives/debug-derive/Cargo.toml b/substrate/primitives/debug-derive/Cargo.toml index c97c8a0a399..217bcfa98d5 100644 --- 
a/substrate/primitives/debug-derive/Cargo.toml +++ b/substrate/primitives/debug-derive/Cargo.toml @@ -22,7 +22,7 @@ syn = "2.0.38" proc-macro2 = "1.0.56" [features] -default = [ "std" ] +default = ["std"] std = [] # By default `RuntimeDebug` implements `Debug` that outputs `` when `std` is # disabled. However, sometimes downstream users need to have the real `Debug` implementation for diff --git a/substrate/primitives/externalities/Cargo.toml b/substrate/primitives/externalities/Cargo.toml index 417eb363867..86d31c31cba 100644 --- a/substrate/primitives/externalities/Cargo.toml +++ b/substrate/primitives/externalities/Cargo.toml @@ -16,9 +16,9 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } environmental = { version = "1.1.3", default-features = false } -sp-std = { path = "../std", default-features = false} -sp-storage = { path = "../storage", default-features = false} +sp-std = { path = "../std", default-features = false } +sp-storage = { path = "../storage", default-features = false } [features] -default = [ "std" ] -std = [ "codec/std", "environmental/std", "sp-std/std", "sp-storage/std" ] +default = ["std"] +std = ["codec/std", "environmental/std", "sp-std/std", "sp-storage/std"] diff --git a/substrate/primitives/genesis-builder/Cargo.toml b/substrate/primitives/genesis-builder/Cargo.toml index cf7ce995711..00b3bc876ac 100644 --- a/substrate/primitives/genesis-builder/Cargo.toml +++ b/substrate/primitives/genesis-builder/Cargo.toml @@ -13,11 +13,11 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -sp-api = { path = "../api", default-features = false} -sp-runtime = { path = "../runtime", default-features = false} -sp-std = { path = "../std", default-features = false} +sp-api = { path = "../api", default-features = false } +sp-runtime = { path = "../runtime", default-features = false } +sp-std = { path = "../std", default-features = false } serde_json = { version = "1.0.108", default-features = false, features = ["alloc"] } [features] -default = [ "std" ] -std = [ "serde_json/std", "sp-api/std", "sp-runtime/std", "sp-std/std" ] +default = ["std"] +std = ["serde_json/std", "sp-api/std", "sp-runtime/std", "sp-std/std"] diff --git a/substrate/primitives/inherents/Cargo.toml b/substrate/primitives/inherents/Cargo.toml index 4a511c653fd..2b5bad5d746 100644 --- a/substrate/primitives/inherents/Cargo.toml +++ b/substrate/primitives/inherents/Cargo.toml @@ -19,14 +19,14 @@ codec = { package = "parity-scale-codec", version = "3.6.1", default-features = scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } impl-trait-for-tuples = "0.2.2" thiserror = { version = "1.0.48", optional = true } -sp-runtime = { path = "../runtime", default-features = false, optional = true} -sp-std = { path = "../std", default-features = false} +sp-runtime = { path = "../runtime", default-features = false, optional = true } +sp-std = { path = "../std", default-features = false } [dev-dependencies] futures = "0.3.21" [features] -default = [ "std" ] +default = ["std"] std = [ "async-trait", "codec/std", diff --git a/substrate/primitives/io/Cargo.toml b/substrate/primitives/io/Cargo.toml index 59df8895bb7..e95a9302d24 100644 --- a/substrate/primitives/io/Cargo.toml +++ b/substrate/primitives/io/Cargo.toml @@ -18,19 +18,19 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] bytes = { version = "1.1.0", default-features = false } codec = { package = 
"parity-scale-codec", version = "3.6.1", default-features = false, features = ["bytes"] } -sp-core = { path = "../core", default-features = false} -sp-keystore = { path = "../keystore", default-features = false, optional = true} -sp-std = { path = "../std", default-features = false} +sp-core = { path = "../core", default-features = false } +sp-keystore = { path = "../keystore", default-features = false, optional = true } +sp-std = { path = "../std", default-features = false } libsecp256k1 = { version = "0.7", optional = true } -sp-state-machine = { path = "../state-machine", default-features = false, optional = true} -sp-runtime-interface = { path = "../runtime-interface", default-features = false} -sp-trie = { path = "../trie", default-features = false, optional = true} -sp-externalities = { path = "../externalities", default-features = false} -sp-tracing = { path = "../tracing", default-features = false} +sp-state-machine = { path = "../state-machine", default-features = false, optional = true } +sp-runtime-interface = { path = "../runtime-interface", default-features = false } +sp-trie = { path = "../trie", default-features = false, optional = true } +sp-externalities = { path = "../externalities", default-features = false } +sp-tracing = { path = "../tracing", default-features = false } log = { version = "0.4.17", optional = true } -secp256k1 = { version = "0.28.0", features = ["recovery", "global-context"], optional = true } +secp256k1 = { version = "0.28.0", features = ["global-context", "recovery"], optional = true } tracing = { version = "0.1.29", default-features = false } -tracing-core = { version = "0.1.28", default-features = false} +tracing-core = { version = "0.1.28", default-features = false } # Required for backwards compatibility reason, but only used for verifying when `UseDalekExt` is set. ed25519-dalek = { version = "2.0", default-features = false, optional = true } @@ -39,7 +39,7 @@ ed25519-dalek = { version = "2.0", default-features = false, optional = true } rustversion = "1.0.6" [features] -default = [ "std" ] +default = ["std"] std = [ "bytes/std", "codec/std", @@ -60,7 +60,7 @@ std = [ "tracing/std", ] -with-tracing = [ "sp-tracing/with-tracing" ] +with-tracing = ["sp-tracing/with-tracing"] # These two features are used for `no_std` builds for the environments which already provides # `#[panic_handler]`, `#[alloc_error_handler]` and `#[global_allocator]`. @@ -92,9 +92,9 @@ improved_panic_error_reporting = [] # This feature adds BLS crypto primitives. # It should not be used in production since the implementation and interface may still # be subject to significant changes. -bls-experimental = [ "sp-keystore/bls-experimental" ] +bls-experimental = ["sp-keystore/bls-experimental"] # This feature adds Bandersnatch crypto primitives. # It should not be used in production since the implementation and interface may still # be subject to significant changes. -bandersnatch-experimental = [ "sp-keystore/bandersnatch-experimental" ] +bandersnatch-experimental = ["sp-keystore/bandersnatch-experimental"] diff --git a/substrate/primitives/keyring/Cargo.toml b/substrate/primitives/keyring/Cargo.toml index 1ab78eeed45..a504cda756e 100644 --- a/substrate/primitives/keyring/Cargo.toml +++ b/substrate/primitives/keyring/Cargo.toml @@ -23,4 +23,4 @@ sp-runtime = { path = "../runtime" } # This feature adds Bandersnatch crypto primitives. # It should not be used in production since the implementation and interface may still # be subject to significant changes. 
-bandersnatch-experimental = [ "sp-core/bandersnatch-experimental" ] +bandersnatch-experimental = ["sp-core/bandersnatch-experimental"] diff --git a/substrate/primitives/keystore/Cargo.toml b/substrate/primitives/keystore/Cargo.toml index ff7c27bf565..819b2c518a0 100644 --- a/substrate/primitives/keystore/Cargo.toml +++ b/substrate/primitives/keystore/Cargo.toml @@ -16,23 +16,23 @@ targets = ["x86_64-unknown-linux-gnu"] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } parking_lot = { version = "0.12.1", default-features = false } thiserror = "1.0" -sp-core = { path = "../core", default-features = false} -sp-externalities = { path = "../externalities", default-features = false} +sp-core = { path = "../core", default-features = false } +sp-externalities = { path = "../externalities", default-features = false } [dev-dependencies] rand = "0.7.2" rand_chacha = "0.2.2" [features] -default = [ "std" ] -std = [ "codec/std", "sp-core/std", "sp-externalities/std" ] +default = ["std"] +std = ["codec/std", "sp-core/std", "sp-externalities/std"] # This feature adds BLS crypto primitives. # It should not be used in production since the implementation and interface may still # be subject to significant changes. -bls-experimental = [ "sp-core/bls-experimental" ] +bls-experimental = ["sp-core/bls-experimental"] # This feature adds Bandersnatch crypto primitives. # It should not be used in production since the implementation and interface may still # be subject to significant changes. -bandersnatch-experimental = [ "sp-core/bandersnatch-experimental" ] +bandersnatch-experimental = ["sp-core/bandersnatch-experimental"] diff --git a/substrate/primitives/merkle-mountain-range/Cargo.toml b/substrate/primitives/merkle-mountain-range/Cargo.toml index 89e03f1cabf..82a00935b30 100644 --- a/substrate/primitives/merkle-mountain-range/Cargo.toml +++ b/substrate/primitives/merkle-mountain-range/Cargo.toml @@ -16,19 +16,19 @@ codec = { package = "parity-scale-codec", version = "3.6.1", default-features = scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } log = { version = "0.4.17", default-features = false } mmr-lib = { package = "ckb-merkle-mountain-range", version = "0.5.2", default-features = false } -serde = { version = "1.0.193", features = ["derive", "alloc"], default-features = false, optional = true } -sp-api = { path = "../api", default-features = false} -sp-core = { path = "../core", default-features = false} -sp-debug-derive = { path = "../debug-derive", default-features = false} -sp-runtime = { path = "../runtime", default-features = false} -sp-std = { path = "../std", default-features = false} +serde = { version = "1.0.193", features = ["alloc", "derive"], default-features = false, optional = true } +sp-api = { path = "../api", default-features = false } +sp-core = { path = "../core", default-features = false } +sp-debug-derive = { path = "../debug-derive", default-features = false } +sp-runtime = { path = "../runtime", default-features = false } +sp-std = { path = "../std", default-features = false } thiserror = "1.0" [dev-dependencies] array-bytes = "6.1" [features] -default = [ "std" ] +default = ["std"] std = [ "codec/std", "log/std", @@ -43,4 +43,4 @@ std = [ ] # Serde support without relying on std features. 
-serde = [ "dep:serde", "scale-info/serde", "sp-core/serde", "sp-runtime/serde" ] +serde = ["dep:serde", "scale-info/serde", "sp-core/serde", "sp-runtime/serde"] diff --git a/substrate/primitives/metadata-ir/Cargo.toml b/substrate/primitives/metadata-ir/Cargo.toml index 77c21b920f2..f73a1d7b380 100644 --- a/substrate/primitives/metadata-ir/Cargo.toml +++ b/substrate/primitives/metadata-ir/Cargo.toml @@ -16,8 +16,8 @@ targets = ["x86_64-unknown-linux-gnu"] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } frame-metadata = { version = "16.0.0", default-features = false, features = ["current"] } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -sp-std = { path = "../std", default-features = false} +sp-std = { path = "../std", default-features = false } [features] -default = [ "std" ] -std = [ "codec/std", "frame-metadata/std", "scale-info/std", "sp-std/std" ] +default = ["std"] +std = ["codec/std", "frame-metadata/std", "scale-info/std", "sp-std/std"] diff --git a/substrate/primitives/mixnet/Cargo.toml b/substrate/primitives/mixnet/Cargo.toml index bc6878086cf..a03fdab8741 100644 --- a/substrate/primitives/mixnet/Cargo.toml +++ b/substrate/primitives/mixnet/Cargo.toml @@ -20,7 +20,7 @@ sp-application-crypto = { default-features = false, path = "../application-crypt sp-std = { default-features = false, path = "../std" } [features] -default = [ "std" ] +default = ["std"] std = [ "codec/std", "scale-info/std", diff --git a/substrate/primitives/npos-elections/Cargo.toml b/substrate/primitives/npos-elections/Cargo.toml index 44a53e856ba..1ab6c2adf82 100644 --- a/substrate/primitives/npos-elections/Cargo.toml +++ b/substrate/primitives/npos-elections/Cargo.toml @@ -15,18 +15,18 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -serde = { version = "1.0.193", default-features = false, features = ["derive", "alloc"], optional = true } -sp-arithmetic = { path = "../arithmetic", default-features = false} -sp-core = { path = "../core", default-features = false} -sp-runtime = { path = "../runtime", default-features = false} -sp-std = { path = "../std", default-features = false} +serde = { version = "1.0.193", default-features = false, features = ["alloc", "derive"], optional = true } +sp-arithmetic = { path = "../arithmetic", default-features = false } +sp-core = { path = "../core", default-features = false } +sp-runtime = { path = "../runtime", default-features = false } +sp-std = { path = "../std", default-features = false } [dev-dependencies] rand = "0.8.5" substrate-test-utils = { path = "../../test-utils" } [features] -default = [ "std" ] +default = ["std"] bench = [] std = [ "codec/std", diff --git a/substrate/primitives/npos-elections/fuzzer/Cargo.toml b/substrate/primitives/npos-elections/fuzzer/Cargo.toml index 8bba69a7543..bd1fa856813 100644 --- a/substrate/primitives/npos-elections/fuzzer/Cargo.toml +++ b/substrate/primitives/npos-elections/fuzzer/Cargo.toml @@ -16,7 +16,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] clap = { version = "4.4.10", features = ["derive"] } honggfuzz = "0.5" -rand = { version = "0.8", features = ["std", "small_rng"] } +rand = { version = "0.8", features = ["small_rng", "std"] } sp-npos-elections = { path = ".." 
} sp-runtime = { path = "../../runtime" } diff --git a/substrate/primitives/offchain/Cargo.toml b/substrate/primitives/offchain/Cargo.toml index 5f8821b43c7..201e75802cf 100644 --- a/substrate/primitives/offchain/Cargo.toml +++ b/substrate/primitives/offchain/Cargo.toml @@ -13,10 +13,10 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -sp-api = { path = "../api", default-features = false} -sp-core = { path = "../core", default-features = false} -sp-runtime = { path = "../runtime", default-features = false} +sp-api = { path = "../api", default-features = false } +sp-core = { path = "../core", default-features = false } +sp-runtime = { path = "../runtime", default-features = false } [features] -default = [ "std" ] -std = [ "sp-api/std", "sp-core/std", "sp-runtime/std" ] +default = ["std"] +std = ["sp-api/std", "sp-core/std", "sp-runtime/std"] diff --git a/substrate/primitives/runtime-interface/Cargo.toml b/substrate/primitives/runtime-interface/Cargo.toml index 69a0d112a16..80565420f6b 100644 --- a/substrate/primitives/runtime-interface/Cargo.toml +++ b/substrate/primitives/runtime-interface/Cargo.toml @@ -16,14 +16,14 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] bytes = { version = "1.1.0", default-features = false } sp-wasm-interface = { path = "../wasm-interface", default-features = false } -sp-std = { path = "../std", default-features = false} -sp-tracing = { path = "../tracing", default-features = false} +sp-std = { path = "../std", default-features = false } +sp-tracing = { path = "../tracing", default-features = false } sp-runtime-interface-proc-macro = { path = "proc-macro" } -sp-externalities = { path = "../externalities", default-features = false} +sp-externalities = { path = "../externalities", default-features = false } codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["bytes"] } static_assertions = "1.0.0" primitive-types = { version = "0.12.0", default-features = false } -sp-storage = { path = "../storage", default-features = false} +sp-storage = { path = "../storage", default-features = false } impl-trait-for-tuples = "0.2.2" [dev-dependencies] @@ -35,7 +35,7 @@ rustversion = "1.0.6" trybuild = "1.0.74" [features] -default = [ "std" ] +default = ["std"] std = [ "bytes/std", "codec/std", diff --git a/substrate/primitives/runtime-interface/proc-macro/Cargo.toml b/substrate/primitives/runtime-interface/proc-macro/Cargo.toml index 11236de91ce..6a06bb95654 100644 --- a/substrate/primitives/runtime-interface/proc-macro/Cargo.toml +++ b/substrate/primitives/runtime-interface/proc-macro/Cargo.toml @@ -21,4 +21,4 @@ proc-macro-crate = "1.1.3" proc-macro2 = "1.0.56" quote = "1.0.28" expander = "2.0.0" -syn = { version = "2.0.38", features = ["full", "visit", "fold", "extra-traits"] } +syn = { version = "2.0.38", features = ["extra-traits", "fold", "full", "visit"] } diff --git a/substrate/primitives/runtime-interface/test-wasm-deprecated/Cargo.toml b/substrate/primitives/runtime-interface/test-wasm-deprecated/Cargo.toml index 8e06aac851f..07c820c0601 100644 --- a/substrate/primitives/runtime-interface/test-wasm-deprecated/Cargo.toml +++ b/substrate/primitives/runtime-interface/test-wasm-deprecated/Cargo.toml @@ -13,15 +13,15 @@ publish = false targets = ["x86_64-unknown-linux-gnu"] [dependencies] -sp-core = { path = "../../core", default-features = false} -sp-io = { path = "../../io", default-features = false} -sp-runtime-interface = { path = "..", default-features = false} +sp-core = { path = 
"../../core", default-features = false } +sp-io = { path = "../../io", default-features = false } +sp-runtime-interface = { path = "..", default-features = false } [build-dependencies] substrate-wasm-builder = { path = "../../../utils/wasm-builder", optional = true } [features] -default = [ "std" ] +default = ["std"] std = [ "sp-core/std", "sp-io/std", diff --git a/substrate/primitives/runtime-interface/test-wasm/Cargo.toml b/substrate/primitives/runtime-interface/test-wasm/Cargo.toml index 7729f89fa39..79e79857341 100644 --- a/substrate/primitives/runtime-interface/test-wasm/Cargo.toml +++ b/substrate/primitives/runtime-interface/test-wasm/Cargo.toml @@ -14,16 +14,16 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] bytes = { version = "1.1.0", default-features = false } -sp-core = { path = "../../core", default-features = false} -sp-io = { path = "../../io", default-features = false} -sp-runtime-interface = { path = "..", default-features = false} -sp-std = { path = "../../std", default-features = false} +sp-core = { path = "../../core", default-features = false } +sp-io = { path = "../../io", default-features = false } +sp-runtime-interface = { path = "..", default-features = false } +sp-std = { path = "../../std", default-features = false } [build-dependencies] substrate-wasm-builder = { path = "../../../utils/wasm-builder", optional = true } [features] -default = [ "std" ] +default = ["std"] std = [ "bytes/std", "sp-core/std", diff --git a/substrate/primitives/runtime/Cargo.toml b/substrate/primitives/runtime/Cargo.toml index 9e2dc8b0265..827ccdbddbb 100644 --- a/substrate/primitives/runtime/Cargo.toml +++ b/substrate/primitives/runtime/Cargo.toml @@ -22,13 +22,13 @@ log = { version = "0.4.17", default-features = false } paste = "1.0" rand = { version = "0.8.5", optional = true } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -serde = { version = "1.0.193", default-features = false, features = ["derive", "alloc"], optional = true } -sp-application-crypto = { path = "../application-crypto", default-features = false} -sp-arithmetic = { path = "../arithmetic", default-features = false} -sp-core = { path = "../core", default-features = false} -sp-io = { path = "../io", default-features = false} -sp-std = { path = "../std", default-features = false} -sp-weights = { path = "../weights", default-features = false} +serde = { version = "1.0.193", default-features = false, features = ["alloc", "derive"], optional = true } +sp-application-crypto = { path = "../application-crypto", default-features = false } +sp-arithmetic = { path = "../arithmetic", default-features = false } +sp-core = { path = "../core", default-features = false } +sp-io = { path = "../io", default-features = false } +sp-std = { path = "../std", default-features = false } +sp-weights = { path = "../weights", default-features = false } docify = { version = "0.2.6" } simple-mermaid = { git = "https://github.com/kianenigma/simple-mermaid.git", branch = "main" } @@ -45,7 +45,7 @@ substrate-test-runtime-client = { path = "../../test-utils/runtime/client" } [features] runtime-benchmarks = [] try-runtime = [] -default = [ "std" ] +default = ["std"] std = [ "codec/std", "either/use_std", diff --git a/substrate/primitives/session/Cargo.toml b/substrate/primitives/session/Cargo.toml index 4c11762ffb7..b7e43f97300 100644 --- a/substrate/primitives/session/Cargo.toml +++ b/substrate/primitives/session/Cargo.toml @@ -15,15 +15,15 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { 
package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -sp-api = { path = "../api", default-features = false} -sp-core = { path = "../core", default-features = false} -sp-runtime = { path = "../runtime", optional = true} -sp-staking = { path = "../staking", default-features = false} -sp-std = { path = "../std", default-features = false} +sp-api = { path = "../api", default-features = false } +sp-core = { path = "../core", default-features = false } +sp-runtime = { path = "../runtime", optional = true } +sp-staking = { path = "../staking", default-features = false } +sp-std = { path = "../std", default-features = false } sp-keystore = { path = "../keystore", optional = true } [features] -default = [ "std" ] +default = ["std"] std = [ "codec/std", "scale-info/std", diff --git a/substrate/primitives/staking/Cargo.toml b/substrate/primitives/staking/Cargo.toml index bc69e2f61c1..f52bf3316db 100644 --- a/substrate/primitives/staking/Cargo.toml +++ b/substrate/primitives/staking/Cargo.toml @@ -13,17 +13,17 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -serde = { version = "1.0.193", default-features = false, features = ["derive", "alloc"], optional = true } +serde = { version = "1.0.193", default-features = false, features = ["alloc", "derive"], optional = true } codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } impl-trait-for-tuples = "0.2.2" -sp-core = { path = "../core", default-features = false} -sp-runtime = { path = "../runtime", default-features = false} -sp-std = { path = "../std", default-features = false} +sp-core = { path = "../core", default-features = false } +sp-runtime = { path = "../runtime", default-features = false } +sp-std = { path = "../std", default-features = false } [features] -default = [ "std" ] +default = ["std"] std = [ "codec/std", "scale-info/std", @@ -32,4 +32,4 @@ std = [ "sp-runtime/std", "sp-std/std", ] -runtime-benchmarks = [ "sp-runtime/runtime-benchmarks" ] +runtime-benchmarks = ["sp-runtime/runtime-benchmarks"] diff --git a/substrate/primitives/state-machine/Cargo.toml b/substrate/primitives/state-machine/Cargo.toml index ec5d9b5ea14..ab07d83af6a 100644 --- a/substrate/primitives/state-machine/Cargo.toml +++ b/substrate/primitives/state-machine/Cargo.toml @@ -22,11 +22,11 @@ rand = { version = "0.8.5", optional = true } smallvec = "1.11.0" thiserror = { version = "1.0.48", optional = true } tracing = { version = "0.1.29", optional = true } -sp-core = { path = "../core", default-features = false} -sp-externalities = { path = "../externalities", default-features = false} -sp-panic-handler = { path = "../panic-handler", optional = true} -sp-std = { path = "../std", default-features = false} -sp-trie = { path = "../trie", default-features = false} +sp-core = { path = "../core", default-features = false } +sp-externalities = { path = "../externalities", default-features = false } +sp-panic-handler = { path = "../panic-handler", optional = true } +sp-std = { path = "../std", default-features = false } +sp-trie = { path = "../trie", default-features = false } trie-db = { version = "0.28.0", default-features = false } [dev-dependencies] @@ -37,7 +37,7 @@ sp-runtime = { path = "../runtime" } assert_matches = "1.5" [features] -default = [ "std" ] +default = ["std"] 
std = [ "codec/std", "hash-db/std", diff --git a/substrate/primitives/statement-store/Cargo.toml b/substrate/primitives/statement-store/Cargo.toml index 658229cef22..089af92f062 100644 --- a/substrate/primitives/statement-store/Cargo.toml +++ b/substrate/primitives/statement-store/Cargo.toml @@ -15,13 +15,13 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -sp-core = { path = "../core", default-features = false} -sp-runtime = { path = "../runtime", default-features = false} -sp-std = { path = "../std", default-features = false} -sp-api = { path = "../api", default-features = false} -sp-application-crypto = { path = "../application-crypto", default-features = false} -sp-runtime-interface = { path = "../runtime-interface", default-features = false} -sp-externalities = { path = "../externalities", default-features = false} +sp-core = { path = "../core", default-features = false } +sp-runtime = { path = "../runtime", default-features = false } +sp-std = { path = "../std", default-features = false } +sp-api = { path = "../api", default-features = false } +sp-application-crypto = { path = "../application-crypto", default-features = false } +sp-runtime-interface = { path = "../runtime-interface", default-features = false } +sp-externalities = { path = "../externalities", default-features = false } thiserror = { version = "1.0", optional = true } # ECIES dependencies @@ -31,10 +31,10 @@ curve25519-dalek = { version = "4.0.0", optional = true } aes-gcm = { version = "0.10", optional = true } hkdf = { version = "0.12.0", optional = true } sha2 = { version = "0.10.7", optional = true } -rand = { version = "0.8.5", features = ["small_rng"], optional = true } +rand = { version = "0.8.5", features = ["small_rng"], optional = true } [features] -default = [ "std" ] +default = ["std"] std = [ "aes-gcm", "aes-gcm?/std", diff --git a/substrate/primitives/std/Cargo.toml b/substrate/primitives/std/Cargo.toml index 2283a4a97a4..eae37c6dfe3 100644 --- a/substrate/primitives/std/Cargo.toml +++ b/substrate/primitives/std/Cargo.toml @@ -14,5 +14,5 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [features] -default = [ "std" ] +default = ["std"] std = [] diff --git a/substrate/primitives/storage/Cargo.toml b/substrate/primitives/storage/Cargo.toml index 95fa27e9cfb..b7ff48cdd63 100644 --- a/substrate/primitives/storage/Cargo.toml +++ b/substrate/primitives/storage/Cargo.toml @@ -17,12 +17,12 @@ targets = ["x86_64-unknown-linux-gnu"] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } impl-serde = { version = "0.4.0", optional = true, default-features = false } ref-cast = "1.0.0" -serde = { version = "1.0.193", default-features = false, features = ["derive", "alloc"], optional = true } -sp-debug-derive = { path = "../debug-derive", default-features = false} -sp-std = { path = "../std", default-features = false} +serde = { version = "1.0.193", default-features = false, features = ["alloc", "derive"], optional = true } +sp-debug-derive = { path = "../debug-derive", default-features = false } +sp-std = { path = "../std", default-features = false } [features] -default = [ "std" ] +default = ["std"] std = [ "codec/std", "impl-serde/std", @@ -32,4 +32,4 @@ std = [ ] # Serde support without relying on std features. 
-serde = [ "dep:serde", "impl-serde" ] +serde = ["dep:serde", "impl-serde"] diff --git a/substrate/primitives/test-primitives/Cargo.toml b/substrate/primitives/test-primitives/Cargo.toml index cacc3d86d17..0f2a399bffb 100644 --- a/substrate/primitives/test-primitives/Cargo.toml +++ b/substrate/primitives/test-primitives/Cargo.toml @@ -15,13 +15,13 @@ targets = ["x86_64-unknown-linux-gnu"] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } serde = { version = "1.0.193", default-features = false, features = ["derive"], optional = true } -sp-application-crypto = { path = "../application-crypto", default-features = false} -sp-core = { path = "../core", default-features = false} -sp-runtime = { path = "../runtime", default-features = false} -sp-std = { path = "../std", default-features = false} +sp-application-crypto = { path = "../application-crypto", default-features = false } +sp-core = { path = "../core", default-features = false } +sp-runtime = { path = "../runtime", default-features = false } +sp-std = { path = "../std", default-features = false } [features] -default = [ "std" ] +default = ["std"] std = [ "codec/std", "scale-info/std", diff --git a/substrate/primitives/timestamp/Cargo.toml b/substrate/primitives/timestamp/Cargo.toml index 44b0fdd831c..41afab0dcc2 100644 --- a/substrate/primitives/timestamp/Cargo.toml +++ b/substrate/primitives/timestamp/Cargo.toml @@ -16,12 +16,12 @@ targets = ["x86_64-unknown-linux-gnu"] async-trait = { version = "0.1.57", optional = true } codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } thiserror = { version = "1.0.48", optional = true } -sp-inherents = { path = "../inherents", default-features = false} -sp-runtime = { path = "../runtime", default-features = false} -sp-std = { path = "../std", default-features = false} +sp-inherents = { path = "../inherents", default-features = false } +sp-runtime = { path = "../runtime", default-features = false } +sp-std = { path = "../std", default-features = false } [features] -default = [ "std" ] +default = ["std"] std = [ "async-trait", "codec/std", diff --git a/substrate/primitives/tracing/Cargo.toml b/substrate/primitives/tracing/Cargo.toml index 0f7e217ec38..43b3683fb06 100644 --- a/substrate/primitives/tracing/Cargo.toml +++ b/substrate/primitives/tracing/Cargo.toml @@ -15,7 +15,7 @@ default-target = "wasm32-unknown-unknown" # with the tracing enabled features = ["with-tracing"] # allowing for linux-gnu here, too, allows for `std` to show up as well -targets = ["x86_64-unknown-linux-gnu", "wasm32-unknown-unknown"] +targets = ["wasm32-unknown-unknown", "x86_64-unknown-linux-gnu"] [dependencies] sp-std = { path = "../std", default-features = false } @@ -29,8 +29,8 @@ tracing-subscriber = { version = "0.2.25", optional = true, features = [ ] } [features] -default = [ "std" ] -with-tracing = [ "codec/derive", "codec/full" ] +default = ["std"] +with-tracing = ["codec/derive", "codec/full"] std = [ "codec/std", "sp-std/std", diff --git a/substrate/primitives/transaction-pool/Cargo.toml b/substrate/primitives/transaction-pool/Cargo.toml index d1d38ffa1af..136d3200202 100644 --- a/substrate/primitives/transaction-pool/Cargo.toml +++ b/substrate/primitives/transaction-pool/Cargo.toml @@ -14,9 +14,9 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -sp-api = { path = "../api", 
default-features = false} -sp-runtime = { path = "../runtime", default-features = false} +sp-api = { path = "../api", default-features = false } +sp-runtime = { path = "../runtime", default-features = false } [features] -default = [ "std" ] -std = [ "sp-api/std", "sp-runtime/std" ] +default = ["std"] +std = ["sp-api/std", "sp-runtime/std"] diff --git a/substrate/primitives/transaction-storage-proof/Cargo.toml b/substrate/primitives/transaction-storage-proof/Cargo.toml index 5a35dd8f11f..e3bb80b2562 100644 --- a/substrate/primitives/transaction-storage-proof/Cargo.toml +++ b/substrate/primitives/transaction-storage-proof/Cargo.toml @@ -16,14 +16,14 @@ targets = ["x86_64-unknown-linux-gnu"] async-trait = { version = "0.1.57", optional = true } codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -sp-core = { path = "../core", optional = true} -sp-inherents = { path = "../inherents", default-features = false} -sp-runtime = { path = "../runtime", default-features = false} -sp-std = { path = "../std", default-features = false} -sp-trie = { path = "../trie", optional = true} +sp-core = { path = "../core", optional = true } +sp-inherents = { path = "../inherents", default-features = false } +sp-runtime = { path = "../runtime", default-features = false } +sp-std = { path = "../std", default-features = false } +sp-trie = { path = "../trie", optional = true } [features] -default = [ "std" ] +default = ["std"] std = [ "async-trait", "codec/std", diff --git a/substrate/primitives/trie/Cargo.toml b/substrate/primitives/trie/Cargo.toml index 931ccce2044..1b1e40c9458 100644 --- a/substrate/primitives/trie/Cargo.toml +++ b/substrate/primitives/trie/Cargo.toml @@ -32,8 +32,8 @@ thiserror = { version = "1.0.48", optional = true } tracing = { version = "0.1.29", optional = true } trie-db = { version = "0.28.0", default-features = false } trie-root = { version = "0.18.0", default-features = false } -sp-core = { path = "../core", default-features = false} -sp-std = { path = "../std", default-features = false} +sp-core = { path = "../core", default-features = false } +sp-std = { path = "../std", default-features = false } sp-externalities = { path = "../externalities", default-features = false } schnellru = { version = "0.2.1", optional = true } @@ -45,7 +45,7 @@ trie-standardmap = "0.16.0" sp-runtime = { path = "../runtime" } [features] -default = [ "std" ] +default = ["std"] std = [ "ahash", "codec/std", diff --git a/substrate/primitives/version/Cargo.toml b/substrate/primitives/version/Cargo.toml index 7f272ead8d9..9860ef54c2d 100644 --- a/substrate/primitives/version/Cargo.toml +++ b/substrate/primitives/version/Cargo.toml @@ -18,15 +18,15 @@ codec = { package = "parity-scale-codec", version = "3.6.1", default-features = impl-serde = { version = "0.4.0", default-features = false, optional = true } parity-wasm = { version = "0.45", optional = true } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -serde = { version = "1.0.193", default-features = false, features = ["derive", "alloc"], optional = true } +serde = { version = "1.0.193", default-features = false, features = ["alloc", "derive"], optional = true } thiserror = { version = "1.0.48", optional = true } sp-core-hashing-proc-macro = { path = "../core/hashing/proc-macro" } -sp-runtime = { path = "../runtime", default-features = false} -sp-std = { path = "../std", 
default-features = false} -sp-version-proc-macro = { path = "proc-macro", default-features = false} +sp-runtime = { path = "../runtime", default-features = false } +sp-std = { path = "../std", default-features = false } +sp-version-proc-macro = { path = "proc-macro", default-features = false } [features] -default = [ "std" ] +default = ["std"] std = [ "codec/std", "impl-serde/std", @@ -39,4 +39,4 @@ std = [ ] # Serde support without relying on std features. -serde = [ "dep:serde", "impl-serde", "sp-runtime/serde" ] +serde = ["dep:serde", "impl-serde", "sp-runtime/serde"] diff --git a/substrate/primitives/version/proc-macro/Cargo.toml b/substrate/primitives/version/proc-macro/Cargo.toml index 7fce559e3ed..88f7193f50e 100644 --- a/substrate/primitives/version/proc-macro/Cargo.toml +++ b/substrate/primitives/version/proc-macro/Cargo.toml @@ -16,10 +16,10 @@ targets = ["x86_64-unknown-linux-gnu"] proc-macro = true [dependencies] -codec = { package = "parity-scale-codec", version = "3.6.1", features = [ "derive" ] } +codec = { package = "parity-scale-codec", version = "3.6.1", features = ["derive"] } proc-macro2 = "1.0.56" quote = "1.0.28" -syn = { version = "2.0.38", features = ["full", "fold", "extra-traits", "visit"] } +syn = { version = "2.0.38", features = ["extra-traits", "fold", "full", "visit"] } [dev-dependencies] sp-version = { path = ".." } diff --git a/substrate/primitives/wasm-interface/Cargo.toml b/substrate/primitives/wasm-interface/Cargo.toml index c7413fec43c..e997f558c9d 100644 --- a/substrate/primitives/wasm-interface/Cargo.toml +++ b/substrate/primitives/wasm-interface/Cargo.toml @@ -19,9 +19,9 @@ impl-trait-for-tuples = "0.2.2" log = { version = "0.4.17", optional = true } wasmtime = { version = "8.0.1", default-features = false, optional = true } anyhow = { version = "1.0.68", optional = true } -sp-std = { path = "../std", default-features = false} +sp-std = { path = "../std", default-features = false } [features] -default = [ "std" ] -std = [ "codec/std", "log/std", "sp-std/std", "wasmtime" ] -wasmtime = [ "anyhow", "dep:wasmtime" ] +default = ["std"] +std = ["codec/std", "log/std", "sp-std/std", "wasmtime"] +wasmtime = ["anyhow", "dep:wasmtime"] diff --git a/substrate/primitives/weights/Cargo.toml b/substrate/primitives/weights/Cargo.toml index c36714a76a3..a573495ff91 100644 --- a/substrate/primitives/weights/Cargo.toml +++ b/substrate/primitives/weights/Cargo.toml @@ -15,15 +15,15 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -serde = { version = "1.0.193", default-features = false, optional = true, features = ["derive", "alloc"] } +serde = { version = "1.0.193", default-features = false, optional = true, features = ["alloc", "derive"] } smallvec = "1.11.0" -sp-arithmetic = { path = "../arithmetic", default-features = false} -sp-core = { path = "../core", default-features = false} -sp-debug-derive = { path = "../debug-derive", default-features = false} -sp-std = { path = "../std", default-features = false} +sp-arithmetic = { path = "../arithmetic", default-features = false } +sp-core = { path = "../core", default-features = false } +sp-debug-derive = { path = "../debug-derive", default-features = false } +sp-std = { path = "../std", default-features = false } [features] -default = [ "std" ] +default = ["std"] std = [ "codec/std", "scale-info/std", @@ -35,7 +35,7 @@ 
std = [ ] # By default some types have documentation, `full-metadata-docs` allows to add documentation to # more types in the metadata. -full-metadata-docs = [ "scale-info/docs" ] +full-metadata-docs = ["scale-info/docs"] # Serde support without relying on std features. serde = [ diff --git a/substrate/scripts/ci/deny.toml b/substrate/scripts/ci/deny.toml index 1afb4a4f693..b1dbf773e31 100644 --- a/substrate/scripts/ci/deny.toml +++ b/substrate/scripts/ci/deny.toml @@ -5,7 +5,7 @@ unlicensed = "deny" # See https://spdx.org/licenses/ for list of possible licenses # [possible values: any SPDX 3.11 short identifier (+ optional exception)]. allow = [ - "MPL-2.0", + "MPL-2.0", ] # List of explicitly disallowed licenses # See https://spdx.org/licenses/ for list of possible licenses @@ -34,70 +34,70 @@ confidence-threshold = 0.8 # Allow 1 or more licenses on a per-crate basis, so that particular licenses # aren't accepted for every possible crate as with the normal allow list exceptions = [ - # Each entry is the crate and version constraint, and its specific allow list - { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "chain-spec-builder" }, - { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "mmr-gadget" }, - { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "node-bench" }, - { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "staging-node-cli" }, - { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "node-inspect" }, - { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "node-template-release" }, - { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "node-testing" }, - { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-authority-discovery" }, - { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-basic-authorship" }, - { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-block-builder" }, - { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-chain-spec" }, - { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-chain-spec-derive" }, - { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-cli" }, - { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-client-api" }, - { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-client-db" }, - { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-consensus" }, - { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-consensus-aura" }, - { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-consensus-babe" }, - { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-consensus-babe-rpc" }, - { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-consensus-beefy" }, - { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-consensus-beefy-rpc" }, - { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-consensus-epochs" }, - { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-consensus-grandpa" }, - { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-consensus-grandpa-rpc" }, - { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-consensus-manual-seal" }, - { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-consensus-pow" }, - { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-consensus-slots" }, - { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-executor" }, - { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-executor-common" }, - { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], 
name = "sc-executor-wasmi" }, - { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-executor-wasmtime" }, - { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-informant" }, - { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-keystore" }, - { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-mixnet" }, - { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-network" }, - { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-network-bitswap" }, - { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-network-common" }, - { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-network-gossip" }, - { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-network-light" }, - { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-network-sync" }, - { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-network-test" }, - { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-network-transactions" }, - { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-network-statement" }, - { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-offchain" }, - { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-peerset" }, - { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-proposer-metrics" }, - { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-rpc" }, - { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-rpc-api" }, - { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-rpc-server" }, - { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-rpc-spec-v2" }, - { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-runtime-test" }, - { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-service" }, - { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-service-test" }, - { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-state-db" }, - { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-statement-store" }, - { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-storage-monitor" }, - { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-sysinfo" }, - { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-telemetry" }, - { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-tracing" }, - { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-transaction-pool" }, - { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-transaction-pool-api" }, - { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "subkey" }, - { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "substrate" }, + # Each entry is the crate and version constraint, and its specific allow list + { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "chain-spec-builder" }, + { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "mmr-gadget" }, + { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "node-bench" }, + { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "node-inspect" }, + { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "node-template-release" }, + { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "node-testing" }, + { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-authority-discovery" }, + { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-basic-authorship" }, + { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-block-builder" }, + { allow = ["GPL-3.0 WITH 
Classpath-exception-2.0"], name = "sc-chain-spec" }, + { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-chain-spec-derive" }, + { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-cli" }, + { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-client-api" }, + { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-client-db" }, + { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-consensus" }, + { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-consensus-aura" }, + { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-consensus-babe" }, + { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-consensus-babe-rpc" }, + { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-consensus-beefy" }, + { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-consensus-beefy-rpc" }, + { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-consensus-epochs" }, + { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-consensus-grandpa" }, + { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-consensus-grandpa-rpc" }, + { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-consensus-manual-seal" }, + { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-consensus-pow" }, + { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-consensus-slots" }, + { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-executor" }, + { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-executor-common" }, + { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-executor-wasmi" }, + { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-executor-wasmtime" }, + { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-informant" }, + { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-keystore" }, + { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-mixnet" }, + { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-network" }, + { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-network-bitswap" }, + { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-network-common" }, + { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-network-gossip" }, + { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-network-light" }, + { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-network-statement" }, + { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-network-sync" }, + { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-network-test" }, + { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-network-transactions" }, + { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-offchain" }, + { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-peerset" }, + { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-proposer-metrics" }, + { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-rpc" }, + { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-rpc-api" }, + { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-rpc-server" }, + { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-rpc-spec-v2" }, + { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-runtime-test" }, + { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-service" }, + { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-service-test" }, + { allow = ["GPL-3.0 WITH 
Classpath-exception-2.0"], name = "sc-state-db" }, + { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-statement-store" }, + { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-storage-monitor" }, + { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-sysinfo" }, + { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-telemetry" }, + { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-tracing" }, + { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-transaction-pool" }, + { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-transaction-pool-api" }, + { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "staging-node-cli" }, + { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "subkey" }, + { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "substrate" }, ] # Some crates don't have (easily) machine readable licensing information, @@ -114,6 +114,6 @@ expression = "MIT AND ISC AND OpenSSL" # and the crate will be checked normally, which may produce warnings or errors # depending on the rest of your configuration license-files = [ - # Each entry is a crate relative path, and the (opaque) hash of its contents - { path = "LICENSE", hash = 0xbd0eed23 } + # Each entry is a crate relative path, and the (opaque) hash of its contents + { path = "LICENSE", hash = 0xbd0eed23 }, ] diff --git a/substrate/test-utils/Cargo.toml b/substrate/test-utils/Cargo.toml index 31bdc0f663a..17696e8229c 100644 --- a/substrate/test-utils/Cargo.toml +++ b/substrate/test-utils/Cargo.toml @@ -17,5 +17,5 @@ futures = "0.3.16" tokio = { version = "1.22.0", features = ["macros", "time"] } [dev-dependencies] -trybuild = { version = "1.0.74", features = [ "diff" ] } +trybuild = { version = "1.0.74", features = ["diff"] } sc-service = { path = "../client/service" } diff --git a/substrate/test-utils/cli/Cargo.toml b/substrate/test-utils/cli/Cargo.toml index 022db32c34f..4f20e9e2ce5 100644 --- a/substrate/test-utils/cli/Cargo.toml +++ b/substrate/test-utils/cli/Cargo.toml @@ -26,4 +26,4 @@ sc-service = { path = "../../client/service" } futures = "0.3.28" [features] -try-runtime = [ "node-cli/try-runtime" ] +try-runtime = ["node-cli/try-runtime"] diff --git a/substrate/test-utils/client/Cargo.toml b/substrate/test-utils/client/Cargo.toml index dc034de876a..9829ae531fe 100644 --- a/substrate/test-utils/client/Cargo.toml +++ b/substrate/test-utils/client/Cargo.toml @@ -22,13 +22,13 @@ serde_json = "1.0.108" sc-client-api = { path = "../../client/api" } sc-client-db = { path = "../../client/db", default-features = false, features = [ "test-helpers", -]} +] } sc-consensus = { path = "../../client/consensus/common" } sc-executor = { path = "../../client/executor" } sc-offchain = { path = "../../client/offchain" } sc-service = { path = "../../client/service", default-features = false, features = [ "test-helpers", -]} +] } sp-blockchain = { path = "../../primitives/blockchain" } sp-consensus = { path = "../../primitives/consensus/common" } sp-core = { path = "../../primitives/core" } diff --git a/substrate/test-utils/runtime/Cargo.toml b/substrate/test-utils/runtime/Cargo.toml index 88f8a1b572d..655c7f0fec1 100644 --- a/substrate/test-utils/runtime/Cargo.toml +++ b/substrate/test-utils/runtime/Cargo.toml @@ -13,37 +13,37 @@ publish = false targets = ["x86_64-unknown-linux-gnu"] [dependencies] -sp-application-crypto = { path = "../../primitives/application-crypto", default-features = false, features = ["serde"] } +sp-application-crypto = { 
path = "../../primitives/application-crypto", default-features = false, features = ["serde"] } sp-consensus-aura = { path = "../../primitives/consensus/aura", default-features = false, features = ["serde"] } sp-consensus-babe = { path = "../../primitives/consensus/babe", default-features = false, features = ["serde"] } -sp-genesis-builder = { path = "../../primitives/genesis-builder", default-features = false} -sp-block-builder = { path = "../../primitives/block-builder", default-features = false} +sp-genesis-builder = { path = "../../primitives/genesis-builder", default-features = false } +sp-block-builder = { path = "../../primitives/block-builder", default-features = false } codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -sp-inherents = { path = "../../primitives/inherents", default-features = false} -sp-keyring = { path = "../../primitives/keyring", optional = true} -sp-offchain = { path = "../../primitives/offchain", default-features = false} -sp-core = { path = "../../primitives/core", default-features = false} -sp-std = { path = "../../primitives/std", default-features = false} -sp-io = { path = "../../primitives/io", default-features = false} -frame-support = { path = "../../frame/support", default-features = false} -sp-version = { path = "../../primitives/version", default-features = false} -sp-session = { path = "../../primitives/session", default-features = false} -sp-api = { path = "../../primitives/api", default-features = false} +sp-inherents = { path = "../../primitives/inherents", default-features = false } +sp-keyring = { path = "../../primitives/keyring", optional = true } +sp-offchain = { path = "../../primitives/offchain", default-features = false } +sp-core = { path = "../../primitives/core", default-features = false } +sp-std = { path = "../../primitives/std", default-features = false } +sp-io = { path = "../../primitives/io", default-features = false } +frame-support = { path = "../../frame/support", default-features = false } +sp-version = { path = "../../primitives/version", default-features = false } +sp-session = { path = "../../primitives/session", default-features = false } +sp-api = { path = "../../primitives/api", default-features = false } sp-runtime = { path = "../../primitives/runtime", default-features = false, features = ["serde"] } -pallet-babe = { path = "../../frame/babe", default-features = false} -pallet-balances = { path = "../../frame/balances", default-features = false} -frame-executive = { path = "../../frame/executive", default-features = false} -frame-system = { path = "../../frame/system", default-features = false} -frame-system-rpc-runtime-api = { path = "../../frame/system/rpc/runtime-api", default-features = false} -pallet-timestamp = { path = "../../frame/timestamp", default-features = false} +pallet-babe = { path = "../../frame/babe", default-features = false } +pallet-balances = { path = "../../frame/balances", default-features = false } +frame-executive = { path = "../../frame/executive", default-features = false } +frame-system = { path = "../../frame/system", default-features = false } +frame-system-rpc-runtime-api = { path = "../../frame/system/rpc/runtime-api", default-features = false } +pallet-timestamp = { path = "../../frame/timestamp", default-features = false } sp-consensus-grandpa = { path = "../../primitives/consensus/grandpa", default-features = false, features = ["serde"] } 
-sp-trie = { path = "../../primitives/trie", default-features = false} -sp-transaction-pool = { path = "../../primitives/transaction-pool", default-features = false} +sp-trie = { path = "../../primitives/trie", default-features = false } +sp-transaction-pool = { path = "../../primitives/transaction-pool", default-features = false } trie-db = { version = "0.28.0", default-features = false } -sc-service = { path = "../../client/service", default-features = false, features = ["test-helpers"], optional = true} -sp-state-machine = { path = "../../primitives/state-machine", default-features = false} -sp-externalities = { path = "../../primitives/externalities", default-features = false} +sc-service = { path = "../../client/service", default-features = false, features = ["test-helpers"], optional = true } +sp-state-machine = { path = "../../primitives/state-machine", default-features = false } +sp-externalities = { path = "../../primitives/externalities", default-features = false } # 3rd party array-bytes = { version = "6.1", optional = true } @@ -65,7 +65,7 @@ serde_json = { version = "1.0.108", default-features = false, features = ["alloc substrate-wasm-builder = { path = "../../utils/wasm-builder", optional = true } [features] -default = [ "std" ] +default = ["std"] std = [ "array-bytes", @@ -108,4 +108,4 @@ std = [ "trie-db/std", ] # Special feature to disable logging -disable-logging = [ "sp-api/disable-logging" ] +disable-logging = ["sp-api/disable-logging"] diff --git a/substrate/utils/binary-merkle-tree/Cargo.toml b/substrate/utils/binary-merkle-tree/Cargo.toml index 6bb1d5f0f1e..f36e4f4e0b6 100644 --- a/substrate/utils/binary-merkle-tree/Cargo.toml +++ b/substrate/utils/binary-merkle-tree/Cargo.toml @@ -20,6 +20,6 @@ sp-core = { path = "../../primitives/core" } sp-runtime = { path = "../../primitives/runtime" } [features] -debug = [ "array-bytes", "log" ] -default = [ "debug", "std" ] -std = [ "hash-db/std", "log/std", "sp-core/std", "sp-runtime/std" ] +debug = ["array-bytes", "log"] +default = ["debug", "std"] +std = ["hash-db/std", "log/std", "sp-core/std", "sp-runtime/std"] diff --git a/substrate/utils/frame/benchmarking-cli/Cargo.toml b/substrate/utils/frame/benchmarking-cli/Cargo.toml index a6f6264b65c..b6eca132067 100644 --- a/substrate/utils/frame/benchmarking-cli/Cargo.toml +++ b/substrate/utils/frame/benchmarking-cli/Cargo.toml @@ -34,11 +34,11 @@ frame-benchmarking = { path = "../../../frame/benchmarking" } frame-support = { path = "../../../frame/support" } frame-system = { path = "../../../frame/system" } sc-block-builder = { path = "../../../client/block-builder" } -sc-cli = { path = "../../../client/cli", default-features = false} +sc-cli = { path = "../../../client/cli", default-features = false } sc-client-api = { path = "../../../client/api" } -sc-client-db = { path = "../../../client/db", default-features = false} +sc-client-db = { path = "../../../client/db", default-features = false } sc-executor = { path = "../../../client/executor" } -sc-service = { path = "../../../client/service", default-features = false} +sc-service = { path = "../../../client/service", default-features = false } sc-sysinfo = { path = "../../../client/sysinfo" } sp-api = { path = "../../../primitives/api" } sp-blockchain = { path = "../../../primitives/blockchain" } @@ -56,7 +56,7 @@ sp-wasm-interface = { path = "../../../primitives/wasm-interface" } gethostname = "0.2.3" [features] -default = [ "rocksdb" ] +default = ["rocksdb"] runtime-benchmarks = [ "frame-benchmarking/runtime-benchmarks", 
"frame-support/runtime-benchmarks", @@ -65,4 +65,4 @@ runtime-benchmarks = [ "sc-service/runtime-benchmarks", "sp-runtime/runtime-benchmarks", ] -rocksdb = [ "sc-cli/rocksdb", "sc-client-db/rocksdb" ] +rocksdb = ["sc-cli/rocksdb", "sc-client-db/rocksdb"] diff --git a/substrate/utils/frame/rpc/state-trie-migration-rpc/Cargo.toml b/substrate/utils/frame/rpc/state-trie-migration-rpc/Cargo.toml index d25e6ec67c9..6d3cb545efb 100644 --- a/substrate/utils/frame/rpc/state-trie-migration-rpc/Cargo.toml +++ b/substrate/utils/frame/rpc/state-trie-migration-rpc/Cargo.toml @@ -21,7 +21,7 @@ sp-state-machine = { path = "../../../../primitives/state-machine" } sp-trie = { path = "../../../../primitives/trie" } trie-db = "0.28.0" -jsonrpsee = { version = "0.16.2", features = ["client-core", "server", "macros"] } +jsonrpsee = { version = "0.16.2", features = ["client-core", "macros", "server"] } # Substrate Dependencies sc-client-api = { path = "../../../../client/api" } diff --git a/substrate/utils/frame/rpc/support/Cargo.toml b/substrate/utils/frame/rpc/support/Cargo.toml index 22283fbf450..da56297c82f 100644 --- a/substrate/utils/frame/rpc/support/Cargo.toml +++ b/substrate/utils/frame/rpc/support/Cargo.toml @@ -21,7 +21,7 @@ sp-storage = { path = "../../../../primitives/storage" } [dev-dependencies] scale-info = "2.10.0" -jsonrpsee = { version = "0.16.2", features = ["ws-client", "jsonrpsee-types"] } +jsonrpsee = { version = "0.16.2", features = ["jsonrpsee-types", "ws-client"] } tokio = "1.22.0" sp-core = { path = "../../../../primitives/core" } sp-runtime = { path = "../../../../primitives/runtime" } diff --git a/substrate/utils/frame/rpc/system/Cargo.toml b/substrate/utils/frame/rpc/system/Cargo.toml index 77c3b7eeee3..636f2cd0485 100644 --- a/substrate/utils/frame/rpc/system/Cargo.toml +++ b/substrate/utils/frame/rpc/system/Cargo.toml @@ -14,7 +14,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.6.1" } -jsonrpsee = { version = "0.16.2", features = ["client-core", "server", "macros"] } +jsonrpsee = { version = "0.16.2", features = ["client-core", "macros", "server"] } futures = "0.3.21" log = "0.4.17" frame-system-rpc-runtime-api = { path = "../../../../frame/system/rpc/runtime-api" } diff --git a/substrate/utils/frame/try-runtime/cli/Cargo.toml b/substrate/utils/frame/try-runtime/cli/Cargo.toml index 97efa0e6624..31e95a8be41 100644 --- a/substrate/utils/frame/try-runtime/cli/Cargo.toml +++ b/substrate/utils/frame/try-runtime/cli/Cargo.toml @@ -12,7 +12,7 @@ description = "Cli command runtime testing and dry-running" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -remote-externalities = { package = "frame-remote-externalities" , path = "../../remote-externalities" } +remote-externalities = { package = "frame-remote-externalities", path = "../../remote-externalities" } sc-cli = { path = "../../../../client/cli" } sc-executor = { path = "../../../../client/executor" } sp-consensus-aura = { path = "../../../../primitives/consensus/aura" } @@ -31,7 +31,7 @@ sp-version = { path = "../../../../primitives/version" } sp-debug-derive = { path = "../../../../primitives/debug-derive" } sp-api = { path = "../../../../primitives/api" } sp-weights = { path = "../../../../primitives/weights" } -frame-try-runtime = { path = "../../../../frame/try-runtime", optional = true} +frame-try-runtime = { path = "../../../../frame/try-runtime", optional = true } substrate-rpc-client = { path = "../../rpc/client" } async-trait = "0.1.57" -- GitLab From 
dfd7b15effce7ee28a379f2aa0f21498393fa3e7 Mon Sep 17 00:00:00 2001 From: Andrew Jones Date: Fri, 1 Dec 2023 11:19:09 +0000 Subject: [PATCH 175/199] Contracts: make benchmark dependencies optional in `std` feature (#2576) `wasm-instrument` and `rand` are optional and only used in benchmarking, so should not be pulled in by default as part of the `std` feature. --- substrate/frame/contracts/Cargo.toml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/substrate/frame/contracts/Cargo.toml b/substrate/frame/contracts/Cargo.toml index 80856bef3fd..29c39c047a3 100644 --- a/substrate/frame/contracts/Cargo.toml +++ b/substrate/frame/contracts/Cargo.toml @@ -88,7 +88,7 @@ std = [ "pallet-proxy/std", "pallet-timestamp/std", "pallet-utility/std", - "rand/std", + "rand?/std", "scale-info/std", "serde", "sp-api/std", @@ -97,7 +97,7 @@ std = [ "sp-keystore/std", "sp-runtime/std", "sp-std/std", - "wasm-instrument/std", + "wasm-instrument?/std", "wasmi/std", "xcm-builder/std", "xcm/std", -- GitLab From 7f0beaf2a57654259ee19b492e627d56bf90bfd4 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 1 Dec 2023 13:04:12 +0100 Subject: [PATCH 176/199] Bump proc-macro-crate from 1.3.1 to 2.0.0 (#2557) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Bumps [proc-macro-crate](https://github.com/bkchr/proc-macro-crate) from 1.3.1 to 2.0.0.
Release notes, sourced from proc-macro-crate's releases (v2.0.0).

Full Changelog: https://github.com/bkchr/proc-macro-crate/compare/v1.3.1...v2.0.0
[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=proc-macro-crate&package-manager=cargo&previous-version=1.3.1&new-version=2.0.0)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`.
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 52 +++++++++++++------ .../parachain-system/proc-macro/Cargo.toml | 2 +- polkadot/node/gum/proc-macro/Cargo.toml | 2 +- substrate/client/chain-spec/derive/Cargo.toml | 2 +- .../client/tracing/proc-macro/Cargo.toml | 2 +- .../solution-type/Cargo.toml | 2 +- .../frame/staking/reward-curve/Cargo.toml | 2 +- .../frame/support/procedural/tools/Cargo.toml | 2 +- .../primitives/api/proc-macro/Cargo.toml | 2 +- .../runtime-interface/proc-macro/Cargo.toml | 2 +- 10 files changed, 45 insertions(+), 25 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index c83eeb050df..9c5a2c57da0 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3604,7 +3604,7 @@ dependencies = [ name = "cumulus-pallet-parachain-system-proc-macro" version = "0.1.0" dependencies = [ - "proc-macro-crate", + "proc-macro-crate 2.0.0", "proc-macro2", "quote", "syn 2.0.38", @@ -5054,7 +5054,7 @@ checksum = "f5aa1e3ae159e592ad222dc90c5acbad632b527779ba88486abe92782ab268bd" dependencies = [ "expander 0.0.4", "indexmap 1.9.3", - "proc-macro-crate", + "proc-macro-crate 1.3.1", "proc-macro2", "quote", "syn 1.0.109", @@ -5378,7 +5378,7 @@ dependencies = [ "frame-election-provider-support", "frame-support", "parity-scale-codec", - "proc-macro-crate", + "proc-macro-crate 2.0.0", "proc-macro2", "quote", "scale-info", @@ -5543,7 +5543,7 @@ name = "frame-support-procedural-tools" version = "4.0.0-dev" dependencies = [ "frame-support-procedural-tools-derive", - "proc-macro-crate", + "proc-macro-crate 2.0.0", "proc-macro2", "quote", "syn 2.0.38", @@ -6768,7 +6768,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "44e8ab85614a08792b9bff6c8feee23be78c98d0182d4c622c05256ab553892a" dependencies = [ "heck", - "proc-macro-crate", + "proc-macro-crate 1.3.1", "proc-macro2", "quote", "syn 1.0.109", @@ -8199,7 +8199,7 @@ version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fc076939022111618a5026d3be019fd8b366e76314538ff9a1b59ffbcbf98bcd" dependencies = [ - "proc-macro-crate", + "proc-macro-crate 1.3.1", "proc-macro-error", "proc-macro2", "quote", @@ -8843,7 +8843,7 @@ dependencies = [ "itertools 0.11.0", "layout-rs", "petgraph", - "proc-macro-crate", + "proc-macro-crate 1.3.1", "proc-macro2", "quote", "syn 1.0.109", @@ -10654,7 +10654,7 @@ dependencies = [ name = "pallet-staking-reward-curve" version = "4.0.0-dev" dependencies = [ - "proc-macro-crate", + "proc-macro-crate 2.0.0", "proc-macro2", "quote", "sp-runtime", @@ -11259,7 +11259,7 @@ version = "3.6.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "312270ee71e1cd70289dacf597cab7b207aa107d2f28191c2ae45b2ece18a260" dependencies = [ - "proc-macro-crate", + "proc-macro-crate 1.3.1", "proc-macro2", "quote", "syn 1.0.109", @@ -13490,6 +13490,15 @@ dependencies = [ "toml_edit 0.19.14", ] +[[package]] +name = "proc-macro-crate" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7e8366a6159044a37876a2b9817124296703c586a5c92e2c53751fa06d8d43e8" +dependencies = [ + "toml_edit 0.20.7", +] + [[package]] name = "proc-macro-error" version = "1.0.4" @@ -14831,7 +14840,7 @@ dependencies = [ name = "sc-chain-spec-derive" version = "4.0.0-dev" dependencies = [ - "proc-macro-crate", + "proc-macro-crate 2.0.0", "proc-macro2", "quote", "syn 2.0.38", @@ -16087,7 +16096,7 @@ dependencies = [ name = "sc-tracing-proc-macro" version = "4.0.0-dev" dependencies 
= [ - "proc-macro-crate", + "proc-macro-crate 2.0.0", "proc-macro2", "quote", "syn 2.0.38", @@ -16177,7 +16186,7 @@ version = "2.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "abf2c68b89cafb3b8d918dd07b42be0da66ff202cf1155c5739a4e0c1ea0dc19" dependencies = [ - "proc-macro-crate", + "proc-macro-crate 1.3.1", "proc-macro2", "quote", "syn 1.0.109", @@ -16966,7 +16975,7 @@ dependencies = [ "assert_matches", "blake2 0.10.6", "expander 2.0.0", - "proc-macro-crate", + "proc-macro-crate 2.0.0", "proc-macro2", "quote", "syn 2.0.38", @@ -17643,7 +17652,7 @@ version = "11.0.0" dependencies = [ "Inflector", "expander 2.0.0", - "proc-macro-crate", + "proc-macro-crate 2.0.0", "proc-macro2", "quote", "syn 2.0.38", @@ -17655,7 +17664,7 @@ version = "11.0.0" source = "git+https://github.com/paritytech/polkadot-sdk#82912acb33a9030c0ef3bf590a34fca09b72dc5f" dependencies = [ "Inflector", - "proc-macro-crate", + "proc-macro-crate 1.3.1", "proc-macro2", "quote", "syn 2.0.38", @@ -19337,6 +19346,17 @@ dependencies = [ "winnow", ] +[[package]] +name = "toml_edit" +version = "0.20.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "70f427fce4d84c72b5b732388bf4a9f4531b53f74e2887e3ecb2481f68f66d81" +dependencies = [ + "indexmap 2.0.0", + "toml_datetime", + "winnow", +] + [[package]] name = "toml_edit" version = "0.21.0" @@ -19451,7 +19471,7 @@ version = "1.0.0" dependencies = [ "assert_matches", "expander 2.0.0", - "proc-macro-crate", + "proc-macro-crate 2.0.0", "proc-macro2", "quote", "syn 2.0.38", diff --git a/cumulus/pallets/parachain-system/proc-macro/Cargo.toml b/cumulus/pallets/parachain-system/proc-macro/Cargo.toml index f4bfdf27062..ed85b718f50 100644 --- a/cumulus/pallets/parachain-system/proc-macro/Cargo.toml +++ b/cumulus/pallets/parachain-system/proc-macro/Cargo.toml @@ -13,7 +13,7 @@ proc-macro = true syn = "2.0.38" proc-macro2 = "1.0.64" quote = "1.0.33" -proc-macro-crate = "1.3.1" +proc-macro-crate = "2.0.0" [features] default = ["std"] diff --git a/polkadot/node/gum/proc-macro/Cargo.toml b/polkadot/node/gum/proc-macro/Cargo.toml index cd6e19b3dd2..c775c570705 100644 --- a/polkadot/node/gum/proc-macro/Cargo.toml +++ b/polkadot/node/gum/proc-macro/Cargo.toml @@ -16,7 +16,7 @@ proc-macro = true syn = { version = "2.0.38", features = ["extra-traits", "full"] } quote = "1.0.28" proc-macro2 = "1.0.56" -proc-macro-crate = "1.1.3" +proc-macro-crate = "2.0.0" expander = "2.0.0" [dev-dependencies] diff --git a/substrate/client/chain-spec/derive/Cargo.toml b/substrate/client/chain-spec/derive/Cargo.toml index 74b8b656a40..c200d84244d 100644 --- a/substrate/client/chain-spec/derive/Cargo.toml +++ b/substrate/client/chain-spec/derive/Cargo.toml @@ -15,7 +15,7 @@ targets = ["x86_64-unknown-linux-gnu"] proc-macro = true [dependencies] -proc-macro-crate = "1.1.3" +proc-macro-crate = "2.0.0" proc-macro2 = "1.0.56" quote = "1.0.28" syn = "2.0.38" diff --git a/substrate/client/tracing/proc-macro/Cargo.toml b/substrate/client/tracing/proc-macro/Cargo.toml index 2e4a0c8f3ee..f0cca5c41b6 100644 --- a/substrate/client/tracing/proc-macro/Cargo.toml +++ b/substrate/client/tracing/proc-macro/Cargo.toml @@ -15,7 +15,7 @@ targets = ["x86_64-unknown-linux-gnu"] proc-macro = true [dependencies] -proc-macro-crate = "1.1.3" +proc-macro-crate = "2.0.0" proc-macro2 = "1.0.56" quote = { version = "1.0.28", features = ["proc-macro"] } syn = { version = "2.0.38", features = ["extra-traits", "full", "parsing", "proc-macro"] } diff --git 
a/substrate/frame/election-provider-support/solution-type/Cargo.toml b/substrate/frame/election-provider-support/solution-type/Cargo.toml index 1e3002d5dc4..e5d59685661 100644 --- a/substrate/frame/election-provider-support/solution-type/Cargo.toml +++ b/substrate/frame/election-provider-support/solution-type/Cargo.toml @@ -18,7 +18,7 @@ proc-macro = true syn = { version = "2.0.38", features = ["full", "visit"] } quote = "1.0.28" proc-macro2 = "1.0.56" -proc-macro-crate = "1.1.3" +proc-macro-crate = "2.0.0" [dev-dependencies] parity-scale-codec = "3.6.1" diff --git a/substrate/frame/staking/reward-curve/Cargo.toml b/substrate/frame/staking/reward-curve/Cargo.toml index 0a725996115..2d57dc60fff 100644 --- a/substrate/frame/staking/reward-curve/Cargo.toml +++ b/substrate/frame/staking/reward-curve/Cargo.toml @@ -15,7 +15,7 @@ targets = ["x86_64-unknown-linux-gnu"] proc-macro = true [dependencies] -proc-macro-crate = "1.1.3" +proc-macro-crate = "2.0.0" proc-macro2 = "1.0.56" quote = "1.0.28" syn = { version = "2.0.38", features = ["full", "visit"] } diff --git a/substrate/frame/support/procedural/tools/Cargo.toml b/substrate/frame/support/procedural/tools/Cargo.toml index 836e55b8a63..4921a2a9428 100644 --- a/substrate/frame/support/procedural/tools/Cargo.toml +++ b/substrate/frame/support/procedural/tools/Cargo.toml @@ -12,7 +12,7 @@ description = "Proc macro helpers for procedural macros" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -proc-macro-crate = "1.1.3" +proc-macro-crate = "2.0.0" proc-macro2 = "1.0.56" quote = "1.0.28" syn = { version = "2.0.38", features = ["extra-traits", "full", "visit"] } diff --git a/substrate/primitives/api/proc-macro/Cargo.toml b/substrate/primitives/api/proc-macro/Cargo.toml index 8188299f533..232b988124b 100644 --- a/substrate/primitives/api/proc-macro/Cargo.toml +++ b/substrate/primitives/api/proc-macro/Cargo.toml @@ -20,7 +20,7 @@ quote = "1.0.28" syn = { version = "2.0.38", features = ["extra-traits", "fold", "full", "visit"] } proc-macro2 = "1.0.56" blake2 = { version = "0.10.4", default-features = false } -proc-macro-crate = "1.1.3" +proc-macro-crate = "2.0.0" expander = "2.0.0" Inflector = "0.11.4" diff --git a/substrate/primitives/runtime-interface/proc-macro/Cargo.toml b/substrate/primitives/runtime-interface/proc-macro/Cargo.toml index 6a06bb95654..6018ef6c2d4 100644 --- a/substrate/primitives/runtime-interface/proc-macro/Cargo.toml +++ b/substrate/primitives/runtime-interface/proc-macro/Cargo.toml @@ -17,7 +17,7 @@ proc-macro = true [dependencies] Inflector = "0.11.4" -proc-macro-crate = "1.1.3" +proc-macro-crate = "2.0.0" proc-macro2 = "1.0.56" quote = "1.0.28" expander = "2.0.0" -- GitLab From 095f4bd9ae6ed691b19768f7daac9e6833f425ed Mon Sep 17 00:00:00 2001 From: Davide Galassi Date: Fri, 1 Dec 2023 16:39:07 +0100 Subject: [PATCH 177/199] Sassafras Consensus Pallet (#1577) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This PR introduces the pallet for Sassafras consensus. ## Non Goals The pallet delivers only the bare-bones and doesn't deliver support for auxiliary functionalities such as equivocation report and support for epoch change via session pallet. These functionalities were drafted in the [main PR](https://github.com/paritytech/polkadot-sdk/pull/1336), but IMO is better to introduce this auxiliary stuff in a follow up PR and after client code. 
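As a quick illustration of what the bare-bones pallet does deliver, the following sketch mirrors one of the calls exercised by the benchmarks included in this PR. The mock-runtime items (`new_test_ext`, `Sassafras`, `RuntimeOrigin`) are assumptions for illustration only; the pallet's own `mock.rs` and `tests.rs` added below exercise the real calls end to end.

```rust
// Sketch only: `new_test_ext`, `Sassafras` and `RuntimeOrigin` are hypothetical
// mock-runtime items standing in for a runtime that implements this pallet's `Config`.
use sp_consensus_sassafras::EpochConfiguration;

#[test]
fn plan_config_change_sketch() {
    new_test_ext().execute_with(|| {
        // Epoch configuration drives the ticket lottery (same values as the
        // `plan_config_change` benchmark in this PR).
        let config = EpochConfiguration { redundancy_factor: 1, attempts_number: 10 };
        // The benchmark dispatches this call from Root, so the sketch does the same.
        Sassafras::plan_config_change(RuntimeOrigin::root(), config)
            .expect("Root may plan an epoch config change");
    });
}
```
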
## Potential follow ups https://github.com/paritytech/polkadot-sdk/issues/2364 --------- Co-authored-by: Sebastian Kunert Co-authored-by: Koute Co-authored-by: Michal Kucharczyk <1728078+michalkucharczyk@users.noreply.github.com> Co-authored-by: André Silva <123550+andresilva@users.noreply.github.com> Co-authored-by: Bastian Köcher --- Cargo.lock | 50 +- Cargo.toml | 1 + substrate/frame/sassafras/Cargo.toml | 59 + substrate/frame/sassafras/README.md | 8 + substrate/frame/sassafras/src/benchmarking.rs | 272 +++++ .../src/data/25_tickets_100_auths.bin | Bin 0 -> 24728 bytes .../sassafras/src/data/benchmark-results.md | 99 ++ .../frame/sassafras/src/data/tickets-sort.md | 274 +++++ .../frame/sassafras/src/data/tickets-sort.png | Bin 0 -> 33919 bytes substrate/frame/sassafras/src/lib.rs | 1081 +++++++++++++++++ substrate/frame/sassafras/src/mock.rs | 343 ++++++ substrate/frame/sassafras/src/tests.rs | 874 +++++++++++++ substrate/frame/sassafras/src/weights.rs | 425 +++++++ .../primitives/consensus/sassafras/Cargo.toml | 12 +- .../primitives/consensus/sassafras/README.md | 12 +- .../consensus/sassafras/src/digests.rs | 6 +- .../primitives/consensus/sassafras/src/lib.rs | 38 +- .../consensus/sassafras/src/ticket.rs | 43 +- .../primitives/consensus/sassafras/src/vrf.rs | 2 +- substrate/primitives/core/Cargo.toml | 2 +- substrate/primitives/core/src/bandersnatch.rs | 289 +++-- 21 files changed, 3763 insertions(+), 127 deletions(-) create mode 100644 substrate/frame/sassafras/Cargo.toml create mode 100644 substrate/frame/sassafras/README.md create mode 100644 substrate/frame/sassafras/src/benchmarking.rs create mode 100644 substrate/frame/sassafras/src/data/25_tickets_100_auths.bin create mode 100644 substrate/frame/sassafras/src/data/benchmark-results.md create mode 100644 substrate/frame/sassafras/src/data/tickets-sort.md create mode 100644 substrate/frame/sassafras/src/data/tickets-sort.png create mode 100644 substrate/frame/sassafras/src/lib.rs create mode 100644 substrate/frame/sassafras/src/mock.rs create mode 100644 substrate/frame/sassafras/src/tests.rs create mode 100644 substrate/frame/sassafras/src/weights.rs diff --git a/Cargo.lock b/Cargo.lock index 9c5a2c57da0..241a2fd3d15 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -544,7 +544,7 @@ dependencies = [ [[package]] name = "ark-secret-scalar" version = "0.0.2" -source = "git+https://github.com/w3f/ring-vrf?rev=3ddc205#3ddc2051066c4b3f0eadd0ba5700df12500d9754" +source = "git+https://github.com/w3f/ring-vrf?rev=2019248#2019248785389b3246d55b1c3b0e9bdef4454cb7" dependencies = [ "ark-ec", "ark-ff", @@ -552,7 +552,7 @@ dependencies = [ "ark-std", "ark-transcript", "digest 0.10.7", - "rand_core 0.6.4", + "getrandom_or_panic", "zeroize", ] @@ -593,7 +593,7 @@ dependencies = [ [[package]] name = "ark-transcript" version = "0.0.2" -source = "git+https://github.com/w3f/ring-vrf?rev=3ddc205#3ddc2051066c4b3f0eadd0ba5700df12500d9754" +source = "git+https://github.com/w3f/ring-vrf?rev=2019248#2019248785389b3246d55b1c3b0e9bdef4454cb7" dependencies = [ "ark-ff", "ark-serialize", @@ -1225,7 +1225,7 @@ dependencies = [ [[package]] name = "bandersnatch_vrfs" version = "0.0.4" -source = "git+https://github.com/w3f/ring-vrf?rev=3ddc205#3ddc2051066c4b3f0eadd0ba5700df12500d9754" +source = "git+https://github.com/w3f/ring-vrf?rev=2019248#2019248785389b3246d55b1c3b0e9bdef4454cb7" dependencies = [ "ark-bls12-381", "ark-ec", @@ -2716,7 +2716,7 @@ dependencies = [ [[package]] name = "common" version = "0.1.0" -source = 
"git+https://github.com/burdges/ring-proof?branch=patch-1#05a756076cb20f981a52afea3a620168de49f95f" +source = "git+https://github.com/w3f/ring-proof#61e7b528bc0170d6bf541be32440d569b784425d" dependencies = [ "ark-ec", "ark-ff", @@ -2724,6 +2724,7 @@ dependencies = [ "ark-serialize", "ark-std", "fflonk", + "getrandom_or_panic", "merlin 3.0.0", "rand_chacha 0.3.1", ] @@ -4525,7 +4526,7 @@ checksum = "86e3bdc80eee6e16b2b6b0f87fbc98c04bee3455e35174c0de1a125d0688c632" [[package]] name = "dleq_vrf" version = "0.0.2" -source = "git+https://github.com/w3f/ring-vrf?rev=3ddc205#3ddc2051066c4b3f0eadd0ba5700df12500d9754" +source = "git+https://github.com/w3f/ring-vrf?rev=2019248#2019248785389b3246d55b1c3b0e9bdef4454cb7" dependencies = [ "ark-ec", "ark-ff", @@ -4535,7 +4536,6 @@ dependencies = [ "ark-std", "ark-transcript", "arrayvec 0.7.4", - "rand_core 0.6.4", "zeroize", ] @@ -4869,9 +4869,9 @@ dependencies = [ [[package]] name = "env_logger" -version = "0.10.0" +version = "0.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "85cdab6a89accf66733ad5a1693a4dcced6aeff64602b634530dd73c1f3ee9f0" +checksum = "95b3f3e67048839cb0d0781f445682a35113da7121f7c949db0e2be96a4fbece" dependencies = [ "humantime", "is-terminal", @@ -5132,7 +5132,7 @@ version = "0.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "84f2e425d9790201ba4af4630191feac6dcc98765b118d4d18e91d23c2353866" dependencies = [ - "env_logger 0.10.0", + "env_logger 0.10.1", "log", ] @@ -5912,6 +5912,16 @@ dependencies = [ "wasi 0.11.0+wasi-snapshot-preview1", ] +[[package]] +name = "getrandom_or_panic" +version = "0.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6ea1015b5a70616b688dc230cfe50c8af89d972cb132d5a622814d29773b10b9" +dependencies = [ + "rand 0.8.5", + "rand_core 0.6.4", +] + [[package]] name = "ghash" version = "0.4.4" @@ -10509,6 +10519,24 @@ dependencies = [ "sp-std 8.0.0", ] +[[package]] +name = "pallet-sassafras" +version = "0.3.5-dev" +dependencies = [ + "array-bytes 6.1.0", + "frame-benchmarking", + "frame-support", + "frame-system", + "log", + "parity-scale-codec", + "scale-info", + "sp-consensus-sassafras", + "sp-core", + "sp-io", + "sp-runtime", + "sp-std 8.0.0", +] + [[package]] name = "pallet-scheduler" version = "4.0.0-dev" @@ -14164,7 +14192,7 @@ dependencies = [ [[package]] name = "ring" version = "0.1.0" -source = "git+https://github.com/burdges/ring-proof?branch=patch-1#05a756076cb20f981a52afea3a620168de49f95f" +source = "git+https://github.com/w3f/ring-proof#61e7b528bc0170d6bf541be32440d569b784425d" dependencies = [ "ark-ec", "ark-ff", diff --git a/Cargo.toml b/Cargo.toml index 0a7bf912e48..5fb7c0f2315 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -346,6 +346,7 @@ members = [ "substrate/frame/root-testing", "substrate/frame/safe-mode", "substrate/frame/salary", + "substrate/frame/sassafras", "substrate/frame/scheduler", "substrate/frame/scored-pool", "substrate/frame/session", diff --git a/substrate/frame/sassafras/Cargo.toml b/substrate/frame/sassafras/Cargo.toml new file mode 100644 index 00000000000..7ab2e2e1770 --- /dev/null +++ b/substrate/frame/sassafras/Cargo.toml @@ -0,0 +1,59 @@ +[package] +name = "pallet-sassafras" +version = "0.3.5-dev" +authors = ["Parity Technologies "] +edition = "2021" +license = "Apache-2.0" +homepage = "https://substrate.io" +repository = "https://github.com/paritytech/substrate/" +description = "Consensus extension module for Sassafras consensus." 
+readme = "README.md" +publish = false + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] + +[dependencies] +scale-codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } +scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } +frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } +frame-support = { path = "../support", default-features = false } +frame-system = { path = "../system", default-features = false } +log = { version = "0.4.17", default-features = false } +sp-consensus-sassafras = { path = "../../primitives/consensus/sassafras", default-features = false, features = ["serde"] } +sp-io = { path = "../../primitives/io", default-features = false } +sp-runtime = { path = "../../primitives/runtime", default-features = false } +sp-std = { path = "../../primitives/std", default-features = false } + +[dev-dependencies] +array-bytes = "6.1" +sp-core = { path = "../../primitives/core" } + +[features] +default = [ "std" ] +std = [ + "frame-benchmarking?/std", + "frame-support/std", + "frame-system/std", + "log/std", + "scale-codec/std", + "scale-info/std", + "sp-consensus-sassafras/std", + "sp-io/std", + "sp-runtime/std", + "sp-std/std", +] +runtime-benchmarks = [ + "frame-benchmarking/runtime-benchmarks", + "frame-support/runtime-benchmarks", + "frame-system/runtime-benchmarks", + "sp-runtime/runtime-benchmarks", +] +try-runtime = [ + "frame-support/try-runtime", + "frame-system/try-runtime", + "sp-runtime/try-runtime", +] +# Construct dummy ring context on genesis. +# Mostly used for testing and development. +construct-dummy-ring-context = [] diff --git a/substrate/frame/sassafras/README.md b/substrate/frame/sassafras/README.md new file mode 100644 index 00000000000..f0e24a05355 --- /dev/null +++ b/substrate/frame/sassafras/README.md @@ -0,0 +1,8 @@ +Runtime module for SASSAFRAS consensus. + +- Tracking issue: https://github.com/paritytech/polkadot-sdk/issues/41 +- Protocol RFC proposal: https://github.com/polkadot-fellows/RFCs/pull/26 + +# ⚠️ WARNING ⚠️ + +The crate interfaces and structures are experimental and may be subject to changes. diff --git a/substrate/frame/sassafras/src/benchmarking.rs b/substrate/frame/sassafras/src/benchmarking.rs new file mode 100644 index 00000000000..95a2b4bbce4 --- /dev/null +++ b/substrate/frame/sassafras/src/benchmarking.rs @@ -0,0 +1,272 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Benchmarks for the Sassafras pallet. 
+ +use crate::*; +use sp_consensus_sassafras::{vrf::VrfSignature, EphemeralPublic, EpochConfiguration}; + +use frame_benchmarking::v2::*; +use frame_support::traits::Hooks; +use frame_system::RawOrigin; + +const LOG_TARGET: &str = "sassafras::benchmark"; + +const TICKETS_DATA: &[u8] = include_bytes!("data/25_tickets_100_auths.bin"); + +fn make_dummy_vrf_signature() -> VrfSignature { + // This leverages our knowledge about serialized vrf signature structure. + // Mostly to avoid to import all the bandersnatch primitive just for this test. + let buf = [ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0xb5, 0x5f, 0x8e, 0xc7, 0x68, 0xf5, 0x05, 0x3f, 0xa9, + 0x18, 0xca, 0x07, 0x13, 0xc7, 0x4b, 0xa3, 0x9a, 0x97, 0xd3, 0x76, 0x8f, 0x0c, 0xbf, 0x2e, + 0xd4, 0xf9, 0x3a, 0xae, 0xc1, 0x96, 0x2a, 0x64, 0x80, + ]; + VrfSignature::decode(&mut &buf[..]).unwrap() +} + +#[benchmarks] +mod benchmarks { + use super::*; + + // For first block (#1) we do some extra operation. + // But is a one shot operation, so we don't account for it here. + // We use 0, as it will be the path used by all the blocks with n != 1 + #[benchmark] + fn on_initialize() { + let block_num = BlockNumberFor::::from(0u32); + + let slot_claim = SlotClaim { + authority_idx: 0, + slot: Default::default(), + vrf_signature: make_dummy_vrf_signature(), + ticket_claim: None, + }; + frame_system::Pallet::::deposit_log((&slot_claim).into()); + + // We currently don't account for the potential weight added by the `on_finalize` + // incremental sorting of the tickets. + + #[block] + { + // According to `Hooks` trait docs, `on_finalize` `Weight` should be bundled + // together with `on_initialize` `Weight`. + Pallet::::on_initialize(block_num); + Pallet::::on_finalize(block_num) + } + } + + // Weight for the default internal epoch change trigger. + // + // Parameters: + // - `x`: number of authorities (1:100). + // - `y`: epoch length in slots (1000:5000) + // + // This accounts for the worst case which includes: + // - load the full ring context. + // - recompute the ring verifier. + // - sorting the epoch tickets in one shot + // (here we account for the very unluky scenario where we haven't done any sort work yet) + // - pending epoch change config. + // + // For this bench we assume a redundancy factor of 2 (suggested value to be used in prod). 
+ #[benchmark] + fn enact_epoch_change(x: Linear<1, 100>, y: Linear<1000, 5000>) { + let authorities_count = x as usize; + let epoch_length = y as u32; + let redundancy_factor = 2; + + let unsorted_tickets_count = epoch_length * redundancy_factor; + + let mut meta = TicketsMetadata { unsorted_tickets_count, tickets_count: [0, 0] }; + let config = EpochConfiguration { redundancy_factor, attempts_number: 32 }; + + // Triggers ring verifier computation for `x` authorities + let mut raw_data = TICKETS_DATA; + let (authorities, _): (Vec, Vec) = + Decode::decode(&mut raw_data).expect("Failed to decode tickets buffer"); + let next_authorities: Vec<_> = authorities[..authorities_count].to_vec(); + let next_authorities = WeakBoundedVec::force_from(next_authorities, None); + NextAuthorities::::set(next_authorities); + + // Triggers JIT sorting tickets + (0..meta.unsorted_tickets_count) + .collect::>() + .chunks(SEGMENT_MAX_SIZE as usize) + .enumerate() + .for_each(|(segment_id, chunk)| { + let segment = chunk + .iter() + .map(|i| { + let id_bytes = crate::hashing::blake2_128(&i.to_le_bytes()); + TicketId::from_le_bytes(id_bytes) + }) + .collect::>(); + UnsortedSegments::::insert( + segment_id as u32, + BoundedVec::truncate_from(segment), + ); + }); + + // Triggers some code related to config change (dummy values) + NextEpochConfig::::set(Some(config)); + PendingEpochConfigChange::::set(Some(config)); + + // Triggers the cleanup of the "just elapsed" epoch tickets (i.e. the current one) + let epoch_tag = EpochIndex::::get() & 1; + meta.tickets_count[epoch_tag as usize] = epoch_length; + (0..epoch_length).for_each(|i| { + let id_bytes = crate::hashing::blake2_128(&i.to_le_bytes()); + let id = TicketId::from_le_bytes(id_bytes); + TicketsIds::::insert((epoch_tag as u8, i), id); + let body = TicketBody { + attempt_idx: i, + erased_public: EphemeralPublic([i as u8; 32]), + revealed_public: EphemeralPublic([i as u8; 32]), + }; + TicketsData::::set(id, Some(body)); + }); + + TicketsMeta::::set(meta); + + #[block] + { + Pallet::::should_end_epoch(BlockNumberFor::::from(3u32)); + let next_authorities = Pallet::::next_authorities(); + // Using a different set of authorities triggers the recomputation of ring verifier. + Pallet::::enact_epoch_change(Default::default(), next_authorities); + } + } + + #[benchmark] + fn submit_tickets(x: Linear<1, 25>) { + let tickets_count = x as usize; + + let mut raw_data = TICKETS_DATA; + let (authorities, tickets): (Vec, Vec) = + Decode::decode(&mut raw_data).expect("Failed to decode tickets buffer"); + + log::debug!(target: LOG_TARGET, "PreBuiltTickets: {} tickets, {} authorities", tickets.len(), authorities.len()); + + // Set `NextRandomness` to the same value used for pre-built tickets + // (see `make_tickets_data` test). 
+ NextRandomness::::set([0; 32]); + + Pallet::::update_ring_verifier(&authorities); + + // Set next epoch config to accept all the tickets + let next_config = EpochConfiguration { attempts_number: 1, redundancy_factor: u32::MAX }; + NextEpochConfig::::set(Some(next_config)); + + // Use the authorities in the pre-build tickets + let authorities = WeakBoundedVec::force_from(authorities, None); + NextAuthorities::::set(authorities); + + let tickets = tickets[..tickets_count].to_vec(); + let tickets = BoundedVec::truncate_from(tickets); + + log::debug!(target: LOG_TARGET, "Submitting {} tickets", tickets_count); + + #[extrinsic_call] + submit_tickets(RawOrigin::None, tickets); + } + + #[benchmark] + fn plan_config_change() { + let config = EpochConfiguration { redundancy_factor: 1, attempts_number: 10 }; + + #[extrinsic_call] + plan_config_change(RawOrigin::Root, config); + } + + // Construction of ring verifier + #[benchmark] + fn update_ring_verifier(x: Linear<1, 100>) { + let authorities_count = x as usize; + + let mut raw_data = TICKETS_DATA; + let (authorities, _): (Vec, Vec) = + Decode::decode(&mut raw_data).expect("Failed to decode tickets buffer"); + let authorities: Vec<_> = authorities[..authorities_count].to_vec(); + + #[block] + { + Pallet::::update_ring_verifier(&authorities); + } + } + + // Bare loading of ring context. + // + // It is interesting to see how this compares to 'update_ring_verifier', which + // also recomputes and stores the new verifier. + #[benchmark] + fn load_ring_context() { + #[block] + { + let _ring_ctx = RingContext::::get().unwrap(); + } + } + + // Tickets segments sorting function benchmark. + #[benchmark] + fn sort_segments(x: Linear<1, 100>) { + let segments_count = x as u32; + let tickets_count = segments_count * SEGMENT_MAX_SIZE; + + // Construct a bunch of dummy tickets + let tickets: Vec<_> = (0..tickets_count) + .map(|i| { + let body = TicketBody { + attempt_idx: i, + erased_public: EphemeralPublic([i as u8; 32]), + revealed_public: EphemeralPublic([i as u8; 32]), + }; + let id_bytes = crate::hashing::blake2_128(&i.to_le_bytes()); + let id = TicketId::from_le_bytes(id_bytes); + (id, body) + }) + .collect(); + + for (chunk_id, chunk) in tickets.chunks(SEGMENT_MAX_SIZE as usize).enumerate() { + let segment: Vec = chunk + .iter() + .map(|(id, body)| { + TicketsData::::set(id, Some(body.clone())); + *id + }) + .collect(); + let segment = BoundedVec::truncate_from(segment); + UnsortedSegments::::insert(chunk_id as u32, segment); + } + + // Update metadata + let mut meta = TicketsMeta::::get(); + meta.unsorted_tickets_count = tickets_count; + TicketsMeta::::set(meta.clone()); + + log::debug!(target: LOG_TARGET, "Before sort: {:?}", meta); + #[block] + { + Pallet::::sort_segments(u32::MAX, 0, &mut meta); + } + log::debug!(target: LOG_TARGET, "After sort: {:?}", meta); + } +} diff --git a/substrate/frame/sassafras/src/data/25_tickets_100_auths.bin b/substrate/frame/sassafras/src/data/25_tickets_100_auths.bin new file mode 100644 index 0000000000000000000000000000000000000000..6e81f216455ae9dc61be31a9edef583a652721a8 GIT binary patch literal 24728 zcmb@NQ+H)+lt52xqhi}h#kOtRb}Bwm#kP%#Z9A#hwr!(t_aCU2-mmj{?lIQ<)`|y_ z!E3ZEM8f*RvMLf+!?96gw=kg$1liS&4$1dALn5*V*akULr*LUn1`=+>m6!ePaO!e` zPA%#m?01WJ+cJKD0npko3ibm0$;`N$e&uGBh!XNTA#fkj%hWT()|s4{i2{sidFO%6 zd4v96=7}Nf^Ei;wU^IP4_P5jcuOa>=IC@i|COiG%Z;XNB^|4Mb0Voy6 znZ#UadJoArKC@1RC@%Jm{`h+7?*>2Pvp58(=0#RuQE1e*(? 
zA{peioGCr`%q1;Nh>HUfe?_3k0%+ZSe!xP34 zbX_1Nz=zOKgWXB$HTL9Yv^Nmc3`6YZcLDTYb`o0!LcvOUBp}bmBRCJxY@f7+;HJ5R z&OM)TEy2!g?-zpt-XH3+U3NgA!J6E_GsV~d7-dVCNiN=26%6t+W=3gCkkkFWJcKqt zhc0TI@;67Opvnm-R^=Zg0gd*94qY@6+8jO5z)Bb}0Dmf=h>w@#H;PiMN`%6R!*Vvg zaM{SdJEYF3)syM5JOFuaP_f>p!5eYA*rJy3FVmZtk&(Vd-FC#El&2ab4N*W%Oog8} zzq2_6t<->xqmed4Lr}rEi-tr@96tA~D+(*{%;Y-@o{YC53AKCH4U!TA3th!Sokl)P zzjt(1s^|v|@N~svu8F+qJAltpo1GC?V6tSCk*S~X?}$)G{rKb*1W=!K5Owy_5$$o= zS^M-k7X`;EGG8oLf`KDq>3Nn6$`07g5Yvv9ot`f!N|vM=H-t$9Qw=F-jH{HPtq@tK zbb|mii(^Rio?)}irzVbXs-u*0M9ufH;(y~cmr)?B;)#I-e2xeuKg%p7J$9ar@*N`4 zz?9k=Dqzbu>QRNaqZ9c51h&n4?s5a)Q4J_BSIvMNizH>8JnVw)_Qq72Hb$gvmr5p1 z2a&tZ;F;TA?Gq|M%`ICHV_8%cSO%`-Fp6l2OENfoaMIIpFQiE?<-}DZAUb}2giqM4 zj@>4pFnF-YhX(H?tEJ^r zNOG4*LI~J-QE(Nc8R4EEW!t>YrF6I_1ci<3>w>b0C1$DiSVjlDm!afdhij%NaTQaQ zaFiv7W_b>}&jN!lL?EI@K41v}zXi|OdE`8a-EU9}xe5NT`m*UwJpjtWg!IICv8_+F z0L5|(5SWM(Tp0rpP3_dYP+^%%SNw4}T=CuJ41$t$^Z>S8chGEC_hW)ruc7HIWI&8_$?`eR<%U^54Be(YidiGY0$pJ@d--W@XVh>-(4e3-uo8B_cwAfHNOh zw+ZS!jVLg4u@4x!$%83>+YH9qD=`A5&rOLA4A;vK-=jYX5Za21QCYh^Nml`pV=OSv zD*5A)^NS4?9aHJ^>+TXQ8gfzFC#{BO5o-- z%>Ez*oKtUX3+5oE>YYgUYN#7q8DwjDDzQSg-5RtFXdU9H0HyIN3d#Ky*^!w!YzN`) z2ct9(!%B*JxYEbVJ3e`w(1jA4jk%sLXSaTtO6 zF4R-m@{U1z^f>#{{fM$?dWf!G@HoG44I((Rf8!AW1r%@z^o2bY$-#qERTcw~2W-^3 z4Q!;2oQx{2BCbMl0Rg3c{sxj<%9=GRL|QmFa{8txRkQoBI#$YdlMvr&7=U?9SKVBy zo?egFg|85Z-4w!rOd|n3q1@3K5aDF#}$pEkFa6ec0R7 zPD}A22@wr2kM@%?f}9)9%XI*A5<>l_hZ_KFhf}4y??;oUAI_yCFe-!DCv%fiYAW(= zk~5T>=q4+F7RV=Ao-owF+h|fH4^WrcC03M{M($L)t}H7~Fw2XZXa30X_}zkdhf6y2 z#0RKq-J_uwts^K@8zJagAPV>BZ2{r_AUo0nK^wafpJz6XmS|~rjo2pu5v2os80sx}1 zlSe;B^|4@L4|DDV$3`zAdXKs{j8f@L{Lq(l-~cloH%lLmdVXgkFRCKjC#{HwU!=P#q7*#SRP6 zflYs*IO~8sAkRI;dZPRr753*}A289vz;73>EQWBBTubE+HCBe`shw{t3Od^nmbi)a z{cbs05&)DP2TDXh-NV(18N2lQSgBuW6zM}u%eJJ6d`a~Hu_VyC>stUUpBD2KY>Jm( z#0xFFbn9B)3B;riALl-Vh>!y)oL-|!o9Ft>GrGCQZG>o9;k?AI*CJI5sioJ?C0@!6 z7(&M1eD!QK%7{IR|5gyq;7QhSdL>umDzjqfM!ZI#0Ak;~?+1rn4@p($s1Kl^H7E!8zfWsL6{#E4+w6$=(o1o%c8)`H5_2c3p zNOHaVL-NDq_F#SGPW9T39H8RARCe|p83{?Ws#h8KJ;-SmLxUQexRL1h96X`!i40h3 zpQKd4^_4qXWbk_zFef|h3<16$o*vxiTIaSvs@sUgG zA4%kTvYycJTcFeiq6cV&+Ur45l%b%EaB+lTo1{0v@H}W-sEWbAxgbEn0TDt~IcxF{ z%;YL$=%%}-fL#Y3#;v!>mzQ0CXEX50Ddm!G$6V0tPK(=j#l$G)@sCfUUR7+2;sz zg=vCTiyzn`(Xn=p8`!;@9y0*vhl9sYoYUBX1V3@SAKrDh`=SAuP}(*{UQ~uzFI)TE z;w&wx5vOex-}~`Qz;=@0b9=!JBq~7YlPDyw&&o%dX%GG#JEZrY7ne3a{&KU;cyk#s z0^nBD#BEe6eGF1+B;e;G5VM~n>2!*@b>N5doO5yp1`v*H~4I$#a z=>t3$Ffm?p^W{8NjZa-Wl`lX_l)et@ySvCC(E2X@>~Z@R1JE(iER?{GETFanh${Z_ zDz0@=Q5K+no9Q|kql$*Y@UrOtDO1itQ>s-LQIjTBQu4>%r7BOM0j0K*AyHrw5wPcb zanyn8IKb6^vY~_`PYMq;Kf%W*wDGHD={FKj50SdRh=w6`=_T-Ezxg_eB;9 z%?eVoXc16U?KBKmbT1 zKgRca%WXwIwsZgrs9+@ zZwjk^R;VS^aWF4?`s9?D z(Dd+X@$7v(_|H~eS~d`r&0OoRHpG)t#d2;gg!^=51(tpQX*jl61GBS8{!cP!gj{w> z)pG}rn;C3QdpEgy?;^pVr&fll_}`dtn)-@AB^RuUI)kAC2=?%@>Z0`TyriTbnvNOf zNv$5Db0mKzI8B?K8}yrf{6(OAPJHB~QROh< zw}r%Al+5A=)bhg>b0`MR%y#jved>dw*k8By8^$G0`Y+%;sy- zpRt;T_)Np5(qI3-Y}nzl}j=TigCwT1Uty}?qw@z>2?R>_rQ1cLlOh;`qaQmsM~55si;_HxXg z{o`Oo_R$Vg#~F_}gd#`8kP}!#A-z#Jq9-fcu39E0s+eA^;Kje4FtV2tMU8r$ zI5%IJF`~cgOCqXTY}v~{=B{#dTH%;|FpkGd0*8wG1B)YkGftB1k;5l$V)8Vv2;#ZNqX#A5?h7%W&l-!ypZL+beCXcrk-Ks@Nkk@#QV!ayin zgnZ9p{D|LhwK3RubX>fQz9img_#AC8(|TT;2rWFV=5W<|)iNOZzRMV|9WoCCUUMZh z;v~IvE8o}9*+cj~h_>%N`sohm)^|mwX8U>vm#S%wWXy5HaP3^lflGultut zg2TN_6JY`)Opi98B}dJOQ8DB0-0VFkt@H&KiJ)=>w68kdaOxEf)@rZ$cx~ zR@s?*x}MN|y0l)^ZNVp!`{xLzC(kv(%(CxphZw(Blc61h7;+k@%Z%2*6ZL?g{|{mX zwIURxKKjGcJ+cqm$-R7e%Km0a{@l#gH9GBI&@^BLsG9MWkXY2^QN?admE4Gb%4cw! 
zr(CW5;S2Hv4Zr4xqstFdl7^v$N-zzh0pr2qO)W_U$4g!&KC`K&QVghvv-ghfqBf(K z6Ft@bC`oTS!8(-1N^;7}hpEO)oUOSt1O!_xw!Sm#cRYx5K8w@2E4|lK!!kKFn0w+( z0Ezn<0Dw^ELFUG>4g|YU3UmGWa}zblMo-J{u;`&f`SMlBqI^GEC;X z(WN9*N`(f6NfTgoOZUr?WNPujaHE&LNiS$pZA2;6?vdC2f<#OQ0aMi+yP_x?M106d zk(#2vDZ=$ebkv9KE9kIiOMV_LC2paij2bz&7vNnI!Z2ouCc~*Ja@sQ zp4ZO!z@C&(pSy1^9<8~y1@4hv!Mq{w@Q~n`Je0ZzSu30>cKsV$)2rgbh-Q?eY7D0j{ z@)L=&#$YMQ?Y7mpFlQrfdjbNJB~mRH?HUQAbu7u)r`FS0EcSYtB; zRW>GC6Kp^{6KF3RLLLO0Q-Lsi~immIU{>J2W*BTa9brK>S^p8aF7M}T&@-{H80!32ZRUf8M`t( z{u&z^d$2baNXl;53Nt`o!ku`O5`y3#N7d$2o&uuYVcy+zt*aLfzqKX1*xj1pkq#NB zbzjy#BT{NS+<}xuT3?4Sn^=Brt!)>c|B-m+^lIDLCQ+9U*9Y(|@|Wl+A;)5Sn{Bb}3dV(eSR4;FA&SE<7->~R1>J)19;Im}$?(iRJOz>+<^bW9}KAI*&As9M%Ss{C#O zd`X<}INr$9a}_FYTiO|;;fXX0!@(xux&@o_@T-~9gTNmYXR7JIyd+%%mdTDoLEMqI z%lYaIf|IC*!6Lh3Upi!$*w69j;xkV9pw(E0H)X+5ZiL9dU9Ju~dEw=0J2rx)F25>o zv+r3cRS<2;beO=FusdXN*?`n*_OS@is^5f8mPE^#+?|sk5_#hd%pmv`*gRE3Tys(a z((?EO0g2ub4`fyfR#{tPUo9@%tT*1!zXz++x2#!xs_ne0S?J<<3cg+qRqH+~E>FC4oOwPky{iNfdp>Jo z*(@5U2tqCWX=@fr1N4zQMtv?9Q%Bk=zdI!Rc}2c!P)O+qS&+Mbz-5x;8r@SjhjE(~ zG^@`oFL2Usqxg-lIKvo_$;xx8ntD#k{CSpMY}-@KX`b;Wgu$`q>FCaS_qCS2K;nUS zJ%%8Fr+)A;jzw6WoE1gV9Vl>#CrqT`6oZ};y%E)ip_d>&v_o4AdBOogO$ZoE4 zFd|b(*!Ys<+&eoQJAkGsa_#g+PZxSej;6I}<}HN>H!C>->ZZO%f=74X+!f2WRy1&a z&P*UL%RO4M2^|@*(Ai$&V|%X7&f9sWPQgW$g2!=PW_#G zFGIZB0?n{O+y=@p=g5M1U!26EBEH2IS!xX5AV!yNssG&>K{?j2R0b*{72%c+m$6o( ziv4p(iaG@m**8I=_mu#Fa92QKA6Sk$fpv6<3ZhQnzdd>$GRVyCaALaAi|wA+K8eR5#!)*3V)U0Nayv}McrpT%jwI8Yh8=xK zOO=m++>m9E{wkbg&cU3#ktRxS&3c_1pL0`<@?%I%UK`TMiOk}V$z8D25i%hL|Bg|H zG4M>Uyj;O$_S2xpwfRJm=8j5K(C{r<%iFh?cZz<|7mWY30{rt4_Fs>*H@>Fe+b+^> zw!=_VkC4pUHuFNk%?~g921*S>Q}(Jm=ghM9R9 z(5xPNQlu9gDHsRhp&-t*U2$!`1JeT)72BNkn6(UmA(WmaQAKN>UrUf^DtY;y)-c8S zu1W)T*yc+Tw7#!zIK^2!cI>*GDtn|EsDN3$FQT6wc`D4{`c(0#yTHchof$5le4|W;mR+3O84_ zZjCYly_paQ?oozlx(*~`F{ktsVxs!o1H*5BCC;M;$yO*GKs?#{3g7AtLPbz(WMyxA~q9SCG zM77387NYf?glDArE>52ID?*Y$6ZR0L5^FWjS2*B>>at@+h&z!-tNZskV(V0^-4kwm zL=3-}xOPat_-=k*`v^m)x%N1LGe@(!jz-yh0p?gmagH4&z(g#chh}ucd)%`*^0zpJ z0RI~={w2=8xF310przhJ{WHT7(TwreW@PZpq$SwShDN&JpxoM&4)9WqLYHf=&IDCm zChF(=D`+h`KWI3JE=Of!$dr}9=tRA3!_0B4O9Z5Lh{v4s7Us22- zvigN{L(U!#wu2MO}#h3OZ}Pz-xlfIR0Jyhcm^!7tXIk z_mAX~-hBW?9OS!GTiCe|o9ZXCd(0kk(5=YhAIWI_(&r-*#fv0cTx%H*S!$S2Kr55GpdK~_|P6c&ZR@22o5YNvT6y%zqtG?xiW1M)FzvQLvdpV49;^yYgFC=1hI5t-&HxJM*{7r{{XF z#r%*-B&TP#X#RQZ)XR`cFsUnpy{{nQG!;RjF^{#B99N>c?5qUQI;x-*gz)ixoFjf# zBh_{V8iMI+lujQNQ(nR(Xax18Nf4KJ-~@T^Ifb>OfAyj%u`PjLv1W{jcNByB2grZF zmEb0c^0w?j*nJdFY@%&v}>OsAv!!i68Be9}!!N2mbpPF%T1WM%jGOUaL zz*w7fs|CqP&ezipW(@A)XafE>Ui?d(e{ovB7T+J0Vww)Qf`*Ae%!OE-n5j!GlL@KH zs|EpZT^t}T=z%Bi(Y5>+D!uxX6Zwgo?H}!G=oC5px(f#8{%yF!&J_`--Pz3yEX(67 zo+si-TyuVgtyxsCS&bsW77oZKyO+R}bVF2s6u4_jqB+$XVoH%xk0w5E6=!o{#RN&f zZ;M)I>hljAsnR^kolI1haI7EwMV(S-sdWj&M;H9s!1@5HHJ60YKM^V*n?#=t^vzT` zT4(b;4b$SMiJgxf^<@c@44@0MDNkKs&RUmt``;%^IMFoK{E#oB@YPKHPy*(g`9l`dxQd;3`rarg~;sZr;tAFiWopdh9Iv*ZsM zIJmSG$_zgN$3;z*(~?=GQ+=|kfZyoNC3728*dL9U;YfIJe6mUT;0MKGSoe!h6G09a zmvsMjR!B_5NI^hs)dX9(X76FDgSk&>5lA@!1~AqavpRC|zjn=_!%4eE*C8-KQ2U;; zTm1ViLvjmt2`;UQMJXaWcN7Y&u;mv)BkkCzg3Mb)N_Sqbpuvd4oKoE z`4(VcTDP!vmOlAm)s+5YiJl9!Ow=^gsu{mIe6qgZJ%Fn_PBTkziGsPp9upj1ym~_OE6zbNts>ROI7Z2g;YHjZKJP|5q z&7|p(2yaoy2>Tb7@DN#<5j#OaK4lIg1@Wg&hc#A zlLo*QsyAv#oHKtH;;rhjpoi;tPbf$IDs$V-y7hU~Pb?Dx{%^eampK38L=RL>@5L|r z*lpAfEHp5em=v+9O53<2eqib*Fo^7|0AiP|&5u7BJqUnI-&wK=+MS@`i_;$=8|I3G z%?GUj7zu@zJQZQQN;|&S8E)a%``)c}g&L`+kivuj%|JUUw7;=UFYmhnA6TY-yq}sC z8t#amrRylP!Pkae44*tb1OUu1ib0q8+-OoL_eGff6xw&Dl8-JQ2MHa1`l zT@N%jxRRgtQ&L8(u)`#}_LzD{~p6z{4~L{L7YrLIhi-U>VCW1#67=vl1oANUaYa(yz-Z!2e3 
zb~`R=P#PFpq%3&z0(JPOP1z}uy1kTrH~KH%cK*ENi`QsiqD0dV?I9Sds%NH4&<#a@ z&O(8HeUI=ZBpuyXAH47=WSh%lWX|=`($nkdjl<;e}|%(~?e% zUGh0NF=ZM|o!+K`sZt-|(kE9OtjP|OrQwkrTX|(pOr+E{8Y;9{n_c9Q*62;H4tjk0 z1IvfWH4;>c2D8}%6YX%sj_Bcs{*%rYjI&aO z-X11+uj-`B?3k8)4!^d%+P-D-6?uZ`_>U5(p?N21v7}Aj75u`7WzO885rVOf*&y@= zNw_A+B}ScqLS{dwzIE5D4XbUQ4XN__}z)uX9Ts#Xg#TK=yfoZ|vAH1gua! z_^5NhGwV^|*#mcelJ#Z_O?3`9h=_tjWScqPGxuG2*imgD+y`%70Q9ljC0JZQ|1 z?P0nUHkA>XVknMca&KN!=%VGZpb=hZ3m%T+B@M;Y?O0_s)a)^CL0O@kv^!#2S~n#T zKx%LvbT0WS+#DwROb?YHKnpsTq#hXE$=z$x_j8CGp#WGYz`iqt9daeLmw%cz%y(Du zt%1)`Sax-G8hg;{Y&Zi$NgDh9l0+b$%ZOWn&f9d-rS?kD1MJb(-o9<4#g#lrwI^{r zGRLK*@$#*7U9uQs_Kss3lU4f<>%NeW&=^(=fA4zJ*DLMOcWeF7_^W1ylZv-2g1Q_y zIk9l!;wqfm*q~|}G)}u^O6JCG96Jk_L2J$>T|l12@+@{YWV{{{&_c8`UCjAZ>sl(i z;e~l$LHuJj2a|PC>aEbI)yfmW=Y<)98OSO~p}e_1^n1l`n^I{AtwKIhBLb&!b=w8d z8YHPaTuyvtnWqRihM>e6xP2fo@N&%r`%uCn+&qPy`C7kHrLBLJHJkN<7(`xP_wGT~ zG}9?nk;tE^)~hn64*$zEK8hwOdZXU5V_`fb)}EfKuJ-QJDy)Ln^U7#Q>K2PU4%IPR z)oo@7IvyNX(@m`wIs~0n<=z*r=gq4w5mE4d*}2dgVa0Rrj{#BHC;wF``6{_00^|P< zq5nwL@-WCX=<9;jWEH0+a9bI6Cf(-4_2-2FiLJZw`-_j!8qVl}V29M2)f05+B$wYz z{^S?h$@5%R-Kao#FNmE@<3O=gHmBu84@FzuM$R5Tr)+H4sD!SjV0j7v?AO~pp%0x8 z?Ay}MY+K36b&$ABTnZjGbh)hU@jfdd{2r%ElsdQ6ye>lnzSm81v^X&#*Vh~>XiQBkGQvIygarSA%FCX(J zoL1Nj7?t$Q%cRKUFPhsCTP8UEku`qh+P3TZh`{_;E$u^NAN*herUmx6->7EpMb$bP zlrp%+KKoe_#$3cOO?Cr&Lk9StSN~iP|JMRJ#lFBwoY<&cID=fL18o{U7WGc2%}_9B znh+#uB}b4JkRD&_u1QDua9u1*I|tDMKiv&pG6I6YJE7jb!A&|Fd{_yZKPafISad}= z7DlNE(~~?%d?a11TvnQKHLa%>Fouo-Dk-9ypgb^IW#GaIno7Q$xc)Lm(vxt1H6_WW z0Q{O(y?4i9nGm-tNv*x;?)r4ju6y?(%cbEAKQ;HUk^x3*twRsvk`&?Vs@tgaX=QQT zrIR0q+ZYPMAZPgnl%3ZRuv5BFhMi~}wfIV&1@AO6W?&)-lE@Ro@AKJe%pS8Q!kJJ` zq9!m_!Y!t5)p%m#Ek9`Xkvztu_fLIe(a|RNZ--1mewnTbIS4%ZjyvH15rn|+NZN!E zypzSVzka~2l#2|x3T&6t6doT^rNbt_W94*Kgfc!^vAw7m2q#7L@zRlXXJilFq(VjW z&`xum)?IhQqpnYcYRjJa-gfzvE#iBa3`*r9bzFB^)PPTMBliq%Ru%~Lv>U>nzB_)5 z+Zzwf2SGQpQ(@~A0acO5L{Qyag%D(t%qP=By@Zj`_+RG7%*VBiEi660hvJ~YI@kc3 zjbM7x$FO9-B8g)uwu8t8A%W9K%xnS?!(^jSYwCgisAc|PHD}QT`bd%?iEUWd$ZDow zX=HmA4NXsv@pBPNmu^8gY3tkT_$m&I?0Ax;8ic>VIjh+G$cCXs6b;JTi;5s}J5?UC z1%yJE^Ss;iV-^5@?vc|*&m%3tOnG#eGBZCGyv9> zsHxcYKPg^oV+HHojv5>i{6AbYLGsz9=q6BvkAF+&o`4;}^OmzuIbYWP9DM2w>9S0W z!fR+ZM`;XkgxT+KkkQ35rJtmw9!VvY+BY4C}C6UeS$IZHrT+EYB@rIPSnKxzj*O4asI_OjF?;2ZneV6 zsAZ1lK&6?bIiS|niyg!t`tf@cXg{feYG$<6@PkgJe}H% zB5l^-9rQtom=dbBN8cdM62mfjYyW1R`^a2rHx{1n{)*>@oY1%7l*fkwLZyKQ(%;+@ zH{>w~8iMwL=de-JoQ3UU1Ab@tK#e5(%rlkLZI6umPNsM?fIKa15Mg<1vL*Jqg98JS zLFP7)at>{^y7~~BRJW|}!1-`tc@(Zbp1W2Cumd{}(yv<3$JB5`LEc!UF9{3Xo!W=Q zr?f_Nhm?(aAD{FAbzKe&$C(mFE578dUqo{?1@roSDqp{hT&$^J^^SwFpIIL~55?#Z zU$pW{vr@q_LvlPdE~hmZd>uI4dfd(w-7pm4ZAn$y*|=Wcvf`3dxi$yDvs6H6Uhs$) zYqCsK!D$e@D}&VBV+Qg~IA=RdDJFOhT)vV_JWnou7 zX(u32nbaWmUXa<6lm+4Tk%Ak<_NTCvMdGe=nSw)-iYOkTq7QU|Yad4kb7cLcu97uX zj&P>k-f9O?cDx_SM4aBZVx}3|Fg;EA$yd0*% z1`XnNZ z9?mImCKb~}5*W>qZ-k>fAhKn}*luGYjou=q&C@p_9%E=X5jic)Pkv=9SKTmS?l614 z>c3A9P3Ur>4-0CNVcU|}T>s6Ri}L{sg(P;WP9nZ(x8<f0!@(|$TMMLt_Y+y)aRH}C#4XzbM*6RWcYz(i1}iXNNC`n9dg zY>+SNNm5xwFA)cDH%Grjf)R2DetaFUuY?8&KK|fBp=hyhc6fyTvh_^l5u%Hp>|=+7 zfD`eotMQPIvwuU6nR&&U=IG)D)vd>cc{W_`5M+MW2T+PGNJ>;JzC7U}z+>0XeDLm5 zlF0{MG$CsJIm@ZprvuanElLcX;vqw#CHXuT`DK1IVU8Ri5byD}py68@!F@q|CooTl z$H!oyK{!ze4Gpy9c>iegaOd+{r5s~5@g-9?)`Zy=2n@W@NO*$Ae!95pSrOYd6C;OL zYnh_H%K}SYb0qz{DkBH#v(6%{!XaqkNGv%_)Q;)U>;=>iE%i|++TE@c+hV_|;Z-Pl zMm(*&>ECS4GHW>f^ezAnNmAMoN}W-RO7UE38h-0E*$PlK4jj1X8a(+>ks? 
[GIT binary patch literal data elided: base85-encoded blob for the new binary file, not human-readable]
z1!MA8v;b3YO&lOY{e>f*UIh=R2!1fqQ;G?@*wI+hgW*xKaqomp!CMU3B?Y$7={_X=$;UVvgm8ke}VFUow6HZZX|y zjw%ZoC8c&BOn&7!(F!>Z^QjhLOum-M;`-$#KQv#VHxK)`_6~?B0uX`4*VW~&*f`s1 z&=r4Ies(_ZDF9p#CjW&OVBGp+JOvZkPW)kWYLZMB0|U_FK&;)G7<9GBAD*3UUFSb{ zCcsJw`N;HqRdV+K$poCfFK;uR ze39jh=KNn+TODP9mgRSA`Y(vZ;+K6dzt-`W=fCd^$p4G||5vE`AD#hl?H@=&`5#b% k^`E)Px# literal 0 HcmV?d00001 diff --git a/substrate/frame/sassafras/src/lib.rs b/substrate/frame/sassafras/src/lib.rs new file mode 100644 index 00000000000..b6f405f5654 --- /dev/null +++ b/substrate/frame/sassafras/src/lib.rs @@ -0,0 +1,1081 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Extension module for Sassafras consensus. +//! +//! [Sassafras](https://research.web3.foundation/Polkadot/protocols/block-production/SASSAFRAS) +//! is a constant-time block production protocol that aims to ensure that there is +//! exactly one block produced with constant time intervals rather than multiple or none. +//! +//! We run a lottery to distribute block production slots in an epoch and to fix the +//! order validators produce blocks in, by the beginning of an epoch. +//! +//! Each validator signs the same VRF input and publishes the output on-chain. This +//! value is their lottery ticket that can be validated against their public key. +//! +//! We want to keep lottery winners secret, i.e. do not publish their public keys. +//! At the beginning of the epoch all the validators tickets are published but not +//! their public keys. +//! +//! A valid tickets is validated when an honest validator reclaims it on block +//! production. +//! +//! To prevent submission of fake tickets, resulting in empty slots, the validator +//! when submitting the ticket accompanies it with a SNARK of the statement: "Here's +//! my VRF output that has been generated using the given VRF input and my secret +//! key. I'm not telling you my keys, but my public key is among those of the +//! nominated validators", that is validated before the lottery. +//! +//! To anonymously publish the ticket to the chain a validator sends their tickets +//! to a random validator who later puts it on-chain as a transaction. 
+ +#![deny(warnings)] +#![warn(unused_must_use, unsafe_code, unused_variables, unused_imports, missing_docs)] +#![cfg_attr(not(feature = "std"), no_std)] + +use log::{debug, error, trace, warn}; +use scale_codec::{Decode, Encode, MaxEncodedLen}; +use scale_info::TypeInfo; + +use frame_support::{ + dispatch::{DispatchResultWithPostInfo, Pays}, + traits::{Defensive, Get}, + weights::Weight, + BoundedVec, WeakBoundedVec, +}; +use frame_system::{ + offchain::{SendTransactionTypes, SubmitTransaction}, + pallet_prelude::BlockNumberFor, +}; +use sp_consensus_sassafras::{ + digests::{ConsensusLog, NextEpochDescriptor, SlotClaim}, + vrf, AuthorityId, Epoch, EpochConfiguration, Randomness, Slot, TicketBody, TicketEnvelope, + TicketId, RANDOMNESS_LENGTH, SASSAFRAS_ENGINE_ID, +}; +use sp_io::hashing; +use sp_runtime::{ + generic::DigestItem, + traits::{One, Zero}, + BoundToRuntimeAppPublic, +}; +use sp_std::prelude::Vec; + +#[cfg(feature = "runtime-benchmarks")] +mod benchmarking; +#[cfg(all(feature = "std", test))] +mod mock; +#[cfg(all(feature = "std", test))] +mod tests; + +pub mod weights; +pub use weights::WeightInfo; + +pub use pallet::*; + +const LOG_TARGET: &str = "sassafras::runtime"; + +// Contextual string used by the VRF to generate per-block randomness. +const RANDOMNESS_VRF_CONTEXT: &[u8] = b"SassafrasOnChainRandomness"; + +// Max length for segments holding unsorted tickets. +const SEGMENT_MAX_SIZE: u32 = 128; + +/// Authorities bounded vector convenience type. +pub type AuthoritiesVec = WeakBoundedVec::MaxAuthorities>; + +/// Epoch length defined by the configuration. +pub type EpochLengthFor = ::EpochLength; + +/// Tickets metadata. +#[derive(Debug, Default, PartialEq, Encode, Decode, TypeInfo, MaxEncodedLen, Clone, Copy)] +pub struct TicketsMetadata { + /// Number of outstanding next epoch tickets requiring to be sorted. + /// + /// These tickets are held by the [`UnsortedSegments`] storage map in segments + /// containing at most `SEGMENT_MAX_SIZE` items. + pub unsorted_tickets_count: u32, + + /// Number of tickets available for current and next epoch. + /// + /// These tickets are held by the [`TicketsIds`] storage map. + /// + /// The array entry to be used for the current epoch is computed as epoch index modulo 2. + pub tickets_count: [u32; 2], +} + +#[frame_support::pallet] +pub mod pallet { + use super::*; + use frame_support::pallet_prelude::*; + use frame_system::pallet_prelude::*; + + /// The Sassafras pallet. + #[pallet::pallet] + pub struct Pallet(_); + + /// Configuration parameters. + #[pallet::config] + pub trait Config: frame_system::Config + SendTransactionTypes> { + /// Amount of slots that each epoch should last. + #[pallet::constant] + type EpochLength: Get; + + /// Max number of authorities allowed. + #[pallet::constant] + type MaxAuthorities: Get; + + /// Epoch change trigger. + /// + /// Logic to be triggered on every block to query for whether an epoch has ended + /// and to perform the transition to the next epoch. + type EpochChangeTrigger: EpochChangeTrigger; + + /// Weight information for all calls of this pallet. + type WeightInfo: WeightInfo; + } + + /// Sassafras runtime errors. + #[pallet::error] + pub enum Error { + /// Submitted configuration is invalid. + InvalidConfiguration, + } + + /// Current epoch index. + #[pallet::storage] + #[pallet::getter(fn epoch_index)] + pub type EpochIndex = StorageValue<_, u64, ValueQuery>; + + /// Current epoch authorities. 
+ #[pallet::storage] + #[pallet::getter(fn authorities)] + pub type Authorities = StorageValue<_, AuthoritiesVec, ValueQuery>; + + /// Next epoch authorities. + #[pallet::storage] + #[pallet::getter(fn next_authorities)] + pub type NextAuthorities = StorageValue<_, AuthoritiesVec, ValueQuery>; + + /// First block slot number. + /// + /// As the slots may not be zero-based, we record the slot value for the fist block. + /// This allows to always compute relative indices for epochs and slots. + #[pallet::storage] + #[pallet::getter(fn genesis_slot)] + pub type GenesisSlot = StorageValue<_, Slot, ValueQuery>; + + /// Current block slot number. + #[pallet::storage] + #[pallet::getter(fn current_slot)] + pub type CurrentSlot = StorageValue<_, Slot, ValueQuery>; + + /// Current epoch randomness. + #[pallet::storage] + #[pallet::getter(fn randomness)] + pub type CurrentRandomness = StorageValue<_, Randomness, ValueQuery>; + + /// Next epoch randomness. + #[pallet::storage] + #[pallet::getter(fn next_randomness)] + pub type NextRandomness = StorageValue<_, Randomness, ValueQuery>; + + /// Randomness accumulator. + /// + /// Excluded the first imported block, its value is updated on block finalization. + #[pallet::storage] + #[pallet::getter(fn randomness_accumulator)] + pub(crate) type RandomnessAccumulator = StorageValue<_, Randomness, ValueQuery>; + + /// The configuration for the current epoch. + #[pallet::storage] + #[pallet::getter(fn config)] + pub type EpochConfig = StorageValue<_, EpochConfiguration, ValueQuery>; + + /// The configuration for the next epoch. + #[pallet::storage] + #[pallet::getter(fn next_config)] + pub type NextEpochConfig = StorageValue<_, EpochConfiguration>; + + /// Pending epoch configuration change that will be set as `NextEpochConfig` when the next + /// epoch is enacted. + /// + /// In other words, a configuration change submitted during epoch N will be enacted on epoch + /// N+2. This is to maintain coherence for already submitted tickets for epoch N+1 that where + /// computed using configuration parameters stored for epoch N+1. + #[pallet::storage] + pub type PendingEpochConfigChange = StorageValue<_, EpochConfiguration>; + + /// Stored tickets metadata. + #[pallet::storage] + pub type TicketsMeta = StorageValue<_, TicketsMetadata, ValueQuery>; + + /// Tickets identifiers map. + /// + /// The map holds tickets ids for the current and next epoch. + /// + /// The key is a tuple composed by: + /// - `u8` equal to epoch's index modulo 2; + /// - `u32` equal to the ticket's index in a sorted list of epoch's tickets. + /// + /// Epoch X first N-th ticket has key (X mod 2, N) + /// + /// Note that the ticket's index doesn't directly correspond to the slot index within the epoch. + /// The assigment is computed dynamically using an *outside-in* strategy. + /// + /// Be aware that entries within this map are never removed, only overwritten. + /// Last element index should be fetched from the [`TicketsMeta`] value. + #[pallet::storage] + pub type TicketsIds = StorageMap<_, Identity, (u8, u32), TicketId>; + + /// Tickets to be used for current and next epoch. + #[pallet::storage] + pub type TicketsData = StorageMap<_, Identity, TicketId, TicketBody>; + + /// Next epoch tickets unsorted segments. + /// + /// Contains lists of tickets where each list represents a batch of tickets + /// received via the `submit_tickets` extrinsic. + /// + /// Each segment has max length [`SEGMENT_MAX_SIZE`]. 
+ #[pallet::storage] + pub type UnsortedSegments = + StorageMap<_, Identity, u32, BoundedVec>, ValueQuery>; + + /// The most recently set of tickets which are candidates to become the next + /// epoch tickets. + #[pallet::storage] + pub type SortedCandidates = + StorageValue<_, BoundedVec>, ValueQuery>; + + /// Parameters used to construct the epoch's ring verifier. + /// + /// In practice: Updatable Universal Reference String and the seed. + #[pallet::storage] + #[pallet::getter(fn ring_context)] + pub type RingContext = StorageValue<_, vrf::RingContext>; + + /// Ring verifier data for the current epoch. + #[pallet::storage] + pub type RingVerifierData = StorageValue<_, vrf::RingVerifierData>; + + /// Slot claim vrf-preoutput used to generate per-slot randomness. + /// + /// The value is ephemeral and is cleared on block finalization. + #[pallet::storage] + pub(crate) type ClaimTemporaryData = StorageValue<_, vrf::VrfOutput>; + + /// Genesis configuration for Sassafras protocol. + #[pallet::genesis_config] + #[derive(frame_support::DefaultNoBound)] + pub struct GenesisConfig { + /// Genesis authorities. + pub authorities: Vec, + /// Genesis epoch configuration. + pub epoch_config: EpochConfiguration, + /// Phantom config + #[serde(skip)] + pub _phantom: sp_std::marker::PhantomData, + } + + #[pallet::genesis_build] + impl BuildGenesisConfig for GenesisConfig { + fn build(&self) { + EpochConfig::::put(self.epoch_config); + Pallet::::genesis_authorities_initialize(&self.authorities); + + #[cfg(feature = "construct-dummy-ring-context")] + { + debug!(target: LOG_TARGET, "Constructing dummy ring context"); + let ring_ctx = vrf::RingContext::new_testing(); + RingContext::::put(ring_ctx); + Pallet::::update_ring_verifier(&self.authorities); + } + } + } + + #[pallet::hooks] + impl Hooks> for Pallet { + fn on_initialize(block_num: BlockNumberFor) -> Weight { + debug_assert_eq!(block_num, frame_system::Pallet::::block_number()); + + let claim = >::digest() + .logs + .iter() + .find_map(|item| item.pre_runtime_try_to::(&SASSAFRAS_ENGINE_ID)) + .expect("Valid block must have a slot claim. qed"); + + CurrentSlot::::put(claim.slot); + + if block_num == One::one() { + Self::post_genesis_initialize(claim.slot); + } + + let randomness_output = claim + .vrf_signature + .outputs + .get(0) + .expect("Valid claim must have vrf signature; qed"); + ClaimTemporaryData::::put(randomness_output); + + let trigger_weight = T::EpochChangeTrigger::trigger::(block_num); + + T::WeightInfo::on_initialize() + trigger_weight + } + + fn on_finalize(_: BlockNumberFor) { + // At the end of the block, we can safely include the current slot randomness + // to the accumulator. If we've determined that this block was the first in + // a new epoch, the changeover logic has already occurred at this point + // (i.e. `enact_epoch_change` has already been called). + let randomness_input = vrf::slot_claim_input( + &Self::randomness(), + CurrentSlot::::get(), + EpochIndex::::get(), + ); + let randomness_output = ClaimTemporaryData::::take() + .expect("Unconditionally populated in `on_initialize`; `on_finalize` is always called after; qed"); + let randomness = randomness_output + .make_bytes::(RANDOMNESS_VRF_CONTEXT, &randomness_input); + Self::deposit_slot_randomness(&randomness); + + // Check if we are in the epoch's second half. + // If so, start sorting the next epoch tickets. 
+ let epoch_length = T::EpochLength::get(); + let current_slot_idx = Self::current_slot_index(); + if current_slot_idx >= epoch_length / 2 { + let mut metadata = TicketsMeta::::get(); + if metadata.unsorted_tickets_count != 0 { + let next_epoch_idx = EpochIndex::::get() + 1; + let next_epoch_tag = (next_epoch_idx & 1) as u8; + let slots_left = epoch_length.checked_sub(current_slot_idx).unwrap_or(1); + Self::sort_segments( + metadata + .unsorted_tickets_count + .div_ceil(SEGMENT_MAX_SIZE * slots_left as u32), + next_epoch_tag, + &mut metadata, + ); + TicketsMeta::::set(metadata); + } + } + } + } + + #[pallet::call] + impl Pallet { + /// Submit next epoch tickets candidates. + /// + /// The number of tickets allowed to be submitted in one call is equal to the epoch length. + #[pallet::call_index(0)] + #[pallet::weight(T::WeightInfo::submit_tickets(tickets.len() as u32))] + pub fn submit_tickets( + origin: OriginFor, + tickets: BoundedVec>, + ) -> DispatchResultWithPostInfo { + ensure_none(origin)?; + + debug!(target: LOG_TARGET, "Received {} tickets", tickets.len()); + + let epoch_length = T::EpochLength::get(); + let current_slot_idx = Self::current_slot_index(); + if current_slot_idx > epoch_length / 2 { + warn!(target: LOG_TARGET, "Tickets shall be submitted in the first epoch half",); + return Err("Tickets shall be submitted in the first epoch half".into()) + } + + let Some(verifier) = RingVerifierData::::get().map(|v| v.into()) else { + warn!(target: LOG_TARGET, "Ring verifier key not initialized"); + return Err("Ring verifier key not initialized".into()) + }; + + let next_authorities = Self::next_authorities(); + + // Compute tickets threshold + let next_config = Self::next_config().unwrap_or_else(|| Self::config()); + let ticket_threshold = sp_consensus_sassafras::ticket_id_threshold( + next_config.redundancy_factor, + epoch_length as u32, + next_config.attempts_number, + next_authorities.len() as u32, + ); + + // Get next epoch params + let randomness = NextRandomness::::get(); + let epoch_idx = EpochIndex::::get() + 1; + + let mut valid_tickets = BoundedVec::with_bounded_capacity(tickets.len()); + + for ticket in tickets { + debug!(target: LOG_TARGET, "Checking ring proof"); + + let Some(ticket_id_output) = ticket.signature.outputs.get(0) else { + debug!(target: LOG_TARGET, "Missing ticket vrf output from ring signature"); + continue + }; + let ticket_id_input = + vrf::ticket_id_input(&randomness, ticket.body.attempt_idx, epoch_idx); + + // Check threshold constraint + let ticket_id = vrf::make_ticket_id(&ticket_id_input, &ticket_id_output); + if ticket_id >= ticket_threshold { + debug!(target: LOG_TARGET, "Ignoring ticket over threshold ({:032x} >= {:032x})", ticket_id, ticket_threshold); + continue + } + + // Check for duplicates + if TicketsData::::contains_key(ticket_id) { + debug!(target: LOG_TARGET, "Ignoring duplicate ticket ({:032x})", ticket_id); + continue + } + + // Check ring signature + let sign_data = vrf::ticket_body_sign_data(&ticket.body, ticket_id_input); + if !ticket.signature.ring_vrf_verify(&sign_data, &verifier) { + debug!(target: LOG_TARGET, "Proof verification failure for ticket ({:032x})", ticket_id); + continue + } + + if let Ok(_) = valid_tickets.try_push(ticket_id).defensive_proof( + "Input segment has same length as bounded destination vector; qed", + ) { + TicketsData::::set(ticket_id, Some(ticket.body)); + } + } + + if !valid_tickets.is_empty() { + Self::append_tickets(valid_tickets); + } + + Ok(Pays::No.into()) + } + + /// Plan an epoch configuration 
change. + /// + /// The epoch configuration change is recorded and will be announced at the begining + /// of the next epoch together with next epoch authorities information. + /// In other words, the configuration will be enacted one epoch later. + /// + /// Multiple calls to this method will replace any existing planned config change + /// that has not been enacted yet. + #[pallet::call_index(1)] + #[pallet::weight(T::WeightInfo::plan_config_change())] + pub fn plan_config_change( + origin: OriginFor, + config: EpochConfiguration, + ) -> DispatchResult { + ensure_root(origin)?; + + ensure!( + config.redundancy_factor != 0 && config.attempts_number != 0, + Error::::InvalidConfiguration + ); + PendingEpochConfigChange::::put(config); + Ok(()) + } + } + + #[pallet::validate_unsigned] + impl ValidateUnsigned for Pallet { + type Call = Call; + + fn validate_unsigned(source: TransactionSource, call: &Self::Call) -> TransactionValidity { + let Call::submit_tickets { tickets } = call else { + return InvalidTransaction::Call.into() + }; + + // Discard tickets not coming from the local node or that are not included in a block + if source == TransactionSource::External { + warn!( + target: LOG_TARGET, + "Rejecting unsigned `submit_tickets` transaction from external source", + ); + return InvalidTransaction::BadSigner.into() + } + + // Current slot should be less than half of epoch length. + let epoch_length = T::EpochLength::get(); + let current_slot_idx = Self::current_slot_index(); + if current_slot_idx > epoch_length / 2 { + warn!(target: LOG_TARGET, "Tickets shall be proposed in the first epoch half",); + return InvalidTransaction::Stale.into() + } + + // This should be set such that it is discarded after the first epoch half + let tickets_longevity = epoch_length / 2 - current_slot_idx; + let tickets_tag = tickets.using_encoded(|bytes| hashing::blake2_256(bytes)); + + ValidTransaction::with_tag_prefix("Sassafras") + .priority(TransactionPriority::max_value()) + .longevity(tickets_longevity as u64) + .and_provides(tickets_tag) + .propagate(true) + .build() + } + } +} + +// Inherent methods +impl Pallet { + /// Determine whether an epoch change should take place at this block. + /// + /// Assumes that initialization has already taken place. + pub(crate) fn should_end_epoch(block_num: BlockNumberFor) -> bool { + // The epoch has technically ended during the passage of time between this block and the + // last, but we have to "end" the epoch now, since there is no earlier possible block we + // could have done it. + // + // The exception is for block 1: the genesis has slot 0, so we treat epoch 0 as having + // started at the slot of block 1. We want to use the same randomness and validator set as + // signalled in the genesis, so we don't rotate the epoch. + block_num > One::one() && Self::current_slot_index() >= T::EpochLength::get() + } + + /// Current slot index relative to the current epoch. + fn current_slot_index() -> u32 { + Self::slot_index(CurrentSlot::::get()) + } + + /// Slot index relative to the current epoch. + fn slot_index(slot: Slot) -> u32 { + slot.checked_sub(*Self::current_epoch_start()) + .and_then(|v| v.try_into().ok()) + .unwrap_or(u32::MAX) + } + + /// Finds the start slot of the current epoch. + /// + /// Only guaranteed to give correct results after `initialize` of the first + /// block in the chain (as its result is based off of `GenesisSlot`). + fn current_epoch_start() -> Slot { + Self::epoch_start(EpochIndex::::get()) + } + + /// Get the epoch's first slot. 
+ fn epoch_start(epoch_index: u64) -> Slot { + const PROOF: &str = "slot number is u64; it should relate in some way to wall clock time; \ + if u64 is not enough we should crash for safety; qed."; + + let epoch_start = epoch_index.checked_mul(T::EpochLength::get() as u64).expect(PROOF); + GenesisSlot::::get().checked_add(epoch_start).expect(PROOF).into() + } + + pub(crate) fn update_ring_verifier(authorities: &[AuthorityId]) { + debug!(target: LOG_TARGET, "Loading ring context"); + let Some(ring_ctx) = RingContext::::get() else { + debug!(target: LOG_TARGET, "Ring context not initialized"); + return + }; + + let pks: Vec<_> = authorities.iter().map(|auth| *auth.as_ref()).collect(); + + debug!(target: LOG_TARGET, "Building ring verifier (ring size: {})", pks.len()); + let verifier_data = ring_ctx + .verifier_data(&pks) + .expect("Failed to build ring verifier. This is a bug"); + + RingVerifierData::::put(verifier_data); + } + + /// Enact an epoch change. + /// + /// WARNING: Should be called on every block once and if and only if [`should_end_epoch`] + /// has returned `true`. + /// + /// If we detect one or more skipped epochs the policy is to use the authorities and values + /// from the first skipped epoch. The tickets data is invalidated. + pub(crate) fn enact_epoch_change( + authorities: WeakBoundedVec, + next_authorities: WeakBoundedVec, + ) { + if next_authorities != authorities { + Self::update_ring_verifier(&next_authorities); + } + + // Update authorities + Authorities::::put(&authorities); + NextAuthorities::::put(&next_authorities); + + // Update epoch index + let mut epoch_idx = EpochIndex::::get() + 1; + + let slot_idx = CurrentSlot::::get().saturating_sub(Self::epoch_start(epoch_idx)); + if slot_idx >= T::EpochLength::get() { + // Detected one or more skipped epochs, clear tickets data and recompute epoch index. + Self::reset_tickets_data(); + let skipped_epochs = *slot_idx / T::EpochLength::get() as u64; + epoch_idx += skipped_epochs; + warn!( + target: LOG_TARGET, + "Detected {} skipped epochs, resuming from epoch {}", + skipped_epochs, + epoch_idx + ); + } + + let mut metadata = TicketsMeta::::get(); + let mut metadata_dirty = false; + + EpochIndex::::put(epoch_idx); + + let next_epoch_idx = epoch_idx + 1; + + // Updates current epoch randomness and computes the *next* epoch randomness. + let next_randomness = Self::update_epoch_randomness(next_epoch_idx); + + if let Some(config) = NextEpochConfig::::take() { + EpochConfig::::put(config); + } + + let next_config = PendingEpochConfigChange::::take(); + if let Some(next_config) = next_config { + NextEpochConfig::::put(next_config); + } + + // After we update the current epoch, we signal the *next* epoch change + // so that nodes can track changes. + let next_epoch = NextEpochDescriptor { + randomness: next_randomness, + authorities: next_authorities.into_inner(), + config: next_config, + }; + Self::deposit_next_epoch_descriptor_digest(next_epoch); + + let epoch_tag = (epoch_idx & 1) as u8; + + // Optionally finish sorting + if metadata.unsorted_tickets_count != 0 { + Self::sort_segments(u32::MAX, epoch_tag, &mut metadata); + metadata_dirty = true; + } + + // Clear the "prev ≡ next (mod 2)" epoch tickets counter and bodies. + // Ids are left since are just cyclically overwritten on-the-go. 
+ let prev_epoch_tag = epoch_tag ^ 1; + let prev_epoch_tickets_count = &mut metadata.tickets_count[prev_epoch_tag as usize]; + if *prev_epoch_tickets_count != 0 { + for idx in 0..*prev_epoch_tickets_count { + if let Some(ticket_id) = TicketsIds::::get((prev_epoch_tag, idx)) { + TicketsData::::remove(ticket_id); + } + } + *prev_epoch_tickets_count = 0; + metadata_dirty = true; + } + + if metadata_dirty { + TicketsMeta::::set(metadata); + } + } + + // Call this function on epoch change to enact current epoch randomness. + // + // Returns the next epoch randomness. + fn update_epoch_randomness(next_epoch_index: u64) -> Randomness { + let curr_epoch_randomness = NextRandomness::::get(); + CurrentRandomness::::put(curr_epoch_randomness); + + let accumulator = RandomnessAccumulator::::get(); + + let mut buf = [0; RANDOMNESS_LENGTH + 8]; + buf[..RANDOMNESS_LENGTH].copy_from_slice(&accumulator[..]); + buf[RANDOMNESS_LENGTH..].copy_from_slice(&next_epoch_index.to_le_bytes()); + + let next_randomness = hashing::blake2_256(&buf); + NextRandomness::::put(&next_randomness); + + next_randomness + } + + // Deposit per-slot randomness. + fn deposit_slot_randomness(randomness: &Randomness) { + let accumulator = RandomnessAccumulator::::get(); + + let mut buf = [0; 2 * RANDOMNESS_LENGTH]; + buf[..RANDOMNESS_LENGTH].copy_from_slice(&accumulator[..]); + buf[RANDOMNESS_LENGTH..].copy_from_slice(&randomness[..]); + + let accumulator = hashing::blake2_256(&buf); + RandomnessAccumulator::::put(accumulator); + } + + // Deposit next epoch descriptor in the block header digest. + fn deposit_next_epoch_descriptor_digest(desc: NextEpochDescriptor) { + let item = ConsensusLog::NextEpochData(desc); + let log = DigestItem::Consensus(SASSAFRAS_ENGINE_ID, item.encode()); + >::deposit_log(log) + } + + // Initialize authorities on genesis phase. + // + // Genesis authorities may have been initialized via other means (e.g. via session pallet). + // + // If this function has already been called with some authorities, then the new list + // should match the previously set one. + fn genesis_authorities_initialize(authorities: &[AuthorityId]) { + let prev_authorities = Authorities::::get(); + + if !prev_authorities.is_empty() { + // This function has already been called. + if prev_authorities.as_slice() == authorities { + return + } else { + panic!("Authorities were already initialized"); + } + } + + let authorities = WeakBoundedVec::try_from(authorities.to_vec()) + .expect("Initial number of authorities should be lower than T::MaxAuthorities"); + Authorities::::put(&authorities); + NextAuthorities::::put(&authorities); + } + + // Method to be called on first block `on_initialize` to properly populate some key parameters. + fn post_genesis_initialize(slot: Slot) { + // Keep track of the actual first slot used (may not be zero based). + GenesisSlot::::put(slot); + + // Properly initialize randomness using genesis hash and current slot. + // This is important to guarantee that a different set of tickets are produced for: + // - different chains which share the same ring parameters and + // - same chain started with a different slot base. + let genesis_hash = frame_system::Pallet::::parent_hash(); + let mut buf = genesis_hash.as_ref().to_vec(); + buf.extend_from_slice(&slot.to_le_bytes()); + let randomness = hashing::blake2_256(buf.as_slice()); + RandomnessAccumulator::::put(randomness); + + let next_randoness = Self::update_epoch_randomness(1); + + // Deposit a log as this is the first block in first epoch. 
+ let next_epoch = NextEpochDescriptor { + randomness: next_randoness, + authorities: Self::next_authorities().into_inner(), + config: None, + }; + Self::deposit_next_epoch_descriptor_digest(next_epoch); + } + + /// Current epoch information. + pub fn current_epoch() -> Epoch { + let index = EpochIndex::::get(); + Epoch { + index, + start: Self::epoch_start(index), + length: T::EpochLength::get(), + authorities: Self::authorities().into_inner(), + randomness: Self::randomness(), + config: Self::config(), + } + } + + /// Next epoch information. + pub fn next_epoch() -> Epoch { + let index = EpochIndex::::get() + 1; + Epoch { + index, + start: Self::epoch_start(index), + length: T::EpochLength::get(), + authorities: Self::next_authorities().into_inner(), + randomness: Self::next_randomness(), + config: Self::next_config().unwrap_or_else(|| Self::config()), + } + } + + /// Fetch expected ticket-id for the given slot according to an "outside-in" sorting strategy. + /// + /// Given an ordered sequence of tickets [t0, t1, t2, ..., tk] to be assigned to n slots, + /// with n >= k, then the tickets are assigned to the slots according to the following + /// strategy: + /// + /// slot-index : [ 0, 1, 2, ............ , n ] + /// tickets : [ t1, t3, t5, ... , t4, t2, t0 ]. + /// + /// With slot-index computed as `epoch_start() - slot`. + /// + /// If `slot` value falls within the current epoch then we fetch tickets from the current epoch + /// tickets list. + /// + /// If `slot` value falls within the next epoch then we fetch tickets from the next epoch + /// tickets ids list. Note that in this case we may have not finished receiving all the tickets + /// for that epoch yet. The next epoch tickets should be considered "stable" only after the + /// current epoch first half slots were elapsed (see `submit_tickets_unsigned_extrinsic`). + /// + /// Returns `None` if, according to the sorting strategy, there is no ticket associated to the + /// specified slot-index (happens if a ticket falls in the middle of an epoch and n > k), + /// or if the slot falls beyond the next epoch. + /// + /// Before importing the first block this returns `None`. + pub fn slot_ticket_id(slot: Slot) -> Option { + if frame_system::Pallet::::block_number().is_zero() { + return None + } + let epoch_idx = EpochIndex::::get(); + let epoch_len = T::EpochLength::get(); + let mut slot_idx = Self::slot_index(slot); + let mut metadata = TicketsMeta::::get(); + + let get_ticket_idx = |slot_idx| { + let ticket_idx = if slot_idx < epoch_len / 2 { + 2 * slot_idx + 1 + } else { + 2 * (epoch_len - (slot_idx + 1)) + }; + debug!( + target: LOG_TARGET, + "slot-idx {} <-> ticket-idx {}", + slot_idx, + ticket_idx + ); + ticket_idx as u32 + }; + + let mut epoch_tag = (epoch_idx & 1) as u8; + + if epoch_len <= slot_idx && slot_idx < 2 * epoch_len { + // Try to get a ticket for the next epoch. Since its state values were not enacted yet, + // we may have to finish sorting the tickets. + epoch_tag ^= 1; + slot_idx -= epoch_len; + if metadata.unsorted_tickets_count != 0 { + Self::sort_segments(u32::MAX, epoch_tag, &mut metadata); + TicketsMeta::::set(metadata); + } + } else if slot_idx >= 2 * epoch_len { + return None + } + + let ticket_idx = get_ticket_idx(slot_idx); + if ticket_idx < metadata.tickets_count[epoch_tag as usize] { + TicketsIds::::get((epoch_tag, ticket_idx)) + } else { + None + } + } + + /// Returns ticket id and data associated with the given `slot`. 
+ /// + /// Refer to the `slot_ticket_id` documentation for the slot-ticket association + /// criteria. + pub fn slot_ticket(slot: Slot) -> Option<(TicketId, TicketBody)> { + Self::slot_ticket_id(slot).and_then(|id| TicketsData::::get(id).map(|body| (id, body))) + } + + // Sort and truncate candidate tickets, cleanup storage. + fn sort_and_truncate(candidates: &mut Vec, max_tickets: usize) -> u128 { + candidates.sort_unstable(); + candidates.drain(max_tickets..).for_each(TicketsData::::remove); + candidates[max_tickets - 1] + } + + /// Sort the tickets which belong to the epoch with the specified `epoch_tag`. + /// + /// At most `max_segments` are taken from the `UnsortedSegments` structure. + /// + /// The tickets of the removed segments are merged with the tickets on the `SortedCandidates` + /// which is then sorted an truncated to contain at most `MaxTickets` entries. + /// + /// If all the entries in `UnsortedSegments` are consumed, then `SortedCandidates` is elected + /// as the next epoch tickets, else it is saved to be used by next calls of this function. + pub(crate) fn sort_segments(max_segments: u32, epoch_tag: u8, metadata: &mut TicketsMetadata) { + let unsorted_segments_count = metadata.unsorted_tickets_count.div_ceil(SEGMENT_MAX_SIZE); + let max_segments = max_segments.min(unsorted_segments_count); + let max_tickets = Self::epoch_length() as usize; + + // Fetch the sorted candidates (if any). + let mut candidates = SortedCandidates::::take().into_inner(); + + // There is an upper bound to check only if we already sorted the max number + // of allowed tickets. + let mut upper_bound = *candidates.get(max_tickets - 1).unwrap_or(&TicketId::MAX); + + let mut require_sort = false; + + // Consume at most `max_segments` segments. + // During the process remove every stale ticket from `TicketsData` storage. + for segment_idx in (0..unsorted_segments_count).rev().take(max_segments as usize) { + let segment = UnsortedSegments::::take(segment_idx); + metadata.unsorted_tickets_count -= segment.len() as u32; + + // Push only ids with a value less than the current `upper_bound`. + let prev_len = candidates.len(); + for ticket_id in segment { + if ticket_id < upper_bound { + candidates.push(ticket_id); + } else { + TicketsData::::remove(ticket_id); + } + } + require_sort = candidates.len() != prev_len; + + // As we approach the tail of the segments buffer the `upper_bound` value is expected + // to decrease (fast). We thus expect the number of tickets pushed into the + // `candidates` vector to follow an exponential drop. + // + // Given this, sorting and truncating after processing each segment may be an overkill + // as we may find pushing few tickets more and more often. Is preferable to perform + // the sort and truncation operations only when we reach some bigger threshold + // (currently set as twice the capacity of `SortCandidate`). + // + // The more is the protocol's redundancy factor (i.e. the ratio between tickets allowed + // to be submitted and the epoch length) the more this check becomes relevant. + if candidates.len() > 2 * max_tickets { + upper_bound = Self::sort_and_truncate(&mut candidates, max_tickets); + require_sort = false; + } + } + + if candidates.len() > max_tickets { + Self::sort_and_truncate(&mut candidates, max_tickets); + } else if require_sort { + candidates.sort_unstable(); + } + + if metadata.unsorted_tickets_count == 0 { + // Sorting is over, write to next epoch map. 
+ candidates.iter().enumerate().for_each(|(i, id)| { + TicketsIds::::insert((epoch_tag, i as u32), id); + }); + metadata.tickets_count[epoch_tag as usize] = candidates.len() as u32; + } else { + // Keep the partial result for the next calls. + SortedCandidates::::set(BoundedVec::truncate_from(candidates)); + } + } + + /// Append a set of tickets to the segments map. + pub(crate) fn append_tickets(mut tickets: BoundedVec>) { + debug!(target: LOG_TARGET, "Appending batch with {} tickets", tickets.len()); + tickets.iter().for_each(|t| trace!(target: LOG_TARGET, " + {t:032x}")); + + let mut metadata = TicketsMeta::::get(); + let mut segment_idx = metadata.unsorted_tickets_count / SEGMENT_MAX_SIZE; + + while !tickets.is_empty() { + let rem = metadata.unsorted_tickets_count % SEGMENT_MAX_SIZE; + let to_be_added = tickets.len().min((SEGMENT_MAX_SIZE - rem) as usize); + + let mut segment = UnsortedSegments::::get(segment_idx); + let _ = segment + .try_extend(tickets.drain(..to_be_added)) + .defensive_proof("We don't add more than `SEGMENT_MAX_SIZE` and this is the maximum bound for the vector."); + UnsortedSegments::::insert(segment_idx, segment); + + metadata.unsorted_tickets_count += to_be_added as u32; + segment_idx += 1; + } + + TicketsMeta::::set(metadata); + } + + /// Remove all tickets related data. + /// + /// May not be efficient as the calling places may repeat some of this operations + /// but is a very extraordinary operation (hopefully never happens in production) + /// and better safe than sorry. + fn reset_tickets_data() { + let metadata = TicketsMeta::::get(); + + // Remove even/odd-epoch data. + for epoch_tag in 0..=1 { + for idx in 0..metadata.tickets_count[epoch_tag] { + if let Some(id) = TicketsIds::::get((epoch_tag as u8, idx)) { + TicketsData::::remove(id); + } + } + } + + // Remove all unsorted tickets segments. + let segments_count = metadata.unsorted_tickets_count.div_ceil(SEGMENT_MAX_SIZE); + (0..segments_count).for_each(UnsortedSegments::::remove); + + // Reset sorted candidates + SortedCandidates::::kill(); + + // Reset tickets metadata + TicketsMeta::::kill(); + } + + /// Submit next epoch validator tickets via an unsigned extrinsic constructed with a call to + /// `submit_unsigned_transaction`. + /// + /// The submitted tickets are added to the next epoch outstanding tickets as long as the + /// extrinsic is called within the first half of the epoch. Tickets received during the + /// second half are dropped. + pub fn submit_tickets_unsigned_extrinsic(tickets: Vec) -> bool { + let tickets = BoundedVec::truncate_from(tickets); + let call = Call::submit_tickets { tickets }; + match SubmitTransaction::>::submit_unsigned_transaction(call.into()) { + Ok(_) => true, + Err(e) => { + error!(target: LOG_TARGET, "Error submitting tickets {:?}", e); + false + }, + } + } + + /// Epoch length + pub fn epoch_length() -> u32 { + T::EpochLength::get() + } +} + +/// Trigger an epoch change, if any should take place. +pub trait EpochChangeTrigger { + /// May trigger an epoch change, if any should take place. + /// + /// Returns an optional `Weight` if epoch change has been triggered. + /// + /// This should be called during every block, after initialization is done. + fn trigger(_: BlockNumberFor) -> Weight; +} + +/// An `EpochChangeTrigger` which does nothing. +/// +/// In practice this means that the epoch change logic is left to some external component +/// (e.g. pallet-session). 
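+///
+/// As a usage sketch (the exact wiring depends on the runtime): a chain delegating epoch
+/// changes to something like pallet-session would set
+/// `type EpochChangeTrigger = EpochChangeExternalTrigger;` in its `Config`, while the test
+/// mock in this patch uses `EpochChangeInternalTrigger`.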
+pub struct EpochChangeExternalTrigger; + +impl EpochChangeTrigger for EpochChangeExternalTrigger { + fn trigger(_: BlockNumberFor) -> Weight { + // nothing - trigger is external. + Weight::zero() + } +} + +/// An `EpochChangeTrigger` which recycle the same authorities set forever. +/// +/// The internal trigger should only be used when no other module is responsible for +/// changing authority set. +pub struct EpochChangeInternalTrigger; + +impl EpochChangeTrigger for EpochChangeInternalTrigger { + fn trigger(block_num: BlockNumberFor) -> Weight { + if Pallet::::should_end_epoch(block_num) { + let authorities = Pallet::::next_authorities(); + let next_authorities = authorities.clone(); + let len = next_authorities.len() as u32; + Pallet::::enact_epoch_change(authorities, next_authorities); + T::WeightInfo::enact_epoch_change(len, T::EpochLength::get()) + } else { + Weight::zero() + } + } +} + +impl BoundToRuntimeAppPublic for Pallet { + type Public = AuthorityId; +} diff --git a/substrate/frame/sassafras/src/mock.rs b/substrate/frame/sassafras/src/mock.rs new file mode 100644 index 00000000000..b700207c499 --- /dev/null +++ b/substrate/frame/sassafras/src/mock.rs @@ -0,0 +1,343 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Test utilities for Sassafras pallet. + +use crate::{self as pallet_sassafras, EpochChangeInternalTrigger, *}; + +use frame_support::{ + derive_impl, + traits::{ConstU32, OnFinalize, OnInitialize}, +}; +use sp_consensus_sassafras::{ + digests::SlotClaim, + vrf::{RingProver, VrfSignature}, + AuthorityIndex, AuthorityPair, EpochConfiguration, Slot, TicketBody, TicketEnvelope, TicketId, +}; +use sp_core::{ + crypto::{ByteArray, Pair, UncheckedFrom, VrfSecret, Wraps}, + ed25519::Public as EphemeralPublic, + H256, U256, +}; +use sp_runtime::{ + testing::{Digest, DigestItem, Header, TestXt}, + BuildStorage, +}; + +const LOG_TARGET: &str = "sassafras::tests"; + +const EPOCH_LENGTH: u32 = 10; +const MAX_AUTHORITIES: u32 = 100; + +#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] +impl frame_system::Config for Test { + type Block = frame_system::mocking::MockBlock; +} + +impl frame_system::offchain::SendTransactionTypes for Test +where + RuntimeCall: From, +{ + type OverarchingCall = RuntimeCall; + type Extrinsic = TestXt; +} + +impl pallet_sassafras::Config for Test { + type EpochLength = ConstU32; + type MaxAuthorities = ConstU32; + type EpochChangeTrigger = EpochChangeInternalTrigger; + type WeightInfo = (); +} + +frame_support::construct_runtime!( + pub enum Test { + System: frame_system, + Sassafras: pallet_sassafras, + } +); + +// Default used for most of the tests. +// +// The redundancy factor has been set to max value to accept all submitted +// tickets without worrying about the threshold. 
+pub const TEST_EPOCH_CONFIGURATION: EpochConfiguration = + EpochConfiguration { redundancy_factor: u32::MAX, attempts_number: 5 }; + +/// Build and returns test storage externalities +pub fn new_test_ext(authorities_len: usize) -> sp_io::TestExternalities { + new_test_ext_with_pairs(authorities_len, false).1 +} + +/// Build and returns test storage externalities and authority set pairs used +/// by Sassafras genesis configuration. +pub fn new_test_ext_with_pairs( + authorities_len: usize, + with_ring_context: bool, +) -> (Vec, sp_io::TestExternalities) { + let pairs = (0..authorities_len) + .map(|i| AuthorityPair::from_seed(&U256::from(i).into())) + .collect::>(); + + let authorities: Vec<_> = pairs.iter().map(|p| p.public()).collect(); + + let mut storage = frame_system::GenesisConfig::::default().build_storage().unwrap(); + + pallet_sassafras::GenesisConfig:: { + authorities: authorities.clone(), + epoch_config: TEST_EPOCH_CONFIGURATION, + _phantom: sp_std::marker::PhantomData, + } + .assimilate_storage(&mut storage) + .unwrap(); + + let mut ext: sp_io::TestExternalities = storage.into(); + + if with_ring_context { + ext.execute_with(|| { + log::debug!(target: LOG_TARGET, "Building testing ring context"); + let ring_ctx = vrf::RingContext::new_testing(); + RingContext::::set(Some(ring_ctx.clone())); + Sassafras::update_ring_verifier(&authorities); + }); + } + + (pairs, ext) +} + +fn make_ticket_with_prover( + attempt: u32, + pair: &AuthorityPair, + prover: &RingProver, +) -> TicketEnvelope { + log::debug!("attempt: {}", attempt); + + // Values are referring to the next epoch + let epoch = Sassafras::epoch_index() + 1; + let randomness = Sassafras::next_randomness(); + + // Make a dummy ephemeral public that hopefully is unique within one test instance. + // In the tests, the values within the erased public are just used to compare + // ticket bodies, so it is not important to be a valid key. + let mut raw: [u8; 32] = [0; 32]; + raw.copy_from_slice(&pair.public().as_slice()[0..32]); + let erased_public = EphemeralPublic::unchecked_from(raw); + let revealed_public = erased_public; + + let ticket_id_input = vrf::ticket_id_input(&randomness, attempt, epoch); + + let body = TicketBody { attempt_idx: attempt, erased_public, revealed_public }; + let sign_data = vrf::ticket_body_sign_data(&body, ticket_id_input); + + let signature = pair.as_ref().ring_vrf_sign(&sign_data, &prover); + + // Ticket-id can be generated via vrf-preout. + // We don't care that much about its value here. + TicketEnvelope { body, signature } +} + +pub fn make_prover(pair: &AuthorityPair) -> RingProver { + let public = pair.public(); + let mut prover_idx = None; + + let ring_ctx = Sassafras::ring_context().unwrap(); + + let pks: Vec = Sassafras::authorities() + .iter() + .enumerate() + .map(|(idx, auth)| { + if public == *auth { + prover_idx = Some(idx); + } + *auth.as_ref() + }) + .collect(); + + log::debug!("Building prover. Ring size: {}", pks.len()); + let prover = ring_ctx.prover(&pks, prover_idx.unwrap()).unwrap(); + log::debug!("Done"); + + prover +} + +/// Construct `attempts` tickets envelopes for the next epoch. +/// +/// E.g. 
by passing an optional threshold +pub fn make_tickets(attempts: u32, pair: &AuthorityPair) -> Vec { + let prover = make_prover(pair); + (0..attempts) + .into_iter() + .map(|attempt| make_ticket_with_prover(attempt, pair, &prover)) + .collect() +} + +pub fn make_ticket_body(attempt_idx: u32, pair: &AuthorityPair) -> (TicketId, TicketBody) { + // Values are referring to the next epoch + let epoch = Sassafras::epoch_index() + 1; + let randomness = Sassafras::next_randomness(); + + let ticket_id_input = vrf::ticket_id_input(&randomness, attempt_idx, epoch); + let ticket_id_output = pair.as_inner_ref().vrf_output(&ticket_id_input); + + let id = vrf::make_ticket_id(&ticket_id_input, &ticket_id_output); + + // Make a dummy ephemeral public that hopefully is unique within one test instance. + // In the tests, the values within the erased public are just used to compare + // ticket bodies, so it is not important to be a valid key. + let mut raw: [u8; 32] = [0; 32]; + raw[..16].copy_from_slice(&pair.public().as_slice()[0..16]); + raw[16..].copy_from_slice(&id.to_le_bytes()); + let erased_public = EphemeralPublic::unchecked_from(raw); + let revealed_public = erased_public; + + let body = TicketBody { attempt_idx, erased_public, revealed_public }; + + (id, body) +} + +pub fn make_dummy_ticket_body(attempt_idx: u32) -> (TicketId, TicketBody) { + let hash = sp_core::hashing::blake2_256(&attempt_idx.to_le_bytes()); + + let erased_public = EphemeralPublic::unchecked_from(hash); + let revealed_public = erased_public; + + let body = TicketBody { attempt_idx, erased_public, revealed_public }; + + let mut bytes = [0u8; 16]; + bytes.copy_from_slice(&hash[..16]); + let id = TicketId::from_le_bytes(bytes); + + (id, body) +} + +pub fn make_ticket_bodies( + number: u32, + pair: Option<&AuthorityPair>, +) -> Vec<(TicketId, TicketBody)> { + (0..number) + .into_iter() + .map(|i| match pair { + Some(pair) => make_ticket_body(i, pair), + None => make_dummy_ticket_body(i), + }) + .collect() +} + +/// Persist the given tickets in the unsorted segments buffer. +/// +/// This function skips all the checks performed by the `submit_tickets` extrinsic and +/// directly appends the tickets to the `UnsortedSegments` structure. +pub fn persist_next_epoch_tickets_as_segments(tickets: &[(TicketId, TicketBody)]) { + let mut ids = Vec::with_capacity(tickets.len()); + tickets.iter().for_each(|(id, body)| { + TicketsData::::set(id, Some(body.clone())); + ids.push(*id); + }); + let max_chunk_size = Sassafras::epoch_length() as usize; + ids.chunks(max_chunk_size).for_each(|chunk| { + Sassafras::append_tickets(BoundedVec::truncate_from(chunk.to_vec())); + }) +} + +/// Calls the [`persist_next_epoch_tickets_as_segments`] and then proceeds to the +/// sorting of the candidates. +/// +/// Only "winning" tickets are left. +pub fn persist_next_epoch_tickets(tickets: &[(TicketId, TicketBody)]) { + persist_next_epoch_tickets_as_segments(tickets); + // Force sorting of next epoch tickets (enactment) by explicitly querying the first of them. + let next_epoch = Sassafras::next_epoch(); + assert_eq!(TicketsMeta::::get().unsorted_tickets_count, tickets.len() as u32); + Sassafras::slot_ticket(next_epoch.start).unwrap(); + assert_eq!(TicketsMeta::::get().unsorted_tickets_count, 0); +} + +fn slot_claim_vrf_signature(slot: Slot, pair: &AuthorityPair) -> VrfSignature { + let mut epoch = Sassafras::epoch_index(); + let mut randomness = Sassafras::randomness(); + + // Check if epoch is going to change on initialization. 
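+	// (Worked example, assuming the mock's EPOCH_LENGTH of 10: with epoch_start = 100, a
+	// claim for slot 125 falls two epochs ahead, so it is signed with `epoch + 2` and the
+	// next-epoch randomness.)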
+ let epoch_start = Sassafras::current_epoch_start(); + let epoch_length = EPOCH_LENGTH.into(); + if epoch_start != 0_u64 && slot >= epoch_start + epoch_length { + epoch += slot.saturating_sub(epoch_start).saturating_div(epoch_length); + randomness = crate::NextRandomness::::get(); + } + + let data = vrf::slot_claim_sign_data(&randomness, slot, epoch); + pair.as_ref().vrf_sign(&data) +} + +/// Construct a `PreDigest` instance for the given parameters. +pub fn make_slot_claim( + authority_idx: AuthorityIndex, + slot: Slot, + pair: &AuthorityPair, +) -> SlotClaim { + let vrf_signature = slot_claim_vrf_signature(slot, pair); + SlotClaim { authority_idx, slot, vrf_signature, ticket_claim: None } +} + +/// Construct a `Digest` with a `SlotClaim` item. +pub fn make_digest(authority_idx: AuthorityIndex, slot: Slot, pair: &AuthorityPair) -> Digest { + let claim = make_slot_claim(authority_idx, slot, pair); + Digest { logs: vec![DigestItem::from(&claim)] } +} + +pub fn initialize_block( + number: u64, + slot: Slot, + parent_hash: H256, + pair: &AuthorityPair, +) -> Digest { + let digest = make_digest(0, slot, pair); + System::reset_events(); + System::initialize(&number, &parent_hash, &digest); + Sassafras::on_initialize(number); + digest +} + +pub fn finalize_block(number: u64) -> Header { + Sassafras::on_finalize(number); + System::finalize() +} + +/// Progress the pallet state up to the given block `number` and `slot`. +pub fn go_to_block(number: u64, slot: Slot, pair: &AuthorityPair) -> Digest { + Sassafras::on_finalize(System::block_number()); + let parent_hash = System::finalize().hash(); + + let digest = make_digest(0, slot, pair); + + System::reset_events(); + System::initialize(&number, &parent_hash, &digest); + Sassafras::on_initialize(number); + + digest +} + +/// Progress the pallet state up to the given block `number`. +/// Slots will grow linearly accordingly to blocks. +pub fn progress_to_block(number: u64, pair: &AuthorityPair) -> Option { + let mut slot = Sassafras::current_slot() + 1; + let mut digest = None; + for i in System::block_number() + 1..=number { + let dig = go_to_block(i, slot, pair); + digest = Some(dig); + slot = slot + 1; + } + digest +} diff --git a/substrate/frame/sassafras/src/tests.rs b/substrate/frame/sassafras/src/tests.rs new file mode 100644 index 00000000000..ec3425cce7b --- /dev/null +++ b/substrate/frame/sassafras/src/tests.rs @@ -0,0 +1,874 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Tests for Sassafras pallet. 
+ +use crate::*; +use mock::*; + +use sp_consensus_sassafras::Slot; + +fn h2b(hex: &str) -> [u8; N] { + array_bytes::hex2array_unchecked(hex) +} + +fn b2h(bytes: [u8; N]) -> String { + array_bytes::bytes2hex("", &bytes) +} + +#[test] +fn genesis_values_assumptions_check() { + new_test_ext(3).execute_with(|| { + assert_eq!(Sassafras::authorities().len(), 3); + assert_eq!(Sassafras::config(), TEST_EPOCH_CONFIGURATION); + }); +} + +#[test] +fn post_genesis_randomness_initialization() { + let (pairs, mut ext) = new_test_ext_with_pairs(1, false); + let pair = &pairs[0]; + + ext.execute_with(|| { + assert_eq!(Sassafras::randomness(), [0; 32]); + assert_eq!(Sassafras::next_randomness(), [0; 32]); + assert_eq!(Sassafras::randomness_accumulator(), [0; 32]); + + // Test the values with a zero genesis block hash + let _ = initialize_block(1, 123.into(), [0x00; 32].into(), pair); + + assert_eq!(Sassafras::randomness(), [0; 32]); + println!("[DEBUG] {}", b2h(Sassafras::next_randomness())); + assert_eq!( + Sassafras::next_randomness(), + h2b("b9497550deeeb4adc134555930de61968a0558f8947041eb515b2f5fa68ffaf7") + ); + println!("[DEBUG] {}", b2h(Sassafras::randomness_accumulator())); + assert_eq!( + Sassafras::randomness_accumulator(), + h2b("febcc7fe9539fe17ed29f525831394edfb30b301755dc9bd91584a1f065faf87") + ); + let (id1, _) = make_ticket_bodies(1, Some(pair))[0]; + + // Reset what is relevant + NextRandomness::::set([0; 32]); + RandomnessAccumulator::::set([0; 32]); + + // Test the values with a non-zero genesis block hash + let _ = initialize_block(1, 123.into(), [0xff; 32].into(), pair); + + assert_eq!(Sassafras::randomness(), [0; 32]); + println!("[DEBUG] {}", b2h(Sassafras::next_randomness())); + assert_eq!( + Sassafras::next_randomness(), + h2b("51c1e3b3a73d2043b3cabae98ff27bdd4aad8967c21ecda7b9465afaa0e70f37") + ); + println!("[DEBUG] {}", b2h(Sassafras::randomness_accumulator())); + assert_eq!( + Sassafras::randomness_accumulator(), + h2b("466bf3007f2e17bffee0b3c42c90f33d654f5ff61eff28b0cc650825960abd52") + ); + let (id2, _) = make_ticket_bodies(1, Some(pair))[0]; + + // Ticket ids should be different when next epoch randomness is different + assert_ne!(id1, id2); + + // Reset what is relevant + NextRandomness::::set([0; 32]); + RandomnessAccumulator::::set([0; 32]); + + // Test the values with a non-zero genesis block hash + let _ = initialize_block(1, 321.into(), [0x00; 32].into(), pair); + + println!("[DEBUG] {}", b2h(Sassafras::next_randomness())); + assert_eq!( + Sassafras::next_randomness(), + h2b("d85d84a54f79453000eb62e8a17b30149bd728d3232bc2787a89d51dc9a36008") + ); + println!("[DEBUG] {}", b2h(Sassafras::randomness_accumulator())); + assert_eq!( + Sassafras::randomness_accumulator(), + h2b("8a035eed02b5b8642b1515ed19752df8df156627aea45c4ef6e3efa88be9a74d") + ); + let (id2, _) = make_ticket_bodies(1, Some(pair))[0]; + + // Ticket ids should be different when next epoch randomness is different + assert_ne!(id1, id2); + }); +} + +// Tests if the sorted tickets are assigned to each slot outside-in. 
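+//
+// As a sketch of the layout the assertions below expect (10 slots, 6 sorted tickets t0..t5):
+//
+//   slot index : 0    1    2    3..6    7    8    9
+//   ticket     : t1   t3   t5   none    t4   t2   t0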
+#[test] +fn slot_ticket_id_outside_in_fetch() { + let genesis_slot = Slot::from(100); + let tickets_count = 6; + + // Current epoch tickets + let curr_tickets: Vec = (0..tickets_count).map(|i| i as TicketId).collect(); + + // Next epoch tickets + let next_tickets: Vec = + (0..tickets_count - 1).map(|i| (i + tickets_count) as TicketId).collect(); + + new_test_ext(0).execute_with(|| { + // Some corner cases + TicketsIds::::insert((0, 0_u32), 1_u128); + + // Cleanup + (0..3).for_each(|i| TicketsIds::::remove((0, i as u32))); + + curr_tickets + .iter() + .enumerate() + .for_each(|(i, id)| TicketsIds::::insert((0, i as u32), id)); + + next_tickets + .iter() + .enumerate() + .for_each(|(i, id)| TicketsIds::::insert((1, i as u32), id)); + + TicketsMeta::::set(TicketsMetadata { + tickets_count: [curr_tickets.len() as u32, next_tickets.len() as u32], + unsorted_tickets_count: 0, + }); + + // Before importing the first block the pallet always return `None` + // This is a kind of special hardcoded case that should never happen in practice + // as the first thing the pallet does is to initialize the genesis slot. + + assert_eq!(Sassafras::slot_ticket_id(0.into()), None); + assert_eq!(Sassafras::slot_ticket_id(genesis_slot + 0), None); + assert_eq!(Sassafras::slot_ticket_id(genesis_slot + 1), None); + assert_eq!(Sassafras::slot_ticket_id(genesis_slot + 100), None); + + // Initialize genesis slot.. + GenesisSlot::::set(genesis_slot); + frame_system::Pallet::::set_block_number(One::one()); + + // Try to fetch a ticket for a slot before current epoch. + assert_eq!(Sassafras::slot_ticket_id(0.into()), None); + + // Current epoch tickets. + assert_eq!(Sassafras::slot_ticket_id(genesis_slot + 0), Some(curr_tickets[1])); + assert_eq!(Sassafras::slot_ticket_id(genesis_slot + 1), Some(curr_tickets[3])); + assert_eq!(Sassafras::slot_ticket_id(genesis_slot + 2), Some(curr_tickets[5])); + assert_eq!(Sassafras::slot_ticket_id(genesis_slot + 3), None); + assert_eq!(Sassafras::slot_ticket_id(genesis_slot + 4), None); + assert_eq!(Sassafras::slot_ticket_id(genesis_slot + 5), None); + assert_eq!(Sassafras::slot_ticket_id(genesis_slot + 6), None); + assert_eq!(Sassafras::slot_ticket_id(genesis_slot + 7), Some(curr_tickets[4])); + assert_eq!(Sassafras::slot_ticket_id(genesis_slot + 8), Some(curr_tickets[2])); + assert_eq!(Sassafras::slot_ticket_id(genesis_slot + 9), Some(curr_tickets[0])); + + // Next epoch tickets (note that only 5 tickets are available) + assert_eq!(Sassafras::slot_ticket_id(genesis_slot + 10), Some(next_tickets[1])); + assert_eq!(Sassafras::slot_ticket_id(genesis_slot + 11), Some(next_tickets[3])); + assert_eq!(Sassafras::slot_ticket_id(genesis_slot + 12), None); + assert_eq!(Sassafras::slot_ticket_id(genesis_slot + 13), None); + assert_eq!(Sassafras::slot_ticket_id(genesis_slot + 14), None); + assert_eq!(Sassafras::slot_ticket_id(genesis_slot + 15), None); + assert_eq!(Sassafras::slot_ticket_id(genesis_slot + 16), None); + assert_eq!(Sassafras::slot_ticket_id(genesis_slot + 17), Some(next_tickets[4])); + assert_eq!(Sassafras::slot_ticket_id(genesis_slot + 18), Some(next_tickets[2])); + assert_eq!(Sassafras::slot_ticket_id(genesis_slot + 19), Some(next_tickets[0])); + + // Try to fetch the tickets for slots beyond the next epoch. + assert_eq!(Sassafras::slot_ticket_id(genesis_slot + 20), None); + assert_eq!(Sassafras::slot_ticket_id(genesis_slot + 42), None); + }); +} + +// Different test for outside-in test with more focus on corner case correctness. 
+#[test] +fn slot_ticket_id_outside_in_fetch_corner_cases() { + new_test_ext(0).execute_with(|| { + frame_system::Pallet::::set_block_number(One::one()); + + let mut meta = TicketsMetadata { tickets_count: [0, 0], unsorted_tickets_count: 0 }; + let curr_epoch_idx = EpochIndex::::get(); + + let mut epoch_test = |epoch_idx| { + let tag = (epoch_idx & 1) as u8; + let epoch_start = Sassafras::epoch_start(epoch_idx); + + // cleanup + meta.tickets_count = [0, 0]; + TicketsMeta::::set(meta); + assert!((0..10).all(|i| Sassafras::slot_ticket_id((epoch_start + i).into()).is_none())); + + meta.tickets_count[tag as usize] += 1; + TicketsMeta::::set(meta); + TicketsIds::::insert((tag, 0_u32), 1_u128); + assert_eq!(Sassafras::slot_ticket_id((epoch_start + 9).into()), Some(1_u128)); + assert!((0..9).all(|i| Sassafras::slot_ticket_id((epoch_start + i).into()).is_none())); + + meta.tickets_count[tag as usize] += 1; + TicketsMeta::::set(meta); + TicketsIds::::insert((tag, 1_u32), 2_u128); + assert_eq!(Sassafras::slot_ticket_id((epoch_start + 0).into()), Some(2_u128)); + assert!((1..9).all(|i| Sassafras::slot_ticket_id((epoch_start + i).into()).is_none())); + + meta.tickets_count[tag as usize] += 2; + TicketsMeta::::set(meta); + TicketsIds::::insert((tag, 2_u32), 3_u128); + assert_eq!(Sassafras::slot_ticket_id((epoch_start + 8).into()), Some(3_u128)); + assert!((1..8).all(|i| Sassafras::slot_ticket_id((epoch_start + i).into()).is_none())); + }; + + // Even epoch + epoch_test(curr_epoch_idx); + epoch_test(curr_epoch_idx + 1); + }); +} + +#[test] +fn on_first_block_after_genesis() { + let (pairs, mut ext) = new_test_ext_with_pairs(4, false); + + ext.execute_with(|| { + let start_slot = Slot::from(100); + let start_block = 1; + + let digest = initialize_block(start_block, start_slot, Default::default(), &pairs[0]); + + let common_assertions = || { + assert_eq!(Sassafras::genesis_slot(), start_slot); + assert_eq!(Sassafras::current_slot(), start_slot); + assert_eq!(Sassafras::epoch_index(), 0); + assert_eq!(Sassafras::current_epoch_start(), start_slot); + assert_eq!(Sassafras::current_slot_index(), 0); + assert_eq!(Sassafras::randomness(), [0; 32]); + println!("[DEBUG] {}", b2h(Sassafras::next_randomness())); + assert_eq!( + Sassafras::next_randomness(), + h2b("a49592ef190b96f3eb87bde4c8355e33df28c75006156e8c81998158de2ed49e") + ); + }; + + // Post-initialization status + + assert!(ClaimTemporaryData::::exists()); + common_assertions(); + println!("[DEBUG] {}", b2h(Sassafras::randomness_accumulator())); + assert_eq!( + Sassafras::randomness_accumulator(), + h2b("f0d42f6b7c0d157ecbd788be44847b80a96c290c04b5dfa5d1d40c98aa0c04ed") + ); + + let header = finalize_block(start_block); + + // Post-finalization status + + assert!(!ClaimTemporaryData::::exists()); + common_assertions(); + println!("[DEBUG] {}", b2h(Sassafras::randomness_accumulator())); + assert_eq!( + Sassafras::randomness_accumulator(), + h2b("9f2b9fd19a772c34d437dcd8b84a927e73a5cb43d3d1cd00093223d60d2b4843"), + ); + + // Header data check + + assert_eq!(header.digest.logs.len(), 2); + assert_eq!(header.digest.logs[0], digest.logs[0]); + + // Genesis epoch start deposits consensus + let consensus_log = sp_consensus_sassafras::digests::ConsensusLog::NextEpochData( + sp_consensus_sassafras::digests::NextEpochDescriptor { + authorities: Sassafras::next_authorities().into_inner(), + randomness: Sassafras::next_randomness(), + config: None, + }, + ); + let consensus_digest = DigestItem::Consensus(SASSAFRAS_ENGINE_ID, consensus_log.encode()); + 
assert_eq!(header.digest.logs[1], consensus_digest) + }) +} + +#[test] +fn on_normal_block() { + let (pairs, mut ext) = new_test_ext_with_pairs(4, false); + let start_slot = Slot::from(100); + let start_block = 1; + let end_block = start_block + 1; + + ext.execute_with(|| { + initialize_block(start_block, start_slot, Default::default(), &pairs[0]); + + // We don't want to trigger an epoch change in this test. + let epoch_length = Sassafras::epoch_length() as u64; + assert!(epoch_length > end_block); + + // Progress to block 2 + let digest = progress_to_block(end_block, &pairs[0]).unwrap(); + + let common_assertions = || { + assert_eq!(Sassafras::genesis_slot(), start_slot); + assert_eq!(Sassafras::current_slot(), start_slot + 1); + assert_eq!(Sassafras::epoch_index(), 0); + assert_eq!(Sassafras::current_epoch_start(), start_slot); + assert_eq!(Sassafras::current_slot_index(), 1); + assert_eq!(Sassafras::randomness(), [0; 32]); + println!("[DEBUG] {}", b2h(Sassafras::next_randomness())); + assert_eq!( + Sassafras::next_randomness(), + h2b("a49592ef190b96f3eb87bde4c8355e33df28c75006156e8c81998158de2ed49e") + ); + }; + + // Post-initialization status + + assert!(ClaimTemporaryData::::exists()); + common_assertions(); + println!("[DEBUG] {}", b2h(Sassafras::randomness_accumulator())); + assert_eq!( + Sassafras::randomness_accumulator(), + h2b("9f2b9fd19a772c34d437dcd8b84a927e73a5cb43d3d1cd00093223d60d2b4843"), + ); + + let header = finalize_block(end_block); + + // Post-finalization status + + assert!(!ClaimTemporaryData::::exists()); + common_assertions(); + assert_eq!( + Sassafras::randomness_accumulator(), + h2b("be9261adb9686dfd3f23f8a276b7acc7f4beb3137070beb64c282ac22d84cbf0"), + ); + + // Header data check + + assert_eq!(header.digest.logs.len(), 1); + assert_eq!(header.digest.logs[0], digest.logs[0]); + }); +} + +#[test] +fn produce_epoch_change_digest_no_config() { + let (pairs, mut ext) = new_test_ext_with_pairs(4, false); + + ext.execute_with(|| { + let start_slot = Slot::from(100); + let start_block = 1; + + initialize_block(start_block, start_slot, Default::default(), &pairs[0]); + + // We want to trigger an epoch change in this test. 
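+		// (With the mock's EPOCH_LENGTH of 10 and start_block = 1, end_block = 11 is the first
+		// block of epoch 1, so its header is expected to carry the NextEpochData consensus log
+		// checked below.)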
+ let epoch_length = Sassafras::epoch_length() as u64; + let end_block = start_block + epoch_length; + + let digest = progress_to_block(end_block, &pairs[0]).unwrap(); + + let common_assertions = || { + assert_eq!(Sassafras::genesis_slot(), start_slot); + assert_eq!(Sassafras::current_slot(), start_slot + epoch_length); + assert_eq!(Sassafras::epoch_index(), 1); + assert_eq!(Sassafras::current_epoch_start(), start_slot + epoch_length); + assert_eq!(Sassafras::current_slot_index(), 0); + println!("[DEBUG] {}", b2h(Sassafras::randomness())); + assert_eq!( + Sassafras::randomness(), + h2b("a49592ef190b96f3eb87bde4c8355e33df28c75006156e8c81998158de2ed49e") + ); + }; + + // Post-initialization status + + assert!(ClaimTemporaryData::::exists()); + common_assertions(); + println!("[DEBUG] {}", b2h(Sassafras::next_randomness())); + assert_eq!( + Sassafras::next_randomness(), + h2b("d3a18b857af6ecc7b52f047107e684fff0058b5722d540a296d727e37eaa55b3"), + ); + println!("[DEBUG] {}", b2h(Sassafras::randomness_accumulator())); + assert_eq!( + Sassafras::randomness_accumulator(), + h2b("bf0f1228f4ff953c8c1bda2cceb668bf86ea05d7ae93e26d021c9690995d5279"), + ); + + let header = finalize_block(end_block); + + // Post-finalization status + + assert!(!ClaimTemporaryData::::exists()); + common_assertions(); + println!("[DEBUG] {}", b2h(Sassafras::next_randomness())); + assert_eq!( + Sassafras::next_randomness(), + h2b("d3a18b857af6ecc7b52f047107e684fff0058b5722d540a296d727e37eaa55b3"), + ); + println!("[DEBUG] {}", b2h(Sassafras::randomness_accumulator())); + assert_eq!( + Sassafras::randomness_accumulator(), + h2b("8a1ceb346036c386d021264b10912c8b656799668004c4a487222462b394cd89"), + ); + + // Header data check + + assert_eq!(header.digest.logs.len(), 2); + assert_eq!(header.digest.logs[0], digest.logs[0]); + // Deposits consensus log on epoch change + let consensus_log = sp_consensus_sassafras::digests::ConsensusLog::NextEpochData( + sp_consensus_sassafras::digests::NextEpochDescriptor { + authorities: Sassafras::next_authorities().into_inner(), + randomness: Sassafras::next_randomness(), + config: None, + }, + ); + let consensus_digest = DigestItem::Consensus(SASSAFRAS_ENGINE_ID, consensus_log.encode()); + assert_eq!(header.digest.logs[1], consensus_digest) + }) +} + +#[test] +fn produce_epoch_change_digest_with_config() { + let (pairs, mut ext) = new_test_ext_with_pairs(4, false); + + ext.execute_with(|| { + let start_slot = Slot::from(100); + let start_block = 1; + + initialize_block(start_block, start_slot, Default::default(), &pairs[0]); + + let config = EpochConfiguration { redundancy_factor: 1, attempts_number: 123 }; + Sassafras::plan_config_change(RuntimeOrigin::root(), config).unwrap(); + + // We want to trigger an epoch change in this test. + let epoch_length = Sassafras::epoch_length() as u64; + let end_block = start_block + epoch_length; + + let digest = progress_to_block(end_block, &pairs[0]).unwrap(); + + let header = finalize_block(end_block); + + // Header data check. + // Skip pallet status checks that were already performed by other tests. 
+ + assert_eq!(header.digest.logs.len(), 2); + assert_eq!(header.digest.logs[0], digest.logs[0]); + // Deposits consensus log on epoch change + let consensus_log = sp_consensus_sassafras::digests::ConsensusLog::NextEpochData( + sp_consensus_sassafras::digests::NextEpochDescriptor { + authorities: Sassafras::next_authorities().into_inner(), + randomness: Sassafras::next_randomness(), + config: Some(config), + }, + ); + let consensus_digest = DigestItem::Consensus(SASSAFRAS_ENGINE_ID, consensus_log.encode()); + assert_eq!(header.digest.logs[1], consensus_digest) + }) +} + +#[test] +fn segments_incremental_sort_works() { + let (pairs, mut ext) = new_test_ext_with_pairs(1, false); + let pair = &pairs[0]; + let segments_count = 14; + let start_slot = Slot::from(100); + let start_block = 1; + + ext.execute_with(|| { + let epoch_length = Sassafras::epoch_length() as u64; + // -3 just to have the last segment not full... + let submitted_tickets_count = segments_count * SEGMENT_MAX_SIZE - 3; + + initialize_block(start_block, start_slot, Default::default(), pair); + + // Manually populate the segments to skip the threshold check + let mut tickets = make_ticket_bodies(submitted_tickets_count, None); + persist_next_epoch_tickets_as_segments(&tickets); + + // Proceed to half of the epoch (sortition should not have been started yet) + let half_epoch_block = start_block + epoch_length / 2; + progress_to_block(half_epoch_block, pair); + + let mut unsorted_tickets_count = submitted_tickets_count; + + // Check that next epoch tickets sortition is not started yet + let meta = TicketsMeta::::get(); + assert_eq!(meta.unsorted_tickets_count, unsorted_tickets_count); + assert_eq!(meta.tickets_count, [0, 0]); + + // Follow the incremental sortition block by block + + progress_to_block(half_epoch_block + 1, pair); + unsorted_tickets_count -= 3 * SEGMENT_MAX_SIZE - 3; + let meta = TicketsMeta::::get(); + assert_eq!(meta.unsorted_tickets_count, unsorted_tickets_count,); + assert_eq!(meta.tickets_count, [0, 0]); + + progress_to_block(half_epoch_block + 2, pair); + unsorted_tickets_count -= 3 * SEGMENT_MAX_SIZE; + let meta = TicketsMeta::::get(); + assert_eq!(meta.unsorted_tickets_count, unsorted_tickets_count); + assert_eq!(meta.tickets_count, [0, 0]); + + progress_to_block(half_epoch_block + 3, pair); + unsorted_tickets_count -= 3 * SEGMENT_MAX_SIZE; + let meta = TicketsMeta::::get(); + assert_eq!(meta.unsorted_tickets_count, unsorted_tickets_count); + assert_eq!(meta.tickets_count, [0, 0]); + + progress_to_block(half_epoch_block + 4, pair); + unsorted_tickets_count -= 3 * SEGMENT_MAX_SIZE; + let meta = TicketsMeta::::get(); + assert_eq!(meta.unsorted_tickets_count, unsorted_tickets_count); + assert_eq!(meta.tickets_count, [0, 0]); + + let header = finalize_block(half_epoch_block + 4); + + // Sort should be finished now. + // Check that next epoch tickets count have the correct value. + // Bigger ticket ids were discarded during sortition. 
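+		// (Arithmetic sketch: 14 * SEGMENT_MAX_SIZE - 3 tickets were submitted and the four
+		// incremental passes above consumed (3*S - 3) + 3*S + 3*S + 3*S of them, so exactly
+		// 2 * SEGMENT_MAX_SIZE tickets are left for this last on-finalize pass.)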
+ unsorted_tickets_count -= 2 * SEGMENT_MAX_SIZE; + assert_eq!(unsorted_tickets_count, 0); + let meta = TicketsMeta::::get(); + assert_eq!(meta.unsorted_tickets_count, unsorted_tickets_count); + assert_eq!(meta.tickets_count, [0, epoch_length as u32]); + // Epoch change log should have been pushed as well + assert_eq!(header.digest.logs.len(), 1); + // No tickets for the current epoch + assert_eq!(TicketsIds::::get((0, 0)), None); + + // Check persistence of "winning" tickets + tickets.sort_by_key(|t| t.0); + (0..epoch_length as usize).into_iter().for_each(|i| { + let id = TicketsIds::::get((1, i as u32)).unwrap(); + let body = TicketsData::::get(id).unwrap(); + assert_eq!((id, body), tickets[i]); + }); + // Check removal of "loosing" tickets + (epoch_length as usize..tickets.len()).into_iter().for_each(|i| { + assert!(TicketsIds::::get((1, i as u32)).is_none()); + assert!(TicketsData::::get(tickets[i].0).is_none()); + }); + + // The next block will be the first produced on the new epoch. + // At this point the tickets are found already sorted and ready to be used. + let slot = Sassafras::current_slot() + 1; + let number = System::block_number() + 1; + initialize_block(number, slot, header.hash(), pair); + let header = finalize_block(number); + // Epoch changes digest is also produced + assert_eq!(header.digest.logs.len(), 2); + }); +} + +#[test] +fn tickets_fetch_works_after_epoch_change() { + let (pairs, mut ext) = new_test_ext_with_pairs(4, false); + let pair = &pairs[0]; + let start_slot = Slot::from(100); + let start_block = 1; + let submitted_tickets = 300; + + ext.execute_with(|| { + initialize_block(start_block, start_slot, Default::default(), pair); + + // We don't want to trigger an epoch change in this test. + let epoch_length = Sassafras::epoch_length() as u64; + assert!(epoch_length > 2); + progress_to_block(2, &pairs[0]).unwrap(); + + // Persist tickets as three different segments. + let tickets = make_ticket_bodies(submitted_tickets, None); + persist_next_epoch_tickets_as_segments(&tickets); + + let meta = TicketsMeta::::get(); + assert_eq!(meta.unsorted_tickets_count, submitted_tickets); + assert_eq!(meta.tickets_count, [0, 0]); + + // Progress up to the last epoch slot (do not enact epoch change) + progress_to_block(epoch_length, &pairs[0]).unwrap(); + + // At this point next epoch tickets should have been sorted and ready to be used + let meta = TicketsMeta::::get(); + assert_eq!(meta.unsorted_tickets_count, 0); + assert_eq!(meta.tickets_count, [0, epoch_length as u32]); + + // Compute and sort the tickets ids (aka tickets scores) + let mut expected_ids: Vec<_> = tickets.into_iter().map(|(id, _)| id).collect(); + expected_ids.sort(); + expected_ids.truncate(epoch_length as usize); + + // Check if we can fetch next epoch tickets ids (outside-in). 
+ let slot = Sassafras::current_slot(); + assert_eq!(Sassafras::slot_ticket_id(slot + 1).unwrap(), expected_ids[1]); + assert_eq!(Sassafras::slot_ticket_id(slot + 2).unwrap(), expected_ids[3]); + assert_eq!(Sassafras::slot_ticket_id(slot + 3).unwrap(), expected_ids[5]); + assert_eq!(Sassafras::slot_ticket_id(slot + 4).unwrap(), expected_ids[7]); + assert_eq!(Sassafras::slot_ticket_id(slot + 7).unwrap(), expected_ids[6]); + assert_eq!(Sassafras::slot_ticket_id(slot + 8).unwrap(), expected_ids[4]); + assert_eq!(Sassafras::slot_ticket_id(slot + 9).unwrap(), expected_ids[2]); + assert_eq!(Sassafras::slot_ticket_id(slot + 10).unwrap(), expected_ids[0]); + assert!(Sassafras::slot_ticket_id(slot + 11).is_none()); + + // Enact epoch change by progressing one more block + + progress_to_block(epoch_length + 1, &pairs[0]).unwrap(); + + let meta = TicketsMeta::::get(); + assert_eq!(meta.unsorted_tickets_count, 0); + assert_eq!(meta.tickets_count, [0, 10]); + + // Check if we can fetch current epoch tickets ids (outside-in). + let slot = Sassafras::current_slot(); + assert_eq!(Sassafras::slot_ticket_id(slot).unwrap(), expected_ids[1]); + assert_eq!(Sassafras::slot_ticket_id(slot + 1).unwrap(), expected_ids[3]); + assert_eq!(Sassafras::slot_ticket_id(slot + 2).unwrap(), expected_ids[5]); + assert_eq!(Sassafras::slot_ticket_id(slot + 3).unwrap(), expected_ids[7]); + assert_eq!(Sassafras::slot_ticket_id(slot + 6).unwrap(), expected_ids[6]); + assert_eq!(Sassafras::slot_ticket_id(slot + 7).unwrap(), expected_ids[4]); + assert_eq!(Sassafras::slot_ticket_id(slot + 8).unwrap(), expected_ids[2]); + assert_eq!(Sassafras::slot_ticket_id(slot + 9).unwrap(), expected_ids[0]); + assert!(Sassafras::slot_ticket_id(slot + 10).is_none()); + + // Enact another epoch change, for which we don't have any ticket + progress_to_block(2 * epoch_length + 1, &pairs[0]).unwrap(); + let meta = TicketsMeta::::get(); + assert_eq!(meta.unsorted_tickets_count, 0); + assert_eq!(meta.tickets_count, [0, 0]); + }); +} + +#[test] +fn block_allowed_to_skip_epochs() { + let (pairs, mut ext) = new_test_ext_with_pairs(4, false); + let pair = &pairs[0]; + let start_slot = Slot::from(100); + let start_block = 1; + + ext.execute_with(|| { + let epoch_length = Sassafras::epoch_length() as u64; + + initialize_block(start_block, start_slot, Default::default(), pair); + + let tickets = make_ticket_bodies(3, Some(pair)); + persist_next_epoch_tickets(&tickets); + + let next_random = Sassafras::next_randomness(); + + // We want to skip 3 epochs in this test. 
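+		// (Sketch: with the mock's EPOCH_LENGTH of 10 this jumps from slot 100 in epoch 0
+		// straight to slot 140, the first slot of epoch 4, so epochs 1..=3 never get a block.)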
+ let offset = 4 * epoch_length; + go_to_block(start_block + offset, start_slot + offset, &pairs[0]); + + // Post-initialization status + + assert!(ClaimTemporaryData::::exists()); + assert_eq!(Sassafras::genesis_slot(), start_slot); + assert_eq!(Sassafras::current_slot(), start_slot + offset); + assert_eq!(Sassafras::epoch_index(), 4); + assert_eq!(Sassafras::current_epoch_start(), start_slot + offset); + assert_eq!(Sassafras::current_slot_index(), 0); + + // Tickets data has been discarded + assert_eq!(TicketsMeta::::get(), TicketsMetadata::default()); + assert!(tickets.iter().all(|(id, _)| TicketsData::::get(id).is_none())); + assert_eq!(SortedCandidates::::get().len(), 0); + + // We used the last known next epoch randomness as a fallback + assert_eq!(next_random, Sassafras::randomness()); + }); +} + +#[test] +fn obsolete_tickets_are_removed_on_epoch_change() { + let (pairs, mut ext) = new_test_ext_with_pairs(4, false); + let pair = &pairs[0]; + let start_slot = Slot::from(100); + let start_block = 1; + + ext.execute_with(|| { + let epoch_length = Sassafras::epoch_length() as u64; + + initialize_block(start_block, start_slot, Default::default(), pair); + + let tickets = make_ticket_bodies(10, Some(pair)); + let mut epoch1_tickets = tickets[..4].to_vec(); + let mut epoch2_tickets = tickets[4..].to_vec(); + + // Persist some tickets for next epoch (N) + persist_next_epoch_tickets(&epoch1_tickets); + assert_eq!(TicketsMeta::::get().tickets_count, [0, 4]); + // Check next epoch tickets presence + epoch1_tickets.sort_by_key(|t| t.0); + (0..epoch1_tickets.len()).into_iter().for_each(|i| { + let id = TicketsIds::::get((1, i as u32)).unwrap(); + let body = TicketsData::::get(id).unwrap(); + assert_eq!((id, body), epoch1_tickets[i]); + }); + + // Advance one epoch to enact the tickets + go_to_block(start_block + epoch_length, start_slot + epoch_length, pair); + assert_eq!(TicketsMeta::::get().tickets_count, [0, 4]); + + // Persist some tickets for next epoch (N+1) + persist_next_epoch_tickets(&epoch2_tickets); + assert_eq!(TicketsMeta::::get().tickets_count, [6, 4]); + epoch2_tickets.sort_by_key(|t| t.0); + // Check for this epoch and next epoch tickets presence + (0..epoch1_tickets.len()).into_iter().for_each(|i| { + let id = TicketsIds::::get((1, i as u32)).unwrap(); + let body = TicketsData::::get(id).unwrap(); + assert_eq!((id, body), epoch1_tickets[i]); + }); + (0..epoch2_tickets.len()).into_iter().for_each(|i| { + let id = TicketsIds::::get((0, i as u32)).unwrap(); + let body = TicketsData::::get(id).unwrap(); + assert_eq!((id, body), epoch2_tickets[i]); + }); + + // Advance to epoch 2 and check for cleanup + + go_to_block(start_block + 2 * epoch_length, start_slot + 2 * epoch_length, pair); + assert_eq!(TicketsMeta::::get().tickets_count, [6, 0]); + + (0..epoch1_tickets.len()).into_iter().for_each(|i| { + let id = TicketsIds::::get((1, i as u32)).unwrap(); + assert!(TicketsData::::get(id).is_none()); + }); + (0..epoch2_tickets.len()).into_iter().for_each(|i| { + let id = TicketsIds::::get((0, i as u32)).unwrap(); + let body = TicketsData::::get(id).unwrap(); + assert_eq!((id, body), epoch2_tickets[i]); + }); + }) +} + +const TICKETS_FILE: &str = "src/data/25_tickets_100_auths.bin"; + +fn data_read(filename: &str) -> T { + use std::{fs::File, io::Read}; + let mut file = File::open(filename).unwrap(); + let mut buf = Vec::new(); + file.read_to_end(&mut buf).unwrap(); + T::decode(&mut &buf[..]).unwrap() +} + +fn data_write(filename: &str, data: T) { + use std::{fs::File, io::Write}; + let 
mut file = File::create(filename).unwrap(); + let buf = data.encode(); + file.write_all(&buf).unwrap(); +} + +// We don't want to implement anything secure here. +// Just a trivial shuffle for the tests. +fn trivial_fisher_yates_shuffle(vector: &mut Vec, random_seed: u64) { + let mut rng = random_seed as usize; + for i in (1..vector.len()).rev() { + let j = rng % (i + 1); + vector.swap(i, j); + rng = (rng.wrapping_mul(6364793005) + 1) as usize; // Some random number generation + } +} + +// For this test we use a set of pre-constructed tickets from a file. +// Creating a large set of tickets on the fly takes time, and may be annoying +// for test execution. +// +// A valid ring-context is required for this test since we are passing through the +// `submit_ticket` call which tests for ticket validity. +#[test] +fn submit_tickets_with_ring_proof_check_works() { + use sp_core::Pair as _; + // env_logger::init(); + + let (authorities, mut tickets): (Vec, Vec) = + data_read(TICKETS_FILE); + + // Also checks that duplicates are discarded + tickets.extend(tickets.clone()); + trivial_fisher_yates_shuffle(&mut tickets, 321); + + let (pairs, mut ext) = new_test_ext_with_pairs(authorities.len(), true); + let pair = &pairs[0]; + // Check if deserialized data has been generated for the correct set of authorities... + assert!(authorities.iter().zip(pairs.iter()).all(|(auth, pair)| auth == &pair.public())); + + ext.execute_with(|| { + let start_slot = Slot::from(0); + let start_block = 1; + + // Tweak the config to discard ~half of the tickets. + let mut config = EpochConfig::::get(); + config.redundancy_factor = 25; + EpochConfig::::set(config); + + initialize_block(start_block, start_slot, Default::default(), pair); + NextRandomness::::set([0; 32]); + + // Check state before tickets submission + assert_eq!( + TicketsMeta::::get(), + TicketsMetadata { unsorted_tickets_count: 0, tickets_count: [0, 0] }, + ); + + // Submit the tickets + let max_tickets_per_call = Sassafras::epoch_length() as usize; + tickets.chunks(max_tickets_per_call).for_each(|chunk| { + let chunk = BoundedVec::truncate_from(chunk.to_vec()); + Sassafras::submit_tickets(RuntimeOrigin::none(), chunk).unwrap(); + }); + + // Check state after submission + assert_eq!( + TicketsMeta::::get(), + TicketsMetadata { unsorted_tickets_count: 16, tickets_count: [0, 0] }, + ); + assert_eq!(UnsortedSegments::::get(0).len(), 16); + assert_eq!(UnsortedSegments::::get(1).len(), 0); + + finalize_block(start_block); + }) +} + +#[test] +#[ignore = "test tickets data generator"] +fn make_tickets_data() { + use super::*; + use sp_core::crypto::Pair; + + // Number of authorities who produces tickets (for the sake of this test) + let tickets_authors_count = 5; + // Total number of authorities (the ring) + let authorities_count = 100; + let (pairs, mut ext) = new_test_ext_with_pairs(authorities_count, true); + + let authorities: Vec<_> = pairs.iter().map(|sk| sk.public()).collect(); + + ext.execute_with(|| { + let config = EpochConfig::::get(); + + let tickets_count = tickets_authors_count * config.attempts_number as usize; + let mut tickets = Vec::with_capacity(tickets_count); + + // Construct pre-built tickets with a well known `NextRandomness` value. 
+ NextRandomness::::set([0; 32]); + + println!("Constructing {} tickets", tickets_count); + pairs.iter().take(tickets_authors_count).enumerate().for_each(|(i, pair)| { + let t = make_tickets(config.attempts_number, pair); + tickets.extend(t); + println!("{:.2}%", 100f32 * ((i + 1) as f32 / tickets_authors_count as f32)); + }); + + data_write(TICKETS_FILE, (authorities, tickets)); + }); +} diff --git a/substrate/frame/sassafras/src/weights.rs b/substrate/frame/sassafras/src/weights.rs new file mode 100644 index 00000000000..32ea2d29a18 --- /dev/null +++ b/substrate/frame/sassafras/src/weights.rs @@ -0,0 +1,425 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Autogenerated weights for `pallet_sassafras` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2023-11-16, STEPS: `20`, REPEAT: `3`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `behemoth`, CPU: `AMD Ryzen Threadripper 3970X 32-Core Processor` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` + +// Executed Command: +// ./target/release/node-template +// benchmark +// pallet +// --chain +// dev +// --pallet +// pallet_sassafras +// --extrinsic +// * +// --steps +// 20 +// --repeat +// 3 +// --output +// weights.rs +// --template +// substrate/.maintain/frame-weight-template.hbs + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; +use core::marker::PhantomData; + +/// Weight functions needed for `pallet_sassafras`. +pub trait WeightInfo { + fn on_initialize() -> Weight; + fn enact_epoch_change(x: u32, y: u32, ) -> Weight; + fn submit_tickets(x: u32, ) -> Weight; + fn plan_config_change() -> Weight; + fn update_ring_verifier(x: u32, ) -> Weight; + fn load_ring_context() -> Weight; + fn sort_segments(x: u32, ) -> Weight; +} + +/// Weights for `pallet_sassafras` using the Substrate node and recommended hardware. 
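+///
+/// A runtime would typically plug these in via `type WeightInfo = weights::SubstrateWeight<Runtime>`
+/// in its `pallet_sassafras::Config`; the test mock in this patch uses `()` instead.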
+pub struct SubstrateWeight(PhantomData); +impl WeightInfo for SubstrateWeight { + /// Storage: `System::Digest` (r:1 w:1) + /// Proof: `System::Digest` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Sassafras::NextRandomness` (r:1 w:0) + /// Proof: `Sassafras::NextRandomness` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) + /// Storage: `Sassafras::NextAuthorities` (r:1 w:0) + /// Proof: `Sassafras::NextAuthorities` (`max_values`: Some(1), `max_size`: Some(3302), added: 3797, mode: `MaxEncodedLen`) + /// Storage: `Sassafras::CurrentRandomness` (r:1 w:0) + /// Proof: `Sassafras::CurrentRandomness` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) + /// Storage: `Sassafras::EpochIndex` (r:1 w:0) + /// Proof: `Sassafras::EpochIndex` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) + /// Storage: `Sassafras::RandomnessAccumulator` (r:1 w:1) + /// Proof: `Sassafras::RandomnessAccumulator` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) + /// Storage: `Sassafras::CurrentSlot` (r:0 w:1) + /// Proof: `Sassafras::CurrentSlot` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) + /// Storage: `Sassafras::ClaimTemporaryData` (r:0 w:1) + /// Proof: `Sassafras::ClaimTemporaryData` (`max_values`: Some(1), `max_size`: Some(33), added: 528, mode: `MaxEncodedLen`) + /// Storage: `Sassafras::GenesisSlot` (r:0 w:1) + /// Proof: `Sassafras::GenesisSlot` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) + fn on_initialize() -> Weight { + // Proof Size summary in bytes: + // Measured: `302` + // Estimated: `4787` + // Minimum execution time: 438_039_000 picoseconds. 
+ Weight::from_parts(439_302_000, 4787) + .saturating_add(T::DbWeight::get().reads(6_u64)) + .saturating_add(T::DbWeight::get().writes(5_u64)) + } + /// Storage: `Sassafras::CurrentSlot` (r:1 w:0) + /// Proof: `Sassafras::CurrentSlot` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) + /// Storage: `Sassafras::EpochIndex` (r:1 w:1) + /// Proof: `Sassafras::EpochIndex` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) + /// Storage: `Sassafras::GenesisSlot` (r:1 w:0) + /// Proof: `Sassafras::GenesisSlot` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) + /// Storage: `Sassafras::NextAuthorities` (r:1 w:1) + /// Proof: `Sassafras::NextAuthorities` (`max_values`: Some(1), `max_size`: Some(3302), added: 3797, mode: `MaxEncodedLen`) + /// Storage: `Sassafras::RingContext` (r:1 w:0) + /// Proof: `Sassafras::RingContext` (`max_values`: Some(1), `max_size`: Some(590324), added: 590819, mode: `MaxEncodedLen`) + /// Storage: `Sassafras::TicketsMeta` (r:1 w:1) + /// Proof: `Sassafras::TicketsMeta` (`max_values`: Some(1), `max_size`: Some(12), added: 507, mode: `MaxEncodedLen`) + /// Storage: `Sassafras::NextRandomness` (r:1 w:1) + /// Proof: `Sassafras::NextRandomness` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) + /// Storage: `Sassafras::RandomnessAccumulator` (r:1 w:0) + /// Proof: `Sassafras::RandomnessAccumulator` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) + /// Storage: `Sassafras::NextEpochConfig` (r:1 w:1) + /// Proof: `Sassafras::NextEpochConfig` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) + /// Storage: `Sassafras::PendingEpochConfigChange` (r:1 w:1) + /// Proof: `Sassafras::PendingEpochConfigChange` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) + /// Storage: `System::Digest` (r:1 w:1) + /// Proof: `System::Digest` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Sassafras::SortedCandidates` (r:1 w:0) + /// Proof: `Sassafras::SortedCandidates` (`max_values`: Some(1), `max_size`: Some(3202), added: 3697, mode: `MaxEncodedLen`) + /// Storage: `Sassafras::UnsortedSegments` (r:79 w:79) + /// Proof: `Sassafras::UnsortedSegments` (`max_values`: None, `max_size`: Some(2054), added: 4529, mode: `MaxEncodedLen`) + /// Storage: `Sassafras::TicketsIds` (r:5000 w:200) + /// Proof: `Sassafras::TicketsIds` (`max_values`: None, `max_size`: Some(21), added: 2496, mode: `MaxEncodedLen`) + /// Storage: `Sassafras::Authorities` (r:0 w:1) + /// Proof: `Sassafras::Authorities` (`max_values`: Some(1), `max_size`: Some(3302), added: 3797, mode: `MaxEncodedLen`) + /// Storage: `Sassafras::TicketsData` (r:0 w:9896) + /// Proof: `Sassafras::TicketsData` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`) + /// Storage: `Sassafras::RingVerifierData` (r:0 w:1) + /// Proof: `Sassafras::RingVerifierData` (`max_values`: Some(1), `max_size`: Some(388), added: 883, mode: `MaxEncodedLen`) + /// Storage: `Sassafras::EpochConfig` (r:0 w:1) + /// Proof: `Sassafras::EpochConfig` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) + /// Storage: `Sassafras::CurrentRandomness` (r:0 w:1) + /// Proof: `Sassafras::CurrentRandomness` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) + /// The range of component `x` is `[1, 100]`. + /// The range of component `y` is `[1000, 5000]`. 
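+	/// (As wired by `EpochChangeInternalTrigger`, `x` corresponds to the number of authorities
+	/// and `y` to the epoch length in slots.)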
+ fn enact_epoch_change(x: u32, y: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `594909 + x * (33 ±0) + y * (53 ±0)` + // Estimated: `593350 + x * (24 ±1) + y * (2496 ±0)` + // Minimum execution time: 121_279_846_000 picoseconds. + Weight::from_parts(94_454_851_972, 593350) + // Standard Error: 24_177_301 + .saturating_add(Weight::from_parts(8_086_191, 0).saturating_mul(x.into())) + // Standard Error: 601_053 + .saturating_add(Weight::from_parts(15_578_413, 0).saturating_mul(y.into())) + .saturating_add(T::DbWeight::get().reads(13_u64)) + .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(y.into()))) + .saturating_add(T::DbWeight::get().writes(112_u64)) + .saturating_add(T::DbWeight::get().writes((2_u64).saturating_mul(y.into()))) + .saturating_add(Weight::from_parts(0, 24).saturating_mul(x.into())) + .saturating_add(Weight::from_parts(0, 2496).saturating_mul(y.into())) + } + /// Storage: `Sassafras::CurrentSlot` (r:1 w:0) + /// Proof: `Sassafras::CurrentSlot` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) + /// Storage: `Sassafras::EpochIndex` (r:1 w:0) + /// Proof: `Sassafras::EpochIndex` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) + /// Storage: `Sassafras::GenesisSlot` (r:1 w:0) + /// Proof: `Sassafras::GenesisSlot` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) + /// Storage: `Sassafras::RingVerifierData` (r:1 w:0) + /// Proof: `Sassafras::RingVerifierData` (`max_values`: Some(1), `max_size`: Some(388), added: 883, mode: `MaxEncodedLen`) + /// Storage: `Sassafras::NextAuthorities` (r:1 w:0) + /// Proof: `Sassafras::NextAuthorities` (`max_values`: Some(1), `max_size`: Some(3302), added: 3797, mode: `MaxEncodedLen`) + /// Storage: `Sassafras::NextEpochConfig` (r:1 w:0) + /// Proof: `Sassafras::NextEpochConfig` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) + /// Storage: `Sassafras::NextRandomness` (r:1 w:0) + /// Proof: `Sassafras::NextRandomness` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) + /// Storage: `Sassafras::TicketsData` (r:25 w:25) + /// Proof: `Sassafras::TicketsData` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`) + /// Storage: `Sassafras::TicketsMeta` (r:1 w:1) + /// Proof: `Sassafras::TicketsMeta` (`max_values`: Some(1), `max_size`: Some(12), added: 507, mode: `MaxEncodedLen`) + /// Storage: `Sassafras::UnsortedSegments` (r:1 w:1) + /// Proof: `Sassafras::UnsortedSegments` (`max_values`: None, `max_size`: Some(2054), added: 4529, mode: `MaxEncodedLen`) + /// The range of component `x` is `[1, 25]`. + fn submit_tickets(x: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `3869` + // Estimated: `5519 + x * (2559 ±0)` + // Minimum execution time: 36_904_934_000 picoseconds. 
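+		// (Illustrative reading of the polynomial below: a full batch of `x = 25` tickets
+		// evaluates to roughly 25.8ms + 25 * 11.3ms, i.e. about 310ms of ref time on the
+		// benchmark machine.)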
+ Weight::from_parts(25_822_957_295, 5519) + // Standard Error: 11_047_832 + .saturating_add(Weight::from_parts(11_338_353_299, 0).saturating_mul(x.into())) + .saturating_add(T::DbWeight::get().reads(9_u64)) + .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(x.into()))) + .saturating_add(T::DbWeight::get().writes(2_u64)) + .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(x.into()))) + .saturating_add(Weight::from_parts(0, 2559).saturating_mul(x.into())) + } + /// Storage: `Sassafras::PendingEpochConfigChange` (r:0 w:1) + /// Proof: `Sassafras::PendingEpochConfigChange` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) + fn plan_config_change() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 4_038_000 picoseconds. + Weight::from_parts(4_499_000, 0) + .saturating_add(T::DbWeight::get().writes(1_u64)) + } + /// Storage: `Sassafras::RingContext` (r:1 w:0) + /// Proof: `Sassafras::RingContext` (`max_values`: Some(1), `max_size`: Some(590324), added: 590819, mode: `MaxEncodedLen`) + /// Storage: `Sassafras::RingVerifierData` (r:0 w:1) + /// Proof: `Sassafras::RingVerifierData` (`max_values`: Some(1), `max_size`: Some(388), added: 883, mode: `MaxEncodedLen`) + /// The range of component `x` is `[1, 100]`. + fn update_ring_verifier(x: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `590485` + // Estimated: `591809` + // Minimum execution time: 105_121_424_000 picoseconds. + Weight::from_parts(105_527_334_385, 591809) + // Standard Error: 2_933_910 + .saturating_add(Weight::from_parts(96_136_261, 0).saturating_mul(x.into())) + .saturating_add(T::DbWeight::get().reads(1_u64)) + .saturating_add(T::DbWeight::get().writes(1_u64)) + } + /// Storage: `Sassafras::RingContext` (r:1 w:0) + /// Proof: `Sassafras::RingContext` (`max_values`: Some(1), `max_size`: Some(590324), added: 590819, mode: `MaxEncodedLen`) + fn load_ring_context() -> Weight { + // Proof Size summary in bytes: + // Measured: `590485` + // Estimated: `591809` + // Minimum execution time: 44_005_681_000 picoseconds. + Weight::from_parts(44_312_079_000, 591809) + .saturating_add(T::DbWeight::get().reads(1_u64)) + } + /// Storage: `Sassafras::SortedCandidates` (r:1 w:0) + /// Proof: `Sassafras::SortedCandidates` (`max_values`: Some(1), `max_size`: Some(3202), added: 3697, mode: `MaxEncodedLen`) + /// Storage: `Sassafras::UnsortedSegments` (r:100 w:100) + /// Proof: `Sassafras::UnsortedSegments` (`max_values`: None, `max_size`: Some(2054), added: 4529, mode: `MaxEncodedLen`) + /// Storage: `Sassafras::TicketsIds` (r:0 w:200) + /// Proof: `Sassafras::TicketsIds` (`max_values`: None, `max_size`: Some(21), added: 2496, mode: `MaxEncodedLen`) + /// Storage: `Sassafras::TicketsData` (r:0 w:12600) + /// Proof: `Sassafras::TicketsData` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`) + /// The range of component `x` is `[1, 100]`. + fn sort_segments(x: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `222 + x * (2060 ±0)` + // Estimated: `4687 + x * (4529 ±0)` + // Minimum execution time: 183_501_000 picoseconds. 
+ Weight::from_parts(183_501_000, 4687) + // Standard Error: 1_426_363 + .saturating_add(Weight::from_parts(169_156_241, 0).saturating_mul(x.into())) + .saturating_add(T::DbWeight::get().reads(1_u64)) + .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(x.into()))) + .saturating_add(T::DbWeight::get().writes((129_u64).saturating_mul(x.into()))) + .saturating_add(Weight::from_parts(0, 4529).saturating_mul(x.into())) + } +} + +// For backwards compatibility and tests. +impl WeightInfo for () { + /// Storage: `System::Digest` (r:1 w:1) + /// Proof: `System::Digest` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Sassafras::NextRandomness` (r:1 w:0) + /// Proof: `Sassafras::NextRandomness` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) + /// Storage: `Sassafras::NextAuthorities` (r:1 w:0) + /// Proof: `Sassafras::NextAuthorities` (`max_values`: Some(1), `max_size`: Some(3302), added: 3797, mode: `MaxEncodedLen`) + /// Storage: `Sassafras::CurrentRandomness` (r:1 w:0) + /// Proof: `Sassafras::CurrentRandomness` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) + /// Storage: `Sassafras::EpochIndex` (r:1 w:0) + /// Proof: `Sassafras::EpochIndex` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) + /// Storage: `Sassafras::RandomnessAccumulator` (r:1 w:1) + /// Proof: `Sassafras::RandomnessAccumulator` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) + /// Storage: `Sassafras::CurrentSlot` (r:0 w:1) + /// Proof: `Sassafras::CurrentSlot` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) + /// Storage: `Sassafras::ClaimTemporaryData` (r:0 w:1) + /// Proof: `Sassafras::ClaimTemporaryData` (`max_values`: Some(1), `max_size`: Some(33), added: 528, mode: `MaxEncodedLen`) + /// Storage: `Sassafras::GenesisSlot` (r:0 w:1) + /// Proof: `Sassafras::GenesisSlot` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) + fn on_initialize() -> Weight { + // Proof Size summary in bytes: + // Measured: `302` + // Estimated: `4787` + // Minimum execution time: 438_039_000 picoseconds. 
+ Weight::from_parts(439_302_000, 4787) + .saturating_add(RocksDbWeight::get().reads(6_u64)) + .saturating_add(RocksDbWeight::get().writes(5_u64)) + } + /// Storage: `Sassafras::CurrentSlot` (r:1 w:0) + /// Proof: `Sassafras::CurrentSlot` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) + /// Storage: `Sassafras::EpochIndex` (r:1 w:1) + /// Proof: `Sassafras::EpochIndex` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) + /// Storage: `Sassafras::GenesisSlot` (r:1 w:0) + /// Proof: `Sassafras::GenesisSlot` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) + /// Storage: `Sassafras::NextAuthorities` (r:1 w:1) + /// Proof: `Sassafras::NextAuthorities` (`max_values`: Some(1), `max_size`: Some(3302), added: 3797, mode: `MaxEncodedLen`) + /// Storage: `Sassafras::RingContext` (r:1 w:0) + /// Proof: `Sassafras::RingContext` (`max_values`: Some(1), `max_size`: Some(590324), added: 590819, mode: `MaxEncodedLen`) + /// Storage: `Sassafras::TicketsMeta` (r:1 w:1) + /// Proof: `Sassafras::TicketsMeta` (`max_values`: Some(1), `max_size`: Some(12), added: 507, mode: `MaxEncodedLen`) + /// Storage: `Sassafras::NextRandomness` (r:1 w:1) + /// Proof: `Sassafras::NextRandomness` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) + /// Storage: `Sassafras::RandomnessAccumulator` (r:1 w:0) + /// Proof: `Sassafras::RandomnessAccumulator` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) + /// Storage: `Sassafras::NextEpochConfig` (r:1 w:1) + /// Proof: `Sassafras::NextEpochConfig` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) + /// Storage: `Sassafras::PendingEpochConfigChange` (r:1 w:1) + /// Proof: `Sassafras::PendingEpochConfigChange` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) + /// Storage: `System::Digest` (r:1 w:1) + /// Proof: `System::Digest` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Sassafras::SortedCandidates` (r:1 w:0) + /// Proof: `Sassafras::SortedCandidates` (`max_values`: Some(1), `max_size`: Some(3202), added: 3697, mode: `MaxEncodedLen`) + /// Storage: `Sassafras::UnsortedSegments` (r:79 w:79) + /// Proof: `Sassafras::UnsortedSegments` (`max_values`: None, `max_size`: Some(2054), added: 4529, mode: `MaxEncodedLen`) + /// Storage: `Sassafras::TicketsIds` (r:5000 w:200) + /// Proof: `Sassafras::TicketsIds` (`max_values`: None, `max_size`: Some(21), added: 2496, mode: `MaxEncodedLen`) + /// Storage: `Sassafras::Authorities` (r:0 w:1) + /// Proof: `Sassafras::Authorities` (`max_values`: Some(1), `max_size`: Some(3302), added: 3797, mode: `MaxEncodedLen`) + /// Storage: `Sassafras::TicketsData` (r:0 w:9896) + /// Proof: `Sassafras::TicketsData` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`) + /// Storage: `Sassafras::RingVerifierData` (r:0 w:1) + /// Proof: `Sassafras::RingVerifierData` (`max_values`: Some(1), `max_size`: Some(388), added: 883, mode: `MaxEncodedLen`) + /// Storage: `Sassafras::EpochConfig` (r:0 w:1) + /// Proof: `Sassafras::EpochConfig` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) + /// Storage: `Sassafras::CurrentRandomness` (r:0 w:1) + /// Proof: `Sassafras::CurrentRandomness` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) + /// The range of component `x` is `[1, 100]`. + /// The range of component `y` is `[1000, 5000]`. 
+ fn enact_epoch_change(x: u32, y: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `594909 + x * (33 ±0) + y * (53 ±0)` + // Estimated: `593350 + x * (24 ±1) + y * (2496 ±0)` + // Minimum execution time: 121_279_846_000 picoseconds. + Weight::from_parts(94_454_851_972, 593350) + // Standard Error: 24_177_301 + .saturating_add(Weight::from_parts(8_086_191, 0).saturating_mul(x.into())) + // Standard Error: 601_053 + .saturating_add(Weight::from_parts(15_578_413, 0).saturating_mul(y.into())) + .saturating_add(RocksDbWeight::get().reads(13_u64)) + .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(y.into()))) + .saturating_add(RocksDbWeight::get().writes(112_u64)) + .saturating_add(RocksDbWeight::get().writes((2_u64).saturating_mul(y.into()))) + .saturating_add(Weight::from_parts(0, 24).saturating_mul(x.into())) + .saturating_add(Weight::from_parts(0, 2496).saturating_mul(y.into())) + } + /// Storage: `Sassafras::CurrentSlot` (r:1 w:0) + /// Proof: `Sassafras::CurrentSlot` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) + /// Storage: `Sassafras::EpochIndex` (r:1 w:0) + /// Proof: `Sassafras::EpochIndex` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) + /// Storage: `Sassafras::GenesisSlot` (r:1 w:0) + /// Proof: `Sassafras::GenesisSlot` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) + /// Storage: `Sassafras::RingVerifierData` (r:1 w:0) + /// Proof: `Sassafras::RingVerifierData` (`max_values`: Some(1), `max_size`: Some(388), added: 883, mode: `MaxEncodedLen`) + /// Storage: `Sassafras::NextAuthorities` (r:1 w:0) + /// Proof: `Sassafras::NextAuthorities` (`max_values`: Some(1), `max_size`: Some(3302), added: 3797, mode: `MaxEncodedLen`) + /// Storage: `Sassafras::NextEpochConfig` (r:1 w:0) + /// Proof: `Sassafras::NextEpochConfig` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) + /// Storage: `Sassafras::NextRandomness` (r:1 w:0) + /// Proof: `Sassafras::NextRandomness` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) + /// Storage: `Sassafras::TicketsData` (r:25 w:25) + /// Proof: `Sassafras::TicketsData` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`) + /// Storage: `Sassafras::TicketsMeta` (r:1 w:1) + /// Proof: `Sassafras::TicketsMeta` (`max_values`: Some(1), `max_size`: Some(12), added: 507, mode: `MaxEncodedLen`) + /// Storage: `Sassafras::UnsortedSegments` (r:1 w:1) + /// Proof: `Sassafras::UnsortedSegments` (`max_values`: None, `max_size`: Some(2054), added: 4529, mode: `MaxEncodedLen`) + /// The range of component `x` is `[1, 25]`. + fn submit_tickets(x: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `3869` + // Estimated: `5519 + x * (2559 ±0)` + // Minimum execution time: 36_904_934_000 picoseconds. 
+ Weight::from_parts(25_822_957_295, 5519) + // Standard Error: 11_047_832 + .saturating_add(Weight::from_parts(11_338_353_299, 0).saturating_mul(x.into())) + .saturating_add(RocksDbWeight::get().reads(9_u64)) + .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(x.into()))) + .saturating_add(RocksDbWeight::get().writes(2_u64)) + .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(x.into()))) + .saturating_add(Weight::from_parts(0, 2559).saturating_mul(x.into())) + } + /// Storage: `Sassafras::PendingEpochConfigChange` (r:0 w:1) + /// Proof: `Sassafras::PendingEpochConfigChange` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) + fn plan_config_change() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 4_038_000 picoseconds. + Weight::from_parts(4_499_000, 0) + .saturating_add(RocksDbWeight::get().writes(1_u64)) + } + /// Storage: `Sassafras::RingContext` (r:1 w:0) + /// Proof: `Sassafras::RingContext` (`max_values`: Some(1), `max_size`: Some(590324), added: 590819, mode: `MaxEncodedLen`) + /// Storage: `Sassafras::RingVerifierData` (r:0 w:1) + /// Proof: `Sassafras::RingVerifierData` (`max_values`: Some(1), `max_size`: Some(388), added: 883, mode: `MaxEncodedLen`) + /// The range of component `x` is `[1, 100]`. + fn update_ring_verifier(x: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `590485` + // Estimated: `591809` + // Minimum execution time: 105_121_424_000 picoseconds. + Weight::from_parts(105_527_334_385, 591809) + // Standard Error: 2_933_910 + .saturating_add(Weight::from_parts(96_136_261, 0).saturating_mul(x.into())) + .saturating_add(RocksDbWeight::get().reads(1_u64)) + .saturating_add(RocksDbWeight::get().writes(1_u64)) + } + /// Storage: `Sassafras::RingContext` (r:1 w:0) + /// Proof: `Sassafras::RingContext` (`max_values`: Some(1), `max_size`: Some(590324), added: 590819, mode: `MaxEncodedLen`) + fn load_ring_context() -> Weight { + // Proof Size summary in bytes: + // Measured: `590485` + // Estimated: `591809` + // Minimum execution time: 44_005_681_000 picoseconds. + Weight::from_parts(44_312_079_000, 591809) + .saturating_add(RocksDbWeight::get().reads(1_u64)) + } + /// Storage: `Sassafras::SortedCandidates` (r:1 w:0) + /// Proof: `Sassafras::SortedCandidates` (`max_values`: Some(1), `max_size`: Some(3202), added: 3697, mode: `MaxEncodedLen`) + /// Storage: `Sassafras::UnsortedSegments` (r:100 w:100) + /// Proof: `Sassafras::UnsortedSegments` (`max_values`: None, `max_size`: Some(2054), added: 4529, mode: `MaxEncodedLen`) + /// Storage: `Sassafras::TicketsIds` (r:0 w:200) + /// Proof: `Sassafras::TicketsIds` (`max_values`: None, `max_size`: Some(21), added: 2496, mode: `MaxEncodedLen`) + /// Storage: `Sassafras::TicketsData` (r:0 w:12600) + /// Proof: `Sassafras::TicketsData` (`max_values`: None, `max_size`: Some(84), added: 2559, mode: `MaxEncodedLen`) + /// The range of component `x` is `[1, 100]`. + fn sort_segments(x: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `222 + x * (2060 ±0)` + // Estimated: `4687 + x * (4529 ±0)` + // Minimum execution time: 183_501_000 picoseconds. 
+ Weight::from_parts(183_501_000, 4687) + // Standard Error: 1_426_363 + .saturating_add(Weight::from_parts(169_156_241, 0).saturating_mul(x.into())) + .saturating_add(RocksDbWeight::get().reads(1_u64)) + .saturating_add(RocksDbWeight::get().reads((1_u64).saturating_mul(x.into()))) + .saturating_add(RocksDbWeight::get().writes((129_u64).saturating_mul(x.into()))) + .saturating_add(Weight::from_parts(0, 4529).saturating_mul(x.into())) + } +} diff --git a/substrate/primitives/consensus/sassafras/Cargo.toml b/substrate/primitives/consensus/sassafras/Cargo.toml index 67f09e2b904..e71f82b4382 100644 --- a/substrate/primitives/consensus/sassafras/Cargo.toml +++ b/substrate/primitives/consensus/sassafras/Cargo.toml @@ -18,12 +18,12 @@ targets = ["x86_64-unknown-linux-gnu"] scale-codec = { package = "parity-scale-codec", version = "3.2.2", default-features = false } scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } serde = { version = "1.0.193", default-features = false, features = ["derive"], optional = true } -sp-api = { default-features = false, path = "../../api" } -sp-application-crypto = { default-features = false, path = "../../application-crypto", features = ["bandersnatch-experimental"] } -sp-consensus-slots = { default-features = false, path = "../slots" } -sp-core = { default-features = false, path = "../../core", features = ["bandersnatch-experimental"] } -sp-runtime = { default-features = false, path = "../../runtime" } -sp-std = { default-features = false, path = "../../std" } +sp-api = { path = "../../api", default-features = false } +sp-application-crypto = { path = "../../application-crypto", default-features = false, features = ["bandersnatch-experimental"] } +sp-consensus-slots = { path = "../slots", default-features = false } +sp-core = { path = "../../core", default-features = false, features = ["bandersnatch-experimental"] } +sp-runtime = { path = "../../runtime", default-features = false } +sp-std = { path = "../../std", default-features = false } [features] default = ["std"] diff --git a/substrate/primitives/consensus/sassafras/README.md b/substrate/primitives/consensus/sassafras/README.md index b0f3685494e..d6251940a49 100644 --- a/substrate/primitives/consensus/sassafras/README.md +++ b/substrate/primitives/consensus/sassafras/README.md @@ -1,12 +1,6 @@ Primitives for SASSAFRAS. -# ⚠️ WARNING ⚠️ +- Tracking issue: https://github.com/paritytech/polkadot-sdk/issues/41 +- RFC proposal: https://github.com/polkadot-fellows/RFCs/pull/26 -The crate interfaces and structures are highly experimental and may be subject -to significant changes. - -Depends on upstream experimental feature: `bandersnatch-experimental`. - -These structs were mostly extracted from the main SASSAFRAS protocol PR: https://github.com/paritytech/polkadot-sdk/pull/1336. - -Tracking issue: https://github.com/paritytech/polkadot-sdk/issues/41 +Depends on `sp-core` feature: `bandersnatch-experimental`. diff --git a/substrate/primitives/consensus/sassafras/src/digests.rs b/substrate/primitives/consensus/sassafras/src/digests.rs index 95a305099de..5274f1309d8 100644 --- a/substrate/primitives/consensus/sassafras/src/digests.rs +++ b/substrate/primitives/consensus/sassafras/src/digests.rs @@ -48,11 +48,11 @@ pub struct SlotClaim { /// This is mandatory in the first block of each epoch. #[derive(Clone, PartialEq, Eq, Encode, Decode, RuntimeDebug)] pub struct NextEpochDescriptor { + /// Randomness value. + pub randomness: Randomness, /// Authorities list. 
pub authorities: Vec, - /// Epoch randomness. - pub randomness: Randomness, - /// Epoch configurable parameters. + /// Epoch configuration. /// /// If not present previous epoch parameters are used. pub config: Option, diff --git a/substrate/primitives/consensus/sassafras/src/lib.rs b/substrate/primitives/consensus/sassafras/src/lib.rs index e421e771d40..1752f765886 100644 --- a/substrate/primitives/consensus/sassafras/src/lib.rs +++ b/substrate/primitives/consensus/sassafras/src/lib.rs @@ -80,33 +80,43 @@ pub type EquivocationProof = sp_consensus_slots::EquivocationProof, - /// Randomness for the epoch. + /// Epoch index. + pub index: u64, + /// Starting slot of the epoch. + pub start: Slot, + /// Number of slots in the epoch. + pub length: u32, + /// Randomness value. pub randomness: Randomness, + /// Authorities list. + pub authorities: Vec, /// Epoch configuration. pub config: EpochConfiguration, } diff --git a/substrate/primitives/consensus/sassafras/src/ticket.rs b/substrate/primitives/consensus/sassafras/src/ticket.rs index d81770c96d9..dc0a61990d3 100644 --- a/substrate/primitives/consensus/sassafras/src/ticket.rs +++ b/substrate/primitives/consensus/sassafras/src/ticket.rs @@ -62,10 +62,10 @@ pub struct TicketClaim { pub erased_signature: EphemeralSignature, } -/// Computes ticket-id maximum allowed value for a given epoch. +/// Computes a boundary for [`TicketId`] maximum allowed value for a given epoch. /// -/// Only ticket identifiers below this threshold should be considered for slot -/// assignment. +/// Only ticket identifiers below this threshold should be considered as candidates +/// for slot assignment. /// /// The value is computed as `TicketId::MAX*(redundancy*slots)/(attempts*validators)` /// @@ -76,16 +76,51 @@ pub struct TicketClaim { /// - `validators`: number of validators in epoch. /// /// If `attempts * validators = 0` then we return 0. +/// +/// For details about the formula and implications refer to +/// [*probabilities and parameters*](https://research.web3.foundation/Polkadot/protocols/block-production/SASSAFRAS#probabilities-and-parameters) +/// paragraph of the w3f introduction to the protocol. +// TODO: replace with [RFC-26](https://github.com/polkadot-fellows/RFCs/pull/26) +// "Tickets Threshold" paragraph once it is merged pub fn ticket_id_threshold( redundancy: u32, slots: u32, attempts: u32, validators: u32, ) -> TicketId { - let den = attempts as u64 * validators as u64; let num = redundancy as u64 * slots as u64; + let den = attempts as u64 * validators as u64; TicketId::max_value() .checked_div(den.into()) .unwrap_or_default() .saturating_mul(num.into()) } + +#[cfg(test)] +mod tests { + use super::*; + + // This is a trivial example/check which just better explains the rationale + // behind the threshold. + // + // After reading this, the formula should become obvious.
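// A concrete reading of the threshold formula above, using the same illustrative
// numbers as the check below (example values only, not protocol parameters):
//
//   threshold = TicketId::MAX * (redundancy * slots) / (attempts * validators)
//             = TicketId::MAX * (2 * 1000) / (100 * 500)
//             = TicketId::MAX * 2000 / 50000
//             ≈ TicketId::MAX * 0.04
//
// Ticket ids are (roughly) uniformly distributed in `0..TicketId::MAX`, so about
// 4% of the `attempts * validators = 50000` candidate tickets are expected to fall
// below the threshold, i.e. roughly 2000 = redundancy * slots accepted tickets.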
+ #[test] + fn ticket_id_threshold_trivial_check() { + // For an epoch with `s` slots we want to accept a number of tickets equal to ~s·r + let redundancy = 2; + let slots = 1000; + let attempts = 100; + let validators = 500; + + let threshold = ticket_id_threshold(redundancy, slots, attempts, validators); + let threshold = threshold as f64 / TicketId::MAX as f64; + + // We expect that the total number of tickets allowed to be submitted + // is slots*redundancy + let avt = ((attempts * validators) as f64 * threshold) as u32; + assert_eq!(avt, slots * redundancy); + + println!("threshold: {}", threshold); + println!("avt = {}", avt); + } +} diff --git a/substrate/primitives/consensus/sassafras/src/vrf.rs b/substrate/primitives/consensus/sassafras/src/vrf.rs index d25a656f950..bdbac0aae03 100644 --- a/substrate/primitives/consensus/sassafras/src/vrf.rs +++ b/substrate/primitives/consensus/sassafras/src/vrf.rs @@ -23,7 +23,7 @@ use sp_consensus_slots::Slot; use sp_std::vec::Vec; pub use sp_core::bandersnatch::{ - ring_vrf::{RingContext, RingProver, RingVerifier, RingVrfSignature}, + ring_vrf::{RingContext, RingProver, RingVerifier, RingVerifierData, RingVrfSignature}, vrf::{VrfInput, VrfOutput, VrfSignData, VrfSignature}, }; diff --git a/substrate/primitives/core/Cargo.toml b/substrate/primitives/core/Cargo.toml index 9c556c07736..331d762e0d7 100644 --- a/substrate/primitives/core/Cargo.toml +++ b/substrate/primitives/core/Cargo.toml @@ -56,7 +56,7 @@ sp-runtime-interface = { path = "../runtime-interface", default-features = false # bls crypto w3f-bls = { version = "0.1.3", default-features = false, optional = true } # bandersnatch crypto -bandersnatch_vrfs = { git = "https://github.com/w3f/ring-vrf", rev = "3ddc205", default-features = false, optional = true } +bandersnatch_vrfs = { git = "https://github.com/w3f/ring-vrf", rev = "2019248", default-features = false, features = ["substrate-curves"], optional = true } [dev-dependencies] criterion = "0.4.0" diff --git a/substrate/primitives/core/src/bandersnatch.rs b/substrate/primitives/core/src/bandersnatch.rs index 78b7f12f9ff..1d666f13b62 100644 --- a/substrate/primitives/core/src/bandersnatch.rs +++ b/substrate/primitives/core/src/bandersnatch.rs @@ -20,13 +20,17 @@ //! //! The primitive can operate both as a regular VRF or as an anonymized Ring VRF. -#[cfg(feature = "std")] +#[cfg(feature = "serde")] use crate::crypto::Ss58Codec; use crate::crypto::{ ByteArray, CryptoType, CryptoTypeId, Derive, Public as TraitPublic, UncheckedFrom, VrfPublic, }; #[cfg(feature = "full_crypto")] use crate::crypto::{DeriveError, DeriveJunction, Pair as TraitPair, SecretStringError, VrfSecret}; +#[cfg(feature = "serde")] +use serde::{de, Deserialize, Deserializer, Serialize, Serializer}; +#[cfg(all(not(feature = "std"), feature = "serde"))] +use sp_std::alloc::{format, string::String}; use bandersnatch_vrfs::CanonicalSerialize; #[cfg(feature = "full_crypto")] @@ -44,23 +48,12 @@ pub const CRYPTO_ID: CryptoTypeId = CryptoTypeId(*b"band"); #[cfg(feature = "full_crypto")] pub const SIGNING_CTX: &[u8] = b"BandersnatchSigningContext"; -// Max ring domain size. -const RING_DOMAIN_SIZE: usize = 1024; - #[cfg(feature = "full_crypto")] -const SEED_SERIALIZED_LEN: usize = 32; - -// Short-Weierstrass form serialized sizes.
-const PUBLIC_SERIALIZED_LEN: usize = 33; -const SIGNATURE_SERIALIZED_LEN: usize = 65; -const RING_SIGNATURE_SERIALIZED_LEN: usize = 755; -const PREOUT_SERIALIZED_LEN: usize = 33; +const SEED_SERIALIZED_SIZE: usize = 32; -// Max size of serialized ring-vrf context params. -// -// This size is dependent on the ring domain size and the actual value -// is equal to the SCALE encoded size of the `KZG` backend. -const RING_CONTEXT_SERIALIZED_LEN: usize = 147716; +const PUBLIC_SERIALIZED_SIZE: usize = 33; +const SIGNATURE_SERIALIZED_SIZE: usize = 65; +const PREOUT_SERIALIZED_SIZE: usize = 33; /// Bandersnatch public key. #[cfg_attr(feature = "full_crypto", derive(Hash))] @@ -77,16 +70,16 @@ const RING_CONTEXT_SERIALIZED_LEN: usize = 147716; MaxEncodedLen, TypeInfo, )] -pub struct Public(pub [u8; PUBLIC_SERIALIZED_LEN]); +pub struct Public(pub [u8; PUBLIC_SERIALIZED_SIZE]); -impl UncheckedFrom<[u8; PUBLIC_SERIALIZED_LEN]> for Public { - fn unchecked_from(raw: [u8; PUBLIC_SERIALIZED_LEN]) -> Self { +impl UncheckedFrom<[u8; PUBLIC_SERIALIZED_SIZE]> for Public { + fn unchecked_from(raw: [u8; PUBLIC_SERIALIZED_SIZE]) -> Self { Public(raw) } } -impl AsRef<[u8; PUBLIC_SERIALIZED_LEN]> for Public { - fn as_ref(&self) -> &[u8; PUBLIC_SERIALIZED_LEN] { +impl AsRef<[u8; PUBLIC_SERIALIZED_SIZE]> for Public { + fn as_ref(&self) -> &[u8; PUBLIC_SERIALIZED_SIZE] { &self.0 } } @@ -107,17 +100,17 @@ impl TryFrom<&[u8]> for Public { type Error = (); fn try_from(data: &[u8]) -> Result { - if data.len() != PUBLIC_SERIALIZED_LEN { + if data.len() != PUBLIC_SERIALIZED_SIZE { return Err(()) } - let mut r = [0u8; PUBLIC_SERIALIZED_LEN]; + let mut r = [0u8; PUBLIC_SERIALIZED_SIZE]; r.copy_from_slice(data); Ok(Self::unchecked_from(r)) } } impl ByteArray for Public { - const LEN: usize = PUBLIC_SERIALIZED_LEN; + const LEN: usize = PUBLIC_SERIALIZED_SIZE; } impl TraitPublic for Public {} @@ -142,16 +135,31 @@ impl sp_std::fmt::Debug for Public { } } +#[cfg(feature = "serde")] +impl Serialize for Public { + fn serialize(&self, serializer: S) -> Result { + serializer.serialize_str(&self.to_ss58check()) + } +} + +#[cfg(feature = "serde")] +impl<'de> Deserialize<'de> for Public { + fn deserialize>(deserializer: D) -> Result { + Public::from_ss58check(&String::deserialize(deserializer)?) + .map_err(|e| de::Error::custom(format!("{:?}", e))) + } +} + /// Bandersnatch signature. /// /// The signature is created via the [`VrfSecret::vrf_sign`] using [`SIGNING_CTX`] as transcript /// `label`. 
#[cfg_attr(feature = "full_crypto", derive(Hash))] #[derive(Clone, Copy, PartialEq, Eq, Encode, Decode, PassByInner, MaxEncodedLen, TypeInfo)] -pub struct Signature([u8; SIGNATURE_SERIALIZED_LEN]); +pub struct Signature([u8; SIGNATURE_SERIALIZED_SIZE]); -impl UncheckedFrom<[u8; SIGNATURE_SERIALIZED_LEN]> for Signature { - fn unchecked_from(raw: [u8; SIGNATURE_SERIALIZED_LEN]) -> Self { +impl UncheckedFrom<[u8; SIGNATURE_SERIALIZED_SIZE]> for Signature { + fn unchecked_from(raw: [u8; SIGNATURE_SERIALIZED_SIZE]) -> Self { Signature(raw) } } @@ -172,17 +180,17 @@ impl TryFrom<&[u8]> for Signature { type Error = (); fn try_from(data: &[u8]) -> Result { - if data.len() != SIGNATURE_SERIALIZED_LEN { + if data.len() != SIGNATURE_SERIALIZED_SIZE { return Err(()) } - let mut r = [0u8; SIGNATURE_SERIALIZED_LEN]; + let mut r = [0u8; SIGNATURE_SERIALIZED_SIZE]; r.copy_from_slice(data); Ok(Self::unchecked_from(r)) } } impl ByteArray for Signature { - const LEN: usize = SIGNATURE_SERIALIZED_LEN; + const LEN: usize = SIGNATURE_SERIALIZED_SIZE; } impl CryptoType for Signature { @@ -204,7 +212,7 @@ impl sp_std::fmt::Debug for Signature { /// The raw secret seed, which can be used to reconstruct the secret [`Pair`]. #[cfg(feature = "full_crypto")] -type Seed = [u8; SEED_SERIALIZED_LEN]; +type Seed = [u8; SEED_SERIALIZED_SIZE]; /// Bandersnatch secret key. #[cfg(feature = "full_crypto")] @@ -232,10 +240,10 @@ impl TraitPair for Pair { /// /// The slice must be 32 bytes long or it will return an error. fn from_seed_slice(seed_slice: &[u8]) -> Result { - if seed_slice.len() != SEED_SERIALIZED_LEN { + if seed_slice.len() != SEED_SERIALIZED_SIZE { return Err(SecretStringError::InvalidSeedLength) } - let mut seed = [0; SEED_SERIALIZED_LEN]; + let mut seed = [0; SEED_SERIALIZED_SIZE]; seed.copy_from_slice(seed_slice); let secret = SecretKey::from_seed(&seed); Ok(Pair { secret, seed }) @@ -266,7 +274,7 @@ impl TraitPair for Pair { fn public(&self) -> Public { let public = self.secret.to_public(); - let mut raw = [0; PUBLIC_SERIALIZED_LEN]; + let mut raw = [0; PUBLIC_SERIALIZED_SIZE]; public .serialize_compressed(raw.as_mut_slice()) .expect("serialization length is constant and checked by test; qed"); @@ -344,7 +352,7 @@ pub mod vrf { impl Encode for VrfOutput { fn encode(&self) -> Vec { - let mut bytes = [0; PREOUT_SERIALIZED_LEN]; + let mut bytes = [0; PREOUT_SERIALIZED_SIZE]; self.0 .serialize_compressed(bytes.as_mut_slice()) .expect("serialization length is constant and checked by test; qed"); @@ -354,21 +362,24 @@ pub mod vrf { impl Decode for VrfOutput { fn decode(i: &mut R) -> Result { - let buf = <[u8; PREOUT_SERIALIZED_LEN]>::decode(i)?; - let preout = bandersnatch_vrfs::VrfPreOut::deserialize_compressed(buf.as_slice()) - .map_err(|_| "vrf-preout decode error: bad preout")?; + let buf = <[u8; PREOUT_SERIALIZED_SIZE]>::decode(i)?; + let preout = + bandersnatch_vrfs::VrfPreOut::deserialize_compressed_unchecked(buf.as_slice()) + .map_err(|_| "vrf-preout decode error: bad preout")?; Ok(VrfOutput(preout)) } } + impl EncodeLike for VrfOutput {} + impl MaxEncodedLen for VrfOutput { fn max_encoded_len() -> usize { - <[u8; PREOUT_SERIALIZED_LEN]>::max_encoded_len() + <[u8; PREOUT_SERIALIZED_SIZE]>::max_encoded_len() } } impl TypeInfo for VrfOutput { - type Identity = [u8; PREOUT_SERIALIZED_LEN]; + type Identity = [u8; PREOUT_SERIALIZED_SIZE]; fn type_info() -> scale_info::Type { Self::Identity::type_info() @@ -395,10 +406,10 @@ pub mod vrf { /// will contribute to the signature as well. 
#[derive(Clone)] pub struct VrfSignData { - /// VRF inputs to be signed. - pub inputs: VrfIosVec, /// Associated protocol transcript. pub transcript: Transcript, + /// VRF inputs to be signed. + pub inputs: VrfIosVec, } impl VrfSignData { @@ -468,10 +479,10 @@ pub mod vrf { /// Refer to [`VrfSignData`] for more details. #[derive(Clone, Debug, PartialEq, Eq, Encode, Decode, MaxEncodedLen, TypeInfo)] pub struct VrfSignature { - /// VRF (pre)outputs. - pub outputs: VrfIosVec, /// Transcript signature. pub signature: Signature, + /// VRF (pre)outputs. + pub outputs: VrfIosVec, } #[cfg(feature = "full_crypto")] @@ -539,7 +550,7 @@ pub mod vrf { let outputs = VrfIosVec::truncate_from(outputs); let mut signature = - VrfSignature { signature: Signature([0; SIGNATURE_SERIALIZED_LEN]), outputs }; + VrfSignature { signature: Signature([0; SIGNATURE_SERIALIZED_SIZE]), outputs }; thin_signature .proof @@ -567,7 +578,7 @@ pub mod vrf { data: &VrfSignData, signature: &VrfSignature, ) -> bool { - let Ok(public) = PublicKey::deserialize_compressed(self.as_slice()) else { + let Ok(public) = PublicKey::deserialize_compressed_unchecked(self.as_slice()) else { return false }; @@ -577,10 +588,10 @@ pub mod vrf { // Deserialize only the proof, the rest has already been deserialized // This is another hack used because backend signature type is generic over // the number of ios. - let Ok(proof) = - ThinVrfSignature::<0>::deserialize_compressed(signature.signature.as_ref()) - .map(|s| s.proof) - else { + let Ok(proof) = ThinVrfSignature::<0>::deserialize_compressed_unchecked( + signature.signature.as_ref(), + ) + .map(|s| s.proof) else { return false }; let signature = ThinVrfSignature { proof, preouts }; @@ -609,16 +620,100 @@ pub mod vrf { pub mod ring_vrf { use super::{vrf::*, *}; pub use bandersnatch_vrfs::ring::{RingProof, RingProver, RingVerifier, KZG}; - use bandersnatch_vrfs::{CanonicalDeserialize, PublicKey}; + use bandersnatch_vrfs::{ring::VerifierKey, CanonicalDeserialize, PublicKey}; + + /// Ring max size (keyset max size). + pub const RING_MAX_SIZE: u32 = RING_DOMAIN_MAX_SIZE - RING_DOMAIN_OVERHEAD; + + /// Ring domain max size. + pub const RING_DOMAIN_MAX_SIZE: u32 = 2048; + + /// Overhead in the domain size over the max ring size. + /// + /// Some bits of the domain are reserved for the zk proof to work. + pub(crate) const RING_DOMAIN_OVERHEAD: u32 = 257; + + // Max size of serialized ring-vrf context params. + // + // The actual size is dependent on the ring domain size and this value + // has been computed for `RING_DOMAIN_MAX_SIZE` with compression disabled + // for performance reasons. + // + // 1024 uncompressed + // pub(crate) const RING_CONTEXT_SERIALIZED_MAX_SIZE: usize = 295412; + // 1024 compressed + // pub(crate) const RING_CONTEXT_SERIALIZED_MAX_SIZE: usize = 147716; + // 2048 uncompressed + pub(crate) const RING_CONTEXT_SERIALIZED_MAX_SIZE: usize = 590324; + // 2048 compressed + // pub(crate) const RING_CONTEXT_SERIALIZED_MAX_SIZE: usize = 295172; + + pub(crate) const RING_VERIFIER_DATA_SERIALIZED_SIZE: usize = 388; + pub(crate) const RING_SIGNATURE_SERIALIZED_SIZE: usize = 755; + + /// remove as soon as soon as serialization is implemented by the backend + pub struct RingVerifierData { + /// Domain size. + pub domain_size: u32, + /// Verifier key. 
+ pub verifier_key: VerifierKey, + } + + impl From for RingVerifier { + fn from(vd: RingVerifierData) -> RingVerifier { + bandersnatch_vrfs::ring::make_ring_verifier(vd.verifier_key, vd.domain_size as usize) + } + } + + impl Encode for RingVerifierData { + fn encode(&self) -> Vec { + const ERR_STR: &str = "serialization length is constant and checked by test; qed"; + let mut buf = [0; RING_VERIFIER_DATA_SERIALIZED_SIZE]; + self.domain_size.serialize_compressed(&mut buf[..4]).expect(ERR_STR); + self.verifier_key.serialize_compressed(&mut buf[4..]).expect(ERR_STR); + buf.encode() + } + } + + impl Decode for RingVerifierData { + fn decode(i: &mut R) -> Result { + const ERR_STR: &str = "serialization length is constant and checked by test; qed"; + let buf = <[u8; RING_VERIFIER_DATA_SERIALIZED_SIZE]>::decode(i)?; + let domain_size = + ::deserialize_compressed_unchecked(&mut &buf[..4]) + .expect(ERR_STR); + let verifier_key = ::deserialize_compressed_unchecked(&mut &buf[4..]).expect(ERR_STR); + + Ok(RingVerifierData { domain_size, verifier_key }) + } + } - /// Context used to produce ring signatures. + impl EncodeLike for RingVerifierData {} + + impl MaxEncodedLen for RingVerifierData { + fn max_encoded_len() -> usize { + <[u8; RING_VERIFIER_DATA_SERIALIZED_SIZE]>::max_encoded_len() + } + } + + impl TypeInfo for RingVerifierData { + type Identity = [u8; RING_VERIFIER_DATA_SERIALIZED_SIZE]; + + fn type_info() -> scale_info::Type { + Self::Identity::type_info() + } + } + + /// Context used to construct ring prover and verifier. #[derive(Clone)] pub struct RingContext(KZG); impl RingContext { - /// Build an dummy instance used for testing purposes. + /// Build an dummy instance for testing purposes. + /// + /// `domain_size` is set to `RING_DOMAIN_MAX_SIZE`. pub fn new_testing() -> Self { - Self(KZG::testing_kzg_setup([0; 32], RING_DOMAIN_SIZE as u32)) + Self(KZG::testing_kzg_setup([0; 32], RING_DOMAIN_MAX_SIZE)) } /// Get the keyset max size. @@ -630,7 +725,7 @@ pub mod ring_vrf { pub fn prover(&self, public_keys: &[Public], public_idx: usize) -> Option { let mut pks = Vec::with_capacity(public_keys.len()); for public_key in public_keys { - let pk = PublicKey::deserialize_compressed(public_key.as_slice()).ok()?; + let pk = PublicKey::deserialize_compressed_unchecked(public_key.as_slice()).ok()?; pks.push(pk.0.into()); } @@ -643,7 +738,7 @@ pub mod ring_vrf { pub fn verifier(&self, public_keys: &[Public]) -> Option { let mut pks = Vec::with_capacity(public_keys.len()); for public_key in public_keys { - let pk = PublicKey::deserialize_compressed(public_key.as_slice()).ok()?; + let pk = PublicKey::deserialize_compressed_unchecked(public_key.as_slice()).ok()?; pks.push(pk.0.into()); } @@ -651,13 +746,26 @@ pub mod ring_vrf { let ring_verifier = self.0.init_ring_verifier(verifier_key); Some(ring_verifier) } + + /// Information required for a lazy construction of a ring verifier. 
+ pub fn verifier_data(&self, public_keys: &[Public]) -> Option { + let mut pks = Vec::with_capacity(public_keys.len()); + for public_key in public_keys { + let pk = PublicKey::deserialize_compressed_unchecked(public_key.as_slice()).ok()?; + pks.push(pk.0.into()); + } + Some(RingVerifierData { + verifier_key: self.0.verifier_key(pks), + domain_size: self.0.domain_size, + }) + } } impl Encode for RingContext { fn encode(&self) -> Vec { - let mut buf = Box::new([0; RING_CONTEXT_SERIALIZED_LEN]); + let mut buf = Box::new([0; RING_CONTEXT_SERIALIZED_MAX_SIZE]); self.0 - .serialize_compressed(buf.as_mut_slice()) + .serialize_uncompressed(buf.as_mut_slice()) .expect("serialization length is constant and checked by test; qed"); buf.encode() } @@ -665,9 +773,9 @@ pub mod ring_vrf { impl Decode for RingContext { fn decode(i: &mut R) -> Result { - let buf = >::decode(i)?; - let kzg = - KZG::deserialize_compressed(buf.as_slice()).map_err(|_| "KZG decode error")?; + let buf = >::decode(i)?; + let kzg = KZG::deserialize_uncompressed_unchecked(buf.as_slice()) + .map_err(|_| "KZG decode error")?; Ok(RingContext(kzg)) } } @@ -676,12 +784,12 @@ pub mod ring_vrf { impl MaxEncodedLen for RingContext { fn max_encoded_len() -> usize { - <[u8; RING_CONTEXT_SERIALIZED_LEN]>::max_encoded_len() + <[u8; RING_CONTEXT_SERIALIZED_MAX_SIZE]>::max_encoded_len() } } impl TypeInfo for RingContext { - type Identity = [u8; RING_CONTEXT_SERIALIZED_LEN]; + type Identity = [u8; RING_CONTEXT_SERIALIZED_MAX_SIZE]; fn type_info() -> scale_info::Type { Self::Identity::type_info() @@ -691,10 +799,10 @@ pub mod ring_vrf { /// Ring VRF signature. #[derive(Clone, Debug, PartialEq, Eq, Encode, Decode, MaxEncodedLen, TypeInfo)] pub struct RingVrfSignature { + /// Ring signature. + pub signature: [u8; RING_SIGNATURE_SERIALIZED_SIZE], /// VRF (pre)outputs. pub outputs: VrfIosVec, - /// Ring signature. 
- pub signature: [u8; RING_SIGNATURE_SERIALIZED_LEN], } #[cfg(feature = "full_crypto")] @@ -731,7 +839,7 @@ pub mod ring_vrf { let outputs = VrfIosVec::truncate_from(outputs); let mut signature = - RingVrfSignature { outputs, signature: [0; RING_SIGNATURE_SERIALIZED_LEN] }; + RingVrfSignature { outputs, signature: [0; RING_SIGNATURE_SERIALIZED_SIZE] }; ring_signature .proof @@ -769,7 +877,7 @@ pub mod ring_vrf { verifier: &RingVerifier, ) -> bool { let Ok(vrf_signature) = - bandersnatch_vrfs::RingVrfSignature::<0>::deserialize_compressed( + bandersnatch_vrfs::RingVrfSignature::<0>::deserialize_compressed_unchecked( self.signature.as_slice(), ) else { @@ -795,7 +903,7 @@ pub mod ring_vrf { mod tests { use super::{ring_vrf::*, vrf::*, *}; use crate::crypto::{VrfPublic, VrfSecret, DEV_PHRASE}; - const DEV_SEED: &[u8; SEED_SERIALIZED_LEN] = &[0xcb; SEED_SERIALIZED_LEN]; + const DEV_SEED: &[u8; SEED_SERIALIZED_SIZE] = &[0xcb; SEED_SERIALIZED_SIZE]; #[allow(unused)] fn b2h(bytes: &[u8]) -> String { @@ -808,9 +916,10 @@ mod tests { #[test] fn backend_assumptions_sanity_check() { - let kzg = KZG::testing_kzg_setup([0; 32], RING_DOMAIN_SIZE as u32); - assert_eq!(kzg.max_keyset_size(), RING_DOMAIN_SIZE - 257); - assert_eq!(kzg.compressed_size(), RING_CONTEXT_SERIALIZED_LEN); + let kzg = KZG::testing_kzg_setup([0; 32], RING_DOMAIN_MAX_SIZE); + assert_eq!(kzg.max_keyset_size() as u32, RING_MAX_SIZE); + + assert_eq!(kzg.uncompressed_size(), RING_CONTEXT_SERIALIZED_MAX_SIZE); let pks: Vec<_> = (0..16) .map(|i| SecretKey::from_seed(&[i as u8; 32]).to_public().0.into()) @@ -819,11 +928,14 @@ mod tests { let secret = SecretKey::from_seed(&[0u8; 32]); let public = secret.to_public(); - assert_eq!(public.compressed_size(), PUBLIC_SERIALIZED_LEN); + assert_eq!(public.compressed_size(), PUBLIC_SERIALIZED_SIZE); let input = VrfInput::new(b"foo", &[]); let preout = secret.vrf_preout(&input.0); - assert_eq!(preout.compressed_size(), PREOUT_SERIALIZED_LEN); + assert_eq!(preout.compressed_size(), PREOUT_SERIALIZED_SIZE); + + let verifier_key = kzg.verifier_key(pks.clone()); + assert_eq!(verifier_key.compressed_size() + 4, RING_VERIFIER_DATA_SERIALIZED_SIZE); let prover_key = kzg.prover_key(pks); let ring_prover = kzg.init_ring_prover(prover_key, 0); @@ -832,12 +944,12 @@ mod tests { let thin_signature: bandersnatch_vrfs::ThinVrfSignature<0> = secret.sign_thin_vrf(data.transcript.clone(), &[]); - assert_eq!(thin_signature.compressed_size(), SIGNATURE_SERIALIZED_LEN); + assert_eq!(thin_signature.compressed_size(), SIGNATURE_SERIALIZED_SIZE); let ring_signature: bandersnatch_vrfs::RingVrfSignature<0> = bandersnatch_vrfs::RingProver { ring_prover: &ring_prover, secret: &secret } .sign_ring_vrf(data.transcript.clone(), &[]); - assert_eq!(ring_signature.compressed_size(), RING_SIGNATURE_SERIALIZED_LEN); + assert_eq!(ring_signature.compressed_size(), RING_SIGNATURE_SERIALIZED_SIZE); } #[test] @@ -941,7 +1053,8 @@ mod tests { let bytes = expected.encode(); - let expected_len = data.inputs.len() * PREOUT_SERIALIZED_LEN + SIGNATURE_SERIALIZED_LEN + 1; + let expected_len = + data.inputs.len() * PREOUT_SERIALIZED_SIZE + SIGNATURE_SERIALIZED_SIZE + 1; assert_eq!(bytes.len(), expected_len); let decoded = VrfSignature::decode(&mut bytes.as_slice()).unwrap(); @@ -1055,7 +1168,7 @@ mod tests { let bytes = expected.encode(); let expected_len = - data.inputs.len() * PREOUT_SERIALIZED_LEN + RING_SIGNATURE_SERIALIZED_LEN + 1; + data.inputs.len() * PREOUT_SERIALIZED_SIZE + RING_SIGNATURE_SERIALIZED_SIZE + 1; assert_eq!(bytes.len(), 
expected_len); let decoded = RingVrfSignature::decode(&mut bytes.as_slice()).unwrap(); @@ -1067,11 +1180,31 @@ mod tests { let ctx1 = RingContext::new_testing(); let enc1 = ctx1.encode(); - assert_eq!(enc1.len(), RingContext::max_encoded_len()); + assert_eq!(enc1.len(), RING_CONTEXT_SERIALIZED_MAX_SIZE); + assert_eq!(RingContext::max_encoded_len(), RING_CONTEXT_SERIALIZED_MAX_SIZE); let ctx2 = RingContext::decode(&mut enc1.as_slice()).unwrap(); let enc2 = ctx2.encode(); assert_eq!(enc1, enc2); } + + #[test] + fn encode_decode_verifier_data() { + let ring_ctx = RingContext::new_testing(); + + let pks: Vec<_> = (0..16).map(|i| Pair::from_seed(&[i as u8; 32]).public()).collect(); + assert!(pks.len() <= ring_ctx.max_keyset_size()); + + let verifier_data = ring_ctx.verifier_data(&pks).unwrap(); + let enc1 = verifier_data.encode(); + + assert_eq!(enc1.len(), RING_VERIFIER_DATA_SERIALIZED_SIZE); + assert_eq!(RingVerifierData::max_encoded_len(), RING_VERIFIER_DATA_SERIALIZED_SIZE); + + let vd2 = RingVerifierData::decode(&mut enc1.as_slice()).unwrap(); + let enc2 = vd2.encode(); + + assert_eq!(enc1, enc2); + } } -- GitLab From 87a73e7afafe6910eecaa55a73051b3f2f1de695 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 1 Dec 2023 18:57:47 +0100 Subject: [PATCH 178/199] Bump the known_good_semver group with 1 update (#2575) Bumps the known_good_semver group with 1 update: [syn](https://github.com/dtolnay/syn). Updates `syn` from 2.0.38 to 2.0.39
Release notes

Sourced from syn's releases.

2.0.39

  • Fix parsing of return expression in match guards (#1528)
  • Improve error message on labeled loop as value expression for break (#1531)
Commits
  • 924217c Release 2.0.39
  • 95aeeb5 Merge pull request #1531 from dtolnay/breaklabel
  • b88f86f Ignore single_element_loop clippy lint in test
  • a876185 Improve error on break followed by labeled loop
  • 32ab979 Add test of ambiguous label parsing in break
  • 6f658f8 Merge pull request #1530 from dtolnay/canbeginexpr
  • 20497e1 More precise decision to parse expression after return keyword
  • c6a651a Update name of ExprReturn parse function to match variant name
  • c274590 Indicate that peek argument refers to syn::Ident
  • 3e67cb0 Merge pull request #1529 from dtolnay/up
  • Additional commits viewable in compare view

[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=syn&package-manager=cargo&previous-version=2.0.38&new-version=2.0.39)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`. [//]: # (dependabot-automerge-start) [//]: # (dependabot-automerge-end) ---
Dependabot commands and options
You can trigger Dependabot actions by commenting on this PR: - `@dependabot rebase` will rebase this PR - `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it - `@dependabot merge` will merge this PR after your CI passes on it - `@dependabot squash and merge` will squash and merge this PR after your CI passes on it - `@dependabot cancel merge` will cancel a previously requested merge and block automerging - `@dependabot reopen` will reopen this PR if it is closed - `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually - `@dependabot show ignore conditions` will show all of the ignore conditions of the specified dependency - `@dependabot ignore major version` will close this group update PR and stop Dependabot creating any more for the specific dependency's major version (unless you unignore this specific dependency's major version or upgrade to it yourself) - `@dependabot ignore minor version` will close this group update PR and stop Dependabot creating any more for the specific dependency's minor version (unless you unignore this specific dependency's minor version or upgrade to it yourself) - `@dependabot ignore ` will close this group update PR and stop Dependabot creating any more for the specific dependency (unless you unignore this specific dependency or upgrade to it yourself) - `@dependabot unignore ` will remove all of the ignore conditions of the specified dependency - `@dependabot unignore ` will remove the ignore condition of the specified dependency and ignore conditions
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 102 +++++++++--------- .../parachain-system/proc-macro/Cargo.toml | 2 +- polkadot/node/gum/proc-macro/Cargo.toml | 2 +- polkadot/xcm/procedural/Cargo.toml | 2 +- substrate/client/chain-spec/derive/Cargo.toml | 2 +- .../client/tracing/proc-macro/Cargo.toml | 2 +- .../frame/contracts/proc-macro/Cargo.toml | 2 +- .../solution-type/Cargo.toml | 2 +- .../frame/staking/reward-curve/Cargo.toml | 2 +- substrate/frame/support/procedural/Cargo.toml | 2 +- .../frame/support/procedural/tools/Cargo.toml | 2 +- .../procedural/tools/derive/Cargo.toml | 2 +- .../primitives/api/proc-macro/Cargo.toml | 2 +- .../core/hashing/proc-macro/Cargo.toml | 2 +- substrate/primitives/debug-derive/Cargo.toml | 2 +- .../runtime-interface/proc-macro/Cargo.toml | 2 +- .../primitives/version/proc-macro/Cargo.toml | 2 +- 17 files changed, 67 insertions(+), 67 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 241a2fd3d15..7bec4ad24ba 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1145,7 +1145,7 @@ checksum = "16e62a023e7c117e27523144c5d2459f4397fcc3cab0085af8e2224f643a0193" dependencies = [ "proc-macro2", "quote", - "syn 2.0.38", + "syn 2.0.39", ] [[package]] @@ -1162,7 +1162,7 @@ checksum = "bc00ceb34980c03614e35a3a4e218276a0a824e911d07651cd0d858a51e8c0f0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.38", + "syn 2.0.39", ] [[package]] @@ -1338,7 +1338,7 @@ dependencies = [ "regex", "rustc-hash", "shlex", - "syn 2.0.38", + "syn 2.0.39", ] [[package]] @@ -2549,7 +2549,7 @@ dependencies = [ "heck", "proc-macro2", "quote", - "syn 2.0.38", + "syn 2.0.39", ] [[package]] @@ -3608,7 +3608,7 @@ dependencies = [ "proc-macro-crate 2.0.0", "proc-macro2", "quote", - "syn 2.0.38", + "syn 2.0.39", ] [[package]] @@ -4119,7 +4119,7 @@ checksum = "83fdaf97f4804dcebfa5862639bc9ce4121e82140bec2a987ac5140294865b5b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.38", + "syn 2.0.39", ] [[package]] @@ -4159,7 +4159,7 @@ dependencies = [ "proc-macro2", "quote", "scratch", - "syn 2.0.38", + "syn 2.0.39", ] [[package]] @@ -4176,7 +4176,7 @@ checksum = "50c49547d73ba8dcfd4ad7325d64c6d5391ff4224d498fc39a6f3f49825a530d" dependencies = [ "proc-macro2", "quote", - "syn 2.0.38", + "syn 2.0.39", ] [[package]] @@ -4514,7 +4514,7 @@ checksum = "487585f4d0c6655fe74905e2504d8ad6908e4db67f744eb140876906c2f3175d" dependencies = [ "proc-macro2", "quote", - "syn 2.0.38", + "syn 2.0.39", ] [[package]] @@ -4575,7 +4575,7 @@ dependencies = [ "proc-macro2", "quote", "regex", - "syn 2.0.38", + "syn 2.0.39", "termcolor", "toml 0.7.6", "walkdir", @@ -4830,7 +4830,7 @@ checksum = "5e9a1f9f7d83e59740248a6e14ecf93929ade55027844dfcea78beafccc15745" dependencies = [ "proc-macro2", "quote", - "syn 2.0.38", + "syn 2.0.39", ] [[package]] @@ -4841,7 +4841,7 @@ checksum = "c2ad8cef1d801a4686bfd8919f0b30eac4c8e48968c437a6405ded4fb5272d2b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.38", + "syn 2.0.39", ] [[package]] @@ -4996,7 +4996,7 @@ dependencies = [ "fs-err", "proc-macro2", "quote", - "syn 2.0.38", + "syn 2.0.39", ] [[package]] @@ -5383,7 +5383,7 @@ dependencies = [ "quote", "scale-info", "sp-arithmetic", - "syn 2.0.38", + "syn 2.0.39", "trybuild", ] @@ -5535,7 +5535,7 @@ dependencies = [ "proc-macro2", "quote", "sp-core-hashing", - "syn 2.0.38", + "syn 2.0.39", ] [[package]] @@ -5546,7 +5546,7 @@ dependencies = [ "proc-macro-crate 2.0.0", "proc-macro2", "quote", - "syn 2.0.38", + "syn 2.0.39", ] [[package]] @@ -5555,7 
+5555,7 @@ version = "3.0.0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.38", + "syn 2.0.39", ] [[package]] @@ -5788,7 +5788,7 @@ checksum = "89ca545a94061b6365f2c7355b4b32bd20df3ff95f02da9329b34ccc3bd6ee72" dependencies = [ "proc-macro2", "quote", - "syn 2.0.38", + "syn 2.0.39", ] [[package]] @@ -7792,7 +7792,7 @@ dependencies = [ "macro_magic_core", "macro_magic_macros", "quote", - "syn 2.0.38", + "syn 2.0.39", ] [[package]] @@ -7806,7 +7806,7 @@ dependencies = [ "macro_magic_core_macros", "proc-macro2", "quote", - "syn 2.0.38", + "syn 2.0.39", ] [[package]] @@ -7817,7 +7817,7 @@ checksum = "9ea73aa640dc01d62a590d48c0c3521ed739d53b27f919b25c3551e233481654" dependencies = [ "proc-macro2", "quote", - "syn 2.0.38", + "syn 2.0.39", ] [[package]] @@ -7828,7 +7828,7 @@ checksum = "ef9d79ae96aaba821963320eb2b6e34d17df1e5a83d8a1985c29cc5be59577b3" dependencies = [ "macro_magic_core", "quote", - "syn 2.0.38", + "syn 2.0.39", ] [[package]] @@ -9544,7 +9544,7 @@ version = "4.0.0-dev" dependencies = [ "proc-macro2", "quote", - "syn 2.0.38", + "syn 2.0.39", ] [[package]] @@ -10686,7 +10686,7 @@ dependencies = [ "proc-macro2", "quote", "sp-runtime", - "syn 2.0.38", + "syn 2.0.39", ] [[package]] @@ -11555,7 +11555,7 @@ dependencies = [ "pest_meta", "proc-macro2", "quote", - "syn 2.0.38", + "syn 2.0.39", ] [[package]] @@ -11596,7 +11596,7 @@ checksum = "4359fd9c9171ec6e8c62926d6faaf553a8dc3f64e1507e76da7911b4f6a04405" dependencies = [ "proc-macro2", "quote", - "syn 2.0.38", + "syn 2.0.39", ] [[package]] @@ -13474,7 +13474,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6c64d9ba0963cdcea2e1b2230fbae2bab30eb25a174be395c41e764bfb65dd62" dependencies = [ "proc-macro2", - "syn 2.0.38", + "syn 2.0.39", ] [[package]] @@ -13565,7 +13565,7 @@ checksum = "9b698b0b09d40e9b7c1a47b132d66a8b54bcd20583d9b6d06e4535e383b4405c" dependencies = [ "proc-macro2", "quote", - "syn 2.0.38", + "syn 2.0.39", ] [[package]] @@ -13637,7 +13637,7 @@ checksum = "440f724eba9f6996b75d63681b0a92b06947f1457076d503a4d2e2c8f56442b8" dependencies = [ "proc-macro2", "quote", - "syn 2.0.38", + "syn 2.0.39", ] [[package]] @@ -14039,7 +14039,7 @@ checksum = "7f7473c2cfcf90008193dd0e3e16599455cb601a9fce322b5bb55de799664925" dependencies = [ "proc-macro2", "quote", - "syn 2.0.38", + "syn 2.0.39", ] [[package]] @@ -14871,7 +14871,7 @@ dependencies = [ "proc-macro-crate 2.0.0", "proc-macro2", "quote", - "syn 2.0.38", + "syn 2.0.39", ] [[package]] @@ -16127,7 +16127,7 @@ dependencies = [ "proc-macro-crate 2.0.0", "proc-macro2", "quote", - "syn 2.0.38", + "syn 2.0.39", ] [[package]] @@ -16496,7 +16496,7 @@ checksum = "43576ca501357b9b071ac53cdc7da8ef0cbd9493d8df094cd821777ea6e894d3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.38", + "syn 2.0.39", ] [[package]] @@ -16562,7 +16562,7 @@ checksum = "91d129178576168c589c9ec973feedf7d3126c01ac2bf08795109aa35b69fb8f" dependencies = [ "proc-macro2", "quote", - "syn 2.0.38", + "syn 2.0.39", ] [[package]] @@ -17006,7 +17006,7 @@ dependencies = [ "proc-macro-crate 2.0.0", "proc-macro2", "quote", - "syn 2.0.38", + "syn 2.0.39", ] [[package]] @@ -17341,7 +17341,7 @@ version = "9.0.0" dependencies = [ "quote", "sp-core-hashing", - "syn 2.0.38", + "syn 2.0.39", ] [[package]] @@ -17399,7 +17399,7 @@ version = "8.0.0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.38", + "syn 2.0.39", ] [[package]] @@ -17409,7 +17409,7 @@ source = "git+https://github.com/paritytech/polkadot-sdk#82912acb33a9030c0ef3bf5 dependencies = [ "proc-macro2", "quote", - "syn 2.0.38", + "syn 
2.0.39", ] [[package]] @@ -17683,7 +17683,7 @@ dependencies = [ "proc-macro-crate 2.0.0", "proc-macro2", "quote", - "syn 2.0.38", + "syn 2.0.39", ] [[package]] @@ -17695,7 +17695,7 @@ dependencies = [ "proc-macro-crate 1.3.1", "proc-macro2", "quote", - "syn 2.0.38", + "syn 2.0.39", ] [[package]] @@ -17967,7 +17967,7 @@ dependencies = [ "proc-macro2", "quote", "sp-version", - "syn 2.0.38", + "syn 2.0.39", ] [[package]] @@ -18801,9 +18801,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.38" +version = "2.0.39" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e96b79aaa137db8f61e26363a0c9b47d8b4ec75da28b7d1d614c2303e232408b" +checksum = "23e78b90f2fcf45d3e842032ce32e3f2d1545ba6636271dcbf24fa306d87be7a" dependencies = [ "proc-macro2", "quote", @@ -19071,7 +19071,7 @@ checksum = "266b2e40bc00e5a6c09c3584011e08b06f123c00362c92b975ba9843aaaa14b8" dependencies = [ "proc-macro2", "quote", - "syn 2.0.38", + "syn 2.0.39", ] [[package]] @@ -19243,7 +19243,7 @@ checksum = "630bdcf245f78637c13ec01ffae6187cca34625e8c63150d424b59e55af2675e" dependencies = [ "proc-macro2", "quote", - "syn 2.0.38", + "syn 2.0.39", ] [[package]] @@ -19460,7 +19460,7 @@ checksum = "5f4f31f56159e98206da9efd823404b79b6ef3143b4a7ab76e67b1751b25a4ab" dependencies = [ "proc-macro2", "quote", - "syn 2.0.38", + "syn 2.0.39", ] [[package]] @@ -19502,7 +19502,7 @@ dependencies = [ "proc-macro-crate 2.0.0", "proc-macro2", "quote", - "syn 2.0.38", + "syn 2.0.39", ] [[package]] @@ -20057,7 +20057,7 @@ dependencies = [ "once_cell", "proc-macro2", "quote", - "syn 2.0.38", + "syn 2.0.39", "wasm-bindgen-shared", ] @@ -20091,7 +20091,7 @@ checksum = "54681b18a46765f095758388f2d0cf16eb8d4169b639ab575a8f5693af210c7b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.38", + "syn 2.0.39", "wasm-bindgen-backend", "wasm-bindgen-shared", ] @@ -21269,7 +21269,7 @@ dependencies = [ "proc-macro2", "quote", "staging-xcm", - "syn 2.0.38", + "syn 2.0.39", "trybuild", ] @@ -21389,7 +21389,7 @@ checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" dependencies = [ "proc-macro2", "quote", - "syn 2.0.38", + "syn 2.0.39", ] [[package]] diff --git a/cumulus/pallets/parachain-system/proc-macro/Cargo.toml b/cumulus/pallets/parachain-system/proc-macro/Cargo.toml index ed85b718f50..a0f18237c79 100644 --- a/cumulus/pallets/parachain-system/proc-macro/Cargo.toml +++ b/cumulus/pallets/parachain-system/proc-macro/Cargo.toml @@ -10,7 +10,7 @@ license = "Apache-2.0" proc-macro = true [dependencies] -syn = "2.0.38" +syn = "2.0.39" proc-macro2 = "1.0.64" quote = "1.0.33" proc-macro-crate = "2.0.0" diff --git a/polkadot/node/gum/proc-macro/Cargo.toml b/polkadot/node/gum/proc-macro/Cargo.toml index c775c570705..731798cc01e 100644 --- a/polkadot/node/gum/proc-macro/Cargo.toml +++ b/polkadot/node/gum/proc-macro/Cargo.toml @@ -13,7 +13,7 @@ targets = ["x86_64-unknown-linux-gnu"] proc-macro = true [dependencies] -syn = { version = "2.0.38", features = ["extra-traits", "full"] } +syn = { version = "2.0.39", features = ["extra-traits", "full"] } quote = "1.0.28" proc-macro2 = "1.0.56" proc-macro-crate = "2.0.0" diff --git a/polkadot/xcm/procedural/Cargo.toml b/polkadot/xcm/procedural/Cargo.toml index 8ab27c91dae..b42f69d4438 100644 --- a/polkadot/xcm/procedural/Cargo.toml +++ b/polkadot/xcm/procedural/Cargo.toml @@ -13,7 +13,7 @@ proc-macro = true [dependencies] proc-macro2 = "1.0.56" quote = "1.0.28" -syn = "2.0.38" +syn = "2.0.39" Inflector = "0.11.4" [dev-dependencies] diff --git 
a/substrate/client/chain-spec/derive/Cargo.toml b/substrate/client/chain-spec/derive/Cargo.toml index c200d84244d..4dea856b03f 100644 --- a/substrate/client/chain-spec/derive/Cargo.toml +++ b/substrate/client/chain-spec/derive/Cargo.toml @@ -18,4 +18,4 @@ proc-macro = true proc-macro-crate = "2.0.0" proc-macro2 = "1.0.56" quote = "1.0.28" -syn = "2.0.38" +syn = "2.0.39" diff --git a/substrate/client/tracing/proc-macro/Cargo.toml b/substrate/client/tracing/proc-macro/Cargo.toml index f0cca5c41b6..1d5d638c49b 100644 --- a/substrate/client/tracing/proc-macro/Cargo.toml +++ b/substrate/client/tracing/proc-macro/Cargo.toml @@ -18,4 +18,4 @@ proc-macro = true proc-macro-crate = "2.0.0" proc-macro2 = "1.0.56" quote = { version = "1.0.28", features = ["proc-macro"] } -syn = { version = "2.0.38", features = ["extra-traits", "full", "parsing", "proc-macro"] } +syn = { version = "2.0.39", features = ["extra-traits", "full", "parsing", "proc-macro"] } diff --git a/substrate/frame/contracts/proc-macro/Cargo.toml b/substrate/frame/contracts/proc-macro/Cargo.toml index 3ada9e0c23d..8aa6e6679cd 100644 --- a/substrate/frame/contracts/proc-macro/Cargo.toml +++ b/substrate/frame/contracts/proc-macro/Cargo.toml @@ -17,7 +17,7 @@ proc-macro = true [dependencies] proc-macro2 = "1.0.56" quote = "1.0.28" -syn = { version = "2.0.38", features = ["full"] } +syn = { version = "2.0.39", features = ["full"] } [dev-dependencies] diff --git a/substrate/frame/election-provider-support/solution-type/Cargo.toml b/substrate/frame/election-provider-support/solution-type/Cargo.toml index e5d59685661..5bf84daf52d 100644 --- a/substrate/frame/election-provider-support/solution-type/Cargo.toml +++ b/substrate/frame/election-provider-support/solution-type/Cargo.toml @@ -15,7 +15,7 @@ targets = ["x86_64-unknown-linux-gnu"] proc-macro = true [dependencies] -syn = { version = "2.0.38", features = ["full", "visit"] } +syn = { version = "2.0.39", features = ["full", "visit"] } quote = "1.0.28" proc-macro2 = "1.0.56" proc-macro-crate = "2.0.0" diff --git a/substrate/frame/staking/reward-curve/Cargo.toml b/substrate/frame/staking/reward-curve/Cargo.toml index 2d57dc60fff..d3a1f439cf9 100644 --- a/substrate/frame/staking/reward-curve/Cargo.toml +++ b/substrate/frame/staking/reward-curve/Cargo.toml @@ -18,7 +18,7 @@ proc-macro = true proc-macro-crate = "2.0.0" proc-macro2 = "1.0.56" quote = "1.0.28" -syn = { version = "2.0.38", features = ["full", "visit"] } +syn = { version = "2.0.39", features = ["full", "visit"] } [dev-dependencies] sp-runtime = { path = "../../../primitives/runtime" } diff --git a/substrate/frame/support/procedural/Cargo.toml b/substrate/frame/support/procedural/Cargo.toml index 5422ca0635c..f1d8a7d4ca9 100644 --- a/substrate/frame/support/procedural/Cargo.toml +++ b/substrate/frame/support/procedural/Cargo.toml @@ -21,7 +21,7 @@ cfg-expr = "0.15.5" itertools = "0.10.3" proc-macro2 = "1.0.56" quote = "1.0.28" -syn = { version = "2.0.38", features = ["full"] } +syn = { version = "2.0.39", features = ["full"] } frame-support-procedural-tools = { path = "tools" } macro_magic = { version = "0.5.0", features = ["proc_support"] } proc-macro-warning = { version = "1.0.0", default-features = false } diff --git a/substrate/frame/support/procedural/tools/Cargo.toml b/substrate/frame/support/procedural/tools/Cargo.toml index 4921a2a9428..bc5cc7fdda5 100644 --- a/substrate/frame/support/procedural/tools/Cargo.toml +++ b/substrate/frame/support/procedural/tools/Cargo.toml @@ -15,5 +15,5 @@ targets = ["x86_64-unknown-linux-gnu"] 
proc-macro-crate = "2.0.0" proc-macro2 = "1.0.56" quote = "1.0.28" -syn = { version = "2.0.38", features = ["extra-traits", "full", "visit"] } +syn = { version = "2.0.39", features = ["extra-traits", "full", "visit"] } frame-support-procedural-tools-derive = { path = "derive" } diff --git a/substrate/frame/support/procedural/tools/derive/Cargo.toml b/substrate/frame/support/procedural/tools/derive/Cargo.toml index c943de998f4..6040449df65 100644 --- a/substrate/frame/support/procedural/tools/derive/Cargo.toml +++ b/substrate/frame/support/procedural/tools/derive/Cargo.toml @@ -17,4 +17,4 @@ proc-macro = true [dependencies] proc-macro2 = "1.0.56" quote = { version = "1.0.28", features = ["proc-macro"] } -syn = { version = "2.0.38", features = ["extra-traits", "full", "parsing", "proc-macro"] } +syn = { version = "2.0.39", features = ["extra-traits", "full", "parsing", "proc-macro"] } diff --git a/substrate/primitives/api/proc-macro/Cargo.toml b/substrate/primitives/api/proc-macro/Cargo.toml index 232b988124b..46d05c78b4a 100644 --- a/substrate/primitives/api/proc-macro/Cargo.toml +++ b/substrate/primitives/api/proc-macro/Cargo.toml @@ -17,7 +17,7 @@ proc-macro = true [dependencies] quote = "1.0.28" -syn = { version = "2.0.38", features = ["extra-traits", "fold", "full", "visit"] } +syn = { version = "2.0.39", features = ["extra-traits", "fold", "full", "visit"] } proc-macro2 = "1.0.56" blake2 = { version = "0.10.4", default-features = false } proc-macro-crate = "2.0.0" diff --git a/substrate/primitives/core/hashing/proc-macro/Cargo.toml b/substrate/primitives/core/hashing/proc-macro/Cargo.toml index e34e20472bf..a5e5956e94f 100644 --- a/substrate/primitives/core/hashing/proc-macro/Cargo.toml +++ b/substrate/primitives/core/hashing/proc-macro/Cargo.toml @@ -17,5 +17,5 @@ proc-macro = true [dependencies] quote = "1.0.28" -syn = { version = "2.0.38", features = ["full", "parsing"] } +syn = { version = "2.0.39", features = ["full", "parsing"] } sp-core-hashing = { path = "..", default-features = false } diff --git a/substrate/primitives/debug-derive/Cargo.toml b/substrate/primitives/debug-derive/Cargo.toml index 217bcfa98d5..1f739c256d0 100644 --- a/substrate/primitives/debug-derive/Cargo.toml +++ b/substrate/primitives/debug-derive/Cargo.toml @@ -18,7 +18,7 @@ proc-macro = true [dependencies] quote = "1.0.28" -syn = "2.0.38" +syn = "2.0.39" proc-macro2 = "1.0.56" [features] diff --git a/substrate/primitives/runtime-interface/proc-macro/Cargo.toml b/substrate/primitives/runtime-interface/proc-macro/Cargo.toml index 6018ef6c2d4..efabaee3aeb 100644 --- a/substrate/primitives/runtime-interface/proc-macro/Cargo.toml +++ b/substrate/primitives/runtime-interface/proc-macro/Cargo.toml @@ -21,4 +21,4 @@ proc-macro-crate = "2.0.0" proc-macro2 = "1.0.56" quote = "1.0.28" expander = "2.0.0" -syn = { version = "2.0.38", features = ["extra-traits", "fold", "full", "visit"] } +syn = { version = "2.0.39", features = ["extra-traits", "fold", "full", "visit"] } diff --git a/substrate/primitives/version/proc-macro/Cargo.toml b/substrate/primitives/version/proc-macro/Cargo.toml index 88f7193f50e..715316b842d 100644 --- a/substrate/primitives/version/proc-macro/Cargo.toml +++ b/substrate/primitives/version/proc-macro/Cargo.toml @@ -19,7 +19,7 @@ proc-macro = true codec = { package = "parity-scale-codec", version = "3.6.1", features = ["derive"] } proc-macro2 = "1.0.56" quote = "1.0.28" -syn = { version = "2.0.38", features = ["extra-traits", "fold", "full", "visit"] } +syn = { version = "2.0.39", features = 
["extra-traits", "fold", "full", "visit"] } [dev-dependencies] sp-version = { path = ".." } -- GitLab From 5789d6a7b4ab3111f76b56a706ecdc672c4714c7 Mon Sep 17 00:00:00 2001 From: Michal Kucharczyk <1728078+michalkucharczyk@users.noreply.github.com> Date: Fri, 1 Dec 2023 21:23:19 +0100 Subject: [PATCH 179/199] sassafras: taplo happy (#2583) fixes: `taplo format check` --- substrate/frame/sassafras/Cargo.toml | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/substrate/frame/sassafras/Cargo.toml b/substrate/frame/sassafras/Cargo.toml index 7ab2e2e1770..745297bd416 100644 --- a/substrate/frame/sassafras/Cargo.toml +++ b/substrate/frame/sassafras/Cargo.toml @@ -16,12 +16,12 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] scale-codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } scale-info = { version = "2.5.0", default-features = false, features = ["derive"] } -frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } -frame-support = { path = "../support", default-features = false } -frame-system = { path = "../system", default-features = false } +frame-benchmarking = { path = "../benchmarking", default-features = false, optional = true } +frame-support = { path = "../support", default-features = false } +frame-system = { path = "../system", default-features = false } log = { version = "0.4.17", default-features = false } sp-consensus-sassafras = { path = "../../primitives/consensus/sassafras", default-features = false, features = ["serde"] } -sp-io = { path = "../../primitives/io", default-features = false } +sp-io = { path = "../../primitives/io", default-features = false } sp-runtime = { path = "../../primitives/runtime", default-features = false } sp-std = { path = "../../primitives/std", default-features = false } @@ -30,7 +30,7 @@ array-bytes = "6.1" sp-core = { path = "../../primitives/core" } [features] -default = [ "std" ] +default = ["std"] std = [ "frame-benchmarking?/std", "frame-support/std", -- GitLab From ecfdb2b74f2735e3b5a755fea6d659fc742da1d6 Mon Sep 17 00:00:00 2001 From: Davide Galassi Date: Fri, 1 Dec 2023 22:50:34 +0100 Subject: [PATCH 180/199] Bandersnatch: ring-context generic over domain size (#2581) Serialized length is now statically computed depending on the domain size. Opens the primitive to more generic usages not related to Sassafras expectations Address one point of https://github.com/paritytech/polkadot-sdk/issues/2364 --- .../primitives/consensus/sassafras/src/vrf.rs | 8 +- substrate/primitives/core/src/bandersnatch.rs | 115 ++++++++++-------- substrate/primitives/keystore/src/testing.rs | 2 +- 3 files changed, 70 insertions(+), 55 deletions(-) diff --git a/substrate/primitives/consensus/sassafras/src/vrf.rs b/substrate/primitives/consensus/sassafras/src/vrf.rs index bdbac0aae03..afcb1d09f8f 100644 --- a/substrate/primitives/consensus/sassafras/src/vrf.rs +++ b/substrate/primitives/consensus/sassafras/src/vrf.rs @@ -23,10 +23,16 @@ use sp_consensus_slots::Slot; use sp_std::vec::Vec; pub use sp_core::bandersnatch::{ - ring_vrf::{RingContext, RingProver, RingVerifier, RingVerifierData, RingVrfSignature}, + ring_vrf::{RingProver, RingVerifier, RingVerifierData, RingVrfSignature}, vrf::{VrfInput, VrfOutput, VrfSignData, VrfSignature}, }; +/// Ring VRF domain size for Sassafras consensus. +pub const RING_VRF_DOMAIN_SIZE: u32 = 2048; + +/// Bandersnatch VRF [`RingContext`] specialization for Sassafras using [`RING_VRF_DOMAIN_SIZE`]. 
+pub type RingContext = sp_core::bandersnatch::ring_vrf::RingContext; + fn vrf_input_from_data( domain: &[u8], data: impl IntoIterator>, diff --git a/substrate/primitives/core/src/bandersnatch.rs b/substrate/primitives/core/src/bandersnatch.rs index 1d666f13b62..b91c001a7de 100644 --- a/substrate/primitives/core/src/bandersnatch.rs +++ b/substrate/primitives/core/src/bandersnatch.rs @@ -39,7 +39,7 @@ use codec::{Decode, Encode, EncodeLike, MaxEncodedLen}; use scale_info::TypeInfo; use sp_runtime_interface::pass_by::PassByInner; -use sp_std::{boxed::Box, vec::Vec}; +use sp_std::{vec, vec::Vec}; /// Identifier used to match public keys against bandersnatch-vrf keys. pub const CRYPTO_ID: CryptoTypeId = CryptoTypeId(*b"band"); @@ -622,31 +622,25 @@ pub mod ring_vrf { pub use bandersnatch_vrfs::ring::{RingProof, RingProver, RingVerifier, KZG}; use bandersnatch_vrfs::{ring::VerifierKey, CanonicalDeserialize, PublicKey}; - /// Ring max size (keyset max size). - pub const RING_MAX_SIZE: u32 = RING_DOMAIN_MAX_SIZE - RING_DOMAIN_OVERHEAD; + /// Overhead in the domain size with respect to the supported ring size. + /// + /// Some bits of the domain are reserved for the zk-proof to work. + pub const RING_DOMAIN_OVERHEAD: u32 = 257; - /// Ring domain max size. - pub const RING_DOMAIN_MAX_SIZE: u32 = 2048; + // Max size of serialized ring-vrf context given `domain_len`. + pub(crate) const fn ring_context_serialized_size(domain_len: u32) -> usize { + // const G1_POINT_COMPRESSED_SIZE: usize = 48; + // const G2_POINT_COMPRESSED_SIZE: usize = 96; + const G1_POINT_UNCOMPRESSED_SIZE: usize = 96; + const G2_POINT_UNCOMPRESSED_SIZE: usize = 192; + const OVERHEAD_SIZE: usize = 20; + const G2_POINTS_NUM: usize = 2; + let g1_points_num = 3 * domain_len as usize + 1; - /// Overhead in the domain size over the max ring size. - /// - /// Some bits of the domain are reserved for the zk proof to work. - pub(crate) const RING_DOMAIN_OVERHEAD: u32 = 257; - - // Max size of serialized ring-vrf context params. - // - // The actual size is dependent on the ring domain size and this value - // has been computed for `RING_DOMAIN_MAX_SIZE` with compression disabled - // for performance reasons. - // - // 1024 uncompressed - // pub(crate) const RING_CONTEXT_SERIALIZED_MAX_SIZE: usize = 295412; - // 1024 compressed - // pub(crate) const RING_CONTEXT_SERIALIZED_MAX_SIZE: usize = 147716; - // 2048 uncompressed - pub(crate) const RING_CONTEXT_SERIALIZED_MAX_SIZE: usize = 590324; - // 2048 compressed - // pub(crate) const RING_CONTEXT_SERIALIZED_MAX_SIZE: usize = 295172; + OVERHEAD_SIZE + + g1_points_num * G1_POINT_UNCOMPRESSED_SIZE + + G2_POINTS_NUM * G2_POINT_UNCOMPRESSED_SIZE + } pub(crate) const RING_VERIFIER_DATA_SERIALIZED_SIZE: usize = 388; pub(crate) const RING_SIGNATURE_SERIALIZED_SIZE: usize = 755; @@ -705,15 +699,17 @@ pub mod ring_vrf { } /// Context used to construct ring prover and verifier. + /// + /// Generic parameter `D` represents the ring domain size and drives + /// the max number of supported ring members [`RingContext::max_keyset_size`] + /// which is equal to `D - RING_DOMAIN_OVERHEAD`. #[derive(Clone)] - pub struct RingContext(KZG); + pub struct RingContext(KZG); - impl RingContext { + impl RingContext { /// Build an dummy instance for testing purposes. - /// - /// `domain_size` is set to `RING_DOMAIN_MAX_SIZE`. pub fn new_testing() -> Self { - Self(KZG::testing_kzg_setup([0; 32], RING_DOMAIN_MAX_SIZE)) + Self(KZG::testing_kzg_setup([0; 32], D)) } /// Get the keyset max size. 
@@ -761,38 +757,45 @@ pub mod ring_vrf { } } - impl Encode for RingContext { + impl Encode for RingContext { fn encode(&self) -> Vec { - let mut buf = Box::new([0; RING_CONTEXT_SERIALIZED_MAX_SIZE]); + let mut buf = vec![0; ring_context_serialized_size(D)]; self.0 .serialize_uncompressed(buf.as_mut_slice()) .expect("serialization length is constant and checked by test; qed"); - buf.encode() + buf } } - impl Decode for RingContext { - fn decode(i: &mut R) -> Result { - let buf = >::decode(i)?; + impl Decode for RingContext { + fn decode(input: &mut R) -> Result { + let mut buf = vec![0; ring_context_serialized_size(D)]; + input.read(&mut buf[..])?; let kzg = KZG::deserialize_uncompressed_unchecked(buf.as_slice()) .map_err(|_| "KZG decode error")?; Ok(RingContext(kzg)) } } - impl EncodeLike for RingContext {} + impl EncodeLike for RingContext {} - impl MaxEncodedLen for RingContext { + impl MaxEncodedLen for RingContext { fn max_encoded_len() -> usize { - <[u8; RING_CONTEXT_SERIALIZED_MAX_SIZE]>::max_encoded_len() + ring_context_serialized_size(D) } } - impl TypeInfo for RingContext { - type Identity = [u8; RING_CONTEXT_SERIALIZED_MAX_SIZE]; + impl TypeInfo for RingContext { + type Identity = Self; fn type_info() -> scale_info::Type { - Self::Identity::type_info() + let path = scale_info::Path::new("RingContext", module_path!()); + let array_type_def = scale_info::TypeDefArray { + len: ring_context_serialized_size(D) as u32, + type_param: scale_info::MetaType::new::(), + }; + let type_def = scale_info::TypeDef::Array(array_type_def); + scale_info::Type { path, type_params: Vec::new(), type_def, docs: Vec::new() } } } @@ -903,7 +906,11 @@ pub mod ring_vrf { mod tests { use super::{ring_vrf::*, vrf::*, *}; use crate::crypto::{VrfPublic, VrfSecret, DEV_PHRASE}; + const DEV_SEED: &[u8; SEED_SERIALIZED_SIZE] = &[0xcb; SEED_SERIALIZED_SIZE]; + const TEST_DOMAIN_SIZE: u32 = 1024; + + type TestRingContext = RingContext; #[allow(unused)] fn b2h(bytes: &[u8]) -> String { @@ -916,10 +923,10 @@ mod tests { #[test] fn backend_assumptions_sanity_check() { - let kzg = KZG::testing_kzg_setup([0; 32], RING_DOMAIN_MAX_SIZE); - assert_eq!(kzg.max_keyset_size() as u32, RING_MAX_SIZE); + let kzg = KZG::testing_kzg_setup([0; 32], TEST_DOMAIN_SIZE); + assert_eq!(kzg.max_keyset_size() as u32, TEST_DOMAIN_SIZE - RING_DOMAIN_OVERHEAD); - assert_eq!(kzg.uncompressed_size(), RING_CONTEXT_SERIALIZED_MAX_SIZE); + assert_eq!(kzg.uncompressed_size(), ring_context_serialized_size(TEST_DOMAIN_SIZE)); let pks: Vec<_> = (0..16) .map(|i| SecretKey::from_seed(&[i as u8; 32]).to_public().0.into()) @@ -1071,7 +1078,7 @@ mod tests { #[test] fn ring_vrf_sign_verify() { - let ring_ctx = RingContext::new_testing(); + let ring_ctx = TestRingContext::new_testing(); let mut pks: Vec<_> = (0..16).map(|i| Pair::from_seed(&[i as u8; 32]).public()).collect(); assert!(pks.len() <= ring_ctx.max_keyset_size()); @@ -1097,7 +1104,7 @@ mod tests { #[test] fn ring_vrf_sign_verify_with_out_of_ring_key() { - let ring_ctx = RingContext::new_testing(); + let ring_ctx = TestRingContext::new_testing(); let pks: Vec<_> = (0..16).map(|i| Pair::from_seed(&[i as u8; 32]).public()).collect(); let pair = Pair::from_seed(DEV_SEED); @@ -1116,7 +1123,7 @@ mod tests { #[test] fn ring_vrf_make_bytes_matches() { - let ring_ctx = RingContext::new_testing(); + let ring_ctx = TestRingContext::new_testing(); let mut pks: Vec<_> = (0..16).map(|i| Pair::from_seed(&[i as u8; 32]).public()).collect(); assert!(pks.len() <= ring_ctx.max_keyset_size()); @@ -1145,7 +1152,7 @@ 
mod tests { #[test] fn encode_decode_ring_vrf_signature() { - let ring_ctx = RingContext::new_testing(); + let ring_ctx = TestRingContext::new_testing(); let mut pks: Vec<_> = (0..16).map(|i| Pair::from_seed(&[i as u8; 32]).public()).collect(); assert!(pks.len() <= ring_ctx.max_keyset_size()); @@ -1177,13 +1184,15 @@ mod tests { #[test] fn encode_decode_ring_vrf_context() { - let ctx1 = RingContext::new_testing(); + let ctx1 = TestRingContext::new_testing(); let enc1 = ctx1.encode(); - assert_eq!(enc1.len(), RING_CONTEXT_SERIALIZED_MAX_SIZE); - assert_eq!(RingContext::max_encoded_len(), RING_CONTEXT_SERIALIZED_MAX_SIZE); + let _ti = ::type_info(); + + assert_eq!(enc1.len(), ring_context_serialized_size(TEST_DOMAIN_SIZE)); + assert_eq!(enc1.len(), TestRingContext::max_encoded_len()); - let ctx2 = RingContext::decode(&mut enc1.as_slice()).unwrap(); + let ctx2 = TestRingContext::decode(&mut enc1.as_slice()).unwrap(); let enc2 = ctx2.encode(); assert_eq!(enc1, enc2); @@ -1191,7 +1200,7 @@ mod tests { #[test] fn encode_decode_verifier_data() { - let ring_ctx = RingContext::new_testing(); + let ring_ctx = TestRingContext::new_testing(); let pks: Vec<_> = (0..16).map(|i| Pair::from_seed(&[i as u8; 32]).public()).collect(); assert!(pks.len() <= ring_ctx.max_keyset_size()); diff --git a/substrate/primitives/keystore/src/testing.rs b/substrate/primitives/keystore/src/testing.rs index 08110e8e497..9b59b6d09ea 100644 --- a/substrate/primitives/keystore/src/testing.rs +++ b/substrate/primitives/keystore/src/testing.rs @@ -525,7 +525,7 @@ mod tests { let store = MemoryKeystore::new(); - let ring_ctx = bandersnatch::ring_vrf::RingContext::new_testing(); + let ring_ctx = bandersnatch::ring_vrf::RingContext::<1024>::new_testing(); let mut pks: Vec<_> = (0..16) .map(|i| bandersnatch::Pair::from_seed(&[i as u8; 32]).public()) -- GitLab From f5051c80032676fe1d5f8599c04e18037fef74af Mon Sep 17 00:00:00 2001 From: Jonathan Udd Date: Sat, 2 Dec 2023 23:05:37 +0100 Subject: [PATCH 181/199] Removed instruction to call init.sh when building from source (#2580) # Description Removed instruction to call init.sh when building from source from the polkadot readme. --- polkadot/README.md | 1 - 1 file changed, 1 deletion(-) diff --git a/polkadot/README.md b/polkadot/README.md index 3c234bb8e3f..465fe5555fe 100644 --- a/polkadot/README.md +++ b/polkadot/README.md @@ -63,7 +63,6 @@ directory of the repo: ```bash git checkout -./scripts/init.sh cargo build --release ``` -- GitLab From 707dbcc63832d3da30ac7ded52a36ff52098fa1e Mon Sep 17 00:00:00 2001 From: Marcin S Date: Mon, 4 Dec 2023 10:14:40 +0100 Subject: [PATCH 182/199] impl guide: update PVF host page; add diagrams (#2579) --- polkadot/roadmap/implementers-guide/README.md | 2 +- .../src/node/utility/candidate-validation.md | 25 +++++ .../src/node/utility/pvf-host-and-workers.md | 93 +++++++++++++++++-- .../src/node/utility/pvf-prechecker.md | 6 +- 4 files changed, 113 insertions(+), 13 deletions(-) diff --git a/polkadot/roadmap/implementers-guide/README.md b/polkadot/roadmap/implementers-guide/README.md index e03c0c45ddb..abff017138c 100644 --- a/polkadot/roadmap/implementers-guide/README.md +++ b/polkadot/roadmap/implementers-guide/README.md @@ -8,7 +8,7 @@ This is available [here](https://paritytech.github.io/polkadot-sdk/book/).
## Local build -To view it locally from the repo root: +To view it locally, run the following (from the `polkadot/` directory): Ensure graphviz is installed: diff --git a/polkadot/roadmap/implementers-guide/src/node/utility/candidate-validation.md b/polkadot/roadmap/implementers-guide/src/node/utility/candidate-validation.md index e252ec237b7..1a3ff1c6aff 100644 --- a/polkadot/roadmap/implementers-guide/src/node/utility/candidate-validation.md +++ b/polkadot/roadmap/implementers-guide/src/node/utility/candidate-validation.md @@ -5,6 +5,31 @@ This subsystem is responsible for handling candidate validation requests. It is A variety of subsystems want to know if a parachain block candidate is valid. None of them care about the detailed mechanics of how a candidate gets validated, just the results. This subsystem handles those details. +## High-Level Flow + +```dot process +digraph { + rankdir="LR"; + + pre [label = "Pvf-Checker"; shape = square] + bac [label = "Backing"; shape = square] + app [label = "Approval\nVoting"; shape = square] + dis [label = "Dispute\nCoordinator"; shape = square] + + can [label = "Candidate\nValidation"; shape = square] + + pvf [label = "PVF Host"; shape = square] + + pre -> can [style = dashed] + bac -> can + app -> can + dis -> can + + can -> pvf [label = "Precheck"; style = dashed] + can -> pvf [label = "Validate"] +} +``` + ## Protocol Input: [`CandidateValidationMessage`](../../types/overseer-protocol.md#validation-request-type) diff --git a/polkadot/roadmap/implementers-guide/src/node/utility/pvf-host-and-workers.md b/polkadot/roadmap/implementers-guide/src/node/utility/pvf-host-and-workers.md index 56bdd48bc0c..e0984bd58d1 100644 --- a/polkadot/roadmap/implementers-guide/src/node/utility/pvf-host-and-workers.md +++ b/polkadot/roadmap/implementers-guide/src/node/utility/pvf-host-and-workers.md @@ -2,12 +2,82 @@ The PVF host is responsible for handling requests to prepare and execute PVF code blobs, which it sends to PVF **workers** running in their own child -processes. +processes. These workers are spawned from the `polkadot-prepare-worker` and +`polkadot-execute-worker` binaries. While the workers are generally long-living, they also spawn one-off secure **job processes** that perform the jobs. See "Job Processes" section below. 
-This system has two high-levels goals that we will touch on here: *determinism* +## High-Level Flow + +```dot process +digraph { + rankdir="LR"; + + can [label = "Candidate\nValidation\nSubsystem"; shape = square] + + pvf [label = "PVF Host"; shape = square] + + pq [label = "Prepare\nQueue"; shape = square] + eq [label = "Execute\nQueue"; shape = square] + pp [label = "Prepare\nPool"; shape = square] + + subgraph "cluster partial_sandbox_prep" { + label = "polkadot-prepare-worker\n(Partial Sandbox)\n\n\n"; + labelloc = "t"; + + pw [label = "Prepare\nWorker"; shape = square] + + subgraph "cluster full_sandbox_prep" { + label = "Fully Isolated Sandbox\n\n\n"; + labelloc = "t"; + + pj [label = "Prepare\nJob"; shape = square] + } + } + + subgraph "cluster partial_sandbox_exec" { + label = "polkadot-execute-worker\n(Partial Sandbox)\n\n\n"; + labelloc = "t"; + + ew [label = "Execute\nWorker"; shape = square] + + subgraph "cluster full_sandbox_exec" { + label = "Fully Isolated Sandbox\n\n\n"; + labelloc = "t"; + + ej [label = "Execute\nJob"; shape = square] + } + } + + can -> pvf [label = "Precheck"; style = dashed] + can -> pvf [label = "Validate"] + + pvf -> pq [label = "Prepare"; style = dashed] + pvf -> eq [label = "Execute";] + pvf -> pvf [label = "see (2) and (3)"; style = dashed] + pq -> pp [style = dashed] + + pp -> pw [style = dashed] + eq -> ew + + pw -> pj [style = dashed] + ew -> ej +} +``` + +Some notes about the graph: + +1. Once a job has finished, the response will flow back up the way it came. +2. In the case of execution, the host will send a request for preparation to the + Prepare Queue if needed. In that case, only after the preparation succeeds + does the Execute Queue continue with validation. +3. Multiple requests for preparing the same artifact are coalesced, so that the + work is only done once. + +## Goals + +This system has two high-level goals that we will touch on here: *determinism* and *security*. ## Determinism @@ -142,19 +212,24 @@ So what are we actually worried about? Things that come to mind: 6. **Intercepting and manipulating packages** - Effect very similar to the above, hard to do without also being able to do 4 or 5. +We do not protect against (1), (2), and (3), because there are too many sources +of randomness for an attacker to exploit. + +We provide very good protection against (4), (5), and (6). + ### Job Processes As mentioned above, our architecture includes long-living **worker processes** -and one-off **job processes*. This separation is important so that the handling +and one-off **job processes**. This separation is important so that the handling of untrusted code can be limited to the job processes. A hijacked job process can therefore not interfere with other jobs running in separate processes. -Furthermore, if an unexpected execution error occurred in the worker and not the -job, we generally can be confident that it has nothing to do with the candidate, -so we can abstain from voting. On the other hand, a hijacked job can send back -erroneous responses for candidates, so we know that we should not abstain from -voting on such errors from jobs. Otherwise, an attacker could trigger a finality -stall. (See "Internal Errors" section above.) +Furthermore, if an unexpected execution error occurred in the execution worker +and not the job itself, we generally can be confident that it has nothing to do +with the candidate, so we can abstain from voting. 
On the other hand, a hijacked +job is able to send back erroneous responses for candidates, so we know that we +should not abstain from voting on such errors from jobs. Otherwise, an attacker +could trigger a finality stall. (See "Internal Errors" section above.) ### Restricting file-system access diff --git a/polkadot/roadmap/implementers-guide/src/node/utility/pvf-prechecker.md b/polkadot/roadmap/implementers-guide/src/node/utility/pvf-prechecker.md index f0de50f2267..7f6fef7ddf6 100644 --- a/polkadot/roadmap/implementers-guide/src/node/utility/pvf-prechecker.md +++ b/polkadot/roadmap/implementers-guide/src/node/utility/pvf-prechecker.md @@ -8,9 +8,9 @@ pre-checking. Head over to [overview] for the PVF pre-checking process overview. There is no dedicated input mechanism for PVF pre-checker. Instead, PVF pre-checker looks on the `ActiveLeavesUpdate` event stream for work. -This subsytem does not produce any output messages either. The subsystem will, however, send messages to the [Runtime -API] subsystem to query for the pending PVFs and to submit votes. In addition to that, it will also communicate with -[Candidate Validation] Subsystem to request PVF pre-check. +This subsytem does not produce any output messages either. The subsystem will, however, send messages to the +[Runtime API] subsystem to query for the pending PVFs and to submit votes. In addition to that, it will also +communicate with [Candidate Validation] Subsystem to request PVF pre-check. ## Functionality -- GitLab From 35c39c960804578396c78532c393d7385ee154b4 Mon Sep 17 00:00:00 2001 From: gupnik <17176722+gupnik@users.noreply.github.com> Date: Mon, 4 Dec 2023 16:42:14 +0530 Subject: [PATCH 183/199] Fixes runtime type with doc parsing in derive_impl (#2594) Step in https://github.com/paritytech/polkadot-sdk/issues/171 This PR fixes a bug in `derive_impl` causing `#[inject_runtime_type]` attribute to be parsed incorrectly when docs are added. --- .../support/procedural/src/derive_impl.rs | 41 +++++++++++++++---- 1 file changed, 32 insertions(+), 9 deletions(-) diff --git a/substrate/frame/support/procedural/src/derive_impl.rs b/substrate/frame/support/procedural/src/derive_impl.rs index 8b5e334f1f5..3e044053116 100644 --- a/substrate/frame/support/procedural/src/derive_impl.rs +++ b/substrate/frame/support/procedural/src/derive_impl.rs @@ -46,11 +46,15 @@ pub struct PalletAttr { typ: PalletAttrType, } -fn get_first_item_pallet_attr(item: &syn::ImplItemType) -> syn::Result> -where - Attr: syn::parse::Parse, -{ - item.attrs.get(0).map(|a| syn::parse2(a.into_token_stream())).transpose() +fn is_runtime_type(item: &syn::ImplItemType) -> bool { + item.attrs.iter().any(|attr| { + if let Ok(PalletAttr { typ: PalletAttrType::RuntimeType(_), .. }) = + parse2::(attr.into_token_stream()) + { + return true + } + false + }) } #[derive(Parse, Debug)] @@ -132,10 +136,7 @@ fn combine_impls( return None } if let ImplItem::Type(typ) = item.clone() { - let mut typ = typ.clone(); - if let Ok(Some(PalletAttr { typ: PalletAttrType::RuntimeType(_), .. })) = - get_first_item_pallet_attr::(&mut typ) - { + if is_runtime_type(&typ) { let item: ImplItem = if inject_runtime_types { parse_quote! 
{ type #ident = #ident; @@ -227,3 +228,25 @@ fn test_derive_impl_attr_args_parsing() { assert!(parse2::(quote!()).is_err()); assert!(parse2::(quote!(Config Config)).is_err()); } + +#[test] +fn test_runtime_type_with_doc() { + trait TestTrait { + type Test; + } + #[allow(unused)] + struct TestStruct; + let p = parse2::(quote!( + impl TestTrait for TestStruct { + /// Some doc + #[inject_runtime_type] + type Test = u32; + } + )) + .unwrap(); + for item in p.items { + if let ImplItem::Type(typ) = item { + assert_eq!(is_runtime_type(&typ), true); + } + } +} -- GitLab From 1266de3919dcc9b100c80752ad0158e26ff0fabb Mon Sep 17 00:00:00 2001 From: Serban Iorga Date: Mon, 4 Dec 2023 13:16:20 +0100 Subject: [PATCH 184/199] Cleanup XCMP `QueueConfigData` (#2142) Removes obsolete fields from the `QueueConfigData` structure. For the remaining fields, if they use the old defaults, we replace them with the new defaults. Resolves: https://github.com/paritytech/polkadot-sdk/issues/1795 --- cumulus/pallets/xcmp-queue/src/lib.rs | 24 +- cumulus/pallets/xcmp-queue/src/migration.rs | 381 +++++++++++++----- .../assets/asset-hub-rococo/src/lib.rs | 8 +- .../assets/asset-hub-westend/src/lib.rs | 2 + .../bridge-hubs/bridge-hub-rococo/src/lib.rs | 1 + .../bridge-hubs/bridge-hub-westend/src/lib.rs | 2 + .../collectives-westend/src/lib.rs | 2 + .../contracts/contracts-rococo/src/lib.rs | 4 +- prdoc/pr_2142.prdoc | 14 + 9 files changed, 308 insertions(+), 130 deletions(-) create mode 100644 prdoc/pr_2142.prdoc diff --git a/cumulus/pallets/xcmp-queue/src/lib.rs b/cumulus/pallets/xcmp-queue/src/lib.rs index d687f83d8b3..d3443163d08 100644 --- a/cumulus/pallets/xcmp-queue/src/lib.rs +++ b/cumulus/pallets/xcmp-queue/src/lib.rs @@ -60,7 +60,7 @@ use cumulus_primitives_core::{ use frame_support::{ defensive, defensive_assert, traits::{EnqueueMessage, EnsureOrigin, Get, QueueFootprint, QueuePausedQuery}, - weights::{constants::WEIGHT_REF_TIME_PER_MILLIS, Weight, WeightMeter}, + weights::{Weight, WeightMeter}, BoundedVec, }; use pallet_message_queue::OnQueueChanged; @@ -255,7 +255,7 @@ pub mod pallet { return meter.consumed() } - migration::lazy_migrate_inbound_queue::(); + migration::v3::lazy_migrate_inbound_queue::(); meter.consumed() } @@ -387,36 +387,16 @@ pub struct QueueConfigData { /// The number of pages which the queue must be reduced to before it signals that /// message sending may recommence after it has been suspended. resume_threshold: u32, - /// UNUSED - The amount of remaining weight under which we stop processing messages. - #[deprecated(note = "Will be removed")] - threshold_weight: Weight, - /// UNUSED - The speed to which the available weight approaches the maximum weight. A lower - /// number results in a faster progression. A value of 1 makes the entire weight available - /// initially. - #[deprecated(note = "Will be removed")] - weight_restrict_decay: Weight, - /// UNUSED - The maximum amount of weight any individual message may consume. Messages above - /// this weight go into the overweight queue and may only be serviced explicitly. - #[deprecated(note = "Will be removed")] - xcmp_max_individual_weight: Weight, } impl Default for QueueConfigData { fn default() -> Self { // NOTE that these default values are only used on genesis. They should give a rough idea of // what to set these values to, but is in no way a requirement. 
- #![allow(deprecated)] Self { drop_threshold: 48, // 64KiB * 48 = 3MiB suspend_threshold: 32, // 64KiB * 32 = 2MiB resume_threshold: 8, // 64KiB * 8 = 512KiB - // unused: - threshold_weight: Weight::from_parts(100_000, 0), - weight_restrict_decay: Weight::from_parts(2, 0), - xcmp_max_individual_weight: Weight::from_parts( - 20u64 * WEIGHT_REF_TIME_PER_MILLIS, - DEFAULT_POV_SIZE, - ), } } } diff --git a/cumulus/pallets/xcmp-queue/src/migration.rs b/cumulus/pallets/xcmp-queue/src/migration.rs index 6d7f434b041..6c86c3011d2 100644 --- a/cumulus/pallets/xcmp-queue/src/migration.rs +++ b/cumulus/pallets/xcmp-queue/src/migration.rs @@ -16,7 +16,7 @@ //! A module that is responsible for migration of storage. -use crate::{Config, OverweightIndex, Pallet, ParaId, QueueConfig, DEFAULT_POV_SIZE}; +use crate::{Config, OverweightIndex, Pallet, QueueConfig, QueueConfigData, DEFAULT_POV_SIZE}; use cumulus_primitives_core::XcmpMessageFormat; use frame_support::{ pallet_prelude::*, @@ -25,37 +25,17 @@ use frame_support::{ }; /// The current storage version. -pub const STORAGE_VERSION: StorageVersion = StorageVersion::new(3); +pub const STORAGE_VERSION: StorageVersion = StorageVersion::new(4); pub const LOG: &str = "runtime::xcmp-queue-migration"; -/// Migrates the pallet storage to the most recent version. -pub struct MigrationToV3(PhantomData); - -impl OnRuntimeUpgrade for MigrationToV3 { - fn on_runtime_upgrade() -> Weight { - let mut weight = T::DbWeight::get().reads(1); - - if StorageVersion::get::>() == 1 { - weight.saturating_accrue(migrate_to_v2::()); - StorageVersion::new(2).put::>(); - weight.saturating_accrue(T::DbWeight::get().writes(1)); - } - - if StorageVersion::get::>() == 2 { - weight.saturating_accrue(migrate_to_v3::()); - StorageVersion::new(3).put::>(); - weight.saturating_accrue(T::DbWeight::get().writes(1)); - } - - weight - } -} - mod v1 { use super::*; use codec::{Decode, Encode}; + #[frame_support::storage_alias] + pub(crate) type QueueConfig = StorageValue, QueueConfigData, ValueQuery>; + #[derive(Encode, Decode, Debug)] pub struct QueueConfigData { pub suspend_threshold: u32, @@ -80,6 +60,84 @@ mod v1 { } } +pub mod v2 { + use super::*; + + #[frame_support::storage_alias] + pub(crate) type QueueConfig = StorageValue, QueueConfigData, ValueQuery>; + + #[derive(Copy, Clone, Eq, PartialEq, Encode, Decode, RuntimeDebug, TypeInfo)] + pub struct QueueConfigData { + pub suspend_threshold: u32, + pub drop_threshold: u32, + pub resume_threshold: u32, + pub threshold_weight: Weight, + pub weight_restrict_decay: Weight, + pub xcmp_max_individual_weight: Weight, + } + + impl Default for QueueConfigData { + fn default() -> Self { + Self { + suspend_threshold: 2, + drop_threshold: 5, + resume_threshold: 1, + threshold_weight: Weight::from_parts(100_000, 0), + weight_restrict_decay: Weight::from_parts(2, 0), + xcmp_max_individual_weight: Weight::from_parts( + 20u64 * WEIGHT_REF_TIME_PER_MILLIS, + DEFAULT_POV_SIZE, + ), + } + } + } + + /// Migrates `QueueConfigData` from v1 (using only reference time weights) to v2 (with + /// 2D weights). 
+ pub struct UncheckedMigrationToV2(PhantomData); + + impl OnRuntimeUpgrade for UncheckedMigrationToV2 { + #[allow(deprecated)] + fn on_runtime_upgrade() -> Weight { + let translate = |pre: v1::QueueConfigData| -> v2::QueueConfigData { + v2::QueueConfigData { + suspend_threshold: pre.suspend_threshold, + drop_threshold: pre.drop_threshold, + resume_threshold: pre.resume_threshold, + threshold_weight: Weight::from_parts(pre.threshold_weight, 0), + weight_restrict_decay: Weight::from_parts(pre.weight_restrict_decay, 0), + xcmp_max_individual_weight: Weight::from_parts( + pre.xcmp_max_individual_weight, + DEFAULT_POV_SIZE, + ), + } + }; + + if v2::QueueConfig::::translate(|pre| pre.map(translate)).is_err() { + log::error!( + target: crate::LOG_TARGET, + "unexpected error when performing translation of the QueueConfig type \ + during storage upgrade to v2" + ); + } + + T::DbWeight::get().reads_writes(1, 1) + } + } + + /// [`UncheckedMigrationToV2`] wrapped in a + /// [`VersionedMigration`](frame_support::migrations::VersionedMigration), ensuring the + /// migration is only performed when on-chain version is 1. + #[allow(dead_code)] + pub type MigrationToV2 = frame_support::migrations::VersionedMigration< + 1, + 2, + UncheckedMigrationToV2, + Pallet, + ::DbWeight, + >; +} + pub mod v3 { use super::*; use crate::*; @@ -101,6 +159,10 @@ pub mod v3 { OptionQuery, >; + #[frame_support::storage_alias] + pub(crate) type QueueConfig = + StorageValue, v2::QueueConfigData, ValueQuery>; + #[derive(Clone, Eq, PartialEq, Ord, PartialOrd, Encode, Decode, TypeInfo)] pub struct InboundChannelDetails { /// The `ParaId` of the parachain that this channel is connected with. @@ -121,98 +183,135 @@ pub mod v3 { Ok, Suspended, } -} -/// Migrates `QueueConfigData` from v1 (using only reference time weights) to v2 (with -/// 2D weights). -/// -/// NOTE: Only use this function if you know what you're doing. Default to using -/// `migrate_to_latest`. -#[allow(deprecated)] -pub fn migrate_to_v2() -> Weight { - let translate = |pre: v1::QueueConfigData| -> super::QueueConfigData { - super::QueueConfigData { - suspend_threshold: pre.suspend_threshold, - drop_threshold: pre.drop_threshold, - resume_threshold: pre.resume_threshold, - threshold_weight: Weight::from_parts(pre.threshold_weight, 0), - weight_restrict_decay: Weight::from_parts(pre.weight_restrict_decay, 0), - xcmp_max_individual_weight: Weight::from_parts( - pre.xcmp_max_individual_weight, - DEFAULT_POV_SIZE, - ), - } - }; + /// Migrates the pallet storage to v3. + pub struct UncheckedMigrationToV3(PhantomData); - if QueueConfig::::translate(|pre| pre.map(translate)).is_err() { - log::error!( - target: super::LOG_TARGET, - "unexpected error when performing translation of the QueueConfig type during storage upgrade to v2" - ); + impl OnRuntimeUpgrade for UncheckedMigrationToV3 { + fn on_runtime_upgrade() -> Weight { + #[frame_support::storage_alias] + type Overweight = + CountedStorageMap, Twox64Concat, OverweightIndex, ParaId>; + let overweight_messages = Overweight::::initialize_counter() as u64; + + T::DbWeight::get().reads_writes(overweight_messages, 1) + } } - T::DbWeight::get().reads_writes(1, 1) -} + /// [`UncheckedMigrationToV3`] wrapped in a + /// [`VersionedMigration`](frame_support::migrations::VersionedMigration), ensuring the + /// migration is only performed when on-chain version is 2. 
+ pub type MigrationToV3 = frame_support::migrations::VersionedMigration< + 2, + 3, + UncheckedMigrationToV3, + Pallet, + ::DbWeight, + >; -pub fn migrate_to_v3() -> Weight { - #[frame_support::storage_alias] - type Overweight = - CountedStorageMap, Twox64Concat, OverweightIndex, ParaId>; - let overweight_messages = Overweight::::initialize_counter() as u64; + pub fn lazy_migrate_inbound_queue() { + let Some(mut states) = v3::InboundXcmpStatus::::get() else { + log::debug!(target: LOG, "Lazy migration finished: item gone"); + return + }; + let Some(ref mut next) = states.first_mut() else { + log::debug!(target: LOG, "Lazy migration finished: item empty"); + v3::InboundXcmpStatus::::kill(); + return + }; + log::debug!( + "Migrating inbound HRMP channel with sibling {:?}, msgs left {}.", + next.sender, + next.message_metadata.len() + ); + // We take the last element since the MQ is a FIFO and we want to keep the order. + let Some((block_number, format)) = next.message_metadata.pop() else { + states.remove(0); + v3::InboundXcmpStatus::::put(states); + return + }; + if format != XcmpMessageFormat::ConcatenatedVersionedXcm { + log::warn!(target: LOG, + "Dropping message with format {:?} (not ConcatenatedVersionedXcm)", + format + ); + v3::InboundXcmpMessages::::remove(&next.sender, &block_number); + v3::InboundXcmpStatus::::put(states); + return + } - T::DbWeight::get().reads_writes(overweight_messages, 1) -} + let Some(msg) = v3::InboundXcmpMessages::::take(&next.sender, &block_number) else { + defensive!("Storage corrupted: HRMP message missing:", (next.sender, block_number)); + v3::InboundXcmpStatus::::put(states); + return + }; -pub fn lazy_migrate_inbound_queue() { - let Some(mut states) = v3::InboundXcmpStatus::::get() else { - log::debug!(target: LOG, "Lazy migration finished: item gone"); - return - }; - let Some(ref mut next) = states.first_mut() else { - log::debug!(target: LOG, "Lazy migration finished: item empty"); - v3::InboundXcmpStatus::::kill(); - return - }; - log::debug!( - "Migrating inbound HRMP channel with sibling {:?}, msgs left {}.", - next.sender, - next.message_metadata.len() - ); - // We take the last element since the MQ is a FIFO and we want to keep the order. - let Some((block_number, format)) = next.message_metadata.pop() else { - states.remove(0); - v3::InboundXcmpStatus::::put(states); - return - }; - if format != XcmpMessageFormat::ConcatenatedVersionedXcm { - log::warn!(target: LOG, - "Dropping message with format {:?} (not ConcatenatedVersionedXcm)", - format - ); - v3::InboundXcmpMessages::::remove(&next.sender, &block_number); + let Ok(msg): Result, _> = msg.try_into() else { + log::error!(target: LOG, "Message dropped: too big"); + v3::InboundXcmpStatus::::put(states); + return + }; + + // Finally! We have a proper message. 
+ T::XcmpQueue::enqueue_message(msg.as_bounded_slice(), next.sender); + log::debug!(target: LOG, "Migrated HRMP message to MQ: {:?}", (next.sender, block_number)); v3::InboundXcmpStatus::::put(states); - return } +} - let Some(msg) = v3::InboundXcmpMessages::::take(&next.sender, &block_number) else { - defensive!("Storage corrupted: HRMP message missing:", (next.sender, block_number)); - v3::InboundXcmpStatus::::put(states); - return - }; +pub mod v4 { + use super::*; - let Ok(msg): Result, _> = msg.try_into() else { - log::error!(target: LOG, "Message dropped: too big"); - v3::InboundXcmpStatus::::put(states); - return - }; + /// Migrates `QueueConfigData` to v4, removing deprecated fields and bumping page + /// thresholds to at least the default values. + pub struct UncheckedMigrationToV4(PhantomData); + + impl OnRuntimeUpgrade for UncheckedMigrationToV4 { + fn on_runtime_upgrade() -> Weight { + let translate = |pre: v2::QueueConfigData| -> QueueConfigData { + let pre_default = v2::QueueConfigData::default(); + // If the previous values are the default ones, let's replace them with the new + // default. + if pre.suspend_threshold == pre_default.suspend_threshold && + pre.drop_threshold == pre_default.drop_threshold && + pre.resume_threshold == pre_default.resume_threshold + { + return QueueConfigData::default() + } + + // If the previous values are not the default ones, let's leave them as they are. + QueueConfigData { + suspend_threshold: pre.suspend_threshold, + drop_threshold: pre.drop_threshold, + resume_threshold: pre.resume_threshold, + } + }; + + if QueueConfig::::translate(|pre| pre.map(translate)).is_err() { + log::error!( + target: crate::LOG_TARGET, + "unexpected error when performing translation of the QueueConfig type \ + during storage upgrade to v4" + ); + } - // Finally! We have a proper message. - T::XcmpQueue::enqueue_message(msg.as_bounded_slice(), next.sender); - log::debug!(target: LOG, "Migrated HRMP message to MQ: {:?}", (next.sender, block_number)); - v3::InboundXcmpStatus::::put(states); + T::DbWeight::get().reads_writes(1, 1) + } + } + + /// [`UncheckedMigrationToV4`] wrapped in a + /// [`VersionedMigration`](frame_support::migrations::VersionedMigration), ensuring the + /// migration is only performed when on-chain version is 3. 
+ pub type MigrationToV4 = frame_support::migrations::VersionedMigration< + 3, + 4, + UncheckedMigrationToV4, + Pallet, + ::DbWeight, + >; } -#[cfg(test)] +#[cfg(all(feature = "try-runtime", test))] mod tests { use super::*; use crate::mock::{new_test_ext, Test}; @@ -230,14 +329,20 @@ mod tests { }; new_test_ext().execute_with(|| { + let storage_version = StorageVersion::new(1); + storage_version.put::>(); + frame_support::storage::unhashed::put_raw( &crate::QueueConfig::::hashed_key(), &v1.encode(), ); - migrate_to_v2::(); + let bytes = v2::MigrationToV2::::pre_upgrade(); + assert!(bytes.is_ok()); + v2::MigrationToV2::::on_runtime_upgrade(); + assert!(v2::MigrationToV2::::post_upgrade(bytes.unwrap()).is_ok()); - let v2 = crate::QueueConfig::::get(); + let v2 = v2::QueueConfig::::get(); assert_eq!(v1.suspend_threshold, v2.suspend_threshold); assert_eq!(v1.drop_threshold, v2.drop_threshold); @@ -247,4 +352,70 @@ mod tests { assert_eq!(v1.xcmp_max_individual_weight, v2.xcmp_max_individual_weight.ref_time()); }); } + + #[test] + #[allow(deprecated)] + fn test_migration_to_v4() { + new_test_ext().execute_with(|| { + let storage_version = StorageVersion::new(3); + storage_version.put::>(); + + let v2 = v2::QueueConfigData { + drop_threshold: 5, + suspend_threshold: 2, + resume_threshold: 1, + ..Default::default() + }; + + frame_support::storage::unhashed::put_raw( + &crate::QueueConfig::::hashed_key(), + &v2.encode(), + ); + + let bytes = v4::MigrationToV4::::pre_upgrade(); + assert!(bytes.is_ok()); + v4::MigrationToV4::::on_runtime_upgrade(); + assert!(v4::MigrationToV4::::post_upgrade(bytes.unwrap()).is_ok()); + + let v4 = QueueConfig::::get(); + + assert_eq!( + v4, + QueueConfigData { suspend_threshold: 32, drop_threshold: 48, resume_threshold: 8 } + ); + }); + + new_test_ext().execute_with(|| { + let storage_version = StorageVersion::new(3); + storage_version.put::>(); + + let v2 = v2::QueueConfigData { + drop_threshold: 100, + suspend_threshold: 50, + resume_threshold: 40, + ..Default::default() + }; + + frame_support::storage::unhashed::put_raw( + &crate::QueueConfig::::hashed_key(), + &v2.encode(), + ); + + let bytes = v4::MigrationToV4::::pre_upgrade(); + assert!(bytes.is_ok()); + v4::MigrationToV4::::on_runtime_upgrade(); + assert!(v4::MigrationToV4::::post_upgrade(bytes.unwrap()).is_ok()); + + let v4 = QueueConfig::::get(); + + assert_eq!( + v4, + QueueConfigData { + suspend_threshold: 50, + drop_threshold: 100, + resume_threshold: 40 + } + ); + }); + } } diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs index c6c9735ecb1..6b1948e7125 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs @@ -954,8 +954,12 @@ pub type SignedExtra = ( pub type UncheckedExtrinsic = generic::UncheckedExtrinsic; /// Migrations to apply on runtime upgrade. -pub type Migrations = - (pallet_collator_selection::migration::v1::MigrateToV1, InitStorageVersions); +pub type Migrations = ( + pallet_collator_selection::migration::v1::MigrateToV1, + InitStorageVersions, + // unreleased + cumulus_pallet_xcmp_queue::migration::v4::MigrationToV4, +); /// Migration to initialize storage versions for pallets added after genesis. 
/// diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs index 4eb3e2471ce..bd97c1c2ca5 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs @@ -941,6 +941,8 @@ pub type Migrations = ( InitStorageVersions, // unreleased DeleteUndecodableStorage, + // unreleased + cumulus_pallet_xcmp_queue::migration::v4::MigrationToV4, ); /// Asset Hub Westend has some undecodable storage, delete it. diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs index 152e071a26a..2fcc7121c0c 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs @@ -118,6 +118,7 @@ pub type Migrations = ( pallet_collator_selection::migration::v1::MigrateToV1, pallet_multisig::migrations::v1::MigrateToV1, InitStorageVersions, + cumulus_pallet_xcmp_queue::migration::v4::MigrationToV4, ); /// Migration to initialize storage versions for pallets added after genesis. diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs index 7b8ea1b7734..121aa5f0e86 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs @@ -118,6 +118,8 @@ pub type Migrations = ( pallet_collator_selection::migration::v1::MigrateToV1, pallet_multisig::migrations::v1::MigrateToV1, InitStorageVersions, + // unreleased + cumulus_pallet_xcmp_queue::migration::v4::MigrationToV4, ); /// Migration to initialize storage versions for pallets added after genesis. diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/src/lib.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/lib.rs index 1017ac30499..be82cf32fbb 100644 --- a/cumulus/parachains/runtimes/collectives/collectives-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/collectives/collectives-westend/src/lib.rs @@ -707,6 +707,8 @@ pub type UncheckedExtrinsic = type Migrations = ( // unreleased pallet_collator_selection::migration::v1::MigrateToV1, + // unreleased + cumulus_pallet_xcmp_queue::migration::v4::MigrationToV4, ); /// Executive: handles dispatch to the various modules. diff --git a/cumulus/parachains/runtimes/contracts/contracts-rococo/src/lib.rs b/cumulus/parachains/runtimes/contracts/contracts-rococo/src/lib.rs index 77bc93fb968..33c2e26573e 100644 --- a/cumulus/parachains/runtimes/contracts/contracts-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/contracts/contracts-rococo/src/lib.rs @@ -100,9 +100,11 @@ pub type UncheckedExtrinsic = /// Migrations to apply on runtime upgrade. 
pub type Migrations = ( cumulus_pallet_parachain_system::migration::Migration, - cumulus_pallet_xcmp_queue::migration::MigrationToV3, + cumulus_pallet_xcmp_queue::migration::v2::MigrationToV2, + cumulus_pallet_xcmp_queue::migration::v3::MigrationToV3, pallet_contracts::Migration, // unreleased + cumulus_pallet_xcmp_queue::migration::v4::MigrationToV4, ); type EventRecord = frame_system::EventRecord< diff --git a/prdoc/pr_2142.prdoc b/prdoc/pr_2142.prdoc new file mode 100644 index 00000000000..ae2ac3db9dd --- /dev/null +++ b/prdoc/pr_2142.prdoc @@ -0,0 +1,14 @@ +title: Cleanup XCMP `QueueConfigData` + +doc: + - audience: Parachain Dev + description: Removes obsolete fields from the `QueueConfigData` structure. For the remaining fields, if they use the old defaults, we replace them with the new defaults. + +migrations: + runtime: + - pallet: "cumulus_pallet_xcmp_queue" + description: "v4: Removes obsolete fields from the `QueueConfigData` structure. For the remaining fields, if they use the old defaults, we replace them with the new defaults." + +crates: [] + +host_functions: [] -- GitLab From 756a12d57134c3318c0dd3b5d9b9a304026b4aa7 Mon Sep 17 00:00:00 2001 From: Chevdor Date: Mon, 4 Dec 2023 15:25:57 +0100 Subject: [PATCH 185/199] PRDoc new schema (#1946) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## Overview This PR brings in the new version of prdoc v0.0.6 and allows: - local schema - local config - local template It also fixes the existing prdoc files to match the new schema. ## todo - [x] add a brief doc/tldr to help contributors get started - [x] test CI - [x] finalize schema - [x] publish the next `prdoc` cli version (v0.0.7 or above) --------- Co-authored-by: Egor_P Co-authored-by: Bastian Köcher --- .github/review-bot.yml | 12 +- .github/workflows/check-prdoc.yml | 36 +++-- .prdoc.toml | 7 + docs/CONTRIBUTING.md | 14 +- docs/DOCUMENTATION_GUIDELINES.md | 6 +- docs/prdoc.md | 71 +++++++++ prdoc/.template.prdoc | 11 ++ prdoc/pr_1226.prdoc | 15 +- prdoc/pr_1234.prdoc | 11 +- prdoc/pr_1255.prdoc | 15 +- prdoc/pr_1408_prodc-introduction.prdoc | 11 +- prdoc/pr_1818.prdoc | 11 +- prdoc/pr_1873.prdoc | 12 +- prdoc/pr_1913.prdoc | 7 - prdoc/pr_1921.prdoc | 11 +- prdoc/pr_1946_prdoc_new_schema.prdoc | 14 ++ prdoc/schema_user.json | 212 +++++++++++++++++++++++++ 17 files changed, 376 insertions(+), 100 deletions(-) create mode 100644 .prdoc.toml create mode 100644 docs/prdoc.md create mode 100644 prdoc/.template.prdoc create mode 100644 prdoc/pr_1946_prdoc_new_schema.prdoc create mode 100644 prdoc/schema_user.json diff --git a/.github/review-bot.yml b/.github/review-bot.yml index c522988f02e..a5155949609 100644 --- a/.github/review-bot.yml +++ b/.github/review-bot.yml @@ -1,7 +1,7 @@ rules: - name: CI files condition: - include: + include: - ^\.gitlab-ci\.yml - ^docker/.* - ^\.github/.* @@ -19,12 +19,12 @@ rules: - name: Audit rules type: basic condition: - include: + include: - ^polkadot/runtime\/(kusama|polkadot|common)\/.* - ^polkadot/primitives/src\/.+\.rs$ - ^substrate/primitives/.* - ^substrate/frame/.* - exclude: + exclude: - ^polkadot/runtime\/(kusama|polkadot)\/src\/weights\/.+\.rs$ - ^substrate\/frame\/.+\.md$ minApprovals: 1 @@ -80,7 +80,7 @@ rules: # if there are any changes in the bridges subtree (in case of backport changes back to bridges repo) - name: Bridges subtree files type: basic - condition: + condition: include: - ^bridges/.* minApprovals: 1 @@ -91,7 +91,7 @@ rules: - name: FRAME coders substrate condition: - include: + 
include: - ^substrate/frame/(?!.*(nfts/.*|uniques/.*|babe/.*|grandpa/.*|beefy|merkle-mountain-range/.*|contracts/.*|election|nomination-pools/.*|staking/.*|aura/.*)) type: "and" reviewers: @@ -105,7 +105,7 @@ rules: # Protection of THIS file - name: Review Bot condition: - include: + include: - review-bot\.yml type: "and" reviewers: diff --git a/.github/workflows/check-prdoc.yml b/.github/workflows/check-prdoc.yml index 54e4d2b9680..d5cb88a4255 100644 --- a/.github/workflows/check-prdoc.yml +++ b/.github/workflows/check-prdoc.yml @@ -6,26 +6,27 @@ on: merge_group: env: - IMAGE: paritytech/prdoc:v0.0.5 + IMAGE: docker.io/paritytech/prdoc:v0.0.7 API_BASE: https://api.github.com/repos REPO: ${{ github.repository }} GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} GITHUB_PR: ${{ github.event.pull_request.number }} - MOUNT: /prdoc ENGINE: docker + PRDOC_DOC: https://github.com/paritytech/polkadot-sdk/blob/master/docs/prdoc.md jobs: check-prdoc: runs-on: ubuntu-latest steps: + # we cannot show the version in this step (ie before checking out the repo) + # due to https://github.com/paritytech/prdoc/issues/15 - name: Skip merge queue if: ${{ contains(github.ref, 'gh-readonly-queue') }} run: exit 0 - name: Pull image run: | echo "Pulling $IMAGE" - docker pull $IMAGE - docker run --rm $IMAGE --version + $ENGINE pull $IMAGE - name: Check if PRdoc is required id: get-labels @@ -36,18 +37,29 @@ jobs: echo "Labels: ${labels}" echo "labels=${labels}" >> "$GITHUB_OUTPUT" - - name: No PRdoc required + - name: Checkout repo + uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 #v4.1.1 + + - name: Check PRDoc version + run: | + $ENGINE run --rm -v $PWD:/repo $IMAGE --version + + - name: Early exit if PR is silent if: ${{ contains(steps.get-labels.outputs.labels, 'R0') }} run: | - echo "PR detected as silent, no PRdoc is required, exiting..." + hits=$(find prdoc -name "pr_$GITHUB_PR*.prdoc" | wc -l) + if (( hits > 0 )); then + echo "PR detected as silent, but a PRDoc was found, checking it as information" + $ENGINE run --rm -v $PWD:/repo $IMAGE check -n ${GITHUB_PR} || echo "Ignoring failure" + else + echo "PR detected as silent, no PRDoc found, exiting..." + fi + echo "If you want to add a PRDoc, please refer to $PRDOC_DOC" exit 0 - - name: Checkout repo - if: ${{ !contains(steps.get-labels.outputs.labels, 'R0') }} - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 #v4.1.1 - - name: PRdoc check for PR#${{ github.event.pull_request.number }} if: ${{ !contains(steps.get-labels.outputs.labels, 'R0') }} run: | - echo "Checking for PR#${GITHUB_PR} in $MOUNT" - $ENGINE run --rm -v $PWD/prdoc:/doc $IMAGE check -n ${GITHUB_PR} || true + echo "Checking for PR#${GITHUB_PR}" + echo "You can find more information about PRDoc at $PRDOC_DOC" + $ENGINE run --rm -v $PWD:/repo $IMAGE check -n ${GITHUB_PR} diff --git a/.prdoc.toml b/.prdoc.toml new file mode 100644 index 00000000000..01e2eebe54b --- /dev/null +++ b/.prdoc.toml @@ -0,0 +1,7 @@ +# Config file for prdoc, see https://github.com/paritytech/prdoc + +version = 1 +schema = "prdoc/schema_user.json" +output_dir = "prdoc" +prdoc_folders = ["prdoc"] +template = "prdoc/.template.prdoc" diff --git a/docs/CONTRIBUTING.md b/docs/CONTRIBUTING.md index 1f68e2979f2..3350d134414 100644 --- a/docs/CONTRIBUTING.md +++ b/docs/CONTRIBUTING.md @@ -93,22 +93,12 @@ The reviewers are also responsible to check: All Pull Requests must contain proper title & description. 
-Some Pull Requests can be exempt of `prdoc` documentation, those -must be labelled with +Some Pull Requests can be exempt of `prdoc` documentation, those must be labelled with [`R0-silent`](https://github.com/paritytech/labels/blob/main/ruled_labels/specs_polkadot-sdk.yaml#L89-L91). Non "silent" PRs must come with documentation in the form of a `.prdoc` file. -A `.prdoc` documentation is made of a text file (YAML) named `/prdoc/pr_NNNN.prdoc` where `NNNN` is the PR number. -For convenience, those file can also contain a short description/title: `/prdoc/pr_NNNN_pr-foobar.prdoc`. -The CI automation checks for the presence and validity of a `prdoc` in the `/prdoc` folder. -Those files need to comply with a specific [schema](https://github.com/paritytech/prdoc/blob/master/schema_user.json). It -is highly recommended to [make your editor aware](https://github.com/paritytech/prdoc#schemas) of the schema as it is -self-described and will assist you in writing correct content. - -This schema is also embedded in the -[prdoc](https://github.com/paritytech/prdoc) utility that can also be used to generate and check the validity of a -`prdoc` locally. +See more about `prdoc` [here](./prdoc.md) ## Helping out diff --git a/docs/DOCUMENTATION_GUIDELINES.md b/docs/DOCUMENTATION_GUIDELINES.md index 5d1164e8ca8..96811a2772d 100644 --- a/docs/DOCUMENTATION_GUIDELINES.md +++ b/docs/DOCUMENTATION_GUIDELINES.md @@ -225,7 +225,7 @@ For the top-level pallet docs, consider the following template: //! //! ## Pallet API //! -//! //! //! See the [`pallet`] module for more information about the interfaces this pallet exposes, including its @@ -349,3 +349,7 @@ Consider the fact that, similar to dispatchables, these docs will be part of the and might be used by wallets and explorers. Specifically for `error`, explain why the error has happened, and what can be done in order to avoid it. + +## Documenting Changes/PR + +See [PRDoc](./prdoc.md). diff --git a/docs/prdoc.md b/docs/prdoc.md new file mode 100644 index 00000000000..af0ede5107a --- /dev/null +++ b/docs/prdoc.md @@ -0,0 +1,71 @@ +# PRDoc + +## Intro + +With the merge of [PR #1946](https://github.com/paritytech/polkadot-sdk/pull/1946), a new method for +documenting changes has been introduced: `prdoc`. The [prdoc repository](https://github.com/paritytech/prdoc) +contains more documentation and tooling. + +The current document describes how to quickly get started authoring `PRDoc` files. + +## Requirements + +When creating a PR, the author needs to decides with the `R0` label whether the change (PR) should +appear in the release notes or not. + +Labelling a PR with `R0` means that no `PRDoc` is required. + +A PR without the `R0` label **does** require a valid `PRDoc` file to be introduced in the PR. + +## PRDoc how-to + +A `.prdoc` file is a YAML file with a defined structure (ie JSON Schema). + +For significant changes, a `.prdoc` file is mandatory and the file must meet the following +requirements: +- file named `pr_NNNN.prdoc` where `NNNN` is the PR number. + For convenience, those file can also contain a short description: `pr_NNNN_foobar.prdoc`. +- located under the [`prdoc` folder](https://github.com/paritytech/polkadot-sdk/tree/master/prdoc) of the repository +- compliant with the [JSON schema](https://json-schema.org/) defined in `prdoc/schema_user.json` + +Those requirements can be fulfilled manually without any tooling but a text editor. 
+ +## Tooling + +Users might find the following helpers convenient: +- Setup VSCode to be aware of the prdoc schema: see [using VSCode](https://github.com/paritytech/prdoc#using-vscode) +- Using the `prdoc` cli to: + - generate a `PRDoc` file from a [template defined in the Polkadot SDK + repo](https://github.com/paritytech/polkadot-sdk/blob/master/prdoc/.template.prdoc) simply providing a PR number + - check the validity of one or more `PRDoc` files + +## `prdoc` cli usage + +The `prdoc` cli documentation can be found at https://github.com/paritytech/prdoc#prdoc + +tldr: +- `prdoc generate ` +- `prdoc check -n ` + +where is the PR number. + +## Pick an audience + +While describing a PR, the author needs to consider which audience(s) need to be addressed. +The list of valid audiences is described and documented in the JSON schema as follow: + +- `Node Dev`: Those who build around the client side code. Alternative client builders, SMOLDOT, those who consume RPCs. + These are people who are oblivious to the runtime changes. They only care about the meta-protocol, not the protocol + itself. + +- `Runtime Dev`: All of those who rely on the runtime. A parachain team that is using a pallet. A DApp that is using a + pallet. These are people who care about the protocol (WASM), not the meta-protocol (client). + +- `Node Operator`: Those who don't write any code and only run code. + +- `Runtime User`: Anyone using the runtime. This can be a token holder or a dev writing a front end for a chain. + +## Tips + +The PRDoc schema is defined in each repo and usually is quite restrictive. +You cannot simply add a new property to a `PRDoc` file unless the Schema allows it. diff --git a/prdoc/.template.prdoc b/prdoc/.template.prdoc new file mode 100644 index 00000000000..097741f388c --- /dev/null +++ b/prdoc/.template.prdoc @@ -0,0 +1,11 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: ... + +doc: + - audience: Node Dev + description: | + ... + +crates: [ ] diff --git a/prdoc/pr_1226.prdoc b/prdoc/pr_1226.prdoc index df7a425b538..caef324bfd0 100644 --- a/prdoc/pr_1226.prdoc +++ b/prdoc/pr_1226.prdoc @@ -1,17 +1,12 @@ title: Removed deprecated `Balances::transfer` and `Balances::set_balance_deprecated` functions. doc: - - audience: Builder - description: The Balances pallet's dispatchables `set_balance_deprecated` and `transfer` were deprecated in [paritytech/substrate#12951](https://github.com/paritytech/substrate/pull/12951) and have now been removed. - notes: - - Use `set_balance_deprecated` instead `force_set_balance` and `transfer_allow_death` instead of `transfer`. - -migrations: - db: [] + - audience: Runtime User + description: | + The Balances pallet's dispatchables `set_balance_deprecated` and `transfer` were deprecated in [paritytech/substrate#12951](https://github.com/paritytech/substrate/pull/12951) and have now been removed. - runtime: [] + notes: + - Use `set_balance_deprecated` instead `force_set_balance` and `transfer_allow_death` instead of `transfer`. crates: - name: pallet-balances - -host_functions: [] diff --git a/prdoc/pr_1234.prdoc b/prdoc/pr_1234.prdoc index cc22a02d88b..e1e5d71050a 100644 --- a/prdoc/pr_1234.prdoc +++ b/prdoc/pr_1234.prdoc @@ -4,17 +4,10 @@ title: Introduce XcmFeesToAccount fee manager doc: - - audience: Builder + - audience: Runtime User description: | Now all XCM sending, unless done by the system for the system, will be charged delivery fees. 
All runtimes are now configured to send these delivery fees to a treasury account. The fee formula is `delivery_fee_factor * (base_fee + encoded_msg_len * per_byte_fee)`. -migrations: - db: [] - - runtime: [] - -crates: [] - -host_functions: [] +crates: [ ] diff --git a/prdoc/pr_1255.prdoc b/prdoc/pr_1255.prdoc index 793b5c3c859..c00a7c307e9 100644 --- a/prdoc/pr_1255.prdoc +++ b/prdoc/pr_1255.prdoc @@ -4,19 +4,18 @@ title: Fix for Reward Deficit in the pool doc: - - audience: Core Dev - description: Instead of fragile calculation of current balance by looking at free balance - ED, Nomination Pool now freezes ED in the pool reward account to restrict an account from going below minimum balance. This also has a nice side effect that if ED changes, we know how much is the imbalance in ED frozen in the pool and the current required ED. A pool operator can diligently top up the pool with the deficit in ED or vice versa, withdraw the excess they transferred to the pool. - notes: + - audience: Runtime Dev + description: | + Instead of fragile calculation of current balance by looking at free balance - ED, Nomination Pool now freezes ED in the pool reward account to restrict an account from going below minimum balance. This also has a nice side effect that if ED changes, we know how much is the imbalance in ED frozen in the pool and the current required ED. A pool operator can diligently top up the pool with the deficit in ED or vice versa, withdraw the excess they transferred to the pool. + + notes: - Introduces new call `adjust_pool_deposit` that allows to top up the deficit or withdraw the excess deposit for the pool. - Switch to using Fungible trait from Currency trait. migrations: - db: [] - runtime: - - { pallet: "pallet-nomination-pools", description: "One time migration of freezing ED from each of the existing pools."} + - reference: pallet-nomination-pools + description: One time migration of freezing ED from each of the existing pools. crates: - name: pallet-nomination-pools - -host_functions: [] \ No newline at end of file diff --git a/prdoc/pr_1408_prodc-introduction.prdoc b/prdoc/pr_1408_prodc-introduction.prdoc index 4b10e0fe2e8..85b4661b127 100644 --- a/prdoc/pr_1408_prodc-introduction.prdoc +++ b/prdoc/pr_1408_prodc-introduction.prdoc @@ -2,18 +2,11 @@ title: PRdoc check doc: - - audience: Core Dev + - audience: Node Dev description: | This PRdoc is an **example**. This PR brings support and automated checks for documentation in the form of a [`prdoc`](https://github.com/paritytech/prdoc/) file. -migrations: - db: [] - - runtime: [] - -crates: [] - -host_functions: [] +crates: [ ] diff --git a/prdoc/pr_1818.prdoc b/prdoc/pr_1818.prdoc index cbafa02f9af..0f59a0f9124 100644 --- a/prdoc/pr_1818.prdoc +++ b/prdoc/pr_1818.prdoc @@ -1,16 +1,9 @@ title: FRAME pallets warning for unchecked weight witness doc: - - audience: Core Dev + - audience: Runtime Dev description: | FRAME pallets now emit a warning when a call uses a function argument that starts with an underscore in its weight declaration. 
-migrations: - db: [ ] - runtime: [ ] - -host_functions: [] - crates: -- name: "frame-support-procedural" - semver: minor + - name: frame-support-procedural diff --git a/prdoc/pr_1873.prdoc b/prdoc/pr_1873.prdoc index 6f3bc7646db..c22b732c72f 100644 --- a/prdoc/pr_1873.prdoc +++ b/prdoc/pr_1873.prdoc @@ -1,15 +1,9 @@ title: Message Queue use proper overweight limit doc: - - audience: Core Dev + - audience: Node Dev description: | Changed the overweight cutoff limit from the full `Config::ServiceWeight` to a lower value that is calculated based on the weight of the functions being called. -migrations: - db: [] - - runtime: [] - -crates: ["pallet-message-queue", patch] - -host_functions: [] +crates: + - name: pallet-message-queue diff --git a/prdoc/pr_1913.prdoc b/prdoc/pr_1913.prdoc index 155057054eb..c2e7627c9ac 100644 --- a/prdoc/pr_1913.prdoc +++ b/prdoc/pr_1913.prdoc @@ -7,13 +7,6 @@ doc: If experiencing stability issues caused by BEEFY, it can be disabled using `--no-beefy` flag. BEEFY doesn't (yet) support warp sync. So, attempting to Warp sync as a validator will throw an error. -migrations: - db: [] - - runtime: [] - crates: - name: polkadot-cli - name: polkadot-service - -host_functions: [] diff --git a/prdoc/pr_1921.prdoc b/prdoc/pr_1921.prdoc index 5ed0137cd5f..e71a68fa829 100644 --- a/prdoc/pr_1921.prdoc +++ b/prdoc/pr_1921.prdoc @@ -1,19 +1,14 @@ title: Fix para-scheduler migration doc: - - audience: Core Dev + - audience: Runtime Dev description: | Changing the `MigrateToV1` migration in the `ParachainScheduler` pallet to be truly idempotent. It is achieved by wrapping it in a `VersionedMigration`. migrations: - db: [] - runtime: - - pallet: "ParachainScheduler" + - reference: ParachainScheduler description: Non-critical fixup for `MigrateToV1`. crates: - - name: "polkadot-runtime-parachains" - semver: patch - -host_functions: [] + - name: polkadot-runtime-parachains diff --git a/prdoc/pr_1946_prdoc_new_schema.prdoc b/prdoc/pr_1946_prdoc_new_schema.prdoc new file mode 100644 index 00000000000..c0632177738 --- /dev/null +++ b/prdoc/pr_1946_prdoc_new_schema.prdoc @@ -0,0 +1,14 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: New PRDoc Schema + +doc: + - audience: Node Dev + description: &desc | + The new version of prdoc and the new schema is activated in this PR. + + - audience: Runtime Dev + description: *desc + +crates: [] diff --git a/prdoc/schema_user.json b/prdoc/schema_user.json new file mode 100644 index 00000000000..60ff28d3626 --- /dev/null +++ b/prdoc/schema_user.json @@ -0,0 +1,212 @@ +{ + "$schema": "https://json-schema.org/draft/2020-12/schema#", + "$id": "https://raw.githubusercontent.com/paritytech/prdoc/master/prdoc_schema_user.json", + "version": { + "major": 1, + "minor": 0, + "patch": 0, + "timestamp": 20230817152351 + }, + "title": "Polkadot SDK PRDoc Schema", + "description": "JSON Schema definition for the Polkadot SDK PR documentation", + "type": "object", + "additionalProperties": false, + "properties": { + "title": { + "title": "Title of the change", + "type": "string", + "description": "Title for the PR. This is what will show up in the release notes.\nif needed, you may provide a different title override for each audience in the `doc` property." + }, + + "doc": { + "type": "array", + "title": "Documentation adapted to the audience(s)", + "description": "Description of the PR. 
Provide a description for each relevant audience.\nSee the `audience` property for more documentation about audiences", + "items": { + "$ref": "#/$defs/doc" + }, + "minItems": 1 + }, + + "crates": { + "title": "Crates", + "description": "You have the option to provide a hint about the crates that have noticeable changes.\n This is used during the crate publishing to crates.io and to help users understand the impact of the changes introduced in your PR.", + "type": "array", + "items": { + "$ref": "#/$defs/crate" + } + }, + + "migrations": { + "title": "Migrations (DB & Runtime)", + "description": "It is important for users to be aware of migrations.\nMake sure to mention any migrations in the appropriate sub-properties:\n- db\n- runtime", + "type": "object", + "properties": { + "db": { + "type": "array", + "nullable": false, + "title": "Database Migration", + "description": "List of the Database Migrations or empty array: []", + "items": { + "$ref": "#/$defs/migration_db" + }, + "minItems": 0, + "required": [ + "name", + "description" + ] + }, + "runtime": { + "type": "array", + "title": "Runtime Migration", + "nullable": false, + "description": "List of the Runtime Migrations or empty array: []", + "minItems": 0, + "items": { + "$ref": "#/$defs/migration_runtime" + }, + "required": [ + "db", + "runtime" + ] + } + } + }, + "host_functions": { + "title": "Host Functions", + "description": "List of the host functions involved in this PR.", + "type": "array", + "items": { + "$ref": "#/$defs/host_function" + } + } + }, + "required": [ + "title", + "doc", + "crates" + ], + "$defs": { + "audience": { + "description": "You may pick one or more audiences and address those users with appropriate documentation, information and warning related to the PR.", + "oneOf": [ + {"const": "Node Dev", + "title": "Node Dev", + "description": "Those who build around the client side code. Alternative client builders, SMOLDOT, those who consume RPCs. These are people who are oblivious to the runtime changes. They only care about the meta-protocol, not the protocol itself."}, + + {"const": "Runtime Dev", + "title": "Runtime Dev", + "description": "All of those who rely on the runtime. A parachain team that is using a pallet. A DApp that is using a pallet. These are people who care about the protocol (WASM), not the meta-protocol (client)."}, + + {"const": "Node Operator", + "title": "Node Operator", + "description": "Those who don't write any code and only run code."}, + + {"const": "Runtime User", + "title": "Runtime User", + "description": "Anyone using the runtime. 
This can be a token holder or a dev writing a front end for a chain."} + ] + }, + "crate": { + "type": "object", + "description": "You have the option here to provide a hint about a crate that has changed to help with the publishing of crates.", + "additionalProperties": false, + "properties": { + "name": { + "type": "string" + }, + "note": { + "type": "string" + } + } + }, + "migration_db": { + "type": "object", + "description": "This property allows the documentation of database migrations.", + "properties": { + "name": { + "type": "string" + }, + "description": { + "type": "string" + } + }, + "additionalProperties": false, + "required": [ + "name", + "description" + ] + }, + "migration_runtime": { + "type": "object", + "description": "This property allows the documentation of runtime migrations.", + "properties": { + "reference": { + "title": "Migration reference", + "description": "Reference to the runtime migration", + "type": "string" + }, + "description": { + "type": "string" + } + }, + "additionalProperties": false, + "required": [ + "description" + ] + }, + "doc": { + "type": "object", + "description": "You have the the option to provide different description of your PR for different audiences.", + "additionalProperties": false, + "properties": { + "audience": { + "description": "The selected audience", + "$ref": "#/$defs/audience" + }, + "title": { + "type": "string", + "title": "Title for the audience", + "description": "Optional title override for the PR and for the current audience" + }, + "description": { + "title": "Description for the audience", + "description": "Description of the change", + "type": "string" + } + } + }, + "array_of_strings": { + "description": "An array of strings that can be empty", + "type": "array", + "items": { + "type": "string" + } + }, + "host_function": { + "type": "object", + "additionalProperties": false, + "title": "Host Functions", + "description": "List of host functions and their descriptions", + "properties": { + "name": { + "title": "Host function name", + "description": "Name or identifier to find the host function in the codebase", + "type": "string" + }, + "description": { + "title": "Host function description", + "description": "Short description of the host function", + "type": "string" + }, + "notes": { + "type": "string" + } + }, + "required": [ + "name", + "description" + ] + } + } + } -- GitLab From 84559b967be42a62c6a4844d06f626c64295c412 Mon Sep 17 00:00:00 2001 From: Alexander Samusev <41779041+alvicsam@users.noreply.github.com> Date: Mon, 4 Dec 2023 16:28:47 +0100 Subject: [PATCH 186/199] [ci] Fix environment for merge-queue GHA (#2600) --- .github/workflows/merge-queue.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/merge-queue.yml b/.github/workflows/merge-queue.yml index f3fb7765ca6..cce326f4493 100644 --- a/.github/workflows/merge-queue.yml +++ b/.github/workflows/merge-queue.yml @@ -6,7 +6,7 @@ on: jobs: trigger-merge-queue-action: runs-on: ubuntu-latest - environment: master + environment: merge-queues steps: - name: Generate token id: app_token -- GitLab From 01bbd63d2ddc29e20413267a1ca1623856e51e58 Mon Sep 17 00:00:00 2001 From: Branislav Kontur Date: Mon, 4 Dec 2023 17:02:41 +0100 Subject: [PATCH 187/199] Align style guide for `default = [ "std" ]` vs `default = ["std"]` (#2603) --- substrate/docs/STYLE_GUIDE.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/substrate/docs/STYLE_GUIDE.md b/substrate/docs/STYLE_GUIDE.md index 6ea0755d080..d5e703b3fdf 100644 --- 
a/substrate/docs/STYLE_GUIDE.md +++ b/substrate/docs/STYLE_GUIDE.md @@ -157,7 +157,7 @@ format looks like this: - The feature is written as a single line if it fits within 80 chars: ```toml [features] -default = [ "std" ] +default = ["std"] ``` - Otherwise the feature is broken down into multiple lines with one entry per line. Each line is padded with one tab and -- GitLab From aa4754e30a8cfb5e9be3d88d3771df4232a3f5ee Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Mon, 4 Dec 2023 17:03:47 +0100 Subject: [PATCH 188/199] contracts-fixtures: Do not assume that `rustup` is installed (#2586) The build script was assuming that `rustup` is installed, which breaks the build on systems that do not use `rustup`. This pull request just fixes it by not panicking on the call to `rustup`. --- substrate/frame/contracts/fixtures/build.rs | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/substrate/frame/contracts/fixtures/build.rs b/substrate/frame/contracts/fixtures/build.rs index ada2650c6db..49deb94a7fa 100644 --- a/substrate/frame/contracts/fixtures/build.rs +++ b/substrate/frame/contracts/fixtures/build.rs @@ -147,9 +147,7 @@ fn invoke_cargo_fmt<'a>( if !Command::new("rustup") .args(&["run", "nightly", "rustfmt", "--version"]) .output() - .expect("failed to execute process") - .status - .success() + .map_or(false, |o| o.status.success()) { return Ok(()) } @@ -169,10 +167,12 @@ fn invoke_cargo_fmt<'a>( let stderr = String::from_utf8_lossy(&fmt_res.stderr); eprintln!("{}\n{}", stdout, stderr); eprintln!( - "Fixtures files are not formatted.\nPlease run `rustup run nightly rustfmt --config-path {} {}/*.rs`", + "Fixtures files are not formatted.\n + Please run `rustup run nightly rustfmt --config-path {} {}/*.rs`", config_path.display(), contract_dir.display() ); + anyhow::bail!("Fixtures files are not formatted") } -- GitLab From e6b9da132dcb0570870a6f319fbe5aa7e5030b76 Mon Sep 17 00:00:00 2001 From: Evgeny Snitko Date: Mon, 4 Dec 2023 20:07:22 +0400 Subject: [PATCH 189/199] Cargo caching with forklift (#2466) This PR adds cargo caching feature with custom ['forklift' tool](https://gitlab.parity.io/parity/infrastructure/ci_cd/forklift/forklift) Forklift acts as RUSTC_WRAPPER, intercepts rustc calls and stores produced artifacts in S3 bucket (see [forklift readme](https://gitlab.parity.io/parity/infrastructure/ci_cd/forklift/forklift/-/blob/main/README.MD?ref_type=heads) for detailed description) All settings are made in [`.forklift` job's before_script](https://github.com/paritytech/polkadot-sdk/blob/es/forklift-test/.gitlab-ci.yml#L119) and affect all jobs that extend `.docker-env` job To disable feature set `FORKLIFT_BYPASS` variable to true in [project settings in gitlab](https://gitlab.parity.io/parity/mirrors/polkadot-sdk/-/settings/ci_cd) --- .gitlab-ci.yml | 39 ++++++++++++++++++++++++++------------- 1 file changed, 26 insertions(+), 13 deletions(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index aada30f1dda..e8a91568ccf 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -106,27 +106,37 @@ default: .docker-env: image: "${CI_IMAGE}" + variables: + FL_FORKLIFT_VERSION: !reference [.forklift, variables, FL_FORKLIFT_VERSION] before_script: - !reference [.common-before-script, before_script] - !reference [.prepare-env, before_script] - !reference [.rust-info-script, script] - - !reference [.rusty-cachier, before_script] + - !reference [.forklift-cache, before_script] tags: - linux-docker -# rusty-cachier's hidden job. 
Parts of this job are used to instrument the pipeline's other real jobs with rusty-cachier -# rusty-cachier's commands are described here: https://gitlab.parity.io/parity/infrastructure/ci_cd/rusty-cachier/client#description -.rusty-cachier: +# +.forklift-cache: before_script: - # - curl -s https://gitlab-ci-token:${CI_JOB_TOKEN}@gitlab.parity.io/parity/infrastructure/ci_cd/rusty-cachier/client/-/raw/release/util/install.sh | bash - # - mkdir -p cargo_home cargo_target_dir - # - export CARGO_HOME=$CI_PROJECT_DIR/cargo_home - # - export CARGO_TARGET_DIR=$CI_PROJECT_DIR/cargo_target_dir - # - find . \( -path ./cargo_target_dir -o -path ./cargo_home \) -prune -o -type f -exec touch -t 202005260100 {} + - # - git restore-mtime - # - rusty-cachier --version - # - rusty-cachier project touch-changed - - echo tbd + - 'curl --header "PRIVATE-TOKEN: $FL_CI_GROUP_TOKEN" -o forklift -L "${CI_API_V4_URL}/projects/676/packages/generic/forklift/${FL_FORKLIFT_VERSION}/forklift_${FL_FORKLIFT_VERSION}_linux_amd64"' + - chmod +x forklift + - mkdir .forklift + - cp $FL_FORKLIFT_CONFIG .forklift/config.toml + - export FORKLIFT_PACKAGE_SUFFIX=${CI_JOB_NAME/ [0-9 \/]*} + - shopt -s expand_aliases + - export PATH=$PATH:$(pwd) + - | + if [ "$FORKLIFT_BYPASS" != "true" ]; then + echo "FORKLIFT_BYPASS not set, creating alias cargo='forklift cargo'" + alias cargo="forklift cargo" + fi + - ls -al + - rm -f forklift.sock + - forklift clean + # + - echo "FL_FORKLIFT_VERSION ${FL_FORKLIFT_VERSION}" + - echo "FORKLIFT_PACKAGE_SUFFIX $FORKLIFT_PACKAGE_SUFFIX" .common-refs: rules: @@ -214,6 +224,9 @@ include: - project: parity/infrastructure/ci_cd/shared ref: main file: /common/ci-unified.yml + - project: parity/infrastructure/ci_cd/shared + ref: main + file: /common/forklift.yml # This job cancels the whole pipeline if any of provided jobs fail. # In a DAG, every jobs chain is executed independently of others. The `fail_fast` principle suggests # to fail the pipeline as soon as possible to shorten the feedback loop. -- GitLab From a1b2ecb902d6555880f1bd76027d3a5c03f38c26 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Mon, 4 Dec 2023 19:43:27 +0100 Subject: [PATCH 190/199] pallet-ranked-collective: Ensure to cleanup state in `remove_member` (#2591) This ensures that we cleanup the state of `who` in `remove_member`. --------- Co-authored-by: Liam Aharon --- prdoc/pr_2591.prdoc | 9 +++ substrate/frame/ranked-collective/src/lib.rs | 25 ++++---- .../frame/ranked-collective/src/tests.rs | 57 ++++++++++--------- 3 files changed, 53 insertions(+), 38 deletions(-) create mode 100644 prdoc/pr_2591.prdoc diff --git a/prdoc/pr_2591.prdoc b/prdoc/pr_2591.prdoc new file mode 100644 index 00000000000..fe967cb6785 --- /dev/null +++ b/prdoc/pr_2591.prdoc @@ -0,0 +1,9 @@ +title: Ensure to cleanup state in remove_member + +doc: + - audience: Runtime Dev + description: | + Cleanes up the state properly if a member of a ranked collective is removed. 
+ +crates: + - name: pallet-ranked-collective diff --git a/substrate/frame/ranked-collective/src/lib.rs b/substrate/frame/ranked-collective/src/lib.rs index deb1ccf2357..51ee7d7144b 100644 --- a/substrate/frame/ranked-collective/src/lib.rs +++ b/substrate/frame/ranked-collective/src/lib.rs @@ -663,16 +663,21 @@ pub mod pallet { } fn remove_from_rank(who: &T::AccountId, rank: Rank) -> DispatchResult { - let last_index = MemberCount::::get(rank).saturating_sub(1); - let index = IdToIndex::::get(rank, &who).ok_or(Error::::Corruption)?; - if index != last_index { - let last = - IndexToId::::get(rank, last_index).ok_or(Error::::Corruption)?; - IdToIndex::::insert(rank, &last, index); - IndexToId::::insert(rank, index, &last); - } - MemberCount::::mutate(rank, |r| r.saturating_dec()); - Ok(()) + MemberCount::::try_mutate(rank, |last_index| { + last_index.saturating_dec(); + let index = IdToIndex::::get(rank, &who).ok_or(Error::::Corruption)?; + if index != *last_index { + let last = IndexToId::::get(rank, *last_index) + .ok_or(Error::::Corruption)?; + IdToIndex::::insert(rank, &last, index); + IndexToId::::insert(rank, index, &last); + } + + IdToIndex::::remove(rank, who); + IndexToId::::remove(rank, last_index); + + Ok(()) + }) } /// Adds a member into the ranked collective at level 0. diff --git a/substrate/frame/ranked-collective/src/tests.rs b/substrate/frame/ranked-collective/src/tests.rs index 3a557065160..60c0da3d7ac 100644 --- a/substrate/frame/ranked-collective/src/tests.rs +++ b/substrate/frame/ranked-collective/src/tests.rs @@ -23,13 +23,10 @@ use frame_support::{ assert_noop, assert_ok, derive_impl, error::BadOrigin, parameter_types, - traits::{ConstU16, ConstU32, ConstU64, EitherOf, Everything, MapSuccess, Polling}, -}; -use sp_core::{Get, H256}; -use sp_runtime::{ - traits::{BlakeTwo256, IdentityLookup, ReduceBy}, - BuildStorage, + traits::{ConstU16, EitherOf, MapSuccess, Polling}, }; +use sp_core::Get; +use sp_runtime::{traits::ReduceBy, BuildStorage}; use super::*; use crate as pallet_ranked_collective; @@ -47,29 +44,7 @@ frame_support::construct_runtime!( #[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] impl frame_system::Config for Test { - type BaseCallFilter = Everything; - type BlockWeights = (); - type BlockLength = (); - type DbWeight = (); - type RuntimeOrigin = RuntimeOrigin; - type Nonce = u64; - type RuntimeCall = RuntimeCall; - type Hash = H256; - type Hashing = BlakeTwo256; - type AccountId = u64; - type Lookup = IdentityLookup; type Block = Block; - type RuntimeEvent = RuntimeEvent; - type BlockHashCount = ConstU64<250>; - type Version = (); - type PalletInfo = PalletInfo; - type AccountData = (); - type OnNewAccount = (); - type OnKilledAccount = (); - type SystemWeightInfo = (); - type SS58Prefix = (); - type OnSetCode = (); - type MaxConsumers = ConstU32<16>; } #[derive(Clone, PartialEq, Eq, Debug)] @@ -442,6 +417,32 @@ fn cleanup_works() { }); } +#[test] +fn remove_member_cleanup_works() { + new_test_ext().execute_with(|| { + assert_ok!(Club::add_member(RuntimeOrigin::root(), 1)); + assert_ok!(Club::promote_member(RuntimeOrigin::root(), 1)); + assert_ok!(Club::add_member(RuntimeOrigin::root(), 2)); + assert_ok!(Club::promote_member(RuntimeOrigin::root(), 2)); + assert_ok!(Club::add_member(RuntimeOrigin::root(), 3)); + assert_ok!(Club::promote_member(RuntimeOrigin::root(), 3)); + + assert_eq!(IdToIndex::::get(1, 2), Some(1)); + assert_eq!(IndexToId::::get(1, 1), Some(2)); + + assert_eq!(IdToIndex::::get(1, 3), 
Some(2)); + assert_eq!(IndexToId::::get(1, 2), Some(3)); + + assert_ok!(Club::remove_member(RuntimeOrigin::root(), 2, 1)); + + assert_eq!(IdToIndex::::get(1, 2), None); + assert_eq!(IndexToId::::get(1, 1), Some(3)); + + assert_eq!(IdToIndex::::get(1, 3), Some(1)); + assert_eq!(IndexToId::::get(1, 2), None); + }); +} + #[test] fn ensure_ranked_works() { new_test_ext().execute_with(|| { -- GitLab From 6d50cd43c0ad7007a6c1d2bdec05e80c4f01e75e Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 4 Dec 2023 23:19:59 +0200 Subject: [PATCH 191/199] Bump multihash from 0.17.0 to 0.18.1 (#2592) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Bumps [multihash](https://github.com/multiformats/rust-multihash) from 0.17.0 to 0.18.1.
Changelog (sourced from multihash's changelog):

- v0.18.1 (2023-04-14): bug fixes.
- v0.18.0 (2022-12-06): ⚠ breaking changes: update to Rust edition 2021, and `Multihash::write()` now returns the number of bytes written (prior to this change it returned an empty tuple `()`). This release also includes new features and bug fixes.

See the upstream commits for the full list of changes.
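For downstream call sites, the digest API keeps the same shape after the move from `multihash` to `multihash-codetable`. A minimal illustrative sketch, not part of this diff, assuming the `sha2` feature enabled in the Cargo.toml change below:

```rust
use multihash_codetable::{Code, MultihashDigest};

fn main() {
    // `digest` comes from the `MultihashDigest` trait; it returns the multihash of the input.
    let multihash = Code::Sha2_256.digest(b"authority-id-bytes");
    // The code (0x12 for SHA2-256) and digest length are available on the result.
    println!("code: 0x{:x}, size: {}", multihash.code(), multihash.size());
}
```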
--------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: Sebastian Kunert --- Cargo.lock | 105 ++++++++++++++++-- .../client/authority-discovery/Cargo.toml | 10 +- .../client/authority-discovery/src/worker.rs | 2 +- 3 files changed, 108 insertions(+), 9 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 7bec4ad24ba..bbaa3d053a3 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2425,7 +2425,7 @@ checksum = "b9b68e3193982cd54187d71afdb2a271ad4cf8af157858e9cb911b91321de143" dependencies = [ "core2", "multibase", - "multihash", + "multihash 0.17.0", "serde", "unsigned-varint", ] @@ -7193,7 +7193,7 @@ dependencies = [ "libp2p-identity", "log", "multiaddr", - "multihash", + "multihash 0.17.0", "multistream-select", "once_cell", "parking_lot 0.12.1", @@ -7253,7 +7253,7 @@ dependencies = [ "ed25519-dalek", "log", "multiaddr", - "multihash", + "multihash 0.17.0", "quick-protobuf", "rand 0.8.5", "sha2 0.10.7", @@ -7500,7 +7500,7 @@ dependencies = [ "libp2p-identity", "libp2p-noise", "log", - "multihash", + "multihash 0.17.0", "quick-protobuf", "quick-protobuf-codec", "rand 0.8.5", @@ -8167,7 +8167,7 @@ dependencies = [ "data-encoding", "log", "multibase", - "multihash", + "multihash 0.17.0", "percent-encoding", "serde", "static_assertions", @@ -8197,12 +8197,55 @@ dependencies = [ "blake3", "core2", "digest 0.10.7", - "multihash-derive", + "multihash-derive 0.8.0", "sha2 0.10.7", "sha3", "unsigned-varint", ] +[[package]] +name = "multihash" +version = "0.18.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cfd8a792c1694c6da4f68db0a9d707c72bd260994da179e6030a5dcee00bb815" +dependencies = [ + "core2", + "digest 0.10.7", + "multihash-derive 0.8.0", + "sha2 0.10.7", + "unsigned-varint", +] + +[[package]] +name = "multihash" +version = "0.19.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "076d548d76a0e2a0d4ab471d0b1c36c577786dfc4471242035d97a12a735c492" +dependencies = [ + "core2", + "unsigned-varint", +] + +[[package]] +name = "multihash-codetable" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f6d815ecb3c8238d00647f8630ede7060a642c9f704761cd6082cb4028af6935" +dependencies = [ + "blake2b_simd", + "blake2s_simd", + "blake3", + "core2", + "digest 0.10.7", + "multihash-derive 0.9.0", + "ripemd", + "serde", + "sha1", + "sha2 0.10.7", + "sha3", + "strobe-rs", +] + [[package]] name = "multihash-derive" version = "0.8.0" @@ -8217,6 +8260,31 @@ dependencies = [ "synstructure", ] +[[package]] +name = "multihash-derive" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "890e72cb7396cb99ed98c1246a97b243cc16394470d94e0bc8b0c2c11d84290e" +dependencies = [ + "core2", + "multihash 0.19.1", + "multihash-derive-impl", +] + +[[package]] +name = "multihash-derive-impl" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d38685e08adb338659871ecfc6ee47ba9b22dcc8abcf6975d379cc49145c3040" +dependencies = [ + "proc-macro-crate 1.3.1", + "proc-macro-error", + "proc-macro2", + "quote", + "syn 1.0.109", + "synstructure", +] + [[package]] name = "multimap" version = "0.8.3" @@ -14220,6 +14288,15 @@ dependencies = [ "winapi", ] +[[package]] +name = "ripemd" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bd124222d17ad93a644ed9d011a40f4fb64aa54275c08cc216524a9ea82fb09f" +dependencies = [ + 
"digest 0.10.7", +] + [[package]] name = "rle-decode-fast" version = "1.0.3" @@ -14775,7 +14852,8 @@ dependencies = [ "ip_network", "libp2p", "log", - "multihash", + "multihash 0.18.1", + "multihash-codetable", "parity-scale-codec", "prost", "prost-build", @@ -18353,6 +18431,19 @@ dependencies = [ "syn 1.0.109", ] +[[package]] +name = "strobe-rs" +version = "0.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fabb238a1cccccfa4c4fb703670c0d157e1256c1ba695abf1b93bd2bb14bab2d" +dependencies = [ + "bitflags 1.3.2", + "byteorder", + "keccak", + "subtle 2.4.1", + "zeroize", +] + [[package]] name = "strsim" version = "0.10.0" diff --git a/substrate/client/authority-discovery/Cargo.toml b/substrate/client/authority-discovery/Cargo.toml index 4013c8951c4..1a4b23d3c6d 100644 --- a/substrate/client/authority-discovery/Cargo.toml +++ b/substrate/client/authority-discovery/Cargo.toml @@ -22,7 +22,10 @@ futures = "0.3.21" futures-timer = "3.0.1" ip_network = "0.4.1" libp2p = { version = "0.51.3", features = ["ed25519", "kad"] } -multihash = { version = "0.17.0", default-features = false, features = ["sha2", "std"] } +multihash = { version = "0.18.1", default-features = false, features = [ + "sha2", + "std", +] } log = "0.4.17" prost = "0.11" rand = "0.8.5" @@ -37,6 +40,11 @@ sp-core = { path = "../../primitives/core" } sp-keystore = { path = "../../primitives/keystore" } sp-runtime = { path = "../../primitives/runtime" } async-trait = "0.1.56" +multihash-codetable = { version = "0.1.1", features = [ + "serde", + "sha2", + "digest", +] } [dev-dependencies] quickcheck = { version = "1.0.3", default-features = false } diff --git a/substrate/client/authority-discovery/src/worker.rs b/substrate/client/authority-discovery/src/worker.rs index a29e74df9ac..6db25416dee 100644 --- a/substrate/client/authority-discovery/src/worker.rs +++ b/substrate/client/authority-discovery/src/worker.rs @@ -35,7 +35,7 @@ use addr_cache::AddrCache; use codec::{Decode, Encode}; use ip_network::IpNetwork; use libp2p::{core::multiaddr, identity::PublicKey, multihash::Multihash, Multiaddr, PeerId}; -use multihash::{Code, MultihashDigest}; +use multihash_codetable::{Code, MultihashDigest}; use log::{debug, error, log_enabled}; use prometheus_endpoint::{register, Counter, CounterVec, Gauge, Opts, U64}; -- GitLab From a9738adedb74e2ef068d433f2edb5690ebfc0088 Mon Sep 17 00:00:00 2001 From: Just van Stam Date: Mon, 4 Dec 2023 23:02:16 +0100 Subject: [PATCH 192/199] Fix XCMP max message size check (#1250) Improved max message size logic and added test for it --------- Co-authored-by: Francisco Aguirre Co-authored-by: command-bot <> --- cumulus/pallets/xcmp-queue/src/lib.rs | 14 ++++++-- cumulus/pallets/xcmp-queue/src/tests.rs | 44 +++++++++++++++++++++++++ 2 files changed, 56 insertions(+), 2 deletions(-) diff --git a/cumulus/pallets/xcmp-queue/src/lib.rs b/cumulus/pallets/xcmp-queue/src/lib.rs index d3443163d08..71cd21d45f7 100644 --- a/cumulus/pallets/xcmp-queue/src/lib.rs +++ b/cumulus/pallets/xcmp-queue/src/lib.rs @@ -454,11 +454,21 @@ impl Pallet { ) -> Result { let encoded_fragment = fragment.encode(); + // Optimization note: `max_message_size` could potentially be stored in + // `OutboundXcmpMessages` once known; that way it's only accessed when a new page is needed. + let channel_info = T::ChannelInfo::get_channel_info(recipient).ok_or(MessageSendError::NoChannel)?; - let max_message_size = channel_info.max_message_size as usize; // Max message size refers to aggregates, or pages. 
Not to individual fragments. - if encoded_fragment.len() > max_message_size { + let max_message_size = channel_info.max_message_size as usize; + let format_size = format.encoded_size(); + // We check the encoded fragment length plus the format size agains the max message size + // because the format is concatenated if a new page is needed. + let size_to_check = encoded_fragment + .len() + .checked_add(format_size) + .ok_or(MessageSendError::TooBig)?; + if size_to_check > max_message_size { return Err(MessageSendError::TooBig) } diff --git a/cumulus/pallets/xcmp-queue/src/tests.rs b/cumulus/pallets/xcmp-queue/src/tests.rs index f88dedc1ece..50c2a057d42 100644 --- a/cumulus/pallets/xcmp-queue/src/tests.rs +++ b/cumulus/pallets/xcmp-queue/src/tests.rs @@ -731,6 +731,50 @@ fn xcmp_queue_send_xcm_works() { }) } +#[test] +fn xcmp_queue_send_too_big_xcm_fails() { + new_test_ext().execute_with(|| { + let sibling_para_id = ParaId::from(12345); + let dest = (Parent, X1(Parachain(sibling_para_id.into()))).into(); + + let max_message_size = 100_u32; + + // open HRMP channel to the sibling_para_id with a set `max_message_size` + ParachainSystem::open_custom_outbound_hrmp_channel_for_benchmarks_or_tests( + sibling_para_id, + cumulus_primitives_core::AbridgedHrmpChannel { + max_message_size, + max_capacity: 10, + max_total_size: 10_000_000_u32, + msg_count: 0, + total_size: 0, + mqc_head: None, + }, + ); + + // Message is crafted to exceed `max_message_size` + let mut message = Xcm::builder_unsafe(); + for _ in 0..97 { + message = message.clear_origin(); + } + let message = message.build(); + let encoded_message_size = message.encode().len(); + let versioned_size = 1; // VersionedXcm enum is added by `send_xcm` and it add one additional byte + assert_eq!(encoded_message_size, max_message_size as usize - versioned_size); + + // check empty outbound queue + assert!(XcmpQueue::take_outbound_messages(usize::MAX).is_empty()); + + // Message is too big because after adding the VersionedXcm enum, it would reach + // `max_message_size` Then, adding the format, which is the worst case scenario in which a + // new page is needed, would get it over the limit + assert_eq!(send_xcm::(dest, message), Err(SendError::Transport("TooBig")),); + + // outbound queue is still empty + assert!(XcmpQueue::take_outbound_messages(usize::MAX).is_empty()); + }); +} + #[test] fn verify_fee_factor_increase_and_decrease() { use cumulus_primitives_core::AbridgedHrmpChannel; -- GitLab From 6bab88c662182fbf0f1cc7774e55681c9675eaf2 Mon Sep 17 00:00:00 2001 From: gupnik Date: Tue, 5 Dec 2023 08:31:09 +0530 Subject: [PATCH 193/199] Adds derive_impl to relay-chain and parachain runtimes (#2476) --- cumulus/parachain-template/runtime/src/lib.rs | 34 ++-------- .../assets/asset-hub-rococo/src/lib.rs | 16 +---- .../assets/asset-hub-westend/src/lib.rs | 16 +---- .../bridge-hubs/bridge-hub-rococo/src/lib.rs | 25 ++------ .../bridge-hubs/bridge-hub-westend/src/lib.rs | 25 ++------ .../collectives-westend/src/lib.rs | 13 +--- .../contracts/contracts-rococo/src/lib.rs | 16 ++--- .../glutton/glutton-westend/src/lib.rs | 17 +---- cumulus/test/runtime/src/lib.rs | 17 +---- polkadot/runtime/rococo/src/lib.rs | 17 ++--- polkadot/runtime/test-runtime/src/lib.rs | 16 +---- polkadot/runtime/westend/src/lib.rs | 16 ++--- .../bin/node-template/runtime/src/lib.rs | 36 ++--------- substrate/bin/node/runtime/src/lib.rs | 11 +--- substrate/frame/Cargo.toml | 2 +- substrate/frame/system/Cargo.toml | 1 - substrate/frame/system/src/lib.rs | 64 ++++++++++++++++++- 17 
files changed, 115 insertions(+), 227 deletions(-) diff --git a/cumulus/parachain-template/runtime/src/lib.rs b/cumulus/parachain-template/runtime/src/lib.rs index be76855c05b..1ef018a8ca3 100644 --- a/cumulus/parachain-template/runtime/src/lib.rs +++ b/cumulus/parachain-template/runtime/src/lib.rs @@ -16,7 +16,7 @@ use sp_api::impl_runtime_apis; use sp_core::{crypto::KeyTypeId, OpaqueMetadata}; use sp_runtime::{ create_runtime_str, generic, impl_opaque_keys, - traits::{AccountIdLookup, BlakeTwo256, Block as BlockT, IdentifyAccount, Verify}, + traits::{BlakeTwo256, Block as BlockT, IdentifyAccount, Verify}, transaction_validity::{TransactionSource, TransactionValidity}, ApplyExtrinsicResult, MultiSignature, }; @@ -28,13 +28,11 @@ use sp_version::RuntimeVersion; use cumulus_primitives_core::{AggregateMessageOrigin, ParaId}; use frame_support::{ - construct_runtime, + construct_runtime, derive_impl, dispatch::DispatchClass, genesis_builder_helper::{build_config, create_default_config}, parameter_types, - traits::{ - ConstBool, ConstU32, ConstU64, ConstU8, EitherOfDiverse, Everything, TransformOrigin, - }, + traits::{ConstBool, ConstU32, ConstU64, ConstU8, EitherOfDiverse, TransformOrigin}, weights::{ constants::WEIGHT_REF_TIME_PER_SECOND, ConstantMultiplier, Weight, WeightToFeeCoefficient, WeightToFeeCoefficients, WeightToFeePolynomial, @@ -275,45 +273,27 @@ parameter_types! { pub const SS58Prefix: u16 = 42; } -// Configure FRAME pallets to include in runtime. - +/// The default types are being injected by [`derive_impl`](`frame_support::derive_impl`) from +/// [`ParaChainDefaultConfig`](`struct@frame_system::config_preludes::ParaChainDefaultConfig`), +/// but overridden as needed. +#[derive_impl(frame_system::config_preludes::ParaChainDefaultConfig as frame_system::DefaultConfig)] impl frame_system::Config for Runtime { /// The identifier used to distinguish between accounts. type AccountId = AccountId; - /// The aggregated dispatch type that is available for extrinsics. - type RuntimeCall = RuntimeCall; - /// The lookup mechanism to get account ID from whatever is passed in dispatchers. - type Lookup = AccountIdLookup; /// The index type for storing how many extrinsics an account has signed. type Nonce = Nonce; /// The type for hashing blocks and tries. type Hash = Hash; - /// The hashing algorithm used. - type Hashing = BlakeTwo256; /// The block type. type Block = Block; - /// The ubiquitous event type. - type RuntimeEvent = RuntimeEvent; - /// The ubiquitous origin type. - type RuntimeOrigin = RuntimeOrigin; /// Maximum number of block number to block hash mappings to keep (oldest pruned first). type BlockHashCount = BlockHashCount; /// Runtime version. type Version = Version; - /// Converts a module to an index of this module in the runtime. - type PalletInfo = PalletInfo; /// The data to be stored in an account. type AccountData = pallet_balances::AccountData; - /// What to do if a new account is created. - type OnNewAccount = (); - /// What to do if an account is fully reaped from the system. - type OnKilledAccount = (); /// The weight of database operations that the runtime can invoke. type DbWeight = RocksDbWeight; - /// The basic call filter to use in dispatchable. - type BaseCallFilter = Everything; - /// Weight information for the extrinsics of this pallet. - type SystemWeightInfo = (); /// Block & extrinsics weights: base values and limits. type BlockWeights = RuntimeBlockWeights; /// The maximum length of a block (in bytes). 
diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs index 6b1948e7125..46bca804a9c 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs @@ -39,9 +39,7 @@ use sp_api::impl_runtime_apis; use sp_core::{crypto::KeyTypeId, OpaqueMetadata}; use sp_runtime::{ create_runtime_str, generic, impl_opaque_keys, - traits::{ - AccountIdConversion, AccountIdLookup, BlakeTwo256, Block as BlockT, Saturating, Verify, - }, + traits::{AccountIdConversion, BlakeTwo256, Block as BlockT, Saturating, Verify}, transaction_validity::{TransactionSource, TransactionValidity}, ApplyExtrinsicResult, Permill, }; @@ -54,7 +52,7 @@ use sp_version::RuntimeVersion; use codec::{Decode, Encode, MaxEncodedLen}; use cumulus_primitives_core::ParaId; use frame_support::{ - construct_runtime, + construct_runtime, derive_impl, dispatch::DispatchClass, genesis_builder_helper::{build_config, create_default_config}, ord_parameter_types, parameter_types, @@ -166,25 +164,17 @@ parameter_types! { } // Configure FRAME pallets to include in runtime. +#[derive_impl(frame_system::config_preludes::ParaChainDefaultConfig as frame_system::DefaultConfig)] impl frame_system::Config for Runtime { - type BaseCallFilter = frame_support::traits::Everything; type BlockWeights = RuntimeBlockWeights; type BlockLength = RuntimeBlockLength; type AccountId = AccountId; - type RuntimeCall = RuntimeCall; - type Lookup = AccountIdLookup; type Nonce = Nonce; type Hash = Hash; - type Hashing = BlakeTwo256; type Block = Block; - type RuntimeEvent = RuntimeEvent; - type RuntimeOrigin = RuntimeOrigin; type BlockHashCount = BlockHashCount; type DbWeight = RocksDbWeight; type Version = Version; - type PalletInfo = PalletInfo; - type OnNewAccount = (); - type OnKilledAccount = (); type AccountData = pallet_balances::AccountData; type SystemWeightInfo = weights::frame_system::WeightInfo; type SS58Prefix = SS58Prefix; diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs index bd97c1c2ca5..dfe4990df96 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs @@ -38,7 +38,7 @@ use codec::{Decode, Encode, MaxEncodedLen}; use cumulus_pallet_parachain_system::RelayNumberStrictlyIncreases; use cumulus_primitives_core::{AggregateMessageOrigin, ParaId}; use frame_support::{ - construct_runtime, + construct_runtime, derive_impl, dispatch::DispatchClass, genesis_builder_helper::{build_config, create_default_config}, ord_parameter_types, parameter_types, @@ -69,9 +69,7 @@ use sp_api::impl_runtime_apis; use sp_core::{crypto::KeyTypeId, OpaqueMetadata}; use sp_runtime::{ create_runtime_str, generic, impl_opaque_keys, - traits::{ - AccountIdConversion, AccountIdLookup, BlakeTwo256, Block as BlockT, Saturating, Verify, - }, + traits::{AccountIdConversion, BlakeTwo256, Block as BlockT, Saturating, Verify}, transaction_validity::{TransactionSource, TransactionValidity}, ApplyExtrinsicResult, Perbill, Permill, RuntimeDebug, }; @@ -150,25 +148,17 @@ parameter_types! { } // Configure FRAME pallets to include in runtime. 
+#[derive_impl(frame_system::config_preludes::ParaChainDefaultConfig as frame_system::DefaultConfig)] impl frame_system::Config for Runtime { - type BaseCallFilter = frame_support::traits::Everything; type BlockWeights = RuntimeBlockWeights; type BlockLength = RuntimeBlockLength; type AccountId = AccountId; - type RuntimeCall = RuntimeCall; - type Lookup = AccountIdLookup; type Nonce = Nonce; type Hash = Hash; - type Hashing = BlakeTwo256; type Block = Block; - type RuntimeEvent = RuntimeEvent; - type RuntimeOrigin = RuntimeOrigin; type BlockHashCount = BlockHashCount; type DbWeight = RocksDbWeight; type Version = Version; - type PalletInfo = PalletInfo; - type OnNewAccount = (); - type OnKilledAccount = (); type AccountData = pallet_balances::AccountData; type SystemWeightInfo = weights::frame_system::WeightInfo; type SS58Prefix = SS58Prefix; diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs index 2fcc7121c0c..01efb9d20cc 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs @@ -37,7 +37,7 @@ use sp_api::impl_runtime_apis; use sp_core::{crypto::KeyTypeId, OpaqueMetadata}; use sp_runtime::{ create_runtime_str, generic, impl_opaque_keys, - traits::{AccountIdLookup, BlakeTwo256, Block as BlockT}, + traits::Block as BlockT, transaction_validity::{TransactionSource, TransactionValidity}, ApplyExtrinsicResult, }; @@ -49,11 +49,11 @@ use sp_version::RuntimeVersion; use cumulus_primitives_core::{AggregateMessageOrigin, ParaId}; use frame_support::{ - construct_runtime, + construct_runtime, derive_impl, dispatch::DispatchClass, genesis_builder_helper::{build_config, create_default_config}, parameter_types, - traits::{ConstBool, ConstU32, ConstU64, ConstU8, Everything, TransformOrigin}, + traits::{ConstBool, ConstU32, ConstU64, ConstU8, TransformOrigin}, weights::{ConstantMultiplier, Weight}, PalletId, }; @@ -211,41 +211,24 @@ parameter_types! { // Configure FRAME pallets to include in runtime. +#[derive_impl(frame_system::config_preludes::ParaChainDefaultConfig as frame_system::DefaultConfig)] impl frame_system::Config for Runtime { /// The identifier used to distinguish between accounts. type AccountId = AccountId; - /// The aggregated dispatch type that is available for extrinsics. - type RuntimeCall = RuntimeCall; - /// The lookup mechanism to get account ID from whatever is passed in dispatchers. - type Lookup = AccountIdLookup; /// The index type for storing how many extrinsics an account has signed. type Nonce = Nonce; /// The type for hashing blocks and tries. type Hash = Hash; - /// The hashing algorithm used. - type Hashing = BlakeTwo256; /// The block type. type Block = Block; - /// The ubiquitous event type. - type RuntimeEvent = RuntimeEvent; - /// The ubiquitous origin type. - type RuntimeOrigin = RuntimeOrigin; /// Maximum number of block number to block hash mappings to keep (oldest pruned first). type BlockHashCount = BlockHashCount; /// Runtime version. type Version = Version; - /// Converts a module to an index of this module in the runtime. - type PalletInfo = PalletInfo; /// The data to be stored in an account. type AccountData = pallet_balances::AccountData; - /// What to do if a new account is created. - type OnNewAccount = (); - /// What to do if an account is fully reaped from the system. 
- type OnKilledAccount = (); /// The weight of database operations that the runtime can invoke. type DbWeight = RocksDbWeight; - /// The basic call filter to use in dispatchable. - type BaseCallFilter = Everything; /// Weight information for the extrinsics of this pallet. type SystemWeightInfo = weights::frame_system::WeightInfo; /// Block & extrinsics weights: base values and limits. diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs index 121aa5f0e86..39ce4e04ed1 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs @@ -39,7 +39,7 @@ use sp_api::impl_runtime_apis; use sp_core::{crypto::KeyTypeId, OpaqueMetadata}; use sp_runtime::{ create_runtime_str, generic, impl_opaque_keys, - traits::{AccountIdLookup, BlakeTwo256, Block as BlockT}, + traits::Block as BlockT, transaction_validity::{TransactionSource, TransactionValidity}, ApplyExtrinsicResult, }; @@ -50,11 +50,11 @@ use sp_version::NativeVersion; use sp_version::RuntimeVersion; use frame_support::{ - construct_runtime, + construct_runtime, derive_impl, dispatch::DispatchClass, genesis_builder_helper::{build_config, create_default_config}, parameter_types, - traits::{ConstBool, ConstU32, ConstU64, ConstU8, Everything, TransformOrigin}, + traits::{ConstBool, ConstU32, ConstU64, ConstU8, TransformOrigin}, weights::{ConstantMultiplier, Weight}, PalletId, }; @@ -212,41 +212,24 @@ parameter_types! { // Configure FRAME pallets to include in runtime. +#[derive_impl(frame_system::config_preludes::ParaChainDefaultConfig as frame_system::DefaultConfig)] impl frame_system::Config for Runtime { /// The identifier used to distinguish between accounts. type AccountId = AccountId; - /// The aggregated dispatch type that is available for extrinsics. - type RuntimeCall = RuntimeCall; - /// The lookup mechanism to get account ID from whatever is passed in dispatchers. - type Lookup = AccountIdLookup; /// The index type for storing how many extrinsics an account has signed. type Nonce = Nonce; /// The type for hashing blocks and tries. type Hash = Hash; - /// The hashing algorithm used. - type Hashing = BlakeTwo256; /// The block type. type Block = Block; - /// The ubiquitous event type. - type RuntimeEvent = RuntimeEvent; - /// The ubiquitous origin type. - type RuntimeOrigin = RuntimeOrigin; /// Maximum number of block number to block hash mappings to keep (oldest pruned first). type BlockHashCount = BlockHashCount; /// Runtime version. type Version = Version; - /// Converts a module to an index of this module in the runtime. - type PalletInfo = PalletInfo; /// The data to be stored in an account. type AccountData = pallet_balances::AccountData; - /// What to do if a new account is created. - type OnNewAccount = (); - /// What to do if an account is fully reaped from the system. - type OnKilledAccount = (); /// The weight of database operations that the runtime can invoke. type DbWeight = RocksDbWeight; - /// The basic call filter to use in dispatchable. - type BaseCallFilter = Everything; /// Weight information for the extrinsics of this pallet. type SystemWeightInfo = weights::frame_system::WeightInfo; /// Block & extrinsics weights: base values and limits. 
diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/src/lib.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/lib.rs index be82cf32fbb..842ddbe54f2 100644 --- a/cumulus/parachains/runtimes/collectives/collectives-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/collectives/collectives-westend/src/lib.rs @@ -51,7 +51,7 @@ use sp_api::impl_runtime_apis; use sp_core::{crypto::KeyTypeId, OpaqueMetadata}; use sp_runtime::{ create_runtime_str, generic, impl_opaque_keys, - traits::{AccountIdConversion, AccountIdLookup, BlakeTwo256, Block as BlockT}, + traits::{AccountIdConversion, BlakeTwo256, Block as BlockT}, transaction_validity::{TransactionSource, TransactionValidity}, ApplyExtrinsicResult, Perbill, }; @@ -64,7 +64,7 @@ use sp_version::RuntimeVersion; use codec::{Decode, Encode, MaxEncodedLen}; use cumulus_primitives_core::{AggregateMessageOrigin, ParaId}; use frame_support::{ - construct_runtime, + construct_runtime, derive_impl, dispatch::DispatchClass, genesis_builder_helper::{build_config, create_default_config}, parameter_types, @@ -154,25 +154,18 @@ parameter_types! { } // Configure FRAME pallets to include in runtime. +#[derive_impl(frame_system::config_preludes::ParaChainDefaultConfig as frame_system::DefaultConfig)] impl frame_system::Config for Runtime { - type BaseCallFilter = frame_support::traits::Everything; type BlockWeights = RuntimeBlockWeights; type BlockLength = RuntimeBlockLength; type AccountId = AccountId; type RuntimeCall = RuntimeCall; - type Lookup = AccountIdLookup; type Nonce = Nonce; type Hash = Hash; - type Hashing = BlakeTwo256; type Block = Block; - type RuntimeEvent = RuntimeEvent; - type RuntimeOrigin = RuntimeOrigin; type BlockHashCount = BlockHashCount; type DbWeight = RocksDbWeight; type Version = Version; - type PalletInfo = PalletInfo; - type OnNewAccount = (); - type OnKilledAccount = (); type AccountData = pallet_balances::AccountData; type SystemWeightInfo = weights::frame_system::WeightInfo; type SS58Prefix = ConstU16<0>; diff --git a/cumulus/parachains/runtimes/contracts/contracts-rococo/src/lib.rs b/cumulus/parachains/runtimes/contracts/contracts-rococo/src/lib.rs index 33c2e26573e..81cc8558a2b 100644 --- a/cumulus/parachains/runtimes/contracts/contracts-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/contracts/contracts-rococo/src/lib.rs @@ -35,7 +35,7 @@ use sp_api::impl_runtime_apis; use sp_core::{crypto::KeyTypeId, OpaqueMetadata}; use sp_runtime::{ create_runtime_str, generic, impl_opaque_keys, - traits::{AccountIdLookup, BlakeTwo256, Block as BlockT}, + traits::Block as BlockT, transaction_validity::{TransactionSource, TransactionValidity}, ApplyExtrinsicResult, Perbill, }; @@ -46,11 +46,11 @@ use sp_version::NativeVersion; use sp_version::RuntimeVersion; use frame_support::{ - construct_runtime, + construct_runtime, derive_impl, dispatch::DispatchClass, genesis_builder_helper::{build_config, create_default_config}, parameter_types, - traits::{ConstBool, ConstU128, ConstU16, ConstU32, ConstU64, ConstU8, Everything}, + traits::{ConstBool, ConstU128, ConstU16, ConstU32, ConstU64, ConstU8}, weights::{ConstantMultiplier, Weight}, PalletId, }; @@ -171,25 +171,17 @@ parameter_types! { } // Configure FRAME pallets to include in runtime. 
+#[derive_impl(frame_system::config_preludes::ParaChainDefaultConfig as frame_system::DefaultConfig)] impl frame_system::Config for Runtime { - type BaseCallFilter = Everything; type BlockWeights = RuntimeBlockWeights; type BlockLength = RuntimeBlockLength; type AccountId = AccountId; - type RuntimeCall = RuntimeCall; - type Lookup = AccountIdLookup; type Nonce = Nonce; type Hash = Hash; - type Hashing = BlakeTwo256; type Block = Block; - type RuntimeEvent = RuntimeEvent; - type RuntimeOrigin = RuntimeOrigin; type BlockHashCount = BlockHashCount; type DbWeight = RocksDbWeight; type Version = Version; - type PalletInfo = PalletInfo; - type OnNewAccount = (); - type OnKilledAccount = (); type AccountData = pallet_balances::AccountData; type SystemWeightInfo = frame_system::weights::SubstrateWeight; type SS58Prefix = ConstU16<42>; diff --git a/cumulus/parachains/runtimes/glutton/glutton-westend/src/lib.rs b/cumulus/parachains/runtimes/glutton/glutton-westend/src/lib.rs index 10f4d5f3307..c9dc5131644 100644 --- a/cumulus/parachains/runtimes/glutton/glutton-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/glutton/glutton-westend/src/lib.rs @@ -53,7 +53,7 @@ pub use sp_consensus_aura::sr25519::AuthorityId as AuraId; use sp_core::{crypto::KeyTypeId, OpaqueMetadata}; use sp_runtime::{ create_runtime_str, generic, impl_opaque_keys, - traits::{AccountIdLookup, BlakeTwo256, Block as BlockT}, + traits::{BlakeTwo256, Block as BlockT}, transaction_validity::{TransactionSource, TransactionValidity}, ApplyExtrinsicResult, }; @@ -64,7 +64,7 @@ use sp_version::RuntimeVersion; use cumulus_primitives_core::AggregateMessageOrigin; pub use frame_support::{ - construct_runtime, + construct_runtime, derive_impl, dispatch::DispatchClass, genesis_builder_helper::{build_config, create_default_config}, parameter_types, @@ -168,25 +168,14 @@ parameter_types! { pub const SS58Prefix: u8 = 42; } +#[derive_impl(frame_system::config_preludes::ParaChainDefaultConfig as frame_system::DefaultConfig)] impl frame_system::Config for Runtime { type AccountId = AccountId; - type RuntimeCall = RuntimeCall; - type Lookup = AccountIdLookup; type Nonce = Nonce; type Hash = Hash; - type Hashing = BlakeTwo256; type Block = Block; - type RuntimeEvent = RuntimeEvent; - type RuntimeOrigin = RuntimeOrigin; type BlockHashCount = BlockHashCount; type Version = Version; - type PalletInfo = PalletInfo; - type AccountData = (); - type OnNewAccount = (); - type OnKilledAccount = (); - type DbWeight = (); - type BaseCallFilter = frame_support::traits::Everything; - type SystemWeightInfo = (); type BlockWeights = RuntimeBlockWeights; type BlockLength = RuntimeBlockLength; type SS58Prefix = SS58Prefix; diff --git a/cumulus/test/runtime/src/lib.rs b/cumulus/test/runtime/src/lib.rs index 19fd6d5f02d..3de77cb1e58 100644 --- a/cumulus/test/runtime/src/lib.rs +++ b/cumulus/test/runtime/src/lib.rs @@ -29,7 +29,7 @@ pub mod wasm_spec_version_incremented { mod test_pallet; -use frame_support::traits::OnRuntimeUpgrade; +use frame_support::{derive_impl, traits::OnRuntimeUpgrade}; use sp_api::{decl_runtime_apis, impl_runtime_apis}; use sp_core::{ConstU32, OpaqueMetadata}; use sp_runtime::{ @@ -177,36 +177,23 @@ parameter_types! { pub const SS58Prefix: u8 = 42; } +#[derive_impl(frame_system::config_preludes::ParaChainDefaultConfig as frame_system::DefaultConfig)] impl frame_system::Config for Runtime { /// The identifier used to distinguish between accounts. type AccountId = AccountId; - /// The aggregated dispatch type that is available for extrinsics. 
- type RuntimeCall = RuntimeCall; /// The lookup mechanism to get account ID from whatever is passed in dispatchers. type Lookup = IdentityLookup; /// The index type for storing how many extrinsics an account has signed. type Nonce = Nonce; /// The type for hashing blocks and tries. type Hash = Hash; - /// The hashing algorithm used. - type Hashing = BlakeTwo256; /// The block type. type Block = Block; - /// The ubiquitous event type. - type RuntimeEvent = RuntimeEvent; - /// The ubiquitous origin type. - type RuntimeOrigin = RuntimeOrigin; /// Maximum number of block number to block hash mappings to keep (oldest pruned first). type BlockHashCount = BlockHashCount; /// Runtime version. type Version = Version; - type PalletInfo = PalletInfo; type AccountData = pallet_balances::AccountData; - type OnNewAccount = (); - type OnKilledAccount = (); - type DbWeight = (); - type BaseCallFilter = frame_support::traits::Everything; - type SystemWeightInfo = (); type BlockWeights = RuntimeBlockWeights; type BlockLength = RuntimeBlockLength; type SS58Prefix = SS58Prefix; diff --git a/polkadot/runtime/rococo/src/lib.rs b/polkadot/runtime/rococo/src/lib.rs index 23d32b59a51..21d4088df19 100644 --- a/polkadot/runtime/rococo/src/lib.rs +++ b/polkadot/runtime/rococo/src/lib.rs @@ -64,7 +64,7 @@ use beefy_primitives::{ }; use frame_support::{ - construct_runtime, + construct_runtime, derive_impl, genesis_builder_helper::{build_config, create_default_config}, parameter_types, traits::{ @@ -84,9 +84,8 @@ use sp_core::{ConstU128, OpaqueMetadata, H256}; use sp_runtime::{ create_runtime_str, generic, impl_opaque_keys, traits::{ - AccountIdLookup, BlakeTwo256, Block as BlockT, ConstU32, ConvertInto, - Extrinsic as ExtrinsicT, IdentityLookup, Keccak256, OpaqueKeys, SaturatedConversion, - Verify, + BlakeTwo256, Block as BlockT, ConstU32, ConvertInto, Extrinsic as ExtrinsicT, + IdentityLookup, Keccak256, OpaqueKeys, SaturatedConversion, Verify, }, transaction_validity::{TransactionPriority, TransactionSource, TransactionValidity}, ApplyExtrinsicResult, BoundToRuntimeAppPublic, FixedU128, KeyTypeId, Perbill, Percent, Permill, @@ -186,29 +185,21 @@ parameter_types! 
{ pub const SS58Prefix: u8 = 42; } +#[derive_impl(frame_system::config_preludes::RelayChainDefaultConfig as frame_system::DefaultConfig)] impl frame_system::Config for Runtime { type BaseCallFilter = EverythingBut; type BlockWeights = BlockWeights; type BlockLength = BlockLength; type DbWeight = RocksDbWeight; - type RuntimeOrigin = RuntimeOrigin; - type RuntimeCall = RuntimeCall; type Nonce = Nonce; type Hash = Hash; - type Hashing = BlakeTwo256; type AccountId = AccountId; - type Lookup = AccountIdLookup; type Block = Block; - type RuntimeEvent = RuntimeEvent; type BlockHashCount = BlockHashCount; type Version = Version; - type PalletInfo = PalletInfo; type AccountData = pallet_balances::AccountData; - type OnNewAccount = (); - type OnKilledAccount = (); type SystemWeightInfo = weights::frame_system::WeightInfo; type SS58Prefix = SS58Prefix; - type OnSetCode = (); type MaxConsumers = frame_support::traits::ConstU32<16>; } diff --git a/polkadot/runtime/test-runtime/src/lib.rs b/polkadot/runtime/test-runtime/src/lib.rs index 7ec9f63748c..81b9b63f75b 100644 --- a/polkadot/runtime/test-runtime/src/lib.rs +++ b/polkadot/runtime/test-runtime/src/lib.rs @@ -42,10 +42,10 @@ use frame_election_provider_support::{ onchain, SequentialPhragmen, }; use frame_support::{ - construct_runtime, + construct_runtime, derive_impl, genesis_builder_helper::{build_config, create_default_config}, parameter_types, - traits::{Everything, KeyOwnerProofSystem, WithdrawReasons}, + traits::{KeyOwnerProofSystem, WithdrawReasons}, }; use pallet_grandpa::{fg_primitives, AuthorityId as GrandpaId}; use pallet_session::historical as session_historical; @@ -139,29 +139,19 @@ parameter_types! { pub const SS58Prefix: u8 = 42; } +#[derive_impl(frame_system::config_preludes::RelayChainDefaultConfig as frame_system::DefaultConfig)] impl frame_system::Config for Runtime { - type BaseCallFilter = Everything; type BlockWeights = BlockWeights; type BlockLength = BlockLength; - type DbWeight = (); - type RuntimeOrigin = RuntimeOrigin; - type RuntimeCall = RuntimeCall; type Nonce = Nonce; type Hash = HashT; - type Hashing = BlakeTwo256; type AccountId = AccountId; type Lookup = Indices; type Block = Block; - type RuntimeEvent = RuntimeEvent; type BlockHashCount = BlockHashCount; type Version = Version; - type PalletInfo = PalletInfo; type AccountData = pallet_balances::AccountData; - type OnNewAccount = (); - type OnKilledAccount = (); - type SystemWeightInfo = (); type SS58Prefix = SS58Prefix; - type OnSetCode = (); type MaxConsumers = frame_support::traits::ConstU32<16>; } diff --git a/polkadot/runtime/westend/src/lib.rs b/polkadot/runtime/westend/src/lib.rs index 36c8c12be21..afc2e5ea3da 100644 --- a/polkadot/runtime/westend/src/lib.rs +++ b/polkadot/runtime/westend/src/lib.rs @@ -27,7 +27,7 @@ use beefy_primitives::{ }; use frame_election_provider_support::{bounds::ElectionBoundsBuilder, onchain, SequentialPhragmen}; use frame_support::{ - construct_runtime, + construct_runtime, derive_impl, genesis_builder_helper::{build_config, create_default_config}, parameter_types, traits::{ @@ -83,8 +83,8 @@ use sp_runtime::{ curve::PiecewiseLinear, generic, impl_opaque_keys, traits::{ - AccountIdLookup, BlakeTwo256, Block as BlockT, ConvertInto, Extrinsic as ExtrinsicT, - IdentityLookup, Keccak256, OpaqueKeys, SaturatedConversion, Verify, + BlakeTwo256, Block as BlockT, ConvertInto, Extrinsic as ExtrinsicT, IdentityLookup, + Keccak256, OpaqueKeys, SaturatedConversion, Verify, }, transaction_validity::{TransactionPriority, TransactionSource, 
TransactionValidity}, ApplyExtrinsicResult, BoundToRuntimeAppPublic, FixedU128, KeyTypeId, Perbill, Percent, Permill, @@ -181,29 +181,21 @@ parameter_types! { pub const SS58Prefix: u8 = 42; } +#[derive_impl(frame_system::config_preludes::RelayChainDefaultConfig as frame_system::DefaultConfig)] impl frame_system::Config for Runtime { type BaseCallFilter = EverythingBut; type BlockWeights = BlockWeights; type BlockLength = BlockLength; - type RuntimeOrigin = RuntimeOrigin; - type RuntimeCall = RuntimeCall; type Nonce = Nonce; type Hash = Hash; - type Hashing = BlakeTwo256; type AccountId = AccountId; - type Lookup = AccountIdLookup; type Block = Block; - type RuntimeEvent = RuntimeEvent; type BlockHashCount = BlockHashCount; type DbWeight = RocksDbWeight; type Version = Version; - type PalletInfo = PalletInfo; type AccountData = pallet_balances::AccountData; - type OnNewAccount = (); - type OnKilledAccount = (); type SystemWeightInfo = weights::frame_system::WeightInfo; type SS58Prefix = SS58Prefix; - type OnSetCode = (); type MaxConsumers = frame_support::traits::ConstU32<16>; } diff --git a/substrate/bin/node-template/runtime/src/lib.rs b/substrate/bin/node-template/runtime/src/lib.rs index 6aa4cb70fde..5f399edda98 100644 --- a/substrate/bin/node-template/runtime/src/lib.rs +++ b/substrate/bin/node-template/runtime/src/lib.rs @@ -12,9 +12,7 @@ use sp_consensus_aura::sr25519::AuthorityId as AuraId; use sp_core::{crypto::KeyTypeId, OpaqueMetadata}; use sp_runtime::{ create_runtime_str, generic, impl_opaque_keys, - traits::{ - AccountIdLookup, BlakeTwo256, Block as BlockT, IdentifyAccount, NumberFor, One, Verify, - }, + traits::{BlakeTwo256, Block as BlockT, IdentifyAccount, NumberFor, One, Verify}, transaction_validity::{TransactionSource, TransactionValidity}, ApplyExtrinsicResult, MultiSignature, }; @@ -26,7 +24,7 @@ use sp_version::RuntimeVersion; use frame_support::genesis_builder_helper::{build_config, create_default_config}; // A few exports that help ease life for downstream crates. pub use frame_support::{ - construct_runtime, parameter_types, + construct_runtime, derive_impl, parameter_types, traits::{ ConstBool, ConstU128, ConstU32, ConstU64, ConstU8, KeyOwnerProofSystem, Randomness, StorageInfo, @@ -151,11 +149,11 @@ parameter_types! { pub const SS58Prefix: u8 = 42; } -// Configure FRAME pallets to include in runtime. - +/// The default types are being injected by [`derive_impl`](`frame_support::derive_impl`) from +/// [`SoloChainDefaultConfig`](`struct@frame_system::config_preludes::SolochainDefaultConfig`), +/// but overridden as needed. +#[derive_impl(frame_system::config_preludes::SolochainDefaultConfig as frame_system::DefaultConfig)] impl frame_system::Config for Runtime { - /// The basic call filter to use in dispatchable. - type BaseCallFilter = frame_support::traits::Everything; /// The block type for the runtime. type Block = Block; /// Block & extrinsics weights: base values and limits. @@ -164,42 +162,20 @@ impl frame_system::Config for Runtime { type BlockLength = BlockLength; /// The identifier used to distinguish between accounts. type AccountId = AccountId; - /// The aggregated dispatch type that is available for extrinsics. - type RuntimeCall = RuntimeCall; - /// The lookup mechanism to get account ID from whatever is passed in dispatchers. - type Lookup = AccountIdLookup; /// The type for storing how many extrinsics an account has signed. type Nonce = Nonce; /// The type for hashing blocks and tries. type Hash = Hash; - /// The hashing algorithm used. 
- type Hashing = BlakeTwo256; - /// The ubiquitous event type. - type RuntimeEvent = RuntimeEvent; - /// The ubiquitous origin type. - type RuntimeOrigin = RuntimeOrigin; /// Maximum number of block number to block hash mappings to keep (oldest pruned first). type BlockHashCount = BlockHashCount; /// The weight of database operations that the runtime can invoke. type DbWeight = RocksDbWeight; /// Version of the runtime. type Version = Version; - /// Converts a module to the index of the module in `construct_runtime!`. - /// - /// This type is being generated by `construct_runtime!`. - type PalletInfo = PalletInfo; - /// What to do if a new account is created. - type OnNewAccount = (); - /// What to do if an account is fully reaped from the system. - type OnKilledAccount = (); /// The data to be stored in an account. type AccountData = pallet_balances::AccountData; - /// Weight information for the extrinsics of this pallet. - type SystemWeightInfo = (); /// This is used as an identifier of the chain. 42 is the generic substrate prefix. type SS58Prefix = SS58Prefix; - /// The set code logic, just the default since we're not a parachain. - type OnSetCode = (); type MaxConsumers = frame_support::traits::ConstU32<16>; } diff --git a/substrate/bin/node/runtime/src/lib.rs b/substrate/bin/node/runtime/src/lib.rs index e2746b1c35b..1f4f22f224a 100644 --- a/substrate/bin/node/runtime/src/lib.rs +++ b/substrate/bin/node/runtime/src/lib.rs @@ -28,7 +28,7 @@ use frame_election_provider_support::{ onchain, BalancingConfig, ElectionDataProvider, SequentialPhragmen, VoteWeight, }; use frame_support::{ - construct_runtime, + construct_runtime, derive_impl, dispatch::DispatchClass, genesis_builder_helper::{build_config, create_default_config}, instances::{Instance1, Instance2}, @@ -283,29 +283,22 @@ impl pallet_safe_mode::Config for Runtime { type WeightInfo = pallet_safe_mode::weights::SubstrateWeight; } +#[derive_impl(frame_system::config_preludes::SolochainDefaultConfig as frame_system::DefaultConfig)] impl frame_system::Config for Runtime { type BaseCallFilter = InsideBoth; type BlockWeights = RuntimeBlockWeights; type BlockLength = RuntimeBlockLength; type DbWeight = RocksDbWeight; - type RuntimeOrigin = RuntimeOrigin; - type RuntimeCall = RuntimeCall; type Nonce = Nonce; type Hash = Hash; - type Hashing = BlakeTwo256; type AccountId = AccountId; type Lookup = Indices; type Block = Block; - type RuntimeEvent = RuntimeEvent; type BlockHashCount = BlockHashCount; type Version = Version; - type PalletInfo = PalletInfo; type AccountData = pallet_balances::AccountData; - type OnNewAccount = (); - type OnKilledAccount = (); type SystemWeightInfo = frame_system::weights::SubstrateWeight; type SS58Prefix = ConstU16<42>; - type OnSetCode = (); type MaxConsumers = ConstU32<16>; } diff --git a/substrate/frame/Cargo.toml b/substrate/frame/Cargo.toml index 93306e8af3d..d6953dac7b8 100644 --- a/substrate/frame/Cargo.toml +++ b/substrate/frame/Cargo.toml @@ -53,7 +53,7 @@ pallet-examples = { path = "./examples" } [features] default = ["runtime", "std"] -experimental = ["frame-support/experimental", "frame-system/experimental"] +experimental = ["frame-support/experimental"] runtime = [ "frame-executive", "frame-system-rpc-runtime-api", diff --git a/substrate/frame/system/Cargo.toml b/substrate/frame/system/Cargo.toml index 0ddff8bf41e..3b454ac18f9 100644 --- a/substrate/frame/system/Cargo.toml +++ b/substrate/frame/system/Cargo.toml @@ -53,7 +53,6 @@ runtime-benchmarks = [ "sp-runtime/runtime-benchmarks", ] 
try-runtime = ["frame-support/try-runtime", "sp-runtime/try-runtime"] -experimental = [] [[bench]] name = "bench" diff --git a/substrate/frame/system/src/lib.rs b/substrate/frame/system/src/lib.rs index 15dd047fdb0..640cb133213 100644 --- a/substrate/frame/system/src/lib.rs +++ b/substrate/frame/system/src/lib.rs @@ -206,6 +206,7 @@ pub mod pallet { /// Default implementations of [`DefaultConfig`], which can be used to implement [`Config`]. pub mod config_preludes { use super::{inject_runtime_type, DefaultConfig}; + use frame_support::derive_impl; /// Provides a viable default config that can be used with /// [`derive_impl`](`frame_support::derive_impl`) to derive a testing pallet config @@ -258,39 +259,98 @@ pub mod pallet { /// if you use `pallet-balances` or similar. /// * Make sure to overwrite [`DefaultConfig::Version`]. /// * 2s block time, and a default 5mb block size is used. - #[cfg(feature = "experimental")] pub struct SolochainDefaultConfig; - #[cfg(feature = "experimental")] #[frame_support::register_default_impl(SolochainDefaultConfig)] impl DefaultConfig for SolochainDefaultConfig { + /// The default type for storing how many extrinsics an account has signed. type Nonce = u32; + + /// The default type for hashing blocks and tries. type Hash = sp_core::hash::H256; + + /// The default hashing algorithm used. type Hashing = sp_runtime::traits::BlakeTwo256; + + /// The default identifier used to distinguish between accounts. type AccountId = sp_runtime::AccountId32; + + /// The lookup mechanism to get account ID from whatever is passed in dispatchers. type Lookup = sp_runtime::traits::AccountIdLookup; + + /// The maximum number of consumers allowed on a single account. Using 128 as default. type MaxConsumers = frame_support::traits::ConstU32<128>; + + /// The default data to be stored in an account. type AccountData = crate::AccountInfo; + + /// What to do if a new account is created. type OnNewAccount = (); + + /// What to do if an account is fully reaped from the system. type OnKilledAccount = (); + + /// Weight information for the extrinsics of this pallet. type SystemWeightInfo = (); + + /// This is used as an identifier of the chain. type SS58Prefix = (); + + /// Version of the runtime. type Version = (); + + /// Block & extrinsics weights: base values and limits. type BlockWeights = (); + + /// The maximum length of a block (in bytes). type BlockLength = (); + + /// The weight of database operations that the runtime can invoke. type DbWeight = (); + + /// The ubiquitous event type injected by `construct_runtime!`. #[inject_runtime_type] type RuntimeEvent = (); + + /// The ubiquitous origin type injected by `construct_runtime!`. #[inject_runtime_type] type RuntimeOrigin = (); + + /// The aggregated dispatch type available for extrinsics, injected by + /// `construct_runtime!`. #[inject_runtime_type] type RuntimeCall = (); + + /// Converts a module to the index of the module, injected by `construct_runtime!`. #[inject_runtime_type] type PalletInfo = (); + + /// The basic call filter to use in dispatchable. Supports everything as the default. type BaseCallFilter = frame_support::traits::Everything; + + /// Maximum number of block number to block hash mappings to keep (oldest pruned first). + /// Using 256 as default. type BlockHashCount = frame_support::traits::ConstU32<256>; + + /// The set code logic, just the default since we're not a parachain. type OnSetCode = (); } + + /// Default configurations of this pallet in a relay-chain environment. 
+ pub struct RelayChainDefaultConfig; + + /// It currently uses the same configuration as `SolochainDefaultConfig`. + #[derive_impl(SolochainDefaultConfig as DefaultConfig, no_aggregated_types)] + #[frame_support::register_default_impl(RelayChainDefaultConfig)] + impl DefaultConfig for RelayChainDefaultConfig {} + + /// Default configurations of this pallet in a parachain environment. + pub struct ParaChainDefaultConfig; + + /// It currently uses the same configuration as `SolochainDefaultConfig`. + #[derive_impl(SolochainDefaultConfig as DefaultConfig, no_aggregated_types)] + #[frame_support::register_default_impl(ParaChainDefaultConfig)] + impl DefaultConfig for ParaChainDefaultConfig {} } /// System configuration trait. Implemented by runtime. -- GitLab From f8b03d9564d3c3708a2a9a278f5c659b8c7a1f6b Mon Sep 17 00:00:00 2001 From: Sebastian Kunert Date: Tue, 5 Dec 2023 09:02:49 +0100 Subject: [PATCH 194/199] Stabilize pov-recovery zombienet (#2611) Smoldot sometimes stops reporting finalized blocks to us. Since we are recovering from the relay chain with a huge delay on full nodes, we can not rely on import notifications to set the best block. Because sometimes we also do not receive finality notifications, the height check in zombienet fails. Proper solution is to update smoldot, there have been some changes since the version we use. But upgrade is blocked by version conflict which will be resolved with https://github.com/paritytech/polkadot-sdk/pull/1631. --- cumulus/zombienet/tests/0002-pov_recovery.zndsl | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/cumulus/zombienet/tests/0002-pov_recovery.zndsl b/cumulus/zombienet/tests/0002-pov_recovery.zndsl index 7a93e2f3742..b05285c87bf 100644 --- a/cumulus/zombienet/tests/0002-pov_recovery.zndsl +++ b/cumulus/zombienet/tests/0002-pov_recovery.zndsl @@ -13,7 +13,8 @@ alice: reports block height is at least 20 within 600 seconds charlie: reports block height is at least 20 within 600 seconds one: reports block height is at least 20 within 800 seconds two: reports block height is at least 20 within 800 seconds -three: reports block height is at least 20 within 800 seconds +# Re-enable once we upgraded from smoldot 0.11.0 and https://github.com/paritytech/polkadot-sdk/pull/1631 is merged +# three: reports block height is at least 20 within 800 seconds eve: reports block height is at least 20 within 800 seconds one: count of log lines containing "Importing block retrieved using pov_recovery" is greater than 19 within 10 seconds -- GitLab From 2f9af7873dd2efb145f91d6b65e71beb40fead06 Mon Sep 17 00:00:00 2001 From: Vladimir Istyufeev Date: Tue, 5 Dec 2023 12:15:14 +0400 Subject: [PATCH 195/199] Remove `cargo install` for Zepter and Taplo (#2614) --- .gitlab/pipeline/check.yml | 2 -- 1 file changed, 2 deletions(-) diff --git a/.gitlab/pipeline/check.yml b/.gitlab/pipeline/check.yml index 98435248eb4..f95bd224fb4 100644 --- a/.gitlab/pipeline/check.yml +++ b/.gitlab/pipeline/check.yml @@ -87,7 +87,6 @@ check-rust-feature-propagation: - .kubernetes-env - .common-refs script: - - cargo install --locked --version 0.13.3 -q -f zepter && zepter --version - zepter run check check-toml-format: @@ -96,7 +95,6 @@ check-toml-format: - .kubernetes-env - .common-refs script: - - cargo install taplo-cli --locked --version 0.8.1 - taplo format --check --config .config/taplo.toml - echo "Please run `taplo format --config .config/taplo.toml` to fix any toml formatting issues" -- GitLab From a310df263de0755d2a00ad02fd5692810cf2d2a3 Mon Sep 17 
00:00:00 2001 From: Juan Girini Date: Tue, 5 Dec 2023 10:23:24 +0100 Subject: [PATCH 196/199] Move `developer-hub` to `polkadot-sdk-docs` (#2598) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This PR is a continuation of https://github.com/paritytech/polkadot-sdk/pull/2102 and part of an initiative started here https://hackmd.io/@romanp/rJ318ZCEp What has been done: - The content under `docs/*` (with the exception of `docs/mermaid`) has been moved to `docs/contributor/` - Developer Hub has been renamed to Polkadot SDK Docs, and the crate has been renamed from `developer-hub` to `polkadot-sdk-docs` - The content under `developer-hub/*` has been moved to `docs/sdk` --- Original PR https://github.com/paritytech/polkadot-sdk/pull/2565, it has been close due to too many rebase conflicts --------- Co-authored-by: Serban Iorga Co-authored-by: Chevdor Co-authored-by: Egor_P Co-authored-by: Bastian Köcher --- .github/workflows/check-prdoc.yml | 2 +- .gitignore | 1 + .gitlab/pipeline/build.yml | 2 +- .gitlab/pipeline/test.yml | 2 +- Cargo.lock | 78 +++++++++---------- Cargo.toml | 2 +- README.md | 6 +- cumulus/README.md | 6 +- developer-hub/Cargo.toml | 66 ---------------- docs/{ => contributor}/CODE_OF_CONDUCT.md | 0 docs/{ => contributor}/CONTRIBUTING.md | 0 .../DEPRECATION_CHECKLIST.md | 2 +- .../DOCUMENTATION_GUIDELINES.md | 0 .../PULL_REQUEST_TEMPLATE.md | 2 +- docs/{ => contributor}/SECURITY.md | 0 docs/{ => contributor}/STYLE_GUIDE.md | 0 docs/{ => contributor}/container.md | 0 docs/{ => contributor}/docker.md | 0 docs/{ => contributor}/markdown_linting.md | 0 docs/{ => contributor}/prdoc.md | 0 docs/mermaid/IA.mmd | 2 +- docs/sdk/Cargo.toml | 66 ++++++++++++++++ {developer-hub => docs/sdk}/headers/toc.html | 8 +- .../sdk}/src/guides/changing_consensus.rs | 0 .../src/guides/cumulus_enabled_parachain.rs | 0 {developer-hub => docs/sdk}/src/guides/mod.rs | 2 +- .../sdk}/src/guides/xcm_enabled_parachain.rs | 0 .../sdk}/src/guides/your_first_node.rs | 0 .../sdk}/src/guides/your_first_pallet/mod.rs | 4 +- .../guides/your_first_pallet/with_event.rs | 0 .../sdk}/src/guides/your_first_runtime.rs | 0 {developer-hub => docs/sdk}/src/lib.rs | 6 +- .../sdk}/src/meta_contributing.rs | 4 +- .../sdk}/src/polkadot_sdk/cumulus.rs | 0 .../sdk}/src/polkadot_sdk/frame_runtime.rs | 2 +- .../sdk}/src/polkadot_sdk/mod.rs | 8 +- .../sdk}/src/polkadot_sdk/polkadot.rs | 0 .../sdk}/src/polkadot_sdk/smart_contracts.rs | 0 .../sdk}/src/polkadot_sdk/substrate.rs | 2 +- .../sdk}/src/polkadot_sdk/templates.rs | 0 .../sdk}/src/polkadot_sdk/xcm.rs | 0 .../reference_docs/blockchain_scalibility.rs | 0 .../blockchain_state_machines.rs | 4 +- .../src/reference_docs/chain_spec_genesis.rs | 0 .../sdk}/src/reference_docs/cli.rs | 0 .../src/reference_docs/consensus_swapping.rs | 0 .../src/reference_docs/extrinsic_encoding.rs | 2 +- .../src/reference_docs/fee_less_runtime.rs | 0 .../frame_benchmarking_weight.rs | 0 .../reference_docs/frame_composite_enums.rs | 0 .../sdk}/src/reference_docs/frame_currency.rs | 0 .../sdk}/src/reference_docs/frame_origin.rs | 0 .../reference_docs/frame_runtime_migration.rs | 0 .../reference_docs/frame_system_accounts.rs | 0 .../sdk}/src/reference_docs/glossary.rs | 0 .../sdk}/src/reference_docs/light_nodes.rs | 0 .../sdk}/src/reference_docs/metadata.rs | 0 .../sdk}/src/reference_docs/mod.rs | 0 .../runtime_vs_smart_contract.rs | 0 .../safe_defensive_programming.rs | 0 .../src/reference_docs/signed_extensions.rs | 0 .../reference_docs/trait_based_programming.rs | 
2 +- .../sdk}/src/reference_docs/wasm_memory.rs | 0 .../src/reference_docs/wasm_meta_protocol.rs | 8 +- polkadot/README.md | 6 +- substrate/README.md | 10 +-- substrate/frame/src/lib.rs | 2 +- 67 files changed, 153 insertions(+), 154 deletions(-) delete mode 100644 developer-hub/Cargo.toml rename docs/{ => contributor}/CODE_OF_CONDUCT.md (100%) rename docs/{ => contributor}/CONTRIBUTING.md (100%) rename docs/{ => contributor}/DEPRECATION_CHECKLIST.md (98%) rename docs/{ => contributor}/DOCUMENTATION_GUIDELINES.md (100%) rename docs/{ => contributor}/PULL_REQUEST_TEMPLATE.md (96%) rename docs/{ => contributor}/SECURITY.md (100%) rename docs/{ => contributor}/STYLE_GUIDE.md (100%) rename docs/{ => contributor}/container.md (100%) rename docs/{ => contributor}/docker.md (100%) rename docs/{ => contributor}/markdown_linting.md (100%) rename docs/{ => contributor}/prdoc.md (100%) create mode 100644 docs/sdk/Cargo.toml rename {developer-hub => docs/sdk}/headers/toc.html (82%) rename {developer-hub => docs/sdk}/src/guides/changing_consensus.rs (100%) rename {developer-hub => docs/sdk}/src/guides/cumulus_enabled_parachain.rs (100%) rename {developer-hub => docs/sdk}/src/guides/mod.rs (96%) rename {developer-hub => docs/sdk}/src/guides/xcm_enabled_parachain.rs (100%) rename {developer-hub => docs/sdk}/src/guides/your_first_node.rs (100%) rename {developer-hub => docs/sdk}/src/guides/your_first_pallet/mod.rs (99%) rename {developer-hub => docs/sdk}/src/guides/your_first_pallet/with_event.rs (100%) rename {developer-hub => docs/sdk}/src/guides/your_first_runtime.rs (100%) rename {developer-hub => docs/sdk}/src/lib.rs (94%) rename {developer-hub => docs/sdk}/src/meta_contributing.rs (97%) rename {developer-hub => docs/sdk}/src/polkadot_sdk/cumulus.rs (100%) rename {developer-hub => docs/sdk}/src/polkadot_sdk/frame_runtime.rs (98%) rename {developer-hub => docs/sdk}/src/polkadot_sdk/mod.rs (95%) rename {developer-hub => docs/sdk}/src/polkadot_sdk/polkadot.rs (100%) rename {developer-hub => docs/sdk}/src/polkadot_sdk/smart_contracts.rs (100%) rename {developer-hub => docs/sdk}/src/polkadot_sdk/substrate.rs (99%) rename {developer-hub => docs/sdk}/src/polkadot_sdk/templates.rs (100%) rename {developer-hub => docs/sdk}/src/polkadot_sdk/xcm.rs (100%) rename {developer-hub => docs/sdk}/src/reference_docs/blockchain_scalibility.rs (100%) rename {developer-hub => docs/sdk}/src/reference_docs/blockchain_state_machines.rs (90%) rename {developer-hub => docs/sdk}/src/reference_docs/chain_spec_genesis.rs (100%) rename {developer-hub => docs/sdk}/src/reference_docs/cli.rs (100%) rename {developer-hub => docs/sdk}/src/reference_docs/consensus_swapping.rs (100%) rename {developer-hub => docs/sdk}/src/reference_docs/extrinsic_encoding.rs (98%) rename {developer-hub => docs/sdk}/src/reference_docs/fee_less_runtime.rs (100%) rename {developer-hub => docs/sdk}/src/reference_docs/frame_benchmarking_weight.rs (100%) rename {developer-hub => docs/sdk}/src/reference_docs/frame_composite_enums.rs (100%) rename {developer-hub => docs/sdk}/src/reference_docs/frame_currency.rs (100%) rename {developer-hub => docs/sdk}/src/reference_docs/frame_origin.rs (100%) rename {developer-hub => docs/sdk}/src/reference_docs/frame_runtime_migration.rs (100%) rename {developer-hub => docs/sdk}/src/reference_docs/frame_system_accounts.rs (100%) rename {developer-hub => docs/sdk}/src/reference_docs/glossary.rs (100%) rename {developer-hub => docs/sdk}/src/reference_docs/light_nodes.rs (100%) rename {developer-hub => 
docs/sdk}/src/reference_docs/metadata.rs (100%) rename {developer-hub => docs/sdk}/src/reference_docs/mod.rs (100%) rename {developer-hub => docs/sdk}/src/reference_docs/runtime_vs_smart_contract.rs (100%) rename {developer-hub => docs/sdk}/src/reference_docs/safe_defensive_programming.rs (100%) rename {developer-hub => docs/sdk}/src/reference_docs/signed_extensions.rs (100%) rename {developer-hub => docs/sdk}/src/reference_docs/trait_based_programming.rs (98%) rename {developer-hub => docs/sdk}/src/reference_docs/wasm_memory.rs (100%) rename {developer-hub => docs/sdk}/src/reference_docs/wasm_meta_protocol.rs (95%) diff --git a/.github/workflows/check-prdoc.yml b/.github/workflows/check-prdoc.yml index d5cb88a4255..f47404744a4 100644 --- a/.github/workflows/check-prdoc.yml +++ b/.github/workflows/check-prdoc.yml @@ -12,7 +12,7 @@ env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} GITHUB_PR: ${{ github.event.pull_request.number }} ENGINE: docker - PRDOC_DOC: https://github.com/paritytech/polkadot-sdk/blob/master/docs/prdoc.md + PRDOC_DOC: https://github.com/paritytech/polkadot-sdk/blob/master/docs/contributor/prdoc.md jobs: check-prdoc: diff --git a/.gitignore b/.gitignore index 581c417cb85..2f1631fb4b9 100644 --- a/.gitignore +++ b/.gitignore @@ -29,6 +29,7 @@ bin/node-template/Cargo.lock nohup.out polkadot_argument_parsing polkadot.* +!docs/sdk/src/polkadot_sdk/polkadot.rs pwasm-alloc/Cargo.lock pwasm-libc/Cargo.lock release-artifacts diff --git a/.gitlab/pipeline/build.yml b/.gitlab/pipeline/build.yml index d6918173d49..377236193cc 100644 --- a/.gitlab/pipeline/build.yml +++ b/.gitlab/pipeline/build.yml @@ -125,7 +125,7 @@ build-rustdoc: find "$path" -name '*.html' | xargs -I {} -P "$(nproc)" bash -c 'process_file "$@"' _ {} } inject_simple_analytics "./crate-docs" - - echo "" > ./crate-docs/index.html + - echo "" > ./crate-docs/index.html build-implementers-guide: stage: build diff --git a/.gitlab/pipeline/test.yml b/.gitlab/pipeline/test.yml index 265b378ef5b..f6dad887a68 100644 --- a/.gitlab/pipeline/test.yml +++ b/.gitlab/pipeline/test.yml @@ -313,7 +313,7 @@ node-bench-regression-guard: after_script: [""] # if this fails run `bot update-ui` in the Pull Request or "./scripts/update-ui-tests.sh" locally -# see ./docs/CONTRIBUTING.md#ui-tests +# see ./docs/contributor/CONTRIBUTING.md#ui-tests test-frame-ui: stage: test extends: diff --git a/Cargo.lock b/Cargo.lock index bbaa3d053a3..be4db2ca626 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4383,45 +4383,6 @@ dependencies = [ "syn 1.0.109", ] -[[package]] -name = "developer-hub" -version = "0.0.1" -dependencies = [ - "cumulus-pallet-aura-ext", - "cumulus-pallet-parachain-system", - "docify", - "frame", - "kitchensink-runtime", - "pallet-aura", - "pallet-default-config-example", - "pallet-examples", - "pallet-timestamp", - "parity-scale-codec", - "sc-cli", - "sc-client-db", - "sc-consensus-aura", - "sc-consensus-babe", - "sc-consensus-beefy", - "sc-consensus-grandpa", - "sc-consensus-manual-seal", - "sc-consensus-pow", - "sc-network", - "sc-rpc", - "sc-rpc-api", - "scale-info", - "simple-mermaid 0.1.0 (git+https://github.com/kianenigma/simple-mermaid.git?branch=main)", - "sp-api", - "sp-core", - "sp-io", - "sp-keyring", - "sp-runtime", - "staging-chain-spec-builder", - "staging-node-cli", - "staging-parachain-info", - "subkey", - "substrate-wasm-builder", -] - [[package]] name = "diff" version = "0.1.13" @@ -13013,6 +12974,45 @@ dependencies = [ "thousands", ] +[[package]] +name = "polkadot-sdk-docs" +version = "0.0.1" +dependencies = [ + 
"cumulus-pallet-aura-ext", + "cumulus-pallet-parachain-system", + "docify", + "frame", + "kitchensink-runtime", + "pallet-aura", + "pallet-default-config-example", + "pallet-examples", + "pallet-timestamp", + "parity-scale-codec", + "sc-cli", + "sc-client-db", + "sc-consensus-aura", + "sc-consensus-babe", + "sc-consensus-beefy", + "sc-consensus-grandpa", + "sc-consensus-manual-seal", + "sc-consensus-pow", + "sc-network", + "sc-rpc", + "sc-rpc-api", + "scale-info", + "simple-mermaid 0.1.0 (git+https://github.com/kianenigma/simple-mermaid.git?branch=main)", + "sp-api", + "sp-core", + "sp-io", + "sp-keyring", + "sp-runtime", + "staging-chain-spec-builder", + "staging-node-cli", + "staging-parachain-info", + "subkey", + "substrate-wasm-builder", +] + [[package]] name = "polkadot-service" version = "1.0.0" diff --git a/Cargo.toml b/Cargo.toml index 5fb7c0f2315..b694afb3f5e 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -106,7 +106,7 @@ members = [ "cumulus/test/runtime", "cumulus/test/service", "cumulus/xcm/xcm-emulator", - "developer-hub", + "docs/sdk", "polkadot", "polkadot/cli", "polkadot/core-primitives", diff --git a/README.md b/README.md index 56b3481bafc..1f255823b5b 100644 --- a/README.md +++ b/README.md @@ -46,12 +46,12 @@ Below are the primary upstream dependencies utilized in this project: ## Security -The security policy and procedures can be found in [docs/SECURITY.md](./docs/SECURITY.md). +The security policy and procedures can be found in [docs/contributor/SECURITY.md](./docs/contributor/SECURITY.md). ## Contributing & Code of Conduct -Ensure you follow our [contribution guidelines](./docs/CONTRIBUTING.md). In every interaction and contribution, this -project adheres to the [Contributor Covenant Code of Conduct](./docs/CODE_OF_CONDUCT.md). +Ensure you follow our [contribution guidelines](./docs/contributor/CONTRIBUTING.md). In every interaction and +contribution, this project adheres to the [Contributor Covenant Code of Conduct](./docs/contributor/CODE_OF_CONDUCT.md). ## Additional Resources diff --git a/cumulus/README.md b/cumulus/README.md index 6acbf6dc2ca..7e145ad7b4a 100644 --- a/cumulus/README.md +++ b/cumulus/README.md @@ -4,7 +4,7 @@ This repository contains both the Cumulus SDK and also specific chains implemented on top of this SDK. -If you only want to run a **Polkadot Parachain Node**, check out our [container section](./docs/container.md). +If you only want to run a **Polkadot Parachain Node**, check out our [container section](./docs/contributor/container.md). ## Cumulus SDK @@ -34,7 +34,7 @@ A Polkadot [collator](https://wiki.polkadot.network/docs/en/learn-collator) for `polkadot-parachain` binary (previously called `polkadot-collator`). You may run `polkadot-parachain` locally after building it or using one of the container option described -[here](./docs/container.md). +[here](./docs/contributor/container.md). ### Relay Chain Interaction To operate a parachain node, a connection to the corresponding relay chain is necessary. This can be achieved in one of @@ -242,7 +242,7 @@ Once the executable is built, launch collators for each parachain (repeat once e ./target/release/polkadot-parachain --chain $CHAIN --validator ``` -You can also build [using a container](./docs/container.md). +You can also build [using a container](./docs/contributor/container.md). 
### Parachains diff --git a/developer-hub/Cargo.toml b/developer-hub/Cargo.toml deleted file mode 100644 index 56f279c7e10..00000000000 --- a/developer-hub/Cargo.toml +++ /dev/null @@ -1,66 +0,0 @@ -[package] -name = "developer-hub" -description = "The one stop shop for developers of the polakdot-sdk" -license = "GPL-3.0-or-later WITH Classpath-exception-2.0" -homepage = "paritytech.github.io" -repository.workspace = true -authors.workspace = true -edition.workspace = true -# This crate is not publish-able to crates.io for now because of docify. -publish = false -version = "0.0.1" - -[dependencies] -# Needed for all FRAME-based code -parity-scale-codec = { version = "3.0.0", default-features = false } -scale-info = { version = "2.6.0", default-features = false } -frame = { path = "../substrate/frame", features = ["experimental", "runtime"] } -pallet-examples = { path = "../substrate/frame/examples" } -pallet-default-config-example = { path = "../substrate/frame/examples/default-config" } - -# How we build docs in rust-docs -simple-mermaid = { git = "https://github.com/kianenigma/simple-mermaid.git", branch = "main" } -docify = "0.2.6" - -# Polkadot SDK deps, typically all should only be scope such that we can link to their doc item. -node-cli = { package = "staging-node-cli", path = "../substrate/bin/node/cli" } -kitchensink-runtime = { path = "../substrate/bin/node/runtime" } -chain-spec-builder = { package = "staging-chain-spec-builder", path = "../substrate/bin/utils/chain-spec-builder" } -subkey = { path = "../substrate/bin/utils/subkey" } - -# Substrate -sc-network = { path = "../substrate/client/network" } -sc-rpc-api = { path = "../substrate/client/rpc-api" } -sc-rpc = { path = "../substrate/client/rpc" } -sc-client-db = { path = "../substrate/client/db" } -sc-cli = { path = "../substrate/client/cli" } -sc-consensus-aura = { path = "../substrate/client/consensus/aura" } -sc-consensus-babe = { path = "../substrate/client/consensus/babe" } -sc-consensus-grandpa = { path = "../substrate/client/consensus/grandpa" } -sc-consensus-beefy = { path = "../substrate/client/consensus/beefy" } -sc-consensus-manual-seal = { path = "../substrate/client/consensus/manual-seal" } -sc-consensus-pow = { path = "../substrate/client/consensus/pow" } -substrate-wasm-builder = { path = "../substrate/utils/wasm-builder" } - -# Cumulus -cumulus-pallet-aura-ext = { path = "../cumulus/pallets/aura-ext" } -cumulus-pallet-parachain-system = { path = "../cumulus/pallets/parachain-system", features = [ - "parameterized-consensus-hook", -] } -parachain-info = { package = "staging-parachain-info", path = "../cumulus/parachains/pallets/parachain-info" } -pallet-aura = { path = "../substrate/frame/aura", default-features = false } -pallet-timestamp = { path = "../substrate/frame/timestamp" } - -# Primitives -sp-io = { path = "../substrate/primitives/io" } -sp-api = { path = "../substrate/primitives/api" } -sp-core = { path = "../substrate/primitives/core" } -sp-keyring = { path = "../substrate/primitives/keyring" } -sp-runtime = { path = "../substrate/primitives/runtime" } - -[dev-dependencies] -parity-scale-codec = "3.6.5" -scale-info = "2.9.0" - -[features] -experimental = ["pallet-aura/experimental"] diff --git a/docs/CODE_OF_CONDUCT.md b/docs/contributor/CODE_OF_CONDUCT.md similarity index 100% rename from docs/CODE_OF_CONDUCT.md rename to docs/contributor/CODE_OF_CONDUCT.md diff --git a/docs/CONTRIBUTING.md b/docs/contributor/CONTRIBUTING.md similarity index 100% rename from docs/CONTRIBUTING.md rename to 
docs/contributor/CONTRIBUTING.md diff --git a/docs/DEPRECATION_CHECKLIST.md b/docs/contributor/DEPRECATION_CHECKLIST.md similarity index 98% rename from docs/DEPRECATION_CHECKLIST.md rename to docs/contributor/DEPRECATION_CHECKLIST.md index fccf93d2273..ffb99e1ec3f 100644 --- a/docs/DEPRECATION_CHECKLIST.md +++ b/docs/contributor/DEPRECATION_CHECKLIST.md @@ -45,7 +45,7 @@ We also need [https://docs.substrate.io/](https://docs.substrate.io/) to be upda ## Announce the deprecation and removal -**At minimum they should be noted in the release log.** Please see how to document a PR [here](https://github.com/paritytech/polkadot-sdk/blob/master/docs/CONTRIBUTING.md#documentation). +**At minimum they should be noted in the release log.** Please see how to document a PR [here](https://github.com/paritytech/polkadot-sdk/blob/master/docs/contributor/CONTRIBUTING.md#documentation). There you can give instructions based on the audience and tell them what they need to do to upgrade the code. Some breaking changes have a bigger impact than others. When the impact is big the release note is not enough, though diff --git a/docs/DOCUMENTATION_GUIDELINES.md b/docs/contributor/DOCUMENTATION_GUIDELINES.md similarity index 100% rename from docs/DOCUMENTATION_GUIDELINES.md rename to docs/contributor/DOCUMENTATION_GUIDELINES.md diff --git a/docs/PULL_REQUEST_TEMPLATE.md b/docs/contributor/PULL_REQUEST_TEMPLATE.md similarity index 96% rename from docs/PULL_REQUEST_TEMPLATE.md rename to docs/contributor/PULL_REQUEST_TEMPLATE.md index c93ac90c7e3..79a036a235a 100644 --- a/docs/PULL_REQUEST_TEMPLATE.md +++ b/docs/contributor/PULL_REQUEST_TEMPLATE.md @@ -3,7 +3,7 @@ ✄ ----------------------------------------------------------------------------- Thank you for your Pull Request! 🙏 Please make sure it follows the contribution guidelines outlined in -[this document](https://github.com/paritytech/polkadot-sdk/blob/master/docs/CONTRIBUTING.md) and fill +[this document](https://github.com/paritytech/polkadot-sdk/blob/master/docs/contributor/CONTRIBUTING.md) and fill out the sections below. Once you're ready to submit your PR for review, please delete this section and leave only the text under the "Description" heading. 
diff --git a/docs/SECURITY.md b/docs/contributor/SECURITY.md similarity index 100% rename from docs/SECURITY.md rename to docs/contributor/SECURITY.md diff --git a/docs/STYLE_GUIDE.md b/docs/contributor/STYLE_GUIDE.md similarity index 100% rename from docs/STYLE_GUIDE.md rename to docs/contributor/STYLE_GUIDE.md diff --git a/docs/container.md b/docs/contributor/container.md similarity index 100% rename from docs/container.md rename to docs/contributor/container.md diff --git a/docs/docker.md b/docs/contributor/docker.md similarity index 100% rename from docs/docker.md rename to docs/contributor/docker.md diff --git a/docs/markdown_linting.md b/docs/contributor/markdown_linting.md similarity index 100% rename from docs/markdown_linting.md rename to docs/contributor/markdown_linting.md diff --git a/docs/prdoc.md b/docs/contributor/prdoc.md similarity index 100% rename from docs/prdoc.md rename to docs/contributor/prdoc.md diff --git a/docs/mermaid/IA.mmd b/docs/mermaid/IA.mmd index 8fcb74fa0a9..93d3e92814c 100644 --- a/docs/mermaid/IA.mmd +++ b/docs/mermaid/IA.mmd @@ -1,5 +1,5 @@ flowchart - parity[paritytech.github.io] --> devhub[developer_hub] + parity[paritytech.github.io] --> devhub[polkadot_sdk_docs] devhub --> polkadot_sdk devhub --> reference_docs diff --git a/docs/sdk/Cargo.toml b/docs/sdk/Cargo.toml new file mode 100644 index 00000000000..f5bfd80fd10 --- /dev/null +++ b/docs/sdk/Cargo.toml @@ -0,0 +1,66 @@ +[package] +name = "polkadot-sdk-docs" +description = "The one stop shop for developers of the polakdot-sdk" +license = "GPL-3.0-or-later WITH Classpath-exception-2.0" +homepage = "paritytech.github.io" +repository.workspace = true +authors.workspace = true +edition.workspace = true +# This crate is not publish-able to crates.io for now because of docify. +publish = false +version = "0.0.1" + +[dependencies] +# Needed for all FRAME-based code +parity-scale-codec = { version = "3.0.0", default-features = false } +scale-info = { version = "2.6.0", default-features = false } +frame = { path = "../../substrate/frame", features = ["experimental", "runtime"] } +pallet-examples = { path = "../../substrate/frame/examples" } +pallet-default-config-example = { path = "../../substrate/frame/examples/default-config" } + +# How we build docs in rust-docs +simple-mermaid = { git = "https://github.com/kianenigma/simple-mermaid.git", branch = "main" } +docify = "0.2.6" + +# Polkadot SDK deps, typically all should only be scope such that we can link to their doc item. 
+node-cli = { package = "staging-node-cli", path = "../../substrate/bin/node/cli" } +kitchensink-runtime = { path = "../../substrate/bin/node/runtime" } +chain-spec-builder = { package = "staging-chain-spec-builder", path = "../../substrate/bin/utils/chain-spec-builder" } +subkey = { path = "../../substrate/bin/utils/subkey" } + +# Substrate +sc-network = { path = "../../substrate/client/network" } +sc-rpc-api = { path = "../../substrate/client/rpc-api" } +sc-rpc = { path = "../../substrate/client/rpc" } +sc-client-db = { path = "../../substrate/client/db" } +sc-cli = { path = "../../substrate/client/cli" } +sc-consensus-aura = { path = "../../substrate/client/consensus/aura" } +sc-consensus-babe = { path = "../../substrate/client/consensus/babe" } +sc-consensus-grandpa = { path = "../../substrate/client/consensus/grandpa" } +sc-consensus-beefy = { path = "../../substrate/client/consensus/beefy" } +sc-consensus-manual-seal = { path = "../../substrate/client/consensus/manual-seal" } +sc-consensus-pow = { path = "../../substrate/client/consensus/pow" } +substrate-wasm-builder = { path = "../../substrate/utils/wasm-builder" } + +# Cumulus +cumulus-pallet-aura-ext = { path = "../../cumulus/pallets/aura-ext" } +cumulus-pallet-parachain-system = { path = "../../cumulus/pallets/parachain-system", features = [ + "parameterized-consensus-hook", +] } +parachain-info = { package = "staging-parachain-info", path = "../../cumulus/parachains/pallets/parachain-info" } +pallet-aura = { path = "../../substrate/frame/aura", default-features = false } +pallet-timestamp = { path = "../../substrate/frame/timestamp" } + +# Primitives +sp-io = { path = "../../substrate/primitives/io" } +sp-api = { path = "../../substrate/primitives/api" } +sp-core = { path = "../../substrate/primitives/core" } +sp-keyring = { path = "../../substrate/primitives/keyring" } +sp-runtime = { path = "../../substrate/primitives/runtime" } + +[dev-dependencies] +parity-scale-codec = "3.6.5" +scale-info = "2.9.0" + +[features] +experimental = ["pallet-aura/experimental"] diff --git a/developer-hub/headers/toc.html b/docs/sdk/headers/toc.html similarity index 82% rename from developer-hub/headers/toc.html rename to docs/sdk/headers/toc.html index d4d017eb207..a4a074cb4f3 100644 --- a/developer-hub/headers/toc.html +++ b/docs/sdk/headers/toc.html @@ -1,15 +1,15 @@
Changelog

Sourced from secp256k1's changelog.

0.28.0 - 2023-10-23

  • Add bindings to the ElligatorSwift implementation #627
  • Depend on recent release of bitcoin_hashes v0.13.0 #621
  • Add a verify function to PublicKey #618
  • Add serialize function for schnorr::Signature #607
  • Bump MSRV to 1.48 #595
  • Remove implementations of PartialEq, Eq, PartialOrd, Ord, and Hash from the impl_array_newtype macro. Users will now need to derive these traits if they are wanted.

0.27.0 - 2023-03-15

0.26.0 - 2022-12-19

  • Update libsecp256k1 to v0.2.0

0.25.0 - 2022-12-07

0.24.1 - 2022-10-25

0.24.0 - 2022-07-20

0.23.4 - 2022-07-14

0.23.3 - 2022-06-29

0.23.2 - 2022-06-27

... (truncated)
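
For reference, the pattern that the runtime diffs above keep applying is the `derive_impl` prelude mechanism: a `*DefaultConfig` struct supplies the boilerplate associated types, and each runtime's `frame_system::Config` impl only spells out what it actually overrides. A minimal sketch, assuming the usual runtime aliases (`Runtime`, `Block`, `AccountId`, `Nonce`, `Hash`, `Balance`, and the `parameter_types!` constants such as `Version`, `BlockHashCount`, `RuntimeBlockWeights`, `RuntimeBlockLength`, `SS58Prefix`) are defined elsewhere in the runtime:

```rust
use frame_support::derive_impl;

// The prelude (here the parachain one) fills in the types the hunks above
// delete from each runtime: RuntimeCall, RuntimeEvent, RuntimeOrigin,
// PalletInfo, Hashing, Lookup, OnNewAccount, OnKilledAccount, BaseCallFilter,
// and so on.
#[derive_impl(frame_system::config_preludes::ParaChainDefaultConfig as frame_system::DefaultConfig)]
impl frame_system::Config for Runtime {
	// Only the chain-specific types remain spelled out.
	type Block = Block;
	type AccountId = AccountId;
	type Nonce = Nonce;
	type Hash = Hash;
	type AccountData = pallet_balances::AccountData<Balance>;
	type Version = Version;
	type BlockHashCount = BlockHashCount;
	type DbWeight = RocksDbWeight;
	type BlockWeights = RuntimeBlockWeights;
	type BlockLength = RuntimeBlockLength;
	type SS58Prefix = SS58Prefix;
}
```

Solo-chain and relay-chain runtimes follow the same shape with `SolochainDefaultConfig` and `RelayChainDefaultConfig`, overriding defaults such as `BaseCallFilter` or `Lookup` again only where the prelude value is not what the chain needs.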